//! A shared, generic implementation of the _Current_ QMDB.
//!
//! The impl blocks in this file define shared functionality across all Current QMDB variants.

use crate::{
    bitmap,
    index::Unordered as UnorderedIndex,
    journal::{
        contiguous::{Contiguous, MutableContiguous},
        Error as JournalError,
    },
    mmr::{
        self,
        grafting::{Hasher as GraftingHasher, Storage as GraftingStorage},
        hasher::Hasher as MmrHasher,
        journaled::Mmr,
        Location, Proof, StandardHasher,
    },
    qmdb::{
        any::{
            self,
            operation::{update::Update, Operation},
            ValueEncoding,
        },
        current::proof::RangeProof,
        store::{self, LogStore, MerkleizedStore, PrunableStore},
        DurabilityState, Durable, Error, NonDurable,
    },
    Persistable,
};
use commonware_codec::{Codec, CodecShared};
use commonware_cryptography::{Digest, DigestOf, Hasher};
use commonware_runtime::{Clock, Metrics, Storage};
use commonware_utils::{bitmap::Prunable as PrunableBitMap, Array};
use core::{num::NonZeroU64, ops::Range};

/// Get the grafting height for a bitmap with chunk size determined by N.
const fn grafting_height<const N: usize>() -> u32 {
    PrunableBitMap::<N>::CHUNK_SIZE_BITS.trailing_zeros()
}

mod private {
    pub trait Sealed {}
}

/// Trait for valid [Db] type states.
pub trait State<D: Digest>: private::Sealed + Sized + Send + Sync {
    /// The merkleization type state for the inner `any::db::Db`.
    type AnyState: mmr::mem::State;

    /// The merkleization type state for the inner [bitmap::BitMap].
    type BitMapState: bitmap::State;
}

/// Merkleized state: the database has been merkleized and the root is cached.
pub struct Merkleized<D: Digest> {
    /// The cached root of the database (combining bitmap and operations MMR).
    pub(super) root: D,
}

impl<D: Digest> private::Sealed for Merkleized<D> {}

impl<D: Digest> State<D> for Merkleized<D> {
    type AnyState = mmr::mem::Clean<D>;
    type BitMapState = bitmap::Merkleized<D>;
}

/// Unmerkleized state: the database has pending changes not yet merkleized.
pub struct Unmerkleized;

impl private::Sealed for Unmerkleized {}

impl<D: Digest> State<D> for Unmerkleized {
    type AnyState = mmr::mem::Dirty;
    type BitMapState = bitmap::Unmerkleized;
}

/// A Current QMDB implementation generic over ordered/unordered keys and variable/fixed values.
pub struct Db<
    E: Storage + Clock + Metrics,
    C: Contiguous,
    I: UnorderedIndex,
    H: Hasher,
    U: Send + Sync,
    const N: usize,
    S: State<DigestOf<H>> = Merkleized<DigestOf<H>>,
    D: DurabilityState = Durable,
> {
    /// An authenticated database that provides the ability to prove whether a key ever had a
    /// specific value.
    pub(super) any: any::db::Db<E, C, I, H, U, S::AnyState, D>,

    /// The bitmap over the activity status of each operation. Supports augmenting [Db] proofs in
    /// order to further prove whether a key _currently_ has a specific value.
    pub(super) status: bitmap::BitMap<E, H, N, S::BitMapState>,

    /// Type state based on whether the database is [Merkleized] or [Unmerkleized].
    pub(super) state: S,
}
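// A minimal sketch of the chunk-size/height relationship, assuming `N` is the chunk size in
// bytes: a (hypothetical) N = 32 gives CHUNK_SIZE_BITS = 256 = 2^8, so bitmap chunks are grafted
// into the operations MMR at height 8. Illustrative only.
#[cfg(test)]
mod grafting_height_example {
    use super::grafting_height;

    #[test]
    fn height_of_32_byte_chunks() {
        // 32 bytes = 256 bits, and log2(256) = 8.
        assert_eq!(grafting_height::<32>(), 8);
    }
}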
// Functionality shared across all DB states, such as most non-mutating operations.
impl<E, K, V, U, C, I, H, const N: usize, S, D> Db<E, C, I, H, U, N, S, D>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: Contiguous<Item = Operation<K, V>>,
    I: UnorderedIndex,
    H: Hasher,
    S: State<DigestOf<H>>,
    D: DurabilityState,
    Operation<K, V>: Codec,
{
    /// Return the inactivity floor location. This is the location before which all operations are
    /// known to be inactive. Operations before this point can be safely pruned.
    pub const fn inactivity_floor_loc(&self) -> Location {
        self.any.inactivity_floor_loc()
    }

    /// Whether the snapshot currently has no active keys.
    pub const fn is_empty(&self) -> bool {
        self.any.is_empty()
    }

    /// Get the metadata associated with the last commit.
    pub async fn get_metadata(&self) -> Result<Option<V::Value>, Error> {
        self.any.get_metadata().await
    }

    /// Get the level of the base MMR into which we are grafting.
    ///
    /// This value is log2 of the chunk size in bits. Since we assume the chunk size is a power of
    /// 2, we compute this from trailing_zeros.
    pub const fn grafting_height() -> u32 {
        grafting_height::<N>()
    }

    /// Return true if the given sequence of `ops` were applied starting at location `start_loc` in
    /// the log with the provided root.
    pub fn verify_range_proof(
        hasher: &mut H,
        proof: &RangeProof<H::Digest>,
        start_loc: Location,
        ops: &[Operation<K, V>],
        chunks: &[[u8; N]],
        root: &H::Digest,
    ) -> bool {
        let height = Self::grafting_height();
        proof.verify(hasher, height, start_loc, ops, chunks, root)
    }
}

// Functionality shared across Merkleized states, such as the ability to prune the log and
// retrieve the state root.
impl<E, K, V, U, C, I, H, const N: usize, D> Db<E, C, I, H, U, N, Merkleized<DigestOf<H>>, D>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: MutableContiguous<Item = Operation<K, V>> + Persistable,
    I: UnorderedIndex,
    H: Hasher,
    D: DurabilityState,
    Operation<K, V>: Codec,
{
    /// Return the cached root of the database.
    pub const fn root(&self) -> H::Digest {
        self.state.root
    }

    /// Returns a proof that the specified range of operations are part of the database, along with
    /// the operations from the range. A truncated range (from hitting the max) can be detected by
    /// looking at the length of the returned operations vector. Also returns the bitmap chunks
    /// required to verify the proof.
    ///
    /// # Errors
    ///
    /// Returns [mmr::Error::LocationOverflow] if `start_loc` > [mmr::MAX_LOCATION].
    /// Returns [mmr::Error::RangeOutOfBounds] if `start_loc` >= number of leaves in the MMR.
    pub async fn range_proof(
        &self,
        hasher: &mut H,
        start_loc: Location,
        max_ops: NonZeroU64,
    ) -> Result<(RangeProof<H::Digest>, Vec<Operation<K, V>>, Vec<[u8; N]>), Error> {
        RangeProof::<H::Digest>::new_with_ops(
            hasher,
            &self.status,
            Self::grafting_height(),
            &self.any.log.mmr,
            &self.any.log,
            start_loc,
            max_ops,
        )
        .await
    }

    /// Prunes historical operations prior to `prune_loc`. This does not affect the db's root or
    /// snapshot.
    ///
    /// # Errors
    ///
    /// - Returns [Error::PruneBeyondMinRequired] if `prune_loc` > inactivity floor.
    /// - Returns [mmr::Error::LocationOverflow] if `prune_loc` > [mmr::MAX_LOCATION].
    pub async fn prune(&mut self, prune_loc: Location) -> Result<(), Error> {
        // Write the pruned portion of the bitmap to disk *first* to ensure recovery in case of
        // failure during pruning. If we don't do this, we may not be able to recover the bitmap
        // because it may require replaying of pruned operations.
        self.status.write_pruned().await?;

        self.any.prune(prune_loc).await
    }
}
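// Sketch of the expected proof round-trip, written against the bounds of the impls above. The
// helper itself is illustrative (not part of the API): a real verifier would receive the proof,
// operations, chunks, and root over the wire rather than holding the database.
#[cfg(test)]
#[allow(dead_code)]
async fn range_proof_roundtrip_example<E, K, V, U, C, I, H, const N: usize>(
    db: &Db<E, C, I, H, U, N, Merkleized<DigestOf<H>>, Durable>,
    hasher: &mut H,
    start_loc: Location,
    max_ops: NonZeroU64,
) -> Result<bool, Error>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: MutableContiguous<Item = Operation<K, V>> + Persistable,
    I: UnorderedIndex,
    H: Hasher,
    Operation<K, V>: Codec,
{
    // Prover side: proof over up to `max_ops` operations starting at `start_loc`, plus the
    // operations themselves and the bitmap chunks a verifier needs.
    let (proof, ops, chunks) = db.range_proof(hasher, start_loc, max_ops).await?;

    // Verifier side: only the proof, the claimed operations/chunks, and the root are required.
    let root = db.root();
    Ok(Db::<E, C, I, H, U, N, Merkleized<DigestOf<H>>, Durable>::verify_range_proof(
        hasher, &proof, start_loc, &ops, &chunks, &root,
    ))
}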
// Functionality specific to the (Merkleized, Durable) state, such as the ability to persist the
// database.
impl<E, K, V, U, C, I, H, const N: usize> Db<E, C, I, H, U, N, Merkleized<DigestOf<H>>, Durable>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: MutableContiguous<Item = Operation<K, V>> + Persistable,
    I: UnorderedIndex,
    H: Hasher,
    Operation<K, V>: Codec,
{
    /// Sync all database state to disk.
    pub async fn sync(&mut self) -> Result<(), Error> {
        self.any.sync().await?;

        // Write the bitmap pruning boundary to disk so that next startup doesn't have to
        // re-Merkleize the inactive portion up to the inactivity floor.
        self.status.write_pruned().await.map_err(Into::into)
    }

    /// Destroy the db, removing all data from disk.
    pub async fn destroy(self) -> Result<(), Error> {
        // Clean up bitmap metadata partition.
        self.status.destroy().await?;

        // Clean up Any components (MMR and log).
        self.any.destroy().await
    }

    /// Convert this database into a mutable state.
    pub fn into_mutable(self) -> Db<E, C, I, H, U, N, Unmerkleized, NonDurable> {
        Db {
            any: self.any.into_mutable(),
            status: self.status.into_dirty(),
            state: Unmerkleized,
        }
    }
}

// Functionality shared across Unmerkleized states.
impl<E, K, V, U, C, I, H, const N: usize, D> Db<E, C, I, H, U, N, Unmerkleized, D>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: Contiguous<Item = Operation<K, V>>,
    I: UnorderedIndex,
    H: Hasher,
    D: DurabilityState,
    Operation<K, V>: Codec,
{
    /// Merkleize the database and transition to the provable state.
    pub async fn into_merkleized(
        self,
    ) -> Result<Db<E, C, I, H, U, N, Merkleized<DigestOf<H>>, D>, Error> {
        // Merkleize the any db.
        let mut any = self.any.into_merkleized();

        // Merkleize the bitmap using the clean MMR.
        let hasher = &mut any.log.hasher;
        let mut status = merkleize_grafted_bitmap(hasher, self.status, &any.log.mmr).await?;

        // Prune the bitmap of no-longer-necessary bits.
        status.prune_to_bit(*any.inactivity_floor_loc)?;

        // Compute and cache the root.
        let root = root(hasher, &status, &any.log.mmr).await?;

        Ok(Db {
            any,
            status,
            state: Merkleized { root },
        })
    }
}

// Functionality specific to the (Unmerkleized, Durable) state.
impl<E, K, V, U, C, I, H, const N: usize> Db<E, C, I, H, U, N, Unmerkleized, Durable>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: Contiguous<Item = Operation<K, V>>,
    I: UnorderedIndex,
    H: Hasher,
    Operation<K, V>: Codec,
{
    /// Convert this database into a mutable state.
    pub fn into_mutable(self) -> Db<E, C, I, H, U, N, Unmerkleized, NonDurable> {
        Db {
            any: self.any.into_mutable(),
            status: self.status,
            state: Unmerkleized,
        }
    }
}
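// Sketch of the merkleize/mutate cycle implied by the transitions above: merkleizing a durable
// database exposes its root, and mutating it again requires explicitly dropping back to the
// (Unmerkleized, NonDurable) state. Illustrative only; this helper is not part of the API.
#[cfg(test)]
#[allow(dead_code)]
async fn merkleize_cycle_example<E, K, V, U, C, I, H, const N: usize>(
    db: Db<E, C, I, H, U, N, Unmerkleized, Durable>,
) -> Result<(H::Digest, Db<E, C, I, H, U, N, Unmerkleized, NonDurable>), Error>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: MutableContiguous<Item = Operation<K, V>> + Persistable,
    I: UnorderedIndex,
    H: Hasher,
    Operation<K, V>: Codec,
{
    // Merkleize: computes the grafted root and caches it in the type state.
    let db = db.into_merkleized().await?;
    let root = db.root();

    // Returning to the mutable state discards the cached root.
    Ok((root, db.into_mutable()))
}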
// Functionality specific to the (Unmerkleized, NonDurable) state.
impl<E, K, V, U, C, I, H, const N: usize> Db<E, C, I, H, U, N, Unmerkleized, NonDurable>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: MutableContiguous<Item = Operation<K, V>> + Persistable,
    I: UnorderedIndex,
    H: Hasher,
    Operation<K, V>: Codec,
{
    /// Raises the inactivity floor according to policy, followed by appending a commit operation
    /// with the provided `metadata` and the new inactivity floor value. Returns the `[start_loc,
    /// end_loc)` location range of committed operations.
    async fn apply_commit_op(
        &mut self,
        metadata: Option<V::Value>,
    ) -> Result<Range<Location>, Error> {
        let start_loc = self.any.last_commit_loc + 1;

        // Inactivate the current commit operation.
        self.status.set_bit(*self.any.last_commit_loc, false);

        // Raise the inactivity floor by taking `self.steps` steps, plus 1 to account for the
        // previous commit becoming inactive.
        let inactivity_floor_loc = self.any.raise_floor_with_bitmap(&mut self.status).await?;

        // Append the commit operation with the new floor and tag it as active in the bitmap.
        self.status.push(true);
        let commit_op = Operation::CommitFloor(metadata, inactivity_floor_loc);
        self.any.apply_commit_op(commit_op).await?;

        Ok(start_loc..self.any.log.bounds().end)
    }

    /// Commit any pending operations to the database, ensuring their durability upon return.
    /// This transitions to the Durable state without merkleizing. Returns the committed database
    /// and the `[start_loc, end_loc)` range of committed operations.
    pub async fn commit(
        mut self,
        metadata: Option<V::Value>,
    ) -> Result<(Db<E, C, I, H, U, N, Unmerkleized, Durable>, Range<Location>), Error> {
        let range = self.apply_commit_op(metadata).await?;

        // Transition to the Durable state without merkleizing.
        let any = any::db::Db {
            log: self.any.log,
            inactivity_floor_loc: self.any.inactivity_floor_loc,
            last_commit_loc: self.any.last_commit_loc,
            snapshot: self.any.snapshot,
            durable_state: store::Durable,
            active_keys: self.any.active_keys,
            _update: core::marker::PhantomData,
        };

        Ok((
            Db {
                any,
                status: self.status,
                state: Unmerkleized,
            },
            range,
        ))
    }
}

// Functionality specific to the (Merkleized, NonDurable) state.
impl<E, K, V, U, C, I, H, const N: usize>
    Db<E, C, I, H, U, N, Merkleized<DigestOf<H>>, NonDurable>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: MutableContiguous<Item = Operation<K, V>> + Persistable,
    I: UnorderedIndex,
    H: Hasher,
    Operation<K, V>: Codec,
{
    /// Convert this database into a mutable state.
    pub fn into_mutable(self) -> Db<E, C, I, H, U, N, Unmerkleized, NonDurable> {
        Db {
            any: self.any.into_mutable(),
            status: self.status.into_dirty(),
            state: Unmerkleized,
        }
    }
}

impl<E, K, V, U, C, I, H, const N: usize> Persistable
    for Db<E, C, I, H, U, N, Merkleized<DigestOf<H>>, Durable>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: MutableContiguous<Item = Operation<K, V>> + Persistable,
    I: UnorderedIndex,
    H: Hasher,
    Operation<K, V>: Codec,
{
    type Error = Error;

    async fn commit(&mut self) -> Result<(), Error> {
        // No-op, DB already in recoverable state.
        Ok(())
    }

    async fn sync(&mut self) -> Result<(), Error> {
        self.sync().await
    }

    async fn destroy(self) -> Result<(), Error> {
        self.destroy().await
    }
}

// MerkleizedStore for Merkleized states (both Durable and NonDurable).
// TODO(https://github.com/commonwarexyz/monorepo/issues/2560): This is broken -- it's computing
// proofs only over the any db mmr, not the grafted mmr, so they won't validate against the
// grafted root.
impl<E, K, V, U, C, I, H, const N: usize, D> MerkleizedStore
    for Db<E, C, I, H, U, N, Merkleized<DigestOf<H>>, D>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: MutableContiguous<Item = Operation<K, V>> + Persistable,
    I: UnorderedIndex,
    H: Hasher,
    D: DurabilityState,
    Operation<K, V>: Codec,
{
    type Digest = H::Digest;
    type Operation = Operation<K, V>;

    fn root(&self) -> H::Digest {
        self.root()
    }

    async fn historical_proof(
        &self,
        historical_size: Location,
        start_loc: Location,
        max_ops: NonZeroU64,
    ) -> Result<(Proof<H::Digest>, Vec<Self::Operation>), Error> {
        self.any
            .historical_proof(historical_size, start_loc, max_ops)
            .await
    }
}

impl<E, K, V, U, C, I, H, const N: usize, S, D> LogStore for Db<E, C, I, H, U, N, S, D>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: Contiguous<Item = Operation<K, V>>,
    I: UnorderedIndex,
    H: Hasher,
    S: State<DigestOf<H>>,
    D: DurabilityState,
    Operation<K, V>: Codec,
{
    type Value = V::Value;

    fn bounds(&self) -> std::ops::Range<Location> {
        self.any.bounds()
    }

    fn inactivity_floor_loc(&self) -> Location {
        self.inactivity_floor_loc()
    }

    async fn get_metadata(&self) -> Result<Option<V::Value>, Error> {
        self.get_metadata().await
    }

    fn is_empty(&self) -> bool {
        self.is_empty()
    }
}

impl<E, K, V, U, C, I, H, const N: usize, D> PrunableStore
    for Db<E, C, I, H, U, N, Merkleized<DigestOf<H>>, D>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: MutableContiguous<Item = Operation<K, V>> + Persistable,
    I: UnorderedIndex,
    H: Hasher,
    D: DurabilityState,
    Operation<K, V>: Codec,
{
    async fn prune(&mut self, prune_loc: Location) -> Result<(), Error> {
        self.prune(prune_loc).await
    }
}
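// End-to-end sketch combining the states above: durably commit pending operations, merkleize to
// obtain a provable root, then prune everything below the inactivity floor (which affects neither
// the root nor the snapshot). Illustrative only; this helper is not part of the API.
#[cfg(test)]
#[allow(dead_code)]
async fn commit_merkleize_prune_example<E, K, V, U, C, I, H, const N: usize>(
    db: Db<E, C, I, H, U, N, Unmerkleized, NonDurable>,
    metadata: Option<V::Value>,
) -> Result<H::Digest, Error>
where
    E: Storage + Clock + Metrics,
    K: Array,
    V: ValueEncoding,
    U: Update<K, V>,
    C: MutableContiguous<Item = Operation<K, V>> + Persistable,
    I: UnorderedIndex,
    H: Hasher,
    Operation<K, V>: Codec,
{
    // Commit raises the inactivity floor and appends a CommitFloor operation; the database is
    // durable upon return but no root has been computed yet.
    let (db, _committed) = db.commit(metadata).await?;

    // Merkleize to obtain the (Merkleized, Durable) state with a cached root.
    let mut db = db.into_merkleized().await?;

    // Pruning at (or below) the inactivity floor is always safe.
    let floor = db.inactivity_floor_loc();
    db.prune(floor).await?;

    Ok(db.root())
}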
/// Return the root of the current QMDB represented by the provided mmr and bitmap.
pub(super) async fn root<E: Storage + Clock + Metrics, H: Hasher, const N: usize>(
    hasher: &mut StandardHasher<H>,
    status: &bitmap::MerkleizedBitMap<E, H, N>,
    mmr: &Mmr<E, mmr::mem::Clean<DigestOf<H>>>,
) -> Result<H::Digest, Error> {
    let grafted_mmr = GraftingStorage::<'_, H, _, _>::new(status, mmr, grafting_height::<N>());
    let mmr_root = grafted_mmr.root(hasher).await?;

    // If we are on a chunk boundary, then the mmr_root fully captures the state of the DB.
    let (last_chunk, next_bit) = status.last_chunk();
    if next_bit == PrunableBitMap::<N>::CHUNK_SIZE_BITS {
        // Last chunk is complete, no partial chunk to add.
        return Ok(mmr_root);
    }

    // There are bits in an uncommitted (partial) chunk, so we need to incorporate that information
    // into the root digest to fully capture the database state. We do so by hashing the mmr root
    // along with the number of bits within the last chunk and the digest of the last chunk.
    hasher.inner().update(last_chunk);
    let last_chunk_digest = hasher.inner().finalize();

    Ok(bitmap::partial_chunk_root::<H>(
        hasher.inner(),
        &mmr_root,
        next_bit,
        &last_chunk_digest,
    ))
}

/// Consumes an `UnmerkleizedBitMap`, performs merkleization using the provided hasher and MMR
/// storage, and returns a `MerkleizedBitMap` containing the merkleized result.
///
/// # Arguments
/// * `hasher` - The hasher used for merkleization.
/// * `status` - The `UnmerkleizedBitMap` to be merkleized. Ownership is taken.
/// * `mmr` - The MMR storage used for grafting.
pub(super) async fn merkleize_grafted_bitmap<E, H, const N: usize>(
    hasher: &mut StandardHasher<H>,
    status: bitmap::UnmerkleizedBitMap<E, H, N>,
    mmr: &impl mmr::storage::Storage<H::Digest>,
) -> Result<bitmap::MerkleizedBitMap<E, H, N>, Error>
where
    E: Storage + Clock + Metrics,
    H: Hasher,
{
    let mut grafter = GraftingHasher::new(hasher, grafting_height::<N>());
    grafter
        .load_grafted_digests(&status.dirty_chunks(), mmr)
        .await?;
    status.merkleize(&mut grafter).await.map_err(Into::into)
}
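// Verifier-side sketch mirroring the tail of `root` above: given the grafted MMR root and the
// (possibly partial) last bitmap chunk, recompute the database root. The integer type of
// `next_bit` is an assumption here (taken to match `PrunableBitMap::CHUNK_SIZE_BITS`), and a real
// verifier would obtain these values from a proof rather than from local state.
#[cfg(test)]
#[allow(dead_code)]
fn recompute_root_example<H: Hasher, const N: usize>(
    hasher: &mut StandardHasher<H>,
    mmr_root: H::Digest,
    last_chunk: &[u8; N],
    next_bit: u64,
) -> H::Digest {
    if next_bit == PrunableBitMap::<N>::CHUNK_SIZE_BITS {
        // The last chunk is complete, so the MMR root alone captures the state.
        return mmr_root;
    }

    // Fold the partial chunk's digest and its bit count into the final root, exactly as `root`
    // does on the prover side.
    hasher.inner().update(last_chunk);
    let last_chunk_digest = hasher.inner().finalize();
    bitmap::partial_chunk_root::<H>(hasher.inner(), &mmr_root, next_bit, &last_chunk_digest)
}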