//! Generic in-memory Merkle structure, parameterized by [`Family`].
//!
//! Both MMR and MMB share the same node storage, pruning, root computation, and proof logic.
//! This module provides the unified [`Mem`] struct; per-family modules re-export it as
//! `mmr::mem::Mmr` and `mmb::mem::Mmb` via type aliases.

// NOTE(review): generic parameter lists appear to have been stripped from this copy of the
// file (e.g. `Vec`, `Position`, `BTreeMap, D>`, `Result>` are all missing their `<...>`
// arguments) — this looks like an extraction artifact. Confirm against the canonical source
// before building; the comments below document the code as written.

use crate::merkle::{
    batch, hasher::Hasher, proof as merkle_proof, Error, Family, Location, Position, Proof,
    Readable,
};
use alloc::{
    collections::{BTreeMap, VecDeque},
    vec::Vec,
};
use commonware_cryptography::Digest;
use core::ops::Range;

/// Configuration for initializing a [`Mem`].
pub struct Config {
    /// The retained nodes.
    pub nodes: Vec,
    /// The leaf location up to which pruning has been performed, or 0 if never pruned.
    pub pruning_boundary: Location,
    /// The pinned nodes, in the order expected by [`Family::nodes_to_pin`].
    pub pinned_nodes: Vec,
}

/// A basic, `no_std`-compatible Merkle structure where all nodes are stored in-memory.
///
/// Nodes are either _retained_, _pruned_, or _pinned_. Retained nodes are stored in the main
/// deque. Pruned nodes precede `pruning_boundary` and are no longer stored unless they are still
/// required for root computation or proof generation, in which case they are kept in
/// `pinned_nodes`.
///
/// The structure is always merkleized (its root is always computed). Mutations go through the
/// batch API: create an [`UnmerkleizedBatch`](batch::UnmerkleizedBatch) via [`Self::new_batch`],
/// accumulate changes, merkleize, then apply the result via [`Self::apply_batch`].
#[derive(Clone, Debug)]
pub struct Mem {
    /// The retained nodes, starting at `pruning_boundary`.
    nodes: VecDeque,
    /// The highest position for which pruning has been performed, or 0 if never pruned.
    ///
    /// # Invariant
    ///
    /// This is always leaf-aligned (the position corresponding to some `Location`).
    pruning_boundary: Position,
    /// Auxiliary map from node position to the digest of any pinned node.
    pinned_nodes: BTreeMap, D>,
    /// The root digest.
    root: D,
}

impl Mem {
    /// Create a new, empty structure.
    ///
    /// The root of an empty structure is still well-defined: it is the hash of zero leaves
    /// (computed here via `hasher.root` over an empty peak iterator).
    pub fn new(hasher: &impl Hasher) -> Self {
        let root = hasher.root(Location::new(0), core::iter::empty::<&D>());
        Self {
            nodes: VecDeque::new(),
            pruning_boundary: Position::new(0),
            pinned_nodes: BTreeMap::new(),
            root,
        }
    }

    /// Return a [`Mem`] initialized with the given `config`.
    ///
    /// # Errors
    ///
    /// Returns [`Error::InvalidPinnedNodes`] if the number of pinned nodes doesn't match the
    /// expected count for `config.pruning_boundary`.
    ///
    /// Returns [`Error::InvalidSize`] if the resulting size is invalid.
    pub fn init(
        config: Config,
        hasher: &impl Hasher,
    ) -> Result> {
        // Convert the leaf-aligned boundary into a node position; total size is that
        // position plus the number of retained nodes provided in the config.
        let pruning_boundary = Position::try_from(config.pruning_boundary)?;
        let Some(size) = pruning_boundary.checked_add(config.nodes.len() as u64) else {
            // Overflow while computing size: report with the sentinel max value.
            return Err(Error::InvalidSize(u64::MAX));
        };
        if !size.is_valid_size() {
            return Err(Error::InvalidSize(*size));
        }
        // Pinned nodes are supplied positionally; they must match the family's expected
        // pin positions for this boundary one-for-one.
        let expected_pinned_positions: Vec<_> = F::nodes_to_pin(config.pruning_boundary).collect();
        if config.pinned_nodes.len() != expected_pinned_positions.len() {
            return Err(Error::InvalidPinnedNodes);
        }
        let pinned_nodes = expected_pinned_positions
            .into_iter()
            .zip(config.pinned_nodes)
            .collect();
        let nodes = VecDeque::from(config.nodes);
        // Eagerly compute the root so the structure is always merkleized.
        let root = Self::compute_root(hasher, &nodes, &pinned_nodes, pruning_boundary);
        Ok(Self {
            nodes,
            pruning_boundary,
            pinned_nodes,
            root,
        })
    }

    /// Re-initialize with the given nodes, pruning boundary, and pinned nodes.
    ///
    /// Thin convenience wrapper over [`Self::init`].
    ///
    /// # Errors
    ///
    /// Returns [`Error::InvalidPinnedNodes`] if the provided pinned node count is invalid for the
    /// given state.
    ///
    /// Returns [`Error::LocationOverflow`] if `pruning_boundary` exceeds [`Family::MAX_LEAVES`].
    pub fn from_components(
        hasher: &impl Hasher,
        nodes: Vec,
        pruning_boundary: Location,
        pinned_nodes: Vec,
    ) -> Result> {
        Self::init(
            Config {
                nodes,
                pruning_boundary,
                pinned_nodes,
            },
            hasher,
        )
    }

    /// Build a pruned structure that retains nodes above the prune boundary.
    ///
    /// Like `from_components` but also accepts retained nodes (stored in the
    /// `nodes` deque). Used by the grafted MMR which has no disk fallback.
    ///
    /// NOTE(review): unlike `init`, this constructor performs no validation and trusts the
    /// caller-supplied `root` rather than recomputing it — callers must guarantee consistency.
    #[cfg(feature = "std")]
    pub(crate) fn from_pruned_with_retained(
        root: D,
        pruning_boundary: Position,
        pinned_nodes: BTreeMap, D>,
        retained_nodes: Vec,
    ) -> Self {
        Self {
            nodes: VecDeque::from(retained_nodes),
            pruning_boundary,
            pinned_nodes,
            root,
        }
    }

    /// Compute the root digest from the current peaks.
    ///
    /// Free function over the components (rather than `&self`) so it can be used during
    /// construction in `init` before the struct exists.
    ///
    /// # Panics
    ///
    /// Panics if a required peak precedes `pruning_boundary` and is not pinned, or if the
    /// implied size is not a valid merkle size.
    pub(crate) fn compute_root(
        hasher: &impl Hasher,
        nodes: &VecDeque,
        pinned_nodes: &BTreeMap, D>,
        pruning_boundary: Position,
    ) -> D {
        let size = Position::new(nodes.len() as u64 + *pruning_boundary);
        let leaves = Location::try_from(size).expect("invalid merkle size");
        // Resolve a node position to its digest: pinned map for pruned positions,
        // offset into the retained deque otherwise.
        let get_node = |pos: Position| -> &D {
            if pos < pruning_boundary {
                return pinned_nodes
                    .get(&pos)
                    .expect("requested node is pruned and not pinned");
            }
            let index = (*pos - *pruning_boundary) as usize;
            &nodes[index]
        };
        let peaks = F::peaks(size).map(|(p, _)| get_node(p));
        hasher.root(leaves, peaks)
    }

    /// Return the total number of nodes, irrespective of any pruning.
    pub fn size(&self) -> Position {
        Position::new(self.nodes.len() as u64 + *self.pruning_boundary)
    }

    /// Return the total number of leaves.
    pub fn leaves(&self) -> Location {
        Location::try_from(self.size()).expect("invalid merkle size")
    }

    /// Returns `[start, end)` where `start` is the oldest retained leaf and `end` is the total
    /// leaf count.
    pub fn bounds(&self) -> Range> {
        Location::try_from(self.pruning_boundary).expect("valid pruning_boundary")..self.leaves()
    }

    /// Return a new iterator over the peaks.
    pub fn peak_iterator(&self) -> impl Iterator, u32)> {
        F::peaks(self.size())
    }

    /// Get the root digest.
    pub const fn root(&self) -> &D {
        &self.root
    }

    /// Return the requested node if it is either retained or present in the pinned_nodes map, and
    /// panic otherwise.
    ///
    /// # Panics
    ///
    /// Panics if the requested node does not exist.
    pub(crate) fn get_node_unchecked(&self, pos: Position) -> &D {
        // Pruned positions must be in the pinned map; retained positions index the deque.
        if pos < self.pruning_boundary {
            return self
                .pinned_nodes
                .get(&pos)
                .expect("requested node is pruned and not pinned");
        }
        &self.nodes[self.pos_to_index(pos)]
    }

    /// Return the index of the element in the current nodes vector given its position.
    ///
    /// # Panics
    ///
    /// Panics if `pos` precedes the oldest retained position.
    fn pos_to_index(&self, pos: Position) -> usize {
        assert!(
            pos >= self.pruning_boundary,
            "pos precedes oldest retained position"
        );
        (*pos - *self.pruning_boundary) as usize
    }

    /// Return the requested node or `None` if it is not stored.
    ///
    /// Non-panicking counterpart of [`Self::get_node_unchecked`].
    pub fn get_node(&self, pos: Position) -> Option {
        if pos < self.pruning_boundary {
            return self.pinned_nodes.get(&pos).copied();
        }
        self.nodes.get(self.pos_to_index(pos)).copied()
    }

    /// Get the nodes (position + digest) that need to be pinned when pruned to `prune_loc`.
    ///
    /// # Panics
    ///
    /// Panics (via `get_node_unchecked`) if a required node is neither retained nor pinned.
    pub(crate) fn nodes_to_pin(&self, prune_loc: Location) -> BTreeMap, D> {
        F::nodes_to_pin(prune_loc)
            .map(|pos| (pos, *self.get_node_unchecked(pos)))
            .collect()
    }

    /// Prune all nodes up to but not including the given leaf location, and pin the nodes still
    /// required for root computation and proof generation.
    ///
    /// # Errors
    ///
    /// Returns [`Error::LocationOverflow`] if `loc` exceeds [`Family::MAX_LEAVES`].
    /// Returns [`Error::LeafOutOfBounds`] if `loc` exceeds the current leaf count.
    pub fn prune(&mut self, loc: Location) -> Result<(), Error> {
        if loc > self.leaves() {
            return Err(Error::LeafOutOfBounds(loc));
        }
        let pos = Position::try_from(loc)?;
        // Already pruned at least this far — nothing to do.
        if pos <= self.pruning_boundary {
            return Ok(());
        }
        self.prune_to_loc(loc);
        Ok(())
    }

    /// Prune all retained nodes.
    pub fn prune_all(&mut self) {
        if !self.nodes.is_empty() {
            self.prune_to_loc(self.leaves());
        }
    }

    /// Location-based pruning.
    fn prune_to_loc(&mut self, loc: Location) {
        // Snapshot the pins BEFORE draining: nodes_to_pin reads nodes that are about to be
        // dropped from the deque.
        let pinned = self.nodes_to_pin(loc);
        let pos = Position::try_from(loc).expect("valid location");
        // Index of the boundary node == count of nodes preceding it, i.e. how many to drop.
        let retained_nodes = self.pos_to_index(pos);
        self.pinned_nodes = pinned;
        self.nodes.drain(0..retained_nodes);
        self.pruning_boundary = pos;
    }

    /// Return an inclusion proof for the element at location `loc`.
    ///
    /// # Errors
    ///
    /// Returns [`Error::LocationOverflow`] if `loc` exceeds the valid range.
    /// Returns [`Error::LeafOutOfBounds`] if `loc` >= [`Self::leaves()`].
    /// Returns [`Error::ElementPruned`] if a required node is missing.
    pub fn proof(
        &self,
        hasher: &impl Hasher,
        loc: Location,
    ) -> Result, Error> {
        if !loc.is_valid_index() {
            return Err(Error::LocationOverflow(loc));
        }
        // A single-element proof is a one-element range proof; translate the range error
        // into the more specific single-location error.
        self.range_proof(hasher, loc..loc + 1).map_err(|e| match e {
            Error::RangeOutOfBounds(_) => Error::LeafOutOfBounds(loc),
            _ => e,
        })
    }

    /// Return an inclusion proof for all elements within the provided `range` of locations.
    ///
    /// # Errors
    ///
    /// Returns [`Error::Empty`] if the range is empty.
    /// Returns [`Error::LocationOverflow`] if any location exceeds the valid range.
    /// Returns [`Error::RangeOutOfBounds`] if `range.end` > [`Self::leaves()`].
    /// Returns [`Error::ElementPruned`] if a required node is missing.
    pub fn range_proof(
        &self,
        hasher: &impl Hasher,
        range: Range>,
    ) -> Result, Error> {
        merkle_proof::build_range_proof(
            hasher,
            self.leaves(),
            range,
            |pos| self.get_node(pos),
            Error::ElementPruned,
        )
    }

    /// Get the digests of nodes that need to be pinned at the provided pruning boundary.
    ///
    /// Test-only, ordered counterpart of [`Self::nodes_to_pin`] (digests only, in
    /// `F::nodes_to_pin` order).
    #[cfg(test)]
    pub(crate) fn node_digests_to_pin(&self, prune_loc: Location) -> Vec {
        F::nodes_to_pin(prune_loc)
            .map(|pos| *self.get_node_unchecked(pos))
            .collect()
    }

    /// Pin extra nodes. It's up to the caller to ensure this set is valid.
    #[cfg(any(feature = "std", test))]
    pub(crate) fn add_pinned_nodes(&mut self, pinned_nodes: BTreeMap, D>) {
        // Merge (insert-or-overwrite) — no validation against the family's pin positions.
        for (pos, node) in pinned_nodes {
            self.pinned_nodes.insert(pos, node);
        }
    }

    /// Truncate the structure to a smaller valid size, discarding all nodes beyond that size.
    /// Recomputes the root after truncation.
    #[cfg(feature = "std")]
    #[allow(dead_code)]
    pub(crate) fn truncate(&mut self, new_size: Position, hasher: &impl Hasher) {
        debug_assert!(new_size.is_valid_size());
        debug_assert!(new_size >= self.pruning_boundary);
        let keep = (*new_size - *self.pruning_boundary) as usize;
        self.nodes.truncate(keep);
        self.root = Self::compute_root(
            hasher,
            &self.nodes,
            &self.pinned_nodes,
            self.pruning_boundary,
        );
    }

    /// Return the nodes this structure currently has pinned.
    #[cfg(test)]
    pub(crate) fn pinned_nodes(&self) -> BTreeMap, D> {
        self.pinned_nodes.clone()
    }

    /// Create a new speculative batch with this structure as its parent.
    pub fn new_batch(&self) -> batch::UnmerkleizedBatch {
        // The batch chain is rooted at a merkleized snapshot of this structure.
        let root = batch::MerkleizedBatch::from_mem(self);
        root.new_batch()
    }

    /// Apply a merkleized batch. Already-committed ancestors are skipped automatically.
    ///
    /// # Errors
    ///
    /// Returns [`Error::StaleBatch`] if this structure's size is incompatible with the batch's
    /// base/final sizes, and [`Error::AncestorDropped`] if an uncommitted ancestor's appended
    /// data is missing (detected via a size mismatch after applying).
    pub fn apply_batch(&mut self, batch: &batch::MerkleizedBatch) -> Result<(), Error> {
        // Decide whether some ancestors in the batch's chain are already committed here:
        // - size == base_size: nothing committed yet, apply everything.
        // - base_size < size < final size: some ancestors committed, skip those.
        // - size == final size with no appends: overwrite-only batch, everything committed.
        let skip_ancestors = if self.size() == batch.base_size {
            false
        } else if self.size() > batch.base_size && self.size() < batch.size() {
            true
        } else if self.size() == batch.size() && batch.appended.is_empty() {
            // All ancestors committed and this batch has overwrites only (no appends).
            true
        } else {
            return Err(Error::StaleBatch {
                expected: batch.base_size,
                actual: self.size(),
            });
        };
        // Apply ancestor batches in root-to-tip order. Already-committed
        // batches (whose appended nodes are already in the Mem) are skipped
        // by tracking a running position through the ancestor chain.
        let mut batch_pos = *batch.base_size;
        for (appended, overwrites) in batch
            .ancestor_appended
            .iter()
            .zip(&batch.ancestor_overwrites)
        {
            batch_pos += appended.len() as u64;
            // Overwrite-only ancestors don't advance batch_pos, so they can't be
            // distinguished from their predecessor by size. Use strict < to
            // avoid skipping them at the boundary. Re-applying committed
            // overwrites is harmless (idempotent).
            let committed = if appended.is_empty() {
                skip_ancestors && batch_pos < *self.size()
            } else {
                skip_ancestors && batch_pos <= *self.size()
            };
            if committed {
                continue;
            }
            for (&pos, &digest) in overwrites.iter() {
                // Overwrites below the pruning boundary target nodes no longer stored.
                if pos < self.pruning_boundary {
                    continue;
                }
                let index = self.pos_to_index(pos);
                self.nodes[index] = digest;
            }
            for &digest in appended.iter() {
                self.nodes.push_back(digest);
            }
        }
        // Apply this batch's own data.
        for (&pos, &digest) in batch.overwrites.iter() {
            if skip_ancestors && pos < self.pruning_boundary {
                continue;
            }
            let index = self.pos_to_index(pos);
            self.nodes[index] = digest;
        }
        for &digest in batch.appended.iter() {
            self.nodes.push_back(digest);
        }
        // Detect missing ancestor data. If an uncommitted ancestor was dropped
        // before this batch was merkleized, its appended nodes are absent and the
        // Mem ends up smaller than expected. This does not catch dropped
        // overwrite-only ancestors (they don't change the size).
        if self.size() != batch.size() {
            return Err(Error::AncestorDropped {
                expected: batch.size(),
                actual: self.size(),
            });
        }
        // The batch was merkleized already; adopt its root rather than recomputing.
        self.root = batch.root();
        Ok(())
    }
}

/// [`Readable`] implementation: every method delegates to the inherent method of the same name.
impl Readable for Mem {
    type Family = F;
    type Digest = D;
    type Error = Error;

    fn size(&self) -> Position {
        self.size()
    }

    fn get_node(&self, pos: Position) -> Option {
        self.get_node(pos)
    }

    fn root(&self) -> D {
        *self.root()
    }

    fn pruning_boundary(&self) -> Location {
        Location::try_from(self.pruning_boundary).expect("valid pruning_boundary")
    }

    fn proof(
        &self,
        hasher: &impl Hasher,
        loc: Location,
    ) -> Result, Error> {
        self.proof(hasher, loc)
    }

    fn range_proof(
        &self,
        hasher: &impl Hasher,
        range: Range>,
    ) -> Result, Error> {
        self.range_proof(hasher, range)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::merkle::{hasher::Standard, Error, Location, Position};
    use commonware_cryptography::{sha256, Sha256};
    use commonware_runtime::{deterministic, Runner as _, ThreadPooler};
    use commonware_utils::NZUsize;

    type D = sha256::Digest;
    type H = Standard;

    /// Build a structure with `n` leaves, each the digest of its index's big-endian bytes.
    fn build(hasher: &H, n: u64) -> Mem {
        let mut mem = Mem::new(hasher);
        let batch = {
            let mut batch = mem.new_batch();
            for i in 0u64..n {
                let element = hasher.digest(&i.to_be_bytes());
                batch = batch.add(hasher, &element);
            }
            batch.merkleize(&mem, hasher)
        };
        mem.apply_batch(&batch).unwrap();
        mem
    }

    /// Like `build` but appends the raw index bytes directly (no pre-hashing).
    fn build_raw(hasher: &H, n: u64) -> Mem {
        let mut mem = Mem::new(hasher);
        let batch = {
            let mut batch = mem.new_batch();
            for i in 0u64..n {
                batch = batch.add(hasher, &i.to_be_bytes());
            }
            batch.merkleize(&mem, hasher)
        };
        mem.apply_batch(&batch).unwrap();
        mem
    }

    /// An empty structure reports zero leaves, zero size, and empty bounds.
    fn empty() {
        let hasher: H = Standard::new();
        let mem = Mem::::new(&hasher);
        assert_eq!(*mem.leaves(), 0);
        assert_eq!(*mem.size(), 0);
        assert!(mem.bounds().is_empty());
    }

    /// Only the sizes produced by appending are valid; all in-between sizes are invalid.
    fn validity() {
        let executor = deterministic::Runner::default();
        executor.start(|_| async move {
            let hasher: H = Standard::new();
            let mut mem = Mem::::new(&hasher);
            for i in 0u64..256 {
                assert!(
                    mem.size().is_valid_size(),
                    "size should be valid at step {i}"
                );
                let old_size = mem.size();
                let batch = mem
                    .new_batch()
                    .add(&hasher, &i.to_be_bytes())
                    .merkleize(&mem, &hasher);
                mem.apply_batch(&batch).unwrap();
                for size in *old_size + 1..*mem.size() {
                    assert!(
                        !Position::::new(size).is_valid_size(),
                        "size {size} should not be valid"
                    );
                }
            }
        });
    }

    /// Pruning everything between appends never corrupts the leaf count.
    fn prune_all_then_append() {
        let executor = deterministic::Runner::default();
        executor.start(|_| async move {
            let hasher: H = Standard::new();
            let mut mem = Mem::::new(&hasher);
            for i in 0u64..256 {
                mem.prune_all();
                let batch = mem
                    .new_batch()
                    .add(&hasher, &i.to_be_bytes())
                    .merkleize(&mem, &hasher);
                mem.apply_batch(&batch).unwrap();
                assert_eq!(*mem.leaves(), i + 1);
            }
        });
    }

    fn range_proof_out_of_bounds() {
        let executor = deterministic::Runner::default();
        executor.start(|_| async move {
            let hasher: H = Standard::new();
            let mem = Mem::::new(&hasher);
            assert!(matches!(
                mem.range_proof(&hasher, Location::new(0)..Location::new(1)),
                Err(Error::RangeOutOfBounds(_))
            ));
            let mem = build::(&hasher, 10);
            assert!(matches!(
                mem.range_proof(&hasher, Location::new(5)..Location::new(11)),
                Err(Error::RangeOutOfBounds(_))
            ));
            assert!(mem
                .range_proof(&hasher, Location::new(5)..Location::new(10))
                .is_ok());
        });
    }

    fn proof_out_of_bounds() {
        let executor = deterministic::Runner::default();
        executor.start(|_| async move {
            let hasher: H = Standard::new();
            let mem = Mem::::new(&hasher);
            assert!(matches!(
                mem.proof(&hasher, Location::new(0)),
                Err(Error::LeafOutOfBounds(_))
            ));
            let mem = build::(&hasher, 10);
            assert!(matches!(
                mem.proof(&hasher, Location::new(10)),
                Err(Error::LeafOutOfBounds(_))
            ));
            assert!(mem.proof(&hasher, Location::new(9)).is_ok());
        });
    }

    fn init_pinned_nodes_validation() {
        let executor = deterministic::Runner::default();
        executor.start(|_| async move {
            let hasher: H = Standard::new();
            // Empty config with matching (empty) pins is valid.
            assert!(Mem::::init(
                Config {
                    nodes: vec![],
                    pruning_boundary: Location::new(0),
                    pinned_nodes: vec![],
                },
                &hasher,
            )
            .is_ok());
            // Nonzero boundary with no pins: count mismatch.
            assert!(matches!(
                Mem::::init(
                    Config {
                        nodes: vec![],
                        pruning_boundary: Location::new(8),
                        pinned_nodes: vec![],
                    },
                    &hasher,
                ),
                Err(Error::InvalidPinnedNodes)
            ));
            // Zero boundary with a spurious pin: count mismatch.
            assert!(matches!(
                Mem::::init(
                    Config {
                        nodes: vec![],
                        pruning_boundary: Location::new(0),
                        pinned_nodes: vec![hasher.digest(b"dummy")],
                    },
                    &hasher,
                ),
                Err(Error::InvalidPinnedNodes)
            ));
            // Pins harvested from a real structure are accepted.
            let mem = build::(&hasher, 50);
            let prune_loc = Location::::new(25);
            let pinned_nodes = mem.node_digests_to_pin(prune_loc);
            assert!(Mem::::init(
                Config {
                    nodes: vec![],
                    pruning_boundary: prune_loc,
                    pinned_nodes,
                },
                &hasher,
            )
            .is_ok());
        });
    }

    /// A structure pruned after every append stays root-equal to an unpruned reference.
    fn root_stable_under_pruning() {
        let executor = deterministic::Runner::default();
        executor.start(|_| async move {
            let hasher: H = Standard::new();
            let mut reference = Mem::::new(&hasher);
            let mut pruned = Mem::::new(&hasher);
            for i in 0u64..200 {
                let element = hasher.digest(&i.to_be_bytes());
                let cs = reference
                    .new_batch()
                    .add(&hasher, &element)
                    .merkleize(&reference, &hasher);
                reference.apply_batch(&cs).unwrap();
                let cs = pruned
                    .new_batch()
                    .add(&hasher, &element)
                    .merkleize(&pruned, &hasher);
                pruned.apply_batch(&cs).unwrap();
                pruned.prune_all();
                assert_eq!(pruned.root(), reference.root());
            }
        });
    }

    /// Overwrite a fixed set of leaves, then restore them; the root must round-trip.
    /// An optional thread pool exercises the parallel merkleization path.
    fn do_batch_update(
        hasher: &H,
        mut mem: Mem,
        pool: Option,
    ) {
        let element = D::from(*b"01234567012345670123456701234567");
        let root = *mem.root();
        let batch = {
            let mut batch = mem.new_batch();
            if let Some(ref pool) = pool {
                batch = batch.with_pool(Some(pool.clone()));
            }
            for leaf in [0u64, 1, 10, 50, 100, 150, 197, 198] {
                batch = batch
                    .update_leaf(hasher, Location::new(leaf), &element)
                    .unwrap();
            }
            batch.merkleize(&mem, hasher)
        };
        mem.apply_batch(&batch).unwrap();
        assert_ne!(*mem.root(), root);
        let batch = {
            let mut batch = mem.new_batch();
            for leaf in [0u64, 1, 10, 50, 100, 150, 197, 198] {
                let element = hasher.digest(&leaf.to_be_bytes());
                batch = batch
                    .update_leaf(hasher, Location::new(leaf), &element)
                    .unwrap();
            }
            batch.merkleize(&mem, hasher)
        };
        mem.apply_batch(&batch).unwrap();
        assert_eq!(*mem.root(), root);
    }

    fn batch_update_leaf() {
        let executor = deterministic::Runner::default();
        executor.start(|_| async move {
            let hasher: H = Standard::new();
            let mem = build::(&hasher, 200);
            do_batch_update(&hasher, mem, None);
        });
    }

    fn batch_parallel_update_leaf() {
        let executor = commonware_runtime::tokio::Runner::default();
        executor.start(|ctx| async move {
            let hasher: H = Standard::new();
            let mem = build::(&hasher, 200);
            let pool = ctx.create_thread_pool(NZUsize!(4)).unwrap();
            do_batch_update(&hasher, mem, Some(pool));
        });
    }

    fn root_changes_with_each_append() {
        let hasher: H = Standard::new();
        let mut mem = Mem::::new(&hasher);
        let mut prev_root = *mem.root();
        for i in 0u64..16 {
            let batch = {
                let batch = mem.new_batch();
                let batch = batch.add(&hasher, &i.to_be_bytes());
                batch.merkleize(&mem, &hasher)
            };
            mem.apply_batch(&batch).unwrap();
            assert_ne!(
                *mem.root(),
                prev_root,
                "root should change after append {i}"
            );
            prev_root = *mem.root();
        }
    }

    fn single_element_proof_roundtrip() {
        let hasher: H = Standard::new();
        let mem = build_raw::(&hasher, 16);
        let root = *mem.root();
        for i in 0u64..16 {
            let proof = mem
                .proof(&hasher, Location::new(i))
                .unwrap_or_else(|e| panic!("loc={i}: {e:?}"));
            assert!(
                proof.verify_element_inclusion(&hasher, &i.to_be_bytes(), Location::new(i), &root),
                "loc={i}: proof should verify"
            );
        }
    }

    /// Every (start, end) sub-range of every size 1..=24 produces a verifying range proof.
    fn range_proof_roundtrip_exhaustive() {
        for n in 1u64..=24 {
            let hasher: H = Standard::new();
            let mem = build_raw::(&hasher, n);
            let root = *mem.root();
            for start in 0..n {
                for end in start + 1..=n {
                    let range = Location::new(start)..Location::new(end);
                    let proof = mem
                        .range_proof(&hasher, range.clone())
                        .unwrap_or_else(|e| panic!("n={n}, range={start}..{end}: {e:?}"));
                    let elements: Vec<_> = (start..end).map(|i| i.to_be_bytes()).collect();
                    assert!(
                        proof.verify_range_inclusion(&hasher, &elements, range.start, &root),
                        "n={n}, range={start}..{end}: range proof should verify"
                    );
                }
            }
        }
    }

    fn root_with_repeated_pruning() {
        let hasher: H = Standard::new();
        let mut mem = build::(&hasher, 32);
        let root = *mem.root();
        for prune_leaf in 1..*mem.leaves() {
            let prune_loc = Location::new(prune_leaf);
            mem.prune(prune_loc).unwrap();
            assert_eq!(
                *mem.root(),
                root,
                "root changed after pruning to {prune_loc}"
            );
            assert_eq!(mem.bounds().start, prune_loc);
            assert!(
                mem.proof(&hasher, prune_loc).is_ok(),
                "boundary leaf {prune_loc} should remain provable"
            );
            assert!(
                mem.proof(&hasher, mem.leaves() - 1).is_ok(),
                "latest leaf should remain provable after pruning to {prune_loc}"
            );
        }
        mem.prune_all();
        assert_eq!(*mem.root(), root, "root changed after prune_all");
        assert!(mem.bounds().is_empty(), "prune_all should retain no leaves");
    }

    fn append_after_partial_prune() {
        let hasher: H = Standard::new();
        let mut mem = build_raw::(&hasher, 20);
        mem.prune(Location::new(7)).unwrap();
        let batch = {
            let mut batch = mem.new_batch();
            for i in 20u64..48 {
                batch = batch.add(&hasher, &i.to_be_bytes());
            }
            batch.merkleize(&mem, &hasher)
        };
        mem.apply_batch(&batch).unwrap();
        let root = *mem.root();
        for loc in *mem.bounds().start..*mem.leaves() {
            let proof = mem
                .proof(&hasher, Location::new(loc))
                .unwrap_or_else(|e| panic!("loc={loc}: {e:?}"));
            assert!(
                proof.verify_element_inclusion(
                    &hasher,
                    &loc.to_be_bytes(),
                    Location::new(loc),
                    &root
                ),
                "loc={loc}: proof should verify after append on pruned structure"
            );
        }
    }

    fn update_leaf() {
        let hasher: H = Standard::new();
        let mut mem = build_raw::(&hasher, 11);
        let root_before = *mem.root();
        let batch = {
            let batch = mem.new_batch();
            let batch = batch
                .update_leaf(&hasher, Location::new(5), b"updated-5")
                .unwrap();
            batch.merkleize(&mem, &hasher)
        };
        mem.apply_batch(&batch).unwrap();
        assert_ne!(*mem.root(), root_before, "root should change after update");
        assert_eq!(*mem.leaves(), 11);
        let proof = mem.proof(&hasher, Location::new(5)).unwrap();
        assert!(
            proof.verify_element_inclusion(&hasher, b"updated-5", Location::new(5), mem.root()),
            "updated leaf should verify with new data"
        );
        assert!(
            !proof.verify_element_inclusion(
                &hasher,
                &5u64.to_be_bytes(),
                Location::new(5),
                mem.root()
            ),
            "old data should not verify"
        );
        // Untouched leaves still verify against the new root.
        for i in [0u64, 3, 7, 10] {
            let p = mem.proof(&hasher, Location::new(i)).unwrap();
            assert!(
                p.verify_element_inclusion(&hasher, &i.to_be_bytes(), Location::new(i), mem.root()),
                "leaf {i} should still verify with original data"
            );
        }
    }

    fn update_leaf_every_position() {
        let n = 20u64;
        let hasher: H = Standard::new();
        let mut mem = build::(&hasher, n);
        for update_loc in 0..n {
            let batch = {
                let batch = mem.new_batch();
                let batch = batch
                    .update_leaf(&hasher, Location::new(update_loc), b"new-value")
                    .unwrap();
                batch.merkleize(&mem, &hasher)
            };
            mem.apply_batch(&batch).unwrap();
            let proof = mem.proof(&hasher, Location::new(update_loc)).unwrap();
            assert!(
                proof.verify_element_inclusion(
                    &hasher,
                    b"new-value",
                    Location::new(update_loc),
                    mem.root()
                ),
                "update at {update_loc} should verify"
            );
        }
    }

    fn update_leaf_errors() {
        let hasher: H = Standard::new();
        let mut mem = build::(&hasher, 10);
        {
            let batch = mem.new_batch();
            assert!(matches!(
                batch.update_leaf(&hasher, Location::new(10), b"x"),
                Err(Error::LeafOutOfBounds(_))
            ));
        }
        mem.prune(Location::new(5)).unwrap();
        {
            let batch = mem.new_batch();
            assert!(matches!(
                batch.update_leaf(&hasher, Location::new(3), b"x"),
                Err(Error::ElementPruned(_))
            ));
            let batch = mem.new_batch();
            assert!(batch.update_leaf(&hasher, Location::new(5), b"x").is_ok());
        }
    }

    fn update_leaf_with_append() {
        let hasher: H = Standard::new();
        let mut mem = build::(&hasher, 8);
        let batch = {
            let batch = mem.new_batch();
            let batch = batch
                .update_leaf(&hasher, Location::new(3), b"updated-3")
                .unwrap();
            let batch = batch.add(&hasher, &100u64.to_be_bytes());
            let batch = batch.add(&hasher, &101u64.to_be_bytes());
            batch.merkleize(&mem, &hasher)
        };
        mem.apply_batch(&batch).unwrap();
        assert_eq!(*mem.leaves(), 10);
        let proof = mem.proof(&hasher, Location::new(3)).unwrap();
        assert!(proof.verify_element_inclusion(
            &hasher,
            b"updated-3",
            Location::new(3),
            mem.root()
        ));
        let proof = mem.proof(&hasher, Location::new(8)).unwrap();
        assert!(proof.verify_element_inclusion(
            &hasher,
            &100u64.to_be_bytes(),
            Location::new(8),
            mem.root()
        ));
    }

    /// Mixing an append and an update in one batch matches applying them as two batches.
    fn update_leaf_under_merge_parent() {
        let hasher: H = Standard::new();
        let mut mem = build::(&hasher, 2);
        let batch = {
            let batch = mem.new_batch();
            let batch = batch.add(&hasher, &2u64.to_be_bytes());
            let batch = batch
                .update_leaf(&hasher, Location::new(0), b"updated-0")
                .unwrap();
            batch.merkleize(&mem, &hasher)
        };
        mem.apply_batch(&batch).unwrap();
        let ref_hasher: H = Standard::new();
        let mut ref_mem = build::(&ref_hasher, 2);
        let cs = {
            let batch = ref_mem.new_batch();
            let batch = batch.add(&ref_hasher, &2u64.to_be_bytes());
            batch.merkleize(&ref_mem, &ref_hasher)
        };
        ref_mem.apply_batch(&cs).unwrap();
        let cs = {
            let batch = ref_mem.new_batch();
            let batch = batch
                .update_leaf(&ref_hasher, Location::new(0), b"updated-0")
                .unwrap();
            batch.merkleize(&ref_mem, &ref_hasher)
        };
        ref_mem.apply_batch(&cs).unwrap();
        assert_eq!(*mem.root(), *ref_mem.root(), "roots must match");
        let proof = mem.proof(&hasher, Location::new(0)).unwrap();
        assert!(
            proof.verify_element_inclusion(&hasher, b"updated-0", Location::new(0), mem.root()),
            "updated leaf should verify"
        );
    }

    /// Prune to every valid boundary in structures of size 1..=max_n, then update_leaf +
    /// merkleize each retained leaf and verify its inclusion proof. This exercises the pinned
    /// nodes produced by `nodes_to_pin` under re-merkleization.
    fn update_leaf_after_prune() {
        let max_n = 20u64;
        let hasher: H = Standard::new();
        for n in 1..=max_n {
            for prune_to in 1..n {
                let mut mem = build_raw::(&hasher, n);
                mem.prune(Location::new(prune_to)).unwrap();
                for update_loc in prune_to..n {
                    // Clone so each update starts from the same pruned state.
                    let mut m = mem.clone();
                    let batch = {
                        let batch = m.new_batch();
                        let batch = batch
                            .update_leaf(&hasher, Location::new(update_loc), b"new")
                            .unwrap();
                        batch.merkleize(&m, &hasher)
                    };
                    m.apply_batch(&batch).unwrap();
                    let proof = m.proof(&hasher, Location::new(update_loc)).unwrap();
                    assert!(
                        proof.verify_element_inclusion(
                            &hasher,
                            b"new",
                            Location::new(update_loc),
                            m.root()
                        ),
                        "n={n} prune={prune_to} update={update_loc}: proof should verify"
                    );
                }
            }
        }
    }

    /// Applying C (child of B, grandchild of A) after only A is applied
    /// must apply B's uncommitted data + C's data, skipping only A.
    fn apply_batch_skips_only_committed_ancestors() {
        let hasher: H = Standard::new();
        let mut mem = Mem::::new(&hasher);
        // Chain: Mem -> A -> B -> C
        let a = mem.new_batch().add(&hasher, b"a").merkleize(&mem, &hasher);
        let b = a.new_batch().add(&hasher, b"b").merkleize(&mem, &hasher);
        let c = b.new_batch().add(&hasher, b"c").merkleize(&mem, &hasher);
        // Apply A, then apply C directly (skipping B's apply_batch).
        // C's ancestor batches carry [A.data, B.data]. A is already committed
        // so only B + C should be applied.
        mem.apply_batch(&a).unwrap();
        mem.apply_batch(&c).unwrap();
        // Verify against a reference that applied all three in order.
        let mut reference = Mem::::new(&hasher);
        let full = {
            let mut batch = reference.new_batch();
            for leaf in [b"a".as_slice(), b"b", b"c"] {
                batch = batch.add(&hasher, leaf);
            }
            batch.merkleize(&reference, &hasher)
        };
        reference.apply_batch(&full).unwrap();
        assert_eq!(mem.root(), reference.root());
    }

    /// Dropping an uncommitted ancestor before merkleizing a descendant must
    /// be detected at apply time, not silently corrupt data.
    fn apply_batch_detects_dropped_ancestor() {
        let hasher: H = Standard::new();
        let mut mem = Mem::::new(&hasher);
        let a = mem.new_batch().add(&hasher, b"a").merkleize(&mem, &hasher);
        let b = a.new_batch().add(&hasher, b"b").merkleize(&mem, &hasher);
        drop(a); // A dropped before C is merkleized — its data is lost
        let c = b.new_batch().add(&hasher, b"c").merkleize(&mem, &hasher);
        let result = mem.apply_batch(&c);
        assert!(
            matches!(result, Err(Error::AncestorDropped { .. })),
            "expected AncestorDropped, got {result:?}"
        );
    }

    /// Overwrite-only ancestor B must not be skipped when applying C after A.
    fn apply_batch_overwrite_only_ancestor() {
        let hasher: H = Standard::new();
        let mut mem = build_raw::(&hasher, 10);
        let pos0 = Position::::try_from(Location::new(0)).unwrap();
        // A: add 5 leaves.
        let a = {
            let mut b = mem.new_batch();
            for i in 100u64..105 {
                b = b.add(&hasher, &i.to_be_bytes());
            }
            b.merkleize(&mem, &hasher)
        };
        // B: overwrite leaf 0, no appends.
        let b = a
            .new_batch()
            .update_leaf(&hasher, Location::new(0), b"updated-0")
            .unwrap()
            .merkleize(&mem, &hasher);
        // C: add 5 more leaves.
        let c = {
            let mut batch = b.new_batch();
            for i in 200u64..205 {
                batch = batch.add(&hasher, &i.to_be_bytes());
            }
            batch.merkleize(&mem, &hasher)
        };
        // Apply A, then C (skipping B's apply_batch).
        mem.apply_batch(&a).unwrap();
        mem.apply_batch(&c).unwrap();
        // B's overwrite must have been applied.
        let updated = hasher.leaf_digest(pos0, b"updated-0");
        assert_eq!(
            mem.get_node(pos0),
            Some(updated),
            "overwrite-only ancestor B's overwrites were skipped"
        );
    }

    // --- MMR tests ---
    // Each wrapper instantiates the shared generic test above for the MMR family.
    #[test]
    fn mmr_empty() { empty::(); }
    #[test]
    fn mmr_validity() { validity::(); }
    #[test]
    fn mmr_prune_all_then_append() { prune_all_then_append::(); }
    #[test]
    fn mmr_range_proof_oob() { range_proof_out_of_bounds::(); }
    #[test]
    fn mmr_proof_oob() { proof_out_of_bounds::(); }
    #[test]
    fn mmr_init_pinned_nodes() { init_pinned_nodes_validation::(); }
    #[test]
    fn mmr_root_stable_under_pruning() { root_stable_under_pruning::(); }
    #[test]
    fn mmr_batch_update_leaf() { batch_update_leaf::(); }
    #[test]
    fn mmr_batch_parallel_update_leaf() { batch_parallel_update_leaf::(); }
    #[test]
    fn mmr_root_changes_with_each_append() { root_changes_with_each_append::(); }
    #[test]
    fn mmr_single_element_proof_roundtrip() { single_element_proof_roundtrip::(); }
    #[test]
    fn mmr_range_proof_roundtrip_exhaustive() { range_proof_roundtrip_exhaustive::(); }
    #[test]
    fn mmr_root_with_repeated_pruning() { root_with_repeated_pruning::(); }
    #[test]
    fn mmr_append_after_partial_prune() { append_after_partial_prune::(); }
    #[test]
    fn mmr_update_leaf() { update_leaf::(); }
    #[test]
    fn mmr_update_leaf_every_position() { update_leaf_every_position::(); }
    #[test]
    fn mmr_update_leaf_errors() { update_leaf_errors::(); }
    #[test]
    fn mmr_update_leaf_with_append() { update_leaf_with_append::(); }
    #[test]
    fn mmr_update_leaf_under_merge_parent() { update_leaf_under_merge_parent::(); }
    #[test]
    fn mmr_update_leaf_after_prune() { update_leaf_after_prune::(); }
    #[test]
    fn mmr_apply_batch_skips_only_committed_ancestors() { apply_batch_skips_only_committed_ancestors::(); }
    #[test]
    fn mmr_apply_batch_detects_dropped_ancestor() { apply_batch_detects_dropped_ancestor::(); }
    #[test]
    fn mmr_apply_batch_overwrite_only_ancestor() { apply_batch_overwrite_only_ancestor::(); }

    // --- MMB tests ---
    // Same generic tests instantiated for the MMB family.
    #[test]
    fn mmb_empty() { empty::(); }
    #[test]
    fn mmb_validity() { validity::(); }
    #[test]
    fn mmb_prune_all_then_append() { prune_all_then_append::(); }
    #[test]
    fn mmb_range_proof_oob() { range_proof_out_of_bounds::(); }
    #[test]
    fn mmb_proof_oob() { proof_out_of_bounds::(); }
    #[test]
    fn mmb_init_pinned_nodes() { init_pinned_nodes_validation::(); }
    #[test]
    fn mmb_root_stable_under_pruning() { root_stable_under_pruning::(); }
    #[test]
    fn mmb_batch_update_leaf() { batch_update_leaf::(); }
    #[test]
    fn mmb_batch_parallel_update_leaf() { batch_parallel_update_leaf::(); }
    #[test]
    fn mmb_root_changes_with_each_append() { root_changes_with_each_append::(); }
    #[test]
    fn mmb_single_element_proof_roundtrip() { single_element_proof_roundtrip::(); }
    #[test]
    fn mmb_range_proof_roundtrip_exhaustive() { range_proof_roundtrip_exhaustive::(); }
    #[test]
    fn mmb_root_with_repeated_pruning() { root_with_repeated_pruning::(); }
    #[test]
    fn mmb_append_after_partial_prune() { append_after_partial_prune::(); }
    #[test]
    fn mmb_update_leaf() { update_leaf::(); }
    #[test]
    fn mmb_update_leaf_every_position() { update_leaf_every_position::(); }
    #[test]
    fn mmb_update_leaf_errors() { update_leaf_errors::(); }
    #[test]
    fn mmb_update_leaf_with_append() { update_leaf_with_append::(); }
    #[test]
    fn mmb_update_leaf_under_merge_parent() { update_leaf_under_merge_parent::(); }
    #[test]
    fn mmb_update_leaf_after_prune() { update_leaf_after_prune::(); }
    #[test]
    fn mmb_apply_batch_skips_only_committed_ancestors() { apply_batch_skips_only_committed_ancestors::(); }
    #[test]
    fn mmb_apply_batch_detects_dropped_ancestor() { apply_batch_detects_dropped_ancestor::(); }
    #[test]
    fn mmb_apply_batch_overwrite_only_ancestor() { apply_batch_overwrite_only_ancestor::(); }
}