// sp_domains/valued_trie.rs

1#[cfg(not(feature = "std"))]
2extern crate alloc;
3
4#[cfg(not(feature = "std"))]
5use alloc::vec::Vec;
6use hash_db::Hasher;
7use parity_scale_codec::{Compact, Encode};
8use sp_std::cmp::max;
9use trie_db::node::Value;
10use trie_db::{
11    nibble_ops, ChildReference, NibbleSlice, NodeCodec, ProcessEncodedNode, TrieHash, TrieLayout,
12    TrieRoot,
13};
14
/// Expands a two-element list into a 16-element array literal by doubling it
/// three times (2 -> 4 -> 8 -> 16). Used to build the empty child-slot array
/// of a branch node. Taken from `trie-db` as is.
macro_rules! exponential_out {
	(@3, [$($inpp:expr),*]) => { exponential_out!(@2, [$($inpp,)* $($inpp),*]) };
	(@2, [$($inpp:expr),*]) => { exponential_out!(@1, [$($inpp,)* $($inpp),*]) };
	(@1, [$($inpp:expr),*]) => { [$($inpp,)* $($inpp),*] };
}
20
/// One child slot of a branch node: empty, or a reference (hash or inline
/// bytes) to an already-encoded child.
type CacheNode<HO> = Option<ChildReference<HO>>;

/// Returns a fresh array of 16 empty child slots — one per nibble value.
#[inline(always)]
fn new_vec_slice_buffer<HO>() -> [CacheNode<HO>; 16] {
    exponential_out!(@3, [None, None])
}

/// The 16 child slots of a branch node for trie layout `T`.
type ArrayNode<T> = [CacheNode<TrieHash<T>>; 16];
29
30/// This is a modified version of trie root that takes trie node values instead of deriving from
31/// the actual data. Taken from `trie-db` as is.
32///
33/// Note: This is an opportunity to push this change upstream but I'm not sure how to present these
34/// changes yet. Need to be discussed further.
35pub fn valued_ordered_trie_root<Layout>(
36    input: Vec<Value>,
37) -> <<Layout as TrieLayout>::Hash as Hasher>::Out
38where
39    Layout: TrieLayout,
40{
41    let input = input
42        .into_iter()
43        .enumerate()
44        .map(|(i, v)| (Compact(i as u32).encode(), v))
45        .collect();
46
47    let mut cb = TrieRoot::<Layout>::default();
48    trie_visit::<Layout, _>(input, &mut cb);
49    cb.root.unwrap_or_default()
50}
51
/// Streams the (sorted) key/value pairs of `input` in order, encoding trie
/// nodes bottom-up and handing each encoded node to `callback`. Adapted from
/// `trie-db`'s `trie_visit`, except values are pre-built node [`Value`]s
/// instead of raw bytes. The final root is reported via
/// `callback.process(.., .., true)`.
fn trie_visit<T, F>(input: Vec<(Vec<u8>, Value)>, callback: &mut F)
where
    T: TrieLayout,
    F: ProcessEncodedNode<TrieHash<T>>,
{
    let mut depth_queue = CacheAccum::<T>::new();
    // compare iter ordering
    let mut iter_input = input.into_iter();
    if let Some(mut previous_value) = iter_input.next() {
        // depth of last item
        let mut last_depth = 0;

        let mut single = true;
        for (k, v) in iter_input {
            single = false;
            // Length of the common prefix of the previous and current key, in nibbles.
            let common_depth = nibble_ops::biggest_depth(&previous_value.0, &k);
            // 0 is a reserved value: could use option
            let depth_item = common_depth;
            if common_depth == previous_value.0.len() * nibble_ops::NIBBLE_PER_BYTE {
                // the new key include the previous one: branch value case
                // just stored value at branch depth
                depth_queue.set_cache_value(common_depth, Some(previous_value.1));
            } else if depth_item >= last_depth {
                // put previous with next (common branch previous value can be flush)
                depth_queue.flush_value(callback, depth_item, &previous_value);
            } else if depth_item < last_depth {
                // do not put with next, previous is last of a branch
                depth_queue.flush_value(callback, last_depth, &previous_value);
                let ref_branches = previous_value.0;
                // Branch is complete below `depth_item`: encode and pop those levels.
                depth_queue.flush_branch(callback, ref_branches, depth_item, false);
            }

            previous_value = (k, v);
            last_depth = depth_item;
        }
        // last pendings
        if single {
            // one single element corner case: the lone entry is itself the root leaf.
            let (k2, v2) = previous_value;
            let nkey = NibbleSlice::new_offset(&k2, last_depth);
            let pr =
                NibbleSlice::new_offset(&k2, k2.len() * nibble_ops::NIBBLE_PER_BYTE - nkey.len());

            let encoded = T::Codec::leaf_node(nkey.right_iter(), nkey.len(), v2);
            callback.process(pr.left(), encoded, true);
        } else {
            depth_queue.flush_value(callback, last_depth, &previous_value);
            let ref_branches = previous_value.0;
            // Flush every remaining level down to depth 0; `true` marks the root.
            depth_queue.flush_branch(callback, ref_branches, 0, true);
        }
    } else {
        // nothing null root corner case
        callback.process(hash_db::EMPTY_PREFIX, T::Codec::empty_node().to_vec(), true);
    }
}
107
/// Stack of partially-built branch nodes, one entry per pending trie depth:
/// (16 child slots, optional branch value, depth in nibbles). Entries are
/// pushed in increasing depth order and popped as branches complete.
struct CacheAccum<'a, T: TrieLayout>(Vec<(ArrayNode<T>, Option<Value<'a>>, usize)>);

/// Initially allocated cache depth.
const INITIAL_DEPTH: usize = 10;
112
impl<'a, T> CacheAccum<'a, T>
where
    T: TrieLayout,
{
    /// Creates an empty accumulator, pre-allocating `INITIAL_DEPTH` levels.
    fn new() -> Self {
        let v = Vec::with_capacity(INITIAL_DEPTH);
        CacheAccum(v)
    }

    /// Stores `value` as the branch value at `depth`, pushing a new level on
    /// top of the stack if the current top is shallower than `depth`.
    #[inline(always)]
    fn set_cache_value(&mut self, depth: usize, value: Option<Value<'a>>) {
        if self.0.is_empty() || self.0[self.0.len() - 1].2 < depth {
            self.0.push((new_vec_slice_buffer(), None, depth));
        }
        let last = self.0.len() - 1;
        debug_assert!(self.0[last].2 <= depth);
        self.0[last].1 = value;
    }

    /// Stores a child reference in slot `nibble_index` of the branch at
    /// `depth`, pushing a new level first if needed. Unlike
    /// `set_cache_value`, the top level is expected to sit exactly at `depth`.
    #[inline(always)]
    fn set_node(&mut self, depth: usize, nibble_index: usize, node: CacheNode<TrieHash<T>>) {
        if self.0.is_empty() || self.0[self.0.len() - 1].2 < depth {
            self.0.push((new_vec_slice_buffer(), None, depth));
        }

        let last = self.0.len() - 1;
        debug_assert!(self.0[last].2 == depth);

        self.0[last].0.as_mut()[nibble_index] = node;
    }

    /// Depth of the top stack entry, or 0 when the stack is empty.
    #[inline(always)]
    fn last_depth(&self) -> usize {
        let ix = self.0.len();
        if ix > 0 {
            let last = ix - 1;
            self.0[last].2
        } else {
            0
        }
    }

    /// Depth of the entry below the top, or 0 when there are fewer than two.
    #[inline(always)]
    fn last_last_depth(&self) -> usize {
        let ix = self.0.len();
        if ix > 1 {
            let last = ix - 2;
            self.0[last].2
        } else {
            0
        }
    }

    /// True when no branch level is pending.
    #[inline(always)]
    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
    /// True when exactly one branch level is pending.
    #[inline(always)]
    fn is_one(&self) -> bool {
        self.0.len() == 1
    }

    /// Encodes `(k2, v2)` as a leaf node hanging off the branch at
    /// `target_depth` and records its reference in the matching child slot.
    fn flush_value(
        &mut self,
        callback: &mut impl ProcessEncodedNode<TrieHash<T>>,
        target_depth: usize,
        (k2, v2): &(impl AsRef<[u8]>, Value),
    ) {
        // The nibble at `target_depth` selects the child slot under the branch.
        let nibble_value = nibble_ops::left_nibble_at(k2.as_ref(), target_depth);
        // is it a branch value (two candidate same ix)
        // `target_depth + 1` skips the branch nibble: the leaf stores only the
        // remainder of the key.
        let nkey = NibbleSlice::new_offset(k2.as_ref(), target_depth + 1);
        let pr = NibbleSlice::new_offset(
            k2.as_ref(),
            k2.as_ref().len() * nibble_ops::NIBBLE_PER_BYTE - nkey.len(),
        );

        let encoded = T::Codec::leaf_node(nkey.right_iter(), nkey.len(), v2.clone());
        let hash = callback.process(pr.left(), encoded, false);

        // insert hash in branch (first level branch only at this point)
        self.set_node(target_depth, nibble_value as usize, Some(hash));
    }

    /// Encodes and pops completed branch levels until the top of the stack is
    /// no deeper than `new_depth` (or, when `is_last`, until the stack is
    /// empty and the root has been emitted).
    fn flush_branch(
        &mut self,
        callback: &mut impl ProcessEncodedNode<TrieHash<T>>,
        ref_branch: impl AsRef<[u8]> + Ord,
        new_depth: usize,
        is_last: bool,
    ) {
        // NOTE: parses as `depth > new_depth || (is_last && !empty)` — `&&`
        // binds tighter than `||`.
        while self.last_depth() > new_depth || is_last && !self.is_empty() {
            let lix = self.last_depth();
            let llix = max(self.last_last_depth(), new_depth);

            // Nibbles between the parent level and this branch become the
            // branch's partial key (no-extension layout).
            let (offset, slice_size, is_root) = if llix == 0 && is_last && self.is_one() {
                // branch root
                (llix, lix - llix, true)
            } else {
                (llix + 1, lix - llix - 1, false)
            };
            let nkey = if slice_size > 0 {
                Some((offset, slice_size))
            } else {
                None
            };

            let h = self.no_extension(ref_branch.as_ref(), callback, lix, is_root, nkey);
            if !is_root {
                // put hash in parent
                let nibble: u8 = nibble_ops::left_nibble_at(ref_branch.as_ref(), llix);
                self.set_node(llix, nibble as usize, Some(h));
            }
        }
    }

    /// Pops the top stack entry and encodes it as a nibbled branch node
    /// (no-extension trie layout), returning the child reference produced by
    /// `callback`.
    #[inline(always)]
    fn no_extension(
        &mut self,
        key_branch: &[u8],
        callback: &mut impl ProcessEncodedNode<TrieHash<T>>,
        branch_d: usize,
        is_root: bool,
        nkey: Option<(usize, usize)>,
    ) -> ChildReference<TrieHash<T>> {
        let (children, v, depth) = self.0.pop().expect("checked");

        debug_assert!(branch_d == depth);
        // encode branch
        let nkeyix = nkey.unwrap_or((branch_d, 0));
        let pr = NibbleSlice::new_offset(key_branch, nkeyix.0);
        let encoded = T::Codec::branch_node_nibbled(
            pr.right_range_iter(nkeyix.1),
            nkeyix.1,
            children.iter(),
            v,
        );
        callback.process(pr.left(), encoded, is_root)
    }
}
252
#[cfg(test)]
mod test {
    use crate::proof_provider_and_verifier::{
        StorageProofProvider, StorageProofVerifier, VerificationError,
    };
    use crate::valued_trie::valued_ordered_trie_root;
    use frame_support::assert_err;
    use parity_scale_codec::{Compact, Encode};
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use sp_core::storage::StorageKey;
    use sp_core::H256;
    use sp_runtime::traits::{BlakeTwo256, Hash};
    use sp_trie::{LayoutV1, StorageProof};
    use trie_db::node::Value;

    /// Checks that `valued_ordered_trie_root` matches the reference
    /// `ordered_trie_root`, and that enumerated proofs of inclusion verify
    /// against the resulting root (and fail for wrong roots/keys).
    #[test]
    fn test_extrinsics_root() {
        let mut rng = StdRng::seed_from_u64(10000);
        let exts_length = vec![35, 31, 50, 100, 20, 10, 120];
        let mut exts = Vec::new();
        let mut exts_hashed = Vec::new();
        for ext_length in &exts_length {
            let mut ext = vec![0u8; *ext_length];
            rng.fill(ext.as_mut_slice());
            // Values longer than 32 bytes are referenced by hash in the trie;
            // shorter ones are inlined as-is.
            let hashed = if *ext_length <= 32 {
                ext.clone()
            } else {
                BlakeTwo256::hash(&ext).0.to_vec()
            };
            exts.push(ext);
            exts_hashed.push(hashed);
        }

        let exts_values: Vec<_> = exts_hashed
            .iter()
            .zip(exts_length)
            .map(|(ext_hashed, ext_length)| {
                if ext_length <= 32 {
                    Value::Inline(ext_hashed)
                } else {
                    Value::Node(ext_hashed)
                }
            })
            .collect();

        let root = BlakeTwo256::ordered_trie_root(exts.clone(), sp_core::storage::StateVersion::V1);
        let got_root = valued_ordered_trie_root::<LayoutV1<BlakeTwo256>>(exts_values);
        assert_eq!(root, got_root);

        // Iterate by reference: `exts` is still needed below, so cloning the
        // whole vector just to iterate would be wasteful.
        for (i, ext) in exts.iter().enumerate() {
            // Generate a proof-of-inclusion and verify it with the above `root`
            let storage_key = StorageKey(Compact(i as u32).encode());
            let storage_proof =
                StorageProofProvider::<LayoutV1<BlakeTwo256>>::generate_enumerated_proof_of_inclusion(
                    &exts,
                    i as u32,
                )
                    .unwrap();

            assert_eq!(
                StorageProofVerifier::<BlakeTwo256>::get_bare_value(
                    &root,
                    storage_proof.clone(),
                    storage_key.clone(),
                )
                .unwrap(),
                *ext
            );

            // Verifying the proof with a wrong root/key will fail
            assert!(StorageProofVerifier::<BlakeTwo256>::get_bare_value(
                &H256::random(),
                storage_proof.clone(),
                storage_key.clone(),
            )
            .is_err());

            let storage_key = StorageKey(Compact(i as u32 + 1).encode());
            let result = StorageProofVerifier::<BlakeTwo256>::get_bare_value(
                &root,
                storage_proof,
                storage_key,
            );

            // there is a possibility that wrong key ends up being a different leaf in the merkle tree
            // but the data that key holds is neither valid extrinsic nor the one we expect.
            if let Ok(data) = result {
                assert_ne!(data, *ext)
            }
        }

        // fails to generate storage key for unknown index
        assert!(
            StorageProofProvider::<LayoutV1<BlakeTwo256>>::generate_enumerated_proof_of_inclusion(
                &exts, 100,
            )
            .is_none()
        );
    }

    /// Builds an in-memory backend with several keys and returns its root
    /// together with a proof that covers only a subset of those keys, leaving
    /// unused nodes in the proof on purpose.
    fn craft_valid_storage_proof_with_multiple_keys() -> (sp_core::H256, StorageProof) {
        use sp_state_machine::backend::Backend;
        use sp_state_machine::{prove_read, InMemoryBackend};

        let state_version = sp_runtime::StateVersion::V1;

        // construct storage proof
        let backend = <InMemoryBackend<sp_core::Blake2Hasher>>::from((
            vec![
                (None, vec![(b"key1".to_vec(), Some(b"value1".to_vec()))]),
                (None, vec![(b"key2".to_vec(), Some(b"value2".to_vec()))]),
                (None, vec![(b"key3".to_vec(), Some(b"value3".to_vec()))]),
                (
                    None,
                    vec![(b"key4".to_vec(), Some((42u64, 42u32, 42u16, 42u8).encode()))],
                ),
                // Value is too big to fit in a branch node
                (None, vec![(b"key11".to_vec(), Some(vec![0u8; 32]))]),
            ],
            state_version,
        ));
        let root = backend.storage_root(std::iter::empty(), state_version).0;
        let proof = prove_read(
            backend,
            &[&b"key1"[..], &b"key2"[..], &b"key4"[..], &b"key22"[..]],
        )
        .unwrap();

        (root, proof)
    }

    #[test]
    fn test_storage_proof_with_unused_nodes() {
        let (root, storage_proof) = craft_valid_storage_proof_with_multiple_keys();
        // Verifying the proof with unused nodes should fail
        assert_err!(
            StorageProofVerifier::<BlakeTwo256>::get_bare_value(
                &root,
                storage_proof,
                StorageKey(b"key2".to_vec()),
            ),
            VerificationError::UnusedNodesInTheProof
        );
    }
}
399}