//! Schema for Subspace block weight in the aux-db.
//!
//! (sc_consensus_subspace/aux_schema.rs)

use parity_scale_codec::{Decode, Encode};
use sc_client_api::backend::AuxStore;
use sp_blockchain::{Error as ClientError, Result as ClientResult};
use sp_runtime::traits::{One, Saturating};
use subspace_core_primitives::BlockForkWeight;

9fn load_decode<B, T>(backend: &B, key: &[u8]) -> ClientResult<Option<T>>
10where
11    B: AuxStore,
12    T: Decode,
13{
14    match backend.get_aux(key)? {
15        Some(t) => T::decode(&mut &t[..])
16            .map(Some)
17            .map_err(|e: parity_scale_codec::Error| {
18                ClientError::Backend(format!("Subspace DB is corrupted. Decode error: {e}"))
19            }),
20        None => Ok(None),
21    }
22}
23
24/// The aux storage key used to store the block weight of the given block hash.
25pub(crate) fn block_weight_key<H: Encode>(block_hash: H) -> Vec<u8> {
26    (b"block_weight", block_hash).encode()
27}
28
/// The aux storage key recording how far the canonical-history sweep has progressed.
fn block_weight_cleaned_to_key() -> Vec<u8> {
    Vec::from(&b"block_weight_cleaned_to"[..])
}

34/// Write the cumulative chain-weight of a block to aux storage.
35pub(crate) fn write_block_weight<H, F, R>(
36    block_hash: H,
37    block_weight: BlockForkWeight,
38    write_aux: F,
39) -> R
40where
41    H: Encode,
42    F: FnOnce(&[(Vec<u8>, &[u8])]) -> R,
43{
44    let key = block_weight_key(block_hash);
45    block_weight.using_encoded(|s| write_aux(&[(key, s)]))
46}
47
48/// Load the cumulative chain-weight associated with a block.
49pub(crate) fn load_block_weight<H: Encode, B: AuxStore>(
50    backend: &B,
51    block_hash: H,
52) -> ClientResult<Option<BlockForkWeight>> {
53    load_decode(backend, block_weight_key(block_hash).as_slice())
54}
55
56/// Load the canonical-history sweep marker. `None` on a fresh DB.
57pub(crate) fn load_block_weight_cleaned_to<N: Decode, B: AuxStore>(
58    backend: &B,
59) -> ClientResult<Option<N>> {
60    load_decode(backend, block_weight_cleaned_to_key().as_slice())
61}
62
63/// Build the `(key, encoded_value)` pair recording an updated sweep marker.
64pub(crate) fn block_weight_cleaned_to_aux_entry<N: Encode>(cleaned_to: N) -> (Vec<u8>, Vec<u8>) {
65    (block_weight_cleaned_to_key(), cleaned_to.encode())
66}
67
/// `(key, Some(value))` for a write, `(key, None)` for a delete — the shape
/// `BlockImportParams::auxiliary` expects.
pub(crate) type SweepAuxEntry = (Vec<u8>, Option<Vec<u8>>);

72/// Advance the canonical-history sweep one batch.
73///
74/// Walks `(cleaned_to, min(cleaned_to + batch, finalized - 1)]` and emits a
75/// tombstone per canonical block plus a marker entry for the highest height
76/// actually swept. Stops one short of `finalized` so the just-finalized
77/// block's weight stays available as `parent_weight` for any fork imported
78/// on top of it. Stops on the first `Ok(None)` (snap-sync gap) so the marker
79/// doesn't advance past entries that arrive later via gap-fill.
80///
81/// Callers MUST write the result as one atomic aux batch — splitting would
82/// re-introduce the leak.
83pub(crate) fn build_canonical_sweep_entries<N, H, F>(
84    cleaned_to: N,
85    finalized: N,
86    batch: N,
87    mut height_to_hash: F,
88) -> ClientResult<Vec<SweepAuxEntry>>
89where
90    N: Copy + Ord + Saturating + One + Encode,
91    H: Encode,
92    F: FnMut(N) -> ClientResult<Option<H>>,
93{
94    let one = N::one();
95    let end = cleaned_to
96        .saturating_add(batch)
97        .min(finalized.saturating_sub(one));
98    let mut height = cleaned_to.saturating_add(one);
99    let mut last_swept = cleaned_to;
100    let mut out = Vec::new();
101
102    while height <= end {
103        let Some(hash) = height_to_hash(height)? else {
104            break;
105        };
106        out.push((block_weight_key(hash), None));
107        last_swept = height;
108        // `saturating_add(one)` at `N::MAX` would loop forever; break first.
109        if height == end {
110            break;
111        }
112        height = height.saturating_add(one);
113    }
114
115    if last_swept > cleaned_to {
116        let (k, v) = block_weight_cleaned_to_aux_entry(last_swept);
117        out.push((k, Some(v)));
118    }
119    Ok(out)
120}
121
#[cfg(test)]
mod tests {
    use super::*;
    use parking_lot::RwLock;
    use std::collections::HashMap;

    /// Minimal in-memory `AuxStore` backed by a `RwLock<HashMap>`, standing in
    /// for the client's real aux database in these tests.
    #[derive(Default)]
    struct MemAuxStore {
        store: RwLock<HashMap<Vec<u8>, Vec<u8>>>,
    }

    impl AuxStore for MemAuxStore {
        fn insert_aux<
            'a,
            'b: 'a,
            'c: 'a,
            I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
            D: IntoIterator<Item = &'a &'b [u8]>,
        >(
            &self,
            insert: I,
            delete: D,
        ) -> sp_blockchain::Result<()> {
            let mut storage = self.store.write();
            for (k, v) in insert {
                storage.insert(k.to_vec(), v.to_vec());
            }
            for k in delete {
                storage.remove(*k);
            }
            Ok(())
        }

        fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
            Ok(self.store.read().get(key).cloned())
        }
    }

    /// Persist `weight` for `hash` via `write_block_weight`, adapting the
    /// `(Vec<u8>, &[u8])` pairs to the borrow shape `insert_aux` expects.
    fn seed_weight(store: &MemAuxStore, hash: u64, weight: BlockForkWeight) {
        write_block_weight(hash, weight, |values| {
            let pairs: Vec<(&[u8], &[u8])> =
                values.iter().map(|(k, v)| (k.as_slice(), *v)).collect();
            store.insert_aux(&pairs, &[]).unwrap();
        });
    }

    #[test]
    fn block_weight_key_roundtrips_through_write_and_load() {
        let store = MemAuxStore::default();
        let h: u64 = 0x1234;
        seed_weight(&store, h, 42);
        assert!(load_block_weight(&store, h).unwrap().is_some());

        // Delete the same key and confirm the load now observes its absence,
        // proving write/load/delete all agree on the key derivation.
        let key = block_weight_key(h);
        let key_refs: &[&[u8]] = &[key.as_slice()];
        let empty: &[(&[u8], &[u8])] = &[];
        store.insert_aux(empty, key_refs).unwrap();

        assert!(load_block_weight(&store, h).unwrap().is_none());
    }

    #[test]
    fn cleaned_to_marker_roundtrips() {
        let store = MemAuxStore::default();

        // Fresh DB: marker is absent.
        let initial: Option<u32> = load_block_weight_cleaned_to(&store).unwrap();
        assert!(initial.is_none());

        let (key, value) = block_weight_cleaned_to_aux_entry::<u32>(12_345);
        let pairs: &[(&[u8], &[u8])] = &[(key.as_slice(), value.as_slice())];
        store.insert_aux(pairs, &[]).unwrap();

        let read: Option<u32> = load_block_weight_cleaned_to(&store).unwrap();
        assert_eq!(read, Some(12_345));
    }

    /// Deterministic, height-distinct fake block hash for sweep tests.
    fn fake_hash(n: u32) -> u64 {
        0xaa00_0000_0000_0000_u64 | u64::from(n)
    }

    #[test]
    fn sweep_no_op_when_nothing_to_clean() {
        let entries =
            build_canonical_sweep_entries::<u32, u64, _>(10, 10, 100, |_| Ok(Some(0))).unwrap();
        assert!(
            entries.is_empty(),
            "finalized == cleaned_to should produce no entries"
        );
    }

    #[test]
    fn sweep_walks_up_to_finalized_minus_one_when_under_batch() {
        // finalized=6 → sweep stops at 5, leaving block 6's weight intact.
        let entries =
            build_canonical_sweep_entries::<u32, u64, _>(0, 6, 100, |n| Ok(Some(fake_hash(n))))
                .unwrap();
        // 5 tombstones (heights 1..=5) + 1 marker
        assert_eq!(entries.len(), 6);
        let expected_keys: Vec<Vec<u8>> = (1..=5).map(|h| block_weight_key(fake_hash(h))).collect();
        for (i, tomb) in entries[..5].iter().enumerate() {
            assert_eq!(
                tomb.0, expected_keys[i],
                "tombstone key mismatch at index {i}"
            );
            assert!(tomb.1.is_none());
        }
        // Finalized block (height 6) must NOT appear in the tombstones — its
        // weight is still needed as parent_weight for forks built on it.
        let finalized_key = block_weight_key(fake_hash(6));
        for tomb in &entries[..5] {
            assert_ne!(
                tomb.0, finalized_key,
                "finalized block must not be tombstoned"
            );
        }
        let (marker_key, marker_value) = block_weight_cleaned_to_aux_entry::<u32>(5);
        assert_eq!(entries[5].0, marker_key);
        assert_eq!(entries[5].1.as_deref(), Some(marker_value.as_slice()));
    }

    #[test]
    fn sweep_no_op_when_marker_exceeds_finalized() {
        let entries = build_canonical_sweep_entries::<u32, u64, _>(50, 30, 100, |_| {
            panic!("closure must not be called when finalized < cleaned_to")
        })
        .unwrap();
        assert!(entries.is_empty());
    }

    #[test]
    fn sweep_no_op_when_batch_is_zero() {
        let entries = build_canonical_sweep_entries::<u32, u64, _>(0, 5, 0, |_| {
            panic!("closure must not be called when batch == 0")
        })
        .unwrap();
        assert!(entries.is_empty());
    }

    #[test]
    fn sweep_propagates_height_to_hash_error() {
        // A backend error mid-sweep must abort the whole sweep, not be skipped.
        let result = build_canonical_sweep_entries::<u32, u64, _>(0, 5, 100, |n| {
            if n == 3 {
                Err(sp_blockchain::Error::Backend("simulated".into()))
            } else {
                Ok(Some(fake_hash(n)))
            }
        });
        assert!(matches!(result, Err(sp_blockchain::Error::Backend(_))));
    }

    #[test]
    fn sweep_caps_at_batch_size() {
        let entries = build_canonical_sweep_entries::<u32, u64, _>(0, 1_000_000, 100, |n| {
            Ok(Some(fake_hash(n)))
        })
        .unwrap();
        // 100 tombstones + 1 marker
        assert_eq!(entries.len(), 101);
        let (marker_key, marker_value) = block_weight_cleaned_to_aux_entry::<u32>(100);
        assert_eq!(entries[100].0, marker_key);
        assert_eq!(entries[100].1.as_deref(), Some(marker_value.as_slice()));
    }

    #[test]
    fn sweep_stops_at_first_missing_height_and_marker_reflects_last_swept() {
        // Heights 2 and 4 missing → stop at 2, marker stays at 1 so 2..=5 retry next sweep.
        let entries = build_canonical_sweep_entries::<u32, u64, _>(0, 6, 100, |n| {
            if n == 2 || n == 4 {
                Ok(None)
            } else {
                Ok(Some(fake_hash(n)))
            }
        })
        .unwrap();
        assert_eq!(entries.len(), 2);
        let (marker_key, marker_value) = block_weight_cleaned_to_aux_entry::<u32>(1);
        assert_eq!(entries[1].0, marker_key);
        assert_eq!(entries[1].1.as_deref(), Some(marker_value.as_slice()));
    }

    #[test]
    fn sweep_emits_no_marker_when_first_height_is_missing() {
        // Nothing swept → no tombstones AND no marker (marker only moves with progress).
        let entries =
            build_canonical_sweep_entries::<u32, u64, _>(0, 6, 100, |_| Ok(None)).unwrap();
        assert!(entries.is_empty());
    }

    #[test]
    fn sweep_resumes_from_marker_across_calls() {
        let mut cleaned_to: u32 = 0;
        let finalized: u32 = 251;

        // Three full batches of 100 advance the marker 100 → 200 → 250
        // (250 = finalized - 1, the sweep's hard upper bound).
        for expected_marker in [100u32, 200, 250] {
            let entries =
                build_canonical_sweep_entries::<u32, u64, _>(cleaned_to, finalized, 100, |n| {
                    Ok(Some(fake_hash(n)))
                })
                .unwrap();
            let (marker_key, marker_value) =
                block_weight_cleaned_to_aux_entry::<u32>(expected_marker);
            assert_eq!(entries.last().unwrap().0, marker_key);
            assert_eq!(
                entries.last().unwrap().1.as_deref(),
                Some(marker_value.as_slice())
            );
            cleaned_to = expected_marker;
        }

        // Fully caught up: a fourth call is a no-op.
        let entries =
            build_canonical_sweep_entries::<u32, u64, _>(cleaned_to, finalized, 100, |n| {
                Ok(Some(fake_hash(n)))
            })
            .unwrap();
        assert!(entries.is_empty());
    }

    #[test]
    fn sweep_terminates_at_numeric_max_boundary() {
        // cleaned_to = MAX-6 → tombstones MAX-5..=MAX-1, marker = MAX-1.
        // (Finalized block at MAX itself is preserved.)
        let cleaned_to = u32::MAX - 6;
        let entries =
            build_canonical_sweep_entries::<u32, u64, _>(cleaned_to, u32::MAX, 100, |n| {
                Ok(Some(fake_hash(n)))
            })
            .unwrap();
        // 5 tombstones + 1 marker
        assert_eq!(entries.len(), 6);
        let (marker_key, marker_value) = block_weight_cleaned_to_aux_entry::<u32>(u32::MAX - 1);
        assert_eq!(entries[5].0, marker_key);
        assert_eq!(entries[5].1.as_deref(), Some(marker_value.as_slice()));
    }

    #[test]
    fn sweep_is_no_op_when_cleaned_to_and_finalized_both_at_max() {
        // Without the `finalized - 1` upper bound, this would tombstone N::MAX
        // forever (marker never advances because last_swept == cleaned_to).
        let entries = build_canonical_sweep_entries::<u32, u64, _>(u32::MAX, u32::MAX, 100, |_| {
            panic!("closure must not be called")
        })
        .unwrap();
        assert!(entries.is_empty());
    }
}
365}