subspace_core_primitives/
segments.rs

1//! Segments-related data structures.
2
3#[cfg(not(feature = "std"))]
4extern crate alloc;
5
6use crate::BlockNumber;
7use crate::hashes::{Blake3Hash, blake3_hash};
8use crate::pieces::{FlatPieces, Piece, PieceIndex, RawRecord};
9#[cfg(not(feature = "std"))]
10use alloc::boxed::Box;
11use core::array::TryFromSliceError;
12use core::fmt;
13use core::iter::Step;
14use core::num::NonZeroU64;
15use derive_more::{
16    Add, AddAssign, Deref, DerefMut, Display, Div, DivAssign, From, Into, Mul, MulAssign, Sub,
17    SubAssign,
18};
19use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
20use scale_info::TypeInfo;
21#[cfg(feature = "serde")]
22use serde::{Deserialize, Serialize};
23#[cfg(feature = "serde")]
24use serde::{Deserializer, Serializer};
25#[cfg(feature = "serde")]
26use serde_big_array::BigArray;
27
/// Segment index type.
///
/// Strongly-typed wrapper around a `u64` identifying one segment of archival history.
/// Supports full arithmetic via `derive_more` so callers can do index math without unwrapping.
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Encode,
    Decode,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
    TypeInfo,
    MaxEncodedLen,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
// `repr(transparent)` guarantees the same layout as the inner `u64`
#[repr(transparent)]
pub struct SegmentIndex(u64);
58
59impl Step for SegmentIndex {
60    #[inline]
61    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
62        u64::steps_between(&start.0, &end.0)
63    }
64
65    #[inline]
66    fn forward_checked(start: Self, count: usize) -> Option<Self> {
67        u64::forward_checked(start.0, count).map(Self)
68    }
69
70    #[inline]
71    fn backward_checked(start: Self, count: usize) -> Option<Self> {
72        u64::backward_checked(start.0, count).map(Self)
73    }
74}
75
76impl SegmentIndex {
77    /// Segment index 0.
78    pub const ZERO: SegmentIndex = SegmentIndex(0);
79    /// Segment index 1.
80    pub const ONE: SegmentIndex = SegmentIndex(1);
81
82    /// Create new instance
83    #[inline]
84    pub const fn new(n: u64) -> Self {
85        Self(n)
86    }
87
88    /// Get the first piece index in this segment.
89    #[inline]
90    pub const fn first_piece_index(&self) -> PieceIndex {
91        PieceIndex::new(self.0 * ArchivedHistorySegment::NUM_PIECES as u64)
92    }
93
94    /// Get the last piece index in this segment.
95    #[inline]
96    pub const fn last_piece_index(&self) -> PieceIndex {
97        PieceIndex::new((self.0 + 1) * ArchivedHistorySegment::NUM_PIECES as u64 - 1)
98    }
99
100    /// List of piece indexes that belong to this segment.
101    pub fn segment_piece_indexes(&self) -> [PieceIndex; ArchivedHistorySegment::NUM_PIECES] {
102        let mut piece_indices = [PieceIndex::ZERO; ArchivedHistorySegment::NUM_PIECES];
103        (self.first_piece_index()..=self.last_piece_index())
104            .zip(&mut piece_indices)
105            .for_each(|(input, output)| {
106                *output = input;
107            });
108
109        piece_indices
110    }
111
112    /// List of piece indexes that belong to this segment with source pieces first.
113    pub fn segment_piece_indexes_source_first(
114        &self,
115    ) -> [PieceIndex; ArchivedHistorySegment::NUM_PIECES] {
116        let mut source_first_piece_indices = [PieceIndex::ZERO; ArchivedHistorySegment::NUM_PIECES];
117
118        let piece_indices = self.segment_piece_indexes();
119        piece_indices
120            .into_iter()
121            .step_by(2)
122            .chain(piece_indices.into_iter().skip(1).step_by(2))
123            .zip(&mut source_first_piece_indices)
124            .for_each(|(input, output)| {
125                *output = input;
126            });
127
128        source_first_piece_indices
129    }
130
131    /// Checked integer subtraction. Computes `self - rhs`, returning `None` if underflow occurred.
132    #[inline]
133    pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
134        // TODO: when Option::map becomes const, use it here
135        match self.0.checked_sub(rhs.0) {
136            Some(segment_index) => Some(Self(segment_index)),
137            None => None,
138        }
139    }
140
141    /// Saturating integer subtraction. Computes `self - rhs`, returning zero if underflow
142    /// occurred.
143    #[inline]
144    pub const fn saturating_sub(self, rhs: Self) -> Self {
145        Self(self.0.saturating_sub(rhs.0))
146    }
147}
148
/// Segment commitment contained within segment header.
///
/// A fixed-size ([`SegmentCommitment::SIZE`]-byte) opaque byte array; `Deref`/`DerefMut` expose
/// the inner array directly.
#[derive(
    Copy,
    Clone,
    Eq,
    PartialEq,
    Hash,
    Deref,
    DerefMut,
    From,
    Into,
    Encode,
    Decode,
    TypeInfo,
    MaxEncodedLen,
)]
// Same layout as the inner byte array
#[repr(transparent)]
pub struct SegmentCommitment([u8; SegmentCommitment::SIZE]);
167
168impl fmt::Debug for SegmentCommitment {
169    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
170        write!(f, "{}", hex::encode(self.0))
171    }
172}
173
// Private serde representation used for non-human-readable formats: the raw byte array
// (serialized via `BigArray` because the array is longer than serde's built-in 32-element limit).
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct SegmentCommitmentBinary(#[serde(with = "BigArray")] [u8; SegmentCommitment::SIZE]);

// Private serde representation used for human-readable formats: the bytes hex-encoded.
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct SegmentCommitmentHex(#[serde(with = "hex")] [u8; SegmentCommitment::SIZE]);
183
#[cfg(feature = "serde")]
impl Serialize for SegmentCommitment {
    /// Serializes as hex text for human-readable formats and as raw bytes otherwise.
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            return SegmentCommitmentHex(self.0).serialize(serializer);
        }
        SegmentCommitmentBinary(self.0).serialize(serializer)
    }
}
198
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for SegmentCommitment {
    /// Deserializes from hex text for human-readable formats and from raw bytes otherwise,
    /// mirroring the `Serialize` implementation.
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let bytes = if deserializer.is_human_readable() {
            SegmentCommitmentHex::deserialize(deserializer)?.0
        } else {
            SegmentCommitmentBinary::deserialize(deserializer)?.0
        };
        Ok(Self(bytes))
    }
}
213
214impl Default for SegmentCommitment {
215    #[inline]
216    fn default() -> Self {
217        Self([0; Self::SIZE])
218    }
219}
220
221impl TryFrom<&[u8]> for SegmentCommitment {
222    type Error = TryFromSliceError;
223
224    #[inline]
225    fn try_from(slice: &[u8]) -> Result<Self, Self::Error> {
226        <[u8; Self::SIZE]>::try_from(slice).map(Self)
227    }
228}
229
230impl AsRef<[u8]> for SegmentCommitment {
231    #[inline]
232    fn as_ref(&self) -> &[u8] {
233        &self.0
234    }
235}
236
237impl AsMut<[u8]> for SegmentCommitment {
238    #[inline]
239    fn as_mut(&mut self) -> &mut [u8] {
240        &mut self.0
241    }
242}
243
impl SegmentCommitment {
    /// Size of segment commitment in bytes.
    // This is the length of the inner `[u8; SegmentCommitment::SIZE]` array; all conversions
    // above (`TryFrom`, serde representations) are keyed off this constant.
    pub const SIZE: usize = 48;
}
248
/// Size of blockchain history in segments.
///
/// Stored as `NonZeroU64`: a history size of zero segments is unrepresentable, and the niche
/// makes `Option<HistorySize>` the same size as `HistorySize` itself.
#[derive(
    Debug,
    Display,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Deref,
    DerefMut,
    Encode,
    Decode,
    TypeInfo,
    MaxEncodedLen,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
// Same layout as the inner `NonZeroU64`
#[repr(transparent)]
pub struct HistorySize(NonZeroU64);
272
273impl From<SegmentIndex> for HistorySize {
274    #[inline]
275    fn from(value: SegmentIndex) -> Self {
276        Self(NonZeroU64::new(value.0 + 1).expect("Not zero; qed"))
277    }
278}
279
280impl HistorySize {
281    /// History size of one
282    pub const ONE: Self = Self(NonZeroU64::new(1).expect("Not zero; qed"));
283
284    /// Create new instance.
285    pub const fn new(value: NonZeroU64) -> Self {
286        Self(value)
287    }
288
289    /// Size of blockchain history in pieces.
290    pub const fn in_pieces(&self) -> NonZeroU64 {
291        self.0.saturating_mul(
292            NonZeroU64::new(ArchivedHistorySegment::NUM_PIECES as u64).expect("Not zero; qed"),
293        )
294    }
295
296    /// Segment index that corresponds to this history size.
297    pub fn segment_index(&self) -> SegmentIndex {
298        SegmentIndex::from(self.0.get() - 1)
299    }
300
301    /// History size at which expiration check for sector happens.
302    ///
303    /// Returns `None` on overflow.
304    pub fn sector_expiration_check(&self, min_sector_lifetime: Self) -> Option<Self> {
305        self.0.checked_add(min_sector_lifetime.0.get()).map(Self)
306    }
307}
308
/// Progress of an archived block.
///
/// A block can either be archived in full or only a prefix of it may have fit into the segment.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub enum ArchivedBlockProgress {
    /// The block has been fully archived.
    Complete,

    /// Number of partially archived bytes of a block.
    Partial(u32),
}
320
321impl Default for ArchivedBlockProgress {
322    /// We assume a block can always fit into the segment initially, but it is definitely possible
323    /// to be transitioned into the partial state after some overflow checking.
324    #[inline]
325    fn default() -> Self {
326        Self::Complete
327    }
328}
329
330impl ArchivedBlockProgress {
331    /// Return the number of partially archived bytes if the progress is not complete.
332    pub fn partial(&self) -> Option<u32> {
333        match self {
334            Self::Complete => None,
335            Self::Partial(number) => Some(*number),
336        }
337    }
338
339    /// Sets new number of partially archived bytes.
340    pub fn set_partial(&mut self, new_partial: u32) {
341        *self = Self::Partial(new_partial);
342    }
343}
344
/// Last archived block
///
/// Records which block the archiver processed most recently and how much of it made it into the
/// segment.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub struct LastArchivedBlock {
    /// Block number
    pub number: BlockNumber,
    /// Progress of an archived block.
    pub archived_progress: ArchivedBlockProgress,
}
355
356impl LastArchivedBlock {
357    /// Returns the number of partially archived bytes for a block.
358    pub fn partial_archived(&self) -> Option<u32> {
359        self.archived_progress.partial()
360    }
361
362    /// Sets new number of partially archived bytes.
363    pub fn set_partial_archived(&mut self, new_partial: u32) {
364        self.archived_progress.set_partial(new_partial);
365    }
366
367    /// Sets the archived state of this block to [`ArchivedBlockProgress::Complete`].
368    pub fn set_complete(&mut self) {
369        self.archived_progress = ArchivedBlockProgress::Complete;
370    }
371}
372
/// Segment header for a specific segment.
///
/// Each segment will have corresponding [`SegmentHeader`] included as the first item in the next
/// segment. Each `SegmentHeader` includes hash of the previous one and all together form a chain of
/// segment headers that is used for quick and efficient verification that some [`Piece`]
/// corresponds to the actual archival history of the blockchain.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Encode, Decode, TypeInfo, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub enum SegmentHeader {
    /// V0 of the segment header data structure
    // Explicit codec index pins the SCALE encoding even if variants are reordered later
    #[codec(index = 0)]
    #[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
    V0 {
        /// Segment index
        segment_index: SegmentIndex,
        /// Root of commitments of all records in a segment.
        segment_commitment: SegmentCommitment,
        /// Hash of the segment header of the previous segment
        prev_segment_header_hash: Blake3Hash,
        /// Last archived block
        last_archived_block: LastArchivedBlock,
    },
}
397
398impl SegmentHeader {
399    /// Hash of the whole segment header
400    pub fn hash(&self) -> Blake3Hash {
401        blake3_hash(&self.encode())
402    }
403
404    /// Segment index
405    pub fn segment_index(&self) -> SegmentIndex {
406        match self {
407            Self::V0 { segment_index, .. } => *segment_index,
408        }
409    }
410
411    /// Segment commitment of the records in a segment.
412    pub fn segment_commitment(&self) -> SegmentCommitment {
413        match self {
414            Self::V0 {
415                segment_commitment, ..
416            } => *segment_commitment,
417        }
418    }
419
420    /// Hash of the segment header of the previous segment
421    pub fn prev_segment_header_hash(&self) -> Blake3Hash {
422        match self {
423            Self::V0 {
424                prev_segment_header_hash,
425                ..
426            } => *prev_segment_header_hash,
427        }
428    }
429
430    /// Last archived block
431    pub fn last_archived_block(&self) -> LastArchivedBlock {
432        match self {
433            Self::V0 {
434                last_archived_block,
435                ..
436            } => *last_archived_block,
437        }
438    }
439}
440
/// Recorded history segment before archiving is applied.
///
/// NOTE: This is a stack-allocated data structure and can cause stack overflow!
// Use `new_boxed()` below to construct it on the heap instead.
#[derive(Copy, Clone, Eq, PartialEq, Deref, DerefMut)]
// Same layout as the inner array of raw records
#[repr(transparent)]
pub struct RecordedHistorySegment([RawRecord; Self::NUM_RAW_RECORDS]);
447
impl fmt::Debug for RecordedHistorySegment {
    // The contents are far too large to be useful in debug output, so only the type name is
    // printed, with `..` marking the omitted fields.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RecordedHistorySegment")
            .finish_non_exhaustive()
    }
}
454
impl Default for RecordedHistorySegment {
    // NOTE: constructs the whole segment on the stack; prefer `Self::new_boxed()` where stack
    // overflow is a concern (see the struct-level warning).
    #[inline]
    fn default() -> Self {
        Self([RawRecord::default(); Self::NUM_RAW_RECORDS])
    }
}
461
impl AsRef<[u8]> for RecordedHistorySegment {
    // Views the whole segment as one contiguous byte slice by flattening the nested arrays of
    // `RawRecord`'s underlying representation (two levels deep, hence two `as_flattened` calls).
    #[inline]
    fn as_ref(&self) -> &[u8] {
        RawRecord::slice_to_repr(&self.0)
            .as_flattened()
            .as_flattened()
    }
}
470
impl AsMut<[u8]> for RecordedHistorySegment {
    // Mutable counterpart of `AsRef<[u8]>`: one contiguous mutable byte view over all records.
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        RawRecord::slice_mut_to_repr(&mut self.0)
            .as_flattened_mut()
            .as_flattened_mut()
    }
}
479
impl RecordedHistorySegment {
    /// Number of raw records in one segment of recorded history.
    pub const NUM_RAW_RECORDS: usize = 128;
    /// Erasure coding rate for records during archiving process.
    // (source, total): 1/2 means parity doubles the piece count relative to source records.
    pub const ERASURE_CODING_RATE: (usize, usize) = (1, 2);
    /// Size of recorded history segment in bytes.
    ///
    /// It includes half of the records (just source records) that will later be erasure coded and
    /// together with corresponding commitments and witnesses will result in
    /// [`ArchivedHistorySegment::NUM_PIECES`] [`Piece`]s of archival history.
    pub const SIZE: usize = RawRecord::SIZE * Self::NUM_RAW_RECORDS;

    /// Create boxed value without hitting stack overflow
    // `Box::new(Self::default())` would build the value on the stack first and then move it,
    // defeating the purpose, hence the zeroed heap allocation instead.
    #[inline]
    pub fn new_boxed() -> Box<Self> {
        // TODO: Should have been just `::new()`, but https://github.com/rust-lang/rust/issues/53827
        // SAFETY: Data structure filled with zeroes is a valid invariant
        unsafe { Box::<Self>::new_zeroed().assume_init() }
    }
}
500
/// Archived history segment after archiving is applied.
///
/// Wraps [`FlatPieces`] holding [`Self::NUM_PIECES`] pieces; `Deref`/`DerefMut` expose the inner
/// collection.
#[derive(Debug, Clone, Eq, PartialEq, Deref, DerefMut)]
#[repr(transparent)]
pub struct ArchivedHistorySegment(FlatPieces);
505
506impl Default for ArchivedHistorySegment {
507    #[inline]
508    fn default() -> Self {
509        Self(FlatPieces::new(Self::NUM_PIECES))
510    }
511}
512
impl ArchivedHistorySegment {
    /// Number of pieces in one segment of archived history.
    // Source records scaled by the erasure coding rate: 128 * 2 / 1 = 256 pieces.
    pub const NUM_PIECES: usize = RecordedHistorySegment::NUM_RAW_RECORDS
        * RecordedHistorySegment::ERASURE_CODING_RATE.1
        / RecordedHistorySegment::ERASURE_CODING_RATE.0;
    /// Size of archived history segment in bytes.
    ///
    /// It includes erasure coded [`crate::pieces::PieceArray`]s (both source and parity) that are
    /// composed of [`crate::pieces::Record`]s together with corresponding commitments and
    /// witnesses.
    pub const SIZE: usize = Piece::SIZE * Self::NUM_PIECES;

    /// Ensure archived history segment contains cheaply cloneable shared data.
    ///
    /// Internally archived history segment uses CoW mechanism and can store either mutable owned
    /// data or data that is cheap to clone, calling this method will ensure further clones and
    /// returned pieces will not result in additional memory allocations.
    // Delegates to `FlatPieces::to_shared`; consumes `self` and returns the shared-backed segment.
    pub fn to_shared(self) -> Self {
        Self(self.0.to_shared())
    }
}