subspace_core_primitives/
segments.rs

1//! Segments-related data structures.
2
3#[cfg(not(feature = "std"))]
4extern crate alloc;
5
6use crate::hashes::{blake3_hash, Blake3Hash};
7use crate::pieces::{FlatPieces, Piece, PieceIndex, RawRecord};
8use crate::BlockNumber;
9#[cfg(not(feature = "std"))]
10use alloc::boxed::Box;
11use core::array::TryFromSliceError;
12use core::fmt;
13use core::iter::Step;
14use core::num::NonZeroU64;
15use derive_more::{
16    Add, AddAssign, Deref, DerefMut, Display, Div, DivAssign, From, Into, Mul, MulAssign, Sub,
17    SubAssign,
18};
19use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
20use scale_info::TypeInfo;
21#[cfg(feature = "serde")]
22use serde::{Deserialize, Serialize};
23#[cfg(feature = "serde")]
24use serde::{Deserializer, Serializer};
25#[cfg(feature = "serde")]
26use serde_big_array::BigArray;
27
/// Segment index type.
///
/// A zero-based index identifying one segment of archival history. Wraps a `u64` newtype-style
/// so segment indexes cannot be mixed up with other integer quantities. Arithmetic operators are
/// derived for convenience; they operate directly on the inner `u64`.
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Encode,
    Decode,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
    TypeInfo,
    MaxEncodedLen,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(transparent)]
pub struct SegmentIndex(u64);
58
59impl Step for SegmentIndex {
60    #[inline]
61    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
62        u64::steps_between(&start.0, &end.0)
63    }
64
65    #[inline]
66    fn forward_checked(start: Self, count: usize) -> Option<Self> {
67        u64::forward_checked(start.0, count).map(Self)
68    }
69
70    #[inline]
71    fn backward_checked(start: Self, count: usize) -> Option<Self> {
72        u64::backward_checked(start.0, count).map(Self)
73    }
74}
75
76impl SegmentIndex {
77    /// Segment index 0.
78    pub const ZERO: SegmentIndex = SegmentIndex(0);
79    /// Segment index 1.
80    pub const ONE: SegmentIndex = SegmentIndex(1);
81
82    /// Create new instance
83    #[inline]
84    pub const fn new(n: u64) -> Self {
85        Self(n)
86    }
87
88    /// Get the first piece index in this segment.
89    #[inline]
90    pub const fn first_piece_index(&self) -> PieceIndex {
91        PieceIndex::new(self.0 * ArchivedHistorySegment::NUM_PIECES as u64)
92    }
93
94    /// Get the last piece index in this segment.
95    #[inline]
96    pub const fn last_piece_index(&self) -> PieceIndex {
97        PieceIndex::new((self.0 + 1) * ArchivedHistorySegment::NUM_PIECES as u64 - 1)
98    }
99
100    /// List of piece indexes that belong to this segment.
101    pub fn segment_piece_indexes(&self) -> [PieceIndex; ArchivedHistorySegment::NUM_PIECES] {
102        let mut piece_indices = [PieceIndex::ZERO; ArchivedHistorySegment::NUM_PIECES];
103        (self.first_piece_index()..=self.last_piece_index())
104            .zip(&mut piece_indices)
105            .for_each(|(input, output)| {
106                *output = input;
107            });
108
109        piece_indices
110    }
111
112    /// List of piece indexes that belong to this segment with source pieces first.
113    pub fn segment_piece_indexes_source_first(
114        &self,
115    ) -> [PieceIndex; ArchivedHistorySegment::NUM_PIECES] {
116        let mut source_first_piece_indices = [PieceIndex::ZERO; ArchivedHistorySegment::NUM_PIECES];
117
118        let piece_indices = self.segment_piece_indexes();
119        piece_indices
120            .into_iter()
121            .step_by(2)
122            .chain(piece_indices.into_iter().skip(1).step_by(2))
123            .zip(&mut source_first_piece_indices)
124            .for_each(|(input, output)| {
125                *output = input;
126            });
127
128        source_first_piece_indices
129    }
130
131    /// Checked integer subtraction. Computes `self - rhs`, returning `None` if overflow occurred.
132    #[inline]
133    pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
134        // TODO: when Option::map becomes const, use it here
135        match self.0.checked_sub(rhs.0) {
136            Some(segment_index) => Some(Self(segment_index)),
137            None => None,
138        }
139    }
140}
141
/// Segment commitment contained within segment header.
///
/// A fixed-size (48-byte, see [`SegmentCommitment::SIZE`]) opaque byte array committing to all
/// records in a segment. `Deref`/`DerefMut` expose the inner byte array directly.
#[derive(
    Copy,
    Clone,
    Eq,
    PartialEq,
    Hash,
    Deref,
    DerefMut,
    From,
    Into,
    Encode,
    Decode,
    TypeInfo,
    MaxEncodedLen,
)]
#[repr(transparent)]
pub struct SegmentCommitment([u8; SegmentCommitment::SIZE]);
160
161impl fmt::Debug for SegmentCommitment {
162    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
163        write!(f, "{}", hex::encode(self.0))
164    }
165}
166
// Private serde helper: binary (non-human-readable) representation of the commitment bytes.
// `BigArray` is required because serde does not derive for arrays longer than 32 elements.
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct SegmentCommitmentBinary(#[serde(with = "BigArray")] [u8; SegmentCommitment::SIZE]);

// Private serde helper: hex-string representation used for human-readable formats (e.g. JSON).
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct SegmentCommitmentHex(#[serde(with = "hex")] [u8; SegmentCommitment::SIZE]);
176
#[cfg(feature = "serde")]
impl Serialize for SegmentCommitment {
    // Human-readable formats get a hex string, binary formats get raw bytes.
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match serializer.is_human_readable() {
            true => SegmentCommitmentHex(self.0).serialize(serializer),
            false => SegmentCommitmentBinary(self.0).serialize(serializer),
        }
    }
}
191
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for SegmentCommitment {
    // Mirror of `Serialize`: hex string for human-readable formats, raw bytes otherwise.
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let bytes = if deserializer.is_human_readable() {
            SegmentCommitmentHex::deserialize(deserializer)?.0
        } else {
            SegmentCommitmentBinary::deserialize(deserializer)?.0
        };
        Ok(Self(bytes))
    }
}
206
207impl Default for SegmentCommitment {
208    #[inline]
209    fn default() -> Self {
210        Self([0; Self::SIZE])
211    }
212}
213
214impl TryFrom<&[u8]> for SegmentCommitment {
215    type Error = TryFromSliceError;
216
217    #[inline]
218    fn try_from(slice: &[u8]) -> Result<Self, Self::Error> {
219        <[u8; Self::SIZE]>::try_from(slice).map(Self)
220    }
221}
222
223impl AsRef<[u8]> for SegmentCommitment {
224    #[inline]
225    fn as_ref(&self) -> &[u8] {
226        &self.0
227    }
228}
229
230impl AsMut<[u8]> for SegmentCommitment {
231    #[inline]
232    fn as_mut(&mut self) -> &mut [u8] {
233        &mut self.0
234    }
235}
236
impl SegmentCommitment {
    /// Size of segment commitment in bytes.
    pub const SIZE: usize = 48;
}
241
/// Size of blockchain history in segments.
///
/// A one-based count of archived segments, hence the `NonZeroU64` inner type: history of size
/// zero is unrepresentable. Convert from/to [`SegmentIndex`] via [`From`] and
/// [`HistorySize::segment_index`] respectively.
#[derive(
    Debug,
    Display,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Deref,
    DerefMut,
    Encode,
    Decode,
    TypeInfo,
    MaxEncodedLen,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(transparent)]
pub struct HistorySize(NonZeroU64);
265
266impl From<SegmentIndex> for HistorySize {
267    #[inline]
268    fn from(value: SegmentIndex) -> Self {
269        Self(NonZeroU64::new(value.0 + 1).expect("Not zero; qed"))
270    }
271}
272
273impl HistorySize {
274    /// History size of one
275    pub const ONE: Self = Self(NonZeroU64::new(1).expect("Not zero; qed"));
276
277    /// Create new instance.
278    pub const fn new(value: NonZeroU64) -> Self {
279        Self(value)
280    }
281
282    /// Size of blockchain history in pieces.
283    pub const fn in_pieces(&self) -> NonZeroU64 {
284        self.0.saturating_mul(
285            NonZeroU64::new(ArchivedHistorySegment::NUM_PIECES as u64).expect("Not zero; qed"),
286        )
287    }
288
289    /// Segment index that corresponds to this history size.
290    pub fn segment_index(&self) -> SegmentIndex {
291        SegmentIndex::from(self.0.get() - 1)
292    }
293
294    /// History size at which expiration check for sector happens.
295    ///
296    /// Returns `None` on overflow.
297    pub fn sector_expiration_check(&self, min_sector_lifetime: Self) -> Option<Self> {
298        self.0.checked_add(min_sector_lifetime.0.get()).map(Self)
299    }
300}
301
/// Progress of an archived block.
///
/// A block that does not fit entirely into one segment is archived partially; the remainder
/// continues in the next segment.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub enum ArchivedBlockProgress {
    /// The block has been fully archived.
    Complete,

    /// Number of partially archived bytes of a block.
    Partial(u32),
}
313
314impl Default for ArchivedBlockProgress {
315    /// We assume a block can always fit into the segment initially, but it is definitely possible
316    /// to be transitioned into the partial state after some overflow checking.
317    #[inline]
318    fn default() -> Self {
319        Self::Complete
320    }
321}
322
323impl ArchivedBlockProgress {
324    /// Return the number of partially archived bytes if the progress is not complete.
325    pub fn partial(&self) -> Option<u32> {
326        match self {
327            Self::Complete => None,
328            Self::Partial(number) => Some(*number),
329        }
330    }
331
332    /// Sets new number of partially archived bytes.
333    pub fn set_partial(&mut self, new_partial: u32) {
334        *self = Self::Partial(new_partial);
335    }
336}
337
/// Last archived block
///
/// Identifies the most recently archived block and how much of it made it into the segment
/// (see [`ArchivedBlockProgress`]).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub struct LastArchivedBlock {
    /// Block number
    pub number: BlockNumber,
    /// Progress of an archived block.
    pub archived_progress: ArchivedBlockProgress,
}
348
349impl LastArchivedBlock {
350    /// Returns the number of partially archived bytes for a block.
351    pub fn partial_archived(&self) -> Option<u32> {
352        self.archived_progress.partial()
353    }
354
355    /// Sets new number of partially archived bytes.
356    pub fn set_partial_archived(&mut self, new_partial: u32) {
357        self.archived_progress.set_partial(new_partial);
358    }
359
360    /// Sets the archived state of this block to [`ArchivedBlockProgress::Complete`].
361    pub fn set_complete(&mut self) {
362        self.archived_progress = ArchivedBlockProgress::Complete;
363    }
364}
365
/// Segment header for a specific segment.
///
/// Each segment will have corresponding [`SegmentHeader`] included as the first item in the next
/// segment. Each `SegmentHeader` includes hash of the previous one and all together form a chain of
/// segment headers that is used for quick and efficient verification that some [`Piece`]
/// corresponds to the actual archival history of the blockchain.
///
/// Versioned enum (currently only `V0`); SCALE codec indexes are pinned via `#[codec(index)]`
/// so new versions can be added without breaking encoding.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Encode, Decode, TypeInfo, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub enum SegmentHeader {
    /// V0 of the segment header data structure
    #[codec(index = 0)]
    #[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
    V0 {
        /// Segment index
        segment_index: SegmentIndex,
        /// Root of commitments of all records in a segment.
        segment_commitment: SegmentCommitment,
        /// Hash of the segment header of the previous segment
        prev_segment_header_hash: Blake3Hash,
        /// Last archived block
        last_archived_block: LastArchivedBlock,
    },
}
390
391impl SegmentHeader {
392    /// Hash of the whole segment header
393    pub fn hash(&self) -> Blake3Hash {
394        blake3_hash(&self.encode())
395    }
396
397    /// Segment index
398    pub fn segment_index(&self) -> SegmentIndex {
399        match self {
400            Self::V0 { segment_index, .. } => *segment_index,
401        }
402    }
403
404    /// Segment commitment of the records in a segment.
405    pub fn segment_commitment(&self) -> SegmentCommitment {
406        match self {
407            Self::V0 {
408                segment_commitment, ..
409            } => *segment_commitment,
410        }
411    }
412
413    /// Hash of the segment header of the previous segment
414    pub fn prev_segment_header_hash(&self) -> Blake3Hash {
415        match self {
416            Self::V0 {
417                prev_segment_header_hash,
418                ..
419            } => *prev_segment_header_hash,
420        }
421    }
422
423    /// Last archived block
424    pub fn last_archived_block(&self) -> LastArchivedBlock {
425        match self {
426            Self::V0 {
427                last_archived_block,
428                ..
429            } => *last_archived_block,
430        }
431    }
432}
433
/// Recorded history segment before archiving is applied.
///
/// NOTE: This is a stack-allocated data structure and can cause stack overflow!
/// Prefer [`RecordedHistorySegment::new_boxed`] for heap allocation. The inner array holds
/// [`RecordedHistorySegment::NUM_RAW_RECORDS`] raw (pre-erasure-coding) records.
#[derive(Copy, Clone, Eq, PartialEq, Deref, DerefMut)]
#[repr(transparent)]
pub struct RecordedHistorySegment([RawRecord; Self::NUM_RAW_RECORDS]);
440
441impl fmt::Debug for RecordedHistorySegment {
442    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
443        f.debug_struct("RecordedHistorySegment")
444            .finish_non_exhaustive()
445    }
446}
447
448impl Default for RecordedHistorySegment {
449    #[inline]
450    fn default() -> Self {
451        Self([RawRecord::default(); Self::NUM_RAW_RECORDS])
452    }
453}
454
455impl AsRef<[u8]> for RecordedHistorySegment {
456    #[inline]
457    fn as_ref(&self) -> &[u8] {
458        RawRecord::slice_to_repr(&self.0)
459            .as_flattened()
460            .as_flattened()
461    }
462}
463
464impl AsMut<[u8]> for RecordedHistorySegment {
465    #[inline]
466    fn as_mut(&mut self) -> &mut [u8] {
467        RawRecord::slice_mut_to_repr(&mut self.0)
468            .as_flattened_mut()
469            .as_flattened_mut()
470    }
471}
472
impl RecordedHistorySegment {
    /// Number of raw records in one segment of recorded history.
    pub const NUM_RAW_RECORDS: usize = 128;
    /// Erasure coding rate for records during archiving process.
    ///
    /// A `(source, total)`-style ratio: with `(1, 2)` the archived segment contains twice as many
    /// records as there are source records (see [`ArchivedHistorySegment::NUM_PIECES`]).
    pub const ERASURE_CODING_RATE: (usize, usize) = (1, 2);
    /// Size of recorded history segment in bytes.
    ///
    /// It includes half of the records (just source records) that will later be erasure coded and
    /// together with corresponding commitments and witnesses will result in
    /// [`ArchivedHistorySegment::NUM_PIECES`] [`Piece`]s of archival history.
    pub const SIZE: usize = RawRecord::SIZE * Self::NUM_RAW_RECORDS;

    /// Create boxed value without hitting stack overflow
    ///
    /// Allocates zeroed memory directly on the heap, so the large segment value is never
    /// materialized on the stack (unlike `Box::new(Self::default())` would).
    #[inline]
    pub fn new_boxed() -> Box<Self> {
        // TODO: Should have been just `::new()`, but https://github.com/rust-lang/rust/issues/53827
        // SAFETY: Data structure filled with zeroes is a valid invariant
        unsafe { Box::<Self>::new_zeroed().assume_init() }
    }
}
493
/// Archived history segment after archiving is applied.
///
/// Newtype over [`FlatPieces`] holding [`ArchivedHistorySegment::NUM_PIECES`] pieces
/// (source and parity); `Deref`/`DerefMut` expose the inner [`FlatPieces`].
#[derive(Debug, Clone, Eq, PartialEq, Deref, DerefMut)]
#[repr(transparent)]
pub struct ArchivedHistorySegment(FlatPieces);
498
499impl Default for ArchivedHistorySegment {
500    #[inline]
501    fn default() -> Self {
502        Self(FlatPieces::new(Self::NUM_PIECES))
503    }
504}
505
506impl ArchivedHistorySegment {
507    /// Number of pieces in one segment of archived history.
508    pub const NUM_PIECES: usize = RecordedHistorySegment::NUM_RAW_RECORDS
509        * RecordedHistorySegment::ERASURE_CODING_RATE.1
510        / RecordedHistorySegment::ERASURE_CODING_RATE.0;
511    /// Size of archived history segment in bytes.
512    ///
513    /// It includes erasure coded [`crate::pieces::PieceArray`]s (both source and parity) that are
514    /// composed of [`crate::pieces::Record`]s together with corresponding commitments and
515    /// witnesses.
516    pub const SIZE: usize = Piece::SIZE * Self::NUM_PIECES;
517
518    /// Ensure archived history segment contains cheaply cloneable shared data.
519    ///
520    /// Internally archived history segment uses CoW mechanism and can store either mutable owned
521    /// data or data that is cheap to clone, calling this method will ensure further clones and
522    /// returned pieces will not result in additional memory allocations.
523    pub fn to_shared(self) -> Self {
524        Self(self.0.to_shared())
525    }
526}