subspace_verification/
lib.rs

//! Verification primitives for Subspace.
#![forbid(unsafe_code)]
#![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
#![feature(array_chunks, portable_simd)]
// `generic_const_exprs` is an incomplete feature
#![allow(incomplete_features)]
// TODO: This feature is not actually used in this crate, but is added as a workaround for
//  https://github.com/rust-lang/rust/issues/133199
#![feature(generic_const_exprs)]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(not(feature = "std"))]
extern crate alloc;

#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(all(feature = "kzg", not(feature = "std")))]
use alloc::vec::Vec;
use core::mem;
#[cfg(feature = "kzg")]
use core::num::NonZeroU64;
#[cfg(feature = "kzg")]
use core::simd::Simd;
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
use schnorrkel::SignatureError;
use schnorrkel::context::SigningContext;
#[cfg(feature = "kzg")]
use subspace_core_primitives::hashes::blake3_254_hash_to_scalar;
use subspace_core_primitives::hashes::{Blake3Hash, blake3_hash_list, blake3_hash_with_key};
#[cfg(feature = "kzg")]
use subspace_core_primitives::pieces::{PieceArray, Record, RecordWitness};
use subspace_core_primitives::pot::PotOutput;
#[cfg(feature = "kzg")]
use subspace_core_primitives::sectors::SectorId;
use subspace_core_primitives::sectors::SectorSlotChallenge;
#[cfg(feature = "kzg")]
use subspace_core_primitives::segments::ArchivedHistorySegment;
use subspace_core_primitives::segments::{HistorySize, SegmentCommitment};
#[cfg(feature = "kzg")]
use subspace_core_primitives::solutions::Solution;
use subspace_core_primitives::solutions::{RewardSignature, SolutionRange};
use subspace_core_primitives::{BlockForkWeight, BlockNumber, PublicKey, ScalarBytes, SlotNumber};
#[cfg(feature = "kzg")]
use subspace_kzg::{Commitment, Kzg, Scalar, Witness};
#[cfg(feature = "kzg")]
use subspace_proof_of_space::Table;

/// Errors encountered by the Subspace consensus primitives.
#[derive(Debug, Eq, PartialEq, thiserror::Error)]
pub enum Error {
    /// Invalid piece offset
    #[error(
        "Piece offset {piece_offset} is invalid, sector contains up to {max_pieces_in_sector} \
        pieces"
    )]
    InvalidPieceOffset {
        /// Index of the piece that failed verification
        piece_offset: u16,
        /// How many pieces one sector is supposed to contain (max)
        max_pieces_in_sector: u16,
    },
    /// History size is in the future
    #[error("History size {solution} is in the future, current is {current}")]
    FutureHistorySize {
        /// Current history size
        current: HistorySize,
        /// History size solution was created for
        solution: HistorySize,
    },
    /// Sector expired
    #[error(
        "Sector expired at history size {expiration_history_size}, \
        current is {current_history_size}"
    )]
    SectorExpired {
        /// Expiration history size
        expiration_history_size: HistorySize,
        /// Current history size
        current_history_size: HistorySize,
    },
    /// Piece verification failed
    #[error("Piece verification failed")]
    InvalidPiece,
    /// Solution is outside of challenge range
    #[error(
        "Solution distance {solution_distance} is outside of solution range \
        {half_solution_range} (half of actual solution range)"
    )]
    OutsideSolutionRange {
        /// Half of solution range
        half_solution_range: SolutionRange,
        /// Solution distance
        solution_distance: SolutionRange,
    },
    /// Invalid proof of space
    #[error("Invalid proof of space")]
    InvalidProofOfSpace,
    /// Invalid audit chunk offset
    #[error("Invalid audit chunk offset")]
    InvalidAuditChunkOffset,
    /// Invalid chunk
    #[error("Invalid chunk: {0}")]
    InvalidChunk(String),
    /// Invalid chunk witness
    #[error("Invalid chunk witness")]
    InvalidChunkWitness,
    /// Invalid history size
    #[error("Invalid history size")]
    InvalidHistorySize,
}

/// Check the reward signature validity.
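///
/// # Example
///
/// A minimal usage sketch (ignored doc-test; `block_hash`, `signature`, and `public_key` are
/// assumed to come from a sealed block, and the context bytes are illustrative rather than
/// taken from a specific chain spec):
///
/// ```ignore
/// use schnorrkel::signing_context;
///
/// let reward_signing_context = signing_context(b"subspace_reward");
/// check_reward_signature(
///     block_hash.as_ref(),
///     &signature,
///     &public_key,
///     &reward_signing_context,
/// )
/// .expect("signature must match the block author's public key");
/// ```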
pub fn check_reward_signature(
    hash: &[u8],
    signature: &RewardSignature,
    public_key: &PublicKey,
    reward_signing_context: &SigningContext,
) -> Result<(), SignatureError> {
    let public_key = schnorrkel::PublicKey::from_bytes(public_key.as_ref())?;
    let signature = schnorrkel::Signature::from_bytes(signature.as_ref())?;
    public_key.verify(reward_signing_context.bytes(hash), &signature)
}

/// Calculates the solution distance for the given parameters. Used as a primitive to check
/// whether a solution distance is within the solution range (see [`is_within_solution_range()`]).
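///
/// Both the keyed audit chunk hash and the global challenge are truncated to their first
/// `size_of::<SolutionRange>()` little-endian bytes, and the result is the (wrapping)
/// bidirectional distance between the two `u64` values.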
fn calculate_solution_distance(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
) -> SolutionRange {
    let audit_chunk = blake3_hash_with_key(sector_slot_challenge, chunk);
    let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *audit_chunk
            .array_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .next()
            .expect("Solution range is smaller in size than global challenge; qed"),
    );
    let global_challenge_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *global_challenge
            .array_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .next()
            .expect("Solution range is smaller in size than global challenge; qed"),
    );
    subspace_core_primitives::solutions::bidirectional_distance(
        &global_challenge_as_solution_range,
        &audit_chunk_as_solution_range,
    )
}

/// Returns `Some(solution_distance)` if the solution distance is within the solution range for
/// the provided parameters.
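///
/// The comparison is against *half* the solution range, since the audited chunk may land up to
/// `solution_range / 2` on either side of the challenge. A minimal sketch (assuming inputs
/// derived as in `verify_solution()`):
///
/// ```ignore
/// if let Some(distance) =
///     is_within_solution_range(&global_challenge, &chunk, &sector_slot_challenge, solution_range)
/// {
///     debug_assert!(distance <= solution_range / 2);
/// }
/// ```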
pub fn is_within_solution_range(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
    solution_range: SolutionRange,
) -> Option<SolutionRange> {
    let solution_distance =
        calculate_solution_distance(global_challenge, chunk, sector_slot_challenge);
    (solution_distance <= solution_range / 2).then_some(solution_distance)
}

/// Parameters for checking piece validity
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct PieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment commitment of segment to which piece belongs
    pub segment_commitment: SegmentCommitment,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment commitment at `min_sector_lifetime` from sector creation (if exists)
    pub sector_expiration_check_segment_commitment: Option<SegmentCommitment>,
}

/// Parameters for solution verification
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct VerifySolutionParams {
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<PieceCheckParams>,
}

/// Calculate the block's contribution to the fork weight, which is derived from the provided
/// solution range.
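///
/// A smaller solution range implies more space pledged to the network, so blocks produced
/// under it weigh more. An illustrative sketch (ignored doc-test, since the `testing` feature
/// remaps the `SolutionRange::MAX` case to a weight of `1`):
///
/// ```ignore
/// assert_eq!(
///     calculate_block_fork_weight(SolutionRange::MAX),
///     BlockForkWeight::from(0u64),
/// );
/// ```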
pub fn calculate_block_fork_weight(solution_range: SolutionRange) -> BlockForkWeight {
    // Work around the test runtime accepting all solutions, which causes blocks to have zero
    // fork weight. This makes each node keep its own blocks, and never reorg to a common chain.
    #[cfg(feature = "testing")]
    let solution_range = if solution_range == SolutionRange::MAX {
        SolutionRange::MAX - 1
    } else {
        solution_range
    };

    BlockForkWeight::from(SolutionRange::MAX - solution_range)
}

/// Verify whether a solution is valid; on success, returns the solution distance, which is
/// `<= solution_range / 2`.
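///
/// A minimal call sketch (ignored doc-test; assumes `solution`, `slot`, `params`, and a shared
/// `Kzg` instance are at hand, and that `PosTable` is the proof-of-space table implementation
/// the chain uses):
///
/// ```ignore
/// let solution_distance = verify_solution::<PosTable, _>(&solution, slot, &params, &kzg)?;
/// ```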
#[cfg(feature = "kzg")]
pub fn verify_solution<'a, PosTable, RewardAddress>(
    solution: &'a Solution<RewardAddress>,
    slot: SlotNumber,
    params: &'a VerifySolutionParams,
    kzg: &'a Kzg,
) -> Result<SolutionRange, Error>
where
    PosTable: Table,
{
    let VerifySolutionParams {
        proof_of_time,
        solution_range,
        piece_check_params,
    } = params;

    let sector_id = SectorId::new(
        solution.public_key.hash(),
        solution.sector_index,
        solution.history_size,
    );

    let global_randomness = proof_of_time.derive_global_randomness();
    let global_challenge = global_randomness.derive_global_challenge(slot);
    let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
    let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

    // Check that proof of space is valid
    if !PosTable::is_proof_valid(
        &sector_id.derive_evaluation_seed(solution.piece_offset),
        s_bucket_audit_index.into(),
        &solution.proof_of_space,
    ) {
        return Err(Error::InvalidProofOfSpace);
    };
    let masked_chunk =
        (Simd::from(*solution.chunk) ^ Simd::from(*solution.proof_of_space.hash())).to_array();

    let solution_distance =
        calculate_solution_distance(&global_challenge, &masked_chunk, &sector_slot_challenge);

    // Check that solution is within solution range
    if solution_distance > solution_range / 2 {
        return Err(Error::OutsideSolutionRange {
            half_solution_range: solution_range / 2,
            solution_distance,
        });
    }

    // Check that chunk belongs to the record
    if !kzg.verify(
        &Commitment::try_from(solution.record_commitment)
            .map_err(|_error| Error::InvalidChunkWitness)?,
        Record::NUM_S_BUCKETS,
        s_bucket_audit_index.into(),
        &Scalar::try_from(solution.chunk).map_err(Error::InvalidChunk)?,
        &Witness::try_from(solution.chunk_witness).map_err(|_error| Error::InvalidChunkWitness)?,
    ) {
        return Err(Error::InvalidChunkWitness);
    }

    if let Some(PieceCheckParams {
        max_pieces_in_sector,
        segment_commitment,
        recent_segments,
        recent_history_fraction,
        min_sector_lifetime,
        current_history_size,
        sector_expiration_check_segment_commitment,
    }) = piece_check_params
    {
        // The `+1` accounts for the possibility of plotting a sector that was just archived and
        // whose segment root is only now being included in the very block we're checking (the
        // parent block, which is where `current_history_size` comes from, doesn't know about
        // this block yet)
        if NonZeroU64::from(solution.history_size).get()
            > NonZeroU64::from(*current_history_size).get() + 1
        {
            return Err(Error::FutureHistorySize {
                current: *current_history_size,
                solution: solution.history_size,
            });
        }

        if u16::from(solution.piece_offset) >= *max_pieces_in_sector {
            return Err(Error::InvalidPieceOffset {
                piece_offset: u16::from(solution.piece_offset),
                max_pieces_in_sector: *max_pieces_in_sector,
            });
        }

        if let Some(sector_expiration_check_segment_commitment) =
            sector_expiration_check_segment_commitment
        {
            let Some(expiration_history_size) = sector_id.derive_expiration_history_size(
                solution.history_size,
                sector_expiration_check_segment_commitment,
                *min_sector_lifetime,
            ) else {
                return Err(Error::InvalidHistorySize);
            };

            if expiration_history_size <= *current_history_size {
                return Err(Error::SectorExpired {
                    expiration_history_size,
                    current_history_size: *current_history_size,
                });
            }
        }

        let position = sector_id
            .derive_piece_index(
                solution.piece_offset,
                solution.history_size,
                *max_pieces_in_sector,
                *recent_segments,
                *recent_history_fraction,
            )
            .position();

        // Check that piece is part of the blockchain history
        if !is_record_commitment_hash_valid(
            kzg,
            &Scalar::try_from(blake3_254_hash_to_scalar(
                solution.record_commitment.as_ref(),
            ))
            .expect("Created correctly by the dedicated hash function; qed"),
            segment_commitment,
            &solution.record_witness,
            position,
        ) {
            return Err(Error::InvalidPiece);
        }
    }

    Ok(solution_distance)
}

/// Validate the witness embedded within a piece produced by the archiver
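///
/// A minimal sketch (ignored doc-test; assumes `piece` was fetched from the archived segment
/// whose commitment is `segment_commitment`, with `position` being the piece's index within
/// that segment):
///
/// ```ignore
/// if !is_piece_valid(&kzg, &piece, &segment_commitment, position) {
///     // Reject the piece, e.g. re-fetch it from another peer
/// }
/// ```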
#[cfg(feature = "kzg")]
pub fn is_piece_valid(
    kzg: &Kzg,
    piece: &PieceArray,
    segment_commitment: &SegmentCommitment,
    position: u32,
) -> bool {
    let (record, commitment, witness) = piece.split();
    let Ok(witness) = Witness::try_from_bytes(witness) else {
        return false;
    };

    let mut scalars = Vec::with_capacity(record.len().next_power_of_two());

    for record_chunk in record.iter() {
        let Ok(scalar) = Scalar::try_from(record_chunk) else {
            return false;
        };
        scalars.push(scalar);
    }

    // The number of scalars for KZG must be a power of two
    scalars.resize(scalars.capacity(), Scalar::default());

    let Ok(polynomial) = kzg.poly(&scalars) else {
        return false;
    };

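    // The commitment embedded in the piece must match a fresh commitment to the record's
    // contents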
    if kzg
        .commit(&polynomial)
        .map(|commitment| commitment.to_bytes())
        .as_ref()
        != Ok(commitment)
    {
        return false;
    }

    let Ok(segment_commitment) = Commitment::try_from(segment_commitment) else {
        return false;
    };

    let commitment_hash = Scalar::try_from(blake3_254_hash_to_scalar(commitment.as_ref()))
        .expect("Created correctly by the dedicated hash function; qed");

    kzg.verify(
        &segment_commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        &commitment_hash,
        &witness,
    )
}

/// Validate the witness for a record commitment hash produced by the archiver
#[cfg(feature = "kzg")]
pub fn is_record_commitment_hash_valid(
    kzg: &Kzg,
    record_commitment_hash: &Scalar,
    commitment: &SegmentCommitment,
    witness: &RecordWitness,
    position: u32,
) -> bool {
    let Ok(commitment) = Commitment::try_from(commitment) else {
        return false;
    };
    let Ok(witness) = Witness::try_from(witness) else {
        return false;
    };

    kzg.verify(
        &commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        record_commitment_hash,
        &witness,
    )
}

/// Derive proof of time entropy from a chunk and proof of time, for injection purposes.
#[inline]
pub fn derive_pot_entropy(chunk: &ScalarBytes, proof_of_time: PotOutput) -> Blake3Hash {
    blake3_hash_list(&[chunk.as_ref(), proof_of_time.as_ref()])
}

/// Derives the next solution range from the total number of slots in the era and the slot
/// probability
pub fn derive_next_solution_range(
    start_slot: SlotNumber,
    current_slot: SlotNumber,
    slot_probability: (u64, u64),
    current_solution_range: SolutionRange,
    era_duration: BlockNumber,
) -> SolutionRange {
    // Calculate the total number of slots within this era
    let era_slot_count = current_slot - start_slot;

    // Now we need to re-calculate solution range. The idea here is to keep block production at
    // the same pace while space pledged on the network changes. For this we adjust previous
    // solution range according to actual and expected number of blocks per era.

    // Below is code analogous to the following, but without using floats:
    // ```rust
    // let actual_slots_per_block = era_slot_count as f64 / era_duration as f64;
    // let expected_slots_per_block =
    //     slot_probability.1 as f64 / slot_probability.0 as f64;
    // let adjustment_factor =
    //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
    //
    // next_solution_range =
    //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
    // ```
    u64::try_from(
        u128::from(current_solution_range)
            .saturating_mul(u128::from(era_slot_count))
            .saturating_mul(u128::from(slot_probability.0))
            / u128::from(era_duration)
            / u128::from(slot_probability.1),
    )
    .unwrap_or(u64::MAX)
    .clamp(
        current_solution_range / 4,
        current_solution_range.saturating_mul(4),
    )
}
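
#[cfg(test)]
mod tests {
    use super::*;

    // A sanity check of the integer adjustment math above, using illustrative values rather
    // than real consensus parameters: with slot probability 1/6, a 100-block era is expected
    // to take 600 slots.
    #[test]
    fn next_solution_range_follows_era_pace() {
        // The era took 900 slots instead of 600, i.e. blocks came too slowly, so the range
        // grows by 900 / 600 = 1.5x
        assert_eq!(
            derive_next_solution_range(0, 900, (1, 6), 600_000, 100),
            900_000
        );

        // Extreme slowdowns are clamped to at most 4x the current solution range
        assert_eq!(
            derive_next_solution_range(0, 60_000, (1, 6), 600_000, 100),
            600_000 * 4
        );
    }
}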