subspace_verification/lib.rs

//! Verification primitives for Subspace.
#![forbid(unsafe_code)]
#![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
#![feature(portable_simd)]
// `generic_const_exprs` is an incomplete feature
#![allow(incomplete_features)]
// TODO: This feature is not actually used in this crate, but is added as a workaround for
//  https://github.com/rust-lang/rust/issues/133199
#![feature(generic_const_exprs)]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(not(feature = "std"))]
extern crate alloc;

#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(all(feature = "kzg", not(feature = "std")))]
use alloc::vec::Vec;
use core::mem;
#[cfg(feature = "kzg")]
use core::num::NonZeroU64;
#[cfg(feature = "kzg")]
use core::simd::Simd;
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
use schnorrkel::SignatureError;
use schnorrkel::context::SigningContext;
#[cfg(feature = "kzg")]
use subspace_core_primitives::hashes::blake3_254_hash_to_scalar;
use subspace_core_primitives::hashes::{Blake3Hash, blake3_hash_list, blake3_hash_with_key};
#[cfg(feature = "kzg")]
use subspace_core_primitives::pieces::{PieceArray, Record, RecordWitness};
use subspace_core_primitives::pot::PotOutput;
#[cfg(feature = "kzg")]
use subspace_core_primitives::sectors::SectorId;
use subspace_core_primitives::sectors::SectorSlotChallenge;
#[cfg(feature = "kzg")]
use subspace_core_primitives::segments::ArchivedHistorySegment;
use subspace_core_primitives::segments::{HistorySize, SegmentCommitment};
#[cfg(feature = "kzg")]
use subspace_core_primitives::solutions::Solution;
use subspace_core_primitives::solutions::{RewardSignature, SolutionRange};
use subspace_core_primitives::{BlockForkWeight, BlockNumber, PublicKey, ScalarBytes, SlotNumber};
#[cfg(feature = "kzg")]
use subspace_kzg::{Commitment, Kzg, Scalar, Witness};
#[cfg(feature = "kzg")]
use subspace_proof_of_space::Table;
/// Errors encountered by the Subspace consensus primitives.
#[derive(Debug, Eq, PartialEq, thiserror::Error)]
pub enum Error {
    /// Invalid piece offset
    #[error("Invalid piece offset {piece_offset}, max pieces in sector: {max_pieces_in_sector}")]
    InvalidPieceOffset {
        /// Index of the piece that failed verification
        piece_offset: u16,
        /// How many pieces one sector is supposed to contain (max)
        max_pieces_in_sector: u16,
    },
    /// History size is in the future
    #[error("History size {solution} is in the future, current is {current}")]
    FutureHistorySize {
        /// Current history size
        current: HistorySize,
        /// History size solution was created for
        solution: HistorySize,
    },
    /// Sector expired
    #[error("Sector expired at history size {expiration_history_size}, current is {current_history_size}")]
    SectorExpired {
        /// Expiration history size
        expiration_history_size: HistorySize,
        /// Current history size
        current_history_size: HistorySize,
    },
    /// Piece verification failed
    #[error("Piece verification failed")]
    InvalidPiece,
    /// Solution is outside of challenge range
    #[error(
        "Solution distance {solution_distance} is outside of solution range \
        {half_solution_range} (half of actual solution range)"
    )]
    OutsideSolutionRange {
        /// Half of solution range
        half_solution_range: SolutionRange,
        /// Solution distance
        solution_distance: SolutionRange,
    },
    /// Invalid proof of space
    #[error("Invalid proof of space")]
    InvalidProofOfSpace,
    /// Invalid audit chunk offset
    #[error("Invalid audit chunk offset")]
    InvalidAuditChunkOffset,
    /// Invalid chunk
    #[error("Invalid chunk: {0}")]
    InvalidChunk(String),
    /// Invalid chunk witness
    #[error("Invalid chunk witness")]
    InvalidChunkWitness,
    /// Invalid history size
    #[error("Invalid history size")]
    InvalidHistorySize,
}
/// Check the reward signature validity.
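///
/// A minimal usage sketch, not compiled as a doctest: `pre_hash`, `reward_signature` and
/// `public_key` are assumed to be in scope, and the context label below is a placeholder for
/// the chain's actual reward signing context.
///
/// ```ignore
/// use schnorrkel::signing_context;
///
/// // Placeholder context; the real chain constant must be used in practice
/// let reward_signing_context = signing_context(b"example_reward_context");
/// check_reward_signature(
///     pre_hash.as_ref(),
///     &reward_signature,
///     &public_key,
///     &reward_signing_context,
/// )?;
/// ```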
pub fn check_reward_signature(
    hash: &[u8],
    signature: &RewardSignature,
    public_key: &PublicKey,
    reward_signing_context: &SigningContext,
) -> Result<(), SignatureError> {
    let public_key = schnorrkel::PublicKey::from_bytes(public_key.as_ref())?;
    let signature = schnorrkel::Signature::from_bytes(signature.as_ref())?;
    public_key.verify(reward_signing_context.bytes(hash), &signature)
}

/// Calculates the solution distance for the given parameters; used as a primitive to check
/// whether the solution distance is within the solution range (see
/// [`is_within_solution_range()`]).
fn calculate_solution_distance(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
) -> SolutionRange {
    let audit_chunk = blake3_hash_with_key(sector_slot_challenge, chunk);
    let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *audit_chunk
            .as_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .0
            .iter()
            .next()
            .expect("Solution range is smaller in size than global challenge; qed"),
    );
    let global_challenge_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *global_challenge
            .as_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .0
            .iter()
            .next()
            .expect("Solution range is smaller in size than global challenge; qed"),
    );
    subspace_core_primitives::solutions::bidirectional_distance(
        &global_challenge_as_solution_range,
        &audit_chunk_as_solution_range,
    )
}

/// Returns `Some(solution_distance)` if the solution distance is within the solution range for
/// the provided parameters.
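///
/// A minimal sketch of how an audit might use this, not compiled as a doctest
/// (`global_challenge`, `chunk`, `sector_slot_challenge` and `solution_range` are assumed to be
/// derived elsewhere):
///
/// ```ignore
/// if let Some(solution_distance) =
///     is_within_solution_range(&global_challenge, &chunk, &sector_slot_challenge, solution_range)
/// {
///     // The chunk is eligible: `solution_distance <= solution_range / 2`
/// }
/// ```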
pub fn is_within_solution_range(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
    solution_range: SolutionRange,
) -> Option<SolutionRange> {
    let solution_distance =
        calculate_solution_distance(global_challenge, chunk, sector_slot_challenge);
    (solution_distance <= solution_range / 2).then_some(solution_distance)
}

/// Parameters for checking piece validity
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct PieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment commitment of the segment to which the piece belongs
    pub segment_commitment: SegmentCommitment,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment commitment at `min_sector_lifetime` from sector creation (if it exists)
    pub sector_expiration_check_segment_commitment: Option<SegmentCommitment>,
}

/// Parameters for solution verification
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct VerifySolutionParams {
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<PieceCheckParams>,
}

/// Calculate the block's contribution to the fork weight, which is derived from the provided
/// solution range.
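///
/// For example, a smaller (harder to hit) solution range yields a larger per-block fork weight:
///
/// ```ignore
/// let easy = calculate_block_fork_weight(SolutionRange::MAX / 2);
/// let hard = calculate_block_fork_weight(SolutionRange::MAX / 4);
/// assert!(hard > easy);
/// ```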
pub fn calculate_block_fork_weight(solution_range: SolutionRange) -> BlockForkWeight {
    // Work around the test runtime accepting all solutions, which causes blocks to have zero
    // fork weight. This makes each node keep its own blocks, and never reorg to a common chain.
    #[cfg(feature = "testing")]
    let solution_range = if solution_range == SolutionRange::MAX {
        SolutionRange::MAX - 1
    } else {
        solution_range
    };

    BlockForkWeight::from(SolutionRange::MAX - solution_range)
}

/// Verify whether the solution is valid; returns the solution distance (which is
/// `<= solution_range / 2`) on success.
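///
/// A wiring sketch, not compiled as a doctest: it assumes a concrete `PosTable` implementation
/// and already-obtained `solution`, `slot`, `kzg` and consensus parameters.
///
/// ```ignore
/// let params = VerifySolutionParams {
///     proof_of_time,
///     solution_range,
///     // `None` skips piece validity checks (e.g. during stateless pre-validation)
///     piece_check_params: None,
/// };
/// let solution_distance = verify_solution::<PosTable, _>(&solution, slot, &params, &kzg)?;
/// ```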
#[cfg(feature = "kzg")]
pub fn verify_solution<'a, PosTable, RewardAddress>(
    solution: &'a Solution<RewardAddress>,
    slot: SlotNumber,
    params: &'a VerifySolutionParams,
    kzg: &'a Kzg,
) -> Result<SolutionRange, Error>
where
    PosTable: Table,
{
    let VerifySolutionParams {
        proof_of_time,
        solution_range,
        piece_check_params,
    } = params;

    let sector_id = SectorId::new(
        solution.public_key.hash(),
        solution.sector_index,
        solution.history_size,
    );

    let global_randomness = proof_of_time.derive_global_randomness();
    let global_challenge = global_randomness.derive_global_challenge(slot);
    let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
    let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

    // Check that the proof of space is valid
    if !PosTable::is_proof_valid(
        &sector_id.derive_evaluation_seed(solution.piece_offset),
        s_bucket_audit_index.into(),
        &solution.proof_of_space,
    ) {
        return Err(Error::InvalidProofOfSpace);
    };

    let masked_chunk =
        (Simd::from(*solution.chunk) ^ Simd::from(*solution.proof_of_space.hash())).to_array();

    let solution_distance =
        calculate_solution_distance(&global_challenge, &masked_chunk, &sector_slot_challenge);

    // Check that the solution is within the solution range
    if solution_distance > solution_range / 2 {
        return Err(Error::OutsideSolutionRange {
            half_solution_range: solution_range / 2,
            solution_distance,
        });
    }

    // Check that the chunk belongs to the record
    if !kzg.verify(
        &Commitment::try_from(solution.record_commitment)
            .map_err(|_error| Error::InvalidChunkWitness)?,
        Record::NUM_S_BUCKETS,
        s_bucket_audit_index.into(),
        &Scalar::try_from(solution.chunk).map_err(Error::InvalidChunk)?,
        &Witness::try_from(solution.chunk_witness).map_err(|_error| Error::InvalidChunkWitness)?,
    ) {
        return Err(Error::InvalidChunkWitness);
    }

    if let Some(PieceCheckParams {
        max_pieces_in_sector,
        segment_commitment,
        recent_segments,
        recent_history_fraction,
        min_sector_lifetime,
        current_history_size,
        sector_expiration_check_segment_commitment,
    }) = piece_check_params
    {
        // The `+1` accounts for the possibility of plotting a sector that was just archived and
        // whose segment commitment is only being included in the very block we're checking (the
        // parent block, which is where `current_history_size` comes from, doesn't know about
        // this block yet)
        if NonZeroU64::from(solution.history_size).get()
            > NonZeroU64::from(*current_history_size).get() + 1
        {
            return Err(Error::FutureHistorySize {
                current: *current_history_size,
                solution: solution.history_size,
            });
        }

        if u16::from(solution.piece_offset) >= *max_pieces_in_sector {
            return Err(Error::InvalidPieceOffset {
                piece_offset: u16::from(solution.piece_offset),
                max_pieces_in_sector: *max_pieces_in_sector,
            });
        }

        if let Some(sector_expiration_check_segment_commitment) =
            sector_expiration_check_segment_commitment
        {
            let expiration_history_size = match sector_id.derive_expiration_history_size(
                solution.history_size,
                sector_expiration_check_segment_commitment,
                *min_sector_lifetime,
            ) {
                Some(expiration_history_size) => expiration_history_size,
                None => {
                    return Err(Error::InvalidHistorySize);
                }
            };

            if expiration_history_size <= *current_history_size {
                return Err(Error::SectorExpired {
                    expiration_history_size,
                    current_history_size: *current_history_size,
                });
            }
        }

        let position = sector_id
            .derive_piece_index(
                solution.piece_offset,
                solution.history_size,
                *max_pieces_in_sector,
                *recent_segments,
                *recent_history_fraction,
            )
            .position();

        // Check that the piece is part of the blockchain history
        if !is_record_commitment_hash_valid(
            kzg,
            &Scalar::try_from(blake3_254_hash_to_scalar(
                solution.record_commitment.as_ref(),
            ))
            .expect("Created correctly by dedicated hash function; qed"),
            segment_commitment,
            &solution.record_witness,
            position,
        ) {
            return Err(Error::InvalidPiece);
        }
    }

    Ok(solution_distance)
}

/// Validate the witness embedded within a piece produced by the archiver
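///
/// A usage sketch, not compiled as a doctest (`kzg`, `piece`, `segment_commitment` and the
/// piece's `position` within its segment are assumed to come from the archiver and segment
/// headers):
///
/// ```ignore
/// if !is_piece_valid(&kzg, &piece, &segment_commitment, position) {
///     // The piece does not belong to the archived history segment
/// }
/// ```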
#[cfg(feature = "kzg")]
pub fn is_piece_valid(
    kzg: &Kzg,
    piece: &PieceArray,
    segment_commitment: &SegmentCommitment,
    position: u32,
) -> bool {
    let (record, commitment, witness) = piece.split();
    let witness = match Witness::try_from_bytes(witness) {
        Ok(witness) => witness,
        _ => {
            return false;
        }
    };

    let mut scalars = Vec::with_capacity(record.len().next_power_of_two());

    for record_chunk in record.iter() {
        match Scalar::try_from(record_chunk) {
            Ok(scalar) => {
                scalars.push(scalar);
            }
            _ => {
                return false;
            }
        }
    }

    // The number of scalars for KZG must be a power of two
    scalars.resize(scalars.capacity(), Scalar::default());

    let polynomial = match kzg.poly(&scalars) {
        Ok(polynomial) => polynomial,
        _ => {
            return false;
        }
    };

    // The commitment re-computed from the record must match the commitment stored in the piece
    if kzg
        .commit(&polynomial)
        .map(|commitment| commitment.to_bytes())
        .as_ref()
        != Ok(commitment)
    {
        return false;
    }

    let Ok(segment_commitment) = Commitment::try_from(segment_commitment) else {
        return false;
    };

    let commitment_hash = Scalar::try_from(blake3_254_hash_to_scalar(commitment.as_ref()))
        .expect("Created correctly by dedicated hash function; qed");

    kzg.verify(
        &segment_commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        &commitment_hash,
        &witness,
    )
}

/// Validate the witness for a record commitment hash produced by the archiver
#[cfg(feature = "kzg")]
pub fn is_record_commitment_hash_valid(
    kzg: &Kzg,
    record_commitment_hash: &Scalar,
    commitment: &SegmentCommitment,
    witness: &RecordWitness,
    position: u32,
) -> bool {
    let Ok(commitment) = Commitment::try_from(commitment) else {
        return false;
    };
    let Ok(witness) = Witness::try_from(witness) else {
        return false;
    };

    kzg.verify(
        &commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        record_commitment_hash,
        &witness,
    )
}

/// Derive proof of time entropy from the chunk and proof of time, for injection purposes.
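///
/// The derivation is a plain hash over both inputs and is fully deterministic, e.g.:
///
/// ```ignore
/// let entropy_a = derive_pot_entropy(&chunk, proof_of_time);
/// let entropy_b = derive_pot_entropy(&chunk, proof_of_time);
/// assert_eq!(entropy_a, entropy_b);
/// ```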
#[inline]
pub fn derive_pot_entropy(chunk: &ScalarBytes, proof_of_time: PotOutput) -> Blake3Hash {
    blake3_hash_list(&[chunk.as_ref(), proof_of_time.as_ref()])
}

/// Derives the next solution range based on the total era slots and slot probability, clamped
/// to change by at most a factor of 4 in either direction.
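///
/// A worked example with illustrative values (`SlotNumber` and `SolutionRange` are treated as
/// plain `u64` here): with `slot_probability = (1, 6)` and an `era_duration` of 100 blocks, the
/// era is expected to take 600 slots. If it actually took 1200 slots, blocks were produced half
/// as often as intended, so the solution range doubles:
///
/// ```ignore
/// let next = derive_next_solution_range(
///     0,      // start_slot
///     1200,   // current_slot
///     (1, 6), // slot_probability
///     1_000,  // current_solution_range
///     100,    // era_duration in blocks
/// );
/// // 1_000 * 1200 * 1 / 100 / 6 = 2_000, within the clamp range [250, 4_000]
/// assert_eq!(next, 2_000);
/// ```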
pub fn derive_next_solution_range(
    start_slot: SlotNumber,
    current_slot: SlotNumber,
    slot_probability: (u64, u64),
    current_solution_range: SolutionRange,
    era_duration: BlockNumber,
) -> u64 {
    // Calculate the total number of slots within this era
    let era_slot_count = current_slot - start_slot;

    // Now we need to re-calculate the solution range. The idea here is to keep block production
    // at the same pace while the space pledged on the network changes. For this we adjust the
    // previous solution range according to the actual and expected number of blocks per era.

    // Below is code analogous to the following, but without using floats:
    // ```rust
    // let actual_slots_per_block = era_slot_count as f64 / era_duration as f64;
    // let expected_slots_per_block =
    //     slot_probability.1 as f64 / slot_probability.0 as f64;
    // let adjustment_factor =
    //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
    //
    // next_solution_range =
    //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
    // ```
    u64::try_from(
        u128::from(current_solution_range)
            .saturating_mul(u128::from(era_slot_count))
            .saturating_mul(u128::from(slot_probability.0))
            / u128::from(era_duration)
            / u128::from(slot_probability.1),
    )
    .unwrap_or(u64::MAX)
    .clamp(
        current_solution_range / 4,
        current_solution_range.saturating_mul(4),
    )
}