subspace_verification/
lib.rs

1//! Verification primitives for Subspace.
2#![forbid(unsafe_code)]
3#![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
4#![feature(portable_simd)]
5// `generic_const_exprs` is an incomplete feature
6#![allow(incomplete_features)]
7// TODO: This feature is not actually used in this crate, but is added as a workaround for
8//  https://github.com/rust-lang/rust/issues/133199
9#![feature(generic_const_exprs)]
10#![cfg_attr(not(feature = "std"), no_std)]
11
12#[cfg(not(feature = "std"))]
13extern crate alloc;
14
15#[cfg(not(feature = "std"))]
16use alloc::string::String;
17#[cfg(all(feature = "kzg", not(feature = "std")))]
18use alloc::vec::Vec;
19use core::mem;
20#[cfg(feature = "kzg")]
21use core::num::NonZeroU64;
22#[cfg(feature = "kzg")]
23use core::simd::Simd;
24use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
25use schnorrkel::SignatureError;
26use schnorrkel::context::SigningContext;
27#[cfg(feature = "kzg")]
28use subspace_core_primitives::hashes::blake3_254_hash_to_scalar;
29use subspace_core_primitives::hashes::{Blake3Hash, blake3_hash_list, blake3_hash_with_key};
30#[cfg(feature = "kzg")]
31use subspace_core_primitives::pieces::{PieceArray, Record, RecordWitness};
32use subspace_core_primitives::pot::PotOutput;
33#[cfg(feature = "kzg")]
34use subspace_core_primitives::sectors::SectorId;
35use subspace_core_primitives::sectors::SectorSlotChallenge;
36#[cfg(feature = "kzg")]
37use subspace_core_primitives::segments::ArchivedHistorySegment;
38use subspace_core_primitives::segments::{HistorySize, SegmentCommitment};
39#[cfg(feature = "kzg")]
40use subspace_core_primitives::solutions::Solution;
41use subspace_core_primitives::solutions::{RewardSignature, SolutionRange};
42use subspace_core_primitives::{BlockForkWeight, BlockNumber, PublicKey, ScalarBytes, SlotNumber};
43#[cfg(feature = "kzg")]
44use subspace_kzg::{Commitment, Kzg, Scalar, Witness};
45#[cfg(feature = "kzg")]
46use subspace_proof_of_space::Table;
47
48/// Errors encountered by the Subspace consensus primitives.
49#[derive(Debug, Eq, PartialEq, thiserror::Error)]
50pub enum Error {
51    /// Invalid piece offset
52    #[error("Piece verification failed")]
53    InvalidPieceOffset {
54        /// Index of the piece that failed verification
55        piece_offset: u16,
56        /// How many pieces one sector is supposed to contain (max)
57        max_pieces_in_sector: u16,
58    },
59    /// History size is in the future
60    #[error("History size {solution} is in the future, current is {current}")]
61    FutureHistorySize {
62        /// Current history size
63        current: HistorySize,
64        /// History size solution was created for
65        solution: HistorySize,
66    },
67    /// Sector expired
68    #[error("Sector expired")]
69    SectorExpired {
70        /// Expiration history size
71        expiration_history_size: HistorySize,
72        /// Current history size
73        current_history_size: HistorySize,
74    },
75    /// Piece verification failed
76    #[error("Piece verification failed")]
77    InvalidPiece,
78    /// Solution is outside of challenge range
79    #[error(
80        "Solution distance {solution_distance} is outside of solution range \
81        {half_solution_range} (half of actual solution range)"
82    )]
83    OutsideSolutionRange {
84        /// Half of solution range
85        half_solution_range: SolutionRange,
86        /// Solution distance
87        solution_distance: SolutionRange,
88    },
89    /// Invalid proof of space
90    #[error("Invalid proof of space")]
91    InvalidProofOfSpace,
92    /// Invalid audit chunk offset
93    #[error("Invalid audit chunk offset")]
94    InvalidAuditChunkOffset,
95    /// Invalid chunk
96    #[error("Invalid chunk: {0}")]
97    InvalidChunk(String),
98    /// Invalid chunk witness
99    #[error("Invalid chunk witness")]
100    InvalidChunkWitness,
101    /// Invalid history size
102    #[error("Invalid history size")]
103    InvalidHistorySize,
104}
105
106/// Check the reward signature validity.
107pub fn check_reward_signature(
108    hash: &[u8],
109    signature: &RewardSignature,
110    public_key: &PublicKey,
111    reward_signing_context: &SigningContext,
112) -> Result<(), SignatureError> {
113    let public_key = schnorrkel::PublicKey::from_bytes(public_key.as_ref())?;
114    let signature = schnorrkel::Signature::from_bytes(signature.as_ref())?;
115    public_key.verify(reward_signing_context.bytes(hash), &signature)
116}
117
118/// Calculates solution distance for given parameters, is used as a primitive to check whether
119/// solution distance is within solution range (see [`is_within_solution_range()`]).
120fn calculate_solution_distance(
121    global_challenge: &Blake3Hash,
122    chunk: &[u8; 32],
123    sector_slot_challenge: &SectorSlotChallenge,
124) -> SolutionRange {
125    let audit_chunk = blake3_hash_with_key(sector_slot_challenge, chunk);
126    let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
127        *audit_chunk
128            .as_chunks::<{ mem::size_of::<SolutionRange>() }>()
129            .0
130            .iter()
131            .next()
132            .expect("Solution range is smaller in size than global challenge; qed"),
133    );
134    let global_challenge_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
135        *global_challenge
136            .as_chunks::<{ mem::size_of::<SolutionRange>() }>()
137            .0
138            .iter()
139            .next()
140            .expect("Solution range is smaller in size than global challenge; qed"),
141    );
142    subspace_core_primitives::solutions::bidirectional_distance(
143        &global_challenge_as_solution_range,
144        &audit_chunk_as_solution_range,
145    )
146}
147
148/// Returns `Some(solution_distance)` if solution distance is within the solution range for provided
149/// parameters.
150pub fn is_within_solution_range(
151    global_challenge: &Blake3Hash,
152    chunk: &[u8; 32],
153    sector_slot_challenge: &SectorSlotChallenge,
154    solution_range: SolutionRange,
155) -> Option<SolutionRange> {
156    let solution_distance =
157        calculate_solution_distance(global_challenge, chunk, sector_slot_challenge);
158    (solution_distance <= solution_range / 2).then_some(solution_distance)
159}
160
/// Parameters for checking piece validity
///
/// Carried inside [`VerifySolutionParams`]; all fields describe the chain state the
/// verifier should check the solution's piece against.
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct PieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment commitment of segment to which piece belongs
    pub segment_commitment: SegmentCommitment,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector,
    /// expressed as a (numerator, denominator) pair
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment commitment at `min_sector_lifetime` from sector creation (if exists);
    /// `None` skips the sector expiration check in `verify_solution()`
    pub sector_expiration_check_segment_commitment: Option<SegmentCommitment>,
}
179
/// Parameters for solution verification
///
/// Bundles everything `verify_solution()` needs besides the solution itself and the slot.
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct VerifySolutionParams {
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<PieceCheckParams>,
}
192
193/// Calculate the block's contribution to the fork weight, which is derived from the provided
194/// solution range.
195pub fn calculate_block_fork_weight(solution_range: SolutionRange) -> BlockForkWeight {
196    // Work around the test runtime accepting all solutions, which causes blocks to have zero
197    // fork weight. This makes each node keep its own blocks, and never reorg to a common chain.
198    #[cfg(feature = "testing")]
199    let solution_range = if solution_range == SolutionRange::MAX {
200        SolutionRange::MAX - 1
201    } else {
202        solution_range
203    };
204
205    BlockForkWeight::from(SolutionRange::MAX - solution_range)
206}
207
/// Verify whether solution is valid, returns solution distance that is `<= solution_range/2` on
/// success.
///
/// Verification proceeds in order: proof-of-space check, solution-range check, KZG check that
/// the chunk belongs to the record, and (when `piece_check_params` is provided) checks that the
/// piece itself is a valid part of archived history. The first failing check short-circuits
/// with the corresponding [`Error`] variant.
#[cfg(feature = "kzg")]
pub fn verify_solution<'a, PosTable, RewardAddress>(
    solution: &'a Solution<RewardAddress>,
    slot: SlotNumber,
    params: &'a VerifySolutionParams,
    kzg: &'a Kzg,
) -> Result<SolutionRange, Error>
where
    PosTable: Table,
{
    use subspace_core_primitives::solutions::SolutionPotVerifier;

    let VerifySolutionParams {
        proof_of_time,
        solution_range,
        piece_check_params,
    } = params;

    // Sector ID is bound to the farmer's key, the sector index and the history size the
    // sector was plotted for
    let sector_id = SectorId::new(
        solution.public_key.hash(),
        solution.sector_index,
        solution.history_size,
    );

    // Re-derive the per-slot challenge chain from the proof of time
    let global_randomness = proof_of_time.derive_global_randomness();
    let global_challenge = global_randomness.derive_global_challenge(slot);
    let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
    let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

    // Check that proof of space is valid
    if !<PosTable as SolutionPotVerifier>::is_proof_valid(
        &sector_id.derive_evaluation_seed(solution.piece_offset),
        s_bucket_audit_index.into(),
        &solution.proof_of_space,
    ) {
        return Err(Error::InvalidProofOfSpace);
    };

    // Chunks are stored masked (XORed with the proof-of-space hash); unmask with the same
    // XOR before computing the solution distance
    let masked_chunk =
        (Simd::from(*solution.chunk) ^ Simd::from(*solution.proof_of_space.hash())).to_array();

    let solution_distance =
        calculate_solution_distance(&global_challenge, &masked_chunk, &sector_slot_challenge);

    // Check that solution is within solution range
    if solution_distance > solution_range / 2 {
        return Err(Error::OutsideSolutionRange {
            half_solution_range: solution_range / 2,
            solution_distance,
        });
    }

    // Check that chunk belongs to the record (KZG opening of the record commitment at the
    // audited s-bucket index)
    if !kzg.verify(
        &Commitment::try_from(solution.record_commitment)
            .map_err(|_error| Error::InvalidChunkWitness)?,
        Record::NUM_S_BUCKETS,
        s_bucket_audit_index.into(),
        &Scalar::try_from(solution.chunk).map_err(Error::InvalidChunk)?,
        &Witness::try_from(solution.chunk_witness).map_err(|_error| Error::InvalidChunkWitness)?,
    ) {
        return Err(Error::InvalidChunkWitness);
    }

    if let Some(PieceCheckParams {
        max_pieces_in_sector,
        segment_commitment,
        recent_segments,
        recent_history_fraction,
        min_sector_lifetime,
        current_history_size,
        sector_expiration_check_segment_commitment,
    }) = piece_check_params
    {
        // `+1` here is due to the possibility of plotting a sector that was just archived and whose
        // segment root is just being included in this very block we're checking (parent block,
        // which is where `current_history_size` comes from doesn't know about this block yet)
        if NonZeroU64::from(solution.history_size).get()
            > NonZeroU64::from(*current_history_size).get() + 1
        {
            return Err(Error::FutureHistorySize {
                current: *current_history_size,
                solution: solution.history_size,
            });
        }

        // Piece offset must fall within the sector
        if u16::from(solution.piece_offset) >= *max_pieces_in_sector {
            return Err(Error::InvalidPieceOffset {
                piece_offset: u16::from(solution.piece_offset),
                max_pieces_in_sector: *max_pieces_in_sector,
            });
        }

        // Optional sector expiration check: a sector must be replotted once history has
        // grown past its derived expiration point
        if let Some(sector_expiration_check_segment_commitment) =
            sector_expiration_check_segment_commitment
        {
            let expiration_history_size = match sector_id.derive_expiration_history_size(
                solution.history_size,
                sector_expiration_check_segment_commitment,
                *min_sector_lifetime,
            ) {
                Some(expiration_history_size) => expiration_history_size,
                None => {
                    return Err(Error::InvalidHistorySize);
                }
            };

            if expiration_history_size <= *current_history_size {
                return Err(Error::SectorExpired {
                    expiration_history_size,
                    current_history_size: *current_history_size,
                });
            }
        }

        // Recover the piece's position in its archived segment from the sector layout
        let position = sector_id
            .derive_piece_index(
                solution.piece_offset,
                solution.history_size,
                *max_pieces_in_sector,
                *recent_segments,
                *recent_history_fraction,
            )
            .position();

        // Check that piece is part of the blockchain history
        if !is_record_commitment_hash_valid(
            kzg,
            &Scalar::try_from(blake3_254_hash_to_scalar(
                solution.record_commitment.as_ref(),
            ))
            .expect("Create correctly by dedicated hash function; qed"),
            segment_commitment,
            &solution.record_witness,
            position,
        ) {
            return Err(Error::InvalidPiece);
        }
    }

    Ok(solution_distance)
}
352
/// Validate witness embedded within a piece produced by archiver
///
/// Recomputes the record's KZG commitment from the record chunks and checks that it
/// matches the commitment embedded in the piece, then verifies the piece's witness
/// against the segment commitment at the given `position`. Returns `false` on any
/// malformed input or failed check.
#[cfg(feature = "kzg")]
pub fn is_piece_valid(
    kzg: &Kzg,
    piece: &PieceArray,
    segment_commitment: &SegmentCommitment,
    position: u32,
) -> bool {
    let (record, commitment, witness) = piece.split();
    let Ok(witness) = Witness::try_from_bytes(witness) else {
        return false;
    };

    // Number of scalars for KZG must be a power of two elements
    let num_scalars = record.len().next_power_of_two();
    let mut scalars = Vec::with_capacity(num_scalars);

    for record_chunk in record.iter() {
        let Ok(scalar) = Scalar::try_from(record_chunk) else {
            return false;
        };
        scalars.push(scalar);
    }

    // Pad to the power-of-two count computed above. Note: padding to `num_scalars` rather
    // than `scalars.capacity()`, since `Vec::with_capacity` only guarantees *at least* the
    // requested capacity and over-allocation would break the power-of-two invariant.
    scalars.resize(num_scalars, Scalar::default());

    let Ok(polynomial) = kzg.poly(&scalars) else {
        return false;
    };

    // The commitment recomputed from record contents must match the one stored in the piece
    if kzg
        .commit(&polynomial)
        .map(|commitment| commitment.to_bytes())
        .as_ref()
        != Ok(commitment)
    {
        return false;
    }

    let Ok(segment_commitment) = Commitment::try_from(segment_commitment) else {
        return false;
    };

    // Segment-level verification operates on the hash of the record commitment
    let commitment_hash = Scalar::try_from(blake3_254_hash_to_scalar(commitment.as_ref()))
        .expect("Create correctly by dedicated hash function; qed");

    kzg.verify(
        &segment_commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        &commitment_hash,
        &witness,
    )
}
416
/// Validate witness for record commitment hash produced by archiver
///
/// Checks the KZG opening of `commitment` at `position` against
/// `record_commitment_hash` using `witness`; malformed commitment or witness bytes
/// yield `false`.
#[cfg(feature = "kzg")]
pub fn is_record_commitment_hash_valid(
    kzg: &Kzg,
    record_commitment_hash: &Scalar,
    commitment: &SegmentCommitment,
    witness: &RecordWitness,
    position: u32,
) -> bool {
    let commitment = match Commitment::try_from(commitment) {
        Ok(commitment) => commitment,
        Err(_) => return false,
    };
    let witness = match Witness::try_from(witness) {
        Ok(witness) => witness,
        Err(_) => return false,
    };

    kzg.verify(
        &commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        record_commitment_hash,
        &witness,
    )
}
441
442/// Derive proof of time entropy from chunk and proof of time for injection purposes.
443#[inline]
444pub fn derive_pot_entropy(chunk: &ScalarBytes, proof_of_time: PotOutput) -> Blake3Hash {
445    blake3_hash_list(&[chunk.as_ref(), proof_of_time.as_ref()])
446}
447
448/// Derives next solution range based on the total era slots and slot probability
449pub fn derive_next_solution_range(
450    start_slot: SlotNumber,
451    current_slot: SlotNumber,
452    slot_probability: (u64, u64),
453    current_solution_range: SolutionRange,
454    era_duration: BlockNumber,
455) -> u64 {
456    // calculate total slots within this era
457    let era_slot_count = current_slot - start_slot;
458
459    // Now we need to re-calculate solution range. The idea here is to keep block production at
460    // the same pace while space pledged on the network changes. For this we adjust previous
461    // solution range according to actual and expected number of blocks per era.
462
463    // Below is code analogous to the following, but without using floats:
464    // ```rust
465    // let actual_slots_per_block = era_slot_count as f64 / era_duration as f64;
466    // let expected_slots_per_block =
467    //     slot_probability.1 as f64 / slot_probability.0 as f64;
468    // let adjustment_factor =
469    //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
470    //
471    // next_solution_range =
472    //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
473    // ```
474    u64::try_from(
475        u128::from(current_solution_range)
476            .saturating_mul(u128::from(era_slot_count))
477            .saturating_mul(u128::from(slot_probability.0))
478            / u128::from(era_duration)
479            / u128::from(slot_probability.1),
480    )
481    .unwrap_or(u64::MAX)
482    .clamp(
483        current_solution_range / 4,
484        current_solution_range.saturating_mul(4),
485    )
486}