// subspace_verification/lib.rs

1//! Verification primitives for Subspace.
2#![forbid(unsafe_code)]
3#![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
4#![feature(array_chunks, portable_simd)]
5// `generic_const_exprs` is an incomplete feature
6#![allow(incomplete_features)]
7// TODO: This feature is not actually used in this crate, but is added as a workaround for
8//  https://github.com/rust-lang/rust/issues/133199
9#![feature(generic_const_exprs)]
10#![cfg_attr(not(feature = "std"), no_std)]
11
12#[cfg(not(feature = "std"))]
13extern crate alloc;
14
15#[cfg(not(feature = "std"))]
16use alloc::string::String;
17#[cfg(all(feature = "kzg", not(feature = "std")))]
18use alloc::vec::Vec;
19use core::mem;
20#[cfg(feature = "kzg")]
21use core::simd::Simd;
22use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
23use schnorrkel::context::SigningContext;
24use schnorrkel::SignatureError;
25#[cfg(feature = "kzg")]
26use subspace_core_primitives::hashes::blake3_254_hash_to_scalar;
27use subspace_core_primitives::hashes::{blake3_hash_list, blake3_hash_with_key, Blake3Hash};
28#[cfg(feature = "kzg")]
29use subspace_core_primitives::pieces::{PieceArray, Record, RecordWitness};
30use subspace_core_primitives::pot::PotOutput;
31#[cfg(feature = "kzg")]
32use subspace_core_primitives::sectors::SectorId;
33use subspace_core_primitives::sectors::SectorSlotChallenge;
34#[cfg(feature = "kzg")]
35use subspace_core_primitives::segments::ArchivedHistorySegment;
36use subspace_core_primitives::segments::{HistorySize, SegmentCommitment};
37#[cfg(feature = "kzg")]
38use subspace_core_primitives::solutions::Solution;
39use subspace_core_primitives::solutions::{RewardSignature, SolutionRange};
40use subspace_core_primitives::{BlockNumber, BlockWeight, PublicKey, ScalarBytes, SlotNumber};
41#[cfg(feature = "kzg")]
42use subspace_kzg::{Commitment, Kzg, Scalar, Witness};
43#[cfg(feature = "kzg")]
44use subspace_proof_of_space::Table;
45
46/// Errors encountered by the Subspace consensus primitives.
47#[derive(Debug, Eq, PartialEq, thiserror::Error)]
48pub enum Error {
49    /// Invalid piece offset
50    #[error("Piece verification failed")]
51    InvalidPieceOffset {
52        /// Index of the piece that failed verification
53        piece_offset: u16,
54        /// How many pieces one sector is supposed to contain (max)
55        max_pieces_in_sector: u16,
56    },
57    /// Sector expired
58    #[error("Sector expired")]
59    SectorExpired {
60        /// Expiration history size
61        expiration_history_size: HistorySize,
62        /// Current history size
63        current_history_size: HistorySize,
64    },
65    /// Piece verification failed
66    #[error("Piece verification failed")]
67    InvalidPiece,
68    /// Solution is outside of challenge range
69    #[error(
70        "Solution distance {solution_distance} is outside of solution range \
71        {half_solution_range} (half of actual solution range)"
72    )]
73    OutsideSolutionRange {
74        /// Half of solution range
75        half_solution_range: SolutionRange,
76        /// Solution distance
77        solution_distance: SolutionRange,
78    },
79    /// Invalid proof of space
80    #[error("Invalid proof of space")]
81    InvalidProofOfSpace,
82    /// Invalid audit chunk offset
83    #[error("Invalid audit chunk offset")]
84    InvalidAuditChunkOffset,
85    /// Invalid chunk
86    #[error("Invalid chunk: {0}")]
87    InvalidChunk(String),
88    /// Invalid chunk witness
89    #[error("Invalid chunk witness")]
90    InvalidChunkWitness,
91    /// Invalid history size
92    #[error("Invalid history size")]
93    InvalidHistorySize,
94}
95
96/// Check the reward signature validity.
97pub fn check_reward_signature(
98    hash: &[u8],
99    signature: &RewardSignature,
100    public_key: &PublicKey,
101    reward_signing_context: &SigningContext,
102) -> Result<(), SignatureError> {
103    let public_key = schnorrkel::PublicKey::from_bytes(public_key.as_ref())?;
104    let signature = schnorrkel::Signature::from_bytes(signature.as_ref())?;
105    public_key.verify(reward_signing_context.bytes(hash), &signature)
106}
107
108/// Calculates solution distance for given parameters, is used as a primitive to check whether
109/// solution distance is within solution range (see [`is_within_solution_range()`]).
110fn calculate_solution_distance(
111    global_challenge: &Blake3Hash,
112    chunk: &[u8; 32],
113    sector_slot_challenge: &SectorSlotChallenge,
114) -> SolutionRange {
115    let audit_chunk = blake3_hash_with_key(sector_slot_challenge, chunk);
116    let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
117        *audit_chunk
118            .array_chunks::<{ mem::size_of::<SolutionRange>() }>()
119            .next()
120            .expect("Solution range is smaller in size than global challenge; qed"),
121    );
122    let global_challenge_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
123        *global_challenge
124            .array_chunks::<{ mem::size_of::<SolutionRange>() }>()
125            .next()
126            .expect("Solution range is smaller in size than global challenge; qed"),
127    );
128    subspace_core_primitives::solutions::bidirectional_distance(
129        &global_challenge_as_solution_range,
130        &audit_chunk_as_solution_range,
131    )
132}
133
134/// Returns `Some(solution_distance)` if solution distance is within the solution range for provided
135/// parameters.
136pub fn is_within_solution_range(
137    global_challenge: &Blake3Hash,
138    chunk: &[u8; 32],
139    sector_slot_challenge: &SectorSlotChallenge,
140    solution_range: SolutionRange,
141) -> Option<SolutionRange> {
142    let solution_distance =
143        calculate_solution_distance(global_challenge, chunk, sector_slot_challenge);
144    (solution_distance <= solution_range / 2).then_some(solution_distance)
145}
146
/// Parameters for checking piece validity
// NOTE: field order is significant for SCALE `Encode`/`Decode` compatibility — do not reorder
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct PieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment commitment of segment to which piece belongs
    pub segment_commitment: SegmentCommitment,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment commitment at `min_sector_lifetime` from sector creation (if exists)
    pub sector_expiration_check_segment_commitment: Option<SegmentCommitment>,
}
165
/// Parameters for solution verification
// NOTE: field order is significant for SCALE `Encode`/`Decode` compatibility — do not reorder
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct VerifySolutionParams {
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<PieceCheckParams>,
}
178
179/// Calculate weight derived from provided solution range
180pub fn calculate_block_weight(solution_range: SolutionRange) -> BlockWeight {
181    BlockWeight::from(SolutionRange::MAX - solution_range)
182}
183
/// Verify whether solution is valid, returns solution distance that is `<= solution_range/2` on
/// success.
///
/// Verification proceeds through a sequence of checks, each of which maps to a distinct [`Error`]
/// variant: proof of space, solution range, chunk/record KZG proof, and (optionally, when
/// `piece_check_params` is provided) piece offset bounds, sector expiration and membership of the
/// piece in archived history.
#[cfg(feature = "kzg")]
pub fn verify_solution<'a, PosTable, RewardAddress>(
    solution: &'a Solution<RewardAddress>,
    slot: SlotNumber,
    params: &'a VerifySolutionParams,
    kzg: &'a Kzg,
) -> Result<SolutionRange, Error>
where
    PosTable: Table,
{
    let VerifySolutionParams {
        proof_of_time,
        solution_range,
        piece_check_params,
    } = params;

    // Sector ID is derived from the farmer's public key hash, binding the solution to the farmer
    let sector_id = SectorId::new(
        solution.public_key.hash(),
        solution.sector_index,
        solution.history_size,
    );

    // Derive the per-slot challenge chain:
    // proof of time -> global randomness -> global challenge -> sector slot challenge -> s-bucket
    let global_randomness = proof_of_time.derive_global_randomness();
    let global_challenge = global_randomness.derive_global_challenge(slot);
    let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
    let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

    // Check that proof of space is valid
    if !PosTable::is_proof_valid(
        &sector_id.derive_evaluation_seed(solution.piece_offset),
        s_bucket_audit_index.into(),
        &solution.proof_of_space,
    ) {
        return Err(Error::InvalidProofOfSpace);
    };

    // XOR the chunk with the hash of the proof of space (32-byte SIMD XOR); the distance check
    // below is performed against this masked form
    let masked_chunk =
        (Simd::from(*solution.chunk) ^ Simd::from(*solution.proof_of_space.hash())).to_array();

    let solution_distance =
        calculate_solution_distance(&global_challenge, &masked_chunk, &sector_slot_challenge);

    // Check that solution is within solution range
    if solution_distance > solution_range / 2 {
        return Err(Error::OutsideSolutionRange {
            half_solution_range: solution_range / 2,
            solution_distance,
        });
    }

    // Check that chunk belongs to the record: KZG-verify the (unmasked) chunk at the audited
    // s-bucket position against the record commitment
    if !kzg.verify(
        &Commitment::try_from(solution.record_commitment)
            .map_err(|_error| Error::InvalidChunkWitness)?,
        Record::NUM_S_BUCKETS,
        s_bucket_audit_index.into(),
        &Scalar::try_from(solution.chunk).map_err(Error::InvalidChunk)?,
        &Witness::try_from(solution.chunk_witness).map_err(|_error| Error::InvalidChunkWitness)?,
    ) {
        return Err(Error::InvalidChunkWitness);
    }

    // Piece checks are optional; when parameters are provided, additionally prove that the
    // audited piece is a valid part of archived blockchain history
    if let Some(PieceCheckParams {
        max_pieces_in_sector,
        segment_commitment,
        recent_segments,
        recent_history_fraction,
        min_sector_lifetime,
        current_history_size,
        sector_expiration_check_segment_commitment,
    }) = piece_check_params
    {
        // Piece offset must be within sector bounds
        if u16::from(solution.piece_offset) >= *max_pieces_in_sector {
            return Err(Error::InvalidPieceOffset {
                piece_offset: u16::from(solution.piece_offset),
                max_pieces_in_sector: *max_pieces_in_sector,
            });
        }
        // When expiration information is available, reject sectors that have already expired
        if let Some(sector_expiration_check_segment_commitment) =
            sector_expiration_check_segment_commitment
        {
            let expiration_history_size = match sector_id.derive_expiration_history_size(
                solution.history_size,
                sector_expiration_check_segment_commitment,
                *min_sector_lifetime,
            ) {
                Some(expiration_history_size) => expiration_history_size,
                None => {
                    return Err(Error::InvalidHistorySize);
                }
            };

            if expiration_history_size <= *current_history_size {
                return Err(Error::SectorExpired {
                    expiration_history_size,
                    current_history_size: *current_history_size,
                });
            }
        }

        // Derive the position of the audited piece within its archived segment
        let position = sector_id
            .derive_piece_index(
                solution.piece_offset,
                solution.history_size,
                *max_pieces_in_sector,
                *recent_segments,
                *recent_history_fraction,
            )
            .position();

        // Check that piece is part of the blockchain history
        if !is_record_commitment_hash_valid(
            kzg,
            &Scalar::try_from(blake3_254_hash_to_scalar(
                solution.record_commitment.as_ref(),
            ))
            .expect("Create correctly by dedicated hash function; qed"),
            segment_commitment,
            &solution.record_witness,
            position,
        ) {
            return Err(Error::InvalidPiece);
        }
    }

    Ok(solution_distance)
}
313
/// Validate witness embedded within a piece produced by archiver
#[cfg(feature = "kzg")]
pub fn is_piece_valid(
    kzg: &Kzg,
    piece: &PieceArray,
    segment_commitment: &SegmentCommitment,
    position: u32,
) -> bool {
    let (record, commitment, witness) = piece.split();

    let Ok(witness) = Witness::try_from_bytes(witness) else {
        return false;
    };

    // Convert every record chunk into a scalar, bailing out on the first invalid chunk
    let mut scalars = Vec::with_capacity(record.len().next_power_of_two());
    for record_chunk in record.iter() {
        let Ok(scalar) = Scalar::try_from(record_chunk) else {
            return false;
        };
        scalars.push(scalar);
    }

    // Number of scalars for KZG must be a power of two elements
    scalars.resize(scalars.capacity(), Scalar::default());

    let Ok(polynomial) = kzg.poly(&scalars) else {
        return false;
    };

    // Re-commit to the record polynomial and make sure it matches the commitment embedded in the
    // piece
    let commitment_matches = kzg
        .commit(&polynomial)
        .map(|computed_commitment| computed_commitment.to_bytes())
        .as_ref()
        == Ok(commitment);
    if !commitment_matches {
        return false;
    }

    let Ok(segment_commitment) = Commitment::try_from(segment_commitment) else {
        return false;
    };

    // Hash of the record commitment is what the segment commitment actually commits to
    let commitment_hash = Scalar::try_from(blake3_254_hash_to_scalar(commitment.as_ref()))
        .expect("Create correctly by dedicated hash function; qed");

    kzg.verify(
        &segment_commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        &commitment_hash,
        &witness,
    )
}
377
/// Validate witness for record commitment hash produced by archiver
#[cfg(feature = "kzg")]
pub fn is_record_commitment_hash_valid(
    kzg: &Kzg,
    record_commitment_hash: &Scalar,
    commitment: &SegmentCommitment,
    witness: &RecordWitness,
    position: u32,
) -> bool {
    // Any deserialization failure of commitment or witness makes the proof invalid
    let commitment = match Commitment::try_from(commitment) {
        Ok(commitment) => commitment,
        Err(_) => return false,
    };
    let witness = match Witness::try_from(witness) {
        Ok(witness) => witness,
        Err(_) => return false,
    };

    kzg.verify(
        &commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        record_commitment_hash,
        &witness,
    )
}
402
403/// Derive proof of time entropy from chunk and proof of time for injection purposes.
404#[inline]
405pub fn derive_pot_entropy(chunk: &ScalarBytes, proof_of_time: PotOutput) -> Blake3Hash {
406    blake3_hash_list(&[chunk.as_ref(), proof_of_time.as_ref()])
407}
408
409/// Derives next solution range based on the total era slots and slot probability
410pub fn derive_next_solution_range(
411    start_slot: SlotNumber,
412    current_slot: SlotNumber,
413    slot_probability: (u64, u64),
414    current_solution_range: SolutionRange,
415    era_duration: BlockNumber,
416) -> u64 {
417    // calculate total slots within this era
418    let era_slot_count = current_slot - start_slot;
419
420    // Now we need to re-calculate solution range. The idea here is to keep block production at
421    // the same pace while space pledged on the network changes. For this we adjust previous
422    // solution range according to actual and expected number of blocks per era.
423
424    // Below is code analogous to the following, but without using floats:
425    // ```rust
426    // let actual_slots_per_block = era_slot_count as f64 / era_duration as f64;
427    // let expected_slots_per_block =
428    //     slot_probability.1 as f64 / slot_probability.0 as f64;
429    // let adjustment_factor =
430    //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
431    //
432    // next_solution_range =
433    //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
434    // ```
435    u64::try_from(
436        u128::from(current_solution_range)
437            .saturating_mul(u128::from(era_slot_count))
438            .saturating_mul(u128::from(slot_probability.0))
439            / u128::from(era_duration)
440            / u128::from(slot_probability.1),
441    )
442    .unwrap_or(u64::MAX)
443    .clamp(
444        current_solution_range / 4,
445        current_solution_range.saturating_mul(4),
446    )
447}