subspace_verification/
lib.rs

//! Verification primitives for Subspace.
#![forbid(unsafe_code)]
#![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
#![feature(array_chunks, portable_simd)]
// `generic_const_exprs` is an incomplete feature
#![allow(incomplete_features)]
// TODO: This feature is not actually used in this crate, but is added as a workaround for
//  https://github.com/rust-lang/rust/issues/133199
#![feature(generic_const_exprs)]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(not(feature = "std"))]
extern crate alloc;

#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(all(feature = "kzg", not(feature = "std")))]
use alloc::vec::Vec;
use core::mem;
#[cfg(feature = "kzg")]
use core::num::NonZeroU64;
#[cfg(feature = "kzg")]
use core::simd::Simd;
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
use schnorrkel::SignatureError;
use schnorrkel::context::SigningContext;
#[cfg(feature = "kzg")]
use subspace_core_primitives::hashes::blake3_254_hash_to_scalar;
use subspace_core_primitives::hashes::{Blake3Hash, blake3_hash_list, blake3_hash_with_key};
#[cfg(feature = "kzg")]
use subspace_core_primitives::pieces::{PieceArray, Record, RecordWitness};
use subspace_core_primitives::pot::PotOutput;
#[cfg(feature = "kzg")]
use subspace_core_primitives::sectors::SectorId;
use subspace_core_primitives::sectors::SectorSlotChallenge;
#[cfg(feature = "kzg")]
use subspace_core_primitives::segments::ArchivedHistorySegment;
use subspace_core_primitives::segments::{HistorySize, SegmentCommitment};
#[cfg(feature = "kzg")]
use subspace_core_primitives::solutions::Solution;
use subspace_core_primitives::solutions::{RewardSignature, SolutionRange};
use subspace_core_primitives::{BlockNumber, BlockWeight, PublicKey, ScalarBytes, SlotNumber};
#[cfg(feature = "kzg")]
use subspace_kzg::{Commitment, Kzg, Scalar, Witness};
#[cfg(feature = "kzg")]
use subspace_proof_of_space::Table;

/// Errors encountered by the Subspace consensus primitives.
#[derive(Debug, Eq, PartialEq, thiserror::Error)]
pub enum Error {
    /// Invalid piece offset
    #[error(
        "Piece offset {piece_offset} is invalid, sector is supposed to contain at most \
        {max_pieces_in_sector} pieces"
    )]
    InvalidPieceOffset {
        /// Index of the piece that failed verification
        piece_offset: u16,
        /// How many pieces one sector is supposed to contain (max)
        max_pieces_in_sector: u16,
    },
    /// History size is in the future
    #[error("History size {solution} is in the future, current is {current}")]
    FutureHistorySize {
        /// Current history size
        current: HistorySize,
        /// History size solution was created for
        solution: HistorySize,
    },
    /// Sector expired
    #[error(
        "Sector expired: expiration history size {expiration_history_size}, current history \
        size {current_history_size}"
    )]
    SectorExpired {
        /// Expiration history size
        expiration_history_size: HistorySize,
        /// Current history size
        current_history_size: HistorySize,
    },
    /// Piece verification failed
    #[error("Piece verification failed")]
    InvalidPiece,
    /// Solution is outside of challenge range
    #[error(
        "Solution distance {solution_distance} is outside of solution range \
        {half_solution_range} (half of actual solution range)"
    )]
    OutsideSolutionRange {
        /// Half of solution range
        half_solution_range: SolutionRange,
        /// Solution distance
        solution_distance: SolutionRange,
    },
    /// Invalid proof of space
    #[error("Invalid proof of space")]
    InvalidProofOfSpace,
    /// Invalid audit chunk offset
    #[error("Invalid audit chunk offset")]
    InvalidAuditChunkOffset,
    /// Invalid chunk
    #[error("Invalid chunk: {0}")]
    InvalidChunk(String),
    /// Invalid chunk witness
    #[error("Invalid chunk witness")]
    InvalidChunkWitness,
    /// Invalid history size
    #[error("Invalid history size")]
    InvalidHistorySize,
}

/// Check the reward signature validity.
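///
/// # Example
///
/// A minimal sketch of a successful verification round-trip, assuming a signing context
/// created by the caller (the context bytes here are illustrative, not the ones consensus
/// actually uses):
///
/// ```ignore
/// use schnorrkel::signing_context;
///
/// let reward_signing_context = signing_context(b"example_rewards");
/// // `hash`, `signature` and `public_key` come from the block being checked.
/// check_reward_signature(hash.as_ref(), &signature, &public_key, &reward_signing_context)
///     .expect("valid signature over `hash` by `public_key`");
/// ```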
pub fn check_reward_signature(
    hash: &[u8],
    signature: &RewardSignature,
    public_key: &PublicKey,
    reward_signing_context: &SigningContext,
) -> Result<(), SignatureError> {
    let public_key = schnorrkel::PublicKey::from_bytes(public_key.as_ref())?;
    let signature = schnorrkel::Signature::from_bytes(signature.as_ref())?;
    public_key.verify(reward_signing_context.bytes(hash), &signature)
}

/// Calculates the solution distance for the given parameters; used as a primitive to check
/// whether a solution distance is within the solution range (see [`is_within_solution_range()`]).
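///
/// Both 32-byte inputs are effectively truncated to their first
/// `size_of::<SolutionRange>()` bytes and read as little-endian integers; the result is the
/// wrapping bidirectional distance between them, as in this sketch:
///
/// ```ignore
/// // min(a.wrapping_sub(b), b.wrapping_sub(a)), so values near u64::MAX and
/// // values near 0 count as close to each other:
/// assert_eq!(bidirectional_distance(&u64::MAX, &1), 2);
/// ```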
fn calculate_solution_distance(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
) -> SolutionRange {
    let audit_chunk = blake3_hash_with_key(sector_slot_challenge, chunk);
    let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *audit_chunk
            .array_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .next()
            .expect("Solution range is smaller in size than global challenge; qed"),
    );
    let global_challenge_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *global_challenge
            .array_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .next()
            .expect("Solution range is smaller in size than global challenge; qed"),
    );
    subspace_core_primitives::solutions::bidirectional_distance(
        &global_challenge_as_solution_range,
        &audit_chunk_as_solution_range,
    )
}

/// Returns `Some(solution_distance)` if the solution distance is within the solution range for
/// the provided parameters.
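///
/// # Example
///
/// A sketch of auditing a single chunk; the challenge values would normally come from
/// consensus and the chunk from a plotted sector (all names here are placeholders):
///
/// ```ignore
/// if let Some(distance) =
///     is_within_solution_range(&global_challenge, &chunk, &sector_slot_challenge, solution_range)
/// {
///     // The chunk is eligible: `distance <= solution_range / 2`.
///     debug_assert!(distance <= solution_range / 2);
/// }
/// ```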
pub fn is_within_solution_range(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
    solution_range: SolutionRange,
) -> Option<SolutionRange> {
    let solution_distance =
        calculate_solution_distance(global_challenge, chunk, sector_slot_challenge);
    (solution_distance <= solution_range / 2).then_some(solution_distance)
}

/// Parameters for checking piece validity
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct PieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment commitment of segment to which piece belongs
    pub segment_commitment: SegmentCommitment,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment commitment at `min_sector_lifetime` from sector creation (if exists)
    pub sector_expiration_check_segment_commitment: Option<SegmentCommitment>,
}

/// Parameters for solution verification
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct VerifySolutionParams {
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<PieceCheckParams>,
}

/// Calculate weight derived from provided solution range
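///
/// A smaller solution range is harder to hit, so it carries more weight; a quick sketch of
/// the relation:
///
/// ```ignore
/// // Halving the solution range increases the derived weight.
/// assert!(calculate_block_weight(range / 2) > calculate_block_weight(range));
/// ```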
pub fn calculate_block_weight(solution_range: SolutionRange) -> BlockWeight {
    BlockWeight::from(SolutionRange::MAX - solution_range)
}

/// Verify whether a solution is valid; on success returns the solution distance, which is
/// `<= solution_range / 2`.
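///
/// # Example
///
/// A minimal sketch of the call, assuming a `solution` received over the network and
/// consensus parameters assembled by the caller; `PosTable` stands for whatever concrete
/// [`Table`] implementation the node is configured with:
///
/// ```ignore
/// let params = VerifySolutionParams {
///     proof_of_time,
///     solution_range,
///     // `None` skips the piece validity check (e.g. when the caller has no
///     // `PieceCheckParams` at hand); pass `Some(...)` for full verification.
///     piece_check_params: None,
/// };
/// let solution_distance = verify_solution::<PosTable, _>(&solution, slot, &params, &kzg)?;
/// ```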
#[cfg(feature = "kzg")]
pub fn verify_solution<'a, PosTable, RewardAddress>(
    solution: &'a Solution<RewardAddress>,
    slot: SlotNumber,
    params: &'a VerifySolutionParams,
    kzg: &'a Kzg,
) -> Result<SolutionRange, Error>
where
    PosTable: Table,
{
    let VerifySolutionParams {
        proof_of_time,
        solution_range,
        piece_check_params,
    } = params;

    let sector_id = SectorId::new(
        solution.public_key.hash(),
        solution.sector_index,
        solution.history_size,
    );

    let global_randomness = proof_of_time.derive_global_randomness();
    let global_challenge = global_randomness.derive_global_challenge(slot);
    let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
    let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

    // Check that proof of space is valid
    if !PosTable::is_proof_valid(
        &sector_id.derive_evaluation_seed(solution.piece_offset),
        s_bucket_audit_index.into(),
        &solution.proof_of_space,
    ) {
        return Err(Error::InvalidProofOfSpace);
    };

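    // Reconstruct the masked chunk that was audited on the farmer's side: XOR the chunk
    // from the solution with the hash of the proof of space, mirroring the masking applied
    // when the sector was plotted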
    let masked_chunk =
        (Simd::from(*solution.chunk) ^ Simd::from(*solution.proof_of_space.hash())).to_array();

    let solution_distance =
        calculate_solution_distance(&global_challenge, &masked_chunk, &sector_slot_challenge);

    // Check that solution is within solution range
    if solution_distance > solution_range / 2 {
        return Err(Error::OutsideSolutionRange {
            half_solution_range: solution_range / 2,
            solution_distance,
        });
    }

    // Check that chunk belongs to the record
    if !kzg.verify(
        &Commitment::try_from(solution.record_commitment)
            .map_err(|_error| Error::InvalidChunkWitness)?,
        Record::NUM_S_BUCKETS,
        s_bucket_audit_index.into(),
        &Scalar::try_from(solution.chunk).map_err(Error::InvalidChunk)?,
        &Witness::try_from(solution.chunk_witness).map_err(|_error| Error::InvalidChunkWitness)?,
    ) {
        return Err(Error::InvalidChunkWitness);
    }

    if let Some(PieceCheckParams {
        max_pieces_in_sector,
        segment_commitment,
        recent_segments,
        recent_history_fraction,
        min_sector_lifetime,
        current_history_size,
        sector_expiration_check_segment_commitment,
    }) = piece_check_params
    {
        // `+1` here is due to the possibility of plotting a sector that was just archived and
        // whose segment root is only being included in the very block we're checking (the
        // parent block, which is where `current_history_size` comes from, doesn't know about
        // this block yet)
        if NonZeroU64::from(solution.history_size).get()
            > NonZeroU64::from(*current_history_size).get() + 1
        {
            return Err(Error::FutureHistorySize {
                current: *current_history_size,
                solution: solution.history_size,
            });
        }

        if u16::from(solution.piece_offset) >= *max_pieces_in_sector {
            return Err(Error::InvalidPieceOffset {
                piece_offset: u16::from(solution.piece_offset),
                max_pieces_in_sector: *max_pieces_in_sector,
            });
        }

        if let Some(sector_expiration_check_segment_commitment) =
            sector_expiration_check_segment_commitment
        {
            let expiration_history_size = match sector_id.derive_expiration_history_size(
                solution.history_size,
                sector_expiration_check_segment_commitment,
                *min_sector_lifetime,
            ) {
                Some(expiration_history_size) => expiration_history_size,
                None => {
                    return Err(Error::InvalidHistorySize);
                }
            };

            if expiration_history_size <= *current_history_size {
                return Err(Error::SectorExpired {
                    expiration_history_size,
                    current_history_size: *current_history_size,
                });
            }
        }

        let position = sector_id
            .derive_piece_index(
                solution.piece_offset,
                solution.history_size,
                *max_pieces_in_sector,
                *recent_segments,
                *recent_history_fraction,
            )
            .position();

        // Check that piece is part of the blockchain history
        if !is_record_commitment_hash_valid(
            kzg,
            &Scalar::try_from(blake3_254_hash_to_scalar(
                solution.record_commitment.as_ref(),
            ))
            .expect("Created correctly by dedicated hash function; qed"),
            segment_commitment,
            &solution.record_witness,
            position,
        ) {
            return Err(Error::InvalidPiece);
        }
    }

    Ok(solution_distance)
}

/// Validate witness embedded within a piece produced by archiver
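///
/// # Example
///
/// A sketch of validating a piece downloaded during sync, assuming the caller already has
/// the `SegmentCommitment` of the segment the piece belongs to and the piece's `position`
/// within that segment (names are illustrative):
///
/// ```ignore
/// if !is_piece_valid(&kzg, &piece, &segment_commitment, position) {
///     // Reject the piece and retry from a different source.
/// }
/// ```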
#[cfg(feature = "kzg")]
pub fn is_piece_valid(
    kzg: &Kzg,
    piece: &PieceArray,
    segment_commitment: &SegmentCommitment,
    position: u32,
) -> bool {
    let (record, commitment, witness) = piece.split();
    let witness = match Witness::try_from_bytes(witness) {
        Ok(witness) => witness,
        _ => {
            return false;
        }
    };

    let mut scalars = Vec::with_capacity(record.len().next_power_of_two());

    for record_chunk in record.iter() {
        match Scalar::try_from(record_chunk) {
            Ok(scalar) => {
                scalars.push(scalar);
            }
            _ => {
                return false;
            }
        }
    }

    // Number of scalars for KZG must be a power of two
    scalars.resize(scalars.capacity(), Scalar::default());

    let polynomial = match kzg.poly(&scalars) {
        Ok(polynomial) => polynomial,
        _ => {
            return false;
        }
    };

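    // Recompute the record commitment from the record contents and make sure it matches
    // the commitment embedded in the piece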
    if kzg
        .commit(&polynomial)
        .map(|commitment| commitment.to_bytes())
        .as_ref()
        != Ok(commitment)
    {
        return false;
    }

    let Ok(segment_commitment) = Commitment::try_from(segment_commitment) else {
        return false;
    };

    let commitment_hash = Scalar::try_from(blake3_254_hash_to_scalar(commitment.as_ref()))
        .expect("Created correctly by dedicated hash function; qed");

    kzg.verify(
        &segment_commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        &commitment_hash,
        &witness,
    )
}

/// Validate witness for record commitment hash produced by archiver
#[cfg(feature = "kzg")]
pub fn is_record_commitment_hash_valid(
    kzg: &Kzg,
    record_commitment_hash: &Scalar,
    commitment: &SegmentCommitment,
    witness: &RecordWitness,
    position: u32,
) -> bool {
    let Ok(commitment) = Commitment::try_from(commitment) else {
        return false;
    };
    let Ok(witness) = Witness::try_from(witness) else {
        return false;
    };

    kzg.verify(
        &commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        record_commitment_hash,
        &witness,
    )
}

/// Derive proof of time entropy from chunk and proof of time for injection purposes.
#[inline]
pub fn derive_pot_entropy(chunk: &ScalarBytes, proof_of_time: PotOutput) -> Blake3Hash {
    blake3_hash_list(&[chunk.as_ref(), proof_of_time.as_ref()])
}

/// Derives next solution range based on the total era slots and slot probability
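///
/// # Example
///
/// A worked example with illustrative numbers: with slot probability `(1, 6)` an era of
/// 2_016 blocks is expected to take 12_096 slots; if it actually took 24_192 slots, blocks
/// came twice as slowly as intended, so the solution range doubles (the adjustment factor
/// is clamped to `[1/4, 4]`):
///
/// ```ignore
/// let next = derive_next_solution_range(0, 24_192, (1, 6), 1_000_000, 2_016);
/// assert_eq!(next, 2_000_000);
/// ```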
pub fn derive_next_solution_range(
    start_slot: SlotNumber,
    current_slot: SlotNumber,
    slot_probability: (u64, u64),
    current_solution_range: SolutionRange,
    era_duration: BlockNumber,
) -> SolutionRange {
    // Calculate the total number of slots within this era
    let era_slot_count = current_slot - start_slot;

    // Now we need to re-calculate solution range. The idea here is to keep block production at
    // the same pace while space pledged on the network changes. For this we adjust previous
    // solution range according to actual and expected number of blocks per era.

    // Below is code analogous to the following, but without using floats:
    // ```rust
    // let actual_slots_per_block = era_slot_count as f64 / era_duration as f64;
    // let expected_slots_per_block =
    //     slot_probability.1 as f64 / slot_probability.0 as f64;
    // let adjustment_factor =
    //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
    //
    // next_solution_range =
    //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
    // ```
    u64::try_from(
        u128::from(current_solution_range)
            .saturating_mul(u128::from(era_slot_count))
            .saturating_mul(u128::from(slot_probability.0))
            / u128::from(era_duration)
            / u128::from(slot_probability.1),
    )
    .unwrap_or(u64::MAX)
    .clamp(
        current_solution_range / 4,
        current_solution_range.saturating_mul(4),
    )
}