//! Verification primitives for Subspace.
#![forbid(unsafe_code)]
#![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
#![feature(array_chunks, portable_simd)]
#![allow(incomplete_features)]
#![feature(generic_const_exprs)]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(not(feature = "std"))]
extern crate alloc;

#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(all(feature = "kzg", not(feature = "std")))]
use alloc::vec::Vec;
use core::mem;
#[cfg(feature = "kzg")]
use core::num::NonZeroU64;
#[cfg(feature = "kzg")]
use core::simd::Simd;
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
use schnorrkel::SignatureError;
use schnorrkel::context::SigningContext;
#[cfg(feature = "kzg")]
use subspace_core_primitives::hashes::blake3_254_hash_to_scalar;
use subspace_core_primitives::hashes::{Blake3Hash, blake3_hash_list, blake3_hash_with_key};
#[cfg(feature = "kzg")]
use subspace_core_primitives::pieces::{PieceArray, Record, RecordWitness};
use subspace_core_primitives::pot::PotOutput;
#[cfg(feature = "kzg")]
use subspace_core_primitives::sectors::SectorId;
use subspace_core_primitives::sectors::SectorSlotChallenge;
#[cfg(feature = "kzg")]
use subspace_core_primitives::segments::ArchivedHistorySegment;
use subspace_core_primitives::segments::{HistorySize, SegmentCommitment};
#[cfg(feature = "kzg")]
use subspace_core_primitives::solutions::Solution;
use subspace_core_primitives::solutions::{RewardSignature, SolutionRange};
use subspace_core_primitives::{BlockForkWeight, BlockNumber, PublicKey, ScalarBytes, SlotNumber};
#[cfg(feature = "kzg")]
use subspace_kzg::{Commitment, Kzg, Scalar, Witness};
#[cfg(feature = "kzg")]
use subspace_proof_of_space::Table;

/// Errors encountered during Subspace consensus verification.
#[derive(Debug, Eq, PartialEq, thiserror::Error)]
pub enum Error {
    /// Piece offset is outside of the sector
    #[error("Piece offset {piece_offset} is invalid, sector contains at most {max_pieces_in_sector} pieces")]
    InvalidPieceOffset {
        /// Piece offset provided in the solution
        piece_offset: u16,
        /// Max number of pieces a sector can contain
        max_pieces_in_sector: u16,
    },
    /// Solution references a history size that has not been archived yet
    #[error("History size {solution} is in the future, current is {current}")]
    FutureHistorySize {
        /// Current history size
        current: HistorySize,
        /// History size the solution was created for
        solution: HistorySize,
    },
    /// Sector has expired
    #[error("Sector expired: expiration history size {expiration_history_size}, current history size {current_history_size}")]
    SectorExpired {
        /// History size at which the sector expires
        expiration_history_size: HistorySize,
        /// Current history size
        current_history_size: HistorySize,
    },
    /// Piece verification failed
    #[error("Piece verification failed")]
    InvalidPiece,
    /// Solution is outside of the solution range
    #[error(
        "Solution distance {solution_distance} is outside of solution range \
        {half_solution_range} (half of actual solution range)"
    )]
    OutsideSolutionRange {
        /// Half of the solution range
        half_solution_range: SolutionRange,
        /// Distance between the audited chunk and the global challenge
        solution_distance: SolutionRange,
    },
    /// Invalid proof of space
    #[error("Invalid proof of space")]
    InvalidProofOfSpace,
    /// Invalid audit chunk offset
    #[error("Invalid audit chunk offset")]
    InvalidAuditChunkOffset,
    /// Invalid chunk
    #[error("Invalid chunk: {0}")]
    InvalidChunk(String),
    /// Invalid chunk witness
    #[error("Invalid chunk witness")]
    InvalidChunkWitness,
    /// Invalid history size
    #[error("Invalid history size")]
    InvalidHistorySize,
}

/// Check the reward signature validity.
pub fn check_reward_signature(
    hash: &[u8],
    signature: &RewardSignature,
    public_key: &PublicKey,
    reward_signing_context: &SigningContext,
) -> Result<(), SignatureError> {
    let public_key = schnorrkel::PublicKey::from_bytes(public_key.as_ref())?;
    let signature = schnorrkel::Signature::from_bytes(signature.as_ref())?;
    public_key.verify(reward_signing_context.bytes(hash), &signature)
}

/// Calculate the distance between the audit chunk (derived from `chunk` and the sector slot
/// challenge) and the global challenge.
fn calculate_solution_distance(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
) -> SolutionRange {
    let audit_chunk = blake3_hash_with_key(sector_slot_challenge, chunk);
    let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *audit_chunk
            .array_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .next()
            .expect("Solution range is smaller in size than audit chunk; qed"),
    );
    let global_challenge_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *global_challenge
            .array_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .next()
            .expect("Solution range is smaller in size than global challenge; qed"),
    );
    subspace_core_primitives::solutions::bidirectional_distance(
        &global_challenge_as_solution_range,
        &audit_chunk_as_solution_range,
    )
}

/// Returns `Some(solution_distance)` if the provided chunk is within the solution range for the
/// given global challenge, i.e. its distance from the challenge is at most half of the solution
/// range.
pub fn is_within_solution_range(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
    solution_range: SolutionRange,
) -> Option<SolutionRange> {
    let solution_distance =
        calculate_solution_distance(global_challenge, chunk, sector_slot_challenge);
    (solution_distance <= solution_range / 2).then_some(solution_distance)
}

/// Parameters for checking piece validity.
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct PieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment commitment of the segment to which the piece belongs
    pub segment_commitment: SegmentCommitment,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment commitment at `min_sector_lifetime` from sector creation, if known
    pub sector_expiration_check_segment_commitment: Option<SegmentCommitment>,
}

/// Parameters for solution verification.
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct VerifySolutionParams {
    /// Proof of time for which the solution was created
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, the piece validity check is skipped.
    pub piece_check_params: Option<PieceCheckParams>,
}

/// Calculate the fork weight of a block produced with the given solution range; a smaller
/// solution range (more space pledged) results in a larger weight.
pub fn calculate_block_fork_weight(solution_range: SolutionRange) -> BlockForkWeight {
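    // With the `testing` feature the solution range may legitimately be `SolutionRange::MAX`,
    // which would yield a fork weight of zero; treat it as `MAX - 1` so that such blocks still
    // carry non-zero weight.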
    #[cfg(feature = "testing")]
    let solution_range = if solution_range == SolutionRange::MAX {
        SolutionRange::MAX - 1
    } else {
        solution_range
    };

    BlockForkWeight::from(SolutionRange::MAX - solution_range)
}

/// Verify whether a solution is valid, returning the solution distance on success.
#[cfg(feature = "kzg")]
pub fn verify_solution<'a, PosTable, RewardAddress>(
    solution: &'a Solution<RewardAddress>,
    slot: SlotNumber,
    params: &'a VerifySolutionParams,
    kzg: &'a Kzg,
) -> Result<SolutionRange, Error>
where
    PosTable: Table,
{
    let VerifySolutionParams {
        proof_of_time,
        solution_range,
        piece_check_params,
    } = params;

    let sector_id = SectorId::new(
        solution.public_key.hash(),
        solution.sector_index,
        solution.history_size,
    );

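    // Derive the global challenge for this slot from the proof of time output, then narrow it
    // down to this sector's slot challenge and the s-bucket that has to be audited.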
    let global_randomness = proof_of_time.derive_global_randomness();
    let global_challenge = global_randomness.derive_global_challenge(slot);
    let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
    let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

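    // The proof of space must be valid for the audited s-bucket of this sector.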
    if !PosTable::is_proof_valid(
        &sector_id.derive_evaluation_seed(solution.piece_offset),
        s_bucket_audit_index.into(),
        &solution.proof_of_space,
    ) {
        return Err(Error::InvalidProofOfSpace);
    }

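    // The solution range check below is performed over the chunk masked (XOR-ed) with the
    // proof-of-space hash, while the KZG check further down uses the unmasked record chunk.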
    let masked_chunk =
        (Simd::from(*solution.chunk) ^ Simd::from(*solution.proof_of_space.hash())).to_array();

    let solution_distance =
        calculate_solution_distance(&global_challenge, &masked_chunk, &sector_slot_challenge);

    if solution_distance > solution_range / 2 {
        return Err(Error::OutsideSolutionRange {
            half_solution_range: solution_range / 2,
            solution_distance,
        });
    }

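    // Check that the chunk belongs to the record: it must be the value the record commitment
    // commits to at the audited s-bucket position.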
    if !kzg.verify(
        &Commitment::try_from(solution.record_commitment)
            .map_err(|_error| Error::InvalidChunkWitness)?,
        Record::NUM_S_BUCKETS,
        s_bucket_audit_index.into(),
        &Scalar::try_from(solution.chunk).map_err(Error::InvalidChunk)?,
        &Witness::try_from(solution.chunk_witness).map_err(|_error| Error::InvalidChunkWitness)?,
    ) {
        return Err(Error::InvalidChunkWitness);
    }

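    // When piece check parameters are provided, additionally verify that the piece the solution
    // was created from is a valid piece of archived history.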
    if let Some(PieceCheckParams {
        max_pieces_in_sector,
        segment_commitment,
        recent_segments,
        recent_history_fraction,
        min_sector_lifetime,
        current_history_size,
        sector_expiration_check_segment_commitment,
    }) = piece_check_params
    {
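        // The solution's history size may exceed the current one by at most one segment; anything
        // further ahead is rejected as a future history size.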
278 if NonZeroU64::from(solution.history_size).get()
282 > NonZeroU64::from(*current_history_size).get() + 1
283 {
284 return Err(Error::FutureHistorySize {
285 current: *current_history_size,
286 solution: solution.history_size,
287 });
288 }
289
290 if u16::from(solution.piece_offset) >= *max_pieces_in_sector {
291 return Err(Error::InvalidPieceOffset {
292 piece_offset: u16::from(solution.piece_offset),
293 max_pieces_in_sector: *max_pieces_in_sector,
294 });
295 }
296
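        // If a segment commitment for the expiration check is available, make sure the sector has
        // not expired yet.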
        if let Some(sector_expiration_check_segment_commitment) =
            sector_expiration_check_segment_commitment
        {
            let expiration_history_size = match sector_id.derive_expiration_history_size(
                solution.history_size,
                sector_expiration_check_segment_commitment,
                *min_sector_lifetime,
            ) {
                Some(expiration_history_size) => expiration_history_size,
                None => {
                    return Err(Error::InvalidHistorySize);
                }
            };

            if expiration_history_size <= *current_history_size {
                return Err(Error::SectorExpired {
                    expiration_history_size,
                    current_history_size: *current_history_size,
                });
            }
        }

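        // Derive the position of the piece within its archived segment and check that the record
        // commitment (hashed to a scalar) is committed to by the segment commitment at that
        // position.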
        let position = sector_id
            .derive_piece_index(
                solution.piece_offset,
                solution.history_size,
                *max_pieces_in_sector,
                *recent_segments,
                *recent_history_fraction,
            )
            .position();

        if !is_record_commitment_hash_valid(
            kzg,
            &Scalar::try_from(blake3_254_hash_to_scalar(
                solution.record_commitment.as_ref(),
            ))
            .expect("Created correctly by dedicated hash function; qed"),
            segment_commitment,
            &solution.record_witness,
            position,
        ) {
            return Err(Error::InvalidPiece);
        }
    }

    Ok(solution_distance)
}

/// Validate the witness embedded within a piece produced by the archiver against the segment
/// commitment and the piece's position within the segment.
#[cfg(feature = "kzg")]
pub fn is_piece_valid(
    kzg: &Kzg,
    piece: &PieceArray,
    segment_commitment: &SegmentCommitment,
    position: u32,
) -> bool {
    let (record, commitment, witness) = piece.split();
    let witness = match Witness::try_from_bytes(witness) {
        Ok(witness) => witness,
        _ => {
            return false;
        }
    };

    let mut scalars = Vec::with_capacity(record.len().next_power_of_two());

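    // Convert raw record chunks into scalars; a piece containing any chunk that is not a valid
    // scalar is invalid.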
    for record_chunk in record.iter() {
        match Scalar::try_from(record_chunk) {
            Ok(scalar) => {
                scalars.push(scalar);
            }
            _ => {
                return false;
            }
        }
    }

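    // The number of scalars in the polynomial must be a power of two, so pad with
    // `Scalar::default()` up to the capacity reserved above.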
    scalars.resize(scalars.capacity(), Scalar::default());

    let polynomial = match kzg.poly(&scalars) {
        Ok(polynomial) => polynomial,
        _ => {
            return false;
        }
    };

    if kzg
        .commit(&polynomial)
        .map(|commitment| commitment.to_bytes())
        .as_ref()
        != Ok(commitment)
    {
        return false;
    }

    let Ok(segment_commitment) = Commitment::try_from(segment_commitment) else {
        return false;
    };

    let commitment_hash = Scalar::try_from(blake3_254_hash_to_scalar(commitment.as_ref()))
        .expect("Created correctly by dedicated hash function; qed");

    kzg.verify(
        &segment_commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        &commitment_hash,
        &witness,
    )
}

/// Validate the witness for a record commitment hash produced by the archiver.
#[cfg(feature = "kzg")]
pub fn is_record_commitment_hash_valid(
    kzg: &Kzg,
    record_commitment_hash: &Scalar,
    commitment: &SegmentCommitment,
    witness: &RecordWitness,
    position: u32,
) -> bool {
    let Ok(commitment) = Commitment::try_from(commitment) else {
        return false;
    };
    let Ok(witness) = Witness::try_from(witness) else {
        return false;
    };

    kzg.verify(
        &commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        record_commitment_hash,
        &witness,
    )
}

/// Derive proof-of-time entropy from a chunk and a proof-of-time output.
#[inline]
pub fn derive_pot_entropy(chunk: &ScalarBytes, proof_of_time: PotOutput) -> Blake3Hash {
    blake3_hash_list(&[chunk.as_ref(), proof_of_time.as_ref()])
}

/// Derives the next solution range from the number of slots it took to produce the blocks of the
/// previous era relative to the expected number, keeping block production at the target rate as
/// the space pledged to the network changes.
pub fn derive_next_solution_range(
    start_slot: SlotNumber,
    current_slot: SlotNumber,
    slot_probability: (u64, u64),
    current_solution_range: SolutionRange,
    era_duration: BlockNumber,
) -> SolutionRange {
    let era_slot_count = current_slot - start_slot;

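    // Adjust the previous solution range by the ratio of actual to expected slots per block,
    // clamped to a factor of 4 in either direction. Using integers only, this is analogous to the
    // following (illustrative) float computation:
    //
    //     let actual_slots_per_block = era_slot_count as f64 / era_duration as f64;
    //     let expected_slots_per_block = slot_probability.1 as f64 / slot_probability.0 as f64;
    //     let adjustment = (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
    //     let next_solution_range = (current_solution_range as f64 * adjustment).round() as u64;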
    u64::try_from(
        u128::from(current_solution_range)
            .saturating_mul(u128::from(era_slot_count))
            .saturating_mul(u128::from(slot_probability.0))
            / u128::from(era_duration)
            / u128::from(slot_probability.1),
    )
    .unwrap_or(u64::MAX)
    .clamp(
        current_solution_range / 4,
        current_solution_range.saturating_mul(4),
    )
}