//! Verification primitives for Subspace.
#![forbid(unsafe_code)]
#![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
#![feature(portable_simd)]
// `generic_const_exprs` used below is an incomplete feature
#![allow(incomplete_features)]
#![feature(generic_const_exprs)]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(not(feature = "std"))]
extern crate alloc;

#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(all(feature = "kzg", not(feature = "std")))]
use alloc::vec::Vec;
use core::mem;
#[cfg(feature = "kzg")]
use core::num::NonZeroU64;
#[cfg(feature = "kzg")]
use core::simd::Simd;
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
use schnorrkel::SignatureError;
use schnorrkel::context::SigningContext;
#[cfg(feature = "kzg")]
use subspace_core_primitives::hashes::blake3_254_hash_to_scalar;
use subspace_core_primitives::hashes::{Blake3Hash, blake3_hash_list, blake3_hash_with_key};
#[cfg(feature = "kzg")]
use subspace_core_primitives::pieces::{PieceArray, Record, RecordWitness};
use subspace_core_primitives::pot::PotOutput;
#[cfg(feature = "kzg")]
use subspace_core_primitives::sectors::SectorId;
use subspace_core_primitives::sectors::SectorSlotChallenge;
#[cfg(feature = "kzg")]
use subspace_core_primitives::segments::ArchivedHistorySegment;
use subspace_core_primitives::segments::{HistorySize, SegmentCommitment};
#[cfg(feature = "kzg")]
use subspace_core_primitives::solutions::Solution;
use subspace_core_primitives::solutions::{RewardSignature, SolutionRange};
use subspace_core_primitives::{BlockForkWeight, BlockNumber, PublicKey, ScalarBytes, SlotNumber};
#[cfg(feature = "kzg")]
use subspace_kzg::{Commitment, Kzg, Scalar, Witness};
#[cfg(feature = "kzg")]
use subspace_proof_of_space::Table;

/// Errors encountered by the consensus primitives
#[derive(Debug, Eq, PartialEq, thiserror::Error)]
pub enum Error {
    /// Piece offset is invalid
    #[error("Piece offset {piece_offset} is invalid, sector has {max_pieces_in_sector} pieces")]
    InvalidPieceOffset {
        /// Piece offset provided in the solution
        piece_offset: u16,
        /// Maximum number of pieces a sector can contain
        max_pieces_in_sector: u16,
    },
    /// History size of the solution is in the future
    #[error("History size {solution} is in the future, current is {current}")]
    FutureHistorySize {
        /// Current history size
        current: HistorySize,
        /// History size the solution was created for
        solution: HistorySize,
    },
    /// Sector expired
    #[error("Sector expired")]
    SectorExpired {
        /// History size at which the sector expires
        expiration_history_size: HistorySize,
        /// Current history size
        current_history_size: HistorySize,
    },
    /// Piece verification failed
    #[error("Piece verification failed")]
    InvalidPiece,
    /// Solution is outside of the solution range
    #[error(
        "Solution distance {solution_distance} is outside of solution range \
        {half_solution_range} (half of actual solution range)"
    )]
    OutsideSolutionRange {
        /// Half of the solution range
        half_solution_range: SolutionRange,
        /// Solution distance
        solution_distance: SolutionRange,
    },
    /// Invalid proof of space
    #[error("Invalid proof of space")]
    InvalidProofOfSpace,
    /// Invalid audit chunk offset
    #[error("Invalid audit chunk offset")]
    InvalidAuditChunkOffset,
    /// Invalid chunk
    #[error("Invalid chunk: {0}")]
    InvalidChunk(String),
    /// Invalid chunk witness
    #[error("Invalid chunk witness")]
    InvalidChunkWitness,
    /// Invalid history size
    #[error("Invalid history size")]
    InvalidHistorySize,
}

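/// Check the reward signature validity.
///
/// A minimal call sketch (not compiled; how the pre-hash, signature and signing context are
/// obtained is an assumption for illustration):
///
/// ```ignore
/// let reward_signing_context = schnorrkel::signing_context(b"subspace_reward");
/// check_reward_signature(
///     pre_hash.as_ref(),
///     &reward_signature,
///     &public_key,
///     &reward_signing_context,
/// )?;
/// ```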
pub fn check_reward_signature(
    hash: &[u8],
    signature: &RewardSignature,
    public_key: &PublicKey,
    reward_signing_context: &SigningContext,
) -> Result<(), SignatureError> {
    let public_key = schnorrkel::PublicKey::from_bytes(public_key.as_ref())?;
    let signature = schnorrkel::Signature::from_bytes(signature.as_ref())?;
    public_key.verify(reward_signing_context.bytes(hash), &signature)
}

/// Calculates the solution distance for the given parameters; used as a primitive by
/// `is_within_solution_range()` to check whether a solution distance is within the solution range.
fn calculate_solution_distance(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
) -> SolutionRange {
    let audit_chunk = blake3_hash_with_key(sector_slot_challenge, chunk);
    let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *audit_chunk
            .as_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .0
            .iter()
            .next()
            .expect("Solution range is smaller in size than global challenge; qed"),
    );
    let global_challenge_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *global_challenge
            .as_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .0
            .iter()
            .next()
            .expect("Solution range is smaller in size than global challenge; qed"),
    );
    subspace_core_primitives::solutions::bidirectional_distance(
        &global_challenge_as_solution_range,
        &audit_chunk_as_solution_range,
    )
}

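/// Returns `Some(solution_distance)` if the solution distance for the chunk is within the
/// provided solution range (the comparison is against half of the range, since the distance is
/// measured in both directions from the challenge), `None` otherwise.
///
/// A minimal usage sketch (not compiled; how the challenge and masked chunk are derived is an
/// assumption for illustration):
///
/// ```ignore
/// if let Some(solution_distance) = is_within_solution_range(
///     &global_challenge,
///     &masked_chunk,
///     &sector_slot_challenge,
///     solution_range,
/// ) {
///     // The chunk is eligible for this slot; smaller distances are better solutions.
/// }
/// ```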
pub fn is_within_solution_range(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
    solution_range: SolutionRange,
) -> Option<SolutionRange> {
    let solution_distance =
        calculate_solution_distance(global_challenge, chunk, sector_slot_challenge);
    (solution_distance <= solution_range / 2).then_some(solution_distance)
}

/// Parameters for checking piece validity
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct PieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment commitment of the segment to which the piece belongs
    pub segment_commitment: SegmentCommitment,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment commitment at `min_sector_lifetime` from the sector creation (if it exists)
    pub sector_expiration_check_segment_commitment: Option<SegmentCommitment>,
}

/// Parameters for solution verification
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct VerifySolutionParams {
    /// Proof of time for which the solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity will not be checked.
    pub piece_check_params: Option<PieceCheckParams>,
}

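/// Calculate the block fork weight derived from the provided solution range.
///
/// A smaller solution range means more space is pledged to the network, so it produces a larger
/// weight. A worked example (not compiled; values are illustrative only):
///
/// ```ignore
/// // Weight is the distance from the maximum range: half of the maximum range
/// // yields a weight of `SolutionRange::MAX / 2 + 1`.
/// assert_eq!(
///     calculate_block_fork_weight(SolutionRange::MAX / 2),
///     BlockForkWeight::from(SolutionRange::MAX / 2 + 1),
/// );
/// ```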
pub fn calculate_block_fork_weight(solution_range: SolutionRange) -> BlockForkWeight {
    // In testing the solution range may be set to its maximum, which would otherwise produce a
    // zero weight; cap it so every block still contributes some weight
    #[cfg(feature = "testing")]
    let solution_range = if solution_range == SolutionRange::MAX {
        SolutionRange::MAX - 1
    } else {
        solution_range
    };

    BlockForkWeight::from(SolutionRange::MAX - solution_range)
}

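/// Verify whether a solution is valid for the given slot and parameters, returning the solution
/// distance (guaranteed to be `<= solution_range / 2`) on success.
///
/// A minimal call sketch (not compiled; the proof-of-space table type and the way the parameters
/// are assembled are assumptions for illustration):
///
/// ```ignore
/// let params = VerifySolutionParams {
///     proof_of_time,
///     solution_range,
///     // `None` skips the more expensive piece validity checks
///     piece_check_params: None,
/// };
/// let solution_distance = verify_solution::<PosTable, _>(&solution, slot, &params, &kzg)?;
/// ```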
#[cfg(feature = "kzg")]
pub fn verify_solution<'a, PosTable, RewardAddress>(
    solution: &'a Solution<RewardAddress>,
    slot: SlotNumber,
    params: &'a VerifySolutionParams,
    kzg: &'a Kzg,
) -> Result<SolutionRange, Error>
where
    PosTable: Table,
{
    let VerifySolutionParams {
        proof_of_time,
        solution_range,
        piece_check_params,
    } = params;

    let sector_id = SectorId::new(
        solution.public_key.hash(),
        solution.sector_index,
        solution.history_size,
    );

    let global_randomness = proof_of_time.derive_global_randomness();
    let global_challenge = global_randomness.derive_global_challenge(slot);
    let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
    let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

    // Check that the proof of space is valid
    if !PosTable::is_proof_valid(
        &sector_id.derive_evaluation_seed(solution.piece_offset),
        s_bucket_audit_index.into(),
        &solution.proof_of_space,
    ) {
        return Err(Error::InvalidProofOfSpace);
    };

    let masked_chunk =
        (Simd::from(*solution.chunk) ^ Simd::from(*solution.proof_of_space.hash())).to_array();

    let solution_distance =
        calculate_solution_distance(&global_challenge, &masked_chunk, &sector_slot_challenge);

    // Check that the solution distance is within the solution range
    if solution_distance > solution_range / 2 {
        return Err(Error::OutsideSolutionRange {
            half_solution_range: solution_range / 2,
            solution_distance,
        });
    }

    // Check that the chunk belongs to the record at the audited s-bucket
    if !kzg.verify(
        &Commitment::try_from(solution.record_commitment)
            .map_err(|_error| Error::InvalidChunkWitness)?,
        Record::NUM_S_BUCKETS,
        s_bucket_audit_index.into(),
        &Scalar::try_from(solution.chunk).map_err(Error::InvalidChunk)?,
        &Witness::try_from(solution.chunk_witness).map_err(|_error| Error::InvalidChunkWitness)?,
    ) {
        return Err(Error::InvalidChunkWitness);
    }

    if let Some(PieceCheckParams {
        max_pieces_in_sector,
        segment_commitment,
        recent_segments,
        recent_history_fraction,
        min_sector_lifetime,
        current_history_size,
        sector_expiration_check_segment_commitment,
    }) = piece_check_params
    {
        // The solution must not refer to history that is in the future relative to the current
        // node view (at most one segment ahead of the current history size is accepted)
        if NonZeroU64::from(solution.history_size).get()
            > NonZeroU64::from(*current_history_size).get() + 1
        {
            return Err(Error::FutureHistorySize {
                current: *current_history_size,
                solution: solution.history_size,
            });
        }

        if u16::from(solution.piece_offset) >= *max_pieces_in_sector {
            return Err(Error::InvalidPieceOffset {
                piece_offset: u16::from(solution.piece_offset),
                max_pieces_in_sector: *max_pieces_in_sector,
            });
        }

        if let Some(sector_expiration_check_segment_commitment) =
            sector_expiration_check_segment_commitment
        {
            let expiration_history_size = match sector_id.derive_expiration_history_size(
                solution.history_size,
                sector_expiration_check_segment_commitment,
                *min_sector_lifetime,
            ) {
                Some(expiration_history_size) => expiration_history_size,
                None => {
                    return Err(Error::InvalidHistorySize);
                }
            };

            if expiration_history_size <= *current_history_size {
                return Err(Error::SectorExpired {
                    expiration_history_size,
                    current_history_size: *current_history_size,
                });
            }
        }

        let position = sector_id
            .derive_piece_index(
                solution.piece_offset,
                solution.history_size,
                *max_pieces_in_sector,
                *recent_segments,
                *recent_history_fraction,
            )
            .position();

        // Check that the record is part of the archived history at the derived position
        if !is_record_commitment_hash_valid(
            kzg,
            &Scalar::try_from(blake3_254_hash_to_scalar(
                solution.record_commitment.as_ref(),
            ))
            .expect("Created correctly by dedicated hash function; qed"),
            segment_commitment,
            &solution.record_witness,
            position,
        ) {
            return Err(Error::InvalidPiece);
        }
    }

    Ok(solution_distance)
}

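/// Check whether a piece produced by the archiver is valid, i.e. that its witness matches the
/// commitment of the segment it belongs to, where `position` is the position of the piece within
/// that segment.
///
/// A minimal usage sketch (not compiled; how the piece and segment commitment are obtained is an
/// assumption for illustration):
///
/// ```ignore
/// if !is_piece_valid(&kzg, &piece, &segment_commitment, position) {
///     // Reject the piece, it does not belong to the claimed segment
/// }
/// ```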
#[cfg(feature = "kzg")]
pub fn is_piece_valid(
    kzg: &Kzg,
    piece: &PieceArray,
    segment_commitment: &SegmentCommitment,
    position: u32,
) -> bool {
    let (record, commitment, witness) = piece.split();
    let witness = match Witness::try_from_bytes(witness) {
        Ok(witness) => witness,
        _ => {
            return false;
        }
    };

    let mut scalars = Vec::with_capacity(record.len().next_power_of_two());

    for record_chunk in record.iter() {
        match Scalar::try_from(record_chunk) {
            Ok(scalar) => {
                scalars.push(scalar);
            }
            _ => {
                return false;
            }
        }
    }

    // Number of scalars for KZG must be a power of two
    scalars.resize(scalars.capacity(), Scalar::default());

    let polynomial = match kzg.poly(&scalars) {
        Ok(polynomial) => polynomial,
        _ => {
            return false;
        }
    };

    if kzg
        .commit(&polynomial)
        .map(|commitment| commitment.to_bytes())
        .as_ref()
        != Ok(commitment)
    {
        return false;
    }

    let Ok(segment_commitment) = Commitment::try_from(segment_commitment) else {
        return false;
    };

    let commitment_hash = Scalar::try_from(blake3_254_hash_to_scalar(commitment.as_ref()))
        .expect("Created correctly by dedicated hash function; qed");

    kzg.verify(
        &segment_commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        &commitment_hash,
        &witness,
    )
}

/// Validate the witness for a record commitment hash produced by the archiver, where `position`
/// is the position of the corresponding piece within the segment.
#[cfg(feature = "kzg")]
pub fn is_record_commitment_hash_valid(
    kzg: &Kzg,
    record_commitment_hash: &Scalar,
    commitment: &SegmentCommitment,
    witness: &RecordWitness,
    position: u32,
) -> bool {
    let Ok(commitment) = Commitment::try_from(commitment) else {
        return false;
    };
    let Ok(witness) = Witness::try_from(witness) else {
        return false;
    };

    kzg.verify(
        &commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        record_commitment_hash,
        &witness,
    )
}

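/// Derive the proof-of-time entropy from the audited chunk and the proof of time output.
///
/// A minimal usage sketch (not compiled; variable names are illustrative):
///
/// ```ignore
/// let pot_entropy = derive_pot_entropy(&solution.chunk, proof_of_time);
/// ```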
#[inline]
pub fn derive_pot_entropy(chunk: &ScalarBytes, proof_of_time: PotOutput) -> Blake3Hash {
    blake3_hash_list(&[chunk.as_ref(), proof_of_time.as_ref()])
}

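/// Derive the next solution range from the era duration (in blocks), the number of slots the era
/// actually took, the target slot probability and the current solution range.
///
/// The adjustment keeps block production at the target rate as pledged space changes: the current
/// range is scaled by the ratio of actual to expected slots per block and clamped to
/// `[current / 4, current * 4]`. A worked sketch of the arithmetic (not compiled; numbers are
/// illustrative):
///
/// ```ignore
/// // An era of 100 blocks took 200 slots with a target of 1 block per 6 slots
/// // (slot_probability = (1, 6)): blocks are coming too fast, so the range shrinks:
/// // next = current * 200 * 1 / (100 * 6) = current / 3.
/// let next = derive_next_solution_range(start_slot, current_slot, (1, 6), current_solution_range, 100);
/// ```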
pub fn derive_next_solution_range(
    start_slot: SlotNumber,
    current_slot: SlotNumber,
    slot_probability: (u64, u64),
    current_solution_range: SolutionRange,
    era_duration: BlockNumber,
) -> SolutionRange {
    // Number of slots it took to produce `era_duration` blocks
    let era_slot_count = current_slot - start_slot;

    // Scale the current solution range by the ratio of actual to expected slots per block
    // (expected slots per block is `slot_probability.1 / slot_probability.0`), using u128
    // intermediates to avoid overflow, and clamp the adjustment to the `[1/4, 4]` range
    u64::try_from(
        u128::from(current_solution_range)
            .saturating_mul(u128::from(era_slot_count))
            .saturating_mul(u128::from(slot_probability.0))
            / u128::from(era_duration)
            / u128::from(slot_probability.1),
    )
    .unwrap_or(u64::MAX)
    .clamp(
        current_solution_range / 4,
        current_solution_range.saturating_mul(4),
    )
}