//! Verification primitives for Subspace.

#![forbid(unsafe_code)]
#![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
#![feature(array_chunks, portable_simd)]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(not(feature = "std"))]
extern crate alloc;

#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(all(feature = "kzg", not(feature = "std")))]
use alloc::vec::Vec;
use codec::{Decode, Encode, MaxEncodedLen};
use core::mem;
#[cfg(feature = "kzg")]
use core::simd::Simd;
use schnorrkel::context::SigningContext;
use schnorrkel::SignatureError;
#[cfg(feature = "kzg")]
use subspace_core_primitives::hashes::blake3_254_hash_to_scalar;
use subspace_core_primitives::hashes::{blake3_hash_list, blake3_hash_with_key, Blake3Hash};
#[cfg(feature = "kzg")]
use subspace_core_primitives::pieces::{PieceArray, Record, RecordWitness};
use subspace_core_primitives::pot::PotOutput;
#[cfg(feature = "kzg")]
use subspace_core_primitives::sectors::SectorId;
use subspace_core_primitives::sectors::SectorSlotChallenge;
#[cfg(feature = "kzg")]
use subspace_core_primitives::segments::ArchivedHistorySegment;
use subspace_core_primitives::segments::{HistorySize, SegmentCommitment};
#[cfg(feature = "kzg")]
use subspace_core_primitives::solutions::Solution;
use subspace_core_primitives::solutions::{RewardSignature, SolutionRange};
use subspace_core_primitives::{BlockNumber, BlockWeight, PublicKey, ScalarBytes, SlotNumber};
#[cfg(feature = "kzg")]
use subspace_kzg::{Commitment, Kzg, Scalar, Witness};
#[cfg(feature = "kzg")]
use subspace_proof_of_space::Table;
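
/// Errors encountered by the Subspace consensus primitives.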
#[derive(Debug, Eq, PartialEq, thiserror::Error)]
pub enum Error {
#[error("Piece verification failed")]
InvalidPieceOffset {
piece_offset: u16,
max_pieces_in_sector: u16,
},
#[error("Sector expired")]
SectorExpired {
expiration_history_size: HistorySize,
current_history_size: HistorySize,
},
#[error("Piece verification failed")]
InvalidPiece,
#[error(
"Solution distance {solution_distance} is outside of solution range \
{half_solution_range} (half of actual solution range)"
)]
OutsideSolutionRange {
half_solution_range: SolutionRange,
solution_distance: SolutionRange,
},
#[error("Invalid proof of space")]
InvalidProofOfSpace,
#[error("Invalid audit chunk offset")]
InvalidAuditChunkOffset,
#[error("Invalid chunk: {0}")]
InvalidChunk(String),
#[error("Invalid chunk witness")]
InvalidChunkWitness,
#[error("Invalid history size")]
InvalidHistorySize,
}
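
/// Checks the reward signature validity: decodes the Schnorrkel public key and signature from
/// their raw byte representations and verifies `signature` over `hash` in the given signing
/// context.
///
/// # Example
///
/// An illustrative sketch (not from the crate's own docs); it assumes `PublicKey` and
/// `RewardSignature` can be built from raw byte arrays via `From`, and the signing-context label
/// below is a placeholder rather than the protocol's actual label, so the snippet is marked
/// `ignore`:
///
/// ```ignore
/// let keypair = schnorrkel::Keypair::generate();
/// // Placeholder context label, not necessarily the one used by the protocol
/// let reward_signing_context = schnorrkel::signing_context(b"example_reward");
/// let pre_hash = [0u8; 32];
/// let signature = keypair.sign(reward_signing_context.bytes(&pre_hash));
///
/// let public_key = PublicKey::from(keypair.public.to_bytes());
/// let reward_signature = RewardSignature::from(signature.to_bytes());
/// assert!(check_reward_signature(
///     &pre_hash,
///     &reward_signature,
///     &public_key,
///     &reward_signing_context,
/// )
/// .is_ok());
/// ```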
pub fn check_reward_signature(
    hash: &[u8],
    signature: &RewardSignature,
    public_key: &PublicKey,
    reward_signing_context: &SigningContext,
) -> Result<(), SignatureError> {
    let public_key = schnorrkel::PublicKey::from_bytes(public_key.as_ref())?;
    let signature = schnorrkel::Signature::from_bytes(signature.as_ref())?;
    public_key.verify(reward_signing_context.bytes(hash), &signature)
}
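
/// Calculates the solution distance for the given parameters: the chunk is keyed-hashed with the
/// sector slot challenge, the first `SolutionRange`-sized prefixes of that hash and of the global
/// challenge are interpreted as little-endian integers, and the bidirectional distance between
/// them is returned. Used as a primitive by [`is_within_solution_range()`].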
fn calculate_solution_distance(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
) -> SolutionRange {
    let audit_chunk = blake3_hash_with_key(sector_slot_challenge, chunk);
    let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *audit_chunk
            .array_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .next()
            .expect("Solution range is smaller in size than global challenge; qed"),
    );
    let global_challenge_as_solution_range: SolutionRange = SolutionRange::from_le_bytes(
        *global_challenge
            .array_chunks::<{ mem::size_of::<SolutionRange>() }>()
            .next()
            .expect("Solution range is smaller in size than global challenge; qed"),
    );

    subspace_core_primitives::solutions::bidirectional_distance(
        &global_challenge_as_solution_range,
        &audit_chunk_as_solution_range,
    )
}
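
/// Returns `Some(solution_distance)` if the solution distance for the provided chunk is within
/// the provided solution range (i.e. not more than half of it away from the global challenge),
/// `None` otherwise.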
pub fn is_within_solution_range(
    global_challenge: &Blake3Hash,
    chunk: &[u8; 32],
    sector_slot_challenge: &SectorSlotChallenge,
    solution_range: SolutionRange,
) -> Option<SolutionRange> {
    let solution_distance =
        calculate_solution_distance(global_challenge, chunk, sector_slot_challenge);
    (solution_distance <= solution_range / 2).then_some(solution_distance)
}
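
/// Parameters for checking piece validity.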
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct PieceCheckParams {
    /// How many pieces a sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment commitment of the segment to which the piece belongs
    pub segment_commitment: SegmentCommitment,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment commitment used for the sector expiration check (if any)
    pub sector_expiration_check_segment_commitment: Option<SegmentCommitment>,
}
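
/// Parameters for solution verification.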
#[derive(Debug, Clone, Encode, Decode, MaxEncodedLen)]
pub struct VerifySolutionParams {
    /// Proof of time for which the solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity; if `None`, the piece validity check is skipped
    pub piece_check_params: Option<PieceCheckParams>,
}
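
/// Calculates block weight derived from the provided solution range: the narrower the solution
/// range, the heavier the block.
///
/// A minimal illustration (assuming `SolutionRange` is an unsigned integer alias and
/// `BlockWeight` is ordered; marked `ignore` so it is not run as a doctest):
///
/// ```ignore
/// // A solution found under a narrower (harder) range carries more weight
/// assert!(calculate_block_weight(1_000) > calculate_block_weight(2_000));
/// ```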
pub fn calculate_block_weight(solution_range: SolutionRange) -> BlockWeight {
    BlockWeight::from(SolutionRange::MAX - solution_range)
}
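
/// Verifies whether a solution is valid for the given slot and parameters, returning the solution
/// distance (which is `<= solution_range / 2`) on success.
///
/// The following checks are performed:
/// * the proof of space is valid for the derived evaluation seed and s-bucket audit index;
/// * the chunk masked with the proof-of-space hash is within the solution range;
/// * the chunk belongs to the record (KZG witness check against the record commitment);
/// * optionally, when [`PieceCheckParams`] are provided: the piece offset is within bounds, the
///   sector has not expired, and the record commitment is part of archived history (KZG witness
///   check against the segment commitment).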
#[cfg(feature = "kzg")]
pub fn verify_solution<'a, PosTable, RewardAddress>(
    solution: &'a Solution<RewardAddress>,
    slot: SlotNumber,
    params: &'a VerifySolutionParams,
    kzg: &'a Kzg,
) -> Result<SolutionRange, Error>
where
    PosTable: Table,
{
    let VerifySolutionParams {
        proof_of_time,
        solution_range,
        piece_check_params,
    } = params;

    let sector_id = SectorId::new(
        solution.public_key.hash(),
        solution.sector_index,
        solution.history_size,
    );

    let global_randomness = proof_of_time.derive_global_randomness();
    let global_challenge = global_randomness.derive_global_challenge(slot);
    let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
    let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

    // Check that the proof of space is valid
    if !PosTable::is_proof_valid(
        &sector_id.derive_evaluation_seed(solution.piece_offset),
        s_bucket_audit_index.into(),
        &solution.proof_of_space,
    ) {
        return Err(Error::InvalidProofOfSpace);
    };

    let masked_chunk =
        (Simd::from(*solution.chunk) ^ Simd::from(*solution.proof_of_space.hash())).to_array();

    let solution_distance =
        calculate_solution_distance(&global_challenge, &masked_chunk, &sector_slot_challenge);

    // Check that the solution is within the solution range
    if solution_distance > solution_range / 2 {
        return Err(Error::OutsideSolutionRange {
            half_solution_range: solution_range / 2,
            solution_distance,
        });
    }

    // Check that the chunk belongs to the record
    if !kzg.verify(
        &Commitment::try_from(solution.record_commitment)
            .map_err(|_error| Error::InvalidChunkWitness)?,
        Record::NUM_S_BUCKETS,
        s_bucket_audit_index.into(),
        &Scalar::try_from(solution.chunk).map_err(Error::InvalidChunk)?,
        &Witness::try_from(solution.chunk_witness).map_err(|_error| Error::InvalidChunkWitness)?,
    ) {
        return Err(Error::InvalidChunkWitness);
    }

    if let Some(PieceCheckParams {
        max_pieces_in_sector,
        segment_commitment,
        recent_segments,
        recent_history_fraction,
        min_sector_lifetime,
        current_history_size,
        sector_expiration_check_segment_commitment,
    }) = piece_check_params
    {
        if u16::from(solution.piece_offset) >= *max_pieces_in_sector {
            return Err(Error::InvalidPieceOffset {
                piece_offset: u16::from(solution.piece_offset),
                max_pieces_in_sector: *max_pieces_in_sector,
            });
        }

        if let Some(sector_expiration_check_segment_commitment) =
            sector_expiration_check_segment_commitment
        {
            let expiration_history_size = match sector_id.derive_expiration_history_size(
                solution.history_size,
                sector_expiration_check_segment_commitment,
                *min_sector_lifetime,
            ) {
                Some(expiration_history_size) => expiration_history_size,
                None => {
                    return Err(Error::InvalidHistorySize);
                }
            };

            if expiration_history_size <= *current_history_size {
                return Err(Error::SectorExpired {
                    expiration_history_size,
                    current_history_size: *current_history_size,
                });
            }
        }

        let position = sector_id
            .derive_piece_index(
                solution.piece_offset,
                solution.history_size,
                *max_pieces_in_sector,
                *recent_segments,
                *recent_history_fraction,
            )
            .position();

        // Check that the record commitment is part of the archived history
        if !is_record_commitment_hash_valid(
            kzg,
            &Scalar::try_from(blake3_254_hash_to_scalar(
                solution.record_commitment.as_ref(),
            ))
            .expect("Create correctly by dedicated hash function; qed"),
            segment_commitment,
            &solution.record_witness,
            position,
        ) {
            return Err(Error::InvalidPiece);
        }
    }

    Ok(solution_distance)
}
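
/// Validates the witness embedded within a piece produced by the archiver: the record is
/// interpreted as scalars padded to the next power of two, committed to with KZG, the resulting
/// commitment is compared with the one embedded in the piece, and the piece witness is then
/// verified against the segment commitment at the given position.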
#[cfg(feature = "kzg")]
pub fn is_piece_valid(
    kzg: &Kzg,
    piece: &PieceArray,
    segment_commitment: &SegmentCommitment,
    position: u32,
) -> bool {
    let (record, commitment, witness) = piece.split();
    let witness = match Witness::try_from_bytes(witness) {
        Ok(witness) => witness,
        _ => {
            return false;
        }
    };

    let mut scalars = Vec::with_capacity(record.len().next_power_of_two());

    for record_chunk in record.iter() {
        match Scalar::try_from(record_chunk) {
            Ok(scalar) => {
                scalars.push(scalar);
            }
            _ => {
                return false;
            }
        }
    }

    // Number of scalars for KZG must be a power of two elements
    scalars.resize(scalars.capacity(), Scalar::default());

    let polynomial = match kzg.poly(&scalars) {
        Ok(polynomial) => polynomial,
        _ => {
            return false;
        }
    };

    // Commitment to the reconstructed polynomial must match the commitment embedded in the piece
    if kzg
        .commit(&polynomial)
        .map(|commitment| commitment.to_bytes())
        .as_ref()
        != Ok(commitment)
    {
        return false;
    }

    let Ok(segment_commitment) = Commitment::try_from(segment_commitment) else {
        return false;
    };

    let commitment_hash = Scalar::try_from(blake3_254_hash_to_scalar(commitment.as_ref()))
        .expect("Create correctly by dedicated hash function; qed");

    kzg.verify(
        &segment_commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        &commitment_hash,
        &witness,
    )
}
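
/// Validates the witness for a record commitment hash produced by the archiver against the
/// segment commitment at the given position.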
#[cfg(feature = "kzg")]
pub fn is_record_commitment_hash_valid(
    kzg: &Kzg,
    record_commitment_hash: &Scalar,
    commitment: &SegmentCommitment,
    witness: &RecordWitness,
    position: u32,
) -> bool {
    let Ok(commitment) = Commitment::try_from(commitment) else {
        return false;
    };
    let Ok(witness) = Witness::try_from(witness) else {
        return false;
    };

    kzg.verify(
        &commitment,
        ArchivedHistorySegment::NUM_PIECES,
        position,
        record_commitment_hash,
        &witness,
    )
}
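
/// Derives proof-of-time entropy from the chunk and proof-of-time output.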
#[inline]
pub fn derive_pot_entropy(chunk: &ScalarBytes, proof_of_time: PotOutput) -> Blake3Hash {
    blake3_hash_list(&[chunk.as_ref(), proof_of_time.as_ref()])
}
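
/// Derives the next solution range from the actual number of slots the era took, the slot
/// probability and the era duration, clamping the result to within 1/4 and 4x of the current
/// solution range.
///
/// A worked example with made-up numbers (marked `ignore` so it is not run as a doctest): with a
/// slot probability of 1 block per 6 slots and an era of 2016 blocks, an era that took exactly
/// `6 * 2016` slots leaves the solution range unchanged.
///
/// ```ignore
/// let current_solution_range = 1_000_000_000_u64;
/// let next = derive_next_solution_range(
///     1_000,            // era start slot
///     1_000 + 6 * 2016, // current slot
///     (1, 6),           // slot probability
///     current_solution_range,
///     2016,             // era duration in blocks
/// );
/// assert_eq!(next, current_solution_range);
/// ```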
pub fn derive_next_solution_range(
    start_slot: SlotNumber,
    current_slot: SlotNumber,
    slot_probability: (u64, u64),
    current_solution_range: SolutionRange,
    era_duration: BlockNumber,
) -> u64 {
    // Total number of slots the era actually took
    let era_slot_count = current_slot - start_slot;

    // Re-scale the solution range by the ratio of actual to expected slots per block, so that
    // block production keeps the same pace as space pledged to the network changes; the result
    // is clamped to within 1/4 and 4x of the current solution range.
    u64::try_from(
        u128::from(current_solution_range)
            .saturating_mul(u128::from(era_slot_count))
            .saturating_mul(u128::from(slot_probability.0))
            / u128::from(era_duration)
            / u128::from(slot_probability.1),
    )
    .unwrap_or(u64::MAX)
    .clamp(
        current_solution_range / 4,
        current_solution_range.saturating_mul(4),
    )
}