1use crate::auditing::ChunkCandidate;
7use crate::reading::{
8 ReadSectorRecordChunksMode, ReadingError, read_record_metadata, read_sector_record_chunks,
9};
10use crate::sector::{
11 SectorContentsMap, SectorContentsMapFromBytesError, SectorMetadataChecksummed,
12};
13use crate::{ReadAt, ReadAtSync};
14use futures::FutureExt;
15use std::collections::VecDeque;
16use std::io;
17use subspace_core_primitives::pieces::{PieceOffset, Record};
18use subspace_core_primitives::pos::PosSeed;
19use subspace_core_primitives::sectors::{SBucket, SectorId};
20use subspace_core_primitives::solutions::{ChunkWitness, Solution, SolutionRange};
21use subspace_core_primitives::{PublicKey, ScalarBytes};
22use subspace_erasure_coding::ErasureCoding;
23use subspace_kzg::Kzg;
24use subspace_proof_of_space::Table;
25use thiserror::Error;
26
/// Iterator over successfully proven solutions that additionally exposes the best solution
/// distance up front, without consuming the iterator.
pub trait ProvableSolutions: ExactSizeIterator {
    /// Best solution distance found, `None` means there are no solutions
    fn best_solution_distance(&self) -> Option<SolutionRange>;
}
34
/// Errors that can happen during proving
#[derive(Debug, Error)]
pub enum ProvingError {
    /// Invalid erasure coding instance (supports too few shards)
    #[error("Invalid erasure coding instance")]
    InvalidErasureCodingInstance,
    /// Failed to create polynomial for record
    #[error("Failed to create polynomial for record at offset {piece_offset}: {error}")]
    FailedToCreatePolynomialForRecord {
        /// Piece offset of the record
        piece_offset: PieceOffset,
        /// Lower-level error message
        error: String,
    },
    /// Failed to create chunk witness
    #[error(
        "Failed to create chunk witness for record at offset {piece_offset} chunk {chunk_offset}: \
        {error}"
    )]
    FailedToCreateChunkWitness {
        /// Piece offset of the record
        piece_offset: PieceOffset,
        /// Offset of the chunk within the s-bucket
        chunk_offset: u32,
        /// Lower-level error message
        error: String,
    },
    /// Failed to decode sector contents map
    #[error("Failed to decode sector contents map: {0}")]
    FailedToDecodeSectorContentsMap(#[from] SectorContentsMapFromBytesError),
    /// I/O error occurred while reading the sector
    #[error("Proving I/O error: {0}")]
    Io(#[from] io::Error),
    /// Record reading error
    #[error("Record reading error: {0}")]
    RecordReadingError(#[from] ReadingError),
}
72
73impl ProvingError {
74 pub fn is_fatal(&self) -> bool {
76 match self {
77 ProvingError::InvalidErasureCodingInstance => true,
78 ProvingError::FailedToCreatePolynomialForRecord { .. } => false,
79 ProvingError::FailedToCreateChunkWitness { .. } => false,
80 ProvingError::FailedToDecodeSectorContentsMap(_) => false,
81 ProvingError::Io(_) => true,
82 ProvingError::RecordReadingError(error) => error.is_fatal(),
83 }
84 }
85}
86
/// Chunk candidate that corresponds to an encoded (plotted) record and can therefore be turned
/// into a solution
#[derive(Debug, Clone)]
struct WinningChunk {
    /// Chunk offset within the audited s-bucket
    chunk_offset: u32,
    /// Offset of the piece in the sector that this chunk belongs to
    piece_offset: PieceOffset,
    /// Solution distance of this chunk (from auditing)
    solution_distance: SolutionRange,
}
96
/// Container of chunk candidates for one sector/s-bucket that can be turned into solutions via
/// [`SolutionCandidates::into_solutions`]
#[derive(Debug)]
pub struct SolutionCandidates<'a, Sector>
where
    Sector: 'a,
{
    /// Public key of the farmer
    public_key: &'a PublicKey,
    /// ID of the sector being proven
    sector_id: SectorId,
    /// S-bucket that was audited
    s_bucket: SBucket,
    /// Reader for the plotted sector contents
    sector: Sector,
    /// Checksummed metadata of the sector
    sector_metadata: &'a SectorMetadataChecksummed,
    /// Chunk candidates produced by auditing
    chunk_candidates: VecDeque<ChunkCandidate>,
}
113
114impl<'a, Sector> Clone for SolutionCandidates<'a, Sector>
115where
116 Sector: Clone + 'a,
117{
118 fn clone(&self) -> Self {
119 Self {
120 public_key: self.public_key,
121 sector_id: self.sector_id,
122 s_bucket: self.s_bucket,
123 sector: self.sector.clone(),
124 sector_metadata: self.sector_metadata,
125 chunk_candidates: self.chunk_candidates.clone(),
126 }
127 }
128}
129
impl<'a, Sector> SolutionCandidates<'a, Sector>
where
    Sector: ReadAtSync + 'a,
{
    /// Creates a new container of solution candidates for a single sector and s-bucket
    pub(crate) fn new(
        public_key: &'a PublicKey,
        sector_id: SectorId,
        s_bucket: SBucket,
        sector: Sector,
        sector_metadata: &'a SectorMetadataChecksummed,
        chunk_candidates: VecDeque<ChunkCandidate>,
    ) -> Self {
        Self {
            public_key,
            sector_id,
            s_bucket,
            sector,
            sector_metadata,
            chunk_candidates,
        }
    }

    /// Total number of candidate chunks
    pub fn len(&self) -> usize {
        self.chunk_candidates.len()
    }

    /// Returns `true` if there are no candidate chunks
    pub fn is_empty(&self) -> bool {
        self.chunk_candidates.is_empty()
    }

    /// Turns candidates into an iterator of solutions (or per-candidate errors).
    ///
    /// `table_generator` re-derives the proof-of-space table for each winning piece and `mode`
    /// controls how record chunks are read from the sector. Construction itself can fail, e.g.
    /// when the erasure coding instance is invalid or the sector contents map can't be read.
    pub fn into_solutions<RewardAddress, PosTable, TableGenerator>(
        self,
        reward_address: &'a RewardAddress,
        kzg: &'a Kzg,
        erasure_coding: &'a ErasureCoding,
        mode: ReadSectorRecordChunksMode,
        table_generator: TableGenerator,
    ) -> Result<impl ProvableSolutions<Item = MaybeSolution<RewardAddress>> + 'a, ProvingError>
    where
        RewardAddress: Copy,
        PosTable: Table,
        TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
    {
        SolutionsIterator::<'a, _, PosTable, _, _>::new(
            self.public_key,
            reward_address,
            self.sector_id,
            self.s_bucket,
            self.sector,
            self.sector_metadata,
            kzg,
            erasure_coding,
            self.chunk_candidates,
            mode,
            table_generator,
        )
    }
}
191
/// Either a successfully created solution or the error that prevented creating it
type MaybeSolution<RewardAddress> = Result<Solution<RewardAddress>, ProvingError>;
193
/// Iterator that lazily turns winning chunks into solutions, doing the record reads and
/// witness creation one item at a time
struct SolutionsIterator<'a, RewardAddress, PosTable, TableGenerator, Sector>
where
    Sector: ReadAtSync + 'a,
    PosTable: Table,
    TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
{
    /// Public key of the farmer
    public_key: &'a PublicKey,
    /// Address rewards should be attributed to
    reward_address: &'a RewardAddress,
    /// ID of the sector being proven
    sector_id: SectorId,
    /// Audited s-bucket
    s_bucket: SBucket,
    /// Checksummed sector metadata
    sector_metadata: &'a SectorMetadataChecksummed,
    /// Per-s-bucket offsets derived from sector metadata
    s_bucket_offsets: Box<[u32; Record::NUM_S_BUCKETS]>,
    /// KZG instance used to create chunk witnesses
    kzg: &'a Kzg,
    /// Erasure coding instance used to recover record polynomials
    erasure_coding: &'a ErasureCoding,
    /// Sector contents map decoded from the start of the sector
    sector_contents_map: SectorContentsMap,
    /// Sector reader; wrapped as sync `ReadAt` so async reading functions resolve immediately
    /// when driven with `now_or_never()`
    sector: ReadAt<Sector, !>,
    /// Remaining winning chunks, consumed from the front
    winning_chunks: VecDeque<WinningChunk>,
    /// Remaining item count, kept in sync with `winning_chunks` for exact `size_hint()`
    count: usize,
    /// Solution distance of the front winning chunk at construction time
    best_solution_distance: Option<SolutionRange>,
    /// How record chunks should be read from the sector
    mode: ReadSectorRecordChunksMode,
    /// Generates proof-of-space tables from evaluation seeds
    table_generator: TableGenerator,
}
216
// Empty impl is sound: `Iterator::size_hint()` below returns the exact remaining `count` as
// both bounds, which is precisely the `ExactSizeIterator` contract.
impl<'a, RewardAddress, PosTable, TableGenerator, Sector> ExactSizeIterator
    for SolutionsIterator<'a, RewardAddress, PosTable, TableGenerator, Sector>
where
    RewardAddress: Copy,
    Sector: ReadAtSync + 'a,
    PosTable: Table,
    TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
{
}
226
227impl<'a, RewardAddress, PosTable, TableGenerator, Sector> Iterator
228 for SolutionsIterator<'a, RewardAddress, PosTable, TableGenerator, Sector>
229where
230 RewardAddress: Copy,
231 Sector: ReadAtSync + 'a,
232 PosTable: Table,
233 TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
234{
235 type Item = MaybeSolution<RewardAddress>;
236
237 fn next(&mut self) -> Option<Self::Item> {
238 let WinningChunk {
239 chunk_offset,
240 piece_offset,
241 solution_distance: _,
242 } = self.winning_chunks.pop_front()?;
243
244 self.count -= 1;
245
246 let pos_table =
248 (self.table_generator)(&self.sector_id.derive_evaluation_seed(piece_offset));
249
250 let maybe_solution: Result<_, ProvingError> = try {
251 let sector_record_chunks_fut = read_sector_record_chunks(
252 piece_offset,
253 self.sector_metadata.pieces_in_sector,
254 &self.s_bucket_offsets,
255 &self.sector_contents_map,
256 &pos_table,
257 &self.sector,
258 self.mode,
259 );
260 let sector_record_chunks = sector_record_chunks_fut
261 .now_or_never()
262 .expect("Sync reader; qed")
263 .map_err(ProvingError::RecordReadingError)?;
264
265 let chunk = ScalarBytes::from(
266 sector_record_chunks
267 .get(usize::from(self.s_bucket))
268 .expect("Within s-bucket range; qed")
269 .expect("Winning chunk was plotted; qed"),
270 );
271
272 let source_chunks_polynomial = self
273 .erasure_coding
274 .recover_poly(sector_record_chunks.as_slice())
275 .map_err(|error| ReadingError::FailedToErasureDecodeRecord {
276 piece_offset,
277 error,
278 })
279 .map_err(ProvingError::RecordReadingError)?;
280 drop(sector_record_chunks);
281
282 let record_metadata_fut = read_record_metadata(
285 piece_offset,
286 self.sector_metadata.pieces_in_sector,
287 &self.sector,
288 );
289 let record_metadata = record_metadata_fut
290 .now_or_never()
291 .expect("Sync reader; qed")
292 .map_err(ProvingError::RecordReadingError)?;
293
294 let proof_of_space = pos_table.find_proof(self.s_bucket.into()).expect(
295 "Quality exists for this s-bucket, otherwise it wouldn't be a winning chunk; qed",
296 );
297
298 let chunk_witness = self
299 .kzg
300 .create_witness(
301 &source_chunks_polynomial,
302 Record::NUM_S_BUCKETS,
303 self.s_bucket.into(),
304 )
305 .map_err(|error| ProvingError::FailedToCreateChunkWitness {
306 piece_offset,
307 chunk_offset,
308 error,
309 })?;
310
311 Solution {
312 public_key: *self.public_key,
313 reward_address: *self.reward_address,
314 sector_index: self.sector_metadata.sector_index,
315 history_size: self.sector_metadata.history_size,
316 piece_offset,
317 record_commitment: record_metadata.commitment,
318 record_witness: record_metadata.witness,
319 chunk,
320 chunk_witness: ChunkWitness::from(chunk_witness),
321 proof_of_space,
322 }
323 };
324
325 match maybe_solution {
326 Ok(solution) => Some(Ok(solution)),
327 Err(error) => Some(Err(error)),
328 }
329 }
330
331 fn size_hint(&self) -> (usize, Option<usize>) {
332 (self.count, Some(self.count))
333 }
334}
335
impl<'a, RewardAddress, PosTable, TableGenerator, Sector> ProvableSolutions
    for SolutionsIterator<'a, RewardAddress, PosTable, TableGenerator, Sector>
where
    RewardAddress: Copy,
    Sector: ReadAtSync + 'a,
    PosTable: Table,
    TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
{
    /// Solution distance of the front winning chunk, captured during construction; `None` when
    /// there were no winning chunks
    fn best_solution_distance(&self) -> Option<SolutionRange> {
        self.best_solution_distance
    }
}
348
349impl<'a, RewardAddress, PosTable, TableGenerator, Sector>
350 SolutionsIterator<'a, RewardAddress, PosTable, TableGenerator, Sector>
351where
352 RewardAddress: Copy,
353 Sector: ReadAtSync + 'a,
354 PosTable: Table,
355 TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
356{
357 #[allow(clippy::too_many_arguments)]
358 fn new(
359 public_key: &'a PublicKey,
360 reward_address: &'a RewardAddress,
361 sector_id: SectorId,
362 s_bucket: SBucket,
363 sector: Sector,
364 sector_metadata: &'a SectorMetadataChecksummed,
365 kzg: &'a Kzg,
366 erasure_coding: &'a ErasureCoding,
367 chunk_candidates: VecDeque<ChunkCandidate>,
368 mode: ReadSectorRecordChunksMode,
369 table_generator: TableGenerator,
370 ) -> Result<Self, ProvingError> {
371 if erasure_coding.max_shards() < Record::NUM_S_BUCKETS {
372 return Err(ProvingError::InvalidErasureCodingInstance);
373 }
374
375 let sector_contents_map = {
376 let mut sector_contents_map_bytes =
377 vec![0; SectorContentsMap::encoded_size(sector_metadata.pieces_in_sector)];
378
379 sector.read_at(&mut sector_contents_map_bytes, 0)?;
380
381 SectorContentsMap::from_bytes(
382 §or_contents_map_bytes,
383 sector_metadata.pieces_in_sector,
384 )?
385 };
386
387 let s_bucket_records = sector_contents_map
388 .iter_s_bucket_records(s_bucket)
389 .expect("S-bucket audit index is guaranteed to be in range; qed")
390 .collect::<Vec<_>>();
391 let winning_chunks = chunk_candidates
392 .into_iter()
393 .filter_map(move |chunk_candidate| {
394 let (piece_offset, encoded_chunk_used) = s_bucket_records
395 .get(chunk_candidate.chunk_offset as usize)
396 .expect("Wouldn't be a candidate if wasn't within s-bucket; qed");
397
398 encoded_chunk_used.then_some(WinningChunk {
399 chunk_offset: chunk_candidate.chunk_offset,
400 piece_offset: *piece_offset,
401 solution_distance: chunk_candidate.solution_distance,
402 })
403 })
404 .collect::<VecDeque<_>>();
405
406 let best_solution_distance = winning_chunks
407 .front()
408 .map(|winning_chunk| winning_chunk.solution_distance);
409
410 let s_bucket_offsets = sector_metadata.s_bucket_offsets();
411
412 let count = winning_chunks.len();
413
414 Ok(Self {
415 public_key,
416 reward_address,
417 sector_id,
418 s_bucket,
419 sector_metadata,
420 s_bucket_offsets,
421 kzg,
422 erasure_coding,
423 sector_contents_map,
424 sector: ReadAt::from_sync(sector),
425 winning_chunks,
426 count,
427 best_solution_distance,
428 mode,
429 table_generator,
430 })
431 }
432}