// subspace_archiving/archiver/incremental_record_commitments.rs

#[cfg(not(feature = "std"))]
extern crate alloc;

use crate::archiver::Segment;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use core::ops::{Deref, DerefMut};
use parity_scale_codec::{Encode, Output};
#[cfg(feature = "parallel")]
use rayon::prelude::*;
use subspace_core_primitives::pieces::RawRecord;
use subspace_core_primitives::ScalarBytes;
use subspace_kzg::{Commitment, Kzg, Scalar};
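
/// State of incremental record commitments, encapsulated to hide implementation details and
/// tricky logic from the rest of the archiver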
#[derive(Debug, Default, Clone)]
pub(super) struct IncrementalRecordCommitmentsState {
    /// Commitments to the records of the segment processed so far
    state: Vec<Commitment>,
}

impl Deref for IncrementalRecordCommitmentsState {
    type Target = Vec<Commitment>;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.state
    }
}

impl DerefMut for IncrementalRecordCommitmentsState {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.state
    }
}

impl IncrementalRecordCommitmentsState {
    /// Creates an empty state with capacity for the specified number of record commitments
    pub(super) fn with_capacity(capacity: usize) -> Self {
        Self {
            state: Vec::with_capacity(capacity),
        }
    }

    /// Clears the internal state before processing of the next segment begins
    pub(super) fn clear(&mut self) {
        self.state.clear();
    }
}
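
/// Updates the internal record commitments state from the given (potentially partial) segment.
///
/// Only records that do not have commitments yet are processed.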
pub(super) fn update_record_commitments(
    incremental_record_commitments: &mut IncrementalRecordCommitmentsState,
    segment: &Segment,
    kzg: &Kzg,
) {
    segment.encode_to(&mut IncrementalRecordCommitmentsProcessor::new(
        incremental_record_commitments,
        kzg,
    ));
}
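
/// Processor for record commitments: consumes the SCALE encoding of a segment and creates
/// commitments for complete records as a side effect of being dropped.
///
/// Hidden inside this module so that implementation details (such as the `Output` trait
/// implementation) are not exposed.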
struct IncrementalRecordCommitmentsProcessor<'a> {
    /// Number of bytes at the start of the segment encoding to skip, corresponding to records
    /// that already have commitments
    skip_bytes: usize,
    /// Buffer accumulating bytes for which commitments were not created yet
    buffer: Vec<u8>,
    /// Record commitments created so far
    incremental_record_commitments: &'a mut IncrementalRecordCommitmentsState,
    /// KZG instance used for commitment creation
    kzg: &'a Kzg,
}

impl Drop for IncrementalRecordCommitmentsProcessor<'_> {
    fn drop(&mut self) {
        // Commitments are only created for complete records; any trailing bytes belong to a
        // record that is not finished yet and will be submitted again with the next update
        #[cfg(not(feature = "parallel"))]
        let raw_records_bytes = self.buffer.chunks_exact(RawRecord::SIZE);
        #[cfg(feature = "parallel")]
        let raw_records_bytes = self.buffer.par_chunks_exact(RawRecord::SIZE);

        let iter = raw_records_bytes
            .map(|raw_record_bytes| {
                // Interpret record bytes as a sequence of scalars, one per safe-byte chunk
                raw_record_bytes
                    .array_chunks::<{ ScalarBytes::SAFE_BYTES }>()
                    .map(Scalar::from)
            })
            .map(|record_chunks| {
                let number_of_chunks = record_chunks.len();
                let mut scalars = Vec::with_capacity(number_of_chunks.next_power_of_two());

                record_chunks.collect_into(&mut scalars);

                // The number of scalars in the polynomial must be a power of two, pad with zero
                // scalars as necessary
                scalars.resize(scalars.capacity(), Scalar::default());

                let polynomial = self
                    .kzg
                    .poly(&scalars)
                    .expect("KZG instance must be configured to support this many scalars; qed");
                self.kzg
                    .commit(&polynomial)
                    .expect("KZG instance must be configured to support this many scalars; qed")
            });

        #[cfg(not(feature = "parallel"))]
        iter.collect_into(&mut self.incremental_record_commitments.state);
        #[cfg(feature = "parallel")]
        self.incremental_record_commitments.par_extend(iter);
    }
}
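
// `parity_scale_codec::Output` is the sink used by `Encode::encode_to`, so the segment is
// SCALE-encoded straight into this processor rather than into an intermediate allocation.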
impl Output for IncrementalRecordCommitmentsProcessor<'_> {
    fn write(&mut self, mut bytes: &[u8]) {
        if self.skip_bytes >= bytes.len() {
            // Everything written so far belongs to records that already have commitments
            self.skip_bytes -= bytes.len();
        } else {
            // Skip the remaining prefix of already-processed bytes and buffer the rest for
            // commitment creation in `drop`
            bytes = &bytes[self.skip_bytes..];
            self.skip_bytes = 0;

            self.buffer.extend_from_slice(bytes);
        }
    }
}

impl<'a> IncrementalRecordCommitmentsProcessor<'a> {
    fn new(
        incremental_record_commitments: &'a mut IncrementalRecordCommitmentsState,
        kzg: &'a Kzg,
    ) -> Self {
        Self {
            // Bytes of records that already have commitments do not need to be processed again
            skip_bytes: incremental_record_commitments.len() * RawRecord::SIZE,
            buffer: Vec::with_capacity(RawRecord::SIZE),
            incremental_record_commitments,
            kzg,
        }
    }
}