Skip to content

Commit

Permalink
fix counter incrementing
Browse files Browse the repository at this point in the history
  • Loading branch information
oconnor663 committed Jul 13, 2023
1 parent bff01e7 commit 0c55f95
Show file tree
Hide file tree
Showing 2 changed files with 85 additions and 95 deletions.
97 changes: 43 additions & 54 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1106,24 +1106,15 @@ impl OutputReader {
}
}

/// Fill a buffer with output bytes and advance the position of the
/// `OutputReader`. This is equivalent to [`Read::read`], except that it
/// doesn't return a `Result`. Both methods always fill the entire buffer.
///
/// Note that `OutputReader` doesn't buffer output bytes internally, so
/// calling `fill` repeatedly with a short-length or odd-length slice will
/// end up performing the same compression multiple times. If you're
/// reading output in a loop, prefer a slice length that's a multiple of
/// 64.
///
/// The maximum output size of BLAKE3 is 2<sup>64</sup>-1 bytes. If you try
/// to extract more than that, for example by seeking near the end and
/// reading further, the behavior is unspecified.
///
/// [`Read::read`]: #method.read
pub fn fill(&mut self, mut buf: &mut [u8]) {
// NOTE(review): this span is a rendered commit diff, not compilable source.
// Pre-commit and post-commit lines are interleaved with no +/- markers, and
// the "Expand All @@ ..." lines below are GitHub's elision markers for hidden
// lines, so part of the definition is not visible here. The comments added
// below describe the *apparent* post-commit logic; confirm against the
// repository's actual src/lib.rs before relying on them.
// There's some nontrivial logic here to handle partial blocks, and I don't want to copy-paste
// it between the xof and xof_xor cases.
#[inline(always)]
fn fill_inner(&mut self, mut buf: &mut [u8], xor: bool) {
// position_within_block is an offset into the current 64-byte output block.
debug_assert!(self.position_within_block < BLOCK_LEN as u8);
if self.position_within_block != 0 {
// The xof() and xof_xor() APIs can handle a partial block at the end but not a partial
// block at the beginning. We handle the beginning case here. Start by computing the
// complete block that we need part of.
let mut partial_block = [0u8; 64];
guts::DETECTED_IMPL.xof(
&self.inner.block,
Expand All @@ -1135,44 +1126,12 @@ impl OutputReader {
);
// Take the unread tail of the just-computed block.
let output_bytes = &partial_block[self.position_within_block as usize..];
let take = cmp::min(buf.len(), output_bytes.len());
// NOTE(review): from here the diff interleaves the old copy-only code with
// the new xor-aware branches; the statement order below is not authoritative.
buf[..take].copy_from_slice(&output_bytes[..take]);
buf = &mut buf[take..];
self.position_within_block += take as u8;
if self.position_within_block == BLOCK_LEN as u8 {
self.position_within_block = 0;
// Apparent subject of this commit ("fix counter incrementing"): bump the
// block counter once a partial block is fully consumed — TODO confirm.
self.inner.counter += 1;
if xor {
for byte_index in 0..take {
buf[byte_index] ^= output_bytes[byte_index];
}
} else {
debug_assert!(buf.is_empty());
return;
}
}
guts::DETECTED_IMPL.xof(
&self.inner.block,
self.inner.block_len as u32,
&self.inner.input_chaining_value,
self.inner.counter,
self.inner.flags as u32,
buf,
);
self.position_within_block = (buf.len() % BLOCK_LEN) as u8;
}

// NOTE(review): the pre-commit fill_xor() below appears to have been deleted
// by this commit in favor of fill_inner(..., xor = true) — TODO confirm.
pub fn fill_xor(&mut self, mut buf: &mut [u8]) {
debug_assert!(self.position_within_block < BLOCK_LEN as u8);
if self.position_within_block != 0 {
let mut partial_block = [0u8; 64];
guts::DETECTED_IMPL.xof(
&self.inner.block,
self.inner.block_len as u32,
&self.inner.input_chaining_value,
self.inner.counter,
self.inner.flags as u32,
&mut partial_block,
);
let output_bytes = &partial_block[self.position_within_block as usize..];
let take = cmp::min(buf.len(), output_bytes.len());
for byte_index in 0..take {
buf[byte_index] ^= output_bytes[byte_index];
buf[..take].copy_from_slice(&output_bytes[..take]);
}
buf = &mut buf[take..];
self.position_within_block += take as u8;
Expand All @@ -1184,17 +1143,47 @@ impl OutputReader {
return;
}
}
// Post-commit tail (apparent): choose the plain or XOR-ing XOF kernel once,
// then stream whole blocks directly into `buf`.
guts::DETECTED_IMPL.xof_xor(
let xof_fn = if xor {
guts::Implementation::xof_xor
} else {
guts::Implementation::xof
};
xof_fn(
&guts::DETECTED_IMPL,
&self.inner.block,
self.inner.block_len as u32,
&self.inner.input_chaining_value,
self.inner.counter,
self.inner.flags as u32,
buf,
);
// Advance the block counter by the number of whole 64-byte blocks written,
// and record any trailing partial-block offset for the next call.
self.inner.counter += (buf.len() / BLOCK_LEN) as u64;
self.position_within_block = (buf.len() % BLOCK_LEN) as u8;
}

/// Fill `buf` with bytes from the output stream and advance this
/// `OutputReader`'s position by `buf.len()`. This does the same job as
/// [`Read::read`], but it never returns a `Result`; both methods always
/// fill the whole buffer.
///
/// `OutputReader` keeps no internal buffer of output bytes, so repeated
/// calls with short or odd-length slices redo the same compression work.
/// When reading in a loop, a slice length that's a multiple of 64 avoids
/// that waste.
///
/// BLAKE3's output is capped at 2<sup>64</sup>-1 bytes. Extracting more
/// than that — for example by seeking near the end and continuing to
/// read — has unspecified behavior.
///
/// [`Read::read`]: #method.read
pub fn fill(&mut self, buf: &mut [u8]) {
    // All the partial-block bookkeeping lives in fill_inner; `false`
    // selects the plain (overwriting, non-XOR) output path.
    self.fill_inner(buf, false);
}

/// Like `fill`, but XOR each output byte into the existing contents of
/// `buf` instead of overwriting them, advancing the reader's position in
/// the same way. Because XOR is self-inverse, applying the same output
/// twice restores the buffer's original contents.
pub fn fill_xor(&mut self, buf: &mut [u8]) {
self.fill_inner(buf, true);
}

/// Return the current read position in the output stream. This is
/// equivalent to [`Seek::stream_position`], except that it doesn't return
/// a `Result`. The position of a new `OutputReader` starts at 0, and each
Expand Down
83 changes: 42 additions & 41 deletions src/test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -292,51 +292,52 @@ fn test_xof_seek() {

// NOTE(review): like the lib.rs hunk above, this span is a rendered commit
// diff: the pre-commit test body (fixed STEP = 17) and the post-commit body
// (a loop over several step sizes) are interleaved, braces do not balance,
// and an elided region precedes it — it is not compilable as-is. Comments
// below are reviewer notes, not guarantees.
#[test]
fn test_xof_xor() {
const STEP: usize = 17;
for step in [32, 63, 64, 128, 303] {
dbg!(step);
// Produce 1000 bytes of expected XOF output from the reference implementation.
let mut ref_hasher = reference_impl::Hasher::new();
ref_hasher.update(b"foo");
let mut ref_output = [0u8; 1000];
ref_hasher.finalize(&mut ref_output);

let mut ref_hasher = reference_impl::Hasher::new();
ref_hasher.update(b"foo");
let mut ref_output = [0u8; 1000];
ref_hasher.finalize(&mut ref_output);

let mut hasher = crate::Hasher::new();
hasher.update(b"foo");
let mut reader = hasher.finalize_xof();
let mut hasher = crate::Hasher::new();
hasher.update(b"foo");
let mut reader = hasher.finalize_xof();

let mut test_output = [0u8; 1000];
for chunk in test_output.chunks_mut(STEP) {
reader.fill(chunk);
}
assert_eq!(ref_output, test_output);
// Xor'ing the same output should zero the buffer.
reader.set_position(0);
for chunk in test_output.chunks_mut(STEP) {
reader.fill_xor(chunk);
}
assert_eq!([0u8; 1000], test_output);
// Xor'ing the same output again should reproduce the original.
reader.set_position(0);
for chunk in test_output.chunks_mut(STEP) {
reader.fill_xor(chunk);
}
assert_eq!(ref_output, test_output);
let mut test_output = [0u8; 1000];
for chunk in test_output.chunks_mut(step) {
reader.fill(chunk);
}
assert_eq!(ref_output, test_output);
// Xor'ing the same output should zero the buffer.
reader.set_position(0);
for chunk in test_output.chunks_mut(step) {
reader.fill_xor(chunk);
}
assert_eq!([0u8; 1000], test_output);
// Xor'ing the same output again should reproduce the original.
reader.set_position(0);
for chunk in test_output.chunks_mut(step) {
reader.fill_xor(chunk);
}
assert_eq!(ref_output, test_output);

// Repeat the same test but starting at offset 500.
reader.set_position(500);
for chunk in test_output[..500].chunks_mut(STEP) {
reader.fill(chunk);
}
assert_eq!(ref_output[500..], test_output[..500]);
reader.set_position(500);
for chunk in test_output[..500].chunks_mut(STEP) {
reader.fill_xor(chunk);
}
assert_eq!([0u8; 500], test_output[..500]);
reader.set_position(500);
for chunk in test_output[..500].chunks_mut(STEP) {
reader.fill_xor(chunk);
// Repeat the same test but starting at offset 500.
reader.set_position(500);
for chunk in test_output[..500].chunks_mut(step) {
reader.fill(chunk);
}
assert_eq!(ref_output[500..], test_output[..500]);
reader.set_position(500);
for chunk in test_output[..500].chunks_mut(step) {
reader.fill_xor(chunk);
}
assert_eq!([0u8; 500], test_output[..500]);
reader.set_position(500);
for chunk in test_output[..500].chunks_mut(step) {
reader.fill_xor(chunk);
}
assert_eq!(ref_output[500..], test_output[..500]);
}
assert_eq!(ref_output[500..], test_output[..500]);
}

#[test]
Expand Down

0 comments on commit 0c55f95

Please sign in to comment.