From fc6665eefa63223ae488c3cebab911f495dd4014 Mon Sep 17 00:00:00 2001 From: Glenn Griffin Date: Tue, 20 Nov 2018 11:22:30 -0800 Subject: [PATCH 1/3] Apply rustfmt --- benches/benchmarks.rs | 8 ++++--- examples/make_tables.rs | 6 ++--- src/chunked_encoder.rs | 12 +++------- src/decode.rs | 45 ++++++++++++++++++++++++++------------ src/display.rs | 5 +++-- src/encode.rs | 21 +++++------------- src/lib.rs | 23 +++++++++---------- src/tests.rs | 7 ++---- src/write/encoder.rs | 27 +++++++++++++---------- src/write/encoder_tests.rs | 32 ++++++++++++++------------- tests/decode.rs | 2 +- tests/tests.rs | 7 ++++-- 12 files changed, 104 insertions(+), 91 deletions(-) diff --git a/benches/benchmarks.rs b/benches/benchmarks.rs index acc90f0c..183737ef 100644 --- a/benches/benchmarks.rs +++ b/benches/benchmarks.rs @@ -5,10 +5,12 @@ extern crate rand; extern crate test; use base64::display; -use base64::{decode, decode_config_buf, decode_config_slice, encode, encode_config_buf, - encode_config_slice, write, Config, STANDARD}; +use base64::{ + decode, decode_config_buf, decode_config_slice, encode, encode_config_buf, encode_config_slice, + write, Config, STANDARD, +}; -use rand::{Rng, FromEntropy}; +use rand::{FromEntropy, Rng}; use std::io::Write; use test::Bencher; diff --git a/examples/make_tables.rs b/examples/make_tables.rs index f8d6f37a..9f170c04 100644 --- a/examples/make_tables.rs +++ b/examples/make_tables.rs @@ -33,11 +33,11 @@ fn main() { print_decode_table(&url_alphabet, "URL_SAFE_DECODE", 0); // ./0123456789 - let crypt_alphabet: Vec = (b'.'..(b'9'+1)) + let crypt_alphabet: Vec = (b'.'..(b'9' + 1)) // A-Z - .chain(b'A'..(b'Z'+1)) + .chain(b'A'..(b'Z' + 1)) // a-z - .chain(b'a'..(b'z'+1)) + .chain(b'a'..(b'z' + 1)) .collect(); print_encode_table(&crypt_alphabet, "CRYPT_ENCODE", 0); print_decode_table(&crypt_alphabet, "CRYPT_DECODE", 0); diff --git a/src/chunked_encoder.rs b/src/chunked_encoder.rs index 8becab65..15f08e95 100644 --- a/src/chunked_encoder.rs +++ 
b/src/chunked_encoder.rs @@ -77,7 +77,6 @@ fn max_input_length(encoded_buf_len: usize, config: &Config) -> usize { (effective_buf_len / 4) * 3 } - // A really simple sink that just appends to a string pub(crate) struct StringSink<'a> { string: &'a mut String, @@ -85,9 +84,7 @@ pub(crate) struct StringSink<'a> { impl<'a> StringSink<'a> { pub(crate) fn new(s: &mut String) -> StringSink { - StringSink { - string: s, - } + StringSink { string: s } } } @@ -110,7 +107,7 @@ pub mod tests { use *; use self::rand::distributions::{Distribution, Range}; - use self::rand::{Rng, FromEntropy}; + use self::rand::{FromEntropy, Rng}; #[test] fn chunked_encode_empty() { @@ -173,10 +170,7 @@ pub mod tests { #[test] fn max_input_length_cant_use_extra_single_encoded_byte() { - let config = Config::new( - CharacterSet::Standard, - false, - ); + let config = Config::new(CharacterSet::Standard, false); assert_eq!(300, max_input_length(401, &config)); } diff --git a/src/decode.rs b/src/decode.rs index 9e5a7621..bdbb3223 100644 --- a/src/decode.rs +++ b/src/decode.rs @@ -180,7 +180,8 @@ fn num_chunks(input: &[u8]) -> usize { input .len() .checked_add(INPUT_CHUNK_LEN - 1) - .expect("Overflow when calculating number of chunks in input") / INPUT_CHUNK_LEN + .expect("Overflow when calculating number of chunks in input") + / INPUT_CHUNK_LEN } /// Helper to avoid duplicating num_chunks calculation, which is costly on short inputs. 
@@ -402,7 +403,10 @@ fn decode_helper( let mask = !0 >> leftover_bits_ready_to_append; if (leftover_bits & mask) != 0 { // last morsel is at `morsels_in_leftover` - 1 - return Err(DecodeError::InvalidLastSymbol(start_of_leftovers + morsels_in_leftover - 1, last_symbol)); + return Err(DecodeError::InvalidLastSymbol( + start_of_leftovers + morsels_in_leftover - 1, + last_symbol, + )); } let mut leftover_bits_appended_to_buf = 0; @@ -543,8 +547,7 @@ mod tests { use tests::{assert_encode_sanity, random_config}; use self::rand::distributions::{Distribution, Range}; - use self::rand::{Rng, FromEntropy}; - + use self::rand::{FromEntropy, Rng}; #[test] fn decode_chunk_precise_writes_only_6_bytes() { @@ -722,7 +725,10 @@ mod tests { assert_eq!(Err(DecodeError::InvalidLastSymbol(2, b'X')), decode("iYX=")); // also works when there are 2 quads in the last block - assert_eq!(Err(DecodeError::InvalidLastSymbol(6, b'X')), decode("AAAAiYX=")); + assert_eq!( + Err(DecodeError::InvalidLastSymbol(6, b'X')), + decode("AAAAiYX=") + ); } #[test] @@ -739,7 +745,10 @@ mod tests { assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'/')), decode("//==")); // also works when there are 2 quads in the last block - assert_eq!(Err(DecodeError::InvalidLastSymbol(5, b'x')), decode("AAAA/x==")); + assert_eq!( + Err(DecodeError::InvalidLastSymbol(5, b'x')), + decode("AAAA/x==") + ); } #[test] @@ -757,7 +766,7 @@ mod tests { v.extend_from_slice(&bytes[..]); assert!(base64_to_bytes.insert(b64, v).is_none()); - }; + } } // every possible combination of symbols must either decode to 2 bytes or get InvalidLastSymbol @@ -772,9 +781,13 @@ mod tests { symbols[3] = b'='; match base64_to_bytes.get(&symbols[..]) { - Some(bytes) => assert_eq!(Ok(bytes.to_vec()), decode_config(&symbols, STANDARD)), - None => assert_eq!(Err(DecodeError::InvalidLastSymbol(2, s3)), - decode_config(&symbols[..], STANDARD)) + Some(bytes) => { + assert_eq!(Ok(bytes.to_vec()), decode_config(&symbols, STANDARD)) + } + None => 
assert_eq!( + Err(DecodeError::InvalidLastSymbol(2, s3)), + decode_config(&symbols[..], STANDARD) + ), } } } @@ -792,7 +805,7 @@ mod tests { v.push(b as u8); assert!(base64_to_bytes.insert(b64, v).is_none()); - }; + } // every possible combination of symbols must either decode to 1 byte or get InvalidLastSymbol @@ -805,9 +818,13 @@ mod tests { symbols[3] = b'='; match base64_to_bytes.get(&symbols[..]) { - Some(bytes) => assert_eq!(Ok(bytes.to_vec()), decode_config(&symbols, STANDARD)), - None => assert_eq!(Err(DecodeError::InvalidLastSymbol(1, s2)), - decode_config(&symbols[..], STANDARD)) + Some(bytes) => { + assert_eq!(Ok(bytes.to_vec()), decode_config(&symbols, STANDARD)) + } + None => assert_eq!( + Err(DecodeError::InvalidLastSymbol(1, s2)), + decode_config(&symbols[..], STANDARD) + ), } } } diff --git a/src/display.rs b/src/display.rs index 464b6bec..3d768294 100644 --- a/src/display.rs +++ b/src/display.rs @@ -54,8 +54,9 @@ impl<'a, 'b: 'a> super::chunked_encoder::Sink for FormatterSink<'a, 'b> { #[cfg(test)] mod tests { - use super::super::chunked_encoder::tests::{chunked_encode_matches_normal_encode_random, - SinkTestHelper}; + use super::super::chunked_encoder::tests::{ + chunked_encode_matches_normal_encode_random, SinkTestHelper, + }; use super::super::*; use super::*; diff --git a/src/encode.rs b/src/encode.rs index e5635820..c8e15f0a 100644 --- a/src/encode.rs +++ b/src/encode.rs @@ -72,9 +72,10 @@ pub fn encode_config_buf>(input: &T, config: Config, buf let mut sink = ::chunked_encoder::StringSink::new(buf); let encoder = ::chunked_encoder::ChunkedEncoder::new(config); - encoder.encode(input_bytes, &mut sink).expect("Writing to a String shouldn't fail") + encoder + .encode(input_bytes, &mut sink) + .expect("Writing to a String shouldn't fail") } - } /// Encode arbitrary octets as base64. @@ -134,12 +135,7 @@ pub fn encode_config_slice>( /// `output` must be of size `encoded_size`. 
/// /// All bytes in `output` will be written to since it is exactly the size of the output. -fn encode_with_padding( - input: &[u8], - config: &Config, - encoded_size: usize, - output: &mut [u8], -) { +fn encode_with_padding(input: &[u8], config: &Config, encoded_size: usize, output: &mut [u8]) { debug_assert_eq!(encoded_size, output.len()); let b64_bytes_written = encode_to_slice(input, output, config.char_set.encode_table()); @@ -326,7 +322,7 @@ mod tests { use {Config, STANDARD, URL_SAFE_NO_PAD}; use self::rand::distributions::{Distribution, Range}; - use self::rand::{Rng, FromEntropy}; + use self::rand::{FromEntropy, Rng}; use std; use std::str; @@ -607,12 +603,7 @@ mod tests { let orig_output_buf = output.to_vec(); - encode_with_padding( - &input, - &config, - encoded_size, - &mut output[0..encoded_size], - ); + encode_with_padding(&input, &config, encoded_size, &mut output[0..encoded_size]); // make sure the part beyond b64 is the same garbage it was before assert_eq!(orig_output_buf[encoded_size..], output[encoded_size..]); diff --git a/src/lib.rs b/src/lib.rs index 2c310a54..77c9cfa0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -50,16 +50,23 @@ //! The `_slice` flavors of encode or decode will panic if the provided output slice is too small, #![deny( - missing_docs, trivial_casts, trivial_numeric_casts, unused_extern_crates, unused_import_braces, - unused_results, variant_size_differences, warnings, unsafe_code + missing_docs, + trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_results, + variant_size_differences, + warnings, + unsafe_code )] extern crate byteorder; mod chunked_encoder; pub mod display; -pub mod write; mod tables; +pub mod write; mod encode; pub use encode::{encode, encode_config, encode_config_buf, encode_config_slice}; @@ -116,14 +123,8 @@ pub struct Config { impl Config { /// Create a new `Config`. 
- pub fn new( - char_set: CharacterSet, - pad: bool, - ) -> Config { - Config { - char_set, - pad, - } + pub fn new(char_set: CharacterSet, pad: bool) -> Config { + Config { char_set, pad } } } diff --git a/src/tests.rs b/src/tests.rs index dcbae9f4..c52970a9 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -6,7 +6,7 @@ use *; use std::str; use self::rand::distributions::{Distribution, Range}; -use self::rand::{Rng, FromEntropy}; +use self::rand::{FromEntropy, Rng}; #[test] fn roundtrip_random_config_short() { @@ -42,10 +42,7 @@ pub fn assert_encode_sanity(encoded: &str, config: &Config, input_len: usize) { let _ = str::from_utf8(encoded.as_bytes()).expect("Base64 should be valid utf8"); } -fn roundtrip_random_config( - input_len_range: Range, - iterations: u32, -) { +fn roundtrip_random_config(input_len_range: Range, iterations: u32) { let mut input_buf: Vec = Vec::new(); let mut encoded_buf = String::new(); let mut rng = rand::rngs::SmallRng::from_entropy(); diff --git a/src/write/encoder.rs b/src/write/encoder.rs index 41a9bcbd..dae5c423 100644 --- a/src/write/encoder.rs +++ b/src/write/encoder.rs @@ -1,6 +1,6 @@ -use ::encode::encode_to_slice; -use std::{cmp, fmt}; +use encode::encode_to_slice; use std::io::{Result, Write}; +use std::{cmp, fmt}; use {encode_config_slice, Config}; pub(crate) const BUF_SIZE: usize = 1024; @@ -68,7 +68,7 @@ pub struct EncoderWriter<'a, W: 'a + Write> { /// True iff padding / partial last chunk has been written. finished: bool, /// panic safety: don't write again in destructor if writer panicked while we were writing to it - panicked: bool + panicked: bool, } impl<'a, W: Write> fmt::Debug for EncoderWriter<'a, W> { @@ -93,7 +93,7 @@ impl<'a, W: Write> EncoderWriter<'a, W> { extra_len: 0, output: [0u8; BUF_SIZE], finished: false, - panicked: false + panicked: false, } } @@ -169,11 +169,14 @@ impl<'a, W: Write> Write for EncoderWriter<'a, W> { debug_assert!(extra_input_read_len > 0); // overwrite only bytes that weren't already used. 
If we need to rollback extra_len // (when the subsequent write errors), the old leading bytes will still be there. - self.extra[self.extra_len..MIN_ENCODE_CHUNK_SIZE].copy_from_slice(&input[0..extra_input_read_len]); + self.extra[self.extra_len..MIN_ENCODE_CHUNK_SIZE] + .copy_from_slice(&input[0..extra_input_read_len]); - let len = encode_to_slice(&self.extra[0..MIN_ENCODE_CHUNK_SIZE], - &mut self.output[..], - self.config.char_set.encode_table()); + let len = encode_to_slice( + &self.extra[0..MIN_ENCODE_CHUNK_SIZE], + &mut self.output[..], + self.config.char_set.encode_table(), + ); debug_assert_eq!(4, len); input = &input[extra_input_read_len..]; @@ -185,7 +188,7 @@ impl<'a, W: Write> Write for EncoderWriter<'a, W> { // and don't read more than can be encoded max_input_len = MAX_INPUT_LEN - MIN_ENCODE_CHUNK_SIZE; - // fall through to normal encoding + // fall through to normal encoding } else { // `extra` and `input` are non empty, but `|extra| + |input| < 3`, so there must be // 1 byte in each. 
@@ -205,8 +208,10 @@ impl<'a, W: Write> Write for EncoderWriter<'a, W> { // either 0 or 1 complete chunks encoded from extra debug_assert!(encoded_size == 0 || encoded_size == 4); - debug_assert!(MAX_INPUT_LEN - max_input_len == 0 - || MAX_INPUT_LEN - max_input_len == MIN_ENCODE_CHUNK_SIZE); + debug_assert!( + MAX_INPUT_LEN - max_input_len == 0 + || MAX_INPUT_LEN - max_input_len == MIN_ENCODE_CHUNK_SIZE + ); // handle complete triples let input_complete_chunks_len = input.len() - (input.len() % MIN_ENCODE_CHUNK_SIZE); diff --git a/src/write/encoder_tests.rs b/src/write/encoder_tests.rs index 4fbf6e5b..6897c5cd 100644 --- a/src/write/encoder_tests.rs +++ b/src/write/encoder_tests.rs @@ -2,10 +2,10 @@ extern crate rand; use super::EncoderWriter; use tests::random_config; -use {encode_config, encode_config_buf, URL_SAFE, STANDARD_NO_PAD}; +use {encode_config, encode_config_buf, STANDARD_NO_PAD, URL_SAFE}; use std::io::{Cursor, Write}; -use std::{cmp, str, io}; +use std::{cmp, io, str}; use self::rand::Rng; @@ -209,7 +209,8 @@ fn write_2_partials_to_exactly_complete_chunk_encodes_complete_chunk() { } #[test] -fn write_partial_then_enough_to_complete_chunk_but_not_complete_another_chunk_encodes_complete_chunk_without_consuming_remaining() { +fn write_partial_then_enough_to_complete_chunk_but_not_complete_another_chunk_encodes_complete_chunk_without_consuming_remaining( +) { let mut c = Cursor::new(Vec::new()); { let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD); @@ -245,7 +246,8 @@ fn write_partial_then_enough_to_complete_chunk_and_another_chunk_encodes_complet } #[test] -fn write_partial_then_enough_to_complete_chunk_and_another_chunk_and_another_partial_chunk_encodes_only_complete_chunks() { +fn write_partial_then_enough_to_complete_chunk_and_another_chunk_and_another_partial_chunk_encodes_only_complete_chunks( +) { let mut c = Cursor::new(Vec::new()); { let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD); @@ -355,13 +357,13 @@ fn 
retrying_writes_that_error_with_interrupted_works() { while bytes_consumed < orig_len { // use short inputs since we want to use `extra` a lot as that's what needs rollback // when errors occur - let input_len: usize = cmp::min(rng.gen_range(0, 10), - orig_len - bytes_consumed); + let input_len: usize = cmp::min(rng.gen_range(0, 10), orig_len - bytes_consumed); // write a little bit of the data - retry_interrupted_write_all(&mut stream_encoder, - &orig_data[bytes_consumed..bytes_consumed + input_len]) - .unwrap(); + retry_interrupted_write_all( + &mut stream_encoder, + &orig_data[bytes_consumed..bytes_consumed + input_len], + ).unwrap(); bytes_consumed += input_len; } @@ -372,8 +374,8 @@ fn retrying_writes_that_error_with_interrupted_works() { Ok(_) => break, Err(e) => match e.kind() { io::ErrorKind::Interrupted => continue, - _ => Err(e).unwrap() // bail - } + _ => Err(e).unwrap(), // bail + }, } } @@ -399,7 +401,7 @@ fn retry_interrupted_write_all(w: &mut W, buf: &[u8]) -> io::Result<() println!("got kind: {:?}", e.kind()); return Err(e); } - } + }, } } @@ -431,8 +433,8 @@ fn do_encode_random_config_matches_normal_encode(max_input_len: usize) { let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, config); let mut bytes_consumed = 0; while bytes_consumed < orig_len { - let input_len: usize = cmp::min(rng.gen_range(0, max_input_len), - orig_len - bytes_consumed); + let input_len: usize = + cmp::min(rng.gen_range(0, max_input_len), orig_len - bytes_consumed); // write a little bit of the data stream_encoder @@ -476,4 +478,4 @@ impl<'a, W: Write, R: Rng> Write for InterruptingWriter<'a, W, R> { self.w.flush() } -} \ No newline at end of file +} diff --git a/tests/decode.rs b/tests/decode.rs index 2b9aa607..eafa4417 100644 --- a/tests/decode.rs +++ b/tests/decode.rs @@ -307,4 +307,4 @@ fn decode_reject_invalid_bytes_with_correct_error() { fn config_std_pad() -> Config { Config::new(CharacterSet::Standard, true) -} \ No newline at end of file +} diff --git 
a/tests/tests.rs b/tests/tests.rs index 2d3f3ab3..699cb762 100644 --- a/tests/tests.rs +++ b/tests/tests.rs @@ -1,7 +1,7 @@ extern crate base64; extern crate rand; -use rand::{Rng, FromEntropy}; +use rand::{FromEntropy, Rng}; use base64::*; @@ -141,7 +141,10 @@ fn display_wrapper_matches_normal_encode() { assert_eq!( encode(&bytes), - format!("{}", base64::display::Base64Display::with_config(&bytes, STANDARD)) + format!( + "{}", + base64::display::Base64Display::with_config(&bytes, STANDARD) + ) ); } From afe39edb1a70ddeeba2f17b30da10bf54b5d27d9 Mon Sep 17 00:00:00 2001 From: Glenn Griffin Date: Tue, 20 Nov 2018 11:33:34 -0800 Subject: [PATCH 2/3] Make clippy happy --- src/chunked_encoder.rs | 14 +++++++------- src/decode.rs | 14 +++++++------- src/encode.rs | 36 ++++++++++++++++++------------------ src/lib.rs | 9 +++++---- src/tables.rs | 12 ++++++------ src/tests.rs | 6 +++--- src/write/encoder.rs | 6 +++--- 7 files changed, 49 insertions(+), 48 deletions(-) diff --git a/src/chunked_encoder.rs b/src/chunked_encoder.rs index 15f08e95..b37e4bb1 100644 --- a/src/chunked_encoder.rs +++ b/src/chunked_encoder.rs @@ -22,7 +22,7 @@ impl ChunkedEncoder { pub fn new(config: Config) -> ChunkedEncoder { ChunkedEncoder { config, - max_input_chunk_len: max_input_length(BUF_SIZE, &config), + max_input_chunk_len: max_input_length(BUF_SIZE, config), } } @@ -63,7 +63,7 @@ impl ChunkedEncoder { /// /// The input length will always be a multiple of 3 so that no encoding state has to be carried over /// between chunks. 
-fn max_input_length(encoded_buf_len: usize, config: &Config) -> usize { +fn max_input_length(encoded_buf_len: usize, config: Config) -> usize { let effective_buf_len = if config.pad { // make room for padding encoded_buf_len @@ -147,31 +147,31 @@ pub mod tests { #[test] fn max_input_length_no_pad() { let config = config_with_pad(false); - assert_eq!(768, max_input_length(1024, &config)); + assert_eq!(768, max_input_length(1024, config)); } #[test] fn max_input_length_with_pad_decrements_one_triple() { let config = config_with_pad(true); - assert_eq!(765, max_input_length(1024, &config)); + assert_eq!(765, max_input_length(1024, config)); } #[test] fn max_input_length_with_pad_one_byte_short() { let config = config_with_pad(true); - assert_eq!(765, max_input_length(1025, &config)); + assert_eq!(765, max_input_length(1025, config)); } #[test] fn max_input_length_with_pad_fits_exactly() { let config = config_with_pad(true); - assert_eq!(768, max_input_length(1026, &config)); + assert_eq!(768, max_input_length(1026, config)); } #[test] fn max_input_length_cant_use_extra_single_encoded_byte() { let config = Config::new(CharacterSet::Standard, false); - assert_eq!(300, max_input_length(401, &config)); + assert_eq!(300, max_input_length(401, config)); } pub fn chunked_encode_matches_normal_encode_random(sink_test_helper: &S) { diff --git a/src/decode.rs b/src/decode.rs index bdbb3223..49234341 100644 --- a/src/decode.rs +++ b/src/decode.rs @@ -143,7 +143,7 @@ pub fn decode_config_buf>( let bytes_written; { let buffer_slice = &mut buffer.as_mut_slice()[starting_output_len..]; - bytes_written = decode_helper(input_bytes, num_chunks, &config.char_set, buffer_slice)?; + bytes_written = decode_helper(input_bytes, num_chunks, config.char_set, buffer_slice)?; } buffer.truncate(starting_output_len + bytes_written); @@ -170,7 +170,7 @@ pub fn decode_config_slice>( decode_helper( input_bytes, num_chunks(input_bytes), - &config.char_set, + config.char_set, output, ) } @@ -193,7 
+193,7 @@ fn num_chunks(input: &[u8]) -> usize { fn decode_helper( input: &[u8], num_chunks: usize, - char_set: &CharacterSet, + char_set: CharacterSet, output: &mut [u8], ) -> Result { let decode_table = char_set.decode_table(); @@ -313,7 +313,7 @@ fn decode_helper( } // always have one more (possibly partial) block of 8 input - debug_assert!(input.len() - input_index > 1 || input.len() == 0); + debug_assert!(input.len() - input_index > 1 || input.is_empty()); debug_assert!(input.len() - input_index <= 8); // Stage 4 @@ -593,7 +593,7 @@ mod tests { let config = random_config(&mut rng); encode_config_buf(&orig_data, config, &mut encoded_data); - assert_encode_sanity(&encoded_data, &config, input_len); + assert_encode_sanity(&encoded_data, config, input_len); let prefix_len = prefix_len_range.sample(&mut rng); @@ -648,7 +648,7 @@ mod tests { let config = random_config(&mut rng); encode_config_buf(&orig_data, config, &mut encoded_data); - assert_encode_sanity(&encoded_data, &config, input_len); + assert_encode_sanity(&encoded_data, config, input_len); // fill the buffer with random garbage, long enough to have some room before and after for _ in 0..5000 { @@ -700,7 +700,7 @@ mod tests { let config = random_config(&mut rng); encode_config_buf(&orig_data, config, &mut encoded_data); - assert_encode_sanity(&encoded_data, &config, input_len); + assert_encode_sanity(&encoded_data, config, input_len); decode_buf.resize(input_len, 0); diff --git a/src/encode.rs b/src/encode.rs index c8e15f0a..96e39b65 100644 --- a/src/encode.rs +++ b/src/encode.rs @@ -36,7 +36,7 @@ pub fn encode>(input: &T) -> String { ///} ///``` pub fn encode_config>(input: &T, config: Config) -> String { - let mut buf = match encoded_size(input.as_ref().len(), &config) { + let mut buf = match encoded_size(input.as_ref().len(), config) { Some(n) => vec![0; n], None => panic!("integer overflow when calculating buffer size"), }; @@ -44,7 +44,7 @@ pub fn encode_config>(input: &T, config: Config) -> Stri let 
encoded_len = encode_config_slice(input.as_ref(), config, &mut buf[..]); debug_assert_eq!(encoded_len, buf.len()); - return String::from_utf8(buf).expect("Invalid UTF8"); + String::from_utf8(buf).expect("Invalid UTF8") } ///Encode arbitrary octets as base64. @@ -115,12 +115,12 @@ pub fn encode_config_slice>( ) -> usize { let input_bytes = input.as_ref(); - let encoded_size = encoded_size(input_bytes.len(), &config) + let encoded_size = encoded_size(input_bytes.len(), config) .expect("usize overflow when calculating buffer size"); let mut b64_output = &mut output[0..encoded_size]; - encode_with_padding(&input_bytes, &config, encoded_size, &mut b64_output); + encode_with_padding(&input_bytes, config, encoded_size, &mut b64_output); encoded_size } @@ -135,7 +135,7 @@ pub fn encode_config_slice>( /// `output` must be of size `encoded_size`. /// /// All bytes in `output` will be written to since it is exactly the size of the output. -fn encode_with_padding(input: &[u8], config: &Config, encoded_size: usize, output: &mut [u8]) { +fn encode_with_padding(input: &[u8], config: Config, encoded_size: usize, output: &mut [u8]) { debug_assert_eq!(encoded_size, output.len()); let b64_bytes_written = encode_to_slice(input, output, config.char_set.encode_table()); @@ -275,7 +275,7 @@ pub fn encode_to_slice(input: &[u8], output: &mut [u8], encode_table: &[u8; 64]) } /// calculate the base64 encoded string size, including padding if appropriate -pub fn encoded_size(bytes_len: usize, config: &Config) -> Option { +pub fn encoded_size(bytes_len: usize, config: Config) -> Option { let rem = bytes_len % 3; let complete_input_chunks = bytes_len / 3; @@ -378,7 +378,7 @@ mod tests { #[test] fn encoded_size_overflow() { - assert_eq!(None, encoded_size(std::usize::MAX, &STANDARD)); + assert_eq!(None, encoded_size(std::usize::MAX, STANDARD)); } #[test] @@ -423,8 +423,8 @@ mod tests { encoded_data_no_prefix.len() + prefix_len, encoded_data_with_prefix.len() ); - 
assert_encode_sanity(&encoded_data_no_prefix, &config, input_len); - assert_encode_sanity(&encoded_data_with_prefix[prefix_len..], &config, input_len); + assert_encode_sanity(&encoded_data_no_prefix, config, input_len); + assert_encode_sanity(&encoded_data_with_prefix[prefix_len..], config, input_len); // append plain encode onto prefix prefix.push_str(&mut encoded_data_no_prefix); @@ -468,7 +468,7 @@ mod tests { let config = random_config(&mut rng); - let encoded_size = encoded_size(input_len, &config).unwrap(); + let encoded_size = encoded_size(input_len, config).unwrap(); assert_eq!( encoded_size, @@ -477,7 +477,7 @@ mod tests { assert_encode_sanity( std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(), - &config, + config, input_len, ); @@ -514,7 +514,7 @@ mod tests { let config = random_config(&mut rng); - let encoded_size = encoded_size(input_len, &config).unwrap(); + let encoded_size = encoded_size(input_len, config).unwrap(); encoded_data.resize(encoded_size, 0); @@ -525,7 +525,7 @@ mod tests { assert_encode_sanity( std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(), - &config, + config, input_len, ); @@ -556,7 +556,7 @@ mod tests { let config = random_config(&mut rng); // fill up the output buffer with garbage - let encoded_size = encoded_size(input_len, &config).unwrap(); + let encoded_size = encoded_size(input_len, config).unwrap(); for _ in 0..encoded_size { output.push(rng.gen()); } @@ -596,14 +596,14 @@ mod tests { let config = random_config(&mut rng); // fill up the output buffer with garbage - let encoded_size = encoded_size(input_len, &config).unwrap(); + let encoded_size = encoded_size(input_len, config).unwrap(); for _ in 0..encoded_size + 1000 { output.push(rng.gen()); } let orig_output_buf = output.to_vec(); - encode_with_padding(&input, &config, encoded_size, &mut output[0..encoded_size]); + encode_with_padding(&input, config, encoded_size, &mut output[0..encoded_size]); // make sure the part beyond b64 is the same garbage it 
was before assert_eq!(orig_output_buf[encoded_size..], output[encoded_size..]); @@ -641,7 +641,7 @@ mod tests { } fn assert_encoded_length(input_len: usize, encoded_len: usize, config: Config) { - assert_eq!(encoded_len, encoded_size(input_len, &config).unwrap()); + assert_eq!(encoded_len, encoded_size(input_len, config).unwrap()); let mut bytes: Vec = Vec::new(); let mut rng = rand::rngs::SmallRng::from_entropy(); @@ -651,7 +651,7 @@ mod tests { } let encoded = encode_config(&bytes, config); - assert_encode_sanity(&encoded, &config, input_len); + assert_encode_sanity(&encoded, config, input_len); assert_eq!(encoded_len, encoded.len()); } diff --git a/src/lib.rs b/src/lib.rs index 77c9cfa0..812576ec 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -49,6 +49,7 @@ //! //! The `_slice` flavors of encode or decode will panic if the provided output slice is too small, +#![cfg_attr(feature = "cargo-clippy", allow(cast_lossless))] #![deny( missing_docs, trivial_casts, @@ -95,16 +96,16 @@ pub enum CharacterSet { } impl CharacterSet { - fn encode_table(&self) -> &'static [u8; 64] { - match *self { + fn encode_table(self) -> &'static [u8; 64] { + match self { CharacterSet::Standard => tables::STANDARD_ENCODE, CharacterSet::UrlSafe => tables::URL_SAFE_ENCODE, CharacterSet::Crypt => tables::CRYPT_ENCODE, } } - fn decode_table(&self) -> &'static [u8; 256] { - match *self { + fn decode_table(self) -> &'static [u8; 256] { + match self { CharacterSet::Standard => tables::STANDARD_DECODE, CharacterSet::UrlSafe => tables::URL_SAFE_DECODE, CharacterSet::Crypt => tables::CRYPT_DECODE, diff --git a/src/tables.rs b/src/tables.rs index af3950b7..a1466c3e 100644 --- a/src/tables.rs +++ b/src/tables.rs @@ -1,6 +1,6 @@ pub const INVALID_VALUE: u8 = 255; #[cfg_attr(rustfmt, rustfmt_skip)] -pub const STANDARD_ENCODE: &'static [u8; 64] = &[ +pub const STANDARD_ENCODE: &[u8; 64] = &[ 65, // input 0 (0x0) => 'A' (0x41) 66, // input 1 (0x1) => 'B' (0x42) 67, // input 2 (0x2) => 'C' (0x43) @@ -67,7 
+67,7 @@ pub const STANDARD_ENCODE: &'static [u8; 64] = &[ 47, // input 63 (0x3F) => '/' (0x2F) ]; #[cfg_attr(rustfmt, rustfmt_skip)] -pub const STANDARD_DECODE: &'static [u8; 256] = &[ +pub const STANDARD_DECODE: &[u8; 256] = &[ INVALID_VALUE, // input 0 (0x0) INVALID_VALUE, // input 1 (0x1) INVALID_VALUE, // input 2 (0x2) @@ -326,7 +326,7 @@ pub const STANDARD_DECODE: &'static [u8; 256] = &[ INVALID_VALUE, // input 255 (0xFF) ]; #[cfg_attr(rustfmt, rustfmt_skip)] -pub const URL_SAFE_ENCODE: &'static [u8; 64] = &[ +pub const URL_SAFE_ENCODE: &[u8; 64] = &[ 65, // input 0 (0x0) => 'A' (0x41) 66, // input 1 (0x1) => 'B' (0x42) 67, // input 2 (0x2) => 'C' (0x43) @@ -393,7 +393,7 @@ pub const URL_SAFE_ENCODE: &'static [u8; 64] = &[ 95, // input 63 (0x3F) => '_' (0x5F) ]; #[cfg_attr(rustfmt, rustfmt_skip)] -pub const URL_SAFE_DECODE: &'static [u8; 256] = &[ +pub const URL_SAFE_DECODE: &[u8; 256] = &[ INVALID_VALUE, // input 0 (0x0) INVALID_VALUE, // input 1 (0x1) INVALID_VALUE, // input 2 (0x2) @@ -652,7 +652,7 @@ pub const URL_SAFE_DECODE: &'static [u8; 256] = &[ INVALID_VALUE, // input 255 (0xFF) ]; #[cfg_attr(rustfmt, rustfmt_skip)] -pub const CRYPT_ENCODE: &'static [u8; 64] = &[ +pub const CRYPT_ENCODE: &[u8; 64] = &[ 46, // input 0 (0x0) => '.' 
(0x2E) 47, // input 1 (0x1) => '/' (0x2F) 48, // input 2 (0x2) => '0' (0x30) @@ -719,7 +719,7 @@ pub const CRYPT_ENCODE: &'static [u8; 64] = &[ 122, // input 63 (0x3F) => 'z' (0x7A) ]; #[cfg_attr(rustfmt, rustfmt_skip)] -pub const CRYPT_DECODE: &'static [u8; 256] = &[ +pub const CRYPT_DECODE: &[u8; 256] = &[ INVALID_VALUE, // input 0 (0x0) INVALID_VALUE, // input 1 (0x1) INVALID_VALUE, // input 2 (0x2) diff --git a/src/tests.rs b/src/tests.rs index c52970a9..dda7f306 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -19,7 +19,7 @@ fn roundtrip_random_config_long() { roundtrip_random_config(Range::new(0, 1000), 10_000); } -pub fn assert_encode_sanity(encoded: &str, config: &Config, input_len: usize) { +pub fn assert_encode_sanity(encoded: &str, config: Config, input_len: usize) { let input_rem = input_len % 3; let expected_padding_len = if input_rem > 0 { if config.pad { @@ -31,7 +31,7 @@ pub fn assert_encode_sanity(encoded: &str, config: &Config, input_len: usize) { 0 }; - let expected_encoded_len = encoded_size(input_len, &config).unwrap(); + let expected_encoded_len = encoded_size(input_len, config).unwrap(); assert_eq!(expected_encoded_len, encoded.len()); @@ -61,7 +61,7 @@ fn roundtrip_random_config(input_len_range: Range, iterations: u32) { encode_config_buf(&input_buf, config, &mut encoded_buf); - assert_encode_sanity(&encoded_buf, &config, input_len); + assert_encode_sanity(&encoded_buf, config, input_len); assert_eq!(input_buf, decode_config(&encoded_buf, config).unwrap()); } diff --git a/src/write/encoder.rs b/src/write/encoder.rs index dae5c423..b54fca45 100644 --- a/src/write/encoder.rs +++ b/src/write/encoder.rs @@ -136,7 +136,7 @@ impl<'a, W: Write> Write for EncoderWriter<'a, W> { panic!("Cannot write more after calling finish()"); } - if input.len() == 0 { + if input.is_empty() { return Ok(0); } @@ -228,11 +228,11 @@ impl<'a, W: Write> Write for EncoderWriter<'a, W> { let r = self.w.write(&self.output[..encoded_size]); self.panicked = false; match r { 
- Ok(_) => return Ok(extra_input_read_len + input_chunks_to_encode_len), + Ok(_) => Ok(extra_input_read_len + input_chunks_to_encode_len), Err(_) => { // in case we filled and encoded `extra`, reset extra_len self.extra_len = orig_extra_len; - return r; + r } } From c26a1af6583d61ad717a740cb690b515edea66d2 Mon Sep 17 00:00:00 2001 From: Glenn Griffin Date: Tue, 20 Nov 2018 09:35:10 -0800 Subject: [PATCH 3/3] Use Criterion for benchmarks. Criterion can be run on stable rust and provides much more reliable analysis. It also provides a convenient method for comparing benchmarks between different baselines using --save-baseline and --baseline. --- Cargo.toml | 5 + benches/benchmarks.rs | 340 +++++++++--------------------------------- 2 files changed, 75 insertions(+), 270 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d6fb8b9a..b30c2bd0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,10 +10,15 @@ keywords = ["base64", "utf8", "encode", "decode"] categories = ["encoding"] license = "MIT/Apache-2.0" +[[bench]] +name = "benchmarks" +harness = false + [dependencies] byteorder = "1.2.6" [dev-dependencies] +criterion = "0.2" rand = "0.5.5" [profile.bench] diff --git a/benches/benchmarks.rs b/benches/benchmarks.rs index 183737ef..e6ae3d20 100644 --- a/benches/benchmarks.rs +++ b/benches/benchmarks.rs @@ -1,350 +1,105 @@ -#![feature(test)] - extern crate base64; +#[macro_use] +extern crate criterion; extern crate rand; -extern crate test; use base64::display; use base64::{ decode, decode_config_buf, decode_config_slice, encode, encode_config_buf, encode_config_slice, - write, Config, STANDARD, + write, Config, }; +use criterion::{black_box, Bencher, Criterion, ParameterizedBenchmark, Throughput}; use rand::{FromEntropy, Rng}; use std::io::Write; -use test::Bencher; -
-#[bench] -fn encode_3b(b: &mut Bencher) { - do_encode_bench(b, 3) -} -
-#[bench] -fn encode_3b_reuse_buf(b: &mut Bencher) { - do_encode_bench_reuse_buf(b, 3, STANDARD) -} -
-#[bench] -fn 
encode_3b_slice(b: &mut Bencher) { - do_encode_bench_slice(b, 3, STANDARD) -} - -#[bench] -fn encode_50b(b: &mut Bencher) { - do_encode_bench(b, 50) -} - -#[bench] -fn encode_50b_display(b: &mut Bencher) { - do_encode_bench_display(b, 50) -} - -#[bench] -fn encode_50b_reuse_buf(b: &mut Bencher) { - do_encode_bench_reuse_buf(b, 50, STANDARD) -} - -#[bench] -fn encode_50b_slice(b: &mut Bencher) { - do_encode_bench_slice(b, 50, STANDARD) -} - -#[bench] -fn encode_100b(b: &mut Bencher) { - do_encode_bench(b, 100) -} - -#[bench] -fn encode_100b_reuse_buf(b: &mut Bencher) { - do_encode_bench_reuse_buf(b, 100, STANDARD) -} - -#[bench] -fn encode_500b(b: &mut Bencher) { - do_encode_bench(b, 500) -} - -#[bench] -fn encode_500b_reuse_buf(b: &mut Bencher) { - do_encode_bench_reuse_buf(b, 500, STANDARD) -} - -#[bench] -fn encode_3kib(b: &mut Bencher) { - do_encode_bench(b, 3 * 1024) -} - -#[bench] -fn encode_3kib_display(b: &mut Bencher) { - do_encode_bench_display(b, 3 * 1024) -} - -#[bench] -fn encode_3kib_reuse_buf(b: &mut Bencher) { - do_encode_bench_reuse_buf(b, 3 * 1024, STANDARD) -} - -#[bench] -fn encode_3kib_slice(b: &mut Bencher) { - do_encode_bench_slice(b, 3 * 1024, STANDARD) -} - -#[bench] -fn encode_3kib_reuse_buf_stream(b: &mut Bencher) { - do_encode_bench_stream(b, 3 * 1024, STANDARD) -} - -#[bench] -fn encode_3mib(b: &mut Bencher) { - do_encode_bench(b, 3 * 1024 * 1024) -} - -#[bench] -fn encode_3mib_display(b: &mut Bencher) { - do_encode_bench_display(b, 3 * 1024 * 1024) -} - -#[bench] -fn encode_3mib_reuse_buf(b: &mut Bencher) { - do_encode_bench_reuse_buf(b, 3 * 1024 * 1024, STANDARD) -} - -#[bench] -fn encode_3mib_slice(b: &mut Bencher) { - do_encode_bench_slice(b, 3 * 1024 * 1024, STANDARD) -} - -#[bench] -fn encode_10mib(b: &mut Bencher) { - do_encode_bench(b, 10 * 1024 * 1024) -} - -#[bench] -fn encode_10mib_reuse_buf(b: &mut Bencher) { - do_encode_bench_reuse_buf(b, 10 * 1024 * 1024, STANDARD) -} - -#[bench] -fn encode_30mib(b: &mut Bencher) { - 
do_encode_bench(b, 30 * 1024 * 1024) -} - -#[bench] -fn encode_30mib_reuse_buf(b: &mut Bencher) { - do_encode_bench_reuse_buf(b, 30 * 1024 * 1024, STANDARD) -} - -#[bench] -fn encode_30mib_slice(b: &mut Bencher) { - do_encode_bench_slice(b, 30 * 1024 * 1024, STANDARD) -} - -#[bench] -fn decode_3b(b: &mut Bencher) { - do_decode_bench(b, 3) -} - -#[bench] -fn decode_3b_reuse_buf(b: &mut Bencher) { - do_decode_bench_reuse_buf(b, 3) -} - -#[bench] -fn decode_3b_slice(b: &mut Bencher) { - do_decode_bench_slice(b, 3) -} - -#[bench] -fn decode_50b(b: &mut Bencher) { - do_decode_bench(b, 50) -} - -#[bench] -fn decode_50b_reuse_buf(b: &mut Bencher) { - do_decode_bench_reuse_buf(b, 50) -} - -#[bench] -fn decode_50b_slice(b: &mut Bencher) { - do_decode_bench_slice(b, 50) -} - -#[bench] -fn decode_100b(b: &mut Bencher) { - do_decode_bench(b, 100) -} - -#[bench] -fn decode_100b_reuse_buf(b: &mut Bencher) { - do_decode_bench_reuse_buf(b, 100) -} - -#[bench] -fn decode_500b(b: &mut Bencher) { - do_decode_bench(b, 500) -} - -#[bench] -fn decode_500b_reuse_buf(b: &mut Bencher) { - do_decode_bench_reuse_buf(b, 500) -} - -#[bench] -fn decode_3kib(b: &mut Bencher) { - do_decode_bench(b, 3 * 1024) -} - -#[bench] -fn decode_3kib_reuse_buf(b: &mut Bencher) { - do_decode_bench_reuse_buf(b, 3 * 1024) -} - -#[bench] -fn decode_3kib_slice(b: &mut Bencher) { - do_decode_bench_slice(b, 3 * 1024) -} - -#[bench] -fn decode_3mib(b: &mut Bencher) { - do_decode_bench(b, 3 * 1024 * 1024) -} - -#[bench] -fn decode_3mib_reuse_buf(b: &mut Bencher) { - do_decode_bench_reuse_buf(b, 3 * 1024 * 1024) -} - -#[bench] -fn decode_3mib_slice(b: &mut Bencher) { - do_decode_bench_slice(b, 3 * 1024 * 1024) -} - -#[bench] -fn decode_10mib(b: &mut Bencher) { - do_decode_bench(b, 10 * 1024 * 1024) -} - -#[bench] -fn decode_10mib_reuse_buf(b: &mut Bencher) { - do_decode_bench_reuse_buf(b, 10 * 1024 * 1024) -} - -#[bench] -fn decode_30mib(b: &mut Bencher) { - do_decode_bench(b, 30 * 1024 * 1024) -} - -#[bench] -fn 
decode_30mib_reuse_buf(b: &mut Bencher) { - do_decode_bench_reuse_buf(b, 30 * 1024 * 1024) -} -#[bench] -fn decode_30mib_slice(b: &mut Bencher) { - do_decode_bench_slice(b, 30 * 1024 * 1024) -} +const TEST_CONFIG: Config = base64::STANDARD; -fn do_decode_bench(b: &mut Bencher, size: usize) { +fn do_decode_bench(b: &mut Bencher, &size: &usize) { let mut v: Vec = Vec::with_capacity(size * 3 / 4); fill(&mut v); let encoded = encode(&v); - b.bytes = encoded.len() as u64; b.iter(|| { let orig = decode(&encoded); - test::black_box(&orig); + black_box(&orig); }); } -fn do_decode_bench_reuse_buf(b: &mut Bencher, size: usize) { +fn do_decode_bench_reuse_buf(b: &mut Bencher, &size: &usize) { let mut v: Vec = Vec::with_capacity(size * 3 / 4); fill(&mut v); let encoded = encode(&v); let mut buf = Vec::new(); - b.bytes = encoded.len() as u64; b.iter(|| { - decode_config_buf(&encoded, STANDARD, &mut buf).unwrap(); - test::black_box(&buf); + decode_config_buf(&encoded, TEST_CONFIG, &mut buf).unwrap(); + black_box(&buf); buf.clear(); }); } -fn do_decode_bench_slice(b: &mut Bencher, size: usize) { +fn do_decode_bench_slice(b: &mut Bencher, &size: &usize) { let mut v: Vec = Vec::with_capacity(size * 3 / 4); fill(&mut v); let encoded = encode(&v); let mut buf = Vec::new(); buf.resize(size, 0); - b.bytes = encoded.len() as u64; b.iter(|| { - decode_config_slice(&encoded, STANDARD, &mut buf).unwrap(); - test::black_box(&buf); + decode_config_slice(&encoded, TEST_CONFIG, &mut buf).unwrap(); + black_box(&buf); }); } -fn do_encode_bench(b: &mut Bencher, size: usize) { +fn do_encode_bench(b: &mut Bencher, &size: &usize) { let mut v: Vec = Vec::with_capacity(size); fill(&mut v); - - b.bytes = v.len() as u64; b.iter(|| { let e = encode(&v); - test::black_box(&e); + black_box(&e); }); } -fn do_encode_bench_display(b: &mut Bencher, size: usize) { +fn do_encode_bench_display(b: &mut Bencher, &size: &usize) { let mut v: Vec = Vec::with_capacity(size); fill(&mut v); - - b.bytes = v.len() as u64; 
b.iter(|| { - let e = format!("{}", display::Base64Display::with_config(&v, STANDARD)); - test::black_box(&e); + let e = format!("{}", display::Base64Display::with_config(&v, TEST_CONFIG)); + black_box(&e); }); } -fn do_encode_bench_reuse_buf(b: &mut Bencher, size: usize, config: Config) { +fn do_encode_bench_reuse_buf(b: &mut Bencher, &size: &usize) { let mut v: Vec = Vec::with_capacity(size); fill(&mut v); - let mut buf = String::new(); - - b.bytes = v.len() as u64; b.iter(|| { - encode_config_buf(&v, config, &mut buf); + encode_config_buf(&v, TEST_CONFIG, &mut buf); buf.clear(); }); } -fn do_encode_bench_slice(b: &mut Bencher, size: usize, config: Config) { +fn do_encode_bench_slice(b: &mut Bencher, &size: &usize) { let mut v: Vec = Vec::with_capacity(size); fill(&mut v); - let mut buf = Vec::new(); - - b.bytes = v.len() as u64; // conservative estimate of encoded size - buf.resize(size * 2, 0); + buf.resize(v.len() * 2, 0); b.iter(|| { - encode_config_slice(&v, config, &mut buf); + encode_config_slice(&v, TEST_CONFIG, &mut buf); }); } -fn do_encode_bench_stream(b: &mut Bencher, size: usize, config: Config) { +fn do_encode_bench_stream(b: &mut Bencher, &size: &usize) { let mut v: Vec = Vec::with_capacity(size); fill(&mut v); - let mut buf = Vec::new(); - b.bytes = v.len() as u64; - buf.reserve(size * 2); b.iter(|| { buf.clear(); - let mut stream_enc = write::EncoderWriter::new(&mut buf, config); + let mut stream_enc = write::EncoderWriter::new(&mut buf, TEST_CONFIG); stream_enc.write_all(&v).unwrap(); stream_enc.flush().unwrap(); }); @@ -358,3 +113,48 @@ fn fill(v: &mut Vec) { v.push(r.gen::()); } } + +const BYTE_SIZES: [usize; 5] = [3, 50, 100, 500, 3 * 1024]; + +// Benchmarks over these byte sizes take longer so we will run fewer samples to +// keep the benchmark runtime reasonable. 
+const LARGE_BYTE_SIZES: [usize; 3] = [3 * 1024 * 1024, 10 * 1024 * 1024, 30 * 1024 * 1024]; + +fn encode_benchmarks(byte_sizes: &[usize]) -> ParameterizedBenchmark { + ParameterizedBenchmark::new("encode", do_encode_bench, byte_sizes.iter().cloned()) + .warm_up_time(std::time::Duration::from_millis(500)) + .measurement_time(std::time::Duration::from_secs(3)) + .throughput(|s| Throughput::Bytes(*s as u32)) + .with_function("encode_display", do_encode_bench_display) + .with_function("encode_reuse_buf", do_encode_bench_reuse_buf) + .with_function("encode_slice", do_encode_bench_slice) + .with_function("encode_reuse_buf_stream", do_encode_bench_stream) +} + +fn decode_benchmarks(byte_sizes: &[usize]) -> ParameterizedBenchmark { + ParameterizedBenchmark::new("decode", do_decode_bench, byte_sizes.iter().cloned()) + .warm_up_time(std::time::Duration::from_millis(500)) + .measurement_time(std::time::Duration::from_secs(3)) + .throughput(|s| Throughput::Bytes(*s as u32)) + .with_function("decode_reuse_buf", do_decode_bench_reuse_buf) + .with_function("decode_slice", do_decode_bench_slice) +} + +fn bench(c: &mut Criterion) { + c.bench("bench_small_input", encode_benchmarks(&BYTE_SIZES[..])); + + c.bench( + "bench_large_input", + encode_benchmarks(&LARGE_BYTE_SIZES[..]).sample_size(10), + ); + + c.bench("bench_small_input", decode_benchmarks(&BYTE_SIZES[..])); + + c.bench( + "bench_large_input", + decode_benchmarks(&LARGE_BYTE_SIZES[..]).sample_size(10), + ); +} + +criterion_group!(benches, bench); +criterion_main!(benches);