From e003e57478b6f09961eeeddf57895193f92505f0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dawid=20Ci=C4=99=C5=BCarkiewicz?=
Date: Sat, 28 May 2022 20:58:43 -0700
Subject: [PATCH 1/4] Add `consensus_decode_from_finite_reader` optimization

As things are right now, memory exhaustion protection in `Decodable`
is based on checking input-decoded lengths against arbitrary limits,
and on ad-hoc wrapping of collection deserialization in `Take`.

The problems with that are two-fold:

* Potential consensus bugs due to incorrect limits.
* Performance degradation when decoding nested structures, due to
  recursive `Take<Take<...>>` readers.

This change introduces a systematic approach to the problem. A concept
of a "size-limited reader" is introduced: deserialization relies on the
input data running out at an enforced limit, making decoding fail.
Memory exhaustion protection is now achieved by capping allocations to
reasonable values, yet allowing the underlying collections to grow to
accommodate rare but legitimately oversized data (at a tiny performance
cost), combined with reliance on the input data size limit. A set of
simple rules allows avoiding recursive `Take` wrappers.

Fix #997
---
 Cargo.toml | 2 +-
 src/blockdata/script.rs | 8 +-
 src/blockdata/transaction.rs | 39 +++++----
 src/consensus/encode.rs | 158 ++++++++++++++++++++++++++++-------
 src/internal_macros.rs | 10 +++
 src/network/message.rs | 88 ++++++++++---------
 6 files changed, 217 insertions(+), 88 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index d46b3482e8..e912d85690 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "bitcoin"
-version = "0.28.1"
+version = "0.28.2"
 authors = ["Andrew Poelstra "]
 license = "CC0-1.0"
 homepage = "https://github.com/rust-bitcoin/rust-bitcoin/"
diff --git a/src/blockdata/script.rs b/src/blockdata/script.rs
index dd448fd8b8..7660ba0d75 100644
--- a/src/blockdata/script.rs
+++ b/src/blockdata/script.rs
@@ -23,6 +23,7 @@
 //! This module provides the structures and functions needed to support scripts.
 //!
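// --- Editor's illustrative sketch (not part of the patch): the decoding
// --- pattern this commit introduces, recreated on a minimal standalone trait
// --- so it can be read in isolation. The names `Decode`, `MAX_SIZE`, and
// --- `Bytes` are made up for the example; only the technique mirrors the
// --- patch: the public entry point wraps the reader in `io::Take` exactly
// --- once, and the "finite reader" method caps its up-front allocation and
// --- otherwise relies on the limited reader running out of data.
use std::io::{self, Read};

const MAX_SIZE: u64 = 4_000_000; // assumed outer bound, analogous to MAX_VEC_SIZE

trait Decode: Sized {
    // Safe only when `r` is already limited (e.g. wrapped in `io::Take`).
    fn decode_from_finite_reader<R: Read>(r: R) -> io::Result<Self>;

    // Entry point for untrusted readers: enforce the size limit exactly once here.
    fn decode<R: Read>(r: R) -> io::Result<Self> {
        Self::decode_from_finite_reader(r.take(MAX_SIZE))
    }
}

// A length-prefixed byte string, analogous to the patch's `Vec<u8>` decoding.
struct Bytes(Vec<u8>);

impl Decode for Bytes {
    fn decode_from_finite_reader<R: Read>(mut r: R) -> io::Result<Self> {
        let mut len_buf = [0u8; 4];
        r.read_exact(&mut len_buf)?;
        let len = u32::from_le_bytes(len_buf) as usize;

        // Cap the up-front allocation: a malicious length cannot force a huge
        // allocation, a legitimately oversized value merely reallocates, and
        // the finite reader guarantees we can never read past MAX_SIZE.
        let mut buf = Vec::with_capacity(len.min(1024));
        let mut chunk = [0u8; 1024];
        let mut remaining = len;
        while remaining > 0 {
            let n = remaining.min(chunk.len());
            r.read_exact(&mut chunk[..n])?; // fails cleanly once the Take limit is hit
            buf.extend_from_slice(&chunk[..n]);
            remaining -= n;
        }
        Ok(Bytes(buf))
    }
}
// --- End of illustrative sketch; the patch hunk continues below.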
+use consensus::encode::MAX_VEC_SIZE; use prelude::*; use io; @@ -1067,9 +1068,14 @@ impl Encodable for Script { } impl Decodable for Script { + #[inline] + fn consensus_decode_from_finite_reader(d: D) -> Result { + Ok(Script(Decodable::consensus_decode_from_finite_reader(d)?)) + } + #[inline] fn consensus_decode(d: D) -> Result { - Ok(Script(Decodable::consensus_decode(d)?)) + Self::consensus_decode_from_finite_reader(d.take(MAX_VEC_SIZE as u64)) } } diff --git a/src/blockdata/transaction.rs b/src/blockdata/transaction.rs index d4cd54d6bc..8799e12248 100644 --- a/src/blockdata/transaction.rs +++ b/src/blockdata/transaction.rs @@ -638,14 +638,20 @@ impl Encodable for TxIn { } } impl Decodable for TxIn { - fn consensus_decode(mut d: D) -> Result { + #[inline] + fn consensus_decode_from_finite_reader(mut d: D) -> Result { Ok(TxIn { - previous_output: Decodable::consensus_decode(&mut d)?, - script_sig: Decodable::consensus_decode(&mut d)?, - sequence: Decodable::consensus_decode(d)?, + previous_output: Decodable::consensus_decode_from_finite_reader(&mut d)?, + script_sig: Decodable::consensus_decode_from_finite_reader(&mut d)?, + sequence: Decodable::consensus_decode_from_finite_reader(d)?, witness: Witness::default(), }) } + + #[inline] + fn consensus_decode(d: D) -> Result { + Self::consensus_decode_from_finite_reader(d.take(MAX_VEC_SIZE as u64)) + } } impl Encodable for Transaction { @@ -679,20 +685,19 @@ impl Encodable for Transaction { } impl Decodable for Transaction { - fn consensus_decode(d: D) -> Result { - let mut d = d.take(MAX_VEC_SIZE as u64); - let version = i32::consensus_decode(&mut d)?; - let input = Vec::::consensus_decode(&mut d)?; + fn consensus_decode_from_finite_reader(mut d: D) -> Result { + let version = i32::consensus_decode_from_finite_reader(&mut d)?; + let input = Vec::::consensus_decode_from_finite_reader(&mut d)?; // segwit if input.is_empty() { - let segwit_flag = u8::consensus_decode(&mut d)?; + let segwit_flag = u8::consensus_decode_from_finite_reader(&mut d)?; match segwit_flag { // BIP144 input witnesses 1 => { - let mut input = Vec::::consensus_decode(&mut d)?; - let output = Vec::::consensus_decode(&mut d)?; + let mut input = Vec::::consensus_decode_from_finite_reader(&mut d)?; + let output = Vec::::consensus_decode_from_finite_reader(&mut d)?; for txin in input.iter_mut() { - txin.witness = Decodable::consensus_decode(&mut d)?; + txin.witness = Decodable::consensus_decode_from_finite_reader(&mut d)?; } if !input.is_empty() && input.iter().all(|input| input.witness.is_empty()) { Err(encode::Error::ParseFailed("witness flag set but no witnesses present")) @@ -701,7 +706,7 @@ impl Decodable for Transaction { version, input, output, - lock_time: Decodable::consensus_decode(d)?, + lock_time: Decodable::consensus_decode_from_finite_reader(d)?, }) } } @@ -713,11 +718,15 @@ impl Decodable for Transaction { Ok(Transaction { version, input, - output: Decodable::consensus_decode(&mut d)?, - lock_time: Decodable::consensus_decode(d)?, + output: Decodable::consensus_decode_from_finite_reader(&mut d)?, + lock_time: Decodable::consensus_decode_from_finite_reader(d)?, }) } } + + fn consensus_decode(d: D) -> Result { + Self::consensus_decode_from_finite_reader(d.take(MAX_VEC_SIZE as u64)) + } } /// This type is consensus valid but an input including it would prevent the transaction from diff --git a/src/consensus/encode.rs b/src/consensus/encode.rs index 4c24748b86..bd42d2706d 100644 --- a/src/consensus/encode.rs +++ b/src/consensus/encode.rs @@ -29,7 +29,7 @@ use 
prelude::*;
-use core::{fmt, mem, u32, convert::From};
+use core::{self, fmt, mem, u32, convert::From};
 #[cfg(feature = "std")] use std::error;
 use hashes::{sha256d, Hash, sha256};
@@ -166,7 +166,7 @@ pub fn deserialize(data: &[u8]) -> Result {
 /// doesn't consume the entire vector.
 pub fn deserialize_partial(data: &[u8]) -> Result<(T, usize), Error> {
     let mut decoder = Cursor::new(data);
-    let rv = Decodable::consensus_decode(&mut decoder)?;
+    let rv = Decodable::consensus_decode_from_finite_reader(&mut decoder)?;
     let consumed = decoder.position() as usize;

     Ok((rv, consumed))
@@ -318,6 +318,47 @@ pub trait Encodable {
 /// Data which can be encoded in a consensus-consistent way
 pub trait Decodable: Sized {
+    /// Decode `Self` from a size-limited reader.
+    ///
+    /// Like `consensus_decode` but relies on the reader being
+    /// limited in the amount of data it returns, e.g. by
+    /// being wrapped in [`std::io::Take`].
+    ///
+    /// Failing to abide by this requirement might lead to
+    /// memory exhaustion caused by malicious inputs.
+    ///
+    /// Users should default to `consensus_decode`, but
+    /// when data to be decoded is already in a byte vector
+    /// of a limited size, calling this function directly
+    /// might be marginally faster (due to avoiding
+    /// extra checks).
+    ///
+    /// ### Rules for trait implementations
+    ///
+    /// * Simple types that have a fixed size (own and member fields)
+    ///   don't have to override this method, or be concerned with it.
+    /// * Types that deserialize using an externally provided length
+    ///   should implement it:
+    ///   * Make `consensus_decode` forward to `consensus_decode_from_finite_reader`
+    ///     with the reader wrapped by `Take`. Failure to do so, without other
+    ///     forms of memory exhaustion protection, might lead to a resource
+    ///     exhaustion vulnerability.
+    ///   * Put a max cap on things like `Vec::with_capacity` to avoid oversized
+    ///     allocations, and rely on the reader running out of data, and on collections
+    ///     reallocating for legitimately oversized input data, instead of trying
+    ///     to enforce arbitrary length limits.
+    /// * Types that contain other types with a custom `consensus_decode_from_finite_reader`
+    ///   should also implement it, applying the same rules, and in addition make sure to call
+    ///   `consensus_decode_from_finite_reader` on all members, to avoid creating redundant
+    ///   `Take` wrappers. Failure to do so might result only in a tiny performance hit.
+    fn consensus_decode_from_finite_reader(d: D) -> Result {
+        // This method is always strictly less general than `consensus_decode`,
+        // so it's safe, and makes sense, to default to just calling it.
+        // This way most types that don't care about protecting against
+        // resource exhaustion due to malicious input can just ignore it.
+        Self::consensus_decode(d)
+    }
+
     /// Decode an object with a well-defined format
     fn consensus_decode(d: D) -> Result;
 }
@@ -554,23 +595,29 @@ macro_rules! impl_vec {
                 Ok(len)
             }
         }
+
         impl Decodable for Vec<$type> {
             #[inline]
-            fn consensus_decode(mut d: D) -> Result {
-                let len = VarInt::consensus_decode(&mut d)?.0;
-                let byte_size = (len as usize)
-                    .checked_mul(mem::size_of::<$type>())
-                    .ok_or(self::Error::ParseFailed("Invalid length"))?;
-                if byte_size > MAX_VEC_SIZE {
-                    return Err(self::Error::OversizedVectorAllocation { requested: byte_size, max: MAX_VEC_SIZE })
-                }
-                let mut ret = Vec::with_capacity(len as usize);
-                let mut d = d.take(MAX_VEC_SIZE as u64);
+            fn consensus_decode_from_finite_reader(mut d: D) -> Result {
+                let len = VarInt::consensus_decode_from_finite_reader(&mut d)?.0;
+                // Do not allocate upfront more items than if the sequence of this type
+                // occupied roughly a quarter of a block. This should never be the case
+                // for normal data, but even if that's not true - `push` will just
+                // reallocate.
+                // Note: OOM protection relies on the reader eventually running out of
+                // data to feed us.
+                let max_capacity = MAX_VEC_SIZE / 4 / mem::size_of::<$type>();
+                let mut ret = Vec::with_capacity(core::cmp::min(len as usize, max_capacity));
                 for _ in 0..len {
-                    ret.push(Decodable::consensus_decode(&mut d)?);
+                    ret.push(Decodable::consensus_decode_from_finite_reader(&mut d)?);
                 }
                 Ok(ret)
             }
+
+            #[inline]
+            fn consensus_decode(d: D) -> Result {
+                Self::consensus_decode_from_finite_reader(d.take(MAX_VEC_SIZE as u64))
+            }
         }
     }
}
@@ -596,6 +643,33 @@ pub(crate) fn consensus_encode_with_size(data: &[u8], mut s: S) ->
 }

+struct ReadBytesFromFiniteReaderOpts {
+    len: usize,
+    chunk_size: usize,
+}
+
+/// Read `opts.len` bytes from reader, where `opts.len` could potentially be malicious
+///
+/// This function relies on the reader being bounded in the amount of data
+/// it returns for OOM protection. See [`Decodable::consensus_decode_from_finite_reader`].
+#[inline] +fn read_bytes_from_finite_reader(mut d: D, mut opts: ReadBytesFromFiniteReaderOpts) -> Result, Error> { + let mut ret = vec![]; + + assert_ne!(opts.chunk_size, 0); + + while opts.len > 0 { + let chunk_start = ret.len(); + let chunk_size = core::cmp::min(opts.len, opts.chunk_size); + let chunk_end = chunk_start + chunk_size; + ret.resize(chunk_end, 0u8); + d.read_slice(&mut ret[chunk_start..chunk_end])?; + opts.len -= chunk_size; + } + + Ok(ret) +} + impl Encodable for Vec { #[inline] fn consensus_encode(&self, s: S) -> Result { @@ -605,14 +679,15 @@ impl Encodable for Vec { impl Decodable for Vec { #[inline] - fn consensus_decode(mut d: D) -> Result { + fn consensus_decode_from_finite_reader(mut d: D) -> Result { let len = VarInt::consensus_decode(&mut d)?.0 as usize; - if len > MAX_VEC_SIZE { - return Err(self::Error::OversizedVectorAllocation { requested: len, max: MAX_VEC_SIZE }) - } - let mut ret = vec![0u8; len]; - d.read_slice(&mut ret)?; - Ok(ret) + // most real-world vec of bytes data, wouldn't be larger than 128KiB + read_bytes_from_finite_reader(d, ReadBytesFromFiniteReaderOpts { len, chunk_size: 128 * 1024 }) + } + + #[inline] + fn consensus_decode(d: D) -> Result { + Self::consensus_decode_from_finite_reader(d.take(MAX_VEC_SIZE as u64)) } } @@ -624,9 +699,14 @@ impl Encodable for Box<[u8]> { } impl Decodable for Box<[u8]> { + #[inline] + fn consensus_decode_from_finite_reader(d: D) -> Result { + >::consensus_decode_from_finite_reader(d).map(From::from) + } + #[inline] fn consensus_decode(d: D) -> Result { - >::consensus_decode(d).map(From::from) + Self::consensus_decode_from_finite_reader(d.take(MAX_VEC_SIZE as u64)) } } @@ -650,17 +730,11 @@ impl Encodable for CheckedData { impl Decodable for CheckedData { #[inline] - fn consensus_decode(mut d: D) -> Result { - let len = u32::consensus_decode(&mut d)?; - if len > MAX_VEC_SIZE as u32 { - return Err(self::Error::OversizedVectorAllocation { - requested: len as usize, - max: MAX_VEC_SIZE - }); - } - let checksum = <[u8; 4]>::consensus_decode(&mut d)?; - let mut ret = vec![0u8; len as usize]; - d.read_slice(&mut ret)?; + fn consensus_decode_from_finite_reader(mut d: D) -> Result { + let len = u32::consensus_decode_from_finite_reader(&mut d)? as usize; + + let checksum = <[u8; 4]>::consensus_decode_from_finite_reader(&mut d)?; + let ret = read_bytes_from_finite_reader(d, ReadBytesFromFiniteReaderOpts { len, chunk_size: MAX_VEC_SIZE })?; let expected_checksum = sha2_checksum(&ret); if expected_checksum != checksum { Err(self::Error::InvalidChecksum { @@ -671,6 +745,10 @@ impl Decodable for CheckedData { Ok(CheckedData(ret)) } } + + fn consensus_decode(d: D) -> Result { + Self::consensus_decode_from_finite_reader(d.take(MAX_VEC_SIZE as u64)) + } } // References @@ -1063,5 +1141,21 @@ mod tests { } } + + #[test] + fn test_read_bytes_from_finite_reader() { + let data : Vec = (0..10).collect(); + + for chunk_size in 1..20 { + assert_eq!( + read_bytes_from_finite_reader( + io::Cursor::new(&data), + ReadBytesFromFiniteReaderOpts { len: data.len(), chunk_size } + ).unwrap(), + data + ); + } + } + } diff --git a/src/internal_macros.rs b/src/internal_macros.rs index e1bfe7525a..708bc9be2f 100644 --- a/src/internal_macros.rs +++ b/src/internal_macros.rs @@ -32,6 +32,16 @@ macro_rules! 
impl_consensus_encoding {
         }

         impl $crate::consensus::Decodable for $thing {
+
+            #[inline]
+            fn consensus_decode_from_finite_reader(
+                mut d: D,
+            ) -> Result<$thing, $crate::consensus::encode::Error> {
+                Ok($thing {
+                    $($field: $crate::consensus::Decodable::consensus_decode_from_finite_reader(&mut d)?),+
+                })
+            }
+
             #[inline]
             fn consensus_decode(
                 d: D,
diff --git a/src/network/message.rs b/src/network/message.rs
index 49aa7c01e8..a7d5ae9a8f 100644
--- a/src/network/message.rs
+++ b/src/network/message.rs
@@ -20,7 +20,7 @@
 use prelude::*;
-use core::{mem, fmt, iter};
+use core::{self, fmt, iter};
 use io;
 use blockdata::block;
@@ -29,7 +29,7 @@
 use network::address::{Address, AddrV2Message};
 use network::{message_network, message_bloom};
 use network::message_blockdata;
 use network::message_filter;
-use consensus::encode::{CheckedData, Decodable, Encodable, VarInt, MAX_VEC_SIZE};
+use consensus::encode::{CheckedData, Decodable, Encodable, VarInt};
 use consensus::{encode, serialize};
 use util::merkleblock::MerkleBlock;
@@ -38,6 +38,10 @@
 use util::merkleblock::MerkleBlock;
 /// This limit is not currently enforced by this implementation.
 pub const MAX_INV_SIZE: usize = 50_000;

+/// Maximum size, in bytes, of an encoded message.
+/// This by necessity should be larger than `MAX_VEC_SIZE`.
+pub const MAX_MSG_SIZE: usize = 5_000_000;
+
 /// Serializer for command string
 #[derive(PartialEq, Eq, Clone, Debug)]
 pub struct CommandString(Cow<'static, str>);
@@ -337,15 +341,11 @@ struct HeaderDeserializationWrapper(Vec);
 impl Decodable for HeaderDeserializationWrapper {
     #[inline]
-    fn consensus_decode(mut d: D) -> Result {
+    fn consensus_decode_from_finite_reader(mut d: D) -> Result {
         let len = VarInt::consensus_decode(&mut d)?.0;
-        let byte_size = (len as usize)
-            .checked_mul(mem::size_of::())
-            .ok_or(encode::Error::ParseFailed("Invalid length"))?;
-        if byte_size > MAX_VEC_SIZE {
-            return Err(encode::Error::OversizedVectorAllocation { requested: byte_size, max: MAX_VEC_SIZE })
-        }
-        let mut ret = Vec::with_capacity(len as usize);
+        // should be above the usual number of items to avoid
+        // reallocation
+        let mut ret = Vec::with_capacity(core::cmp::min(1024 * 16, len as usize));
         for _ in 0..len {
             ret.push(Decodable::consensus_decode(&mut d)?);
             if u8::consensus_decode(&mut d)? != 0u8 {
@@ -354,49 +354,54 @@ impl Decodable for HeaderDeserializationWrapper {
             }
         }
         Ok(HeaderDeserializationWrapper(ret))
     }
+
+    #[inline]
+    fn consensus_decode(d: D) -> Result {
+        Self::consensus_decode_from_finite_reader(d.take(MAX_MSG_SIZE as u64))
+    }
 }

 impl Decodable for RawNetworkMessage {
-    fn consensus_decode(mut d: D) -> Result {
-        let magic = Decodable::consensus_decode(&mut d)?;
-        let cmd = CommandString::consensus_decode(&mut d)?;
-        let raw_payload = CheckedData::consensus_decode(&mut d)?.0;
+    fn consensus_decode_from_finite_reader(mut d: D) -> Result {
+        let magic = Decodable::consensus_decode_from_finite_reader(&mut d)?;
+        let cmd = CommandString::consensus_decode_from_finite_reader(&mut d)?;
+        let raw_payload = CheckedData::consensus_decode_from_finite_reader(&mut d)?.0;

         let mut mem_d = io::Cursor::new(raw_payload);
         let payload = match &cmd.0[..]
{ - "version" => NetworkMessage::Version(Decodable::consensus_decode(&mut mem_d)?), + "version" => NetworkMessage::Version(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), "verack" => NetworkMessage::Verack, - "addr" => NetworkMessage::Addr(Decodable::consensus_decode(&mut mem_d)?), - "inv" => NetworkMessage::Inv(Decodable::consensus_decode(&mut mem_d)?), - "getdata" => NetworkMessage::GetData(Decodable::consensus_decode(&mut mem_d)?), - "notfound" => NetworkMessage::NotFound(Decodable::consensus_decode(&mut mem_d)?), - "getblocks" => NetworkMessage::GetBlocks(Decodable::consensus_decode(&mut mem_d)?), - "getheaders" => NetworkMessage::GetHeaders(Decodable::consensus_decode(&mut mem_d)?), + "addr" => NetworkMessage::Addr(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "inv" => NetworkMessage::Inv(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "getdata" => NetworkMessage::GetData(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "notfound" => NetworkMessage::NotFound(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "getblocks" => NetworkMessage::GetBlocks(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "getheaders" => NetworkMessage::GetHeaders(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), "mempool" => NetworkMessage::MemPool, - "block" => NetworkMessage::Block(Decodable::consensus_decode(&mut mem_d)?), + "block" => NetworkMessage::Block(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), "headers" => NetworkMessage::Headers( - HeaderDeserializationWrapper::consensus_decode(&mut mem_d)?.0 + HeaderDeserializationWrapper::consensus_decode_from_finite_reader(&mut mem_d)?.0 ), "sendheaders" => NetworkMessage::SendHeaders, "getaddr" => NetworkMessage::GetAddr, - "ping" => NetworkMessage::Ping(Decodable::consensus_decode(&mut mem_d)?), - "pong" => NetworkMessage::Pong(Decodable::consensus_decode(&mut mem_d)?), - "merkleblock" => NetworkMessage::MerkleBlock(Decodable::consensus_decode(&mut mem_d)?), - "filterload" => NetworkMessage::FilterLoad(Decodable::consensus_decode(&mut mem_d)?), - "filteradd" => NetworkMessage::FilterAdd(Decodable::consensus_decode(&mut mem_d)?), + "ping" => NetworkMessage::Ping(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "pong" => NetworkMessage::Pong(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "merkleblock" => NetworkMessage::MerkleBlock(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "filterload" => NetworkMessage::FilterLoad(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "filteradd" => NetworkMessage::FilterAdd(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), "filterclear" => NetworkMessage::FilterClear, - "tx" => NetworkMessage::Tx(Decodable::consensus_decode(&mut mem_d)?), - "getcfilters" => NetworkMessage::GetCFilters(Decodable::consensus_decode(&mut mem_d)?), - "cfilter" => NetworkMessage::CFilter(Decodable::consensus_decode(&mut mem_d)?), - "getcfheaders" => NetworkMessage::GetCFHeaders(Decodable::consensus_decode(&mut mem_d)?), - "cfheaders" => NetworkMessage::CFHeaders(Decodable::consensus_decode(&mut mem_d)?), - "getcfcheckpt" => NetworkMessage::GetCFCheckpt(Decodable::consensus_decode(&mut mem_d)?), - "cfcheckpt" => NetworkMessage::CFCheckpt(Decodable::consensus_decode(&mut mem_d)?), - "reject" => NetworkMessage::Reject(Decodable::consensus_decode(&mut mem_d)?), - "alert" => NetworkMessage::Alert(Decodable::consensus_decode(&mut mem_d)?), - "feefilter" => 
NetworkMessage::FeeFilter(Decodable::consensus_decode(&mut mem_d)?), + "tx" => NetworkMessage::Tx(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "getcfilters" => NetworkMessage::GetCFilters(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "cfilter" => NetworkMessage::CFilter(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "getcfheaders" => NetworkMessage::GetCFHeaders(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "cfheaders" => NetworkMessage::CFHeaders(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "getcfcheckpt" => NetworkMessage::GetCFCheckpt(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "cfcheckpt" => NetworkMessage::CFCheckpt(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "reject" => NetworkMessage::Reject(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "alert" => NetworkMessage::Alert(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), + "feefilter" => NetworkMessage::FeeFilter(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), "wtxidrelay" => NetworkMessage::WtxidRelay, - "addrv2" => NetworkMessage::AddrV2(Decodable::consensus_decode(&mut mem_d)?), + "addrv2" => NetworkMessage::AddrV2(Decodable::consensus_decode_from_finite_reader(&mut mem_d)?), "sendaddrv2" => NetworkMessage::SendAddrV2, _ => NetworkMessage::Unknown { command: cmd, @@ -408,6 +413,11 @@ impl Decodable for RawNetworkMessage { payload, }) } + + #[inline] + fn consensus_decode(d: D) -> Result { + Self::consensus_decode_from_finite_reader(d.take(MAX_MSG_SIZE as u64)) + } } #[cfg(test)] From 7baa21c148c1e3ef6c86e00a561593aff693fc38 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Tue, 30 Aug 2022 17:19:39 +0000 Subject: [PATCH 2/4] fuzz: disable features in honggfuzz --- .github/workflows/fuzz.yml | 2 +- fuzz/Cargo.toml | 2 +- fuzz/travis-fuzz.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index be1ea6d853..0de9c3e13e 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -30,7 +30,7 @@ jobs: toolchain: 1.58 override: true profile: minimal - - run: cargo install honggfuzz + - run: cargo install honggfuzz --no-default-features if: steps.cache-fuzz.outputs.cache-hit != 'true' - run: echo "HFUZZ_RUN_ARGS=\"--run_time 30 --exit_upon_crash -v -f hfuzz_input/${{ matrix.fuzz_target }}/input\"" >> $GITHUB_ENV - name: fuzz diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index b893483b23..927a3b3714 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -12,7 +12,7 @@ afl_fuzz = ["afl"] honggfuzz_fuzz = ["honggfuzz"] [dependencies] -honggfuzz = { version = "0.5", optional = true } +honggfuzz = { version = "0.5", optional = true, default-features = false } afl = { version = "0.4", optional = true } bitcoin = { path = ".." 
} diff --git a/fuzz/travis-fuzz.sh b/fuzz/travis-fuzz.sh index 8f69981558..662472c02f 100755 --- a/fuzz/travis-fuzz.sh +++ b/fuzz/travis-fuzz.sh @@ -9,7 +9,7 @@ if [ ${incorrectFilenames} -gt 0 ]; then fi # Testing -cargo install --force honggfuzz +cargo install --force honggfuzz --no-default-features for TARGET in fuzz_targets/*; do FILENAME=$(basename $TARGET) FILE="${FILENAME%.*}" From 4c6f9b39e329a0cedaf7a4eda6b6c342db715885 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Tue, 30 Aug 2022 20:04:23 +0000 Subject: [PATCH 3/4] fuzz: remove mysteriously-not-necessary quotes from gh action script --- .github/workflows/fuzz.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index 0de9c3e13e..48f96c3d51 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -32,7 +32,7 @@ jobs: profile: minimal - run: cargo install honggfuzz --no-default-features if: steps.cache-fuzz.outputs.cache-hit != 'true' - - run: echo "HFUZZ_RUN_ARGS=\"--run_time 30 --exit_upon_crash -v -f hfuzz_input/${{ matrix.fuzz_target }}/input\"" >> $GITHUB_ENV + - run: echo "HFUZZ_RUN_ARGS=--run_time 30 --exit_upon_crash -v -f hfuzz_input/${{ matrix.fuzz_target }}/input" >> $GITHUB_ENV - name: fuzz run: cd fuzz && cargo hfuzz run ${{ matrix.fuzz_target }} - run: echo "${{ matrix.fuzz_target }}.rs" >executed_${{ matrix.fuzz_target }} From a0489d42b966ef8eb86d659674575c563b53f946 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Tue, 30 Aug 2022 20:22:47 +0000 Subject: [PATCH 4/4] fuzz: use travis-fuzz.sh in CI --- .github/workflows/fuzz.yml | 7 +------ fuzz/travis-fuzz.sh | 12 +++++++++++- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index 48f96c3d51..d698a68c5c 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -7,8 +7,6 @@ jobs: fuzz: if: ${{ !github.event.act }} runs-on: ubuntu-20.04 - env: - HFUZZ_BUILD_ARGS: "--features honggfuzz_fuzz" strategy: fail-fast: false matrix: @@ -30,11 +28,8 @@ jobs: toolchain: 1.58 override: true profile: minimal - - run: cargo install honggfuzz --no-default-features - if: steps.cache-fuzz.outputs.cache-hit != 'true' - - run: echo "HFUZZ_RUN_ARGS=--run_time 30 --exit_upon_crash -v -f hfuzz_input/${{ matrix.fuzz_target }}/input" >> $GITHUB_ENV - name: fuzz - run: cd fuzz && cargo hfuzz run ${{ matrix.fuzz_target }} + run: cd fuzz && ./travis-fuzz.sh "${{ matrix.fuzz_target }}" - run: echo "${{ matrix.fuzz_target }}.rs" >executed_${{ matrix.fuzz_target }} - uses: actions/upload-artifact@v2 with: diff --git a/fuzz/travis-fuzz.sh b/fuzz/travis-fuzz.sh index 662472c02f..1f75c9c0de 100755 --- a/fuzz/travis-fuzz.sh +++ b/fuzz/travis-fuzz.sh @@ -8,9 +8,19 @@ if [ ${incorrectFilenames} -gt 0 ]; then exit 2 fi +if [ "$1" == "" ]; then + TARGETS=fuzz_targets/* +else + TARGETS=fuzz_targets/"$1".rs +fi + +cargo --version +rustc --version + # Testing cargo install --force honggfuzz --no-default-features -for TARGET in fuzz_targets/*; do +for TARGET in $TARGETS; do + echo "Fuzzing target $TARGET" FILENAME=$(basename $TARGET) FILE="${FILENAME%.*}" if [ -d hfuzz_input/$FILE ]; then
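// --- Editor's illustrative sketch (not part of the patches): a minimal
// --- fuzz-style harness in the spirit of the targets the CI changes above
// --- keep exercising. The real targets live under fuzz/fuzz_targets/ and are
// --- not shown in these patches; this standalone version only demonstrates
// --- what such a fuzzer checks, namely that feeding arbitrary bytes into
// --- consensus decoding either succeeds or fails with a clean `Err`, and
// --- never exhausts memory, which is what the first patch's
// --- `consensus_decode_from_finite_reader` changes protect. Assumes the
// --- `bitcoin` crate as a dependency.
fn do_test(data: &[u8]) {
    // Any outcome is acceptable except a crash or an unbounded allocation.
    let _ = bitcoin::consensus::encode::deserialize::<bitcoin::Transaction>(data);
}

fn main() {
    // Stand-in driver; under honggfuzz the body would typically be
    // `loop { honggfuzz::fuzz!(|data: &[u8]| { do_test(data) }); }`.
    do_test(&[0u8; 32]);
}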