Skip to content

Commit

Permalink
Limit bytes read with Take
Browse files Browse the repository at this point in the history
  • Loading branch information
RCasatta committed Apr 21, 2021
1 parent dc0e2b0 commit 8413706
Show file tree
Hide file tree
Showing 7 changed files with 37 additions and 22 deletions.
4 changes: 3 additions & 1 deletion src/blockdata/transaction.rs
Expand Up @@ -34,6 +34,7 @@ use blockdata::constants::WITNESS_SCALE_FACTOR;
#[cfg(feature="bitcoinconsensus")] use blockdata::script;
use blockdata::script::Script;
use consensus::{encode, Decodable, Encodable};
use consensus::encode::MAX_OBJECT_SIZE;
use hash_types::{SigHash, Txid, Wtxid};
use VarInt;

Expand Down Expand Up @@ -566,7 +567,8 @@ impl Encodable for Transaction {
}

impl Decodable for Transaction {
fn consensus_decode<D: io::Read>(mut d: D) -> Result<Self, encode::Error> {
fn consensus_decode<D: io::Read>(d: D) -> Result<Self, encode::Error> {
let mut d = d.take(MAX_OBJECT_SIZE as u64);
let version = i32::consensus_decode(&mut d)?;
let input = Vec::<TxIn>::consensus_decode(&mut d)?;
// segwit
Expand Down
30 changes: 20 additions & 10 deletions src/consensus/encode.rs
Expand Up @@ -305,7 +305,7 @@ impl<R: Read> ReadExt for R {
}

/// Maximum size, in bytes, of a vector we are allowed to decode
pub const MAX_VEC_SIZE: usize = 4_000_000;
pub const MAX_OBJECT_SIZE: usize = 4_000_000;

/// Data which can be encoded in a consensus-consistent way
pub trait Encodable {
Expand Down Expand Up @@ -571,10 +571,11 @@ macro_rules! impl_vec {
let byte_size = (len as usize)
.checked_mul(mem::size_of::<$type>())
.ok_or(self::Error::ParseFailed("Invalid length"))?;
if byte_size > MAX_VEC_SIZE {
return Err(self::Error::OversizedVectorAllocation { requested: byte_size, max: MAX_VEC_SIZE })
if byte_size > MAX_OBJECT_SIZE {
return Err(self::Error::OversizedVectorAllocation { requested: byte_size, max: MAX_OBJECT_SIZE })
}
let mut ret = Vec::with_capacity(len as usize);
let mut d = d.take(MAX_OBJECT_SIZE as u64);
for _ in 0..len {
ret.push(Decodable::consensus_decode(&mut d)?);
}
Expand Down Expand Up @@ -614,8 +615,8 @@ impl Decodable for Vec<u8> {
#[inline]
fn consensus_decode<D: io::Read>(mut d: D) -> Result<Self, Error> {
let len = VarInt::consensus_decode(&mut d)?.0 as usize;
if len > MAX_VEC_SIZE {
return Err(self::Error::OversizedVectorAllocation { requested: len, max: MAX_VEC_SIZE })
if len > MAX_OBJECT_SIZE {
return Err(self::Error::OversizedVectorAllocation { requested: len, max: MAX_OBJECT_SIZE })
}
let mut ret = vec![0u8; len];
d.read_slice(&mut ret)?;
Expand Down Expand Up @@ -659,10 +660,10 @@ impl Decodable for CheckedData {
#[inline]
fn consensus_decode<D: io::Read>(mut d: D) -> Result<Self, Error> {
let len = u32::consensus_decode(&mut d)?;
if len > MAX_VEC_SIZE as u32 {
if len > MAX_OBJECT_SIZE as u32 {
return Err(self::Error::OversizedVectorAllocation {
requested: len as usize,
max: MAX_VEC_SIZE
max: MAX_OBJECT_SIZE
});
}
let checksum = <[u8; 4]>::consensus_decode(&mut d)?;
Expand Down Expand Up @@ -957,9 +958,9 @@ mod tests {

let rand_io_err = Error::Io(io::Error::new(io::ErrorKind::Other, ""));

// Check serialization that `if len > MAX_VEC_SIZE {return err}` isn't inclusive,
// Check serialization that `if len > MAX_OBJECT_SIZE {return err}` isn't inclusive,
// by making sure it fails with IO Error and not an `OversizedVectorAllocation` Error.
let err = deserialize::<CheckedData>(&serialize(&(super::MAX_VEC_SIZE as u32))).unwrap_err();
let err = deserialize::<CheckedData>(&serialize(&(super::MAX_OBJECT_SIZE as u32))).unwrap_err();
assert_eq!(discriminant(&err), discriminant(&rand_io_err));

test_len_is_max_vec::<u8>();
Expand All @@ -977,7 +978,7 @@ mod tests {

fn test_len_is_max_vec<T>() where Vec<T>: Decodable, T: fmt::Debug {
let rand_io_err = Error::Io(io::Error::new(io::ErrorKind::Other, ""));
let varint = VarInt((super::MAX_VEC_SIZE / mem::size_of::<T>()) as u64);
let varint = VarInt((super::MAX_OBJECT_SIZE / mem::size_of::<T>()) as u64);
let err = deserialize::<Vec<T>>(&serialize(&varint)).unwrap_err();
assert_eq!(discriminant(&err), discriminant(&rand_io_err));
}
Expand All @@ -997,6 +998,15 @@ mod tests {
assert_eq!(cd.ok(), Some(CheckedData(vec![1u8, 2, 3, 4, 5])));
}

#[test]
fn limit_read_test() {
    // Two byte vectors of 3_999_999 bytes each: individually below
    // MAX_OBJECT_SIZE (4_000_000), but the combined serialization exceeds
    // the `Take` limit applied inside the Vec decoding loop, so decoding
    // must fail instead of reading an unbounded amount of data.
    let witness = vec![vec![0u8; 3_999_999]; 2];
    let serialized = serialize(&witness);
    let mut cursor = io::Cursor::new(serialized);
    let result = Vec::<Vec<u8>>::consensus_decode(&mut cursor);
    assert!(result.is_err());
}

#[test]
fn serialization_round_trips() {
macro_rules! round_trip {
Expand Down
3 changes: 2 additions & 1 deletion src/internal_macros.rs
Expand Up @@ -33,8 +33,9 @@ macro_rules! impl_consensus_encoding {
impl $crate::consensus::Decodable for $thing {
#[inline]
fn consensus_decode<D: ::std::io::Read>(
mut d: D,
d: D,
) -> Result<$thing, $crate::consensus::encode::Error> {
let mut d = d.take($crate::consensus::encode::MAX_OBJECT_SIZE as u64);
Ok($thing {
$($field: $crate::consensus::Decodable::consensus_decode(&mut d)?),+
})
Expand Down
7 changes: 3 additions & 4 deletions src/network/message.rs
Expand Up @@ -29,9 +29,8 @@ use network::address::{Address, AddrV2Message};
use network::message_network;
use network::message_blockdata;
use network::message_filter;
use consensus::encode::{CheckedData, Decodable, Encodable, VarInt};
use consensus::encode::{CheckedData, Decodable, Encodable, VarInt, MAX_OBJECT_SIZE};
use consensus::{encode, serialize};
use consensus::encode::MAX_VEC_SIZE;

/// The maximum number of [Inventory] items in an `inv` message.
///
Expand Down Expand Up @@ -318,8 +317,8 @@ impl Decodable for HeaderDeserializationWrapper {
let byte_size = (len as usize)
.checked_mul(mem::size_of::<block::BlockHeader>())
.ok_or(encode::Error::ParseFailed("Invalid length"))?;
if byte_size > MAX_VEC_SIZE {
return Err(encode::Error::OversizedVectorAllocation { requested: byte_size, max: MAX_VEC_SIZE })
if byte_size > MAX_OBJECT_SIZE {
return Err(encode::Error::OversizedVectorAllocation { requested: byte_size, max: MAX_OBJECT_SIZE })
}
let mut ret = Vec::with_capacity(len as usize);
for _ in 0..len {
Expand Down
5 changes: 3 additions & 2 deletions src/util/psbt/map/global.rs
Expand Up @@ -19,6 +19,7 @@ use std::cmp;

use blockdata::transaction::Transaction;
use consensus::{encode, Encodable, Decodable};
use consensus::encode::MAX_OBJECT_SIZE;
use util::psbt::map::Map;
use util::psbt::raw;
use util::psbt;
Expand Down Expand Up @@ -228,8 +229,8 @@ impl Map for Global {
impl_psbtmap_consensus_encoding!(Global);

impl Decodable for Global {
fn consensus_decode<D: io::Read>(mut d: D) -> Result<Self, encode::Error> {

fn consensus_decode<D: io::Read>(d: D) -> Result<Self, encode::Error> {
let mut d = d.take(MAX_OBJECT_SIZE as u64);
let mut tx: Option<Transaction> = None;
let mut version: Option<u32> = None;
let mut unknowns: BTreeMap<raw::Key, Vec<u8>> = Default::default();
Expand Down
4 changes: 3 additions & 1 deletion src/util/psbt/mod.rs
Expand Up @@ -21,6 +21,7 @@
use blockdata::script::Script;
use blockdata::transaction::Transaction;
use consensus::{encode, Encodable, Decodable};
use consensus::encode::MAX_OBJECT_SIZE;

use std::io;

Expand Down Expand Up @@ -162,7 +163,8 @@ impl Encodable for PartiallySignedTransaction {
}

impl Decodable for PartiallySignedTransaction {
fn consensus_decode<D: io::Read>(mut d: D) -> Result<Self, encode::Error> {
fn consensus_decode<D: io::Read>(d: D) -> Result<Self, encode::Error> {
let mut d = d.take(MAX_OBJECT_SIZE as u64);
let magic: [u8; 4] = Decodable::consensus_decode(&mut d)?;

if *b"psbt" != magic {
Expand Down
6 changes: 3 additions & 3 deletions src/util/psbt/raw.rs
Expand Up @@ -19,7 +19,7 @@

use std::{fmt, io};

use consensus::encode::{self, ReadExt, WriteExt, Decodable, Encodable, VarInt, serialize, deserialize, MAX_VEC_SIZE};
use consensus::encode::{self, ReadExt, WriteExt, Decodable, Encodable, VarInt, serialize, deserialize, MAX_OBJECT_SIZE};
use hashes::hex;
use util::psbt::Error;

Expand Down Expand Up @@ -81,10 +81,10 @@ impl Decodable for Key {

let key_byte_size: u64 = byte_size - 1;

if key_byte_size > MAX_VEC_SIZE as u64 {
if key_byte_size > MAX_OBJECT_SIZE as u64 {
return Err(encode::Error::OversizedVectorAllocation {
requested: key_byte_size as usize,
max: MAX_VEC_SIZE,
max: MAX_OBJECT_SIZE,
})
}

Expand Down

0 comments on commit 8413706

Please sign in to comment.