Use SmallVec to optimize for small integer sizes. #210

Open · wants to merge 4 commits into master
1 change: 1 addition & 0 deletions Cargo.toml
@@ -38,6 +38,7 @@ harness = false
name = "shootout-pidigits"

[dependencies]
smallvec = { version = "1.6.1", optional = true, features = ["union"] }

[dependencies.num-integer]
version = "0.1.42"
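The new dependency backs a `crate::backend` module that the rest of this diff relies on (`backend::Vec`, `backend::vec!`, `backend::inlined`, `backend::from_vec`, `backend::clone`) but that is not shown in these hunks. A minimal sketch of what such a shim might look like, assuming the `smallvec` feature switches between `SmallVec` inline storage and a plain `Vec`; the inline capacity of 2 digits and the helper bodies are assumptions for illustration, not the PR's actual code:

```rust
// Hypothetical sketch of the `backend` module referenced throughout this diff.
#[cfg(feature = "smallvec")]
pub(crate) type Vec<T> = smallvec::SmallVec<[T; 2]>; // inline capacity is an assumed value
#[cfg(not(feature = "smallvec"))]
pub(crate) type Vec<T> = std::vec::Vec<T>;

// Re-export a `vec!`-like constructor so callers can write `backend::vec![1]`.
#[cfg(feature = "smallvec")]
pub(crate) use smallvec::smallvec as vec;
#[cfg(not(feature = "smallvec"))]
pub(crate) use std::vec;

/// True when the digits are stored inline (no heap allocation), i.e. the value is small.
#[cfg(feature = "smallvec")]
pub(crate) fn inlined<T>(v: &Vec<T>) -> bool {
    !v.spilled()
}
#[cfg(not(feature = "smallvec"))]
pub(crate) fn inlined<T>(_v: &Vec<T>) -> bool {
    false
}

/// Moves an owned `std::vec::Vec` of digits into the backend storage type.
pub(crate) fn from_vec<T>(v: std::vec::Vec<T>) -> Vec<T> {
    Vec::from(v)
}

/// Clones the digit storage; routed through the backend so either representation works.
pub(crate) fn clone<T: Clone>(v: &Vec<T>) -> Vec<T> {
    v.clone()
}
```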
4 changes: 2 additions & 2 deletions src/bigint.rs
@@ -18,7 +18,7 @@ use self::Sign::{Minus, NoSign, Plus};

use crate::big_digit::BigDigit;
use crate::biguint::to_str_radix_reversed;
use crate::biguint::{BigUint, IntDigits, U32Digits, U64Digits};
use crate::biguint::{BigDigitVec, BigUint, IntDigits, U32Digits, U64Digits};

mod addition;
mod division;
@@ -538,7 +538,7 @@ impl IntDigits for BigInt {
self.data.digits()
}
#[inline]
fn digits_mut(&mut self) -> &mut Vec<BigDigit> {
fn digits_mut(&mut self) -> &mut BigDigitVec {
self.data.digits_mut()
}
#[inline]
21 changes: 10 additions & 11 deletions src/bigint/bits.rs
@@ -2,8 +2,7 @@ use super::BigInt;
use super::Sign::{Minus, NoSign, Plus};

use crate::big_digit::{self, BigDigit, DoubleBigDigit};
use crate::biguint::IntDigits;
use crate::std_alloc::Vec;
use crate::biguint::{BigDigitVec, IntDigits};

use core::cmp::Ordering::{Equal, Greater, Less};
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign};
@@ -36,7 +35,7 @@ fn negate_carry(a: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
// + 1 & -ff = ...0 01 & ...f 01 = ...0 01 = + 1
// +ff & - 1 = ...0 ff & ...f ff = ...0 ff = +ff
// answer is pos, has length of a
fn bitand_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
fn bitand_pos_neg(a: &mut BigDigitVec, b: &[BigDigit]) {
let mut carry_b = 1;
for (ai, &bi) in a.iter_mut().zip(b.iter()) {
let twos_b = negate_carry(bi, &mut carry_b);
@@ -48,7 +47,7 @@ fn bitand_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
// - 1 & +ff = ...f ff & ...0 ff = ...0 ff = +ff
// -ff & + 1 = ...f 01 & ...0 01 = ...0 01 = + 1
// answer is pos, has length of b
fn bitand_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
fn bitand_neg_pos(a: &mut BigDigitVec, b: &[BigDigit]) {
let mut carry_a = 1;
for (ai, &bi) in a.iter_mut().zip(b.iter()) {
let twos_a = negate_carry(*ai, &mut carry_a);
@@ -69,7 +68,7 @@ fn bitand_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
// -ff & - 1 = ...f 01 & ...f ff = ...f 01 = - ff
// -ff & -fe = ...f 01 & ...f 02 = ...f 00 = -100
// answer is neg, has length of longest with a possible carry
fn bitand_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
fn bitand_neg_neg(a: &mut BigDigitVec, b: &[BigDigit]) {
let mut carry_a = 1;
let mut carry_b = 1;
let mut carry_and = 1;
@@ -173,7 +172,7 @@ impl<'a> BitAndAssign<&'a BigInt> for BigInt {
// + 1 | -ff = ...0 01 | ...f 01 = ...f 01 = -ff
// +ff | - 1 = ...0 ff | ...f ff = ...f ff = - 1
// answer is neg, has length of b
fn bitor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
fn bitor_pos_neg(a: &mut BigDigitVec, b: &[BigDigit]) {
let mut carry_b = 1;
let mut carry_or = 1;
for (ai, &bi) in a.iter_mut().zip(b.iter()) {
@@ -202,7 +201,7 @@ fn bitor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
// - 1 | +ff = ...f ff | ...0 ff = ...f ff = - 1
// -ff | + 1 = ...f 01 | ...0 01 = ...f 01 = -ff
// answer is neg, has length of a
fn bitor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
fn bitor_neg_pos(a: &mut BigDigitVec, b: &[BigDigit]) {
let mut carry_a = 1;
let mut carry_or = 1;
for (ai, &bi) in a.iter_mut().zip(b.iter()) {
@@ -224,7 +223,7 @@ fn bitor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
// - 1 | -ff = ...f ff | ...f 01 = ...f ff = -1
// -ff | - 1 = ...f 01 | ...f ff = ...f ff = -1
// answer is neg, has length of shortest
fn bitor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
fn bitor_neg_neg(a: &mut BigDigitVec, b: &[BigDigit]) {
let mut carry_a = 1;
let mut carry_b = 1;
let mut carry_or = 1;
@@ -308,7 +307,7 @@ impl<'a> BitOrAssign<&'a BigInt> for BigInt {
// + 1 ^ -ff = ...0 01 ^ ...f 01 = ...f 00 = -100
// +ff ^ - 1 = ...0 ff ^ ...f ff = ...f 00 = -100
// answer is neg, has length of longest with a possible carry
fn bitxor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
fn bitxor_pos_neg(a: &mut BigDigitVec, b: &[BigDigit]) {
let mut carry_b = 1;
let mut carry_xor = 1;
for (ai, &bi) in a.iter_mut().zip(b.iter()) {
@@ -341,7 +340,7 @@ fn bitxor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
// - 1 ^ +ff = ...f ff ^ ...0 ff = ...f 00 = -100
// -ff ^ + 1 = ...f 01 ^ ...0 01 = ...f 00 = -100
// answer is neg, has length of longest with a possible carry
fn bitxor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
fn bitxor_neg_pos(a: &mut BigDigitVec, b: &[BigDigit]) {
let mut carry_a = 1;
let mut carry_xor = 1;
for (ai, &bi) in a.iter_mut().zip(b.iter()) {
@@ -374,7 +373,7 @@ fn bitxor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
// - 1 ^ -ff = ...f ff ^ ...f 01 = ...0 fe = +fe
// -ff ^ - 1 = ...f 01 ^ ...f ff = ...0 fe = +fe
// answer is pos, has length of longest
fn bitxor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
fn bitxor_neg_neg(a: &mut BigDigitVec, b: &[BigDigit]) {
let mut carry_a = 1;
let mut carry_b = 1;
for (ai, &bi) in a.iter_mut().zip(b.iter()) {
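These helpers only change type signatures in this PR, but the comments above them lean on the on-the-fly two's complement trick implemented by `negate_carry`: a negative operand stored as a magnitude is streamed digit by digit as `!digit + carry`, starting with a carry of 1. A worked sketch with `u8` digits for readability (the real code uses `BigDigit`/`DoubleBigDigit`):

```rust
// Worked sketch of the two's complement streaming these bit ops rely on
// (u8 digits here for readability; the crate uses BigDigit/DoubleBigDigit).
fn negate_carry(a: u8, acc: &mut u16) -> u8 {
    *acc += u16::from(!a);
    let lo = *acc as u8;
    *acc >>= 8;
    lo
}

fn main() {
    // Magnitude 0x0100 stored little-endian as [0x00, 0x01]; streaming it through
    // negate_carry yields the two's complement digits of -0x0100, i.e. [0x00, 0xff].
    let mut carry = 1u16;
    let digits = [0x00u8, 0x01];
    let twos: Vec<u8> = digits.iter().map(|&d| negate_carry(d, &mut carry)).collect();
    assert_eq!(twos, vec![0x00, 0xff]);
}
```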
8 changes: 7 additions & 1 deletion src/bigint/multiplication.rs
@@ -29,9 +29,15 @@ macro_rules! impl_mul {
#[inline]
fn mul(self, other: $Other) -> BigInt {
// automatically match value/ref
// from_biguint optimizes poorly if it cannot tell NoSign is impossible.
if self.is_zero() || other.is_zero() {
return BigInt::zero();
}
let new_sign = if (self.sign() == Minus) ^ (other.sign() == Minus) { Minus } else { Plus };
let BigInt { data: x, .. } = self;
let BigInt { data: y, .. } = other;
BigInt::from_biguint(self.sign * other.sign, x * y)
BigInt::from_biguint(new_sign, x * y)
// BigInt::from_biguint(self.sign * other.sign, x * y)
}
}
)*}
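The rewritten `mul` returns zero up front, so by the time the sign is computed both operands are known non-zero and `NoSign` cannot occur; the XOR of the two `== Minus` tests is then equivalent to the old `self.sign * other.sign`. A standalone illustration of that equivalence (toy `Sign` enum, not the crate's type):

```rust
// Toy illustration of the sign logic after the early zero return (not the crate's types).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Sign {
    Minus,
    NoSign,
    Plus,
}

// Once both operands are known non-zero, their signs are Plus or Minus only,
// so the product is Minus exactly when the signs differ.
fn product_sign(a: Sign, b: Sign) -> Sign {
    debug_assert!(a != Sign::NoSign && b != Sign::NoSign);
    if (a == Sign::Minus) ^ (b == Sign::Minus) {
        Sign::Minus
    } else {
        Sign::Plus
    }
}

fn main() {
    use Sign::*;
    assert_eq!(product_sign(Plus, Plus), Plus);
    assert_eq!(product_sign(Minus, Plus), Minus);
    assert_eq!(product_sign(Plus, Minus), Minus);
    assert_eq!(product_sign(Minus, Minus), Plus);
}
```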
21 changes: 6 additions & 15 deletions src/bigrand.rs
@@ -3,6 +3,7 @@
use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use rand::prelude::*;

use crate::big_digit::BigDigit;
use crate::BigInt;
use crate::BigUint;
use crate::Sign::*;
@@ -37,12 +38,12 @@ pub trait RandBigInt {
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}

fn gen_bits<R: Rng + ?Sized>(rng: &mut R, data: &mut [u32], rem: u64) {
fn gen_bits<R: Rng + ?Sized>(rng: &mut R, data: &mut [BigDigit], rem: u64) {
// `fill` is faster than many `gen::<u32>` calls
rng.fill(data);
if rem > 0 {
let last = data.len() - 1;
data[last] >>= 32 - rem;
data[last] >>= crate::big_digit::BITS as u64 - rem;
}
}

@@ -60,22 +61,12 @@ impl<R: Rng + ?Sized> RandBigInt for R {

#[cfg(u64_digit)]
fn gen_biguint(&mut self, bit_size: u64) -> BigUint {
use core::slice;

let (digits, rem) = bit_size.div_rem(&32);
let (digits, rem) = bit_size.div_rem(&64);
let len = (digits + (rem > 0) as u64)
.to_usize()
.expect("capacity overflow");
let native_digits = bit_size.div_ceil(&64);
let native_len = native_digits.to_usize().expect("capacity overflow");
let mut data = vec![0u64; native_len];
unsafe {
// Generate bits in a `&mut [u32]` slice for value stability
let ptr = data.as_mut_ptr() as *mut u32;
debug_assert!(native_len * 2 >= len);
let data = slice::from_raw_parts_mut(ptr, len);
gen_bits(self, data, rem);
}
let mut data = vec![0u64; len];
gen_bits(self, data.as_mut_slice(), rem);
#[cfg(target_endian = "big")]
for digit in &mut data {
// swap u32 digits into u64 endianness
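With `u64_digit`, the generator now fills whole 64-bit digits and then drops the excess high bits of the top digit, rather than building the value from `u32` halves through a reinterpreted slice. A standalone sketch of that fill-and-mask step (simplified names, not the PR's exact code):

```rust
use rand::Rng;

// Standalone sketch of the fill-and-mask step: generate ceil(bit_size / 64) digits,
// then shift the top digit right so only `bit_size % 64` random bits remain in it.
fn gen_u64_digits<R: Rng + ?Sized>(rng: &mut R, bit_size: u64) -> Vec<u64> {
    let digits = bit_size / 64;
    let rem = bit_size % 64;
    let len = (digits + (rem > 0) as u64) as usize;
    let mut data = vec![0u64; len];
    rng.fill(&mut data[..]); // one bulk fill instead of many gen::<u64>() calls
    if rem > 0 {
        let last = data.len() - 1;
        data[last] >>= 64 - rem; // keep only `rem` bits in the top digit
    }
    data
}

fn main() {
    let mut rng = rand::thread_rng();
    let data = gen_u64_digits(&mut rng, 100);
    assert_eq!(data.len(), 2);
    assert!(data[1] < (1u64 << 36)); // top digit holds at most 100 - 64 = 36 bits
}
```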
53 changes: 43 additions & 10 deletions src/biguint.rs
@@ -13,6 +13,8 @@ use core::{u32, u64, u8};
use num_integer::{Integer, Roots};
use num_traits::{Num, One, Pow, ToPrimitive, Unsigned, Zero};

use crate::backend;

mod addition;
mod division;
mod multiplication;
@@ -36,16 +38,18 @@ pub use self::iter::{U32Digits, U64Digits};

/// A big unsigned integer type.
pub struct BigUint {
data: Vec<BigDigit>,
data: BigDigitVec,
}

pub(crate) type BigDigitVec = backend::Vec<BigDigit>;

// Note: derived `Clone` doesn't specialize `clone_from`,
// but we want to keep the allocation in `data`.
impl Clone for BigUint {
#[inline]
fn clone(&self) -> Self {
BigUint {
data: self.data.clone(),
data: backend::clone(&self.data),
}
}

@@ -146,7 +150,9 @@ impl fmt::Octal for BigUint {
impl Zero for BigUint {
#[inline]
fn zero() -> BigUint {
BigUint { data: Vec::new() }
BigUint {
data: backend::Vec::new(),
}
}

#[inline]
@@ -163,7 +169,9 @@ impl Zero for BigUint {
impl One for BigUint {
#[inline]
fn one() -> BigUint {
BigUint { data: vec![1] }
BigUint {
data: backend::vec![1],
}
}

#[inline]
@@ -218,6 +226,17 @@ impl Integer for BigUint {
/// The result is always positive.
#[inline]
fn gcd(&self, other: &Self) -> Self {
// use core::convert::TryInto;
// if let Some(x) = self.to_u64() {
// if let Some(y) = other.to_u64() {
// return BigUint::from(x.gcd(&y));
// }
// }
// if let Some(x) = self.to_u128() {
// if let Some(y) = other.to_u128() {
// return BigUint::from(x.gcd(&y));
// }
// }
#[inline]
fn twos(x: &BigUint) -> u64 {
x.trailing_zeros().unwrap_or(0)
@@ -512,6 +531,17 @@ pub trait ToBigUint {
/// The digits are in little-endian base matching `BigDigit`.
#[inline]
pub(crate) fn biguint_from_vec(digits: Vec<BigDigit>) -> BigUint {
BigUint {
data: backend::from_vec(digits),
}
.normalized()
}

/// Creates and initializes a `BigUint`.
///
/// The digits are in little-endian base matching `BigDigit`.
#[inline]
pub(crate) fn biguint_from_bigdigitvec(digits: BigDigitVec) -> BigUint {
BigUint { data: digits }.normalized()
}

@@ -850,9 +880,12 @@ impl BigUint {
let len = self.data.iter().rposition(|&d| d != 0).map_or(0, |i| i + 1);
self.data.truncate(len);
}
if self.data.len() < self.data.capacity() / 4 {
self.data.shrink_to_fit();
}
// Shrinking hurts performance of many algorithms which do not care about deallocating working memory.
// For example, 'to_str_radix' consumes a BigUint by dividing out digits. The possibility of shrinking
// the BigUint in the inner loop significantly lowers performance.
// if self.data.len() < self.data.capacity() / 4 {
// self.data.shrink_to_fit();
// }
}

/// Returns a normalized `BigUint`.
@@ -958,7 +991,7 @@ impl BigUint {

pub(crate) trait IntDigits {
fn digits(&self) -> &[BigDigit];
fn digits_mut(&mut self) -> &mut Vec<BigDigit>;
fn digits_mut(&mut self) -> &mut BigDigitVec;
fn normalize(&mut self);
fn capacity(&self) -> usize;
fn len(&self) -> usize;
@@ -970,7 +1003,7 @@ impl IntDigits for BigUint {
&self.data
}
#[inline]
fn digits_mut(&mut self) -> &mut Vec<BigDigit> {
fn digits_mut(&mut self) -> &mut BigDigitVec {
&mut self.data
}
#[inline]
@@ -1036,7 +1069,7 @@ fn test_from_slice() {
fn test_from_slice() {
fn check(slice: &[u32], data: &[BigDigit]) {
assert_eq!(
BigUint::from_slice(slice).data,
BigUint::from_slice(slice).data.as_slice(),
data,
"from {:?}, to {:?}",
slice,
22 changes: 22 additions & 0 deletions src/biguint/addition.rs
@@ -5,6 +5,8 @@ use super::{BigUint, IntDigits};
use crate::big_digit::{self, BigDigit};
use crate::UsizePromotion;

use crate::backend;

use core::iter::Sum;
use core::ops::{Add, AddAssign};
use num_traits::{CheckedAdd, Zero};
@@ -89,14 +91,28 @@ forward_val_assign!(impl AddAssign for BigUint, add_assign);
impl<'a> Add<&'a BigUint> for BigUint {
type Output = BigUint;

#[inline]
fn add(mut self, other: &BigUint) -> BigUint {
if backend::inlined(&other.data) {
use num_traits::ToPrimitive;
if let Some(x) = other.to_u64() {
return self + x;
}
}
self += other;
self
}
}
impl<'a> AddAssign<&'a BigUint> for BigUint {
#[inline]
fn add_assign(&mut self, other: &BigUint) {
if backend::inlined(&other.data) {
use num_traits::ToPrimitive;
if let Some(x) = other.to_u64() {
self.add_assign(x);
return;
}
}
let self_len = self.data.len();
let carry = if self_len < other.data.len() {
let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]);
@@ -148,6 +164,12 @@ impl Add<u64> for BigUint {

#[inline]
fn add(mut self, other: u64) -> BigUint {
use num_traits::ToPrimitive;
if backend::inlined(&self.data) {
if let Some(x) = self.to_u64() {
return BigUint::from(x as u128 + other as u128);
}
}
self += other;
self
}
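Each of these additions follows the same shape: if the operand's digits are stored inline, read it back as a `u64` and take a scalar path (or widen to `u128` when both sides fit) instead of running the general digit loop. A toy sketch of that dispatch on bare little-endian `u64` digits (helper names are illustrative, not num-bigint's API):

```rust
// Toy sketch of the small-operand dispatch used in this file (not the crate's code).
// Digits are little-endian u64s, mirroring BigUint's representation.
fn to_u64(digits: &[u64]) -> Option<u64> {
    match digits {
        [] => Some(0),
        [d] => Some(*d),
        _ => None, // too many digits for the scalar fast path
    }
}

/// `lhs += rhs`, shortcutting to a single carry chain when `rhs` fits in one digit.
fn add_assign(lhs: &mut Vec<u64>, rhs: &[u64]) {
    if let Some(x) = to_u64(rhs) {
        // Fast path: propagate one primitive value through the carry chain.
        let mut carry = x;
        for d in lhs.iter_mut() {
            if carry == 0 {
                return;
            }
            let (sum, overflow) = d.overflowing_add(carry);
            *d = sum;
            carry = overflow as u64;
        }
        if carry != 0 {
            lhs.push(carry);
        }
    } else {
        // General path: full digit-by-digit addition with carries (elided here).
        unimplemented!("general multi-digit addition");
    }
}

fn main() {
    let mut a = vec![u64::MAX]; // 2^64 - 1
    add_assign(&mut a, &[1]);
    assert_eq!(a, vec![0, 1]); // 2^64, now two digits
}
```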
2 changes: 1 addition & 1 deletion src/biguint/arbitrary.rs
@@ -14,7 +14,7 @@ impl quickcheck::Arbitrary for BigUint {

fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
// Use shrinker from Vec
Box::new(self.data.shrink().map(biguint_from_vec))
Box::new(self.data.clone().into_vec().shrink().map(biguint_from_vec))
}
}
