diff --git a/src/algorithms.rs b/src/algorithms.rs deleted file mode 100644 index febd8457..00000000 --- a/src/algorithms.rs +++ /dev/null @@ -1,936 +0,0 @@ -use crate::std_alloc::{Cow, Vec}; -use core::cmp; -use core::cmp::Ordering::{self, Equal, Greater, Less}; -use core::iter::repeat; -use core::mem; -use num_traits::{One, PrimInt, Zero}; - -#[cfg(all(use_addcarry, target_arch = "x86_64"))] -use core::arch::x86_64 as arch; - -#[cfg(all(use_addcarry, target_arch = "x86"))] -use core::arch::x86 as arch; - -use crate::biguint::biguint_from_vec; -use crate::biguint::BigUint; - -use crate::bigint::BigInt; -use crate::bigint::Sign; -use crate::bigint::Sign::{Minus, NoSign, Plus}; - -use crate::big_digit::{self, BigDigit, DoubleBigDigit}; - -// only needed for the fallback implementation of `sbb` -#[cfg(not(use_addcarry))] -use crate::big_digit::SignedDoubleBigDigit; - -// Generic functions for add/subtract/multiply with carry/borrow. These are specialized -// for some platforms to take advantage of intrinsics, etc. - -// Add with carry: -#[cfg(all(use_addcarry, u64_digit))] -#[inline] -fn adc(carry: u8, a: u64, b: u64, out: &mut u64) -> u8 { - // Safety: There are absolutely no safety concerns with calling `_addcarry_u64`. - // It's just unsafe for API consistency with other intrinsics. - unsafe { arch::_addcarry_u64(carry, a, b, out) } -} - -#[cfg(all(use_addcarry, not(u64_digit)))] -#[inline] -fn adc(carry: u8, a: u32, b: u32, out: &mut u32) -> u8 { - // Safety: There are absolutely no safety concerns with calling `_addcarry_u32`. - // It's just unsafe for API consistency with other intrinsics. - unsafe { arch::_addcarry_u32(carry, a, b, out) } -} - -// fallback for environments where we don't have an addcarry intrinsic -#[cfg(not(use_addcarry))] -#[inline] -fn adc(carry: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 { - let sum = DoubleBigDigit::from(a) + DoubleBigDigit::from(b) + DoubleBigDigit::from(carry); - *out = sum as BigDigit; - (sum >> big_digit::BITS) as u8 -} - -// Subtract with borrow: -#[cfg(all(use_addcarry, u64_digit))] -#[inline] -fn sbb(borrow: u8, a: u64, b: u64, out: &mut u64) -> u8 { - // Safety: There are absolutely no safety concerns with calling `_subborrow_u64`. - // It's just unsafe for API consistency with other intrinsics. - unsafe { arch::_subborrow_u64(borrow, a, b, out) } -} - -#[cfg(all(use_addcarry, not(u64_digit)))] -#[inline] -fn sbb(borrow: u8, a: u32, b: u32, out: &mut u32) -> u8 { - // Safety: There are absolutely no safety concerns with calling `_subborrow_u32`. - // It's just unsafe for API consistency with other intrinsics. 
- unsafe { arch::_subborrow_u32(borrow, a, b, out) } -} - -// fallback for environments where we don't have a subborrow intrinsic -#[cfg(not(use_addcarry))] -#[inline] -fn sbb(borrow: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 { - let difference = SignedDoubleBigDigit::from(a) - - SignedDoubleBigDigit::from(b) - - SignedDoubleBigDigit::from(borrow); - *out = difference as BigDigit; - u8::from(difference < 0) -} - -#[inline] -pub(crate) fn mac_with_carry( - a: BigDigit, - b: BigDigit, - c: BigDigit, - acc: &mut DoubleBigDigit, -) -> BigDigit { - *acc += DoubleBigDigit::from(a); - *acc += DoubleBigDigit::from(b) * DoubleBigDigit::from(c); - let lo = *acc as BigDigit; - *acc >>= big_digit::BITS; - lo -} - -#[inline] -pub(crate) fn mul_with_carry(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit { - *acc += DoubleBigDigit::from(a) * DoubleBigDigit::from(b); - let lo = *acc as BigDigit; - *acc >>= big_digit::BITS; - lo -} - -/// Divide a two digit numerator by a one digit divisor, returns quotient and remainder: -/// -/// Note: the caller must ensure that both the quotient and remainder will fit into a single digit. -/// This is _not_ true for an arbitrary numerator/denominator. -/// -/// (This function also matches what the x86 divide instruction does). -#[inline] -fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) { - debug_assert!(hi < divisor); - - let lhs = big_digit::to_doublebigdigit(hi, lo); - let rhs = DoubleBigDigit::from(divisor); - ((lhs / rhs) as BigDigit, (lhs % rhs) as BigDigit) -} - -/// For small divisors, we can divide without promoting to `DoubleBigDigit` by -/// using half-size pieces of digit, like long-division. -#[inline] -fn div_half(rem: BigDigit, digit: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) { - use crate::big_digit::{HALF, HALF_BITS}; - use num_integer::Integer; - - debug_assert!(rem < divisor && divisor <= HALF); - let (hi, rem) = ((rem << HALF_BITS) | (digit >> HALF_BITS)).div_rem(&divisor); - let (lo, rem) = ((rem << HALF_BITS) | (digit & HALF)).div_rem(&divisor); - ((hi << HALF_BITS) | lo, rem) -} - -#[inline] -pub(crate) fn div_rem_digit(mut a: BigUint, b: BigDigit) -> (BigUint, BigDigit) { - let mut rem = 0; - - if b <= big_digit::HALF { - for d in a.data.iter_mut().rev() { - let (q, r) = div_half(rem, *d, b); - *d = q; - rem = r; - } - } else { - for d in a.data.iter_mut().rev() { - let (q, r) = div_wide(rem, *d, b); - *d = q; - rem = r; - } - } - - (a.normalized(), rem) -} - -#[inline] -pub(crate) fn rem_digit(a: &BigUint, b: BigDigit) -> BigDigit { - let mut rem = 0; - - if b <= big_digit::HALF { - for &digit in a.data.iter().rev() { - let (_, r) = div_half(rem, digit, b); - rem = r; - } - } else { - for &digit in a.data.iter().rev() { - let (_, r) = div_wide(rem, digit, b); - rem = r; - } - } - - rem -} - -/// Two argument addition of raw slices, `a += b`, returning the carry. -/// -/// This is used when the data `Vec` might need to resize to push a non-zero carry, so we perform -/// the addition first hoping that it will fit. -/// -/// The caller _must_ ensure that `a` is at least as long as `b`. 
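// A hedged sketch (not part of the original diff) of the fallback carry/borrow
// arithmetic above, assuming 32-bit digits widened to u64/i64. The names
// `adc_fallback`/`sbb_fallback` are illustrative; the real code picks the digit
// width via cfg flags.
fn adc_fallback(carry: u8, a: u32, b: u32, out: &mut u32) -> u8 {
    let sum = u64::from(a) + u64::from(b) + u64::from(carry);
    *out = sum as u32; // low 32 bits become the output digit
    (sum >> 32) as u8  // the bit above the digit is the new carry (0 or 1)
}
fn sbb_fallback(borrow: u8, a: u32, b: u32, out: &mut u32) -> u8 {
    let diff = i64::from(a) - i64::from(b) - i64::from(borrow);
    *out = diff as u32; // two's-complement truncation yields the digit
    u8::from(diff < 0)  // a negative difference means we borrowed
}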
-#[inline] -pub(crate) fn __add2(a: &mut [BigDigit], b: &[BigDigit]) -> BigDigit { - debug_assert!(a.len() >= b.len()); - - let mut carry = 0; - let (a_lo, a_hi) = a.split_at_mut(b.len()); - - for (a, b) in a_lo.iter_mut().zip(b) { - carry = adc(carry, *a, *b, a); - } - - if carry != 0 { - for a in a_hi { - carry = adc(carry, *a, 0, a); - if carry == 0 { - break; - } - } - } - - carry as BigDigit -} - -/// Two argument addition of raw slices: -/// a += b -/// -/// The caller _must_ ensure that a is big enough to store the result - typically this means -/// resizing a to max(a.len(), b.len()) + 1, to fit a possible carry. -pub(crate) fn add2(a: &mut [BigDigit], b: &[BigDigit]) { - let carry = __add2(a, b); - - debug_assert!(carry == 0); -} - -pub(crate) fn sub2(a: &mut [BigDigit], b: &[BigDigit]) { - let mut borrow = 0; - - let len = cmp::min(a.len(), b.len()); - let (a_lo, a_hi) = a.split_at_mut(len); - let (b_lo, b_hi) = b.split_at(len); - - for (a, b) in a_lo.iter_mut().zip(b_lo) { - borrow = sbb(borrow, *a, *b, a); - } - - if borrow != 0 { - for a in a_hi { - borrow = sbb(borrow, *a, 0, a); - if borrow == 0 { - break; - } - } - } - - // note: we're _required_ to fail on underflow - assert!( - borrow == 0 && b_hi.iter().all(|x| *x == 0), - "Cannot subtract b from a because b is larger than a." - ); -} - -// Only for the Sub impl. `a` and `b` must have same length. -#[inline] -pub(crate) fn __sub2rev(a: &[BigDigit], b: &mut [BigDigit]) -> u8 { - debug_assert!(b.len() == a.len()); - - let mut borrow = 0; - - for (ai, bi) in a.iter().zip(b) { - borrow = sbb(borrow, *ai, *bi, bi); - } - - borrow -} - -pub(crate) fn sub2rev(a: &[BigDigit], b: &mut [BigDigit]) { - debug_assert!(b.len() >= a.len()); - - let len = cmp::min(a.len(), b.len()); - let (a_lo, a_hi) = a.split_at(len); - let (b_lo, b_hi) = b.split_at_mut(len); - - let borrow = __sub2rev(a_lo, b_lo); - - assert!(a_hi.is_empty()); - - // note: we're _required_ to fail on underflow - assert!( - borrow == 0 && b_hi.iter().all(|x| *x == 0), - "Cannot subtract b from a because b is larger than a." - ); -} - -pub(crate) fn sub_sign(a: &[BigDigit], b: &[BigDigit]) -> (Sign, BigUint) { - // Normalize: - let a = &a[..a.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)]; - let b = &b[..b.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)]; - - match cmp_slice(a, b) { - Greater => { - let mut a = a.to_vec(); - sub2(&mut a, b); - (Plus, biguint_from_vec(a)) - } - Less => { - let mut b = b.to_vec(); - sub2(&mut b, a); - (Minus, biguint_from_vec(b)) - } - _ => (NoSign, Zero::zero()), - } -} - -/// Three argument multiply accumulate: -/// acc += b * c -pub(crate) fn mac_digit(acc: &mut [BigDigit], b: &[BigDigit], c: BigDigit) { - if c == 0 { - return; - } - - let mut carry = 0; - let (a_lo, a_hi) = acc.split_at_mut(b.len()); - - for (a, &b) in a_lo.iter_mut().zip(b) { - *a = mac_with_carry(*a, b, c, &mut carry); - } - - let (carry_hi, carry_lo) = big_digit::from_doublebigdigit(carry); - - let final_carry = if carry_hi == 0 { - __add2(a_hi, &[carry_lo]) - } else { - __add2(a_hi, &[carry_hi, carry_lo]) - }; - assert_eq!(final_carry, 0, "carry overflow during multiplication!"); -} - -/// Subtract a multiple. -/// a -= b * c -/// Returns a borrow (if a < b then borrow > 0). 
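// A hedged sketch of the slice addition in `__add2`/`add2` above, with plain u32
// digits: add `b` into the low digits of `a`, then keep rippling the carry
// through the remaining digits of `a` until it dies out. `add_slices` is a
// hypothetical stand-in, not the crate's API.
fn add_slices(a: &mut [u32], b: &[u32]) -> u32 {
    assert!(a.len() >= b.len(), "caller guarantees a is at least as long as b");
    let mut carry = 0u64;
    for (ai, &bi) in a.iter_mut().zip(b) {
        carry += u64::from(*ai) + u64::from(bi);
        *ai = carry as u32;
        carry >>= 32;
    }
    for ai in a[b.len()..].iter_mut() {
        if carry == 0 {
            break;
        }
        carry += u64::from(*ai);
        *ai = carry as u32;
        carry >>= 32;
    }
    carry as u32 // non-zero only if the whole of `a` overflowed
}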
-fn sub_mul_digit_same_len(a: &mut [BigDigit], b: &[BigDigit], c: BigDigit) -> BigDigit { - debug_assert!(a.len() == b.len()); - - // carry is between -big_digit::MAX and 0, so to avoid overflow we store - // offset_carry = carry + big_digit::MAX - let mut offset_carry = big_digit::MAX; - - for (x, y) in a.iter_mut().zip(b) { - // We want to calculate sum = x - y * c + carry. - // sum >= -(big_digit::MAX * big_digit::MAX) - big_digit::MAX - // sum <= big_digit::MAX - // Offsetting sum by (big_digit::MAX << big_digit::BITS) puts it in DoubleBigDigit range. - let offset_sum = big_digit::to_doublebigdigit(big_digit::MAX, *x) - - big_digit::MAX as DoubleBigDigit - + offset_carry as DoubleBigDigit - - *y as DoubleBigDigit * c as DoubleBigDigit; - - let (new_offset_carry, new_x) = big_digit::from_doublebigdigit(offset_sum); - offset_carry = new_offset_carry; - *x = new_x; - } - - // Return the borrow. - big_digit::MAX - offset_carry -} - -fn bigint_from_slice(slice: &[BigDigit]) -> BigInt { - BigInt::from(biguint_from_vec(slice.to_vec())) -} - -/// Three argument multiply accumulate: -/// acc += b * c -#[allow(clippy::many_single_char_names)] -fn mac3(acc: &mut [BigDigit], b: &[BigDigit], c: &[BigDigit]) { - let (x, y) = if b.len() < c.len() { (b, c) } else { (c, b) }; - - // We use three algorithms for different input sizes. - // - // - For small inputs, long multiplication is fastest. - // - Next we use Karatsuba multiplication (Toom-2), which we have optimized - // to avoid unnecessary allocations for intermediate values. - // - For the largest inputs we use Toom-3, which better optimizes the - // number of operations, but uses more temporary allocations. - // - // The thresholds are somewhat arbitrary, chosen by evaluating the results - // of `cargo bench --bench bigint multiply`. 
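// A hedged sketch of the small-input "long multiplication" branch selected by the
// thresholds described above (and implemented just below): for each digit of x,
// multiply-accumulate it against all of y at the matching offset, exactly like
// pencil-and-paper multiplication in base 2^32. `mul_schoolbook` and its u32
// digits are illustrative stand-ins.
fn mul_schoolbook(x: &[u32], y: &[u32]) -> Vec<u32> {
    let mut acc = vec![0u32; x.len() + y.len()];
    for (i, &xi) in x.iter().enumerate() {
        let mut carry = 0u64;
        for (j, &yj) in y.iter().enumerate() {
            carry += u64::from(acc[i + j]) + u64::from(xi) * u64::from(yj);
            acc[i + j] = carry as u32;
            carry >>= 32;
        }
        acc[i + y.len()] = carry as u32; // the final carry lands past y's length
    }
    acc
}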
- - if x.len() <= 32 { - // Long multiplication: - for (i, xi) in x.iter().enumerate() { - mac_digit(&mut acc[i..], y, *xi); - } - } else if x.len() <= 256 { - // Karatsuba multiplication: - // - // The idea is that we break x and y up into two smaller numbers that each have about half - // as many digits, like so (note that multiplying by b is just a shift): - // - // x = x0 + x1 * b - // y = y0 + y1 * b - // - // With some algebra, we can compute x * y with three smaller products, where the inputs to - // each of the smaller products have only about half as many digits as x and y: - // - // x * y = (x0 + x1 * b) * (y0 + y1 * b) - // - // x * y = x0 * y0 - // + x0 * y1 * b - // + x1 * y0 * b - // + x1 * y1 * b^2 - // - // Let p0 = x0 * y0 and p2 = x1 * y1: - // - // x * y = p0 - // + (x0 * y1 + x1 * y0) * b - // + p2 * b^2 - // - // The real trick is that middle term: - // - // x0 * y1 + x1 * y0 - // - // = x0 * y1 + x1 * y0 - p0 + p0 - p2 + p2 - // - // = x0 * y1 + x1 * y0 - x0 * y0 - x1 * y1 + p0 + p2 - // - // Now we complete the square: - // - // = -(x0 * y0 - x0 * y1 - x1 * y0 + x1 * y1) + p0 + p2 - // - // = -((x1 - x0) * (y1 - y0)) + p0 + p2 - // - // Let p1 = (x1 - x0) * (y1 - y0), and substitute back into our original formula: - // - // x * y = p0 - // + (p0 + p2 - p1) * b - // + p2 * b^2 - // - // Where the three intermediate products are: - // - // p0 = x0 * y0 - // p1 = (x1 - x0) * (y1 - y0) - // p2 = x1 * y1 - // - // In doing the computation, we take great care to avoid unnecessary temporary variables - // (since creating a BigUint requires a heap allocation): thus, we rearrange the formula a - // bit so we can use the same temporary variable for all the intermediate products: - // - // x * y = p2 * b^2 + p2 * b - // + p0 * b + p0 - // - p1 * b - // - // The other trick we use is instead of doing explicit shifts, we slice acc at the - // appropriate offset when doing the add. - - // When x is smaller than y, it's significantly faster to pick b such that x is split in - // half, not y: - let b = x.len() / 2; - let (x0, x1) = x.split_at(b); - let (y0, y1) = y.split_at(b); - - // We reuse the same BigUint for all the intermediate multiplies and have to size p - // appropriately here: x1.len() >= x0.len and y1.len() >= y0.len(): - let len = x1.len() + y1.len() + 1; - let mut p = BigUint { data: vec![0; len] }; - - // p2 = x1 * y1 - mac3(&mut p.data[..], x1, y1); - - // Not required, but the adds go faster if we drop any unneeded 0s from the end: - p.normalize(); - - add2(&mut acc[b..], &p.data[..]); - add2(&mut acc[b * 2..], &p.data[..]); - - // Zero out p before the next multiply: - p.data.truncate(0); - p.data.extend(repeat(0).take(len)); - - // p0 = x0 * y0 - mac3(&mut p.data[..], x0, y0); - p.normalize(); - - add2(&mut acc[..], &p.data[..]); - add2(&mut acc[b..], &p.data[..]); - - // p1 = (x1 - x0) * (y1 - y0) - // We do this one last, since it may be negative and acc can't ever be negative: - let (j0_sign, j0) = sub_sign(x1, x0); - let (j1_sign, j1) = sub_sign(y1, y0); - - match j0_sign * j1_sign { - Plus => { - p.data.truncate(0); - p.data.extend(repeat(0).take(len)); - - mac3(&mut p.data[..], &j0.data[..], &j1.data[..]); - p.normalize(); - - sub2(&mut acc[b..], &p.data[..]); - } - Minus => { - mac3(&mut acc[b..], &j0.data[..], &j1.data[..]); - } - NoSign => (), - } - } else { - // Toom-3 multiplication: - // - // Toom-3 is like Karatsuba above, but dividing the inputs into three parts. - // Both are instances of Toom-Cook, using `k=3` and `k=2` respectively. 
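// A hedged sketch of the Karatsuba identity walked through above, using a single
// u64*u64 product split into 32-bit halves instead of BigUint digit slices. The
// middle term x0*y1 + x1*y0 is recovered as p0 + p2 - p1, where
// p1 = (x1 - x0) * (y1 - y0) may be negative. `karatsuba_u64` is illustrative only.
fn karatsuba_u64(x: u64, y: u64) -> u128 {
    let (x1, x0) = (x >> 32, x & 0xffff_ffff);
    let (y1, y0) = (y >> 32, y & 0xffff_ffff);

    let p0 = u128::from(x0) * u128::from(y0); // low * low
    let p2 = u128::from(x1) * u128::from(y1); // high * high
    let p1 = (x1 as i128 - x0 as i128) * (y1 as i128 - y0 as i128);

    let middle = (p0 as i128 + p2 as i128 - p1) as u128; // x0*y1 + x1*y0
    p0 + (middle << 32) + (p2 << 64)
}
// karatsuba_u64(a, b) == u128::from(a) * u128::from(b) for all a, b.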
- // - // The general idea is to treat the large integers digits as - // polynomials of a certain degree and determine the coefficients/digits - // of the product of the two via interpolation of the polynomial product. - let i = y.len() / 3 + 1; - - let x0_len = cmp::min(x.len(), i); - let x1_len = cmp::min(x.len() - x0_len, i); - - let y0_len = i; - let y1_len = cmp::min(y.len() - y0_len, i); - - // Break x and y into three parts, representating an order two polynomial. - // t is chosen to be the size of a digit so we can use faster shifts - // in place of multiplications. - // - // x(t) = x2*t^2 + x1*t + x0 - let x0 = bigint_from_slice(&x[..x0_len]); - let x1 = bigint_from_slice(&x[x0_len..x0_len + x1_len]); - let x2 = bigint_from_slice(&x[x0_len + x1_len..]); - - // y(t) = y2*t^2 + y1*t + y0 - let y0 = bigint_from_slice(&y[..y0_len]); - let y1 = bigint_from_slice(&y[y0_len..y0_len + y1_len]); - let y2 = bigint_from_slice(&y[y0_len + y1_len..]); - - // Let w(t) = x(t) * y(t) - // - // This gives us the following order-4 polynomial. - // - // w(t) = w4*t^4 + w3*t^3 + w2*t^2 + w1*t + w0 - // - // We need to find the coefficients w4, w3, w2, w1 and w0. Instead - // of simply multiplying the x and y in total, we can evaluate w - // at 5 points. An n-degree polynomial is uniquely identified by (n + 1) - // points. - // - // It is arbitrary as to what points we evaluate w at but we use the - // following. - // - // w(t) at t = 0, 1, -1, -2 and inf - // - // The values for w(t) in terms of x(t)*y(t) at these points are: - // - // let a = w(0) = x0 * y0 - // let b = w(1) = (x2 + x1 + x0) * (y2 + y1 + y0) - // let c = w(-1) = (x2 - x1 + x0) * (y2 - y1 + y0) - // let d = w(-2) = (4*x2 - 2*x1 + x0) * (4*y2 - 2*y1 + y0) - // let e = w(inf) = x2 * y2 as t -> inf - - // x0 + x2, avoiding temporaries - let p = &x0 + &x2; - - // y0 + y2, avoiding temporaries - let q = &y0 + &y2; - - // x2 - x1 + x0, avoiding temporaries - let p2 = &p - &x1; - - // y2 - y1 + y0, avoiding temporaries - let q2 = &q - &y1; - - // w(0) - let r0 = &x0 * &y0; - - // w(inf) - let r4 = &x2 * &y2; - - // w(1) - let r1 = (p + x1) * (q + y1); - - // w(-1) - let r2 = &p2 * &q2; - - // w(-2) - let r3 = ((p2 + x2) * 2 - x0) * ((q2 + y2) * 2 - y0); - - // Evaluating these points gives us the following system of linear equations. - // - // 0 0 0 0 1 | a - // 1 1 1 1 1 | b - // 1 -1 1 -1 1 | c - // 16 -8 4 -2 1 | d - // 1 0 0 0 0 | e - // - // The solved equation (after gaussian elimination or similar) - // in terms of its coefficients: - // - // w0 = w(0) - // w1 = w(0)/2 + w(1)/3 - w(-1) + w(2)/6 - 2*w(inf) - // w2 = -w(0) + w(1)/2 + w(-1)/2 - w(inf) - // w3 = -w(0)/2 + w(1)/6 + w(-1)/2 - w(1)/6 - // w4 = w(inf) - // - // This particular sequence is given by Bodrato and is an interpolation - // of the above equations. - let mut comp3: BigInt = (r3 - &r1) / 3; - let mut comp1: BigInt = (r1 - &r2) / 2; - let mut comp2: BigInt = r2 - &r0; - comp3 = (&comp2 - comp3) / 2 + &r4 * 2; - comp2 += &comp1 - &r4; - comp1 -= &comp3; - - // Recomposition. The coefficients of the polynomial are now known. - // - // Evaluate at w(t) where t is our given base to get the result. 
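// A hedged sketch of the Bodrato interpolation sequence above, with i128 standing
// in for BigInt. Given w(t) evaluated at t = 0, 1, -1, -2 and infinity, it
// recovers the coefficients of w(t) = w0 + w1*t + w2*t^2 + w3*t^3 + w4*t^4; the
// divisions by 3 and 2 are exact whenever the five values come from an
// integer-coefficient w(t). `toom3_interpolate` is illustrative, not the crate's API.
fn toom3_interpolate(r0: i128, r1: i128, r2: i128, r3: i128, r4: i128) -> [i128; 5] {
    let mut comp3 = (r3 - r1) / 3;
    let mut comp1 = (r1 - r2) / 2;
    let mut comp2 = r2 - r0;
    comp3 = (comp2 - comp3) / 2 + 2 * r4;
    comp2 += comp1 - r4;
    comp1 -= comp3;
    [r0, comp1, comp2, comp3, r4] // [w0, w1, w2, w3, w4]
}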
- let bits = u64::from(big_digit::BITS) * i as u64; - let result = r0 - + (comp1 << bits) - + (comp2 << (2 * bits)) - + (comp3 << (3 * bits)) - + (r4 << (4 * bits)); - let result_pos = result.to_biguint().unwrap(); - add2(&mut acc[..], &result_pos.data); - } -} - -pub(crate) fn mul3(x: &[BigDigit], y: &[BigDigit]) -> BigUint { - let len = x.len() + y.len() + 1; - let mut prod = BigUint { data: vec![0; len] }; - - mac3(&mut prod.data[..], x, y); - prod.normalized() -} - -pub(crate) fn scalar_mul(a: &mut [BigDigit], b: BigDigit) -> BigDigit { - let mut carry = 0; - for a in a.iter_mut() { - *a = mul_with_carry(*a, b, &mut carry); - } - carry as BigDigit -} - -pub(crate) fn div_rem(mut u: BigUint, mut d: BigUint) -> (BigUint, BigUint) { - if d.is_zero() { - panic!("attempt to divide by zero") - } - if u.is_zero() { - return (Zero::zero(), Zero::zero()); - } - - if d.data.len() == 1 { - if d.data == [1] { - return (u, Zero::zero()); - } - let (div, rem) = div_rem_digit(u, d.data[0]); - // reuse d - d.data.clear(); - d += rem; - return (div, d); - } - - // Required or the q_len calculation below can underflow: - match u.cmp(&d) { - Less => return (Zero::zero(), u), - Equal => { - u.set_one(); - return (u, Zero::zero()); - } - Greater => {} // Do nothing - } - - // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D: - // - // First, normalize the arguments so the highest bit in the highest digit of the divisor is - // set: the main loop uses the highest digit of the divisor for generating guesses, so we - // want it to be the largest number we can efficiently divide by. - // - let shift = d.data.last().unwrap().leading_zeros() as usize; - - let (q, r) = if shift == 0 { - // no need to clone d - div_rem_core(u, &d) - } else { - div_rem_core(u << shift, &(d << shift)) - }; - // renormalize the remainder - (q, r >> shift) -} - -pub(crate) fn div_rem_ref(u: &BigUint, d: &BigUint) -> (BigUint, BigUint) { - if d.is_zero() { - panic!("attempt to divide by zero") - } - if u.is_zero() { - return (Zero::zero(), Zero::zero()); - } - - if d.data.len() == 1 { - if d.data == [1] { - return (u.clone(), Zero::zero()); - } - - let (div, rem) = div_rem_digit(u.clone(), d.data[0]); - return (div, rem.into()); - } - - // Required or the q_len calculation below can underflow: - match u.cmp(d) { - Less => return (Zero::zero(), u.clone()), - Equal => return (One::one(), Zero::zero()), - Greater => {} // Do nothing - } - - // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D: - // - // First, normalize the arguments so the highest bit in the highest digit of the divisor is - // set: the main loop uses the highest digit of the divisor for generating guesses, so we - // want it to be the largest number we can efficiently divide by. - // - let shift = d.data.last().unwrap().leading_zeros() as usize; - - let (q, r) = if shift == 0 { - // no need to clone d - div_rem_core(u.clone(), d) - } else { - div_rem_core(u << shift, &(d << shift)) - }; - // renormalize the remainder - (q, r >> shift) -} - -/// An implementation of the base division algorithm. -/// Knuth, TAOCP vol 2 section 4.3.1, algorithm D, with an improvement from exercises 19-21. -fn div_rem_core(mut a: BigUint, b: &BigUint) -> (BigUint, BigUint) { - debug_assert!( - a.data.len() >= b.data.len() - && b.data.len() > 1 - && b.data.last().unwrap().leading_zeros() == 0 - ); - - // The algorithm works by incrementally calculating "guesses", q0, for the next digit of the - // quotient. 
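// A hedged sketch of the normalization step in `div_rem`/`div_rem_ref` above, on
// plain machine integers: shift both operands left until the divisor's highest
// bit is set, divide, then shift the remainder back down. Scaling both operands
// by the same power of two leaves the quotient unchanged and multiplies the
// remainder by that power. `div_rem_normalized` is illustrative only.
fn div_rem_normalized(u: u64, d: u64) -> (u64, u64) {
    assert!(d != 0, "attempt to divide by zero");
    let shift = d.leading_zeros();
    let un = u128::from(u) << shift; // widen so the shift cannot overflow
    let dn = u128::from(d) << shift;
    let q = (un / dn) as u64;
    let r = ((un % dn) >> shift) as u64; // renormalize the remainder
    (q, r)
}
// For every non-zero d, div_rem_normalized(u, d) == (u / d, u % d).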
Once we have any number q0 such that (q0 << j) * b <= a, we can set - // - // q += q0 << j - // a -= (q0 << j) * b - // - // and then iterate until a < b. Then, (q, a) will be our desired quotient and remainder. - // - // q0, our guess, is calculated by dividing the last three digits of a by the last two digits of - // b - this will give us a guess that is close to the actual quotient, but is possibly greater. - // It can only be greater by 1 and only in rare cases, with probability at most - // 2^-(big_digit::BITS-1) for random a, see TAOCP 4.3.1 exercise 21. - // - // If the quotient turns out to be too large, we adjust it by 1: - // q -= 1 << j - // a += b << j - - // a0 stores an additional extra most significant digit of the dividend, not stored in a. - let mut a0 = 0; - - // [b1, b0] are the two most significant digits of the divisor. They never change. - let b0 = *b.data.last().unwrap(); - let b1 = b.data[b.data.len() - 2]; - - let q_len = a.data.len() - b.data.len() + 1; - let mut q = BigUint { - data: vec![0; q_len], - }; - - for j in (0..q_len).rev() { - debug_assert!(a.data.len() == b.data.len() + j); - - let a1 = *a.data.last().unwrap(); - let a2 = a.data[a.data.len() - 2]; - - // The first q0 estimate is [a1,a0] / b0. It will never be too small, it may be too large - // by at most 2. - let (mut q0, mut r) = if a0 < b0 { - let (q0, r) = div_wide(a0, a1, b0); - (q0, r as DoubleBigDigit) - } else { - debug_assert!(a0 == b0); - // Avoid overflowing q0, we know the quotient fits in BigDigit. - // [a1,a0] = b0 * (1< a0 { - // q0 is too large. We need to add back one multiple of b. - q0 -= 1; - borrow -= __add2(&mut a.data[j..], &b.data); - } - // The top digit of a, stored in a0, has now been zeroed. - debug_assert!(borrow == a0); - - q.data[j] = q0; - - // Pop off the next top digit of a. 
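// A hedged sketch of the quotient-digit guess used in the loop above, with 32-bit
// digits: estimate q0 from the top two dividend digits and the top divisor digit,
// then correct downward while q0 times the top two divisor digits overshoots the
// top three dividend digits. With a normalized divisor the initial estimate is
// too large by at most 2 (TAOCP 4.3.1). `estimate_q0` is illustrative only.
fn estimate_q0(a0: u32, a1: u32, a2: u32, b0: u32, b1: u32) -> u32 {
    debug_assert!(b0.leading_zeros() == 0, "divisor must be normalized");
    debug_assert!(a0 < b0, "the quotient digit must fit in one digit");

    let mut q0 = ((u64::from(a0) << 32 | u64::from(a1)) / u64::from(b0)) as u32;

    let prefix = u128::from(a0) << 64 | u128::from(a1) << 32 | u128::from(a2);
    let b_top = u128::from(b0) << 32 | u128::from(b1);
    while u128::from(q0) * b_top > prefix {
        q0 -= 1; // runs at most twice
    }
    q0
}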
- a0 = a.data.pop().unwrap(); - } - - a.data.push(a0); - a.normalize(); - - debug_assert!(a < *b); - - (q.normalized(), a) -} - -/// Find last set bit -/// fls(0) == 0, fls(u32::MAX) == 32 -pub(crate) fn fls(v: T) -> u8 { - mem::size_of::() as u8 * 8 - v.leading_zeros() as u8 -} - -pub(crate) fn ilog2(v: T) -> u8 { - fls(v) - 1 -} - -#[inline] -pub(crate) fn biguint_shl(n: Cow<'_, BigUint>, shift: T) -> BigUint { - if shift < T::zero() { - panic!("attempt to shift left with negative"); - } - if n.is_zero() { - return n.into_owned(); - } - let bits = T::from(big_digit::BITS).unwrap(); - let digits = (shift / bits).to_usize().expect("capacity overflow"); - let shift = (shift % bits).to_u8().unwrap(); - biguint_shl2(n, digits, shift) -} - -fn biguint_shl2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint { - let mut data = match digits { - 0 => n.into_owned().data, - _ => { - let len = digits.saturating_add(n.data.len() + 1); - let mut data = Vec::with_capacity(len); - data.extend(repeat(0).take(digits)); - data.extend(n.data.iter()); - data - } - }; - - if shift > 0 { - let mut carry = 0; - let carry_shift = big_digit::BITS as u8 - shift; - for elem in data[digits..].iter_mut() { - let new_carry = *elem >> carry_shift; - *elem = (*elem << shift) | carry; - carry = new_carry; - } - if carry != 0 { - data.push(carry); - } - } - - biguint_from_vec(data) -} - -#[inline] -pub(crate) fn biguint_shr(n: Cow<'_, BigUint>, shift: T) -> BigUint { - if shift < T::zero() { - panic!("attempt to shift right with negative"); - } - if n.is_zero() { - return n.into_owned(); - } - let bits = T::from(big_digit::BITS).unwrap(); - let digits = (shift / bits).to_usize().unwrap_or(core::usize::MAX); - let shift = (shift % bits).to_u8().unwrap(); - biguint_shr2(n, digits, shift) -} - -fn biguint_shr2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint { - if digits >= n.data.len() { - let mut n = n.into_owned(); - n.set_zero(); - return n; - } - let mut data = match n { - Cow::Borrowed(n) => n.data[digits..].to_vec(), - Cow::Owned(mut n) => { - n.data.drain(..digits); - n.data - } - }; - - if shift > 0 { - let mut borrow = 0; - let borrow_shift = big_digit::BITS as u8 - shift; - for elem in data.iter_mut().rev() { - let new_borrow = *elem << borrow_shift; - *elem = (*elem >> shift) | borrow; - borrow = new_borrow; - } - } - - biguint_from_vec(data) -} - -pub(crate) fn cmp_slice(a: &[BigDigit], b: &[BigDigit]) -> Ordering { - debug_assert!(a.last() != Some(&0)); - debug_assert!(b.last() != Some(&0)); - - match Ord::cmp(&a.len(), &b.len()) { - Equal => Iterator::cmp(a.iter().rev(), b.iter().rev()), - other => other, - } -} - -#[cfg(test)] -mod algorithm_tests { - use crate::big_digit::BigDigit; - use crate::{BigInt, BigUint}; - use num_traits::Num; - - #[test] - fn test_sub_sign() { - use super::sub_sign; - - fn sub_sign_i(a: &[BigDigit], b: &[BigDigit]) -> BigInt { - let (sign, val) = sub_sign(a, b); - BigInt::from_biguint(sign, val) - } - - let a = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap(); - let b = BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap(); - let a_i = BigInt::from(a.clone()); - let b_i = BigInt::from(b.clone()); - - assert_eq!(sub_sign_i(&a.data[..], &b.data[..]), &a_i - &b_i); - assert_eq!(sub_sign_i(&b.data[..], &a.data[..]), &b_i - &a_i); - } -} diff --git a/src/bigint.rs b/src/bigint.rs index aa3179d6..4b0e4891 100644 --- a/src/bigint.rs +++ b/src/bigint.rs @@ -1,43 +1,40 @@ // `Add`/`Sub` ops may flip from `BigInt` to its `BigUint` 
magnitude #![allow(clippy::suspicious_arithmetic_impl)] -#[cfg(any(feature = "quickcheck", feature = "arbitrary"))] -use crate::std_alloc::Box; use crate::std_alloc::{String, Vec}; -use core::cmp::Ordering::{self, Equal, Greater, Less}; -#[cfg(has_try_from)] -use core::convert::TryFrom; +use core::cmp::Ordering::{self, Equal}; use core::default::Default; use core::fmt; use core::hash; -use core::iter::{Product, Sum}; -use core::mem; -use core::ops::{ - Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign, - Mul, MulAssign, Neg, Not, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign, -}; -use core::str::{self, FromStr}; +use core::ops::{Neg, Not}; +use core::str; use core::{i128, u128}; use core::{i64, u64}; use num_integer::{Integer, Roots}; -use num_traits::{ - CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Num, One, Pow, PrimInt, Signed, - ToPrimitive, Zero, -}; +use num_traits::{Num, One, Pow, Signed, Zero}; use self::Sign::{Minus, NoSign, Plus}; -use crate::big_digit::{self, BigDigit, DoubleBigDigit}; -use crate::biguint; +use crate::big_digit::BigDigit; use crate::biguint::to_str_radix_reversed; use crate::biguint::{BigUint, IntDigits, U32Digits, U64Digits}; -use crate::ParseBigIntError; -#[cfg(has_try_from)] -use crate::TryFromBigIntError; -use crate::IsizePromotion; -use crate::UsizePromotion; +mod addition; +mod division; +mod multiplication; +mod subtraction; + +mod bits; +mod convert; +mod power; +mod shift; + +#[cfg(any(feature = "quickcheck", feature = "arbitrary"))] +mod arbitrary; + +#[cfg(feature = "serde")] +mod serde; /// A Sign is a `BigInt`'s composing element. #[derive(PartialEq, PartialOrd, Eq, Ord, Copy, Clone, Debug, Hash)] @@ -61,57 +58,6 @@ impl Neg for Sign { } } -impl Mul for Sign { - type Output = Sign; - - #[inline] - fn mul(self, other: Sign) -> Sign { - match (self, other) { - (NoSign, _) | (_, NoSign) => NoSign, - (Plus, Plus) | (Minus, Minus) => Plus, - (Plus, Minus) | (Minus, Plus) => Minus, - } - } -} - -#[cfg(feature = "serde")] -impl serde::Serialize for Sign { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - // Note: do not change the serialization format, or it may break - // forward and backward compatibility of serialized data! - match *self { - Sign::Minus => (-1i8).serialize(serializer), - Sign::NoSign => 0i8.serialize(serializer), - Sign::Plus => 1i8.serialize(serializer), - } - } -} - -#[cfg(feature = "serde")] -impl<'de> serde::Deserialize<'de> for Sign { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - use serde::de::Error; - use serde::de::Unexpected; - - let sign: i8 = serde::Deserialize::deserialize(deserializer)?; - match sign { - -1 => Ok(Sign::Minus), - 0 => Ok(Sign::NoSign), - 1 => Ok(Sign::Plus), - _ => Err(D::Error::invalid_value( - Unexpected::Signed(sign.into()), - &"a sign of -1, 0, or 1", - )), - } - } -} - /// A big signed integer type. 
#[derive(Debug)] pub struct BigInt { @@ -137,41 +83,6 @@ impl Clone for BigInt { } } -#[cfg(feature = "quickcheck")] -impl quickcheck::Arbitrary for BigInt { - fn arbitrary(g: &mut G) -> Self { - let positive = bool::arbitrary(g); - let sign = if positive { Sign::Plus } else { Sign::Minus }; - Self::from_biguint(sign, BigUint::arbitrary(g)) - } - - fn shrink(&self) -> Box> { - let sign = self.sign(); - let unsigned_shrink = self.data.shrink(); - Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x))) - } -} - -#[cfg(feature = "arbitrary")] -mod abitrary_impl { - use super::*; - use arbitrary::{Arbitrary, Result, Unstructured}; - - impl Arbitrary for BigInt { - fn arbitrary(u: &mut Unstructured<'_>) -> Result { - let positive = bool::arbitrary(u)?; - let sign = if positive { Sign::Plus } else { Sign::Minus }; - Ok(Self::from_biguint(sign, BigUint::arbitrary(u)?)) - } - - fn shrink(&self) -> Box> { - let sign = self.sign(); - let unsigned_shrink = self.data.shrink(); - Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x))) - } - } -} - impl hash::Hash for BigInt { #[inline] fn hash(&self, state: &mut H) { @@ -258,30 +169,6 @@ impl fmt::UpperHex for BigInt { } } -// Negation in two's complement. -// acc must be initialized as 1 for least-significant digit. -// -// When negating, a carry (acc == 1) means that all the digits -// considered to this point were zero. This means that if all the -// digits of a negative BigInt have been considered, carry must be -// zero as we cannot have negative zero. -// -// 01 -> ...f ff -// ff -> ...f 01 -// 01 00 -> ...f ff 00 -// 01 01 -> ...f fe ff -// 01 ff -> ...f fe 01 -// ff 00 -> ...f 01 00 -// ff 01 -> ...f 00 ff -// ff ff -> ...f 00 01 -#[inline] -fn negate_carry(a: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit { - *acc += DoubleBigDigit::from(!a); - let lo = *acc as BigDigit; - *acc >>= big_digit::BITS; - lo -} - // !-2 = !...f fe = ...0 01 = +1 // !-1 = !...f ff = ...0 00 = 0 // ! 
0 = !...0 00 = ...f ff = -1 @@ -316,553 +203,6 @@ impl<'a> Not for &'a BigInt { } } -// + 1 & -ff = ...0 01 & ...f 01 = ...0 01 = + 1 -// +ff & - 1 = ...0 ff & ...f ff = ...0 ff = +ff -// answer is pos, has length of a -fn bitand_pos_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_b = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_b = negate_carry(bi, &mut carry_b); - *ai &= twos_b; - } - debug_assert!(b.len() > a.len() || carry_b == 0); -} - -// - 1 & +ff = ...f ff & ...0 ff = ...0 ff = +ff -// -ff & + 1 = ...f 01 & ...0 01 = ...0 01 = + 1 -// answer is pos, has length of b -fn bitand_neg_pos(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = twos_a & bi; - } - debug_assert!(a.len() > b.len() || carry_a == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => a.truncate(b.len()), - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().cloned()); - } - } -} - -// - 1 & -ff = ...f ff & ...f 01 = ...f 01 = - ff -// -ff & - 1 = ...f 01 & ...f ff = ...f 01 = - ff -// -ff & -fe = ...f 01 & ...f 02 = ...f 00 = -100 -// answer is neg, has length of longest with a possible carry -fn bitand_neg_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - let mut carry_b = 1; - let mut carry_and = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - let twos_b = negate_carry(bi, &mut carry_b); - *ai = negate_carry(twos_a & twos_b, &mut carry_and); - } - debug_assert!(a.len() > b.len() || carry_a == 0); - debug_assert!(b.len() > a.len() || carry_b == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => { - for ai in a[b.len()..].iter_mut() { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = negate_carry(twos_a, &mut carry_and); - } - debug_assert!(carry_a == 0); - } - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().map(|&bi| { - let twos_b = negate_carry(bi, &mut carry_b); - negate_carry(twos_b, &mut carry_and) - })); - debug_assert!(carry_b == 0); - } - } - if carry_and != 0 { - a.push(1); - } -} - -forward_val_val_binop!(impl BitAnd for BigInt, bitand); -forward_ref_val_binop!(impl BitAnd for BigInt, bitand); - -// do not use forward_ref_ref_binop_commutative! 
for bitand so that we can -// clone as needed, avoiding over-allocation -impl<'a, 'b> BitAnd<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn bitand(self, other: &BigInt) -> BigInt { - match (self.sign, other.sign) { - (NoSign, _) | (_, NoSign) => BigInt::zero(), - (Plus, Plus) => BigInt::from(&self.data & &other.data), - (Plus, Minus) => self.clone() & other, - (Minus, Plus) => other.clone() & self, - (Minus, Minus) => { - // forward to val-ref, choosing the larger to clone - if self.len() >= other.len() { - self.clone() & other - } else { - other.clone() & self - } - } - } - } -} - -impl<'a> BitAnd<&'a BigInt> for BigInt { - type Output = BigInt; - - #[inline] - fn bitand(mut self, other: &BigInt) -> BigInt { - self &= other; - self - } -} - -forward_val_assign!(impl BitAndAssign for BigInt, bitand_assign); - -impl<'a> BitAndAssign<&'a BigInt> for BigInt { - fn bitand_assign(&mut self, other: &BigInt) { - match (self.sign, other.sign) { - (NoSign, _) => {} - (_, NoSign) => self.set_zero(), - (Plus, Plus) => { - self.data &= &other.data; - if self.data.is_zero() { - self.sign = NoSign; - } - } - (Plus, Minus) => { - bitand_pos_neg(self.digits_mut(), other.digits()); - self.normalize(); - } - (Minus, Plus) => { - bitand_neg_pos(self.digits_mut(), other.digits()); - self.sign = Plus; - self.normalize(); - } - (Minus, Minus) => { - bitand_neg_neg(self.digits_mut(), other.digits()); - self.normalize(); - } - } - } -} - -// + 1 | -ff = ...0 01 | ...f 01 = ...f 01 = -ff -// +ff | - 1 = ...0 ff | ...f ff = ...f ff = - 1 -// answer is neg, has length of b -fn bitor_pos_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_b = 1; - let mut carry_or = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_b = negate_carry(bi, &mut carry_b); - *ai = negate_carry(*ai | twos_b, &mut carry_or); - } - debug_assert!(b.len() > a.len() || carry_b == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => { - a.truncate(b.len()); - } - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().map(|&bi| { - let twos_b = negate_carry(bi, &mut carry_b); - negate_carry(twos_b, &mut carry_or) - })); - debug_assert!(carry_b == 0); - } - } - // for carry_or to be non-zero, we would need twos_b == 0 - debug_assert!(carry_or == 0); -} - -// - 1 | +ff = ...f ff | ...0 ff = ...f ff = - 1 -// -ff | + 1 = ...f 01 | ...0 01 = ...f 01 = -ff -// answer is neg, has length of a -fn bitor_neg_pos(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - let mut carry_or = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = negate_carry(twos_a | bi, &mut carry_or); - } - debug_assert!(a.len() > b.len() || carry_a == 0); - if a.len() > b.len() { - for ai in a[b.len()..].iter_mut() { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = negate_carry(twos_a, &mut carry_or); - } - debug_assert!(carry_a == 0); - } - // for carry_or to be non-zero, we would need twos_a == 0 - debug_assert!(carry_or == 0); -} - -// - 1 | -ff = ...f ff | ...f 01 = ...f ff = -1 -// -ff | - 1 = ...f 01 | ...f ff = ...f ff = -1 -// answer is neg, has length of shortest -fn bitor_neg_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - let mut carry_b = 1; - let mut carry_or = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - let twos_b = negate_carry(bi, &mut carry_b); - *ai = negate_carry(twos_a | twos_b, &mut carry_or); - } - debug_assert!(a.len() > b.len() || carry_a == 0); - 
debug_assert!(b.len() > a.len() || carry_b == 0); - if a.len() > b.len() { - a.truncate(b.len()); - } - // for carry_or to be non-zero, we would need twos_a == 0 or twos_b == 0 - debug_assert!(carry_or == 0); -} - -forward_val_val_binop!(impl BitOr for BigInt, bitor); -forward_ref_val_binop!(impl BitOr for BigInt, bitor); - -// do not use forward_ref_ref_binop_commutative! for bitor so that we can -// clone as needed, avoiding over-allocation -impl<'a, 'b> BitOr<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn bitor(self, other: &BigInt) -> BigInt { - match (self.sign, other.sign) { - (NoSign, _) => other.clone(), - (_, NoSign) => self.clone(), - (Plus, Plus) => BigInt::from(&self.data | &other.data), - (Plus, Minus) => other.clone() | self, - (Minus, Plus) => self.clone() | other, - (Minus, Minus) => { - // forward to val-ref, choosing the smaller to clone - if self.len() <= other.len() { - self.clone() | other - } else { - other.clone() | self - } - } - } - } -} - -impl<'a> BitOr<&'a BigInt> for BigInt { - type Output = BigInt; - - #[inline] - fn bitor(mut self, other: &BigInt) -> BigInt { - self |= other; - self - } -} - -forward_val_assign!(impl BitOrAssign for BigInt, bitor_assign); - -impl<'a> BitOrAssign<&'a BigInt> for BigInt { - fn bitor_assign(&mut self, other: &BigInt) { - match (self.sign, other.sign) { - (_, NoSign) => {} - (NoSign, _) => self.clone_from(other), - (Plus, Plus) => self.data |= &other.data, - (Plus, Minus) => { - bitor_pos_neg(self.digits_mut(), other.digits()); - self.sign = Minus; - self.normalize(); - } - (Minus, Plus) => { - bitor_neg_pos(self.digits_mut(), other.digits()); - self.normalize(); - } - (Minus, Minus) => { - bitor_neg_neg(self.digits_mut(), other.digits()); - self.normalize(); - } - } - } -} - -// + 1 ^ -ff = ...0 01 ^ ...f 01 = ...f 00 = -100 -// +ff ^ - 1 = ...0 ff ^ ...f ff = ...f 00 = -100 -// answer is neg, has length of longest with a possible carry -fn bitxor_pos_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_b = 1; - let mut carry_xor = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_b = negate_carry(bi, &mut carry_b); - *ai = negate_carry(*ai ^ twos_b, &mut carry_xor); - } - debug_assert!(b.len() > a.len() || carry_b == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => { - for ai in a[b.len()..].iter_mut() { - let twos_b = !0; - *ai = negate_carry(*ai ^ twos_b, &mut carry_xor); - } - } - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().map(|&bi| { - let twos_b = negate_carry(bi, &mut carry_b); - negate_carry(twos_b, &mut carry_xor) - })); - debug_assert!(carry_b == 0); - } - } - if carry_xor != 0 { - a.push(1); - } -} - -// - 1 ^ +ff = ...f ff ^ ...0 ff = ...f 00 = -100 -// -ff ^ + 1 = ...f 01 ^ ...0 01 = ...f 00 = -100 -// answer is neg, has length of longest with a possible carry -fn bitxor_neg_pos(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - let mut carry_xor = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = negate_carry(twos_a ^ bi, &mut carry_xor); - } - debug_assert!(a.len() > b.len() || carry_a == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => { - for ai in a[b.len()..].iter_mut() { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = negate_carry(twos_a, &mut carry_xor); - } - debug_assert!(carry_a == 0); - } - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().map(|&bi| { - let twos_a = !0; - negate_carry(twos_a ^ bi, &mut carry_xor) - })); - } - } 
- if carry_xor != 0 { - a.push(1); - } -} - -// - 1 ^ -ff = ...f ff ^ ...f 01 = ...0 fe = +fe -// -ff & - 1 = ...f 01 ^ ...f ff = ...0 fe = +fe -// answer is pos, has length of longest -fn bitxor_neg_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - let mut carry_b = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - let twos_b = negate_carry(bi, &mut carry_b); - *ai = twos_a ^ twos_b; - } - debug_assert!(a.len() > b.len() || carry_a == 0); - debug_assert!(b.len() > a.len() || carry_b == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => { - for ai in a[b.len()..].iter_mut() { - let twos_a = negate_carry(*ai, &mut carry_a); - let twos_b = !0; - *ai = twos_a ^ twos_b; - } - debug_assert!(carry_a == 0); - } - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().map(|&bi| { - let twos_a = !0; - let twos_b = negate_carry(bi, &mut carry_b); - twos_a ^ twos_b - })); - debug_assert!(carry_b == 0); - } - } -} - -forward_all_binop_to_val_ref_commutative!(impl BitXor for BigInt, bitxor); - -impl<'a> BitXor<&'a BigInt> for BigInt { - type Output = BigInt; - - #[inline] - fn bitxor(mut self, other: &BigInt) -> BigInt { - self ^= other; - self - } -} - -forward_val_assign!(impl BitXorAssign for BigInt, bitxor_assign); - -impl<'a> BitXorAssign<&'a BigInt> for BigInt { - fn bitxor_assign(&mut self, other: &BigInt) { - match (self.sign, other.sign) { - (_, NoSign) => {} - (NoSign, _) => self.clone_from(other), - (Plus, Plus) => { - self.data ^= &other.data; - if self.data.is_zero() { - self.sign = NoSign; - } - } - (Plus, Minus) => { - bitxor_pos_neg(self.digits_mut(), other.digits()); - self.sign = Minus; - self.normalize(); - } - (Minus, Plus) => { - bitxor_neg_pos(self.digits_mut(), other.digits()); - self.normalize(); - } - (Minus, Minus) => { - bitxor_neg_neg(self.digits_mut(), other.digits()); - self.sign = Plus; - self.normalize(); - } - } - } -} - -impl FromStr for BigInt { - type Err = ParseBigIntError; - - #[inline] - fn from_str(s: &str) -> Result { - BigInt::from_str_radix(s, 10) - } -} - -impl Num for BigInt { - type FromStrRadixErr = ParseBigIntError; - - /// Creates and initializes a BigInt. - #[inline] - fn from_str_radix(mut s: &str, radix: u32) -> Result { - let sign = if s.starts_with('-') { - let tail = &s[1..]; - if !tail.starts_with('+') { - s = tail - } - Minus - } else { - Plus - }; - let bu = BigUint::from_str_radix(s, radix)?; - Ok(BigInt::from_biguint(sign, bu)) - } -} - -macro_rules! 
impl_shift { - (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => { - impl<'b> $Shx<&'b $rhs> for BigInt { - type Output = BigInt; - - #[inline] - fn $shx(self, rhs: &'b $rhs) -> BigInt { - $Shx::$shx(self, *rhs) - } - } - impl<'a, 'b> $Shx<&'b $rhs> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn $shx(self, rhs: &'b $rhs) -> BigInt { - $Shx::$shx(self, *rhs) - } - } - impl<'b> $ShxAssign<&'b $rhs> for BigInt { - #[inline] - fn $shx_assign(&mut self, rhs: &'b $rhs) { - $ShxAssign::$shx_assign(self, *rhs); - } - } - }; - ($($rhs:ty),+) => {$( - impl Shl<$rhs> for BigInt { - type Output = BigInt; - - #[inline] - fn shl(self, rhs: $rhs) -> BigInt { - BigInt::from_biguint(self.sign, self.data << rhs) - } - } - impl<'a> Shl<$rhs> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn shl(self, rhs: $rhs) -> BigInt { - BigInt::from_biguint(self.sign, &self.data << rhs) - } - } - impl ShlAssign<$rhs> for BigInt { - #[inline] - fn shl_assign(&mut self, rhs: $rhs) { - self.data <<= rhs - } - } - impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs } - - impl Shr<$rhs> for BigInt { - type Output = BigInt; - - #[inline] - fn shr(self, rhs: $rhs) -> BigInt { - let round_down = shr_round_down(&self, rhs); - let data = self.data >> rhs; - let data = if round_down { data + 1u8 } else { data }; - BigInt::from_biguint(self.sign, data) - } - } - impl<'a> Shr<$rhs> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn shr(self, rhs: $rhs) -> BigInt { - let round_down = shr_round_down(self, rhs); - let data = &self.data >> rhs; - let data = if round_down { data + 1u8 } else { data }; - BigInt::from_biguint(self.sign, data) - } - } - impl ShrAssign<$rhs> for BigInt { - #[inline] - fn shr_assign(&mut self, rhs: $rhs) { - let round_down = shr_round_down(self, rhs); - self.data >>= rhs; - if round_down { - self.data += 1u8; - } else if self.data.is_zero() { - self.sign = NoSign; - } - } - } - impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs } - )*}; -} - -impl_shift! { u8, u16, u32, u64, u128, usize } -impl_shift! { i8, i16, i32, i64, i128, isize } - -// Negative values need a rounding adjustment if there are any ones in the -// bits that are getting shifted out. -fn shr_round_down(i: &BigInt, shift: T) -> bool { - if i.is_negative() { - let zeros = i.trailing_zeros().expect("negative values are non-zero"); - shift > T::zero() && shift.to_u64().map(|shift| zeros < shift).unwrap_or(true) - } else { - false - } -} - impl Zero for BigInt { #[inline] fn zero() -> BigInt { @@ -943,1203 +283,49 @@ impl Signed for BigInt { } } -/// Help function for pow -/// -/// Computes the effect of the exponent on the sign. -#[inline] -fn powsign(sign: Sign, other: &T) -> Sign { - if other.is_zero() { - Plus - } else if sign != Minus || other.is_odd() { - sign - } else { - -sign - } -} - -macro_rules! 
pow_impl { - ($T:ty) => { - impl Pow<$T> for BigInt { - type Output = BigInt; - - #[inline] - fn pow(self, rhs: $T) -> BigInt { - BigInt::from_biguint(powsign(self.sign, &rhs), self.data.pow(rhs)) - } - } - - impl<'b> Pow<&'b $T> for BigInt { - type Output = BigInt; - - #[inline] - fn pow(self, rhs: &$T) -> BigInt { - BigInt::from_biguint(powsign(self.sign, rhs), self.data.pow(rhs)) - } - } - - impl<'a> Pow<$T> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn pow(self, rhs: $T) -> BigInt { - BigInt::from_biguint(powsign(self.sign, &rhs), Pow::pow(&self.data, rhs)) - } - } - - impl<'a, 'b> Pow<&'b $T> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn pow(self, rhs: &$T) -> BigInt { - BigInt::from_biguint(powsign(self.sign, rhs), Pow::pow(&self.data, rhs)) - } - } - }; -} - -pow_impl!(u8); -pow_impl!(u16); -pow_impl!(u32); -pow_impl!(u64); -pow_impl!(usize); -pow_impl!(u128); -pow_impl!(BigUint); - trait UnsignedAbs { type Unsigned; - /// A convenience method for getting the absolute value of a signed primitive as unsigned - /// See also `unsigned_abs`: https://github.com/rust-lang/rust/issues/74913 - fn uabs(self) -> Self::Unsigned; - - fn checked_uabs(self) -> CheckedUnsignedAbs; -} - -enum CheckedUnsignedAbs { - Positive(T), - Negative(T), -} -use self::CheckedUnsignedAbs::{Negative, Positive}; - -macro_rules! impl_unsigned_abs { - ($Signed:ty, $Unsigned:ty) => { - impl UnsignedAbs for $Signed { - type Unsigned = $Unsigned; - - #[inline] - fn uabs(self) -> $Unsigned { - self.wrapping_abs() as $Unsigned - } - - #[inline] - fn checked_uabs(self) -> CheckedUnsignedAbs { - if self >= 0 { - Positive(self as $Unsigned) - } else { - Negative(self.wrapping_neg() as $Unsigned) - } - } - } - }; -} -impl_unsigned_abs!(i8, u8); -impl_unsigned_abs!(i16, u16); -impl_unsigned_abs!(i32, u32); -impl_unsigned_abs!(i64, u64); -impl_unsigned_abs!(i128, u128); -impl_unsigned_abs!(isize, usize); - -// We want to forward to BigUint::add, but it's not clear how that will go until -// we compare both sign and magnitude. So we duplicate this body for every -// val/ref combination, deferring that decision to BigUint's own forwarding. -macro_rules! 
bigint_add { - ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => { - match ($a.sign, $b.sign) { - (_, NoSign) => $a_owned, - (NoSign, _) => $b_owned, - // same sign => keep the sign with the sum of magnitudes - (Plus, Plus) | (Minus, Minus) => BigInt::from_biguint($a.sign, $a_data + $b_data), - // opposite signs => keep the sign of the larger with the difference of magnitudes - (Plus, Minus) | (Minus, Plus) => match $a.data.cmp(&$b.data) { - Less => BigInt::from_biguint($b.sign, $b_data - $a_data), - Greater => BigInt::from_biguint($a.sign, $a_data - $b_data), - Equal => Zero::zero(), - }, - } - }; -} - -impl<'a, 'b> Add<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: &BigInt) -> BigInt { - bigint_add!( - self, - self.clone(), - &self.data, - other, - other.clone(), - &other.data - ) - } -} - -impl<'a> Add for &'a BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: BigInt) -> BigInt { - bigint_add!(self, self.clone(), &self.data, other, other, other.data) - } -} - -impl<'a> Add<&'a BigInt> for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: &BigInt) -> BigInt { - bigint_add!(self, self, self.data, other, other.clone(), &other.data) - } -} - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: BigInt) -> BigInt { - bigint_add!(self, self, self.data, other, other, other.data) - } -} - -impl<'a> AddAssign<&'a BigInt> for BigInt { - #[inline] - fn add_assign(&mut self, other: &BigInt) { - let n = mem::replace(self, BigInt::zero()); - *self = n + other; - } -} -forward_val_assign!(impl AddAssign for BigInt, add_assign); - -promote_all_scalars!(impl Add for BigInt, add); -promote_all_scalars_assign!(impl AddAssign for BigInt, add_assign); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: u32) -> BigInt { - match self.sign { - NoSign => From::from(other), - Plus => BigInt::from(self.data + other), - Minus => match self.data.cmp(&From::from(other)) { - Equal => Zero::zero(), - Less => BigInt::from(other - self.data), - Greater => -BigInt::from(self.data - other), - }, - } - } -} - -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut self, other: u32) { - let n = mem::replace(self, BigInt::zero()); - *self = n + other; - } -} - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: u64) -> BigInt { - match self.sign { - NoSign => From::from(other), - Plus => BigInt::from(self.data + other), - Minus => match self.data.cmp(&From::from(other)) { - Equal => Zero::zero(), - Less => BigInt::from(other - self.data), - Greater => -BigInt::from(self.data - other), - }, - } - } -} - -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut self, other: u64) { - let n = mem::replace(self, BigInt::zero()); - *self = n + other; - } -} - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: u128) -> BigInt { - match self.sign { - NoSign => BigInt::from(other), - Plus => BigInt::from(self.data + other), - Minus => match self.data.cmp(&From::from(other)) { - Equal => BigInt::zero(), - Less => BigInt::from(other - self.data), - Greater => -BigInt::from(self.data - other), - }, - } - } -} -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut 
self, other: u128) { - let n = mem::replace(self, BigInt::zero()); - *self = n + other; - } -} - -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: i32) -> BigInt { - match other.checked_uabs() { - Positive(u) => self + u, - Negative(u) => self - u, - } - } -} -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut self, other: i32) { - match other.checked_uabs() { - Positive(u) => *self += u, - Negative(u) => *self -= u, - } - } -} - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: i64) -> BigInt { - match other.checked_uabs() { - Positive(u) => self + u, - Negative(u) => self - u, - } - } -} -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut self, other: i64) { - match other.checked_uabs() { - Positive(u) => *self += u, - Negative(u) => *self -= u, - } - } -} - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: i128) -> BigInt { - match other.checked_uabs() { - Positive(u) => self + u, - Negative(u) => self - u, - } - } -} -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut self, other: i128) { - match other.checked_uabs() { - Positive(u) => *self += u, - Negative(u) => *self -= u, - } - } -} - -// We want to forward to BigUint::sub, but it's not clear how that will go until -// we compare both sign and magnitude. So we duplicate this body for every -// val/ref combination, deferring that decision to BigUint's own forwarding. -macro_rules! bigint_sub { - ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => { - match ($a.sign, $b.sign) { - (_, NoSign) => $a_owned, - (NoSign, _) => -$b_owned, - // opposite signs => keep the sign of the left with the sum of magnitudes - (Plus, Minus) | (Minus, Plus) => BigInt::from_biguint($a.sign, $a_data + $b_data), - // same sign => keep or toggle the sign of the left with the difference of magnitudes - (Plus, Plus) | (Minus, Minus) => match $a.data.cmp(&$b.data) { - Less => BigInt::from_biguint(-$a.sign, $b_data - $a_data), - Greater => BigInt::from_biguint($a.sign, $a_data - $b_data), - Equal => Zero::zero(), - }, - } - }; -} - -impl<'a, 'b> Sub<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: &BigInt) -> BigInt { - bigint_sub!( - self, - self.clone(), - &self.data, - other, - other.clone(), - &other.data - ) - } -} - -impl<'a> Sub for &'a BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - bigint_sub!(self, self.clone(), &self.data, other, other, other.data) - } -} - -impl<'a> Sub<&'a BigInt> for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: &BigInt) -> BigInt { - bigint_sub!(self, self, self.data, other, other.clone(), &other.data) - } -} - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - bigint_sub!(self, self, self.data, other, other, other.data) - } -} - -impl<'a> SubAssign<&'a BigInt> for BigInt { - #[inline] - fn sub_assign(&mut self, other: &BigInt) { - let n = mem::replace(self, BigInt::zero()); - *self = n - other; - } -} -forward_val_assign!(impl SubAssign for BigInt, sub_assign); - -promote_all_scalars!(impl Sub for BigInt, sub); -promote_all_scalars_assign!(impl SubAssign for BigInt, sub_assign); 
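// A hedged sketch of the sign handling in the `bigint_add!` / `bigint_sub!`
// macros above, with a (sign, u64 magnitude) pair standing in for
// (Sign, BigUint): equal signs add the magnitudes and keep the sign; opposite
// signs subtract the smaller magnitude from the larger and keep the larger's
// sign. The `S` enum and `add_signed` are illustrative stand-ins.
use core::cmp::Ordering::{Equal, Greater, Less};

#[derive(Clone, Copy, PartialEq, Debug)]
enum S {
    Minus,
    NoSign,
    Plus,
}

fn add_signed((sa, ma): (S, u64), (sb, mb): (S, u64)) -> (S, u64) {
    match (sa, sb) {
        (_, S::NoSign) => (sa, ma),
        (S::NoSign, _) => (sb, mb),
        _ if sa == sb => (sa, ma + mb),
        _ => match ma.cmp(&mb) {
            Greater => (sa, ma - mb),
            Less => (sb, mb - ma),
            Equal => (S::NoSign, 0),
        },
    }
}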
-forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); -forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); -forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: u32) -> BigInt { - match self.sign { - NoSign => -BigInt::from(other), - Minus => -BigInt::from(self.data + other), - Plus => match self.data.cmp(&From::from(other)) { - Equal => Zero::zero(), - Greater => BigInt::from(self.data - other), - Less => -BigInt::from(other - self.data), - }, - } - } -} -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: u32) { - let n = mem::replace(self, BigInt::zero()); - *self = n - other; - } -} - -impl Sub for u32 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - -(other - self) - } -} - -impl Sub for u64 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - -(other - self) - } -} - -impl Sub for u128 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - -(other - self) - } -} - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: u64) -> BigInt { - match self.sign { - NoSign => -BigInt::from(other), - Minus => -BigInt::from(self.data + other), - Plus => match self.data.cmp(&From::from(other)) { - Equal => Zero::zero(), - Greater => BigInt::from(self.data - other), - Less => -BigInt::from(other - self.data), - }, - } - } -} - -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: u64) { - let n = mem::replace(self, BigInt::zero()); - *self = n - other; - } -} - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: u128) -> BigInt { - match self.sign { - NoSign => -BigInt::from(other), - Minus => -BigInt::from(self.data + other), - Plus => match self.data.cmp(&From::from(other)) { - Equal => Zero::zero(), - Greater => BigInt::from(self.data - other), - Less => -BigInt::from(other - self.data), - }, - } - } -} - -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: u128) { - let n = mem::replace(self, BigInt::zero()); - *self = n - other; - } -} - -forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); -forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); -forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: i32) -> BigInt { - match other.checked_uabs() { - Positive(u) => self - u, - Negative(u) => self + u, - } - } -} -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: i32) { - match other.checked_uabs() { - Positive(u) => *self -= u, - Negative(u) => *self += u, - } - } -} - -impl Sub for i32 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u - other, - Negative(u) => -other - u, - } - } -} - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: i64) -> BigInt { - match other.checked_uabs() { - Positive(u) => self - u, - Negative(u) => self + u, - } - } -} -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: i64) { - match other.checked_uabs() { - Positive(u) => *self -= u, - Negative(u) => *self += u, - } - } -} - -impl Sub for i64 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u - other, - Negative(u) => -other - u, - } - 
} -} - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: i128) -> BigInt { - match other.checked_uabs() { - Positive(u) => self - u, - Negative(u) => self + u, - } - } -} - -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: i128) { - match other.checked_uabs() { - Positive(u) => *self -= u, - Negative(u) => *self += u, - } - } -} - -impl Sub for i128 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u - other, - Negative(u) => -other - u, - } - } -} - -forward_all_binop_to_ref_ref!(impl Mul for BigInt, mul); - -impl<'a, 'b> Mul<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: &BigInt) -> BigInt { - BigInt::from_biguint(self.sign * other.sign, &self.data * &other.data) - } -} - -impl<'a> MulAssign<&'a BigInt> for BigInt { - #[inline] - fn mul_assign(&mut self, other: &BigInt) { - *self = &*self * other; - } -} -forward_val_assign!(impl MulAssign for BigInt, mul_assign); - -promote_all_scalars!(impl Mul for BigInt, mul); -promote_all_scalars_assign!(impl MulAssign for BigInt, mul_assign); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: u32) -> BigInt { - BigInt::from_biguint(self.sign, self.data * other) - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: u32) { - self.data *= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: u64) -> BigInt { - BigInt::from_biguint(self.sign, self.data * other) - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: u64) { - self.data *= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: u128) -> BigInt { - BigInt::from_biguint(self.sign, self.data * other) - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: u128) { - self.data *= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: i32) -> BigInt { - match other.checked_uabs() { - Positive(u) => self * u, - Negative(u) => -self * u, - } - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: i32) { - match other.checked_uabs() { - Positive(u) => *self *= u, - Negative(u) => { - self.sign = -self.sign; - self.data *= u; - } - } - } -} - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: i64) -> BigInt { - match other.checked_uabs() { - Positive(u) => self * u, - Negative(u) => -self * u, - } - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: i64) { - match other.checked_uabs() { - Positive(u) => *self *= u, - Negative(u) => { - self.sign = -self.sign; - self.data *= u; - } - } - } -} - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: 
i128) -> BigInt { - match other.checked_uabs() { - Positive(u) => self * u, - Negative(u) => -self * u, - } - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: i128) { - match other.checked_uabs() { - Positive(u) => *self *= u, - Negative(u) => { - self.sign = -self.sign; - self.data *= u; - } - } - } -} - -forward_all_binop_to_ref_ref!(impl Div for BigInt, div); - -impl<'a, 'b> Div<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: &BigInt) -> BigInt { - let (q, _) = self.div_rem(other); - q - } -} - -impl<'a> DivAssign<&'a BigInt> for BigInt { - #[inline] - fn div_assign(&mut self, other: &BigInt) { - *self = &*self / other; - } -} -forward_val_assign!(impl DivAssign for BigInt, div_assign); - -promote_all_scalars!(impl Div for BigInt, div); -promote_all_scalars_assign!(impl DivAssign for BigInt, div_assign); -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: u32) -> BigInt { - BigInt::from_biguint(self.sign, self.data / other) - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: u32) { - self.data /= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Div for u32 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - BigInt::from_biguint(other.sign, self / other.data) - } -} - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: u64) -> BigInt { - BigInt::from_biguint(self.sign, self.data / other) - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: u64) { - self.data /= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Div for u64 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - BigInt::from_biguint(other.sign, self / other.data) - } -} - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: u128) -> BigInt { - BigInt::from_biguint(self.sign, self.data / other) - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: u128) { - self.data /= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Div for u128 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - BigInt::from_biguint(other.sign, self / other.data) - } -} - -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: i32) -> BigInt { - match other.checked_uabs() { - Positive(u) => self / u, - Negative(u) => -self / u, - } - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: i32) { - match other.checked_uabs() { - Positive(u) => *self /= u, - Negative(u) => { - self.sign = -self.sign; - *self /= u; - } - } - } -} - -impl Div for i32 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u / other, - Negative(u) => u / -other, - } - } -} - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: i64) -> BigInt { - match other.checked_uabs() { - Positive(u) => self / u, - Negative(u) => 
-self / u, - } - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: i64) { - match other.checked_uabs() { - Positive(u) => *self /= u, - Negative(u) => { - self.sign = -self.sign; - *self /= u; - } - } - } -} - -impl Div for i64 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u / other, - Negative(u) => u / -other, - } - } -} - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: i128) -> BigInt { - match other.checked_uabs() { - Positive(u) => self / u, - Negative(u) => -self / u, - } - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: i128) { - match other.checked_uabs() { - Positive(u) => *self /= u, - Negative(u) => { - self.sign = -self.sign; - *self /= u; - } - } - } -} - -impl Div for i128 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u / other, - Negative(u) => u / -other, - } - } -} - -forward_all_binop_to_ref_ref!(impl Rem for BigInt, rem); - -impl<'a, 'b> Rem<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: &BigInt) -> BigInt { - if let Some(other) = other.to_u32() { - self % other - } else if let Some(other) = other.to_i32() { - self % other - } else { - let (_, r) = self.div_rem(other); - r - } - } -} - -impl<'a> RemAssign<&'a BigInt> for BigInt { - #[inline] - fn rem_assign(&mut self, other: &BigInt) { - *self = &*self % other; - } -} -forward_val_assign!(impl RemAssign for BigInt, rem_assign); - -promote_all_scalars!(impl Rem for BigInt, rem); -promote_all_scalars_assign!(impl RemAssign for BigInt, rem_assign); -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); - -impl Rem for BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: u32) -> BigInt { - BigInt::from_biguint(self.sign, self.data % other) - } -} - -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: u32) { - self.data %= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Rem for u32 { - type Output = BigInt; - - #[inline] - fn rem(self, other: BigInt) -> BigInt { - BigInt::from(self % other.data) - } -} - -impl Rem for BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: u64) -> BigInt { - BigInt::from_biguint(self.sign, self.data % other) - } -} - -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: u64) { - self.data %= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Rem for u64 { - type Output = BigInt; - - #[inline] - fn rem(self, other: BigInt) -> BigInt { - BigInt::from(self % other.data) - } -} - -impl Rem for BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: u128) -> BigInt { - BigInt::from_biguint(self.sign, self.data % other) - } -} - -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: u128) { - self.data %= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Rem for u128 { - type Output = BigInt; - - #[inline] - fn rem(self, other: BigInt) -> BigInt { - BigInt::from(self % other.data) - } -} - -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); 
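Illustrative note (not part of the patch): because `Div` truncates toward zero, the remainder keeps the sign of the dividend and the divisor's sign never matters, which is why the signed-scalar `Rem` impls that follow can simply take `uabs()` of the divisor. This matches `%` on Rust's primitive integers. A small sketch, assuming the crate's scalar operator impls:

use num_bigint::BigInt;

fn main() {
    // The remainder follows the dividend's sign.
    assert_eq!(BigInt::from(-7) % 3u32, BigInt::from(-1));
    assert_eq!(BigInt::from(7) % 3u32, BigInt::from(1));
    // The divisor's sign is irrelevant: 7 % -3 == 7 % 3.
    assert_eq!(BigInt::from(7) % -3i32, BigInt::from(1));
    // Same results as primitive (truncated) remainder.
    assert_eq!(-7i32 % 3, -1);
    assert_eq!(7i32 % -3, 1);
}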
- -impl Rem for BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: i32) -> BigInt { - self % other.uabs() - } -} - -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: i32) { - *self %= other.uabs(); - } -} - -impl Rem for i32 { - type Output = BigInt; - - #[inline] - fn rem(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u % other, - Negative(u) => -(u % other), - } - } -} - -impl Rem for BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: i64) -> BigInt { - self % other.uabs() - } -} - -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: i64) { - *self %= other.uabs(); - } -} - -impl Rem for i64 { - type Output = BigInt; - - #[inline] - fn rem(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u % other, - Negative(u) => -(u % other), - } - } -} - -impl Rem for BigInt { - type Output = BigInt; + /// A convenience method for getting the absolute value of a signed primitive as unsigned + /// See also `unsigned_abs`: https://github.com/rust-lang/rust/issues/74913 + fn uabs(self) -> Self::Unsigned; - #[inline] - fn rem(self, other: i128) -> BigInt { - self % other.uabs() - } + fn checked_uabs(self) -> CheckedUnsignedAbs; } -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: i128) { - *self %= other.uabs(); - } +enum CheckedUnsignedAbs { + Positive(T), + Negative(T), } +use self::CheckedUnsignedAbs::{Negative, Positive}; -impl Rem for i128 { - type Output = BigInt; +macro_rules! impl_unsigned_abs { + ($Signed:ty, $Unsigned:ty) => { + impl UnsignedAbs for $Signed { + type Unsigned = $Unsigned; - #[inline] - fn rem(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u % other, - Negative(u) => -(u % other), + #[inline] + fn uabs(self) -> $Unsigned { + self.wrapping_abs() as $Unsigned + } + + #[inline] + fn checked_uabs(self) -> CheckedUnsignedAbs { + if self >= 0 { + Positive(self as $Unsigned) + } else { + Negative(self.wrapping_neg() as $Unsigned) + } + } } - } + }; } +impl_unsigned_abs!(i8, u8); +impl_unsigned_abs!(i16, u16); +impl_unsigned_abs!(i32, u32); +impl_unsigned_abs!(i64, u64); +impl_unsigned_abs!(i128, u128); +impl_unsigned_abs!(isize, usize); impl Neg for BigInt { type Output = BigInt; @@ -2160,37 +346,6 @@ impl<'a> Neg for &'a BigInt { } } -impl CheckedAdd for BigInt { - #[inline] - fn checked_add(&self, v: &BigInt) -> Option { - Some(self.add(v)) - } -} - -impl CheckedSub for BigInt { - #[inline] - fn checked_sub(&self, v: &BigInt) -> Option { - Some(self.sub(v)) - } -} - -impl CheckedMul for BigInt { - #[inline] - fn checked_mul(&self, v: &BigInt) -> Option { - Some(self.mul(v)) - } -} - -impl CheckedDiv for BigInt { - #[inline] - fn checked_div(&self, v: &BigInt) -> Option { - if v.is_zero() { - return None; - } - Some(self.div(v)) - } -} - impl Integer for BigInt { #[inline] fn div_rem(&self, other: &BigInt) -> (BigInt, BigInt) { @@ -2372,246 +527,6 @@ impl Roots for BigInt { } } -impl ToPrimitive for BigInt { - #[inline] - fn to_i64(&self) -> Option { - match self.sign { - Plus => self.data.to_i64(), - NoSign => Some(0), - Minus => { - let n = self.data.to_u64()?; - let m: u64 = 1 << 63; - match n.cmp(&m) { - Less => Some(-(n as i64)), - Equal => Some(i64::MIN), - Greater => None, - } - } - } - } - - #[inline] - fn to_i128(&self) -> Option { - match self.sign { - Plus => self.data.to_i128(), - NoSign => Some(0), - Minus => { - let n = self.data.to_u128()?; - let m: u128 = 1 << 127; - 
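Illustrative note (not part of the patch): the `Minus` arm above compares the magnitude against 1 << 63 (resp. 1 << 127) so that exactly `i64::MIN` / `i128::MIN` converts, while anything more negative returns `None`. A sketch of that boundary, assuming the crate's `From` and `num_traits::ToPrimitive` impls:

use num_bigint::BigInt;
use num_traits::ToPrimitive;

fn main() {
    let min = BigInt::from(i64::MIN);
    // Magnitude is exactly 1 << 63: the Equal arm, representable as i64::MIN.
    assert_eq!(min.to_i64(), Some(i64::MIN));
    // One step further out hits the Greater arm: not representable.
    assert_eq!((min - 1i32).to_i64(), None);
    // Non-negative values just defer to the BigUint conversion.
    assert_eq!(BigInt::from(42).to_i64(), Some(42));
}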
match n.cmp(&m) { - Less => Some(-(n as i128)), - Equal => Some(i128::MIN), - Greater => None, - } - } - } - } - - #[inline] - fn to_u64(&self) -> Option { - match self.sign { - Plus => self.data.to_u64(), - NoSign => Some(0), - Minus => None, - } - } - - #[inline] - fn to_u128(&self) -> Option { - match self.sign { - Plus => self.data.to_u128(), - NoSign => Some(0), - Minus => None, - } - } - - #[inline] - fn to_f32(&self) -> Option { - let n = self.data.to_f32()?; - Some(if self.sign == Minus { -n } else { n }) - } - - #[inline] - fn to_f64(&self) -> Option { - let n = self.data.to_f64()?; - Some(if self.sign == Minus { -n } else { n }) - } -} - -macro_rules! impl_try_from_bigint { - ($T:ty, $to_ty:path) => { - #[cfg(has_try_from)] - impl TryFrom<&BigInt> for $T { - type Error = TryFromBigIntError<()>; - - #[inline] - fn try_from(value: &BigInt) -> Result<$T, TryFromBigIntError<()>> { - $to_ty(value).ok_or(TryFromBigIntError::new(())) - } - } - - #[cfg(has_try_from)] - impl TryFrom for $T { - type Error = TryFromBigIntError; - - #[inline] - fn try_from(value: BigInt) -> Result<$T, TryFromBigIntError> { - <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value)) - } - } - }; -} - -impl_try_from_bigint!(u8, ToPrimitive::to_u8); -impl_try_from_bigint!(u16, ToPrimitive::to_u16); -impl_try_from_bigint!(u32, ToPrimitive::to_u32); -impl_try_from_bigint!(u64, ToPrimitive::to_u64); -impl_try_from_bigint!(usize, ToPrimitive::to_usize); -impl_try_from_bigint!(u128, ToPrimitive::to_u128); - -impl_try_from_bigint!(i8, ToPrimitive::to_i8); -impl_try_from_bigint!(i16, ToPrimitive::to_i16); -impl_try_from_bigint!(i32, ToPrimitive::to_i32); -impl_try_from_bigint!(i64, ToPrimitive::to_i64); -impl_try_from_bigint!(isize, ToPrimitive::to_isize); -impl_try_from_bigint!(i128, ToPrimitive::to_i128); - -impl FromPrimitive for BigInt { - #[inline] - fn from_i64(n: i64) -> Option { - Some(BigInt::from(n)) - } - - #[inline] - fn from_i128(n: i128) -> Option { - Some(BigInt::from(n)) - } - - #[inline] - fn from_u64(n: u64) -> Option { - Some(BigInt::from(n)) - } - - #[inline] - fn from_u128(n: u128) -> Option { - Some(BigInt::from(n)) - } - - #[inline] - fn from_f64(n: f64) -> Option { - if n >= 0.0 { - BigUint::from_f64(n).map(BigInt::from) - } else { - let x = BigUint::from_f64(-n)?; - Some(-BigInt::from(x)) - } - } -} - -impl From for BigInt { - #[inline] - fn from(n: i64) -> Self { - if n >= 0 { - BigInt::from(n as u64) - } else { - let u = u64::MAX - (n as u64) + 1; - BigInt { - sign: Minus, - data: BigUint::from(u), - } - } - } -} - -impl From for BigInt { - #[inline] - fn from(n: i128) -> Self { - if n >= 0 { - BigInt::from(n as u128) - } else { - let u = u128::MAX - (n as u128) + 1; - BigInt { - sign: Minus, - data: BigUint::from(u), - } - } - } -} - -macro_rules! impl_bigint_from_int { - ($T:ty) => { - impl From<$T> for BigInt { - #[inline] - fn from(n: $T) -> Self { - BigInt::from(n as i64) - } - } - }; -} - -impl_bigint_from_int!(i8); -impl_bigint_from_int!(i16); -impl_bigint_from_int!(i32); -impl_bigint_from_int!(isize); - -impl From for BigInt { - #[inline] - fn from(n: u64) -> Self { - if n > 0 { - BigInt { - sign: Plus, - data: BigUint::from(n), - } - } else { - BigInt::zero() - } - } -} - -impl From for BigInt { - #[inline] - fn from(n: u128) -> Self { - if n > 0 { - BigInt { - sign: Plus, - data: BigUint::from(n), - } - } else { - BigInt::zero() - } - } -} - -macro_rules! 
impl_bigint_from_uint { - ($T:ty) => { - impl From<$T> for BigInt { - #[inline] - fn from(n: $T) -> Self { - BigInt::from(n as u64) - } - } - }; -} - -impl_bigint_from_uint!(u8); -impl_bigint_from_uint!(u16); -impl_bigint_from_uint!(u32); -impl_bigint_from_uint!(usize); - -impl From for BigInt { - #[inline] - fn from(n: BigUint) -> Self { - if n.is_zero() { - BigInt::zero() - } else { - BigInt { - sign: Plus, - data: n, - } - } - } -} - impl IntDigits for BigInt { #[inline] fn digits(&self) -> &[BigDigit] { @@ -2638,29 +553,6 @@ impl IntDigits for BigInt { } } -#[cfg(feature = "serde")] -impl serde::Serialize for BigInt { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - // Note: do not change the serialization format, or it may break - // forward and backward compatibility of serialized data! - (self.sign, &self.data).serialize(serializer) - } -} - -#[cfg(feature = "serde")] -impl<'de> serde::Deserialize<'de> for BigInt { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let (sign, data) = serde::Deserialize::deserialize(deserializer)?; - Ok(BigInt::from_biguint(sign, data)) - } -} - /// A generic trait for converting a value to a `BigInt`. This may return /// `None` when converting from `f32` or `f64`, and will always succeed /// when converting from any integer or unsigned primitive, or `BigUint`. @@ -2669,92 +561,6 @@ pub trait ToBigInt { fn to_bigint(&self) -> Option; } -impl ToBigInt for BigInt { - #[inline] - fn to_bigint(&self) -> Option { - Some(self.clone()) - } -} - -impl ToBigInt for BigUint { - #[inline] - fn to_bigint(&self) -> Option { - if self.is_zero() { - Some(Zero::zero()) - } else { - Some(BigInt { - sign: Plus, - data: self.clone(), - }) - } - } -} - -impl biguint::ToBigUint for BigInt { - #[inline] - fn to_biguint(&self) -> Option { - match self.sign() { - Plus => Some(self.data.clone()), - NoSign => Some(Zero::zero()), - Minus => None, - } - } -} - -#[cfg(has_try_from)] -impl TryFrom<&BigInt> for BigUint { - type Error = TryFromBigIntError<()>; - - #[inline] - fn try_from(value: &BigInt) -> Result> { - value - .to_biguint() - .ok_or_else(|| TryFromBigIntError::new(())) - } -} - -#[cfg(has_try_from)] -impl TryFrom for BigUint { - type Error = TryFromBigIntError; - - #[inline] - fn try_from(value: BigInt) -> Result> { - if value.sign() == Sign::Minus { - Err(TryFromBigIntError::new(value)) - } else { - Ok(value.data) - } - } -} - -macro_rules! impl_to_bigint { - ($T:ty, $from_ty:path) => { - impl ToBigInt for $T { - #[inline] - fn to_bigint(&self) -> Option { - $from_ty(*self) - } - } - }; -} - -impl_to_bigint!(isize, FromPrimitive::from_isize); -impl_to_bigint!(i8, FromPrimitive::from_i8); -impl_to_bigint!(i16, FromPrimitive::from_i16); -impl_to_bigint!(i32, FromPrimitive::from_i32); -impl_to_bigint!(i64, FromPrimitive::from_i64); -impl_to_bigint!(i128, FromPrimitive::from_i128); - -impl_to_bigint!(usize, FromPrimitive::from_usize); -impl_to_bigint!(u8, FromPrimitive::from_u8); -impl_to_bigint!(u16, FromPrimitive::from_u16); -impl_to_bigint!(u32, FromPrimitive::from_u32); -impl_to_bigint!(u64, FromPrimitive::from_u64); -impl_to_bigint!(u128, FromPrimitive::from_u128); - -impl_to_bigint!(f32, FromPrimitive::from_f32); -impl_to_bigint!(f64, FromPrimitive::from_f64); - impl BigInt { /// Creates and initializes a BigInt. /// @@ -2836,20 +642,7 @@ impl BigInt { /// The digits are in big-endian base 28. 
#[inline] pub fn from_signed_bytes_be(digits: &[u8]) -> BigInt { - let sign = match digits.first() { - Some(v) if *v > 0x7f => Sign::Minus, - Some(_) => Sign::Plus, - None => return BigInt::zero(), - }; - - if sign == Sign::Minus { - // two's-complement the content to retrieve the magnitude - let mut digits = Vec::from(digits); - twos_complement_be(&mut digits); - BigInt::from_biguint(sign, BigUint::from_bytes_be(&*digits)) - } else { - BigInt::from_biguint(sign, BigUint::from_bytes_be(digits)) - } + convert::from_signed_bytes_be(digits) } /// Creates and initializes a `BigInt` from an array of bytes in two's complement. @@ -2857,20 +650,7 @@ impl BigInt { /// The digits are in little-endian base 28. #[inline] pub fn from_signed_bytes_le(digits: &[u8]) -> BigInt { - let sign = match digits.last() { - Some(v) if *v > 0x7f => Sign::Minus, - Some(_) => Sign::Plus, - None => return BigInt::zero(), - }; - - if sign == Sign::Minus { - // two's-complement the content to retrieve the magnitude - let mut digits = Vec::from(digits); - twos_complement_le(&mut digits); - BigInt::from_biguint(sign, BigUint::from_bytes_le(&*digits)) - } else { - BigInt::from_biguint(sign, BigUint::from_bytes_le(digits)) - } + convert::from_signed_bytes_le(digits) } /// Creates and initializes a `BigInt`. @@ -3052,20 +832,7 @@ impl BigInt { /// ``` #[inline] pub fn to_signed_bytes_be(&self) -> Vec { - let mut bytes = self.data.to_bytes_be(); - let first_byte = bytes.first().cloned().unwrap_or(0); - if first_byte > 0x7f - && !(first_byte == 0x80 - && bytes.iter().skip(1).all(Zero::is_zero) - && self.sign == Sign::Minus) - { - // msb used by magnitude, extend by 1 byte - bytes.insert(0, 0); - } - if self.sign == Sign::Minus { - twos_complement_be(&mut bytes); - } - bytes + convert::to_signed_bytes_be(self) } /// Returns the two's-complement byte representation of the `BigInt` in little-endian byte order. @@ -3080,20 +847,7 @@ impl BigInt { /// ``` #[inline] pub fn to_signed_bytes_le(&self) -> Vec { - let mut bytes = self.data.to_bytes_le(); - let last_byte = bytes.last().cloned().unwrap_or(0); - if last_byte > 0x7f - && !(last_byte == 0x80 - && bytes.iter().rev().skip(1).all(Zero::is_zero) - && self.sign == Sign::Minus) - { - // msb used by magnitude, extend by 1 byte - bytes.push(0); - } - if self.sign == Sign::Minus { - twos_complement_le(&mut bytes); - } - bytes + convert::to_signed_bytes_le(self) } /// Returns the integer formatted as a string in the given radix. @@ -3228,17 +982,17 @@ impl BigInt { #[inline] pub fn checked_add(&self, v: &BigInt) -> Option { - Some(self.add(v)) + Some(self + v) } #[inline] pub fn checked_sub(&self, v: &BigInt) -> Option { - Some(self.sub(v)) + Some(self - v) } #[inline] pub fn checked_mul(&self, v: &BigInt) -> Option { - Some(self.mul(v)) + Some(self * v) } #[inline] @@ -3246,7 +1000,7 @@ impl BigInt { if v.is_zero() { return None; } - Some(self.div(v)) + Some(self / v) } /// Returns `self ^ exponent`. @@ -3263,31 +1017,7 @@ impl BigInt { /// /// Panics if the exponent is negative or the modulus is zero. pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self { - assert!( - !exponent.is_negative(), - "negative exponentiation is not supported!" - ); - assert!( - !modulus.is_zero(), - "attempt to calculate with zero modulus!" - ); - - let result = self.data.modpow(&exponent.data, &modulus.data); - if result.is_zero() { - return BigInt::zero(); - } - - // The sign of the result follows the modulus, like `mod_floor`. 
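Illustrative note (not part of the patch): the sign correction below makes `modpow` agree with `mod_floor`, i.e. the result takes the sign of the modulus rather than the dividend. A sketch of the intended behaviour, assuming the crate's public `modpow` and `From` impls:

use num_bigint::BigInt;

fn main() {
    let base = BigInt::from(-4);
    let exp = BigInt::from(3);

    // (-4)^3 = -64; with a positive modulus the result is the non-negative
    // representative, as with mod_floor: -64 mod 7 == 6.
    assert_eq!(base.modpow(&exp, &BigInt::from(7)), BigInt::from(6));
    // With a negative modulus the result takes the modulus' sign: -64 mod -7 == -1.
    assert_eq!(base.modpow(&exp, &BigInt::from(-7)), BigInt::from(-1));
}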
- let (sign, mag) = match ( - self.is_negative() && exponent.is_odd(), - modulus.is_negative(), - ) { - (false, false) => (Plus, result), - (true, false) => (Plus, &modulus.data - result), - (false, true) => (Minus, &modulus.data - result), - (true, true) => (Minus, result), - }; - BigInt::from_biguint(sign, mag) + power::modpow(self, exponent, modulus) } /// Returns the truncated principal square root of `self` -- @@ -3323,14 +1053,14 @@ impl BigInt { // Then the two's complement is // ... 1 !x 1 0 ... 0 // where !x is obtained from x by flipping each bit - if bit >= u64::from(big_digit::BITS) * self.len() as u64 { + if bit >= u64::from(crate::big_digit::BITS) * self.len() as u64 { true } else { let trailing_zeros = self.data.trailing_zeros().unwrap(); match Ord::cmp(&bit, &trailing_zeros) { - Less => false, - Equal => true, - Greater => !self.data.bit(bit), + Ordering::Less => false, + Ordering::Equal => true, + Ordering::Greater => !self.data.bit(bit), } } } else { @@ -3347,6 +1077,7 @@ impl BigInt { pub fn set_bit(&mut self, bit: u64, value: bool) { match self.sign { Sign::Plus => self.data.set_bit(bit, value), + Sign::Minus => bits::set_negative_bit(self, bit, value), Sign::NoSign => { if value { self.data.set_bit(bit, true); @@ -3355,130 +1086,19 @@ impl BigInt { // Clearing a bit for zero is a no-op } } - Sign::Minus => { - let bits_per_digit = u64::from(big_digit::BITS); - if bit >= bits_per_digit * self.len() as u64 { - if !value { - self.data.set_bit(bit, true); - } - } else { - // If the Uint number is - // ... 0 x 1 0 ... 0 - // then the two's complement is - // ... 1 !x 1 0 ... 0 - // |-- bit at position 'trailing_zeros' - // where !x is obtained from x by flipping each bit - let trailing_zeros = self.data.trailing_zeros().unwrap(); - if bit > trailing_zeros { - self.data.set_bit(bit, !value); - } else if bit == trailing_zeros && !value { - // Clearing the bit at position `trailing_zeros` is dealt with by doing - // similarly to what `bitand_neg_pos` does, except we start at digit - // `bit_index`. All digits below `bit_index` are guaranteed to be zero, - // so initially we have `carry_in` = `carry_out` = 1. Furthermore, we - // stop traversing the digits when there are no more carries. - let bit_index = (bit / bits_per_digit).to_usize().unwrap(); - let bit_mask = (1 as BigDigit) << (bit % bits_per_digit); - let mut digit_iter = self.digits_mut().iter_mut().skip(bit_index); - let mut carry_in = 1; - let mut carry_out = 1; - - let digit = digit_iter.next().unwrap(); - let twos_in = negate_carry(*digit, &mut carry_in); - let twos_out = twos_in & !bit_mask; - *digit = negate_carry(twos_out, &mut carry_out); - - for digit in digit_iter { - if carry_in == 0 && carry_out == 0 { - // Exit the loop since no more digits can change - break; - } - let twos = negate_carry(*digit, &mut carry_in); - *digit = negate_carry(twos, &mut carry_out); - } - - if carry_out != 0 { - // All digits have been traversed and there is a carry - debug_assert_eq!(carry_in, 0); - self.digits_mut().push(1); - } - } else if bit < trailing_zeros && value { - // Flip each bit from position 'bit' to 'trailing_zeros', both inclusive - // ... 1 !x 1 0 ... 0 ... 0 - // |-- bit at position 'bit' - // |-- bit at position 'trailing_zeros' - // bit_mask: 1 1 ... 1 0 .. 
0 - // This is done by xor'ing with the bit_mask - let index_lo = (bit / bits_per_digit).to_usize().unwrap(); - let index_hi = (trailing_zeros / bits_per_digit).to_usize().unwrap(); - let bit_mask_lo = big_digit::MAX << (bit % bits_per_digit); - let bit_mask_hi = big_digit::MAX - >> (bits_per_digit - 1 - (trailing_zeros % bits_per_digit)); - let digits = self.digits_mut(); - - if index_lo == index_hi { - digits[index_lo] ^= bit_mask_lo & bit_mask_hi; - } else { - digits[index_lo] = bit_mask_lo; - for index in (index_lo + 1)..index_hi { - digits[index] = big_digit::MAX; - } - digits[index_hi] ^= bit_mask_hi; - } - } else { - // We end up here in two cases: - // bit == trailing_zeros && value: Bit is already set - // bit < trailing_zeros && !value: Bit is already cleared - } - } - } } // The top bit may have been cleared, so normalize self.normalize(); } } -impl_sum_iter_type!(BigInt); -impl_product_iter_type!(BigInt); - -/// Perform in-place two's complement of the given binary representation, -/// in little-endian byte order. -#[inline] -fn twos_complement_le(digits: &mut [u8]) { - twos_complement(digits) -} - -/// Perform in-place two's complement of the given binary representation -/// in big-endian byte order. -#[inline] -fn twos_complement_be(digits: &mut [u8]) { - twos_complement(digits.iter_mut().rev()) -} - -/// Perform in-place two's complement of the given digit iterator -/// starting from the least significant byte. -#[inline] -fn twos_complement<'a, I>(digits: I) -where - I: IntoIterator, -{ - let mut carry = true; - for d in digits { - *d = d.not(); - if carry { - *d = d.wrapping_add(1); - carry = d.is_zero(); - } - } -} - #[test] fn test_from_biguint() { fn check(inp_s: Sign, inp_n: usize, ans_s: Sign, ans_n: usize) { - let inp = BigInt::from_biguint(inp_s, FromPrimitive::from_usize(inp_n).unwrap()); + let inp = BigInt::from_biguint(inp_s, BigUint::from(inp_n)); let ans = BigInt { sign: ans_s, - data: FromPrimitive::from_usize(ans_n).unwrap(), + data: BigUint::from(ans_n), }; assert_eq!(inp, ans); } @@ -3494,7 +1114,7 @@ fn test_from_slice() { let inp = BigInt::from_slice(inp_s, &[inp_n]); let ans = BigInt { sign: ans_s, - data: FromPrimitive::from_u32(ans_n).unwrap(), + data: BigUint::from(ans_n), }; assert_eq!(inp, ans); } @@ -3511,7 +1131,7 @@ fn test_assign_from_slice() { inp.assign_from_slice(inp_s, &[inp_n]); let ans = BigInt { sign: ans_s, - data: FromPrimitive::from_u32(ans_n).unwrap(), + data: BigUint::from(ans_n), }; assert_eq!(inp, ans); } diff --git a/src/bigint/addition.rs b/src/bigint/addition.rs new file mode 100644 index 00000000..b999f625 --- /dev/null +++ b/src/bigint/addition.rs @@ -0,0 +1,239 @@ +use super::CheckedUnsignedAbs::{Negative, Positive}; +use super::Sign::{Minus, NoSign, Plus}; +use super::{BigInt, UnsignedAbs}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::cmp::Ordering::{Equal, Greater, Less}; +use core::iter::Sum; +use core::mem; +use core::ops::{Add, AddAssign}; +use num_traits::{CheckedAdd, Zero}; + +// We want to forward to BigUint::add, but it's not clear how that will go until +// we compare both sign and magnitude. So we duplicate this body for every +// val/ref combination, deferring that decision to BigUint's own forwarding. +macro_rules! 
bigint_add { + ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => { + match ($a.sign, $b.sign) { + (_, NoSign) => $a_owned, + (NoSign, _) => $b_owned, + // same sign => keep the sign with the sum of magnitudes + (Plus, Plus) | (Minus, Minus) => BigInt::from_biguint($a.sign, $a_data + $b_data), + // opposite signs => keep the sign of the larger with the difference of magnitudes + (Plus, Minus) | (Minus, Plus) => match $a.data.cmp(&$b.data) { + Less => BigInt::from_biguint($b.sign, $b_data - $a_data), + Greater => BigInt::from_biguint($a.sign, $a_data - $b_data), + Equal => Zero::zero(), + }, + } + }; +} + +impl<'a, 'b> Add<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: &BigInt) -> BigInt { + bigint_add!( + self, + self.clone(), + &self.data, + other, + other.clone(), + &other.data + ) + } +} + +impl<'a> Add for &'a BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: BigInt) -> BigInt { + bigint_add!(self, self.clone(), &self.data, other, other, other.data) + } +} + +impl<'a> Add<&'a BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: &BigInt) -> BigInt { + bigint_add!(self, self, self.data, other, other.clone(), &other.data) + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: BigInt) -> BigInt { + bigint_add!(self, self, self.data, other, other, other.data) + } +} + +impl<'a> AddAssign<&'a BigInt> for BigInt { + #[inline] + fn add_assign(&mut self, other: &BigInt) { + let n = mem::replace(self, BigInt::zero()); + *self = n + other; + } +} +forward_val_assign!(impl AddAssign for BigInt, add_assign); + +promote_all_scalars!(impl Add for BigInt, add); +promote_all_scalars_assign!(impl AddAssign for BigInt, add_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: u32) -> BigInt { + match self.sign { + NoSign => From::from(other), + Plus => BigInt::from(self.data + other), + Minus => match self.data.cmp(&From::from(other)) { + Equal => Zero::zero(), + Less => BigInt::from(other - self.data), + Greater => -BigInt::from(self.data - other), + }, + } + } +} + +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: u32) { + let n = mem::replace(self, BigInt::zero()); + *self = n + other; + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: u64) -> BigInt { + match self.sign { + NoSign => From::from(other), + Plus => BigInt::from(self.data + other), + Minus => match self.data.cmp(&From::from(other)) { + Equal => Zero::zero(), + Less => BigInt::from(other - self.data), + Greater => -BigInt::from(self.data - other), + }, + } + } +} + +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: u64) { + let n = mem::replace(self, BigInt::zero()); + *self = n + other; + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: u128) -> BigInt { + match self.sign { + NoSign => BigInt::from(other), + Plus => BigInt::from(self.data + other), + Minus => match self.data.cmp(&From::from(other)) { + Equal => BigInt::zero(), + Less => BigInt::from(other - self.data), + Greater => -BigInt::from(self.data - other), + }, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut 
self, other: u128) { + let n = mem::replace(self, BigInt::zero()); + *self = n + other; + } +} + +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self + u, + Negative(u) => self - u, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self += u, + Negative(u) => *self -= u, + } + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self + u, + Negative(u) => self - u, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self += u, + Negative(u) => *self -= u, + } + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self + u, + Negative(u) => self - u, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self += u, + Negative(u) => *self -= u, + } + } +} + +impl CheckedAdd for BigInt { + #[inline] + fn checked_add(&self, v: &BigInt) -> Option { + Some(self.add(v)) + } +} + +impl_sum_iter_type!(BigInt); diff --git a/src/bigint/arbitrary.rs b/src/bigint/arbitrary.rs new file mode 100644 index 00000000..08e05f9e --- /dev/null +++ b/src/bigint/arbitrary.rs @@ -0,0 +1,34 @@ +use super::{BigInt, Sign}; + +use crate::std_alloc::Box; +use crate::BigUint; + +#[cfg(feature = "quickcheck")] +impl quickcheck::Arbitrary for BigInt { + fn arbitrary(g: &mut G) -> Self { + let positive = bool::arbitrary(g); + let sign = if positive { Sign::Plus } else { Sign::Minus }; + Self::from_biguint(sign, BigUint::arbitrary(g)) + } + + fn shrink(&self) -> Box> { + let sign = self.sign(); + let unsigned_shrink = self.data.shrink(); + Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x))) + } +} + +#[cfg(feature = "arbitrary")] +impl arbitrary::Arbitrary for BigInt { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + let positive = bool::arbitrary(u)?; + let sign = if positive { Sign::Plus } else { Sign::Minus }; + Ok(Self::from_biguint(sign, BigUint::arbitrary(u)?)) + } + + fn shrink(&self) -> Box> { + let sign = self.sign(); + let unsigned_shrink = self.data.shrink(); + Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x))) + } +} diff --git a/src/bigint/bits.rs b/src/bigint/bits.rs new file mode 100644 index 00000000..c66830e5 --- /dev/null +++ b/src/bigint/bits.rs @@ -0,0 +1,531 @@ +use super::BigInt; +use super::Sign::{Minus, NoSign, Plus}; + +use crate::big_digit::{self, BigDigit, DoubleBigDigit}; +use crate::biguint::IntDigits; +use crate::std_alloc::Vec; + +use core::cmp::Ordering::{Equal, Greater, Less}; +use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign}; +use num_traits::{ToPrimitive, Zero}; + +// Negation in two's complement. +// acc must be initialized as 1 for least-significant digit. +// +// When negating, a carry (acc == 1) means that all the digits +// considered to this point were zero. 
This means that if all the +// digits of a negative BigInt have been considered, carry must be +// zero as we cannot have negative zero. +// +// 01 -> ...f ff +// ff -> ...f 01 +// 01 00 -> ...f ff 00 +// 01 01 -> ...f fe ff +// 01 ff -> ...f fe 01 +// ff 00 -> ...f 01 00 +// ff 01 -> ...f 00 ff +// ff ff -> ...f 00 01 +#[inline] +fn negate_carry(a: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit { + *acc += DoubleBigDigit::from(!a); + let lo = *acc as BigDigit; + *acc >>= big_digit::BITS; + lo +} + +// + 1 & -ff = ...0 01 & ...f 01 = ...0 01 = + 1 +// +ff & - 1 = ...0 ff & ...f ff = ...0 ff = +ff +// answer is pos, has length of a +fn bitand_pos_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_b = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_b = negate_carry(bi, &mut carry_b); + *ai &= twos_b; + } + debug_assert!(b.len() > a.len() || carry_b == 0); +} + +// - 1 & +ff = ...f ff & ...0 ff = ...0 ff = +ff +// -ff & + 1 = ...f 01 & ...0 01 = ...0 01 = + 1 +// answer is pos, has length of b +fn bitand_neg_pos(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = twos_a & bi; + } + debug_assert!(a.len() > b.len() || carry_a == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => a.truncate(b.len()), + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().cloned()); + } + } +} + +// - 1 & -ff = ...f ff & ...f 01 = ...f 01 = - ff +// -ff & - 1 = ...f 01 & ...f ff = ...f 01 = - ff +// -ff & -fe = ...f 01 & ...f 02 = ...f 00 = -100 +// answer is neg, has length of longest with a possible carry +fn bitand_neg_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_b = 1; + let mut carry_and = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(twos_a & twos_b, &mut carry_and); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a, &mut carry_and); + } + debug_assert!(carry_a == 0); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_b = negate_carry(bi, &mut carry_b); + negate_carry(twos_b, &mut carry_and) + })); + debug_assert!(carry_b == 0); + } + } + if carry_and != 0 { + a.push(1); + } +} + +forward_val_val_binop!(impl BitAnd for BigInt, bitand); +forward_ref_val_binop!(impl BitAnd for BigInt, bitand); + +// do not use forward_ref_ref_binop_commutative! 
for bitand so that we can +// clone as needed, avoiding over-allocation +impl<'a, 'b> BitAnd<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn bitand(self, other: &BigInt) -> BigInt { + match (self.sign, other.sign) { + (NoSign, _) | (_, NoSign) => BigInt::zero(), + (Plus, Plus) => BigInt::from(&self.data & &other.data), + (Plus, Minus) => self.clone() & other, + (Minus, Plus) => other.clone() & self, + (Minus, Minus) => { + // forward to val-ref, choosing the larger to clone + if self.len() >= other.len() { + self.clone() & other + } else { + other.clone() & self + } + } + } + } +} + +impl<'a> BitAnd<&'a BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn bitand(mut self, other: &BigInt) -> BigInt { + self &= other; + self + } +} + +forward_val_assign!(impl BitAndAssign for BigInt, bitand_assign); + +impl<'a> BitAndAssign<&'a BigInt> for BigInt { + fn bitand_assign(&mut self, other: &BigInt) { + match (self.sign, other.sign) { + (NoSign, _) => {} + (_, NoSign) => self.set_zero(), + (Plus, Plus) => { + self.data &= &other.data; + if self.data.is_zero() { + self.sign = NoSign; + } + } + (Plus, Minus) => { + bitand_pos_neg(self.digits_mut(), other.digits()); + self.normalize(); + } + (Minus, Plus) => { + bitand_neg_pos(self.digits_mut(), other.digits()); + self.sign = Plus; + self.normalize(); + } + (Minus, Minus) => { + bitand_neg_neg(self.digits_mut(), other.digits()); + self.normalize(); + } + } + } +} + +// + 1 | -ff = ...0 01 | ...f 01 = ...f 01 = -ff +// +ff | - 1 = ...0 ff | ...f ff = ...f ff = - 1 +// answer is neg, has length of b +fn bitor_pos_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_b = 1; + let mut carry_or = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(*ai | twos_b, &mut carry_or); + } + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + a.truncate(b.len()); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_b = negate_carry(bi, &mut carry_b); + negate_carry(twos_b, &mut carry_or) + })); + debug_assert!(carry_b == 0); + } + } + // for carry_or to be non-zero, we would need twos_b == 0 + debug_assert!(carry_or == 0); +} + +// - 1 | +ff = ...f ff | ...0 ff = ...f ff = - 1 +// -ff | + 1 = ...f 01 | ...0 01 = ...f 01 = -ff +// answer is neg, has length of a +fn bitor_neg_pos(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_or = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a | bi, &mut carry_or); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + if a.len() > b.len() { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a, &mut carry_or); + } + debug_assert!(carry_a == 0); + } + // for carry_or to be non-zero, we would need twos_a == 0 + debug_assert!(carry_or == 0); +} + +// - 1 | -ff = ...f ff | ...f 01 = ...f ff = -1 +// -ff | - 1 = ...f 01 | ...f ff = ...f ff = -1 +// answer is neg, has length of shortest +fn bitor_neg_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_b = 1; + let mut carry_or = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(twos_a | twos_b, &mut carry_or); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + 
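Illustrative note (not part of the patch): the helpers here make `&` and `|` behave exactly as they would on an infinitely sign-extended two's-complement integer, so results can be checked against fixed-width primitives. A sketch, assuming the crate's `BitAnd`/`BitOr` impls:

use num_bigint::BigInt;

fn main() {
    // -1 is ...1111, so AND with it is the identity and OR with it is -1.
    let x = BigInt::from(0x5a);
    assert_eq!(&x & &BigInt::from(-1), x.clone());
    assert_eq!(&x | &BigInt::from(-1), BigInt::from(-1));
    // Mixed signs: 6 & -4 keeps only the bits present in both, here 4.
    assert_eq!(BigInt::from(6) & BigInt::from(-4), BigInt::from(4));
    // 6 | -4 = -2, matching fixed-width two's complement.
    assert_eq!(BigInt::from(6) | BigInt::from(-4), BigInt::from(-2));
    assert_eq!(6i32 & -4i32, 4);
    assert_eq!(6i32 | -4i32, -2);
}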
debug_assert!(b.len() > a.len() || carry_b == 0); + if a.len() > b.len() { + a.truncate(b.len()); + } + // for carry_or to be non-zero, we would need twos_a == 0 or twos_b == 0 + debug_assert!(carry_or == 0); +} + +forward_val_val_binop!(impl BitOr for BigInt, bitor); +forward_ref_val_binop!(impl BitOr for BigInt, bitor); + +// do not use forward_ref_ref_binop_commutative! for bitor so that we can +// clone as needed, avoiding over-allocation +impl<'a, 'b> BitOr<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn bitor(self, other: &BigInt) -> BigInt { + match (self.sign, other.sign) { + (NoSign, _) => other.clone(), + (_, NoSign) => self.clone(), + (Plus, Plus) => BigInt::from(&self.data | &other.data), + (Plus, Minus) => other.clone() | self, + (Minus, Plus) => self.clone() | other, + (Minus, Minus) => { + // forward to val-ref, choosing the smaller to clone + if self.len() <= other.len() { + self.clone() | other + } else { + other.clone() | self + } + } + } + } +} + +impl<'a> BitOr<&'a BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn bitor(mut self, other: &BigInt) -> BigInt { + self |= other; + self + } +} + +forward_val_assign!(impl BitOrAssign for BigInt, bitor_assign); + +impl<'a> BitOrAssign<&'a BigInt> for BigInt { + fn bitor_assign(&mut self, other: &BigInt) { + match (self.sign, other.sign) { + (_, NoSign) => {} + (NoSign, _) => self.clone_from(other), + (Plus, Plus) => self.data |= &other.data, + (Plus, Minus) => { + bitor_pos_neg(self.digits_mut(), other.digits()); + self.sign = Minus; + self.normalize(); + } + (Minus, Plus) => { + bitor_neg_pos(self.digits_mut(), other.digits()); + self.normalize(); + } + (Minus, Minus) => { + bitor_neg_neg(self.digits_mut(), other.digits()); + self.normalize(); + } + } + } +} + +// + 1 ^ -ff = ...0 01 ^ ...f 01 = ...f 00 = -100 +// +ff ^ - 1 = ...0 ff ^ ...f ff = ...f 00 = -100 +// answer is neg, has length of longest with a possible carry +fn bitxor_pos_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_b = 1; + let mut carry_xor = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(*ai ^ twos_b, &mut carry_xor); + } + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_b = !0; + *ai = negate_carry(*ai ^ twos_b, &mut carry_xor); + } + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_b = negate_carry(bi, &mut carry_b); + negate_carry(twos_b, &mut carry_xor) + })); + debug_assert!(carry_b == 0); + } + } + if carry_xor != 0 { + a.push(1); + } +} + +// - 1 ^ +ff = ...f ff ^ ...0 ff = ...f 00 = -100 +// -ff ^ + 1 = ...f 01 ^ ...0 01 = ...f 00 = -100 +// answer is neg, has length of longest with a possible carry +fn bitxor_neg_pos(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_xor = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a ^ bi, &mut carry_xor); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a, &mut carry_xor); + } + debug_assert!(carry_a == 0); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_a = !0; + negate_carry(twos_a ^ bi, &mut carry_xor) + })); + } + } 
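Illustrative note (not part of the patch): the XOR helpers follow the same convention, so XOR of opposite signs is negative and XOR of two negatives is positive, just as in fixed-width two's complement. A sketch, assuming the crate's `BitXor` impls:

use num_bigint::BigInt;

fn main() {
    assert_eq!(BigInt::from(6) ^ BigInt::from(-4), BigInt::from(-6));
    assert_eq!(BigInt::from(-6) ^ BigInt::from(-4), BigInt::from(6));
    // Same answers as primitive two's-complement arithmetic.
    assert_eq!(6i32 ^ -4i32, -6);
    assert_eq!(-6i32 ^ -4i32, 6);
}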
+ if carry_xor != 0 { + a.push(1); + } +} + +// - 1 ^ -ff = ...f ff ^ ...f 01 = ...0 fe = +fe +// -ff & - 1 = ...f 01 ^ ...f ff = ...0 fe = +fe +// answer is pos, has length of longest +fn bitxor_neg_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_b = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = negate_carry(bi, &mut carry_b); + *ai = twos_a ^ twos_b; + } + debug_assert!(a.len() > b.len() || carry_a == 0); + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = !0; + *ai = twos_a ^ twos_b; + } + debug_assert!(carry_a == 0); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_a = !0; + let twos_b = negate_carry(bi, &mut carry_b); + twos_a ^ twos_b + })); + debug_assert!(carry_b == 0); + } + } +} + +forward_all_binop_to_val_ref_commutative!(impl BitXor for BigInt, bitxor); + +impl<'a> BitXor<&'a BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn bitxor(mut self, other: &BigInt) -> BigInt { + self ^= other; + self + } +} + +forward_val_assign!(impl BitXorAssign for BigInt, bitxor_assign); + +impl<'a> BitXorAssign<&'a BigInt> for BigInt { + fn bitxor_assign(&mut self, other: &BigInt) { + match (self.sign, other.sign) { + (_, NoSign) => {} + (NoSign, _) => self.clone_from(other), + (Plus, Plus) => { + self.data ^= &other.data; + if self.data.is_zero() { + self.sign = NoSign; + } + } + (Plus, Minus) => { + bitxor_pos_neg(self.digits_mut(), other.digits()); + self.sign = Minus; + self.normalize(); + } + (Minus, Plus) => { + bitxor_neg_pos(self.digits_mut(), other.digits()); + self.normalize(); + } + (Minus, Minus) => { + bitxor_neg_neg(self.digits_mut(), other.digits()); + self.sign = Plus; + self.normalize(); + } + } + } +} + +pub(super) fn set_negative_bit(x: &mut BigInt, bit: u64, value: bool) { + debug_assert_eq!(x.sign, Minus); + let data = &mut x.data; + + let bits_per_digit = u64::from(big_digit::BITS); + if bit >= bits_per_digit * data.len() as u64 { + if !value { + data.set_bit(bit, true); + } + } else { + // If the Uint number is + // ... 0 x 1 0 ... 0 + // then the two's complement is + // ... 1 !x 1 0 ... 0 + // |-- bit at position 'trailing_zeros' + // where !x is obtained from x by flipping each bit + let trailing_zeros = data.trailing_zeros().unwrap(); + if bit > trailing_zeros { + data.set_bit(bit, !value); + } else if bit == trailing_zeros && !value { + // Clearing the bit at position `trailing_zeros` is dealt with by doing + // similarly to what `bitand_neg_pos` does, except we start at digit + // `bit_index`. All digits below `bit_index` are guaranteed to be zero, + // so initially we have `carry_in` = `carry_out` = 1. Furthermore, we + // stop traversing the digits when there are no more carries. 
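Illustrative note (not part of the patch): viewed through the two's-complement lens, `set_negative_bit` covers three cases relative to `trailing_zeros`: bits above it are stored inverted, the bit at it may need the carry-propagating clear described above, and setting a lower bit flips a whole run of magnitude bits. A sketch of the observable behaviour, assuming the public `bit`/`set_bit` methods this diff defines:

use num_bigint::BigInt;

fn main() {
    // -4 is ...11100 in the two's-complement view: bits 2 and above are set.
    let x = BigInt::from(-4);
    assert!(!x.bit(0) && !x.bit(1) && x.bit(2) && x.bit(100));

    // Clearing the bit at `trailing_zeros` (bit 2) gives ...11000 = -8.
    let mut y = BigInt::from(-4);
    y.set_bit(2, false);
    assert_eq!(y, BigInt::from(-8));

    // Setting a lower bit (bit 0) gives ...11101 = -3.
    let mut z = BigInt::from(-4);
    z.set_bit(0, true);
    assert_eq!(z, BigInt::from(-3));
}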
+ let bit_index = (bit / bits_per_digit).to_usize().unwrap(); + let bit_mask = (1 as BigDigit) << (bit % bits_per_digit); + let mut digit_iter = data.digits_mut().iter_mut().skip(bit_index); + let mut carry_in = 1; + let mut carry_out = 1; + + let digit = digit_iter.next().unwrap(); + let twos_in = negate_carry(*digit, &mut carry_in); + let twos_out = twos_in & !bit_mask; + *digit = negate_carry(twos_out, &mut carry_out); + + for digit in digit_iter { + if carry_in == 0 && carry_out == 0 { + // Exit the loop since no more digits can change + break; + } + let twos = negate_carry(*digit, &mut carry_in); + *digit = negate_carry(twos, &mut carry_out); + } + + if carry_out != 0 { + // All digits have been traversed and there is a carry + debug_assert_eq!(carry_in, 0); + data.digits_mut().push(1); + } + } else if bit < trailing_zeros && value { + // Flip each bit from position 'bit' to 'trailing_zeros', both inclusive + // ... 1 !x 1 0 ... 0 ... 0 + // |-- bit at position 'bit' + // |-- bit at position 'trailing_zeros' + // bit_mask: 1 1 ... 1 0 .. 0 + // This is done by xor'ing with the bit_mask + let index_lo = (bit / bits_per_digit).to_usize().unwrap(); + let index_hi = (trailing_zeros / bits_per_digit).to_usize().unwrap(); + let bit_mask_lo = big_digit::MAX << (bit % bits_per_digit); + let bit_mask_hi = + big_digit::MAX >> (bits_per_digit - 1 - (trailing_zeros % bits_per_digit)); + let digits = data.digits_mut(); + + if index_lo == index_hi { + digits[index_lo] ^= bit_mask_lo & bit_mask_hi; + } else { + digits[index_lo] = bit_mask_lo; + for index in (index_lo + 1)..index_hi { + digits[index] = big_digit::MAX; + } + digits[index_hi] ^= bit_mask_hi; + } + } else { + // We end up here in two cases: + // bit == trailing_zeros && value: Bit is already set + // bit < trailing_zeros && !value: Bit is already cleared + } + } +} diff --git a/src/bigint/convert.rs b/src/bigint/convert.rs new file mode 100644 index 00000000..ff8e04ef --- /dev/null +++ b/src/bigint/convert.rs @@ -0,0 +1,469 @@ +use super::Sign::{self, Minus, NoSign, Plus}; +use super::{BigInt, ToBigInt}; + +use crate::std_alloc::Vec; +#[cfg(has_try_from)] +use crate::TryFromBigIntError; +use crate::{BigUint, ParseBigIntError, ToBigUint}; + +use core::cmp::Ordering::{Equal, Greater, Less}; +#[cfg(has_try_from)] +use core::convert::TryFrom; +use core::str::{self, FromStr}; +use num_traits::{FromPrimitive, Num, ToPrimitive, Zero}; + +impl FromStr for BigInt { + type Err = ParseBigIntError; + + #[inline] + fn from_str(s: &str) -> Result { + BigInt::from_str_radix(s, 10) + } +} + +impl Num for BigInt { + type FromStrRadixErr = ParseBigIntError; + + /// Creates and initializes a BigInt. 
+ #[inline] + fn from_str_radix(mut s: &str, radix: u32) -> Result { + let sign = if s.starts_with('-') { + let tail = &s[1..]; + if !tail.starts_with('+') { + s = tail + } + Minus + } else { + Plus + }; + let bu = BigUint::from_str_radix(s, radix)?; + Ok(BigInt::from_biguint(sign, bu)) + } +} + +impl ToPrimitive for BigInt { + #[inline] + fn to_i64(&self) -> Option { + match self.sign { + Plus => self.data.to_i64(), + NoSign => Some(0), + Minus => { + let n = self.data.to_u64()?; + let m: u64 = 1 << 63; + match n.cmp(&m) { + Less => Some(-(n as i64)), + Equal => Some(core::i64::MIN), + Greater => None, + } + } + } + } + + #[inline] + fn to_i128(&self) -> Option { + match self.sign { + Plus => self.data.to_i128(), + NoSign => Some(0), + Minus => { + let n = self.data.to_u128()?; + let m: u128 = 1 << 127; + match n.cmp(&m) { + Less => Some(-(n as i128)), + Equal => Some(core::i128::MIN), + Greater => None, + } + } + } + } + + #[inline] + fn to_u64(&self) -> Option { + match self.sign { + Plus => self.data.to_u64(), + NoSign => Some(0), + Minus => None, + } + } + + #[inline] + fn to_u128(&self) -> Option { + match self.sign { + Plus => self.data.to_u128(), + NoSign => Some(0), + Minus => None, + } + } + + #[inline] + fn to_f32(&self) -> Option { + let n = self.data.to_f32()?; + Some(if self.sign == Minus { -n } else { n }) + } + + #[inline] + fn to_f64(&self) -> Option { + let n = self.data.to_f64()?; + Some(if self.sign == Minus { -n } else { n }) + } +} + +macro_rules! impl_try_from_bigint { + ($T:ty, $to_ty:path) => { + #[cfg(has_try_from)] + impl TryFrom<&BigInt> for $T { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: &BigInt) -> Result<$T, TryFromBigIntError<()>> { + $to_ty(value).ok_or(TryFromBigIntError::new(())) + } + } + + #[cfg(has_try_from)] + impl TryFrom for $T { + type Error = TryFromBigIntError; + + #[inline] + fn try_from(value: BigInt) -> Result<$T, TryFromBigIntError> { + <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value)) + } + } + }; +} + +impl_try_from_bigint!(u8, ToPrimitive::to_u8); +impl_try_from_bigint!(u16, ToPrimitive::to_u16); +impl_try_from_bigint!(u32, ToPrimitive::to_u32); +impl_try_from_bigint!(u64, ToPrimitive::to_u64); +impl_try_from_bigint!(usize, ToPrimitive::to_usize); +impl_try_from_bigint!(u128, ToPrimitive::to_u128); + +impl_try_from_bigint!(i8, ToPrimitive::to_i8); +impl_try_from_bigint!(i16, ToPrimitive::to_i16); +impl_try_from_bigint!(i32, ToPrimitive::to_i32); +impl_try_from_bigint!(i64, ToPrimitive::to_i64); +impl_try_from_bigint!(isize, ToPrimitive::to_isize); +impl_try_from_bigint!(i128, ToPrimitive::to_i128); + +impl FromPrimitive for BigInt { + #[inline] + fn from_i64(n: i64) -> Option { + Some(BigInt::from(n)) + } + + #[inline] + fn from_i128(n: i128) -> Option { + Some(BigInt::from(n)) + } + + #[inline] + fn from_u64(n: u64) -> Option { + Some(BigInt::from(n)) + } + + #[inline] + fn from_u128(n: u128) -> Option { + Some(BigInt::from(n)) + } + + #[inline] + fn from_f64(n: f64) -> Option { + if n >= 0.0 { + BigUint::from_f64(n).map(BigInt::from) + } else { + let x = BigUint::from_f64(-n)?; + Some(-BigInt::from(x)) + } + } +} + +impl From for BigInt { + #[inline] + fn from(n: i64) -> Self { + if n >= 0 { + BigInt::from(n as u64) + } else { + let u = core::u64::MAX - (n as u64) + 1; + BigInt { + sign: Minus, + data: BigUint::from(u), + } + } + } +} + +impl From for BigInt { + #[inline] + fn from(n: i128) -> Self { + if n >= 0 { + BigInt::from(n as u128) + } else { + let u = core::u128::MAX - (n as u128) 
+ 1; + BigInt { + sign: Minus, + data: BigUint::from(u), + } + } + } +} + +macro_rules! impl_bigint_from_int { + ($T:ty) => { + impl From<$T> for BigInt { + #[inline] + fn from(n: $T) -> Self { + BigInt::from(n as i64) + } + } + }; +} + +impl_bigint_from_int!(i8); +impl_bigint_from_int!(i16); +impl_bigint_from_int!(i32); +impl_bigint_from_int!(isize); + +impl From for BigInt { + #[inline] + fn from(n: u64) -> Self { + if n > 0 { + BigInt { + sign: Plus, + data: BigUint::from(n), + } + } else { + BigInt::zero() + } + } +} + +impl From for BigInt { + #[inline] + fn from(n: u128) -> Self { + if n > 0 { + BigInt { + sign: Plus, + data: BigUint::from(n), + } + } else { + BigInt::zero() + } + } +} + +macro_rules! impl_bigint_from_uint { + ($T:ty) => { + impl From<$T> for BigInt { + #[inline] + fn from(n: $T) -> Self { + BigInt::from(n as u64) + } + } + }; +} + +impl_bigint_from_uint!(u8); +impl_bigint_from_uint!(u16); +impl_bigint_from_uint!(u32); +impl_bigint_from_uint!(usize); + +impl From for BigInt { + #[inline] + fn from(n: BigUint) -> Self { + if n.is_zero() { + BigInt::zero() + } else { + BigInt { + sign: Plus, + data: n, + } + } + } +} + +impl ToBigInt for BigInt { + #[inline] + fn to_bigint(&self) -> Option { + Some(self.clone()) + } +} + +impl ToBigInt for BigUint { + #[inline] + fn to_bigint(&self) -> Option { + if self.is_zero() { + Some(Zero::zero()) + } else { + Some(BigInt { + sign: Plus, + data: self.clone(), + }) + } + } +} + +impl ToBigUint for BigInt { + #[inline] + fn to_biguint(&self) -> Option { + match self.sign() { + Plus => Some(self.data.clone()), + NoSign => Some(Zero::zero()), + Minus => None, + } + } +} + +#[cfg(has_try_from)] +impl TryFrom<&BigInt> for BigUint { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: &BigInt) -> Result> { + value + .to_biguint() + .ok_or_else(|| TryFromBigIntError::new(())) + } +} + +#[cfg(has_try_from)] +impl TryFrom for BigUint { + type Error = TryFromBigIntError; + + #[inline] + fn try_from(value: BigInt) -> Result> { + if value.sign() == Sign::Minus { + Err(TryFromBigIntError::new(value)) + } else { + Ok(value.data) + } + } +} + +macro_rules! 
impl_to_bigint { + ($T:ty, $from_ty:path) => { + impl ToBigInt for $T { + #[inline] + fn to_bigint(&self) -> Option { + $from_ty(*self) + } + } + }; +} + +impl_to_bigint!(isize, FromPrimitive::from_isize); +impl_to_bigint!(i8, FromPrimitive::from_i8); +impl_to_bigint!(i16, FromPrimitive::from_i16); +impl_to_bigint!(i32, FromPrimitive::from_i32); +impl_to_bigint!(i64, FromPrimitive::from_i64); +impl_to_bigint!(i128, FromPrimitive::from_i128); + +impl_to_bigint!(usize, FromPrimitive::from_usize); +impl_to_bigint!(u8, FromPrimitive::from_u8); +impl_to_bigint!(u16, FromPrimitive::from_u16); +impl_to_bigint!(u32, FromPrimitive::from_u32); +impl_to_bigint!(u64, FromPrimitive::from_u64); +impl_to_bigint!(u128, FromPrimitive::from_u128); + +impl_to_bigint!(f32, FromPrimitive::from_f32); +impl_to_bigint!(f64, FromPrimitive::from_f64); + +#[inline] +pub(super) fn from_signed_bytes_be(digits: &[u8]) -> BigInt { + let sign = match digits.first() { + Some(v) if *v > 0x7f => Sign::Minus, + Some(_) => Sign::Plus, + None => return BigInt::zero(), + }; + + if sign == Sign::Minus { + // two's-complement the content to retrieve the magnitude + let mut digits = Vec::from(digits); + twos_complement_be(&mut digits); + BigInt::from_biguint(sign, BigUint::from_bytes_be(&*digits)) + } else { + BigInt::from_biguint(sign, BigUint::from_bytes_be(digits)) + } +} + +#[inline] +pub(super) fn from_signed_bytes_le(digits: &[u8]) -> BigInt { + let sign = match digits.last() { + Some(v) if *v > 0x7f => Sign::Minus, + Some(_) => Sign::Plus, + None => return BigInt::zero(), + }; + + if sign == Sign::Minus { + // two's-complement the content to retrieve the magnitude + let mut digits = Vec::from(digits); + twos_complement_le(&mut digits); + BigInt::from_biguint(sign, BigUint::from_bytes_le(&*digits)) + } else { + BigInt::from_biguint(sign, BigUint::from_bytes_le(digits)) + } +} + +#[inline] +pub(super) fn to_signed_bytes_be(x: &BigInt) -> Vec { + let mut bytes = x.data.to_bytes_be(); + let first_byte = bytes.first().cloned().unwrap_or(0); + if first_byte > 0x7f + && !(first_byte == 0x80 && bytes.iter().skip(1).all(Zero::is_zero) && x.sign == Sign::Minus) + { + // msb used by magnitude, extend by 1 byte + bytes.insert(0, 0); + } + if x.sign == Sign::Minus { + twos_complement_be(&mut bytes); + } + bytes +} + +#[inline] +pub(super) fn to_signed_bytes_le(x: &BigInt) -> Vec { + let mut bytes = x.data.to_bytes_le(); + let last_byte = bytes.last().cloned().unwrap_or(0); + if last_byte > 0x7f + && !(last_byte == 0x80 + && bytes.iter().rev().skip(1).all(Zero::is_zero) + && x.sign == Sign::Minus) + { + // msb used by magnitude, extend by 1 byte + bytes.push(0); + } + if x.sign == Sign::Minus { + twos_complement_le(&mut bytes); + } + bytes +} + +/// Perform in-place two's complement of the given binary representation, +/// in little-endian byte order. +#[inline] +fn twos_complement_le(digits: &mut [u8]) { + twos_complement(digits) +} + +/// Perform in-place two's complement of the given binary representation +/// in big-endian byte order. +#[inline] +fn twos_complement_be(digits: &mut [u8]) { + twos_complement(digits.iter_mut().rev()) +} + +/// Perform in-place two's complement of the given digit iterator +/// starting from the least significant byte. 
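// NOTE (illustrative, not part of the original patch): a concrete trace of these
// helpers: `twos_complement_le(&mut [0xfb, 0xff])` -- the little-endian two's-complement
// encoding of -5 -- inverts each byte and propagates the +1 carry, producing
// [0x05, 0x00], the bytes of the magnitude 5. The operation is its own inverse, which
// is why `from_signed_bytes_*` and `to_signed_bytes_*` above can share it.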
+#[inline] +fn twos_complement<'a, I>(digits: I) +where + I: IntoIterator, +{ + let mut carry = true; + for d in digits { + *d = !*d; + if carry { + *d = d.wrapping_add(1); + carry = d.is_zero(); + } + } +} diff --git a/src/bigint/division.rs b/src/bigint/division.rs new file mode 100644 index 00000000..a702b8f2 --- /dev/null +++ b/src/bigint/division.rs @@ -0,0 +1,448 @@ +use super::CheckedUnsignedAbs::{Negative, Positive}; +use super::Sign::NoSign; +use super::{BigInt, UnsignedAbs}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::ops::{Div, DivAssign, Rem, RemAssign}; +use num_integer::Integer; +use num_traits::{CheckedDiv, ToPrimitive, Zero}; + +forward_all_binop_to_ref_ref!(impl Div for BigInt, div); + +impl<'a, 'b> Div<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: &BigInt) -> BigInt { + let (q, _) = self.div_rem(other); + q + } +} + +impl<'a> DivAssign<&'a BigInt> for BigInt { + #[inline] + fn div_assign(&mut self, other: &BigInt) { + *self = &*self / other; + } +} +forward_val_assign!(impl DivAssign for BigInt, div_assign); + +promote_all_scalars!(impl Div for BigInt, div); +promote_all_scalars_assign!(impl DivAssign for BigInt, div_assign); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: u32) -> BigInt { + BigInt::from_biguint(self.sign, self.data / other) + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: u32) { + self.data /= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Div for u32 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + BigInt::from_biguint(other.sign, self / other.data) + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: u64) -> BigInt { + BigInt::from_biguint(self.sign, self.data / other) + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: u64) { + self.data /= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Div for u64 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + BigInt::from_biguint(other.sign, self / other.data) + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: u128) -> BigInt { + BigInt::from_biguint(self.sign, self.data / other) + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: u128) { + self.data /= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Div for u128 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + BigInt::from_biguint(other.sign, self / other.data) + } +} + +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self / u, + Negative(u) => -self / u, + } + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self /= u, + Negative(u) => { + self.sign = -self.sign; + *self /= u; + } + } + } +} + +impl Div for i32 { + type Output = BigInt; + 
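// NOTE (editorial, not part of the original patch): like Rust's primitive integers,
// BigInt's `/` and `%` truncate toward zero and give the remainder the sign of the
// dividend, e.g. BigInt::from(-7) / 2 == -3 and BigInt::from(-7) % 2 == -1. Callers
// that need floor semantics can use `Integer::div_floor` / `Integer::mod_floor`.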
+ #[inline] + fn div(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u / other, + Negative(u) => u / -other, + } + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self / u, + Negative(u) => -self / u, + } + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self /= u, + Negative(u) => { + self.sign = -self.sign; + *self /= u; + } + } + } +} + +impl Div for i64 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u / other, + Negative(u) => u / -other, + } + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self / u, + Negative(u) => -self / u, + } + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self /= u, + Negative(u) => { + self.sign = -self.sign; + *self /= u; + } + } + } +} + +impl Div for i128 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u / other, + Negative(u) => u / -other, + } + } +} + +forward_all_binop_to_ref_ref!(impl Rem for BigInt, rem); + +impl<'a, 'b> Rem<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: &BigInt) -> BigInt { + if let Some(other) = other.to_u32() { + self % other + } else if let Some(other) = other.to_i32() { + self % other + } else { + let (_, r) = self.div_rem(other); + r + } + } +} + +impl<'a> RemAssign<&'a BigInt> for BigInt { + #[inline] + fn rem_assign(&mut self, other: &BigInt) { + *self = &*self % other; + } +} +forward_val_assign!(impl RemAssign for BigInt, rem_assign); + +promote_all_scalars!(impl Rem for BigInt, rem); +promote_all_scalars_assign!(impl RemAssign for BigInt, rem_assign); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: u32) -> BigInt { + BigInt::from_biguint(self.sign, self.data % other) + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: u32) { + self.data %= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Rem for u32 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + BigInt::from(self % other.data) + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: u64) -> BigInt { + BigInt::from_biguint(self.sign, self.data % other) + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: u64) { + self.data %= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Rem for u64 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + BigInt::from(self % other.data) + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: u128) -> BigInt { + BigInt::from_biguint(self.sign, self.data % other) + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: u128) { + self.data %= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Rem 
for u128 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + BigInt::from(self % other.data) + } +} + +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: i32) -> BigInt { + self % other.uabs() + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: i32) { + *self %= other.uabs(); + } +} + +impl Rem for i32 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u % other, + Negative(u) => -(u % other), + } + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: i64) -> BigInt { + self % other.uabs() + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: i64) { + *self %= other.uabs(); + } +} + +impl Rem for i64 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u % other, + Negative(u) => -(u % other), + } + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: i128) -> BigInt { + self % other.uabs() + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: i128) { + *self %= other.uabs(); + } +} + +impl Rem for i128 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u % other, + Negative(u) => -(u % other), + } + } +} + +impl CheckedDiv for BigInt { + #[inline] + fn checked_div(&self, v: &BigInt) -> Option { + if v.is_zero() { + return None; + } + Some(self.div(v)) + } +} diff --git a/src/bigint/multiplication.rs b/src/bigint/multiplication.rs new file mode 100644 index 00000000..aaf5b142 --- /dev/null +++ b/src/bigint/multiplication.rs @@ -0,0 +1,192 @@ +use super::CheckedUnsignedAbs::{Negative, Positive}; +use super::Sign::{self, Minus, NoSign, Plus}; +use super::{BigInt, UnsignedAbs}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::iter::Product; +use core::ops::{Mul, MulAssign}; +use num_traits::{CheckedMul, One, Zero}; + +impl Mul for Sign { + type Output = Sign; + + #[inline] + fn mul(self, other: Sign) -> Sign { + match (self, other) { + (NoSign, _) | (_, NoSign) => NoSign, + (Plus, Plus) | (Minus, Minus) => Plus, + (Plus, Minus) | (Minus, Plus) => Minus, + } + } +} + +forward_all_binop_to_ref_ref!(impl Mul for BigInt, mul); + +impl<'a, 'b> Mul<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: &BigInt) -> BigInt { + BigInt::from_biguint(self.sign * other.sign, &self.data * &other.data) + } +} + +impl<'a> MulAssign<&'a BigInt> for BigInt { + #[inline] + fn mul_assign(&mut self, other: &BigInt) { + *self = &*self * other; + } +} +forward_val_assign!(impl MulAssign for BigInt, mul_assign); + +promote_all_scalars!(impl Mul for BigInt, mul); +promote_all_scalars_assign!(impl MulAssign for BigInt, mul_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: u32) -> BigInt { + BigInt::from_biguint(self.sign, self.data * other) + } +} + +impl 
MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: u32) { + self.data *= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: u64) -> BigInt { + BigInt::from_biguint(self.sign, self.data * other) + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: u64) { + self.data *= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: u128) -> BigInt { + BigInt::from_biguint(self.sign, self.data * other) + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: u128) { + self.data *= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self * u, + Negative(u) => -self * u, + } + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self *= u, + Negative(u) => { + self.sign = -self.sign; + self.data *= u; + } + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self * u, + Negative(u) => -self * u, + } + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self *= u, + Negative(u) => { + self.sign = -self.sign; + self.data *= u; + } + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self * u, + Negative(u) => -self * u, + } + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self *= u, + Negative(u) => { + self.sign = -self.sign; + self.data *= u; + } + } + } +} + +impl CheckedMul for BigInt { + #[inline] + fn checked_mul(&self, v: &BigInt) -> Option { + Some(self.mul(v)) + } +} + +impl_product_iter_type!(BigInt); diff --git a/src/bigint/power.rs b/src/bigint/power.rs new file mode 100644 index 00000000..a4dd8063 --- /dev/null +++ b/src/bigint/power.rs @@ -0,0 +1,94 @@ +use super::BigInt; +use super::Sign::{self, Minus, Plus}; + +use crate::BigUint; + +use num_integer::Integer; +use num_traits::{Pow, Signed, Zero}; + +/// Help function for pow +/// +/// Computes the effect of the exponent on the sign. +#[inline] +fn powsign(sign: Sign, other: &T) -> Sign { + if other.is_zero() { + Plus + } else if sign != Minus || other.is_odd() { + sign + } else { + -sign + } +} + +macro_rules! 
pow_impl { + ($T:ty) => { + impl Pow<$T> for BigInt { + type Output = BigInt; + + #[inline] + fn pow(self, rhs: $T) -> BigInt { + BigInt::from_biguint(powsign(self.sign, &rhs), self.data.pow(rhs)) + } + } + + impl<'b> Pow<&'b $T> for BigInt { + type Output = BigInt; + + #[inline] + fn pow(self, rhs: &$T) -> BigInt { + BigInt::from_biguint(powsign(self.sign, rhs), self.data.pow(rhs)) + } + } + + impl<'a> Pow<$T> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn pow(self, rhs: $T) -> BigInt { + BigInt::from_biguint(powsign(self.sign, &rhs), Pow::pow(&self.data, rhs)) + } + } + + impl<'a, 'b> Pow<&'b $T> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn pow(self, rhs: &$T) -> BigInt { + BigInt::from_biguint(powsign(self.sign, rhs), Pow::pow(&self.data, rhs)) + } + } + }; +} + +pow_impl!(u8); +pow_impl!(u16); +pow_impl!(u32); +pow_impl!(u64); +pow_impl!(usize); +pow_impl!(u128); +pow_impl!(BigUint); + +pub(super) fn modpow(x: &BigInt, exponent: &BigInt, modulus: &BigInt) -> BigInt { + assert!( + !exponent.is_negative(), + "negative exponentiation is not supported!" + ); + assert!( + !modulus.is_zero(), + "attempt to calculate with zero modulus!" + ); + + let result = x.data.modpow(&exponent.data, &modulus.data); + if result.is_zero() { + return BigInt::zero(); + } + + // The sign of the result follows the modulus, like `mod_floor`. + let (sign, mag) = match (x.is_negative() && exponent.is_odd(), modulus.is_negative()) { + (false, false) => (Plus, result), + (true, false) => (Plus, &modulus.data - result), + (false, true) => (Minus, &modulus.data - result), + (true, true) => (Minus, result), + }; + BigInt::from_biguint(sign, mag) +} diff --git a/src/bigint/serde.rs b/src/bigint/serde.rs new file mode 100644 index 00000000..5c232f94 --- /dev/null +++ b/src/bigint/serde.rs @@ -0,0 +1,58 @@ +use super::{BigInt, Sign}; + +use serde::de::{Error, Unexpected}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +impl Serialize for Sign { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Note: do not change the serialization format, or it may break + // forward and backward compatibility of serialized data! + match *self { + Sign::Minus => (-1i8).serialize(serializer), + Sign::NoSign => 0i8.serialize(serializer), + Sign::Plus => 1i8.serialize(serializer), + } + } +} + +impl<'de> Deserialize<'de> for Sign { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let sign = i8::deserialize(deserializer)?; + match sign { + -1 => Ok(Sign::Minus), + 0 => Ok(Sign::NoSign), + 1 => Ok(Sign::Plus), + _ => Err(D::Error::invalid_value( + Unexpected::Signed(sign.into()), + &"a sign of -1, 0, or 1", + )), + } + } +} + +impl Serialize for BigInt { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Note: do not change the serialization format, or it may break + // forward and backward compatibility of serialized data! 
+ (self.sign, &self.data).serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for BigInt { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let (sign, data) = Deserialize::deserialize(deserializer)?; + Ok(BigInt::from_biguint(sign, data)) + } +} diff --git a/src/bigint/shift.rs b/src/bigint/shift.rs new file mode 100644 index 00000000..b816e126 --- /dev/null +++ b/src/bigint/shift.rs @@ -0,0 +1,107 @@ +use super::BigInt; +use super::Sign::NoSign; + +use core::ops::{Shl, ShlAssign, Shr, ShrAssign}; +use num_traits::{PrimInt, Signed, Zero}; + +macro_rules! impl_shift { + (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => { + impl<'b> $Shx<&'b $rhs> for BigInt { + type Output = BigInt; + + #[inline] + fn $shx(self, rhs: &'b $rhs) -> BigInt { + $Shx::$shx(self, *rhs) + } + } + impl<'a, 'b> $Shx<&'b $rhs> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn $shx(self, rhs: &'b $rhs) -> BigInt { + $Shx::$shx(self, *rhs) + } + } + impl<'b> $ShxAssign<&'b $rhs> for BigInt { + #[inline] + fn $shx_assign(&mut self, rhs: &'b $rhs) { + $ShxAssign::$shx_assign(self, *rhs); + } + } + }; + ($($rhs:ty),+) => {$( + impl Shl<$rhs> for BigInt { + type Output = BigInt; + + #[inline] + fn shl(self, rhs: $rhs) -> BigInt { + BigInt::from_biguint(self.sign, self.data << rhs) + } + } + impl<'a> Shl<$rhs> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn shl(self, rhs: $rhs) -> BigInt { + BigInt::from_biguint(self.sign, &self.data << rhs) + } + } + impl ShlAssign<$rhs> for BigInt { + #[inline] + fn shl_assign(&mut self, rhs: $rhs) { + self.data <<= rhs + } + } + impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs } + + impl Shr<$rhs> for BigInt { + type Output = BigInt; + + #[inline] + fn shr(self, rhs: $rhs) -> BigInt { + let round_down = shr_round_down(&self, rhs); + let data = self.data >> rhs; + let data = if round_down { data + 1u8 } else { data }; + BigInt::from_biguint(self.sign, data) + } + } + impl<'a> Shr<$rhs> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn shr(self, rhs: $rhs) -> BigInt { + let round_down = shr_round_down(self, rhs); + let data = &self.data >> rhs; + let data = if round_down { data + 1u8 } else { data }; + BigInt::from_biguint(self.sign, data) + } + } + impl ShrAssign<$rhs> for BigInt { + #[inline] + fn shr_assign(&mut self, rhs: $rhs) { + let round_down = shr_round_down(self, rhs); + self.data >>= rhs; + if round_down { + self.data += 1u8; + } else if self.data.is_zero() { + self.sign = NoSign; + } + } + } + impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs } + )*}; +} + +impl_shift! { u8, u16, u32, u64, u128, usize } +impl_shift! { i8, i16, i32, i64, i128, isize } + +// Negative values need a rounding adjustment if there are any ones in the +// bits that are getting shifted out. 
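// NOTE (illustrative, not part of the original patch): BigInt::from(-5) >> 1 must
// equal -3 (floor(-5 / 2)), not -2. The magnitude shift 5 >> 1 == 2 discards a set
// bit, so `shr_round_down` reports true and the shifted magnitude is bumped to 3
// before the Minus sign is reattached.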
+fn shr_round_down(i: &BigInt, shift: T) -> bool { + if i.is_negative() { + let zeros = i.trailing_zeros().expect("negative values are non-zero"); + shift > T::zero() && shift.to_u64().map(|shift| zeros < shift).unwrap_or(true) + } else { + false + } +} diff --git a/src/bigint/subtraction.rs b/src/bigint/subtraction.rs new file mode 100644 index 00000000..a12a844a --- /dev/null +++ b/src/bigint/subtraction.rs @@ -0,0 +1,300 @@ +use super::CheckedUnsignedAbs::{Negative, Positive}; +use super::Sign::{Minus, NoSign, Plus}; +use super::{BigInt, UnsignedAbs}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::cmp::Ordering::{Equal, Greater, Less}; +use core::mem; +use core::ops::{Sub, SubAssign}; +use num_traits::{CheckedSub, Zero}; + +// We want to forward to BigUint::sub, but it's not clear how that will go until +// we compare both sign and magnitude. So we duplicate this body for every +// val/ref combination, deferring that decision to BigUint's own forwarding. +macro_rules! bigint_sub { + ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => { + match ($a.sign, $b.sign) { + (_, NoSign) => $a_owned, + (NoSign, _) => -$b_owned, + // opposite signs => keep the sign of the left with the sum of magnitudes + (Plus, Minus) | (Minus, Plus) => BigInt::from_biguint($a.sign, $a_data + $b_data), + // same sign => keep or toggle the sign of the left with the difference of magnitudes + (Plus, Plus) | (Minus, Minus) => match $a.data.cmp(&$b.data) { + Less => BigInt::from_biguint(-$a.sign, $b_data - $a_data), + Greater => BigInt::from_biguint($a.sign, $a_data - $b_data), + Equal => Zero::zero(), + }, + } + }; +} + +impl<'a, 'b> Sub<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: &BigInt) -> BigInt { + bigint_sub!( + self, + self.clone(), + &self.data, + other, + other.clone(), + &other.data + ) + } +} + +impl<'a> Sub for &'a BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + bigint_sub!(self, self.clone(), &self.data, other, other, other.data) + } +} + +impl<'a> Sub<&'a BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: &BigInt) -> BigInt { + bigint_sub!(self, self, self.data, other, other.clone(), &other.data) + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + bigint_sub!(self, self, self.data, other, other, other.data) + } +} + +impl<'a> SubAssign<&'a BigInt> for BigInt { + #[inline] + fn sub_assign(&mut self, other: &BigInt) { + let n = mem::replace(self, BigInt::zero()); + *self = n - other; + } +} +forward_val_assign!(impl SubAssign for BigInt, sub_assign); + +promote_all_scalars!(impl Sub for BigInt, sub); +promote_all_scalars_assign!(impl SubAssign for BigInt, sub_assign); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: u32) -> BigInt { + match self.sign { + NoSign => -BigInt::from(other), + Minus => -BigInt::from(self.data + other), + Plus => match self.data.cmp(&From::from(other)) { + Equal => Zero::zero(), + Greater => BigInt::from(self.data - other), + Less => -BigInt::from(other - self.data), + }, + } + } +} +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: u32) { + let n = mem::replace(self, BigInt::zero()); + *self = n - other; + } 
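// NOTE (editorial, not part of the original patch): `sub_assign` above takes the old
// value out with `mem::replace` (swapping in a freshly created zero, which does not
// allocate) so it can reuse the by-value `Sub` impl instead of cloning the digit vector.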
+} + +impl Sub for u32 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + -(other - self) + } +} + +impl Sub for u64 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + -(other - self) + } +} + +impl Sub for u128 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + -(other - self) + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: u64) -> BigInt { + match self.sign { + NoSign => -BigInt::from(other), + Minus => -BigInt::from(self.data + other), + Plus => match self.data.cmp(&From::from(other)) { + Equal => Zero::zero(), + Greater => BigInt::from(self.data - other), + Less => -BigInt::from(other - self.data), + }, + } + } +} + +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: u64) { + let n = mem::replace(self, BigInt::zero()); + *self = n - other; + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: u128) -> BigInt { + match self.sign { + NoSign => -BigInt::from(other), + Minus => -BigInt::from(self.data + other), + Plus => match self.data.cmp(&From::from(other)) { + Equal => Zero::zero(), + Greater => BigInt::from(self.data - other), + Less => -BigInt::from(other - self.data), + }, + } + } +} + +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: u128) { + let n = mem::replace(self, BigInt::zero()); + *self = n - other; + } +} + +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self - u, + Negative(u) => self + u, + } + } +} +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self -= u, + Negative(u) => *self += u, + } + } +} + +impl Sub for i32 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u - other, + Negative(u) => -other - u, + } + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self - u, + Negative(u) => self + u, + } + } +} +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self -= u, + Negative(u) => *self += u, + } + } +} + +impl Sub for i64 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u - other, + Negative(u) => -other - u, + } + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self - u, + Negative(u) => self + u, + } + } +} + +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self -= u, + Negative(u) => *self += u, + } + } +} + +impl Sub for i128 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u - other, + Negative(u) => -other - u, + } + } +} + +impl CheckedSub for BigInt { + #[inline] + fn checked_sub(&self, v: &BigInt) -> Option { + Some(self.sub(v)) + } +} diff --git 
a/src/biguint.rs b/src/biguint.rs index 64726faa..b790c57a 100644 --- a/src/biguint.rs +++ b/src/biguint.rs @@ -1,49 +1,38 @@ -#[cfg(any(feature = "quickcheck", feature = "arbitrary"))] -use crate::std_alloc::Box; -use crate::std_alloc::{Cow, String, Vec}; +use crate::big_digit::{self, BigDigit}; +use crate::std_alloc::{String, Vec}; + use core::cmp; -use core::cmp::Ordering::{self, Equal, Greater, Less}; -#[cfg(has_try_from)] -use core::convert::TryFrom; +use core::cmp::Ordering; use core::default::Default; use core::fmt; use core::hash; -use core::iter::{FusedIterator, Product, Sum}; use core::mem; -use core::ops::{ - Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign, - Mul, MulAssign, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign, -}; -use core::str::{self, FromStr}; -use core::{f32, f64}; +use core::str; use core::{u32, u64, u8}; use num_integer::{Integer, Roots}; -use num_traits::float::FloatCore; -use num_traits::{ - CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Num, One, Pow, ToPrimitive, - Unsigned, Zero, -}; +use num_traits::{Num, One, Pow, ToPrimitive, Unsigned, Zero}; -use crate::big_digit::{self, BigDigit}; +mod addition; +mod division; +mod multiplication; +mod subtraction; -#[path = "algorithms.rs"] -mod algorithms; -#[path = "monty.rs"] +mod bits; +mod convert; +mod iter; mod monty; +mod power; +mod shift; -use self::algorithms::{__add2, __sub2rev, add2, sub2, sub2rev}; -use self::algorithms::{biguint_shl, biguint_shr}; -use self::algorithms::{cmp_slice, fls, ilog2}; -use self::algorithms::{div_rem, div_rem_digit, div_rem_ref, rem_digit}; -use self::algorithms::{mac_with_carry, mul3, scalar_mul}; -use self::monty::monty_modpow; +#[cfg(any(feature = "quickcheck", feature = "arbitrary"))] +mod arbitrary; -use crate::UsizePromotion; +#[cfg(feature = "serde")] +mod serde; -use crate::ParseBigIntError; -#[cfg(has_try_from)] -use crate::TryFromBigIntError; +pub(crate) use self::convert::to_str_radix_reversed; +pub use self::iter::{U32Digits, U64Digits}; /// A big unsigned integer type. 
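// NOTE (editorial, not part of the original patch): the representation itself is not
// changed by this refactor; a BigUint remains a little-endian Vec of BigDigit limbs
// kept normalized (no trailing zero limb), which is the invariant the new `cmp_slice`
// below debug_asserts before comparing lengths and then digits from the most
// significant end.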
#[derive(Debug)] @@ -67,35 +56,6 @@ impl Clone for BigUint { } } -#[cfg(feature = "quickcheck")] -impl quickcheck::Arbitrary for BigUint { - fn arbitrary(g: &mut G) -> Self { - // Use arbitrary from Vec - biguint_from_vec(Vec::::arbitrary(g)) - } - - fn shrink(&self) -> Box> { - // Use shrinker from Vec - Box::new(self.data.shrink().map(biguint_from_vec)) - } -} - -#[cfg(feature = "arbitrary")] -mod abitrary_impl { - use super::*; - use arbitrary::{Arbitrary, Result, Unstructured}; - - impl Arbitrary for BigUint { - fn arbitrary(u: &mut Unstructured<'_>) -> Result { - Ok(biguint_from_vec(Vec::::arbitrary(u)?)) - } - - fn shrink(&self) -> Box> { - Box::new(self.data.shrink().map(biguint_from_vec)) - } - } -} - impl hash::Hash for BigUint { #[inline] fn hash(&self, state: &mut H) { @@ -128,6 +88,17 @@ impl Ord for BigUint { } } +#[inline] +fn cmp_slice(a: &[BigDigit], b: &[BigDigit]) -> Ordering { + debug_assert!(a.last() != Some(&0)); + debug_assert!(b.last() != Some(&0)); + + match Ord::cmp(&a.len(), &b.len()) { + Ordering::Equal => Iterator::cmp(a.iter().rev(), b.iter().rev()), + other => other, + } +} + impl Default for BigUint { #[inline] fn default() -> BigUint { @@ -167,361 +138,6 @@ impl fmt::Octal for BigUint { } } -impl FromStr for BigUint { - type Err = ParseBigIntError; - - #[inline] - fn from_str(s: &str) -> Result { - BigUint::from_str_radix(s, 10) - } -} - -/// Convert a u32 chunk (len is either 1 or 2) to a single u64 digit -#[inline] -fn u32_chunk_to_u64(chunk: &[u32]) -> u64 { - // raw could have odd length - let mut digit = chunk[0] as u64; - if let Some(&hi) = chunk.get(1) { - digit |= (hi as u64) << 32; - } - digit -} - -// Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides -// BigDigit::BITS -fn from_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint { - debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0); - debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits))); - - let digits_per_big_digit = big_digit::BITS / bits; - - let data = v - .chunks(digits_per_big_digit.into()) - .map(|chunk| { - chunk - .iter() - .rev() - .fold(0, |acc, &c| (acc << bits) | BigDigit::from(c)) - }) - .collect(); - - biguint_from_vec(data) -} - -// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide -// BigDigit::BITS -fn from_inexact_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint { - debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0); - debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits))); - - let big_digits = (v.len() as u64) - .saturating_mul(bits.into()) - .div_ceil(&big_digit::BITS.into()) - .to_usize() - .unwrap_or(core::usize::MAX); - let mut data = Vec::with_capacity(big_digits); - - let mut d = 0; - let mut dbits = 0; // number of bits we currently have in d - - // walk v accumululating bits in d; whenever we accumulate big_digit::BITS in d, spit out a - // big_digit: - for &c in v { - d |= BigDigit::from(c) << dbits; - dbits += bits; - - if dbits >= big_digit::BITS { - data.push(d); - dbits -= big_digit::BITS; - // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit - // in d) - grab the bits we lost here: - d = BigDigit::from(c) >> (bits - dbits); - } - } - - if dbits > 0 { - debug_assert!(dbits < big_digit::BITS); - data.push(d as BigDigit); - } - - biguint_from_vec(data) -} - -// Read little-endian radix digits -fn from_radix_digits_be(v: &[u8], radix: u32) -> BigUint { - debug_assert!(!v.is_empty() && 
!radix.is_power_of_two()); - debug_assert!(v.iter().all(|&c| u32::from(c) < radix)); - - #[cfg(feature = "std")] - let radix_log2 = f64::from(radix).log2(); - #[cfg(not(feature = "std"))] - let radix_log2 = ilog2(radix.next_power_of_two()) as f64; - - // Estimate how big the result will be, so we can pre-allocate it. - let bits = radix_log2 * v.len() as f64; - let big_digits = (bits / big_digit::BITS as f64).ceil(); - let mut data = Vec::with_capacity(big_digits.to_usize().unwrap_or(0)); - - let (base, power) = get_radix_base(radix, big_digit::BITS); - let radix = radix as BigDigit; - - let r = v.len() % power; - let i = if r == 0 { power } else { r }; - let (head, tail) = v.split_at(i); - - let first = head - .iter() - .fold(0, |acc, &d| acc * radix + BigDigit::from(d)); - data.push(first); - - debug_assert!(tail.len() % power == 0); - for chunk in tail.chunks(power) { - if data.last() != Some(&0) { - data.push(0); - } - - let mut carry = 0; - for d in data.iter_mut() { - *d = mac_with_carry(0, *d, base, &mut carry); - } - debug_assert!(carry == 0); - - let n = chunk - .iter() - .fold(0, |acc, &d| acc * radix + BigDigit::from(d)); - add2(&mut data, &[n]); - } - - biguint_from_vec(data) -} - -impl Num for BigUint { - type FromStrRadixErr = ParseBigIntError; - - /// Creates and initializes a `BigUint`. - fn from_str_radix(s: &str, radix: u32) -> Result { - assert!(2 <= radix && radix <= 36, "The radix must be within 2...36"); - let mut s = s; - if s.starts_with('+') { - let tail = &s[1..]; - if !tail.starts_with('+') { - s = tail - } - } - - if s.is_empty() { - return Err(ParseBigIntError::empty()); - } - - if s.starts_with('_') { - // Must lead with a real digit! - return Err(ParseBigIntError::invalid()); - } - - // First normalize all characters to plain digit values - let mut v = Vec::with_capacity(s.len()); - for b in s.bytes() { - let d = match b { - b'0'..=b'9' => b - b'0', - b'a'..=b'z' => b - b'a' + 10, - b'A'..=b'Z' => b - b'A' + 10, - b'_' => continue, - _ => u8::MAX, - }; - if d < radix as u8 { - v.push(d); - } else { - return Err(ParseBigIntError::invalid()); - } - } - - let res = if radix.is_power_of_two() { - // Powers of two can use bitwise masks and shifting instead of multiplication - let bits = ilog2(radix); - v.reverse(); - if big_digit::BITS % bits == 0 { - from_bitwise_digits_le(&v, bits) - } else { - from_inexact_bitwise_digits_le(&v, bits) - } - } else { - from_radix_digits_be(&v, radix) - }; - Ok(res) - } -} - -forward_val_val_binop!(impl BitAnd for BigUint, bitand); -forward_ref_val_binop!(impl BitAnd for BigUint, bitand); - -// do not use forward_ref_ref_binop_commutative! 
for bitand so that we can -// clone the smaller value rather than the larger, avoiding over-allocation -impl<'a, 'b> BitAnd<&'b BigUint> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn bitand(self, other: &BigUint) -> BigUint { - // forward to val-ref, choosing the smaller to clone - if self.data.len() <= other.data.len() { - self.clone() & other - } else { - other.clone() & self - } - } -} - -forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign); - -impl<'a> BitAnd<&'a BigUint> for BigUint { - type Output = BigUint; - - #[inline] - fn bitand(mut self, other: &BigUint) -> BigUint { - self &= other; - self - } -} -impl<'a> BitAndAssign<&'a BigUint> for BigUint { - #[inline] - fn bitand_assign(&mut self, other: &BigUint) { - for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { - *ai &= bi; - } - self.data.truncate(other.data.len()); - self.normalize(); - } -} - -forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor); -forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign); - -impl<'a> BitOr<&'a BigUint> for BigUint { - type Output = BigUint; - - fn bitor(mut self, other: &BigUint) -> BigUint { - self |= other; - self - } -} -impl<'a> BitOrAssign<&'a BigUint> for BigUint { - #[inline] - fn bitor_assign(&mut self, other: &BigUint) { - for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { - *ai |= bi; - } - if other.data.len() > self.data.len() { - let extra = &other.data[self.data.len()..]; - self.data.extend(extra.iter().cloned()); - } - } -} - -forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor); -forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign); - -impl<'a> BitXor<&'a BigUint> for BigUint { - type Output = BigUint; - - fn bitxor(mut self, other: &BigUint) -> BigUint { - self ^= other; - self - } -} -impl<'a> BitXorAssign<&'a BigUint> for BigUint { - #[inline] - fn bitxor_assign(&mut self, other: &BigUint) { - for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { - *ai ^= bi; - } - if other.data.len() > self.data.len() { - let extra = &other.data[self.data.len()..]; - self.data.extend(extra.iter().cloned()); - } - self.normalize(); - } -} - -macro_rules! impl_shift { - (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => { - impl<'b> $Shx<&'b $rhs> for BigUint { - type Output = BigUint; - - #[inline] - fn $shx(self, rhs: &'b $rhs) -> BigUint { - $Shx::$shx(self, *rhs) - } - } - impl<'a, 'b> $Shx<&'b $rhs> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn $shx(self, rhs: &'b $rhs) -> BigUint { - $Shx::$shx(self, *rhs) - } - } - impl<'b> $ShxAssign<&'b $rhs> for BigUint { - #[inline] - fn $shx_assign(&mut self, rhs: &'b $rhs) { - $ShxAssign::$shx_assign(self, *rhs); - } - } - }; - ($($rhs:ty),+) => {$( - impl Shl<$rhs> for BigUint { - type Output = BigUint; - - #[inline] - fn shl(self, rhs: $rhs) -> BigUint { - biguint_shl(Cow::Owned(self), rhs) - } - } - impl<'a> Shl<$rhs> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn shl(self, rhs: $rhs) -> BigUint { - biguint_shl(Cow::Borrowed(self), rhs) - } - } - impl ShlAssign<$rhs> for BigUint { - #[inline] - fn shl_assign(&mut self, rhs: $rhs) { - let n = mem::replace(self, BigUint::zero()); - *self = n << rhs; - } - } - impl_shift! 
{ @ref Shl::shl, ShlAssign::shl_assign, $rhs } - - impl Shr<$rhs> for BigUint { - type Output = BigUint; - - #[inline] - fn shr(self, rhs: $rhs) -> BigUint { - biguint_shr(Cow::Owned(self), rhs) - } - } - impl<'a> Shr<$rhs> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn shr(self, rhs: $rhs) -> BigUint { - biguint_shr(Cow::Borrowed(self), rhs) - } - } - impl ShrAssign<$rhs> for BigUint { - #[inline] - fn shr_assign(&mut self, rhs: $rhs) { - let n = mem::replace(self, BigUint::zero()); - *self = n >> rhs; - } - } - impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs } - )*}; -} - -impl_shift! { u8, u16, u32, u64, u128, usize } -impl_shift! { i8, i16, i32, i64, i128, isize } - impl Zero for BigUint { #[inline] fn zero() -> BigUint { @@ -559,1960 +175,333 @@ impl One for BigUint { impl Unsigned for BigUint {} -impl<'b> Pow<&'b BigUint> for BigUint { - type Output = BigUint; - +impl Integer for BigUint { #[inline] - fn pow(self, exp: &BigUint) -> BigUint { - if self.is_one() || exp.is_zero() { - BigUint::one() - } else if self.is_zero() { - BigUint::zero() - } else if let Some(exp) = exp.to_u64() { - self.pow(exp) - } else if let Some(exp) = exp.to_u128() { - self.pow(exp) - } else { - // At this point, `self >= 2` and `exp >= 2¹²⁸`. The smallest possible result given - // `2.pow(2¹²⁸)` would require far more memory than 64-bit targets can address! - panic!("memory overflow") - } + fn div_rem(&self, other: &BigUint) -> (BigUint, BigUint) { + division::div_rem_ref(self, other) } -} -impl Pow for BigUint { - type Output = BigUint; + #[inline] + fn div_floor(&self, other: &BigUint) -> BigUint { + let (d, _) = division::div_rem_ref(self, other); + d + } #[inline] - fn pow(self, exp: BigUint) -> BigUint { - Pow::pow(self, &exp) + fn mod_floor(&self, other: &BigUint) -> BigUint { + let (_, m) = division::div_rem_ref(self, other); + m } -} -impl<'a, 'b> Pow<&'b BigUint> for &'a BigUint { - type Output = BigUint; + #[inline] + fn div_mod_floor(&self, other: &BigUint) -> (BigUint, BigUint) { + division::div_rem_ref(self, other) + } #[inline] - fn pow(self, exp: &BigUint) -> BigUint { - if self.is_one() || exp.is_zero() { - BigUint::one() - } else if self.is_zero() { - BigUint::zero() + fn div_ceil(&self, other: &BigUint) -> BigUint { + let (d, m) = division::div_rem_ref(self, other); + if m.is_zero() { + d } else { - self.clone().pow(exp) + d + 1u32 } } -} - -impl<'a> Pow for &'a BigUint { - type Output = BigUint; + /// Calculates the Greatest Common Divisor (GCD) of the number and `other`. + /// + /// The result is always positive. #[inline] - fn pow(self, exp: BigUint) -> BigUint { - Pow::pow(self, &exp) - } -} - -macro_rules! 
pow_impl { - ($T:ty) => { - impl Pow<$T> for BigUint { - type Output = BigUint; - - fn pow(self, mut exp: $T) -> BigUint { - if exp == 0 { - return BigUint::one(); - } - let mut base = self; - - while exp & 1 == 0 { - base = &base * &base; - exp >>= 1; - } - - if exp == 1 { - return base; - } - - let mut acc = base.clone(); - while exp > 1 { - exp >>= 1; - base = &base * &base; - if exp & 1 == 1 { - acc = &acc * &base; - } - } - acc - } + fn gcd(&self, other: &Self) -> Self { + #[inline] + fn twos(x: &BigUint) -> u64 { + x.trailing_zeros().unwrap_or(0) } - impl<'b> Pow<&'b $T> for BigUint { - type Output = BigUint; - - #[inline] - fn pow(self, exp: &$T) -> BigUint { - Pow::pow(self, *exp) - } + // Stein's algorithm + if self.is_zero() { + return other.clone(); } - - impl<'a> Pow<$T> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn pow(self, exp: $T) -> BigUint { - if exp == 0 { - return BigUint::one(); - } - Pow::pow(self.clone(), exp) - } + if other.is_zero() { + return self.clone(); } + let mut m = self.clone(); + let mut n = other.clone(); + + // find common factors of 2 + let shift = cmp::min(twos(&n), twos(&m)); - impl<'a, 'b> Pow<&'b $T> for &'a BigUint { - type Output = BigUint; + // divide m and n by 2 until odd + // m inside loop + n >>= twos(&n); - #[inline] - fn pow(self, exp: &$T) -> BigUint { - Pow::pow(self, *exp) + while !m.is_zero() { + m >>= twos(&m); + if n > m { + mem::swap(&mut n, &mut m) } + m -= &n; } - }; -} - -pow_impl!(u8); -pow_impl!(u16); -pow_impl!(u32); -pow_impl!(u64); -pow_impl!(usize); -pow_impl!(u128); - -forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add); -forward_val_assign!(impl AddAssign for BigUint, add_assign); - -impl<'a> Add<&'a BigUint> for BigUint { - type Output = BigUint; - fn add(mut self, other: &BigUint) -> BigUint { - self += other; - self + n << shift } -} -impl<'a> AddAssign<&'a BigUint> for BigUint { + + /// Calculates the Lowest Common Multiple (LCM) of the number and `other`. #[inline] - fn add_assign(&mut self, other: &BigUint) { - let self_len = self.data.len(); - let carry = if self_len < other.data.len() { - let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]); - self.data.extend_from_slice(&other.data[self_len..]); - __add2(&mut self.data[self_len..], &[lo_carry]) + fn lcm(&self, other: &BigUint) -> BigUint { + if self.is_zero() && other.is_zero() { + Self::zero() } else { - __add2(&mut self.data[..], &other.data[..]) - }; - if carry != 0 { - self.data.push(carry); + self / self.gcd(other) * other } } -} - -promote_unsigned_scalars!(impl Add for BigUint, add); -promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); - -impl Add for BigUint { - type Output = BigUint; + /// Calculates the Greatest Common Divisor (GCD) and + /// Lowest Common Multiple (LCM) together. #[inline] - fn add(mut self, other: u32) -> BigUint { - self += other; - self + fn gcd_lcm(&self, other: &Self) -> (Self, Self) { + let gcd = self.gcd(other); + let lcm = if gcd.is_zero() { + Self::zero() + } else { + self / &gcd * other + }; + (gcd, lcm) } -} -impl AddAssign for BigUint { + /// Deprecated, use `is_multiple_of` instead. 
#[inline] - fn add_assign(&mut self, other: u32) { - if other != 0 { - if self.data.is_empty() { - self.data.push(0); - } - - let carry = __add2(&mut self.data, &[other as BigDigit]); - if carry != 0 { - self.data.push(carry); - } - } + fn divides(&self, other: &BigUint) -> bool { + self.is_multiple_of(other) } -} - -impl Add for BigUint { - type Output = BigUint; + /// Returns `true` if the number is a multiple of `other`. #[inline] - fn add(mut self, other: u64) -> BigUint { - self += other; - self + fn is_multiple_of(&self, other: &BigUint) -> bool { + (self % other).is_zero() } -} -impl AddAssign for BigUint { - #[cfg(not(u64_digit))] + /// Returns `true` if the number is divisible by `2`. #[inline] - fn add_assign(&mut self, other: u64) { - let (hi, lo) = big_digit::from_doublebigdigit(other); - if hi == 0 { - *self += lo; - } else { - while self.data.len() < 2 { - self.data.push(0); - } - - let carry = __add2(&mut self.data, &[lo, hi]); - if carry != 0 { - self.data.push(carry); - } - } - } - - #[cfg(u64_digit)] - #[inline] - fn add_assign(&mut self, other: u64) { - if other != 0 { - if self.data.is_empty() { - self.data.push(0); - } - - let carry = __add2(&mut self.data, &[other as BigDigit]); - if carry != 0 { - self.data.push(carry); - } - } - } -} - -impl Add for BigUint { - type Output = BigUint; - - #[inline] - fn add(mut self, other: u128) -> BigUint { - self += other; - self - } -} - -impl AddAssign for BigUint { - #[cfg(not(u64_digit))] - #[inline] - fn add_assign(&mut self, other: u128) { - if other <= u128::from(u64::max_value()) { - *self += other as u64 - } else { - let (a, b, c, d) = u32_from_u128(other); - let carry = if a > 0 { - while self.data.len() < 4 { - self.data.push(0); - } - __add2(&mut self.data, &[d, c, b, a]) - } else { - debug_assert!(b > 0); - while self.data.len() < 3 { - self.data.push(0); - } - __add2(&mut self.data, &[d, c, b]) - }; - - if carry != 0 { - self.data.push(carry); - } - } - } - - #[cfg(u64_digit)] - #[inline] - fn add_assign(&mut self, other: u128) { - let (hi, lo) = big_digit::from_doublebigdigit(other); - if hi == 0 { - *self += lo; - } else { - while self.data.len() < 2 { - self.data.push(0); - } - - let carry = __add2(&mut self.data, &[lo, hi]); - if carry != 0 { - self.data.push(carry); - } - } - } -} - -forward_val_val_binop!(impl Sub for BigUint, sub); -forward_ref_ref_binop!(impl Sub for BigUint, sub); -forward_val_assign!(impl SubAssign for BigUint, sub_assign); - -impl<'a> Sub<&'a BigUint> for BigUint { - type Output = BigUint; - - fn sub(mut self, other: &BigUint) -> BigUint { - self -= other; - self - } -} -impl<'a> SubAssign<&'a BigUint> for BigUint { - fn sub_assign(&mut self, other: &'a BigUint) { - sub2(&mut self.data[..], &other.data[..]); - self.normalize(); - } -} - -impl<'a> Sub for &'a BigUint { - type Output = BigUint; - - fn sub(self, mut other: BigUint) -> BigUint { - let other_len = other.data.len(); - if other_len < self.data.len() { - let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data); - other.data.extend_from_slice(&self.data[other_len..]); - if lo_borrow != 0 { - sub2(&mut other.data[other_len..], &[1]) - } - } else { - sub2rev(&self.data[..], &mut other.data[..]); - } - other.normalized() - } -} - -promote_unsigned_scalars!(impl Sub for BigUint, sub); -promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign); -forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); -forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); 
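// NOTE (illustrative sketch, not part of the original patch): the carry handling in
// the scalar `AddAssign` impls above pushes a new most-significant limb whenever the
// addition overflows the existing ones. A minimal check, assuming `BigUint` is in
// scope and the impls keep this behaviour after being moved into the new submodules:
#[test]
fn scalar_add_pushes_carry_limb() {
    let mut x = BigUint::from(u64::MAX); // a single limb on 64-bit-digit builds
    x += 1u32; // overflows that limb; the returned carry becomes a new limb
    assert_eq!(x, BigUint::from(1u8) << 64); // x == 2^64
}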
-forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); - -impl Sub for BigUint { - type Output = BigUint; - - #[inline] - fn sub(mut self, other: u32) -> BigUint { - self -= other; - self - } -} - -impl SubAssign for BigUint { - fn sub_assign(&mut self, other: u32) { - sub2(&mut self.data[..], &[other as BigDigit]); - self.normalize(); - } -} - -impl Sub for u32 { - type Output = BigUint; - - #[cfg(not(u64_digit))] - #[inline] - fn sub(self, mut other: BigUint) -> BigUint { - if other.data.len() == 0 { - other.data.push(self); - } else { - sub2rev(&[self], &mut other.data[..]); - } - other.normalized() - } - - #[cfg(u64_digit)] - #[inline] - fn sub(self, mut other: BigUint) -> BigUint { - if other.data.is_empty() { - other.data.push(self as BigDigit); - } else { - sub2rev(&[self as BigDigit], &mut other.data[..]); - } - other.normalized() - } -} - -impl Sub for BigUint { - type Output = BigUint; - - #[inline] - fn sub(mut self, other: u64) -> BigUint { - self -= other; - self - } -} - -impl SubAssign for BigUint { - #[cfg(not(u64_digit))] - #[inline] - fn sub_assign(&mut self, other: u64) { - let (hi, lo) = big_digit::from_doublebigdigit(other); - sub2(&mut self.data[..], &[lo, hi]); - self.normalize(); - } - - #[cfg(u64_digit)] - #[inline] - fn sub_assign(&mut self, other: u64) { - sub2(&mut self.data[..], &[other as BigDigit]); - self.normalize(); - } -} - -impl Sub for u64 { - type Output = BigUint; - - #[cfg(not(u64_digit))] - #[inline] - fn sub(self, mut other: BigUint) -> BigUint { - while other.data.len() < 2 { - other.data.push(0); - } - - let (hi, lo) = big_digit::from_doublebigdigit(self); - sub2rev(&[lo, hi], &mut other.data[..]); - other.normalized() - } - - #[cfg(u64_digit)] - #[inline] - fn sub(self, mut other: BigUint) -> BigUint { - if other.data.is_empty() { - other.data.push(self); - } else { - sub2rev(&[self], &mut other.data[..]); - } - other.normalized() - } -} - -impl Sub for BigUint { - type Output = BigUint; - - #[inline] - fn sub(mut self, other: u128) -> BigUint { - self -= other; - self - } -} - -impl SubAssign for BigUint { - #[cfg(not(u64_digit))] - #[inline] - fn sub_assign(&mut self, other: u128) { - let (a, b, c, d) = u32_from_u128(other); - sub2(&mut self.data[..], &[d, c, b, a]); - self.normalize(); - } - - #[cfg(u64_digit)] - #[inline] - fn sub_assign(&mut self, other: u128) { - let (hi, lo) = big_digit::from_doublebigdigit(other); - sub2(&mut self.data[..], &[lo, hi]); - self.normalize(); - } -} - -impl Sub for u128 { - type Output = BigUint; - - #[cfg(not(u64_digit))] - #[inline] - fn sub(self, mut other: BigUint) -> BigUint { - while other.data.len() < 4 { - other.data.push(0); - } - - let (a, b, c, d) = u32_from_u128(self); - sub2rev(&[d, c, b, a], &mut other.data[..]); - other.normalized() - } - - #[cfg(u64_digit)] - #[inline] - fn sub(self, mut other: BigUint) -> BigUint { - while other.data.len() < 2 { - other.data.push(0); - } - - let (hi, lo) = big_digit::from_doublebigdigit(self); - sub2rev(&[lo, hi], &mut other.data[..]); - other.normalized() - } -} - -forward_all_binop_to_ref_ref!(impl Mul for BigUint, mul); -forward_val_assign!(impl MulAssign for BigUint, mul_assign); - -impl<'a, 'b> Mul<&'b BigUint> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn mul(self, other: &BigUint) -> BigUint { - mul3(&self.data[..], &other.data[..]) - } -} -impl<'a> MulAssign<&'a BigUint> for BigUint { - #[inline] - fn mul_assign(&mut self, other: &'a BigUint) { - *self = &*self * other - } -} - -promote_unsigned_scalars!(impl Mul for 
BigUint, mul); -promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); - -impl Mul for BigUint { - type Output = BigUint; - - #[inline] - fn mul(mut self, other: u32) -> BigUint { - self *= other; - self - } -} -impl MulAssign for BigUint { - #[inline] - fn mul_assign(&mut self, other: u32) { - if other == 0 { - self.data.clear(); - } else { - let carry = scalar_mul(&mut self.data[..], other as BigDigit); - if carry != 0 { - self.data.push(carry); - } - } - } -} - -impl Mul for BigUint { - type Output = BigUint; - - #[inline] - fn mul(mut self, other: u64) -> BigUint { - self *= other; - self - } -} -impl MulAssign for BigUint { - #[cfg(not(u64_digit))] - #[inline] - fn mul_assign(&mut self, other: u64) { - if other == 0 { - self.data.clear(); - } else if other <= u64::from(BigDigit::max_value()) { - *self *= other as BigDigit - } else { - let (hi, lo) = big_digit::from_doublebigdigit(other); - *self = mul3(&self.data[..], &[lo, hi]) - } - } - - #[cfg(u64_digit)] - #[inline] - fn mul_assign(&mut self, other: u64) { - if other == 0 { - self.data.clear(); - } else { - let carry = scalar_mul(&mut self.data[..], other as BigDigit); - if carry != 0 { - self.data.push(carry); - } - } - } -} - -impl Mul for BigUint { - type Output = BigUint; - - #[inline] - fn mul(mut self, other: u128) -> BigUint { - self *= other; - self - } -} - -impl MulAssign for BigUint { - #[cfg(not(u64_digit))] - #[inline] - fn mul_assign(&mut self, other: u128) { - if other == 0 { - self.data.clear(); - } else if other <= u128::from(BigDigit::max_value()) { - *self *= other as BigDigit - } else { - let (a, b, c, d) = u32_from_u128(other); - *self = mul3(&self.data[..], &[d, c, b, a]) - } - } - - #[cfg(u64_digit)] - #[inline] - fn mul_assign(&mut self, other: u128) { - if other == 0 { - self.data.clear(); - } else if other <= BigDigit::max_value() as u128 { - *self *= other as BigDigit - } else { - let (hi, lo) = big_digit::from_doublebigdigit(other); - *self = mul3(&self.data[..], &[lo, hi]) - } - } -} - -forward_val_ref_binop!(impl Div for BigUint, div); -forward_ref_val_binop!(impl Div for BigUint, div); -forward_val_assign!(impl DivAssign for BigUint, div_assign); - -impl Div for BigUint { - type Output = BigUint; - - #[inline] - fn div(self, other: BigUint) -> BigUint { - let (q, _) = div_rem(self, other); - q - } -} - -impl<'a, 'b> Div<&'b BigUint> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn div(self, other: &BigUint) -> BigUint { - let (q, _) = self.div_rem(other); - q - } -} -impl<'a> DivAssign<&'a BigUint> for BigUint { - #[inline] - fn div_assign(&mut self, other: &'a BigUint) { - *self = &*self / other; - } -} - -promote_unsigned_scalars!(impl Div for BigUint, div); -promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign); -forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); - -impl Div for BigUint { - type Output = BigUint; - - #[inline] - fn div(self, other: u32) -> BigUint { - let (q, _) = div_rem_digit(self, other as BigDigit); - q - } -} -impl DivAssign for BigUint { - #[inline] - fn div_assign(&mut self, other: u32) { - *self = &*self / other; - } -} - -impl Div for u32 { - type 
Output = BigUint; - - #[inline] - fn div(self, other: BigUint) -> BigUint { - match other.data.len() { - 0 => panic!("attempt to divide by zero"), - 1 => From::from(self as BigDigit / other.data[0]), - _ => Zero::zero(), - } - } -} - -impl Div for BigUint { - type Output = BigUint; - - #[inline] - fn div(self, other: u64) -> BigUint { - let (q, _) = div_rem(self, From::from(other)); - q - } -} -impl DivAssign for BigUint { - #[inline] - fn div_assign(&mut self, other: u64) { - // a vec of size 0 does not allocate, so this is fairly cheap - let temp = mem::replace(self, Zero::zero()); - *self = temp / other; - } -} - -impl Div for u64 { - type Output = BigUint; - - #[cfg(not(u64_digit))] - #[inline] - fn div(self, other: BigUint) -> BigUint { - match other.data.len() { - 0 => panic!("attempt to divide by zero"), - 1 => From::from(self / u64::from(other.data[0])), - 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])), - _ => Zero::zero(), - } - } - - #[cfg(u64_digit)] - #[inline] - fn div(self, other: BigUint) -> BigUint { - match other.data.len() { - 0 => panic!("attempt to divide by zero"), - 1 => From::from(self / other.data[0]), - _ => Zero::zero(), - } - } -} - -impl Div for BigUint { - type Output = BigUint; - - #[inline] - fn div(self, other: u128) -> BigUint { - let (q, _) = div_rem(self, From::from(other)); - q - } -} - -impl DivAssign for BigUint { - #[inline] - fn div_assign(&mut self, other: u128) { - *self = &*self / other; - } -} - -impl Div for u128 { - type Output = BigUint; - - #[cfg(not(u64_digit))] - #[inline] - fn div(self, other: BigUint) -> BigUint { - match other.data.len() { - 0 => panic!("attempt to divide by zero"), - 1 => From::from(self / u128::from(other.data[0])), - 2 => From::from( - self / u128::from(big_digit::to_doublebigdigit(other.data[1], other.data[0])), - ), - 3 => From::from(self / u32_to_u128(0, other.data[2], other.data[1], other.data[0])), - 4 => From::from( - self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]), - ), - _ => Zero::zero(), - } - } - - #[cfg(u64_digit)] - #[inline] - fn div(self, other: BigUint) -> BigUint { - match other.data.len() { - 0 => panic!("attempt to divide by zero"), - 1 => From::from(self / other.data[0] as u128), - 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])), - _ => Zero::zero(), - } - } -} - -forward_val_ref_binop!(impl Rem for BigUint, rem); -forward_ref_val_binop!(impl Rem for BigUint, rem); -forward_val_assign!(impl RemAssign for BigUint, rem_assign); - -impl Rem for BigUint { - type Output = BigUint; - - #[inline] - fn rem(self, other: BigUint) -> BigUint { - if let Some(other) = other.to_u32() { - &self % other - } else { - let (_, r) = div_rem(self, other); - r - } - } -} - -impl<'a, 'b> Rem<&'b BigUint> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn rem(self, other: &BigUint) -> BigUint { - if let Some(other) = other.to_u32() { - self % other - } else { - let (_, r) = self.div_rem(other); - r - } - } -} -impl<'a> RemAssign<&'a BigUint> for BigUint { - #[inline] - fn rem_assign(&mut self, other: &BigUint) { - *self = &*self % other; - } -} - -promote_unsigned_scalars!(impl Rem for BigUint, rem); -promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign); -forward_all_scalar_binop_to_ref_val!(impl Rem for BigUint, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigUint, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigUint, rem); - -impl<'a> Rem for &'a BigUint { - type 
Output = BigUint; - - #[inline] - fn rem(self, other: u32) -> BigUint { - rem_digit(self, other as BigDigit).into() - } -} -impl RemAssign for BigUint { - #[inline] - fn rem_assign(&mut self, other: u32) { - *self = &*self % other; - } -} - -impl<'a> Rem<&'a BigUint> for u32 { - type Output = BigUint; - - #[inline] - fn rem(mut self, other: &'a BigUint) -> BigUint { - self %= other; - From::from(self) - } -} - -macro_rules! impl_rem_assign_scalar { - ($scalar:ty, $to_scalar:ident) => { - forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign); - impl<'a> RemAssign<&'a BigUint> for $scalar { - #[inline] - fn rem_assign(&mut self, other: &BigUint) { - *self = match other.$to_scalar() { - None => *self, - Some(0) => panic!("attempt to divide by zero"), - Some(v) => *self % v - }; - } - } - } -} - -// we can scalar %= BigUint for any scalar, including signed types -impl_rem_assign_scalar!(u128, to_u128); -impl_rem_assign_scalar!(usize, to_usize); -impl_rem_assign_scalar!(u64, to_u64); -impl_rem_assign_scalar!(u32, to_u32); -impl_rem_assign_scalar!(u16, to_u16); -impl_rem_assign_scalar!(u8, to_u8); -impl_rem_assign_scalar!(i128, to_i128); -impl_rem_assign_scalar!(isize, to_isize); -impl_rem_assign_scalar!(i64, to_i64); -impl_rem_assign_scalar!(i32, to_i32); -impl_rem_assign_scalar!(i16, to_i16); -impl_rem_assign_scalar!(i8, to_i8); - -impl Rem for BigUint { - type Output = BigUint; - - #[inline] - fn rem(self, other: u64) -> BigUint { - let (_, r) = div_rem(self, From::from(other)); - r - } -} -impl RemAssign for BigUint { - #[inline] - fn rem_assign(&mut self, other: u64) { - *self = &*self % other; - } -} - -impl Rem for u64 { - type Output = BigUint; - - #[inline] - fn rem(mut self, other: BigUint) -> BigUint { - self %= other; - From::from(self) - } -} - -impl Rem for BigUint { - type Output = BigUint; - - #[inline] - fn rem(self, other: u128) -> BigUint { - let (_, r) = div_rem(self, From::from(other)); - r - } -} - -impl RemAssign for BigUint { - #[inline] - fn rem_assign(&mut self, other: u128) { - *self = &*self % other; - } -} - -impl Rem for u128 { - type Output = BigUint; - - #[inline] - fn rem(mut self, other: BigUint) -> BigUint { - self %= other; - From::from(self) - } -} - -impl CheckedAdd for BigUint { - #[inline] - fn checked_add(&self, v: &BigUint) -> Option { - Some(self.add(v)) - } -} - -impl CheckedSub for BigUint { - #[inline] - fn checked_sub(&self, v: &BigUint) -> Option { - match self.cmp(v) { - Less => None, - Equal => Some(Zero::zero()), - Greater => Some(self.sub(v)), - } - } -} - -impl CheckedMul for BigUint { - #[inline] - fn checked_mul(&self, v: &BigUint) -> Option { - Some(self.mul(v)) - } -} - -impl CheckedDiv for BigUint { - #[inline] - fn checked_div(&self, v: &BigUint) -> Option { - if v.is_zero() { - return None; - } - Some(self.div(v)) - } -} - -impl Integer for BigUint { - #[inline] - fn div_rem(&self, other: &BigUint) -> (BigUint, BigUint) { - div_rem_ref(self, other) - } - - #[inline] - fn div_floor(&self, other: &BigUint) -> BigUint { - let (d, _) = div_rem_ref(self, other); - d - } - - #[inline] - fn mod_floor(&self, other: &BigUint) -> BigUint { - let (_, m) = div_rem_ref(self, other); - m - } - - #[inline] - fn div_mod_floor(&self, other: &BigUint) -> (BigUint, BigUint) { - div_rem_ref(self, other) - } - - #[inline] - fn div_ceil(&self, other: &BigUint) -> BigUint { - let (d, m) = div_rem_ref(self, other); - if m.is_zero() { - d - } else { - d + 1u32 - } - } - - /// Calculates the Greatest Common Divisor (GCD) of the number and 
`other`. - /// - /// The result is always positive. - #[inline] - fn gcd(&self, other: &Self) -> Self { - #[inline] - fn twos(x: &BigUint) -> u64 { - x.trailing_zeros().unwrap_or(0) - } - - // Stein's algorithm - if self.is_zero() { - return other.clone(); - } - if other.is_zero() { - return self.clone(); - } - let mut m = self.clone(); - let mut n = other.clone(); - - // find common factors of 2 - let shift = cmp::min(twos(&n), twos(&m)); - - // divide m and n by 2 until odd - // m inside loop - n >>= twos(&n); - - while !m.is_zero() { - m >>= twos(&m); - if n > m { - mem::swap(&mut n, &mut m) - } - m -= &n; - } - - n << shift - } - - /// Calculates the Lowest Common Multiple (LCM) of the number and `other`. - #[inline] - fn lcm(&self, other: &BigUint) -> BigUint { - if self.is_zero() && other.is_zero() { - Self::zero() - } else { - self / self.gcd(other) * other - } - } - - /// Calculates the Greatest Common Divisor (GCD) and - /// Lowest Common Multiple (LCM) together. - #[inline] - fn gcd_lcm(&self, other: &Self) -> (Self, Self) { - let gcd = self.gcd(other); - let lcm = if gcd.is_zero() { - Self::zero() - } else { - self / &gcd * other - }; - (gcd, lcm) - } - - /// Deprecated, use `is_multiple_of` instead. - #[inline] - fn divides(&self, other: &BigUint) -> bool { - self.is_multiple_of(other) - } - - /// Returns `true` if the number is a multiple of `other`. - #[inline] - fn is_multiple_of(&self, other: &BigUint) -> bool { - (self % other).is_zero() - } - - /// Returns `true` if the number is divisible by `2`. - #[inline] - fn is_even(&self) -> bool { - // Considering only the last digit. - match self.data.first() { - Some(x) => x.is_even(), - None => true, - } - } - - /// Returns `true` if the number is not divisible by `2`. - #[inline] - fn is_odd(&self) -> bool { - !self.is_even() - } - - /// Rounds up to nearest multiple of argument. - #[inline] - fn next_multiple_of(&self, other: &Self) -> Self { - let m = self.mod_floor(other); - if m.is_zero() { - self.clone() - } else { - self + (other - m) - } - } - /// Rounds down to nearest multiple of argument. - #[inline] - fn prev_multiple_of(&self, other: &Self) -> Self { - self - self.mod_floor(other) - } -} - -#[inline] -fn fixpoint(mut x: BigUint, max_bits: u64, f: F) -> BigUint -where - F: Fn(&BigUint) -> BigUint, -{ - let mut xn = f(&x); - - // If the value increased, then the initial guess must have been low. - // Repeat until we reverse course. - while x < xn { - // Sometimes an increase will go way too far, especially with large - // powers, and then take a long time to walk back. We know an upper - // bound based on bit size, so saturate on that. - x = if xn.bits() > max_bits { - BigUint::one() << max_bits - } else { - xn - }; - xn = f(&x); - } - - // Now keep repeating while the estimate is decreasing. - while x > xn { - x = xn; - xn = f(&x); - } - x -} - -impl Roots for BigUint { - // nth_root, sqrt and cbrt use Newton's method to compute - // principal root of a given degree for a given integer. - - // Reference: - // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.14 - fn nth_root(&self, n: u32) -> Self { - assert!(n > 0, "root degree n must be at least 1"); - - if self.is_zero() || self.is_one() { - return self.clone(); - } - - match n { - // Optimize for small n - 1 => return self.clone(), - 2 => return self.sqrt(), - 3 => return self.cbrt(), - _ => (), - } - - // The root of non-zero values less than 2ⁿ can only be 1. 
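// A minimal usage sketch of the `Integer` impl above, assuming `num_integer::Integer`
// is in scope (the gcd here is Stein's binary algorithm, the lcm is derived from it):
//
//     let a = BigUint::from(2520u32);
//     let b = BigUint::from(6600u32);
//     let (g, l) = a.gcd_lcm(&b);
//     assert_eq!(g, BigUint::from(120u32));      // gcd(2520, 6600) = 120
//     assert_eq!(l, BigUint::from(138_600u32));  // lcm = 2520 * 6600 / 120
//     assert_eq!(&g * &l, &a * &b);              // gcd * lcm == a * b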
- let bits = self.bits(); - let n64 = u64::from(n); - if bits <= n64 { - return BigUint::one(); - } - - // If we fit in `u64`, compute the root that way. - if let Some(x) = self.to_u64() { - return x.nth_root(n).into(); - } - - let max_bits = bits / n64 + 1; - - #[cfg(feature = "std")] - let guess = match self.to_f64() { - Some(f) if f.is_finite() => { - // We fit in `f64` (lossy), so get a better initial guess from that. - BigUint::from_f64((f.ln() / f64::from(n)).exp()).unwrap() - } - _ => { - // Try to guess by scaling down such that it does fit in `f64`. - // With some (x * 2ⁿᵏ), its nth root ≈ (ⁿ√x * 2ᵏ) - let extra_bits = bits - (f64::MAX_EXP as u64 - 1); - let root_scale = extra_bits.div_ceil(&n64); - let scale = root_scale * n64; - if scale < bits && bits - scale > n64 { - (self >> scale).nth_root(n) << root_scale - } else { - BigUint::one() << max_bits - } - } - }; - - #[cfg(not(feature = "std"))] - let guess = BigUint::one() << max_bits; - - let n_min_1 = n - 1; - fixpoint(guess, max_bits, move |s| { - let q = self / s.pow(n_min_1); - let t = n_min_1 * s + q; - t / n - }) - } - - // Reference: - // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13 - fn sqrt(&self) -> Self { - if self.is_zero() || self.is_one() { - return self.clone(); - } - - // If we fit in `u64`, compute the root that way. - if let Some(x) = self.to_u64() { - return x.sqrt().into(); - } - - let bits = self.bits(); - let max_bits = bits / 2 + 1; - - #[cfg(feature = "std")] - let guess = match self.to_f64() { - Some(f) if f.is_finite() => { - // We fit in `f64` (lossy), so get a better initial guess from that. - BigUint::from_f64(f.sqrt()).unwrap() - } - _ => { - // Try to guess by scaling down such that it does fit in `f64`. - // With some (x * 2²ᵏ), its sqrt ≈ (√x * 2ᵏ) - let extra_bits = bits - (f64::MAX_EXP as u64 - 1); - let root_scale = (extra_bits + 1) / 2; - let scale = root_scale * 2; - (self >> scale).sqrt() << root_scale - } - }; - - #[cfg(not(feature = "std"))] - let guess = BigUint::one() << max_bits; - - fixpoint(guess, max_bits, move |s| { - let q = self / s; - let t = s + q; - t >> 1 - }) - } - - fn cbrt(&self) -> Self { - if self.is_zero() || self.is_one() { - return self.clone(); - } - - // If we fit in `u64`, compute the root that way. - if let Some(x) = self.to_u64() { - return x.cbrt().into(); - } - - let bits = self.bits(); - let max_bits = bits / 3 + 1; - - #[cfg(feature = "std")] - let guess = match self.to_f64() { - Some(f) if f.is_finite() => { - // We fit in `f64` (lossy), so get a better initial guess from that. - BigUint::from_f64(f.cbrt()).unwrap() - } - _ => { - // Try to guess by scaling down such that it does fit in `f64`. - // With some (x * 2³ᵏ), its cbrt ≈ (∛x * 2ᵏ) - let extra_bits = bits - (f64::MAX_EXP as u64 - 1); - let root_scale = (extra_bits + 2) / 3; - let scale = root_scale * 3; - (self >> scale).cbrt() << root_scale - } - }; - - #[cfg(not(feature = "std"))] - let guess = BigUint::one() << max_bits; - - fixpoint(guess, max_bits, move |s| { - let q = self / (s * s); - let t = (s << 1) + q; - t / 3u32 - }) - } -} - -fn high_bits_to_u64(v: &BigUint) -> u64 { - match v.data.len() { - 0 => 0, - 1 => { - // XXX Conversion is useless if already 64-bit. 
- #[allow(clippy::useless_conversion)] - let v0 = u64::from(v.data[0]); - v0 - } - _ => { - let mut bits = v.bits(); - let mut ret = 0u64; - let mut ret_bits = 0; - - for d in v.data.iter().rev() { - let digit_bits = (bits - 1) % u64::from(big_digit::BITS) + 1; - let bits_want = cmp::min(64 - ret_bits, digit_bits); - - if bits_want != 64 { - ret <<= bits_want; - } - // XXX Conversion is useless if already 64-bit. - #[allow(clippy::useless_conversion)] - let d0 = u64::from(*d) >> (digit_bits - bits_want); - ret |= d0; - ret_bits += bits_want; - bits -= bits_want; - - if ret_bits == 64 { - break; - } - } - - ret - } - } -} - -impl ToPrimitive for BigUint { - #[inline] - fn to_i64(&self) -> Option { - self.to_u64().as_ref().and_then(u64::to_i64) - } - - #[inline] - fn to_i128(&self) -> Option { - self.to_u128().as_ref().and_then(u128::to_i128) - } - - #[allow(clippy::useless_conversion)] - #[inline] - fn to_u64(&self) -> Option { - let mut ret: u64 = 0; - let mut bits = 0; - - for i in self.data.iter() { - if bits >= 64 { - return None; - } - - // XXX Conversion is useless if already 64-bit. - ret += u64::from(*i) << bits; - bits += big_digit::BITS; + fn is_even(&self) -> bool { + // Considering only the last digit. + match self.data.first() { + Some(x) => x.is_even(), + None => true, } - - Some(ret) } + /// Returns `true` if the number is not divisible by `2`. #[inline] - fn to_u128(&self) -> Option { - let mut ret: u128 = 0; - let mut bits = 0; - - for i in self.data.iter() { - if bits >= 128 { - return None; - } - - ret |= u128::from(*i) << bits; - bits += big_digit::BITS; - } - - Some(ret) + fn is_odd(&self) -> bool { + !self.is_even() } + /// Rounds up to nearest multiple of argument. #[inline] - fn to_f32(&self) -> Option { - let mantissa = high_bits_to_u64(self); - let exponent = self.bits() - u64::from(fls(mantissa)); - - if exponent > f32::MAX_EXP as u64 { - Some(f32::INFINITY) + fn next_multiple_of(&self, other: &Self) -> Self { + let m = self.mod_floor(other); + if m.is_zero() { + self.clone() } else { - Some((mantissa as f32) * 2.0f32.powi(exponent as i32)) + self + (other - m) } } - + /// Rounds down to nearest multiple of argument. #[inline] - fn to_f64(&self) -> Option { - let mantissa = high_bits_to_u64(self); - let exponent = self.bits() - u64::from(fls(mantissa)); - - if exponent > f64::MAX_EXP as u64 { - Some(f64::INFINITY) - } else { - Some((mantissa as f64) * 2.0f64.powi(exponent as i32)) - } + fn prev_multiple_of(&self, other: &Self) -> Self { + self - self.mod_floor(other) } } -macro_rules! 
impl_try_from_biguint { - ($T:ty, $to_ty:path) => { - #[cfg(has_try_from)] - impl TryFrom<&BigUint> for $T { - type Error = TryFromBigIntError<()>; - - #[inline] - fn try_from(value: &BigUint) -> Result<$T, TryFromBigIntError<()>> { - $to_ty(value).ok_or(TryFromBigIntError::new(())) - } - } - - #[cfg(has_try_from)] - impl TryFrom for $T { - type Error = TryFromBigIntError; - - #[inline] - fn try_from(value: BigUint) -> Result<$T, TryFromBigIntError> { - <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value)) - } - } - }; -} - -impl_try_from_biguint!(u8, ToPrimitive::to_u8); -impl_try_from_biguint!(u16, ToPrimitive::to_u16); -impl_try_from_biguint!(u32, ToPrimitive::to_u32); -impl_try_from_biguint!(u64, ToPrimitive::to_u64); -impl_try_from_biguint!(usize, ToPrimitive::to_usize); -impl_try_from_biguint!(u128, ToPrimitive::to_u128); - -impl_try_from_biguint!(i8, ToPrimitive::to_i8); -impl_try_from_biguint!(i16, ToPrimitive::to_i16); -impl_try_from_biguint!(i32, ToPrimitive::to_i32); -impl_try_from_biguint!(i64, ToPrimitive::to_i64); -impl_try_from_biguint!(isize, ToPrimitive::to_isize); -impl_try_from_biguint!(i128, ToPrimitive::to_i128); - -impl FromPrimitive for BigUint { - #[inline] - fn from_i64(n: i64) -> Option { - if n >= 0 { - Some(BigUint::from(n as u64)) - } else { - None - } - } +#[inline] +fn fixpoint(mut x: BigUint, max_bits: u64, f: F) -> BigUint +where + F: Fn(&BigUint) -> BigUint, +{ + let mut xn = f(&x); - #[inline] - fn from_i128(n: i128) -> Option { - if n >= 0 { - Some(BigUint::from(n as u128)) + // If the value increased, then the initial guess must have been low. + // Repeat until we reverse course. + while x < xn { + // Sometimes an increase will go way too far, especially with large + // powers, and then take a long time to walk back. We know an upper + // bound based on bit size, so saturate on that. + x = if xn.bits() > max_bits { + BigUint::one() << max_bits } else { - None - } - } - - #[inline] - fn from_u64(n: u64) -> Option { - Some(BigUint::from(n)) - } - - #[inline] - fn from_u128(n: u128) -> Option { - Some(BigUint::from(n)) - } - - #[inline] - fn from_f64(mut n: f64) -> Option { - // handle NAN, INFINITY, NEG_INFINITY - if !n.is_finite() { - return None; - } - - // match the rounding of casting from float to int - n = n.trunc(); - - // handle 0.x, -0.x - if n.is_zero() { - return Some(BigUint::zero()); - } - - let (mantissa, exponent, sign) = FloatCore::integer_decode(n); - - if sign == -1 { - return None; - } - - let mut ret = BigUint::from(mantissa); - match exponent.cmp(&0) { - Greater => ret <<= exponent as usize, - Equal => {} - Less => ret >>= (-exponent) as usize, - } - Some(ret) - } -} - -impl From for BigUint { - #[inline] - fn from(mut n: u64) -> Self { - let mut ret: BigUint = Zero::zero(); - - while n != 0 { - ret.data.push(n as BigDigit); - // don't overflow if BITS is 64: - n = (n >> 1) >> (big_digit::BITS - 1); - } - - ret - } -} - -impl From for BigUint { - #[inline] - fn from(mut n: u128) -> Self { - let mut ret: BigUint = Zero::zero(); - - while n != 0 { - ret.data.push(n as BigDigit); - n >>= big_digit::BITS; - } - - ret - } -} - -macro_rules! impl_biguint_from_uint { - ($T:ty) => { - impl From<$T> for BigUint { - #[inline] - fn from(n: $T) -> Self { - BigUint::from(n as u64) - } - } - }; -} - -impl_biguint_from_uint!(u8); -impl_biguint_from_uint!(u16); -impl_biguint_from_uint!(u32); -impl_biguint_from_uint!(usize); - -macro_rules! 
impl_biguint_try_from_int { - ($T:ty, $from_ty:path) => { - #[cfg(has_try_from)] - impl TryFrom<$T> for BigUint { - type Error = TryFromBigIntError<()>; - - #[inline] - fn try_from(value: $T) -> Result> { - $from_ty(value).ok_or(TryFromBigIntError::new(())) - } - } - }; -} - -impl_biguint_try_from_int!(i8, FromPrimitive::from_i8); -impl_biguint_try_from_int!(i16, FromPrimitive::from_i16); -impl_biguint_try_from_int!(i32, FromPrimitive::from_i32); -impl_biguint_try_from_int!(i64, FromPrimitive::from_i64); -impl_biguint_try_from_int!(isize, FromPrimitive::from_isize); -impl_biguint_try_from_int!(i128, FromPrimitive::from_i128); - -/// A generic trait for converting a value to a `BigUint`. -pub trait ToBigUint { - /// Converts the value of `self` to a `BigUint`. - fn to_biguint(&self) -> Option; -} - -impl ToBigUint for BigUint { - #[inline] - fn to_biguint(&self) -> Option { - Some(self.clone()) - } -} - -macro_rules! impl_to_biguint { - ($T:ty, $from_ty:path) => { - impl ToBigUint for $T { - #[inline] - fn to_biguint(&self) -> Option { - $from_ty(*self) - } - } - }; -} - -impl_to_biguint!(isize, FromPrimitive::from_isize); -impl_to_biguint!(i8, FromPrimitive::from_i8); -impl_to_biguint!(i16, FromPrimitive::from_i16); -impl_to_biguint!(i32, FromPrimitive::from_i32); -impl_to_biguint!(i64, FromPrimitive::from_i64); -impl_to_biguint!(i128, FromPrimitive::from_i128); - -impl_to_biguint!(usize, FromPrimitive::from_usize); -impl_to_biguint!(u8, FromPrimitive::from_u8); -impl_to_biguint!(u16, FromPrimitive::from_u16); -impl_to_biguint!(u32, FromPrimitive::from_u32); -impl_to_biguint!(u64, FromPrimitive::from_u64); -impl_to_biguint!(u128, FromPrimitive::from_u128); - -impl_to_biguint!(f32, FromPrimitive::from_f32); -impl_to_biguint!(f64, FromPrimitive::from_f64); - -// Extract bitwise digits that evenly divide BigDigit -fn to_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec { - debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0); - - let last_i = u.data.len() - 1; - let mask: BigDigit = (1 << bits) - 1; - let digits_per_big_digit = big_digit::BITS / bits; - let digits = u - .bits() - .div_ceil(&u64::from(bits)) - .to_usize() - .unwrap_or(core::usize::MAX); - let mut res = Vec::with_capacity(digits); - - for mut r in u.data[..last_i].iter().cloned() { - for _ in 0..digits_per_big_digit { - res.push((r & mask) as u8); - r >>= bits; - } - } - - let mut r = u.data[last_i]; - while r != 0 { - res.push((r & mask) as u8); - r >>= bits; - } - - res -} - -// Extract bitwise digits that don't evenly divide BigDigit -fn to_inexact_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec { - debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0); - - let mask: BigDigit = (1 << bits) - 1; - let digits = u - .bits() - .div_ceil(&u64::from(bits)) - .to_usize() - .unwrap_or(core::usize::MAX); - let mut res = Vec::with_capacity(digits); - - let mut r = 0; - let mut rbits = 0; - - for c in &u.data { - r |= *c << rbits; - rbits += big_digit::BITS; - - while rbits >= bits { - res.push((r & mask) as u8); - r >>= bits; - - // r had more bits than it could fit - grab the bits we lost - if rbits > big_digit::BITS { - r = *c >> (big_digit::BITS - (rbits - bits)); - } - - rbits -= bits; - } - } - - if rbits != 0 { - res.push(r as u8); + xn + }; + xn = f(&x); } - while let Some(&0) = res.last() { - res.pop(); + // Now keep repeating while the estimate is decreasing. 
+ while x > xn { + x = xn; + xn = f(&x); } - - res + x } -// Extract little-endian radix digits -#[inline(always)] // forced inline to get const-prop for radix=10 -fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec { - debug_assert!(!u.is_zero() && !radix.is_power_of_two()); - - #[cfg(feature = "std")] - let radix_log2 = f64::from(radix).log2(); - #[cfg(not(feature = "std"))] - let radix_log2 = ilog2(radix) as f64; - - // Estimate how big the result will be, so we can pre-allocate it. - let radix_digits = ((u.bits() as f64) / radix_log2).ceil(); - let mut res = Vec::with_capacity(radix_digits.to_usize().unwrap_or(0)); - - let mut digits = u.clone(); - - let (base, power) = get_radix_base(radix, big_digit::HALF_BITS); - let radix = radix as BigDigit; - - while digits.data.len() > 1 { - let (q, mut r) = div_rem_digit(digits, base); - for _ in 0..power { - res.push((r % radix) as u8); - r /= radix; - } - digits = q; - } - - let mut r = digits.data[0]; - while r != 0 { - res.push((r % radix) as u8); - r /= radix; - } +impl Roots for BigUint { + // nth_root, sqrt and cbrt use Newton's method to compute + // principal root of a given degree for a given integer. - res -} + // Reference: + // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.14 + fn nth_root(&self, n: u32) -> Self { + assert!(n > 0, "root degree n must be at least 1"); -pub(crate) fn to_radix_le(u: &BigUint, radix: u32) -> Vec { - if u.is_zero() { - vec![0] - } else if radix.is_power_of_two() { - // Powers of two can use bitwise masks and shifting instead of division - let bits = ilog2(radix); - if big_digit::BITS % bits == 0 { - to_bitwise_digits_le(u, bits) - } else { - to_inexact_bitwise_digits_le(u, bits) + if self.is_zero() || self.is_one() { + return self.clone(); } - } else if radix == 10 { - // 10 is so common that it's worth separating out for const-propagation. - // Optimizers can often turn constant division into a faster multiplication. - to_radix_digits_le(u, 10) - } else { - to_radix_digits_le(u, radix) - } -} - -pub(crate) fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec { - assert!(2 <= radix && radix <= 36, "The radix must be within 2...36"); - if u.is_zero() { - return vec![b'0']; - } - - let mut res = to_radix_le(u, radix); - - // Now convert everything to ASCII digits. - for r in &mut res { - debug_assert!(u32::from(*r) < radix); - if *r < 10 { - *r += b'0'; - } else { - *r += b'a' - 10; + match n { + // Optimize for small n + 1 => return self.clone(), + 2 => return self.sqrt(), + 3 => return self.cbrt(), + _ => (), } - } - res -} -/// An iterator of `u32` digits representation of a `BigUint` or `BigInt`, -/// ordered least significant digit first. -pub struct U32Digits<'a> { - #[cfg(u64_digit)] - data: &'a [u64], - #[cfg(u64_digit)] - next_is_lo: bool, - #[cfg(u64_digit)] - last_hi_is_zero: bool, - - #[cfg(not(u64_digit))] - it: core::slice::Iter<'a, u32>, -} - -#[cfg(u64_digit)] -impl<'a> U32Digits<'a> { - #[inline] - fn new(data: &'a [u64]) -> Self { - let last_hi_is_zero = data - .last() - .map(|&last| { - let last_hi = (last >> 32) as u32; - last_hi == 0 - }) - .unwrap_or(false); - U32Digits { - data, - next_is_lo: true, - last_hi_is_zero, + // The root of non-zero values less than 2ⁿ can only be 1. 
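// Intuition for the `fixpoint` helper above, sketched with the integer-sqrt step
// f(s) = (s + n/s) / 2 that `sqrt` uses below (all divisions truncating), n = 1000:
//
//     x = 40  ->  f(40) = (40 + 25) / 2 = 32   // estimate decreasing, keep going
//     x = 32  ->  f(32) = (32 + 31) / 2 = 31
//     x = 31  ->  f(31) = (31 + 32) / 2 = 31   // no longer decreasing: answer 31
//
// and indeed 31² = 961 <= 1000 < 1024 = 32². The first `while x < xn` loop only
// corrects an initial guess that was too low, capped at `max_bits` so a wild
// overshoot cannot blow up the working size.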
+ let bits = self.bits(); + let n64 = u64::from(n); + if bits <= n64 { + return BigUint::one(); } - } -} -#[cfg(u64_digit)] -impl Iterator for U32Digits<'_> { - type Item = u32; - #[inline] - fn next(&mut self) -> Option { - match self.data.split_first() { - Some((&first, data)) => { - let next_is_lo = self.next_is_lo; - self.next_is_lo = !next_is_lo; - if next_is_lo { - Some(first as u32) - } else { - self.data = data; - if data.is_empty() && self.last_hi_is_zero { - self.last_hi_is_zero = false; - None - } else { - Some((first >> 32) as u32) - } - } - } - None => None, + // If we fit in `u64`, compute the root that way. + if let Some(x) = self.to_u64() { + return x.nth_root(n).into(); } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let len = self.len(); - (len, Some(len)) - } - #[inline] - fn last(self) -> Option { - self.data.last().map(|&last| { - if self.last_hi_is_zero { - last as u32 - } else { - (last >> 32) as u32 - } - }) - } - - #[inline] - fn count(self) -> usize { - self.len() - } -} - -#[cfg(u64_digit)] -impl ExactSizeIterator for U32Digits<'_> { - #[inline] - fn len(&self) -> usize { - self.data.len() * 2 - usize::from(self.last_hi_is_zero) - usize::from(!self.next_is_lo) - } -} - -#[cfg(not(u64_digit))] -impl<'a> U32Digits<'a> { - #[inline] - fn new(data: &'a [u32]) -> Self { - Self { it: data.iter() } - } -} - -#[cfg(not(u64_digit))] -impl Iterator for U32Digits<'_> { - type Item = u32; - #[inline] - fn next(&mut self) -> Option { - self.it.next().cloned() - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } + let max_bits = bits / n64 + 1; - #[inline] - fn nth(&mut self, n: usize) -> Option { - self.it.nth(n).cloned() - } + #[cfg(feature = "std")] + let guess = match self.to_f64() { + Some(f) if f.is_finite() => { + use num_traits::FromPrimitive; - #[inline] - fn last(self) -> Option { - self.it.last().cloned() - } + // We fit in `f64` (lossy), so get a better initial guess from that. + BigUint::from_f64((f.ln() / f64::from(n)).exp()).unwrap() + } + _ => { + // Try to guess by scaling down such that it does fit in `f64`. + // With some (x * 2ⁿᵏ), its nth root ≈ (ⁿ√x * 2ᵏ) + let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1); + let root_scale = extra_bits.div_ceil(&n64); + let scale = root_scale * n64; + if scale < bits && bits - scale > n64 { + (self >> scale).nth_root(n) << root_scale + } else { + BigUint::one() << max_bits + } + } + }; - #[inline] - fn count(self) -> usize { - self.it.count() - } -} + #[cfg(not(feature = "std"))] + let guess = BigUint::one() << max_bits; -#[cfg(not(u64_digit))] -impl ExactSizeIterator for U32Digits<'_> { - #[inline] - fn len(&self) -> usize { - self.it.len() + let n_min_1 = n - 1; + fixpoint(guess, max_bits, move |s| { + let q = self / s.pow(n_min_1); + let t = n_min_1 * s + q; + t / n + }) } -} -impl FusedIterator for U32Digits<'_> {} + // Reference: + // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13 + fn sqrt(&self) -> Self { + if self.is_zero() || self.is_one() { + return self.clone(); + } -/// An iterator of `u64` digits representation of a `BigUint` or `BigInt`, -/// ordered least significant digit first. -pub struct U64Digits<'a> { - #[cfg(not(u64_digit))] - it: core::slice::Chunks<'a, u32>, + // If we fit in `u64`, compute the root that way. 
+ if let Some(x) = self.to_u64() { + return x.sqrt().into(); + } - #[cfg(u64_digit)] - it: core::slice::Iter<'a, u64>, -} + let bits = self.bits(); + let max_bits = bits / 2 + 1; -#[cfg(not(u64_digit))] -impl<'a> U64Digits<'a> { - #[inline] - fn new(data: &'a [u32]) -> Self { - U64Digits { it: data.chunks(2) } - } -} + #[cfg(feature = "std")] + let guess = match self.to_f64() { + Some(f) if f.is_finite() => { + use num_traits::FromPrimitive; -#[cfg(not(u64_digit))] -impl Iterator for U64Digits<'_> { - type Item = u64; - #[inline] - fn next(&mut self) -> Option { - self.it.next().map(u32_chunk_to_u64) - } + // We fit in `f64` (lossy), so get a better initial guess from that. + BigUint::from_f64(f.sqrt()).unwrap() + } + _ => { + // Try to guess by scaling down such that it does fit in `f64`. + // With some (x * 2²ᵏ), its sqrt ≈ (√x * 2ᵏ) + let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1); + let root_scale = (extra_bits + 1) / 2; + let scale = root_scale * 2; + (self >> scale).sqrt() << root_scale + } + }; - #[inline] - fn size_hint(&self) -> (usize, Option) { - let len = self.len(); - (len, Some(len)) - } + #[cfg(not(feature = "std"))] + let guess = BigUint::one() << max_bits; - #[inline] - fn last(self) -> Option { - self.it.last().map(u32_chunk_to_u64) + fixpoint(guess, max_bits, move |s| { + let q = self / s; + let t = s + q; + t >> 1 + }) } - #[inline] - fn count(self) -> usize { - self.len() - } -} + fn cbrt(&self) -> Self { + if self.is_zero() || self.is_one() { + return self.clone(); + } -#[cfg(u64_digit)] -impl<'a> U64Digits<'a> { - #[inline] - fn new(data: &'a [u64]) -> Self { - Self { it: data.iter() } - } -} + // If we fit in `u64`, compute the root that way. + if let Some(x) = self.to_u64() { + return x.cbrt().into(); + } -#[cfg(u64_digit)] -impl Iterator for U64Digits<'_> { - type Item = u64; - #[inline] - fn next(&mut self) -> Option { - self.it.next().cloned() - } + let bits = self.bits(); + let max_bits = bits / 3 + 1; - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } + #[cfg(feature = "std")] + let guess = match self.to_f64() { + Some(f) if f.is_finite() => { + use num_traits::FromPrimitive; - #[inline] - fn nth(&mut self, n: usize) -> Option { - self.it.nth(n).cloned() - } + // We fit in `f64` (lossy), so get a better initial guess from that. + BigUint::from_f64(f.cbrt()).unwrap() + } + _ => { + // Try to guess by scaling down such that it does fit in `f64`. + // With some (x * 2³ᵏ), its cbrt ≈ (∛x * 2ᵏ) + let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1); + let root_scale = (extra_bits + 2) / 3; + let scale = root_scale * 3; + (self >> scale).cbrt() << root_scale + } + }; - #[inline] - fn last(self) -> Option { - self.it.last().cloned() - } + #[cfg(not(feature = "std"))] + let guess = BigUint::one() << max_bits; - #[inline] - fn count(self) -> usize { - self.it.count() + fixpoint(guess, max_bits, move |s| { + let q = self / (s * s); + let t = (s << 1) + q; + t / 3u32 + }) } } -impl ExactSizeIterator for U64Digits<'_> { - #[inline] - fn len(&self) -> usize { - self.it.len() - } +/// A generic trait for converting a value to a `BigUint`. +pub trait ToBigUint { + /// Converts the value of `self` to a `BigUint`. + fn to_biguint(&self) -> Option; } -impl FusedIterator for U64Digits<'_> {} - /// Creates and initializes a `BigUint`. /// /// The digits are in little-endian base matching `BigDigit`. 
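// A quick property check for the `Roots` impl above -- a minimal sketch that only
// relies on methods defined in this module (`sqrt`, `cbrt`, `nth_root`):
#[test]
fn roots_sketch() {
    use num_integer::Roots;

    let n = BigUint::from(12_345_678_901_234_567_890_u128);
    let r = n.sqrt();
    let r1 = r.clone() + 1u32;
    // `sqrt` returns the truncated principal root: r * r <= n < (r + 1) * (r + 1)
    assert!(&r * &r <= n);
    assert!(&r1 * &r1 > n);
    // `cbrt` is just the degree-3 case of `nth_root`
    assert_eq!(n.cbrt(), n.nth_root(3));
}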
@@ -2604,7 +593,7 @@ impl BigUint { if bytes.is_empty() { Zero::zero() } else { - from_bitwise_digits_le(bytes, 8) + convert::from_bitwise_digits_le(bytes, 8) } } @@ -2647,34 +636,7 @@ impl BigUint { /// assert_eq!(a.to_radix_be(190), inbase190); /// ``` pub fn from_radix_be(buf: &[u8], radix: u32) -> Option { - assert!( - 2 <= radix && radix <= 256, - "The radix must be within 2...256" - ); - - if buf.is_empty() { - return Some(Zero::zero()); - } - - if radix != 256 && buf.iter().any(|&b| b >= radix as u8) { - return None; - } - - let res = if radix.is_power_of_two() { - // Powers of two can use bitwise masks and shifting instead of multiplication - let bits = ilog2(radix); - let mut v = Vec::from(buf); - v.reverse(); - if big_digit::BITS % bits == 0 { - from_bitwise_digits_le(&v, bits) - } else { - from_inexact_bitwise_digits_le(&v, bits) - } - } else { - from_radix_digits_be(buf, radix) - }; - - Some(res) + convert::from_radix_be(buf, radix) } /// Creates and initializes a `BigUint`. Each u8 of the input slice is @@ -2694,34 +656,7 @@ impl BigUint { /// assert_eq!(a.to_radix_be(190), inbase190); /// ``` pub fn from_radix_le(buf: &[u8], radix: u32) -> Option { - assert!( - 2 <= radix && radix <= 256, - "The radix must be within 2...256" - ); - - if buf.is_empty() { - return Some(Zero::zero()); - } - - if radix != 256 && buf.iter().any(|&b| b >= radix as u8) { - return None; - } - - let res = if radix.is_power_of_two() { - // Powers of two can use bitwise masks and shifting instead of multiplication - let bits = ilog2(radix); - if big_digit::BITS % bits == 0 { - from_bitwise_digits_le(buf, bits) - } else { - from_inexact_bitwise_digits_le(buf, bits) - } - } else { - let mut v = Vec::from(buf); - v.reverse(); - from_radix_digits_be(&v, radix) - }; - - Some(res) + convert::from_radix_le(buf, radix) } /// Returns the byte representation of the `BigUint` in big-endian byte order. @@ -2756,7 +691,7 @@ impl BigUint { if self.is_zero() { vec![0] } else { - to_bitwise_digits_le(self, 8) + convert::to_bitwise_digits_le(self, 8) } } @@ -2868,7 +803,7 @@ impl BigUint { /// ``` #[inline] pub fn to_radix_be(&self, radix: u32) -> Vec { - let mut v = to_radix_le(self, radix); + let mut v = convert::to_radix_le(self, radix); v.reverse(); v } @@ -2889,7 +824,7 @@ impl BigUint { /// ``` #[inline] pub fn to_radix_le(&self, radix: u32) -> Vec { - to_radix_le(self, radix) + convert::to_radix_le(self, radix) } /// Determines the fewest bits necessary to express the `BigUint`. @@ -2930,18 +865,7 @@ impl BigUint { /// /// Panics if the modulus is zero. pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self { - assert!( - !modulus.is_zero(), - "attempt to calculate with zero modulus!" - ); - - if modulus.is_odd() { - // For an odd modulus, we can use Montgomery multiplication in base 2^32. - monty_modpow(self, exponent, modulus) - } else { - // Otherwise do basically the same as `num::pow`, but with a modulus. - plain_modpow(self, &exponent.data, modulus) - } + power::modpow(self, exponent, modulus) } /// Returns the truncated principal square root of `self` -- @@ -3026,111 +950,6 @@ impl BigUint { } } -fn plain_modpow(base: &BigUint, exp_data: &[BigDigit], modulus: &BigUint) -> BigUint { - assert!( - !modulus.is_zero(), - "attempt to calculate with zero modulus!" 
- ); - - let i = match exp_data.iter().position(|&r| r != 0) { - None => return BigUint::one(), - Some(i) => i, - }; - - let mut base = base % modulus; - for _ in 0..i { - for _ in 0..big_digit::BITS { - base = &base * &base % modulus; - } - } - - let mut r = exp_data[i]; - let mut b = 0u8; - while r.is_even() { - base = &base * &base % modulus; - r >>= 1; - b += 1; - } - - let mut exp_iter = exp_data[i + 1..].iter(); - if exp_iter.len() == 0 && r.is_one() { - return base; - } - - let mut acc = base.clone(); - r >>= 1; - b += 1; - - { - let mut unit = |exp_is_odd| { - base = &base * &base % modulus; - if exp_is_odd { - acc = &acc * &base % modulus; - } - }; - - if let Some(&last) = exp_iter.next_back() { - // consume exp_data[i] - for _ in b..big_digit::BITS { - unit(r.is_odd()); - r >>= 1; - } - - // consume all other digits before the last - for &r in exp_iter { - let mut r = r; - for _ in 0..big_digit::BITS { - unit(r.is_odd()); - r >>= 1; - } - } - r = last; - } - - debug_assert_ne!(r, 0); - while !r.is_zero() { - unit(r.is_odd()); - r >>= 1; - } - } - acc -} - -#[test] -fn test_plain_modpow() { - let two = &BigUint::from(2u32); - let modulus = BigUint::from(0x1100u32); - - let exp = vec![0, 0b1]; - assert_eq!( - two.pow(0b1_00000000_u32) % &modulus, - plain_modpow(&two, &exp, &modulus) - ); - let exp = vec![0, 0b10]; - assert_eq!( - two.pow(0b10_00000000_u32) % &modulus, - plain_modpow(&two, &exp, &modulus) - ); - let exp = vec![0, 0b110010]; - assert_eq!( - two.pow(0b110010_00000000_u32) % &modulus, - plain_modpow(&two, &exp, &modulus) - ); - let exp = vec![0b1, 0b1]; - assert_eq!( - two.pow(0b1_00000001_u32) % &modulus, - plain_modpow(&two, &exp, &modulus) - ); - let exp = vec![0b1100, 0, 0b1]; - assert_eq!( - two.pow(0b1_00000000_00001100_u32) % &modulus, - plain_modpow(&two, &exp, &modulus) - ); -} - -impl_sum_iter_type!(BigUint); -impl_product_iter_type!(BigUint); - pub(crate) trait IntDigits { fn digits(&self) -> &[BigDigit]; fn digits_mut(&mut self) -> &mut Vec; @@ -3162,6 +981,17 @@ impl IntDigits for BigUint { } } +/// Convert a u32 chunk (len is either 1 or 2) to a single u64 digit +#[inline] +fn u32_chunk_to_u64(chunk: &[u32]) -> u64 { + // raw could have odd length + let mut digit = chunk[0] as u64; + if let Some(&hi) = chunk.get(1) { + digit |= (hi as u64) << 32; + } + digit +} + /// Combine four `u32`s into a single `u128`. #[cfg(any(test, not(u64_digit)))] #[inline] @@ -3181,138 +1011,6 @@ fn u32_from_u128(n: u128) -> (u32, u32, u32, u32) { ) } -#[cfg(feature = "serde")] -impl serde::Serialize for BigUint { - #[cfg(not(u64_digit))] - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - // Note: do not change the serialization format, or it may break forward - // and backward compatibility of serialized data! If we ever change the - // internal representation, we should still serialize in base-`u32`. 
- let data: &[u32] = &self.data; - data.serialize(serializer) - } - - #[cfg(u64_digit)] - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - use serde::ser::SerializeSeq; - if let Some((&last, data)) = self.data.split_last() { - let last_lo = last as u32; - let last_hi = (last >> 32) as u32; - let u32_len = data.len() * 2 + 1 + (last_hi != 0) as usize; - let mut seq = serializer.serialize_seq(Some(u32_len))?; - for &x in data { - seq.serialize_element(&(x as u32))?; - seq.serialize_element(&((x >> 32) as u32))?; - } - seq.serialize_element(&last_lo)?; - if last_hi != 0 { - seq.serialize_element(&last_hi)?; - } - seq.end() - } else { - let data: &[u32] = &[]; - data.serialize(serializer) - } - } -} - -#[cfg(feature = "serde")] -impl<'de> serde::Deserialize<'de> for BigUint { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - use serde::de::{SeqAccess, Visitor}; - - struct U32Visitor; - - impl<'de> Visitor<'de> for U32Visitor { - type Value = BigUint; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("a sequence of unsigned 32-bit numbers") - } - - #[cfg(not(u64_digit))] - fn visit_seq(self, mut seq: S) -> Result - where - S: SeqAccess<'de>, - { - let len = seq.size_hint().unwrap_or(0); - let mut data = Vec::with_capacity(len); - - while let Some(value) = seq.next_element::()? { - data.push(value); - } - - Ok(biguint_from_vec(data)) - } - - #[cfg(u64_digit)] - fn visit_seq(self, mut seq: S) -> Result - where - S: SeqAccess<'de>, - { - let u32_len = seq.size_hint().unwrap_or(0); - let len = u32_len.div_ceil(&2); - let mut data = Vec::with_capacity(len); - - while let Some(lo) = seq.next_element::()? { - let mut value = BigDigit::from(lo); - if let Some(hi) = seq.next_element::()? { - value |= BigDigit::from(hi) << 32; - data.push(value); - } else { - data.push(value); - break; - } - } - - Ok(biguint_from_vec(data)) - } - } - - deserializer.deserialize_seq(U32Visitor) - } -} - -/// Returns the greatest power of the radix for the given bit size -#[inline] -fn get_radix_base(radix: u32, bits: u8) -> (BigDigit, usize) { - mod gen { - include! 
{ concat!(env!("OUT_DIR"), "/radix_bases.rs") } - } - - debug_assert!( - 2 <= radix && radix <= 256, - "The radix must be within 2...256" - ); - debug_assert!(!radix.is_power_of_two()); - debug_assert!(bits <= big_digit::BITS); - - match bits { - 16 => { - let (base, power) = gen::BASES_16[radix as usize]; - (base as BigDigit, power) - } - 32 => { - let (base, power) = gen::BASES_32[radix as usize]; - (base as BigDigit, power) - } - 64 => { - let (base, power) = gen::BASES_64[radix as usize]; - (base as BigDigit, power) - } - _ => panic!("Invalid bigdigit size"), - } -} - #[cfg(not(u64_digit))] #[test] fn test_from_slice() { @@ -3397,53 +1095,3 @@ fn test_u128_u32_roundtrip() { assert_eq!(u32_to_u128(a, b, c, d), *val); } } - -#[test] -fn test_pow_biguint() { - let base = BigUint::from(5u8); - let exponent = BigUint::from(3u8); - - assert_eq!(BigUint::from(125u8), base.pow(exponent)); -} - -#[test] -fn test_iter_u32_digits() { - let n = BigUint::from(5u8); - let mut it = n.iter_u32_digits(); - assert_eq!(it.len(), 1); - assert_eq!(it.next(), Some(5)); - assert_eq!(it.len(), 0); - assert_eq!(it.next(), None); - assert_eq!(it.len(), 0); - assert_eq!(it.next(), None); - - let n = BigUint::from(112500000000u64); - let mut it = n.iter_u32_digits(); - assert_eq!(it.len(), 2); - assert_eq!(it.next(), Some(830850304)); - assert_eq!(it.len(), 1); - assert_eq!(it.next(), Some(26)); - assert_eq!(it.len(), 0); - assert_eq!(it.next(), None); -} - -#[test] -fn test_iter_u64_digits() { - let n = BigUint::from(5u8); - let mut it = n.iter_u64_digits(); - assert_eq!(it.len(), 1); - assert_eq!(it.next(), Some(5)); - assert_eq!(it.len(), 0); - assert_eq!(it.next(), None); - assert_eq!(it.len(), 0); - assert_eq!(it.next(), None); - - let n = BigUint::from(18_446_744_073_709_551_616u128); - let mut it = n.iter_u64_digits(); - assert_eq!(it.len(), 2); - assert_eq!(it.next(), Some(0)); - assert_eq!(it.len(), 1); - assert_eq!(it.next(), Some(1)); - assert_eq!(it.len(), 0); - assert_eq!(it.next(), None); -} diff --git a/src/biguint/addition.rs b/src/biguint/addition.rs new file mode 100644 index 00000000..e54f8cb1 --- /dev/null +++ b/src/biguint/addition.rs @@ -0,0 +1,254 @@ +#[cfg(not(u64_digit))] +use super::u32_from_u128; +use super::{BigUint, IntDigits}; + +use crate::big_digit::{self, BigDigit}; +use crate::UsizePromotion; + +use core::iter::Sum; +use core::ops::{Add, AddAssign}; +use num_traits::{CheckedAdd, Zero}; + +#[cfg(all(use_addcarry, target_arch = "x86_64"))] +use core::arch::x86_64 as arch; + +#[cfg(all(use_addcarry, target_arch = "x86"))] +use core::arch::x86 as arch; + +// Add with carry: +#[cfg(all(use_addcarry, u64_digit))] +#[inline] +fn adc(carry: u8, a: u64, b: u64, out: &mut u64) -> u8 { + // Safety: There are absolutely no safety concerns with calling `_addcarry_u64`. + // It's just unsafe for API consistency with other intrinsics. + unsafe { arch::_addcarry_u64(carry, a, b, out) } +} + +#[cfg(all(use_addcarry, not(u64_digit)))] +#[inline] +fn adc(carry: u8, a: u32, b: u32, out: &mut u32) -> u8 { + // Safety: There are absolutely no safety concerns with calling `_addcarry_u32`. + // It's just unsafe for API consistency with other intrinsics. 
+ unsafe { arch::_addcarry_u32(carry, a, b, out) } +} + +// fallback for environments where we don't have an addcarry intrinsic +#[cfg(not(use_addcarry))] +#[inline] +fn adc(carry: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 { + use crate::big_digit::DoubleBigDigit; + + let sum = DoubleBigDigit::from(a) + DoubleBigDigit::from(b) + DoubleBigDigit::from(carry); + *out = sum as BigDigit; + (sum >> big_digit::BITS) as u8 +} + +/// Two argument addition of raw slices, `a += b`, returning the carry. +/// +/// This is used when the data `Vec` might need to resize to push a non-zero carry, so we perform +/// the addition first hoping that it will fit. +/// +/// The caller _must_ ensure that `a` is at least as long as `b`. +#[inline] +pub(super) fn __add2(a: &mut [BigDigit], b: &[BigDigit]) -> BigDigit { + debug_assert!(a.len() >= b.len()); + + let mut carry = 0; + let (a_lo, a_hi) = a.split_at_mut(b.len()); + + for (a, b) in a_lo.iter_mut().zip(b) { + carry = adc(carry, *a, *b, a); + } + + if carry != 0 { + for a in a_hi { + carry = adc(carry, *a, 0, a); + if carry == 0 { + break; + } + } + } + + carry as BigDigit +} + +/// Two argument addition of raw slices: +/// a += b +/// +/// The caller _must_ ensure that a is big enough to store the result - typically this means +/// resizing a to max(a.len(), b.len()) + 1, to fit a possible carry. +pub(super) fn add2(a: &mut [BigDigit], b: &[BigDigit]) { + let carry = __add2(a, b); + + debug_assert!(carry == 0); +} + +forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add); +forward_val_assign!(impl AddAssign for BigUint, add_assign); + +impl<'a> Add<&'a BigUint> for BigUint { + type Output = BigUint; + + fn add(mut self, other: &BigUint) -> BigUint { + self += other; + self + } +} +impl<'a> AddAssign<&'a BigUint> for BigUint { + #[inline] + fn add_assign(&mut self, other: &BigUint) { + let self_len = self.data.len(); + let carry = if self_len < other.data.len() { + let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]); + self.data.extend_from_slice(&other.data[self_len..]); + __add2(&mut self.data[self_len..], &[lo_carry]) + } else { + __add2(&mut self.data[..], &other.data[..]) + }; + if carry != 0 { + self.data.push(carry); + } + } +} + +promote_unsigned_scalars!(impl Add for BigUint, add); +promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); + +impl Add for BigUint { + type Output = BigUint; + + #[inline] + fn add(mut self, other: u32) -> BigUint { + self += other; + self + } +} + +impl AddAssign for BigUint { + #[inline] + fn add_assign(&mut self, other: u32) { + if other != 0 { + if self.data.is_empty() { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[other as BigDigit]); + if carry != 0 { + self.data.push(carry); + } + } + } +} + +impl Add for BigUint { + type Output = BigUint; + + #[inline] + fn add(mut self, other: u64) -> BigUint { + self += other; + self + } +} + +impl AddAssign for BigUint { + #[cfg(not(u64_digit))] + #[inline] + fn add_assign(&mut self, other: u64) { + let (hi, lo) = big_digit::from_doublebigdigit(other); + if hi == 0 { + *self += lo; + } else { + while self.data.len() < 2 { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[lo, hi]); + if carry != 0 { + self.data.push(carry); + } + } + } + + 
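// Worked illustration of the carry handling in `__add2` above, using 8-bit
// "digits" for brevity (the real code uses `BigDigit` and the `adc` helper):
//
//     a = [0xFF, 0xFF], b = [0x01]
//     digit 0: 0xFF + 0x01         = 0x00, carry 1
//     digit 1: 0xFF + 0x00 + carry = 0x00, carry 1   // propagated through a_hi
//     `__add2` returns 1, so the callers above push a new digit: [0x00, 0x00, 0x01]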
#[cfg(u64_digit)] + #[inline] + fn add_assign(&mut self, other: u64) { + if other != 0 { + if self.data.is_empty() { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[other as BigDigit]); + if carry != 0 { + self.data.push(carry); + } + } + } +} + +impl Add for BigUint { + type Output = BigUint; + + #[inline] + fn add(mut self, other: u128) -> BigUint { + self += other; + self + } +} + +impl AddAssign for BigUint { + #[cfg(not(u64_digit))] + #[inline] + fn add_assign(&mut self, other: u128) { + if other <= u128::from(u64::max_value()) { + *self += other as u64 + } else { + let (a, b, c, d) = u32_from_u128(other); + let carry = if a > 0 { + while self.data.len() < 4 { + self.data.push(0); + } + __add2(&mut self.data, &[d, c, b, a]) + } else { + debug_assert!(b > 0); + while self.data.len() < 3 { + self.data.push(0); + } + __add2(&mut self.data, &[d, c, b]) + }; + + if carry != 0 { + self.data.push(carry); + } + } + } + + #[cfg(u64_digit)] + #[inline] + fn add_assign(&mut self, other: u128) { + let (hi, lo) = big_digit::from_doublebigdigit(other); + if hi == 0 { + *self += lo; + } else { + while self.data.len() < 2 { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[lo, hi]); + if carry != 0 { + self.data.push(carry); + } + } + } +} + +impl CheckedAdd for BigUint { + #[inline] + fn checked_add(&self, v: &BigUint) -> Option { + Some(self.add(v)) + } +} + +impl_sum_iter_type!(BigUint); diff --git a/src/biguint/arbitrary.rs b/src/biguint/arbitrary.rs new file mode 100644 index 00000000..8a2eb9e9 --- /dev/null +++ b/src/biguint/arbitrary.rs @@ -0,0 +1,28 @@ +use super::{biguint_from_vec, BigUint}; + +use crate::big_digit::BigDigit; +use crate::std_alloc::{Box, Vec}; + +#[cfg(feature = "quickcheck")] +impl quickcheck::Arbitrary for BigUint { + fn arbitrary(g: &mut G) -> Self { + // Use arbitrary from Vec + biguint_from_vec(Vec::::arbitrary(g)) + } + + fn shrink(&self) -> Box> { + // Use shrinker from Vec + Box::new(self.data.shrink().map(biguint_from_vec)) + } +} + +#[cfg(feature = "arbitrary")] +impl arbitrary::Arbitrary for BigUint { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + Ok(biguint_from_vec(Vec::::arbitrary(u)?)) + } + + fn shrink(&self) -> Box> { + Box::new(self.data.shrink().map(biguint_from_vec)) + } +} diff --git a/src/biguint/bits.rs b/src/biguint/bits.rs new file mode 100644 index 00000000..58c755a6 --- /dev/null +++ b/src/biguint/bits.rs @@ -0,0 +1,93 @@ +use super::{BigUint, IntDigits}; + +use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign}; + +forward_val_val_binop!(impl BitAnd for BigUint, bitand); +forward_ref_val_binop!(impl BitAnd for BigUint, bitand); + +// do not use forward_ref_ref_binop_commutative! 
for bitand so that we can +// clone the smaller value rather than the larger, avoiding over-allocation +impl<'a, 'b> BitAnd<&'b BigUint> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn bitand(self, other: &BigUint) -> BigUint { + // forward to val-ref, choosing the smaller to clone + if self.data.len() <= other.data.len() { + self.clone() & other + } else { + other.clone() & self + } + } +} + +forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign); + +impl<'a> BitAnd<&'a BigUint> for BigUint { + type Output = BigUint; + + #[inline] + fn bitand(mut self, other: &BigUint) -> BigUint { + self &= other; + self + } +} +impl<'a> BitAndAssign<&'a BigUint> for BigUint { + #[inline] + fn bitand_assign(&mut self, other: &BigUint) { + for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { + *ai &= bi; + } + self.data.truncate(other.data.len()); + self.normalize(); + } +} + +forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor); +forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign); + +impl<'a> BitOr<&'a BigUint> for BigUint { + type Output = BigUint; + + fn bitor(mut self, other: &BigUint) -> BigUint { + self |= other; + self + } +} +impl<'a> BitOrAssign<&'a BigUint> for BigUint { + #[inline] + fn bitor_assign(&mut self, other: &BigUint) { + for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { + *ai |= bi; + } + if other.data.len() > self.data.len() { + let extra = &other.data[self.data.len()..]; + self.data.extend(extra.iter().cloned()); + } + } +} + +forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor); +forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign); + +impl<'a> BitXor<&'a BigUint> for BigUint { + type Output = BigUint; + + fn bitxor(mut self, other: &BigUint) -> BigUint { + self ^= other; + self + } +} +impl<'a> BitXorAssign<&'a BigUint> for BigUint { + #[inline] + fn bitxor_assign(&mut self, other: &BigUint) { + for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { + *ai ^= bi; + } + if other.data.len() > self.data.len() { + let extra = &other.data[self.data.len()..]; + self.data.extend(extra.iter().cloned()); + } + self.normalize(); + } +} diff --git a/src/biguint/convert.rs b/src/biguint/convert.rs new file mode 100644 index 00000000..278ec782 --- /dev/null +++ b/src/biguint/convert.rs @@ -0,0 +1,756 @@ +use super::{biguint_from_vec, BigUint, ToBigUint}; + +use super::addition::add2; +use super::division::div_rem_digit; +use super::multiplication::mac_with_carry; + +use crate::big_digit::{self, BigDigit}; +use crate::std_alloc::Vec; +use crate::ParseBigIntError; +#[cfg(has_try_from)] +use crate::TryFromBigIntError; + +use core::cmp::Ordering::{Equal, Greater, Less}; +#[cfg(has_try_from)] +use core::convert::TryFrom; +use core::mem; +use core::str::FromStr; +use num_integer::Integer; +use num_traits::float::FloatCore; +use num_traits::{FromPrimitive, Num, PrimInt, ToPrimitive, Zero}; + +/// Find last set bit +/// fls(0) == 0, fls(u32::MAX) == 32 +fn fls(v: T) -> u8 { + mem::size_of::() as u8 * 8 - v.leading_zeros() as u8 +} + +fn ilog2(v: T) -> u8 { + fls(v) - 1 +} + +impl FromStr for BigUint { + type Err = ParseBigIntError; + + #[inline] + fn from_str(s: &str) -> Result { + BigUint::from_str_radix(s, 10) + } +} + +// Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides +// BigDigit::BITS +pub(super) fn from_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint { + debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0); + 
debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits))); + + let digits_per_big_digit = big_digit::BITS / bits; + + let data = v + .chunks(digits_per_big_digit.into()) + .map(|chunk| { + chunk + .iter() + .rev() + .fold(0, |acc, &c| (acc << bits) | BigDigit::from(c)) + }) + .collect(); + + biguint_from_vec(data) +} + +// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide +// BigDigit::BITS +fn from_inexact_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint { + debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0); + debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits))); + + let big_digits = (v.len() as u64) + .saturating_mul(bits.into()) + .div_ceil(&big_digit::BITS.into()) + .to_usize() + .unwrap_or(core::usize::MAX); + let mut data = Vec::with_capacity(big_digits); + + let mut d = 0; + let mut dbits = 0; // number of bits we currently have in d + + // walk v accumululating bits in d; whenever we accumulate big_digit::BITS in d, spit out a + // big_digit: + for &c in v { + d |= BigDigit::from(c) << dbits; + dbits += bits; + + if dbits >= big_digit::BITS { + data.push(d); + dbits -= big_digit::BITS; + // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit + // in d) - grab the bits we lost here: + d = BigDigit::from(c) >> (bits - dbits); + } + } + + if dbits > 0 { + debug_assert!(dbits < big_digit::BITS); + data.push(d as BigDigit); + } + + biguint_from_vec(data) +} + +// Read little-endian radix digits +fn from_radix_digits_be(v: &[u8], radix: u32) -> BigUint { + debug_assert!(!v.is_empty() && !radix.is_power_of_two()); + debug_assert!(v.iter().all(|&c| u32::from(c) < radix)); + + #[cfg(feature = "std")] + let radix_log2 = f64::from(radix).log2(); + #[cfg(not(feature = "std"))] + let radix_log2 = ilog2(radix.next_power_of_two()) as f64; + + // Estimate how big the result will be, so we can pre-allocate it. 
+ let bits = radix_log2 * v.len() as f64; + let big_digits = (bits / big_digit::BITS as f64).ceil(); + let mut data = Vec::with_capacity(big_digits.to_usize().unwrap_or(0)); + + let (base, power) = get_radix_base(radix, big_digit::BITS); + let radix = radix as BigDigit; + + let r = v.len() % power; + let i = if r == 0 { power } else { r }; + let (head, tail) = v.split_at(i); + + let first = head + .iter() + .fold(0, |acc, &d| acc * radix + BigDigit::from(d)); + data.push(first); + + debug_assert!(tail.len() % power == 0); + for chunk in tail.chunks(power) { + if data.last() != Some(&0) { + data.push(0); + } + + let mut carry = 0; + for d in data.iter_mut() { + *d = mac_with_carry(0, *d, base, &mut carry); + } + debug_assert!(carry == 0); + + let n = chunk + .iter() + .fold(0, |acc, &d| acc * radix + BigDigit::from(d)); + add2(&mut data, &[n]); + } + + biguint_from_vec(data) +} + +pub(super) fn from_radix_be(buf: &[u8], radix: u32) -> Option { + assert!( + 2 <= radix && radix <= 256, + "The radix must be within 2...256" + ); + + if buf.is_empty() { + return Some(Zero::zero()); + } + + if radix != 256 && buf.iter().any(|&b| b >= radix as u8) { + return None; + } + + let res = if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of multiplication + let bits = ilog2(radix); + let mut v = Vec::from(buf); + v.reverse(); + if big_digit::BITS % bits == 0 { + from_bitwise_digits_le(&v, bits) + } else { + from_inexact_bitwise_digits_le(&v, bits) + } + } else { + from_radix_digits_be(buf, radix) + }; + + Some(res) +} + +pub(super) fn from_radix_le(buf: &[u8], radix: u32) -> Option { + assert!( + 2 <= radix && radix <= 256, + "The radix must be within 2...256" + ); + + if buf.is_empty() { + return Some(Zero::zero()); + } + + if radix != 256 && buf.iter().any(|&b| b >= radix as u8) { + return None; + } + + let res = if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of multiplication + let bits = ilog2(radix); + if big_digit::BITS % bits == 0 { + from_bitwise_digits_le(buf, bits) + } else { + from_inexact_bitwise_digits_le(buf, bits) + } + } else { + let mut v = Vec::from(buf); + v.reverse(); + from_radix_digits_be(&v, radix) + }; + + Some(res) +} + +impl Num for BigUint { + type FromStrRadixErr = ParseBigIntError; + + /// Creates and initializes a `BigUint`. + fn from_str_radix(s: &str, radix: u32) -> Result { + assert!(2 <= radix && radix <= 36, "The radix must be within 2...36"); + let mut s = s; + if s.starts_with('+') { + let tail = &s[1..]; + if !tail.starts_with('+') { + s = tail + } + } + + if s.is_empty() { + return Err(ParseBigIntError::empty()); + } + + if s.starts_with('_') { + // Must lead with a real digit! 
+ return Err(ParseBigIntError::invalid()); + } + + // First normalize all characters to plain digit values + let mut v = Vec::with_capacity(s.len()); + for b in s.bytes() { + let d = match b { + b'0'..=b'9' => b - b'0', + b'a'..=b'z' => b - b'a' + 10, + b'A'..=b'Z' => b - b'A' + 10, + b'_' => continue, + _ => core::u8::MAX, + }; + if d < radix as u8 { + v.push(d); + } else { + return Err(ParseBigIntError::invalid()); + } + } + + let res = if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of multiplication + let bits = ilog2(radix); + v.reverse(); + if big_digit::BITS % bits == 0 { + from_bitwise_digits_le(&v, bits) + } else { + from_inexact_bitwise_digits_le(&v, bits) + } + } else { + from_radix_digits_be(&v, radix) + }; + Ok(res) + } +} + +fn high_bits_to_u64(v: &BigUint) -> u64 { + match v.data.len() { + 0 => 0, + 1 => { + // XXX Conversion is useless if already 64-bit. + #[allow(clippy::useless_conversion)] + let v0 = u64::from(v.data[0]); + v0 + } + _ => { + let mut bits = v.bits(); + let mut ret = 0u64; + let mut ret_bits = 0; + + for d in v.data.iter().rev() { + let digit_bits = (bits - 1) % u64::from(big_digit::BITS) + 1; + let bits_want = Ord::min(64 - ret_bits, digit_bits); + + if bits_want != 64 { + ret <<= bits_want; + } + // XXX Conversion is useless if already 64-bit. + #[allow(clippy::useless_conversion)] + let d0 = u64::from(*d) >> (digit_bits - bits_want); + ret |= d0; + ret_bits += bits_want; + bits -= bits_want; + + if ret_bits == 64 { + break; + } + } + + ret + } + } +} + +impl ToPrimitive for BigUint { + #[inline] + fn to_i64(&self) -> Option { + self.to_u64().as_ref().and_then(u64::to_i64) + } + + #[inline] + fn to_i128(&self) -> Option { + self.to_u128().as_ref().and_then(u128::to_i128) + } + + #[allow(clippy::useless_conversion)] + #[inline] + fn to_u64(&self) -> Option { + let mut ret: u64 = 0; + let mut bits = 0; + + for i in self.data.iter() { + if bits >= 64 { + return None; + } + + // XXX Conversion is useless if already 64-bit. + ret += u64::from(*i) << bits; + bits += big_digit::BITS; + } + + Some(ret) + } + + #[inline] + fn to_u128(&self) -> Option { + let mut ret: u128 = 0; + let mut bits = 0; + + for i in self.data.iter() { + if bits >= 128 { + return None; + } + + ret |= u128::from(*i) << bits; + bits += big_digit::BITS; + } + + Some(ret) + } + + #[inline] + fn to_f32(&self) -> Option { + let mantissa = high_bits_to_u64(self); + let exponent = self.bits() - u64::from(fls(mantissa)); + + if exponent > core::f32::MAX_EXP as u64 { + Some(core::f32::INFINITY) + } else { + Some((mantissa as f32) * 2.0f32.powi(exponent as i32)) + } + } + + #[inline] + fn to_f64(&self) -> Option { + let mantissa = high_bits_to_u64(self); + let exponent = self.bits() - u64::from(fls(mantissa)); + + if exponent > core::f64::MAX_EXP as u64 { + Some(core::f64::INFINITY) + } else { + Some((mantissa as f64) * 2.0f64.powi(exponent as i32)) + } + } +} + +macro_rules! 
impl_try_from_biguint { + ($T:ty, $to_ty:path) => { + #[cfg(has_try_from)] + impl TryFrom<&BigUint> for $T { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: &BigUint) -> Result<$T, TryFromBigIntError<()>> { + $to_ty(value).ok_or(TryFromBigIntError::new(())) + } + } + + #[cfg(has_try_from)] + impl TryFrom for $T { + type Error = TryFromBigIntError; + + #[inline] + fn try_from(value: BigUint) -> Result<$T, TryFromBigIntError> { + <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value)) + } + } + }; +} + +impl_try_from_biguint!(u8, ToPrimitive::to_u8); +impl_try_from_biguint!(u16, ToPrimitive::to_u16); +impl_try_from_biguint!(u32, ToPrimitive::to_u32); +impl_try_from_biguint!(u64, ToPrimitive::to_u64); +impl_try_from_biguint!(usize, ToPrimitive::to_usize); +impl_try_from_biguint!(u128, ToPrimitive::to_u128); + +impl_try_from_biguint!(i8, ToPrimitive::to_i8); +impl_try_from_biguint!(i16, ToPrimitive::to_i16); +impl_try_from_biguint!(i32, ToPrimitive::to_i32); +impl_try_from_biguint!(i64, ToPrimitive::to_i64); +impl_try_from_biguint!(isize, ToPrimitive::to_isize); +impl_try_from_biguint!(i128, ToPrimitive::to_i128); + +impl FromPrimitive for BigUint { + #[inline] + fn from_i64(n: i64) -> Option { + if n >= 0 { + Some(BigUint::from(n as u64)) + } else { + None + } + } + + #[inline] + fn from_i128(n: i128) -> Option { + if n >= 0 { + Some(BigUint::from(n as u128)) + } else { + None + } + } + + #[inline] + fn from_u64(n: u64) -> Option { + Some(BigUint::from(n)) + } + + #[inline] + fn from_u128(n: u128) -> Option { + Some(BigUint::from(n)) + } + + #[inline] + fn from_f64(mut n: f64) -> Option { + // handle NAN, INFINITY, NEG_INFINITY + if !n.is_finite() { + return None; + } + + // match the rounding of casting from float to int + n = n.trunc(); + + // handle 0.x, -0.x + if n.is_zero() { + return Some(BigUint::zero()); + } + + let (mantissa, exponent, sign) = FloatCore::integer_decode(n); + + if sign == -1 { + return None; + } + + let mut ret = BigUint::from(mantissa); + match exponent.cmp(&0) { + Greater => ret <<= exponent as usize, + Equal => {} + Less => ret >>= (-exponent) as usize, + } + Some(ret) + } +} + +impl From for BigUint { + #[inline] + fn from(mut n: u64) -> Self { + let mut ret: BigUint = Zero::zero(); + + while n != 0 { + ret.data.push(n as BigDigit); + // don't overflow if BITS is 64: + n = (n >> 1) >> (big_digit::BITS - 1); + } + + ret + } +} + +impl From for BigUint { + #[inline] + fn from(mut n: u128) -> Self { + let mut ret: BigUint = Zero::zero(); + + while n != 0 { + ret.data.push(n as BigDigit); + n >>= big_digit::BITS; + } + + ret + } +} + +macro_rules! impl_biguint_from_uint { + ($T:ty) => { + impl From<$T> for BigUint { + #[inline] + fn from(n: $T) -> Self { + BigUint::from(n as u64) + } + } + }; +} + +impl_biguint_from_uint!(u8); +impl_biguint_from_uint!(u16); +impl_biguint_from_uint!(u32); +impl_biguint_from_uint!(usize); + +macro_rules! 
impl_biguint_try_from_int { + ($T:ty, $from_ty:path) => { + #[cfg(has_try_from)] + impl TryFrom<$T> for BigUint { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: $T) -> Result> { + $from_ty(value).ok_or(TryFromBigIntError::new(())) + } + } + }; +} + +impl_biguint_try_from_int!(i8, FromPrimitive::from_i8); +impl_biguint_try_from_int!(i16, FromPrimitive::from_i16); +impl_biguint_try_from_int!(i32, FromPrimitive::from_i32); +impl_biguint_try_from_int!(i64, FromPrimitive::from_i64); +impl_biguint_try_from_int!(isize, FromPrimitive::from_isize); +impl_biguint_try_from_int!(i128, FromPrimitive::from_i128); + +impl ToBigUint for BigUint { + #[inline] + fn to_biguint(&self) -> Option { + Some(self.clone()) + } +} + +macro_rules! impl_to_biguint { + ($T:ty, $from_ty:path) => { + impl ToBigUint for $T { + #[inline] + fn to_biguint(&self) -> Option { + $from_ty(*self) + } + } + }; +} + +impl_to_biguint!(isize, FromPrimitive::from_isize); +impl_to_biguint!(i8, FromPrimitive::from_i8); +impl_to_biguint!(i16, FromPrimitive::from_i16); +impl_to_biguint!(i32, FromPrimitive::from_i32); +impl_to_biguint!(i64, FromPrimitive::from_i64); +impl_to_biguint!(i128, FromPrimitive::from_i128); + +impl_to_biguint!(usize, FromPrimitive::from_usize); +impl_to_biguint!(u8, FromPrimitive::from_u8); +impl_to_biguint!(u16, FromPrimitive::from_u16); +impl_to_biguint!(u32, FromPrimitive::from_u32); +impl_to_biguint!(u64, FromPrimitive::from_u64); +impl_to_biguint!(u128, FromPrimitive::from_u128); + +impl_to_biguint!(f32, FromPrimitive::from_f32); +impl_to_biguint!(f64, FromPrimitive::from_f64); + +// Extract bitwise digits that evenly divide BigDigit +pub(super) fn to_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec { + debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0); + + let last_i = u.data.len() - 1; + let mask: BigDigit = (1 << bits) - 1; + let digits_per_big_digit = big_digit::BITS / bits; + let digits = u + .bits() + .div_ceil(&u64::from(bits)) + .to_usize() + .unwrap_or(core::usize::MAX); + let mut res = Vec::with_capacity(digits); + + for mut r in u.data[..last_i].iter().cloned() { + for _ in 0..digits_per_big_digit { + res.push((r & mask) as u8); + r >>= bits; + } + } + + let mut r = u.data[last_i]; + while r != 0 { + res.push((r & mask) as u8); + r >>= bits; + } + + res +} + +// Extract bitwise digits that don't evenly divide BigDigit +fn to_inexact_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec { + debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0); + + let mask: BigDigit = (1 << bits) - 1; + let digits = u + .bits() + .div_ceil(&u64::from(bits)) + .to_usize() + .unwrap_or(core::usize::MAX); + let mut res = Vec::with_capacity(digits); + + let mut r = 0; + let mut rbits = 0; + + for c in &u.data { + r |= *c << rbits; + rbits += big_digit::BITS; + + while rbits >= bits { + res.push((r & mask) as u8); + r >>= bits; + + // r had more bits than it could fit - grab the bits we lost + if rbits > big_digit::BITS { + r = *c >> (big_digit::BITS - (rbits - bits)); + } + + rbits -= bits; + } + } + + if rbits != 0 { + res.push(r as u8); + } + + while let Some(&0) = res.last() { + res.pop(); + } + + res +} + +// Extract little-endian radix digits +#[inline(always)] // forced inline to get const-prop for radix=10 +pub(super) fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec { + debug_assert!(!u.is_zero() && !radix.is_power_of_two()); + + #[cfg(feature = "std")] + let radix_log2 = f64::from(radix).log2(); + #[cfg(not(feature = "std"))] + let radix_log2 = 
ilog2(radix) as f64; + + // Estimate how big the result will be, so we can pre-allocate it. + let radix_digits = ((u.bits() as f64) / radix_log2).ceil(); + let mut res = Vec::with_capacity(radix_digits.to_usize().unwrap_or(0)); + + let mut digits = u.clone(); + + let (base, power) = get_radix_base(radix, big_digit::HALF_BITS); + let radix = radix as BigDigit; + + while digits.data.len() > 1 { + let (q, mut r) = div_rem_digit(digits, base); + for _ in 0..power { + res.push((r % radix) as u8); + r /= radix; + } + digits = q; + } + + let mut r = digits.data[0]; + while r != 0 { + res.push((r % radix) as u8); + r /= radix; + } + + res +} + +pub(super) fn to_radix_le(u: &BigUint, radix: u32) -> Vec { + if u.is_zero() { + vec![0] + } else if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of division + let bits = ilog2(radix); + if big_digit::BITS % bits == 0 { + to_bitwise_digits_le(u, bits) + } else { + to_inexact_bitwise_digits_le(u, bits) + } + } else if radix == 10 { + // 10 is so common that it's worth separating out for const-propagation. + // Optimizers can often turn constant division into a faster multiplication. + to_radix_digits_le(u, 10) + } else { + to_radix_digits_le(u, radix) + } +} + +pub(crate) fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec { + assert!(2 <= radix && radix <= 36, "The radix must be within 2...36"); + + if u.is_zero() { + return vec![b'0']; + } + + let mut res = to_radix_le(u, radix); + + // Now convert everything to ASCII digits. + for r in &mut res { + debug_assert!(u32::from(*r) < radix); + if *r < 10 { + *r += b'0'; + } else { + *r += b'a' - 10; + } + } + res +} + +/// Returns the greatest power of the radix for the given bit size +#[inline] +fn get_radix_base(radix: u32, bits: u8) -> (BigDigit, usize) { + mod gen { + include! { concat!(env!("OUT_DIR"), "/radix_bases.rs") } + } + + debug_assert!( + 2 <= radix && radix <= 256, + "The radix must be within 2...256" + ); + debug_assert!(!radix.is_power_of_two()); + debug_assert!(bits <= big_digit::BITS); + + match bits { + 16 => { + let (base, power) = gen::BASES_16[radix as usize]; + (base as BigDigit, power) + } + 32 => { + let (base, power) = gen::BASES_32[radix as usize]; + (base as BigDigit, power) + } + 64 => { + let (base, power) = gen::BASES_64[radix as usize]; + (base as BigDigit, power) + } + _ => panic!("Invalid bigdigit size"), + } +} diff --git a/src/biguint/division.rs b/src/biguint/division.rs new file mode 100644 index 00000000..030b185f --- /dev/null +++ b/src/biguint/division.rs @@ -0,0 +1,615 @@ +use super::addition::__add2; +#[cfg(not(u64_digit))] +use super::u32_to_u128; +use super::BigUint; + +use crate::big_digit::{self, BigDigit, DoubleBigDigit}; +use crate::UsizePromotion; + +use core::cmp::Ordering::{Equal, Greater, Less}; +use core::mem; +use core::ops::{Div, DivAssign, Rem, RemAssign}; +use num_integer::Integer; +use num_traits::{CheckedDiv, One, ToPrimitive, Zero}; + +/// Divide a two digit numerator by a one digit divisor, returns quotient and remainder: +/// +/// Note: the caller must ensure that both the quotient and remainder will fit into a single digit. +/// This is _not_ true for an arbitrary numerator/denominator. +/// +/// (This function also matches what the x86 divide instruction does). 
+#[inline] +fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) { + debug_assert!(hi < divisor); + + let lhs = big_digit::to_doublebigdigit(hi, lo); + let rhs = DoubleBigDigit::from(divisor); + ((lhs / rhs) as BigDigit, (lhs % rhs) as BigDigit) +} + +/// For small divisors, we can divide without promoting to `DoubleBigDigit` by +/// using half-size pieces of digit, like long-division. +#[inline] +fn div_half(rem: BigDigit, digit: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) { + use crate::big_digit::{HALF, HALF_BITS}; + + debug_assert!(rem < divisor && divisor <= HALF); + let (hi, rem) = ((rem << HALF_BITS) | (digit >> HALF_BITS)).div_rem(&divisor); + let (lo, rem) = ((rem << HALF_BITS) | (digit & HALF)).div_rem(&divisor); + ((hi << HALF_BITS) | lo, rem) +} + +#[inline] +pub(super) fn div_rem_digit(mut a: BigUint, b: BigDigit) -> (BigUint, BigDigit) { + let mut rem = 0; + + if b <= big_digit::HALF { + for d in a.data.iter_mut().rev() { + let (q, r) = div_half(rem, *d, b); + *d = q; + rem = r; + } + } else { + for d in a.data.iter_mut().rev() { + let (q, r) = div_wide(rem, *d, b); + *d = q; + rem = r; + } + } + + (a.normalized(), rem) +} + +#[inline] +fn rem_digit(a: &BigUint, b: BigDigit) -> BigDigit { + let mut rem = 0; + + if b <= big_digit::HALF { + for &digit in a.data.iter().rev() { + let (_, r) = div_half(rem, digit, b); + rem = r; + } + } else { + for &digit in a.data.iter().rev() { + let (_, r) = div_wide(rem, digit, b); + rem = r; + } + } + + rem +} + +/// Subtract a multiple. +/// a -= b * c +/// Returns a borrow (if a < b then borrow > 0). +fn sub_mul_digit_same_len(a: &mut [BigDigit], b: &[BigDigit], c: BigDigit) -> BigDigit { + debug_assert!(a.len() == b.len()); + + // carry is between -big_digit::MAX and 0, so to avoid overflow we store + // offset_carry = carry + big_digit::MAX + let mut offset_carry = big_digit::MAX; + + for (x, y) in a.iter_mut().zip(b) { + // We want to calculate sum = x - y * c + carry. + // sum >= -(big_digit::MAX * big_digit::MAX) - big_digit::MAX + // sum <= big_digit::MAX + // Offsetting sum by (big_digit::MAX << big_digit::BITS) puts it in DoubleBigDigit range. + let offset_sum = big_digit::to_doublebigdigit(big_digit::MAX, *x) + - big_digit::MAX as DoubleBigDigit + + offset_carry as DoubleBigDigit + - *y as DoubleBigDigit * c as DoubleBigDigit; + + let (new_offset_carry, new_x) = big_digit::from_doublebigdigit(offset_sum); + offset_carry = new_offset_carry; + *x = new_x; + } + + // Return the borrow. + big_digit::MAX - offset_carry +} + +fn div_rem(mut u: BigUint, mut d: BigUint) -> (BigUint, BigUint) { + if d.is_zero() { + panic!("attempt to divide by zero") + } + if u.is_zero() { + return (Zero::zero(), Zero::zero()); + } + + if d.data.len() == 1 { + if d.data == [1] { + return (u, Zero::zero()); + } + let (div, rem) = div_rem_digit(u, d.data[0]); + // reuse d + d.data.clear(); + d += rem; + return (div, d); + } + + // Required or the q_len calculation below can underflow: + match u.cmp(&d) { + Less => return (Zero::zero(), u), + Equal => { + u.set_one(); + return (u, Zero::zero()); + } + Greater => {} // Do nothing + } + + // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D: + // + // First, normalize the arguments so the highest bit in the highest digit of the divisor is + // set: the main loop uses the highest digit of the divisor for generating guesses, so we + // want it to be the largest number we can efficiently divide by. 
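// A small check of the normalization idea described above, assuming plain u64
// arithmetic: shifting numerator and divisor left by the same amount leaves the
// quotient unchanged and only scales the remainder by the shift.
#[test]
fn normalization_preserves_quotient() {
    let (u, d): (u64, u64) = (1_000_000, 3_000);
    // Compute the shift from a 32-bit view of `d` so these toy values cannot overflow u64.
    let shift = (d as u32).leading_zeros();
    assert_eq!(u / d, (u << shift) / (d << shift));
    assert_eq!(u % d, ((u << shift) % (d << shift)) >> shift);
}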
+ // + let shift = d.data.last().unwrap().leading_zeros() as usize; + + let (q, r) = if shift == 0 { + // no need to clone d + div_rem_core(u, &d) + } else { + div_rem_core(u << shift, &(d << shift)) + }; + // renormalize the remainder + (q, r >> shift) +} + +pub(super) fn div_rem_ref(u: &BigUint, d: &BigUint) -> (BigUint, BigUint) { + if d.is_zero() { + panic!("attempt to divide by zero") + } + if u.is_zero() { + return (Zero::zero(), Zero::zero()); + } + + if d.data.len() == 1 { + if d.data == [1] { + return (u.clone(), Zero::zero()); + } + + let (div, rem) = div_rem_digit(u.clone(), d.data[0]); + return (div, rem.into()); + } + + // Required or the q_len calculation below can underflow: + match u.cmp(d) { + Less => return (Zero::zero(), u.clone()), + Equal => return (One::one(), Zero::zero()), + Greater => {} // Do nothing + } + + // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D: + // + // First, normalize the arguments so the highest bit in the highest digit of the divisor is + // set: the main loop uses the highest digit of the divisor for generating guesses, so we + // want it to be the largest number we can efficiently divide by. + // + let shift = d.data.last().unwrap().leading_zeros() as usize; + + let (q, r) = if shift == 0 { + // no need to clone d + div_rem_core(u.clone(), d) + } else { + div_rem_core(u << shift, &(d << shift)) + }; + // renormalize the remainder + (q, r >> shift) +} + +/// An implementation of the base division algorithm. +/// Knuth, TAOCP vol 2 section 4.3.1, algorithm D, with an improvement from exercises 19-21. +fn div_rem_core(mut a: BigUint, b: &BigUint) -> (BigUint, BigUint) { + debug_assert!( + a.data.len() >= b.data.len() + && b.data.len() > 1 + && b.data.last().unwrap().leading_zeros() == 0 + ); + + // The algorithm works by incrementally calculating "guesses", q0, for the next digit of the + // quotient. Once we have any number q0 such that (q0 << j) * b <= a, we can set + // + // q += q0 << j + // a -= (q0 << j) * b + // + // and then iterate until a < b. Then, (q, a) will be our desired quotient and remainder. + // + // q0, our guess, is calculated by dividing the last three digits of a by the last two digits of + // b - this will give us a guess that is close to the actual quotient, but is possibly greater. + // It can only be greater by 1 and only in rare cases, with probability at most + // 2^-(big_digit::BITS-1) for random a, see TAOCP 4.3.1 exercise 21. + // + // If the quotient turns out to be too large, we adjust it by 1: + // q -= 1 << j + // a += b << j + + // a0 stores an additional extra most significant digit of the dividend, not stored in a. + let mut a0 = 0; + + // [b1, b0] are the two most significant digits of the divisor. They never change. + let b0 = *b.data.last().unwrap(); + let b1 = b.data[b.data.len() - 2]; + + let q_len = a.data.len() - b.data.len() + 1; + let mut q = BigUint { + data: vec![0; q_len], + }; + + for j in (0..q_len).rev() { + debug_assert!(a.data.len() == b.data.len() + j); + + let a1 = *a.data.last().unwrap(); + let a2 = a.data[a.data.len() - 2]; + + // The first q0 estimate is [a1,a0] / b0. It will never be too small, it may be too large + // by at most 2. + let (mut q0, mut r) = if a0 < b0 { + let (q0, r) = div_wide(a0, a1, b0); + (q0, r as DoubleBigDigit) + } else { + debug_assert!(a0 == b0); + // Avoid overflowing q0, we know the quotient fits in BigDigit. + // [a1,a0] = b0 * (1< a0 { + // q0 is too large. We need to add back one multiple of b. 
+ q0 -= 1; + borrow -= __add2(&mut a.data[j..], &b.data); + } + // The top digit of a, stored in a0, has now been zeroed. + debug_assert!(borrow == a0); + + q.data[j] = q0; + + // Pop off the next top digit of a. + a0 = a.data.pop().unwrap(); + } + + a.data.push(a0); + a.normalize(); + + debug_assert!(a < *b); + + (q.normalized(), a) +} + +forward_val_ref_binop!(impl Div for BigUint, div); +forward_ref_val_binop!(impl Div for BigUint, div); +forward_val_assign!(impl DivAssign for BigUint, div_assign); + +impl Div for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: BigUint) -> BigUint { + let (q, _) = div_rem(self, other); + q + } +} + +impl<'a, 'b> Div<&'b BigUint> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: &BigUint) -> BigUint { + let (q, _) = self.div_rem(other); + q + } +} +impl<'a> DivAssign<&'a BigUint> for BigUint { + #[inline] + fn div_assign(&mut self, other: &'a BigUint) { + *self = &*self / other; + } +} + +promote_unsigned_scalars!(impl Div for BigUint, div); +promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign); +forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); + +impl Div for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: u32) -> BigUint { + let (q, _) = div_rem_digit(self, other as BigDigit); + q + } +} +impl DivAssign for BigUint { + #[inline] + fn div_assign(&mut self, other: u32) { + *self = &*self / other; + } +} + +impl Div for u32 { + type Output = BigUint; + + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self as BigDigit / other.data[0]), + _ => Zero::zero(), + } + } +} + +impl Div for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: u64) -> BigUint { + let (q, _) = div_rem(self, From::from(other)); + q + } +} +impl DivAssign for BigUint { + #[inline] + fn div_assign(&mut self, other: u64) { + // a vec of size 0 does not allocate, so this is fairly cheap + let temp = mem::replace(self, Zero::zero()); + *self = temp / other; + } +} + +impl Div for u64 { + type Output = BigUint; + + #[cfg(not(u64_digit))] + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / u64::from(other.data[0])), + 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])), + _ => Zero::zero(), + } + } + + #[cfg(u64_digit)] + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / other.data[0]), + _ => Zero::zero(), + } + } +} + +impl Div for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: u128) -> BigUint { + let (q, _) = div_rem(self, From::from(other)); + q + } +} + +impl DivAssign for BigUint { + #[inline] + fn div_assign(&mut self, other: u128) { + *self = &*self / other; + } +} + +impl Div for u128 { + type Output = BigUint; + + #[cfg(not(u64_digit))] + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / u128::from(other.data[0])), + 2 => From::from( + self / u128::from(big_digit::to_doublebigdigit(other.data[1], other.data[0])), + ), + 3 => From::from(self / u32_to_u128(0, other.data[2], 
other.data[1], other.data[0])), + 4 => From::from( + self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]), + ), + _ => Zero::zero(), + } + } + + #[cfg(u64_digit)] + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / other.data[0] as u128), + 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])), + _ => Zero::zero(), + } + } +} + +forward_val_ref_binop!(impl Rem for BigUint, rem); +forward_ref_val_binop!(impl Rem for BigUint, rem); +forward_val_assign!(impl RemAssign for BigUint, rem_assign); + +impl Rem for BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: BigUint) -> BigUint { + if let Some(other) = other.to_u32() { + &self % other + } else { + let (_, r) = div_rem(self, other); + r + } + } +} + +impl<'a, 'b> Rem<&'b BigUint> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: &BigUint) -> BigUint { + if let Some(other) = other.to_u32() { + self % other + } else { + let (_, r) = self.div_rem(other); + r + } + } +} +impl<'a> RemAssign<&'a BigUint> for BigUint { + #[inline] + fn rem_assign(&mut self, other: &BigUint) { + *self = &*self % other; + } +} + +promote_unsigned_scalars!(impl Rem for BigUint, rem); +promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign); +forward_all_scalar_binop_to_ref_val!(impl Rem for BigUint, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigUint, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigUint, rem); + +impl<'a> Rem for &'a BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: u32) -> BigUint { + rem_digit(self, other as BigDigit).into() + } +} +impl RemAssign for BigUint { + #[inline] + fn rem_assign(&mut self, other: u32) { + *self = &*self % other; + } +} + +impl<'a> Rem<&'a BigUint> for u32 { + type Output = BigUint; + + #[inline] + fn rem(mut self, other: &'a BigUint) -> BigUint { + self %= other; + From::from(self) + } +} + +macro_rules! 
impl_rem_assign_scalar { + ($scalar:ty, $to_scalar:ident) => { + forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign); + impl<'a> RemAssign<&'a BigUint> for $scalar { + #[inline] + fn rem_assign(&mut self, other: &BigUint) { + *self = match other.$to_scalar() { + None => *self, + Some(0) => panic!("attempt to divide by zero"), + Some(v) => *self % v + }; + } + } + } +} + +// we can scalar %= BigUint for any scalar, including signed types +impl_rem_assign_scalar!(u128, to_u128); +impl_rem_assign_scalar!(usize, to_usize); +impl_rem_assign_scalar!(u64, to_u64); +impl_rem_assign_scalar!(u32, to_u32); +impl_rem_assign_scalar!(u16, to_u16); +impl_rem_assign_scalar!(u8, to_u8); +impl_rem_assign_scalar!(i128, to_i128); +impl_rem_assign_scalar!(isize, to_isize); +impl_rem_assign_scalar!(i64, to_i64); +impl_rem_assign_scalar!(i32, to_i32); +impl_rem_assign_scalar!(i16, to_i16); +impl_rem_assign_scalar!(i8, to_i8); + +impl Rem for BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: u64) -> BigUint { + let (_, r) = div_rem(self, From::from(other)); + r + } +} +impl RemAssign for BigUint { + #[inline] + fn rem_assign(&mut self, other: u64) { + *self = &*self % other; + } +} + +impl Rem for u64 { + type Output = BigUint; + + #[inline] + fn rem(mut self, other: BigUint) -> BigUint { + self %= other; + From::from(self) + } +} + +impl Rem for BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: u128) -> BigUint { + let (_, r) = div_rem(self, From::from(other)); + r + } +} + +impl RemAssign for BigUint { + #[inline] + fn rem_assign(&mut self, other: u128) { + *self = &*self % other; + } +} + +impl Rem for u128 { + type Output = BigUint; + + #[inline] + fn rem(mut self, other: BigUint) -> BigUint { + self %= other; + From::from(self) + } +} + +impl CheckedDiv for BigUint { + #[inline] + fn checked_div(&self, v: &BigUint) -> Option { + if v.is_zero() { + return None; + } + Some(self.div(v)) + } +} diff --git a/src/biguint/iter.rs b/src/biguint/iter.rs new file mode 100644 index 00000000..5b9ceff5 --- /dev/null +++ b/src/biguint/iter.rs @@ -0,0 +1,271 @@ +use core::iter::FusedIterator; + +#[cfg(not(u64_digit))] +use super::u32_chunk_to_u64; + +/// An iterator of `u32` digits representation of a `BigUint` or `BigInt`, +/// ordered least significant digit first. 
+pub struct U32Digits<'a> { + #[cfg(u64_digit)] + data: &'a [u64], + #[cfg(u64_digit)] + next_is_lo: bool, + #[cfg(u64_digit)] + last_hi_is_zero: bool, + + #[cfg(not(u64_digit))] + it: core::slice::Iter<'a, u32>, +} + +#[cfg(u64_digit)] +impl<'a> U32Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u64]) -> Self { + let last_hi_is_zero = data + .last() + .map(|&last| { + let last_hi = (last >> 32) as u32; + last_hi == 0 + }) + .unwrap_or(false); + U32Digits { + data, + next_is_lo: true, + last_hi_is_zero, + } + } +} + +#[cfg(u64_digit)] +impl Iterator for U32Digits<'_> { + type Item = u32; + #[inline] + fn next(&mut self) -> Option { + match self.data.split_first() { + Some((&first, data)) => { + let next_is_lo = self.next_is_lo; + self.next_is_lo = !next_is_lo; + if next_is_lo { + Some(first as u32) + } else { + self.data = data; + if data.is_empty() && self.last_hi_is_zero { + self.last_hi_is_zero = false; + None + } else { + Some((first >> 32) as u32) + } + } + } + None => None, + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = self.len(); + (len, Some(len)) + } + + #[inline] + fn last(self) -> Option { + self.data.last().map(|&last| { + if self.last_hi_is_zero { + last as u32 + } else { + (last >> 32) as u32 + } + }) + } + + #[inline] + fn count(self) -> usize { + self.len() + } +} + +#[cfg(u64_digit)] +impl ExactSizeIterator for U32Digits<'_> { + #[inline] + fn len(&self) -> usize { + self.data.len() * 2 - usize::from(self.last_hi_is_zero) - usize::from(!self.next_is_lo) + } +} + +#[cfg(not(u64_digit))] +impl<'a> U32Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u32]) -> Self { + Self { it: data.iter() } + } +} + +#[cfg(not(u64_digit))] +impl Iterator for U32Digits<'_> { + type Item = u32; + #[inline] + fn next(&mut self) -> Option { + self.it.next().cloned() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + self.it.nth(n).cloned() + } + + #[inline] + fn last(self) -> Option { + self.it.last().cloned() + } + + #[inline] + fn count(self) -> usize { + self.it.count() + } +} + +#[cfg(not(u64_digit))] +impl ExactSizeIterator for U32Digits<'_> { + #[inline] + fn len(&self) -> usize { + self.it.len() + } +} + +impl FusedIterator for U32Digits<'_> {} + +/// An iterator of `u64` digits representation of a `BigUint` or `BigInt`, +/// ordered least significant digit first. 
+pub struct U64Digits<'a> { + #[cfg(not(u64_digit))] + it: core::slice::Chunks<'a, u32>, + + #[cfg(u64_digit)] + it: core::slice::Iter<'a, u64>, +} + +#[cfg(not(u64_digit))] +impl<'a> U64Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u32]) -> Self { + U64Digits { it: data.chunks(2) } + } +} + +#[cfg(not(u64_digit))] +impl Iterator for U64Digits<'_> { + type Item = u64; + #[inline] + fn next(&mut self) -> Option { + self.it.next().map(u32_chunk_to_u64) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = self.len(); + (len, Some(len)) + } + + #[inline] + fn last(self) -> Option { + self.it.last().map(u32_chunk_to_u64) + } + + #[inline] + fn count(self) -> usize { + self.len() + } +} + +#[cfg(u64_digit)] +impl<'a> U64Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u64]) -> Self { + Self { it: data.iter() } + } +} + +#[cfg(u64_digit)] +impl Iterator for U64Digits<'_> { + type Item = u64; + #[inline] + fn next(&mut self) -> Option { + self.it.next().cloned() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + self.it.nth(n).cloned() + } + + #[inline] + fn last(self) -> Option { + self.it.last().cloned() + } + + #[inline] + fn count(self) -> usize { + self.it.count() + } +} + +impl ExactSizeIterator for U64Digits<'_> { + #[inline] + fn len(&self) -> usize { + self.it.len() + } +} + +impl FusedIterator for U64Digits<'_> {} + +#[test] +fn test_iter_u32_digits() { + let n = super::BigUint::from(5u8); + let mut it = n.iter_u32_digits(); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(5)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + + let n = super::BigUint::from(112500000000u64); + let mut it = n.iter_u32_digits(); + assert_eq!(it.len(), 2); + assert_eq!(it.next(), Some(830850304)); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(26)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); +} + +#[test] +fn test_iter_u64_digits() { + let n = super::BigUint::from(5u8); + let mut it = n.iter_u64_digits(); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(5)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + + let n = super::BigUint::from(18_446_744_073_709_551_616u128); + let mut it = n.iter_u64_digits(); + assert_eq!(it.len(), 2); + assert_eq!(it.next(), Some(0)); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); +} diff --git a/src/monty.rs b/src/biguint/monty.rs similarity index 99% rename from src/monty.rs rename to src/biguint/monty.rs index 608244d7..a5c79aa9 100644 --- a/src/monty.rs +++ b/src/biguint/monty.rs @@ -133,7 +133,7 @@ fn mul_add_www(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) { /// Calculates x ** y mod m using a fixed, 4-bit window. 
#[allow(clippy::many_single_char_names)] -pub(crate) fn monty_modpow(x: &BigUint, y: &BigUint, m: &BigUint) -> BigUint { +pub(super) fn monty_modpow(x: &BigUint, y: &BigUint, m: &BigUint) -> BigUint { assert!(m.data[0] & 1 == 1); let mr = MontyReducer::new(m); let num_words = m.data.len(); diff --git a/src/biguint/multiplication.rs b/src/biguint/multiplication.rs new file mode 100644 index 00000000..aaa69344 --- /dev/null +++ b/src/biguint/multiplication.rs @@ -0,0 +1,507 @@ +use super::addition::{__add2, add2}; +use super::subtraction::sub2; +#[cfg(not(u64_digit))] +use super::u32_from_u128; +use super::{biguint_from_vec, cmp_slice, BigUint}; + +use crate::big_digit::{self, BigDigit, DoubleBigDigit}; +use crate::Sign::{self, Minus, NoSign, Plus}; +use crate::{BigInt, UsizePromotion}; + +use core::cmp::Ordering; +use core::iter::Product; +use core::ops::{Mul, MulAssign}; +use num_traits::{CheckedMul, One, Zero}; + +#[inline] +pub(super) fn mac_with_carry( + a: BigDigit, + b: BigDigit, + c: BigDigit, + acc: &mut DoubleBigDigit, +) -> BigDigit { + *acc += DoubleBigDigit::from(a); + *acc += DoubleBigDigit::from(b) * DoubleBigDigit::from(c); + let lo = *acc as BigDigit; + *acc >>= big_digit::BITS; + lo +} + +#[inline] +fn mul_with_carry(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit { + *acc += DoubleBigDigit::from(a) * DoubleBigDigit::from(b); + let lo = *acc as BigDigit; + *acc >>= big_digit::BITS; + lo +} + +/// Three argument multiply accumulate: +/// acc += b * c +fn mac_digit(acc: &mut [BigDigit], b: &[BigDigit], c: BigDigit) { + if c == 0 { + return; + } + + let mut carry = 0; + let (a_lo, a_hi) = acc.split_at_mut(b.len()); + + for (a, &b) in a_lo.iter_mut().zip(b) { + *a = mac_with_carry(*a, b, c, &mut carry); + } + + let (carry_hi, carry_lo) = big_digit::from_doublebigdigit(carry); + + let final_carry = if carry_hi == 0 { + __add2(a_hi, &[carry_lo]) + } else { + __add2(a_hi, &[carry_hi, carry_lo]) + }; + assert_eq!(final_carry, 0, "carry overflow during multiplication!"); +} + +fn bigint_from_slice(slice: &[BigDigit]) -> BigInt { + BigInt::from(biguint_from_vec(slice.to_vec())) +} + +/// Three argument multiply accumulate: +/// acc += b * c +#[allow(clippy::many_single_char_names)] +fn mac3(acc: &mut [BigDigit], b: &[BigDigit], c: &[BigDigit]) { + let (x, y) = if b.len() < c.len() { (b, c) } else { (c, b) }; + + // We use three algorithms for different input sizes. + // + // - For small inputs, long multiplication is fastest. + // - Next we use Karatsuba multiplication (Toom-2), which we have optimized + // to avoid unnecessary allocations for intermediate values. + // - For the largest inputs we use Toom-3, which better optimizes the + // number of operations, but uses more temporary allocations. + // + // The thresholds are somewhat arbitrary, chosen by evaluating the results + // of `cargo bench --bench bigint multiply`. 
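// A self-contained sketch of the long-multiplication branch below, assuming u8 "digits"
// and a u16 carry in place of BigDigit/DoubleBigDigit; `long_mul` is a hypothetical
// helper for illustration only.
fn long_mul(x: &[u8], y: &[u8]) -> Vec<u8> {
    let mut acc = vec![0u8; x.len() + y.len()];
    for (i, &xi) in x.iter().enumerate() {
        // One row of the schoolbook method: acc[i..] += xi * y, propagating the carry.
        let mut carry = 0u16;
        for (a, &yj) in acc[i..].iter_mut().zip(y) {
            carry += u16::from(*a) + u16::from(xi) * u16::from(yj);
            *a = carry as u8;
            carry >>= 8;
        }
        acc[i + y.len()] = carry as u8;
    }
    acc
}

#[test]
fn long_mul_example() {
    // 0x01FF * 0x02 = 0x03FE, digits little-endian.
    assert_eq!(long_mul(&[0xFF, 0x01], &[0x02]), vec![0xFE, 0x03, 0x00]);
}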
+ + if x.len() <= 32 { + // Long multiplication: + for (i, xi) in x.iter().enumerate() { + mac_digit(&mut acc[i..], y, *xi); + } + } else if x.len() <= 256 { + // Karatsuba multiplication: + // + // The idea is that we break x and y up into two smaller numbers that each have about half + // as many digits, like so (note that multiplying by b is just a shift): + // + // x = x0 + x1 * b + // y = y0 + y1 * b + // + // With some algebra, we can compute x * y with three smaller products, where the inputs to + // each of the smaller products have only about half as many digits as x and y: + // + // x * y = (x0 + x1 * b) * (y0 + y1 * b) + // + // x * y = x0 * y0 + // + x0 * y1 * b + // + x1 * y0 * b + // + x1 * y1 * b^2 + // + // Let p0 = x0 * y0 and p2 = x1 * y1: + // + // x * y = p0 + // + (x0 * y1 + x1 * y0) * b + // + p2 * b^2 + // + // The real trick is that middle term: + // + // x0 * y1 + x1 * y0 + // + // = x0 * y1 + x1 * y0 - p0 + p0 - p2 + p2 + // + // = x0 * y1 + x1 * y0 - x0 * y0 - x1 * y1 + p0 + p2 + // + // Now we complete the square: + // + // = -(x0 * y0 - x0 * y1 - x1 * y0 + x1 * y1) + p0 + p2 + // + // = -((x1 - x0) * (y1 - y0)) + p0 + p2 + // + // Let p1 = (x1 - x0) * (y1 - y0), and substitute back into our original formula: + // + // x * y = p0 + // + (p0 + p2 - p1) * b + // + p2 * b^2 + // + // Where the three intermediate products are: + // + // p0 = x0 * y0 + // p1 = (x1 - x0) * (y1 - y0) + // p2 = x1 * y1 + // + // In doing the computation, we take great care to avoid unnecessary temporary variables + // (since creating a BigUint requires a heap allocation): thus, we rearrange the formula a + // bit so we can use the same temporary variable for all the intermediate products: + // + // x * y = p2 * b^2 + p2 * b + // + p0 * b + p0 + // - p1 * b + // + // The other trick we use is instead of doing explicit shifts, we slice acc at the + // appropriate offset when doing the add. + + // When x is smaller than y, it's significantly faster to pick b such that x is split in + // half, not y: + let b = x.len() / 2; + let (x0, x1) = x.split_at(b); + let (y0, y1) = y.split_at(b); + + // We reuse the same BigUint for all the intermediate multiplies and have to size p + // appropriately here: x1.len() >= x0.len and y1.len() >= y0.len(): + let len = x1.len() + y1.len() + 1; + let mut p = BigUint { data: vec![0; len] }; + + // p2 = x1 * y1 + mac3(&mut p.data[..], x1, y1); + + // Not required, but the adds go faster if we drop any unneeded 0s from the end: + p.normalize(); + + add2(&mut acc[b..], &p.data[..]); + add2(&mut acc[b * 2..], &p.data[..]); + + // Zero out p before the next multiply: + p.data.truncate(0); + p.data.resize(len, 0); + + // p0 = x0 * y0 + mac3(&mut p.data[..], x0, y0); + p.normalize(); + + add2(&mut acc[..], &p.data[..]); + add2(&mut acc[b..], &p.data[..]); + + // p1 = (x1 - x0) * (y1 - y0) + // We do this one last, since it may be negative and acc can't ever be negative: + let (j0_sign, j0) = sub_sign(x1, x0); + let (j1_sign, j1) = sub_sign(y1, y0); + + match j0_sign * j1_sign { + Plus => { + p.data.truncate(0); + p.data.resize(len, 0); + + mac3(&mut p.data[..], &j0.data[..], &j1.data[..]); + p.normalize(); + + sub2(&mut acc[b..], &p.data[..]); + } + Minus => { + mac3(&mut acc[b..], &j0.data[..], &j1.data[..]); + } + NoSign => (), + } + } else { + // Toom-3 multiplication: + // + // Toom-3 is like Karatsuba above, but dividing the inputs into three parts. + // Both are instances of Toom-Cook, using `k=3` and `k=2` respectively. 
+ // + // The general idea is to treat the large integers digits as + // polynomials of a certain degree and determine the coefficients/digits + // of the product of the two via interpolation of the polynomial product. + let i = y.len() / 3 + 1; + + let x0_len = Ord::min(x.len(), i); + let x1_len = Ord::min(x.len() - x0_len, i); + + let y0_len = i; + let y1_len = Ord::min(y.len() - y0_len, i); + + // Break x and y into three parts, representating an order two polynomial. + // t is chosen to be the size of a digit so we can use faster shifts + // in place of multiplications. + // + // x(t) = x2*t^2 + x1*t + x0 + let x0 = bigint_from_slice(&x[..x0_len]); + let x1 = bigint_from_slice(&x[x0_len..x0_len + x1_len]); + let x2 = bigint_from_slice(&x[x0_len + x1_len..]); + + // y(t) = y2*t^2 + y1*t + y0 + let y0 = bigint_from_slice(&y[..y0_len]); + let y1 = bigint_from_slice(&y[y0_len..y0_len + y1_len]); + let y2 = bigint_from_slice(&y[y0_len + y1_len..]); + + // Let w(t) = x(t) * y(t) + // + // This gives us the following order-4 polynomial. + // + // w(t) = w4*t^4 + w3*t^3 + w2*t^2 + w1*t + w0 + // + // We need to find the coefficients w4, w3, w2, w1 and w0. Instead + // of simply multiplying the x and y in total, we can evaluate w + // at 5 points. An n-degree polynomial is uniquely identified by (n + 1) + // points. + // + // It is arbitrary as to what points we evaluate w at but we use the + // following. + // + // w(t) at t = 0, 1, -1, -2 and inf + // + // The values for w(t) in terms of x(t)*y(t) at these points are: + // + // let a = w(0) = x0 * y0 + // let b = w(1) = (x2 + x1 + x0) * (y2 + y1 + y0) + // let c = w(-1) = (x2 - x1 + x0) * (y2 - y1 + y0) + // let d = w(-2) = (4*x2 - 2*x1 + x0) * (4*y2 - 2*y1 + y0) + // let e = w(inf) = x2 * y2 as t -> inf + + // x0 + x2, avoiding temporaries + let p = &x0 + &x2; + + // y0 + y2, avoiding temporaries + let q = &y0 + &y2; + + // x2 - x1 + x0, avoiding temporaries + let p2 = &p - &x1; + + // y2 - y1 + y0, avoiding temporaries + let q2 = &q - &y1; + + // w(0) + let r0 = &x0 * &y0; + + // w(inf) + let r4 = &x2 * &y2; + + // w(1) + let r1 = (p + x1) * (q + y1); + + // w(-1) + let r2 = &p2 * &q2; + + // w(-2) + let r3 = ((p2 + x2) * 2 - x0) * ((q2 + y2) * 2 - y0); + + // Evaluating these points gives us the following system of linear equations. + // + // 0 0 0 0 1 | a + // 1 1 1 1 1 | b + // 1 -1 1 -1 1 | c + // 16 -8 4 -2 1 | d + // 1 0 0 0 0 | e + // + // The solved equation (after gaussian elimination or similar) + // in terms of its coefficients: + // + // w0 = w(0) + // w1 = w(0)/2 + w(1)/3 - w(-1) + w(2)/6 - 2*w(inf) + // w2 = -w(0) + w(1)/2 + w(-1)/2 - w(inf) + // w3 = -w(0)/2 + w(1)/6 + w(-1)/2 - w(1)/6 + // w4 = w(inf) + // + // This particular sequence is given by Bodrato and is an interpolation + // of the above equations. + let mut comp3: BigInt = (r3 - &r1) / 3; + let mut comp1: BigInt = (r1 - &r2) / 2; + let mut comp2: BigInt = r2 - &r0; + comp3 = (&comp2 - comp3) / 2 + &r4 * 2; + comp2 += &comp1 - &r4; + comp1 -= &comp3; + + // Recomposition. The coefficients of the polynomial are now known. + // + // Evaluate at w(t) where t is our given base to get the result. 
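// A plain-i64 check of the evaluation/interpolation sequence above, assuming the toy
// polynomials x(t) = 2 + 3t + 4t^2 and y(t) = 5 + 6t + 7t^2; it mirrors the Bodrato
// steps and confirms they recover the coefficients of x(t) * y(t).
#[test]
fn toom3_interpolation_example() {
    let (x0, x1, x2): (i64, i64, i64) = (2, 3, 4);
    let (y0, y1, y2): (i64, i64, i64) = (5, 6, 7);

    let r0 = x0 * y0;                                         // w(0)
    let r1 = (x2 + x1 + x0) * (y2 + y1 + y0);                 // w(1)
    let r2 = (x2 - x1 + x0) * (y2 - y1 + y0);                 // w(-1)
    let r3 = (4 * x2 - 2 * x1 + x0) * (4 * y2 - 2 * y1 + y0); // w(-2)
    let r4 = x2 * y2;                                         // w(inf)

    let mut comp3 = (r3 - r1) / 3;
    let mut comp1 = (r1 - r2) / 2;
    let mut comp2 = r2 - r0;
    comp3 = (comp2 - comp3) / 2 + 2 * r4;
    comp2 += comp1 - r4;
    comp1 -= comp3;

    // Compare against the coefficients of x(t) * y(t) computed directly.
    assert_eq!(
        [r0, comp1, comp2, comp3, r4],
        [
            x0 * y0,
            x0 * y1 + x1 * y0,
            x0 * y2 + x1 * y1 + x2 * y0,
            x1 * y2 + x2 * y1,
            x2 * y2,
        ]
    );
}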
+ let bits = u64::from(big_digit::BITS) * i as u64; + let result = r0 + + (comp1 << bits) + + (comp2 << (2 * bits)) + + (comp3 << (3 * bits)) + + (r4 << (4 * bits)); + let result_pos = result.to_biguint().unwrap(); + add2(&mut acc[..], &result_pos.data); + } +} + +fn mul3(x: &[BigDigit], y: &[BigDigit]) -> BigUint { + let len = x.len() + y.len() + 1; + let mut prod = BigUint { data: vec![0; len] }; + + mac3(&mut prod.data[..], x, y); + prod.normalized() +} + +fn scalar_mul(a: &mut [BigDigit], b: BigDigit) -> BigDigit { + let mut carry = 0; + for a in a.iter_mut() { + *a = mul_with_carry(*a, b, &mut carry); + } + carry as BigDigit +} + +fn sub_sign(mut a: &[BigDigit], mut b: &[BigDigit]) -> (Sign, BigUint) { + // Normalize: + a = &a[..a.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)]; + b = &b[..b.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)]; + + match cmp_slice(a, b) { + Ordering::Greater => { + let mut a = a.to_vec(); + sub2(&mut a, b); + (Plus, biguint_from_vec(a)) + } + Ordering::Less => { + let mut b = b.to_vec(); + sub2(&mut b, a); + (Minus, biguint_from_vec(b)) + } + Ordering::Equal => (NoSign, Zero::zero()), + } +} + +forward_all_binop_to_ref_ref!(impl Mul for BigUint, mul); +forward_val_assign!(impl MulAssign for BigUint, mul_assign); + +impl<'a, 'b> Mul<&'b BigUint> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn mul(self, other: &BigUint) -> BigUint { + mul3(&self.data[..], &other.data[..]) + } +} +impl<'a> MulAssign<&'a BigUint> for BigUint { + #[inline] + fn mul_assign(&mut self, other: &'a BigUint) { + *self = &*self * other + } +} + +promote_unsigned_scalars!(impl Mul for BigUint, mul); +promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); + +impl Mul for BigUint { + type Output = BigUint; + + #[inline] + fn mul(mut self, other: u32) -> BigUint { + self *= other; + self + } +} +impl MulAssign for BigUint { + #[inline] + fn mul_assign(&mut self, other: u32) { + if other == 0 { + self.data.clear(); + } else { + let carry = scalar_mul(&mut self.data[..], other as BigDigit); + if carry != 0 { + self.data.push(carry); + } + } + } +} + +impl Mul for BigUint { + type Output = BigUint; + + #[inline] + fn mul(mut self, other: u64) -> BigUint { + self *= other; + self + } +} +impl MulAssign for BigUint { + #[cfg(not(u64_digit))] + #[inline] + fn mul_assign(&mut self, other: u64) { + if other == 0 { + self.data.clear(); + } else if other <= u64::from(BigDigit::max_value()) { + *self *= other as BigDigit + } else { + let (hi, lo) = big_digit::from_doublebigdigit(other); + *self = mul3(&self.data[..], &[lo, hi]) + } + } + + #[cfg(u64_digit)] + #[inline] + fn mul_assign(&mut self, other: u64) { + if other == 0 { + self.data.clear(); + } else { + let carry = scalar_mul(&mut self.data[..], other as BigDigit); + if carry != 0 { + self.data.push(carry); + } + } + } +} + +impl Mul for BigUint { + type Output = BigUint; + + #[inline] + fn mul(mut self, other: u128) -> BigUint { + self *= other; + self + } +} + +impl MulAssign for BigUint { + #[cfg(not(u64_digit))] + #[inline] + fn mul_assign(&mut self, other: u128) { + if other == 0 { + self.data.clear(); + } else if other <= u128::from(BigDigit::max_value()) { + *self *= other as BigDigit + } else { + let (a, b, c, d) = u32_from_u128(other); + *self = 
mul3(&self.data[..], &[d, c, b, a]) + } + } + + #[cfg(u64_digit)] + #[inline] + fn mul_assign(&mut self, other: u128) { + if other == 0 { + self.data.clear(); + } else if other <= BigDigit::max_value() as u128 { + *self *= other as BigDigit + } else { + let (hi, lo) = big_digit::from_doublebigdigit(other); + *self = mul3(&self.data[..], &[lo, hi]) + } + } +} + +impl CheckedMul for BigUint { + #[inline] + fn checked_mul(&self, v: &BigUint) -> Option { + Some(self.mul(v)) + } +} + +impl_product_iter_type!(BigUint); + +#[test] +fn test_sub_sign() { + use crate::BigInt; + use num_traits::Num; + + fn sub_sign_i(a: &[BigDigit], b: &[BigDigit]) -> BigInt { + let (sign, val) = sub_sign(a, b); + BigInt::from_biguint(sign, val) + } + + let a = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap(); + let b = BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap(); + let a_i = BigInt::from(a.clone()); + let b_i = BigInt::from(b.clone()); + + assert_eq!(sub_sign_i(&a.data[..], &b.data[..]), &a_i - &b_i); + assert_eq!(sub_sign_i(&b.data[..], &a.data[..]), &b_i - &a_i); +} diff --git a/src/biguint/power.rs b/src/biguint/power.rs new file mode 100644 index 00000000..44b38146 --- /dev/null +++ b/src/biguint/power.rs @@ -0,0 +1,257 @@ +use super::monty::monty_modpow; +use super::BigUint; + +use crate::big_digit::{self, BigDigit}; + +use num_integer::Integer; +use num_traits::{One, Pow, ToPrimitive, Zero}; + +impl<'b> Pow<&'b BigUint> for BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &BigUint) -> BigUint { + if self.is_one() || exp.is_zero() { + BigUint::one() + } else if self.is_zero() { + BigUint::zero() + } else if let Some(exp) = exp.to_u64() { + self.pow(exp) + } else if let Some(exp) = exp.to_u128() { + self.pow(exp) + } else { + // At this point, `self >= 2` and `exp >= 2¹²⁸`. The smallest possible result given + // `2.pow(2¹²⁸)` would require far more memory than 64-bit targets can address! + panic!("memory overflow") + } + } +} + +impl Pow for BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: BigUint) -> BigUint { + Pow::pow(self, &exp) + } +} + +impl<'a, 'b> Pow<&'b BigUint> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &BigUint) -> BigUint { + if self.is_one() || exp.is_zero() { + BigUint::one() + } else if self.is_zero() { + BigUint::zero() + } else { + self.clone().pow(exp) + } + } +} + +impl<'a> Pow for &'a BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: BigUint) -> BigUint { + Pow::pow(self, &exp) + } +} + +macro_rules! 
pow_impl { + ($T:ty) => { + impl Pow<$T> for BigUint { + type Output = BigUint; + + fn pow(self, mut exp: $T) -> BigUint { + if exp == 0 { + return BigUint::one(); + } + let mut base = self; + + while exp & 1 == 0 { + base = &base * &base; + exp >>= 1; + } + + if exp == 1 { + return base; + } + + let mut acc = base.clone(); + while exp > 1 { + exp >>= 1; + base = &base * &base; + if exp & 1 == 1 { + acc = &acc * &base; + } + } + acc + } + } + + impl<'b> Pow<&'b $T> for BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &$T) -> BigUint { + Pow::pow(self, *exp) + } + } + + impl<'a> Pow<$T> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: $T) -> BigUint { + if exp == 0 { + return BigUint::one(); + } + Pow::pow(self.clone(), exp) + } + } + + impl<'a, 'b> Pow<&'b $T> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &$T) -> BigUint { + Pow::pow(self, *exp) + } + } + }; +} + +pow_impl!(u8); +pow_impl!(u16); +pow_impl!(u32); +pow_impl!(u64); +pow_impl!(usize); +pow_impl!(u128); + +pub(super) fn modpow(x: &BigUint, exponent: &BigUint, modulus: &BigUint) -> BigUint { + assert!( + !modulus.is_zero(), + "attempt to calculate with zero modulus!" + ); + + if modulus.is_odd() { + // For an odd modulus, we can use Montgomery multiplication in base 2^32. + monty_modpow(x, exponent, modulus) + } else { + // Otherwise do basically the same as `num::pow`, but with a modulus. + plain_modpow(x, &exponent.data, modulus) + } +} + +fn plain_modpow(base: &BigUint, exp_data: &[BigDigit], modulus: &BigUint) -> BigUint { + assert!( + !modulus.is_zero(), + "attempt to calculate with zero modulus!" + ); + + let i = match exp_data.iter().position(|&r| r != 0) { + None => return BigUint::one(), + Some(i) => i, + }; + + let mut base = base % modulus; + for _ in 0..i { + for _ in 0..big_digit::BITS { + base = &base * &base % modulus; + } + } + + let mut r = exp_data[i]; + let mut b = 0u8; + while r.is_even() { + base = &base * &base % modulus; + r >>= 1; + b += 1; + } + + let mut exp_iter = exp_data[i + 1..].iter(); + if exp_iter.len() == 0 && r.is_one() { + return base; + } + + let mut acc = base.clone(); + r >>= 1; + b += 1; + + { + let mut unit = |exp_is_odd| { + base = &base * &base % modulus; + if exp_is_odd { + acc = &acc * &base % modulus; + } + }; + + if let Some(&last) = exp_iter.next_back() { + // consume exp_data[i] + for _ in b..big_digit::BITS { + unit(r.is_odd()); + r >>= 1; + } + + // consume all other digits before the last + for &r in exp_iter { + let mut r = r; + for _ in 0..big_digit::BITS { + unit(r.is_odd()); + r >>= 1; + } + } + r = last; + } + + debug_assert_ne!(r, 0); + while !r.is_zero() { + unit(r.is_odd()); + r >>= 1; + } + } + acc +} + +#[test] +fn test_plain_modpow() { + let two = &BigUint::from(2u32); + let modulus = BigUint::from(0x1100u32); + + let exp = vec![0, 0b1]; + assert_eq!( + two.pow(0b1_00000000_u32) % &modulus, + plain_modpow(&two, &exp, &modulus) + ); + let exp = vec![0, 0b10]; + assert_eq!( + two.pow(0b10_00000000_u32) % &modulus, + plain_modpow(&two, &exp, &modulus) + ); + let exp = vec![0, 0b110010]; + assert_eq!( + two.pow(0b110010_00000000_u32) % &modulus, + plain_modpow(&two, &exp, &modulus) + ); + let exp = vec![0b1, 0b1]; + assert_eq!( + two.pow(0b1_00000001_u32) % &modulus, + plain_modpow(&two, &exp, &modulus) + ); + let exp = vec![0b1100, 0, 0b1]; + assert_eq!( + two.pow(0b1_00000000_00001100_u32) % &modulus, + plain_modpow(&two, &exp, &modulus) + ); +} + +#[test] +fn test_pow_biguint() { + let base 
= BigUint::from(5u8); + let exponent = BigUint::from(3u8); + + assert_eq!(BigUint::from(125u8), base.pow(exponent)); +} diff --git a/src/biguint/serde.rs b/src/biguint/serde.rs new file mode 100644 index 00000000..573b0a7b --- /dev/null +++ b/src/biguint/serde.rs @@ -0,0 +1,108 @@ +use super::{biguint_from_vec, BigUint}; + +use crate::std_alloc::Vec; + +use core::fmt; +use serde::de::{SeqAccess, Visitor}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +impl Serialize for BigUint { + #[cfg(not(u64_digit))] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Note: do not change the serialization format, or it may break forward + // and backward compatibility of serialized data! If we ever change the + // internal representation, we should still serialize in base-`u32`. + let data: &[u32] = &self.data; + data.serialize(serializer) + } + + #[cfg(u64_digit)] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use serde::ser::SerializeSeq; + + if let Some((&last, data)) = self.data.split_last() { + let last_lo = last as u32; + let last_hi = (last >> 32) as u32; + let u32_len = data.len() * 2 + 1 + (last_hi != 0) as usize; + let mut seq = serializer.serialize_seq(Some(u32_len))?; + for &x in data { + seq.serialize_element(&(x as u32))?; + seq.serialize_element(&((x >> 32) as u32))?; + } + seq.serialize_element(&last_lo)?; + if last_hi != 0 { + seq.serialize_element(&last_hi)?; + } + seq.end() + } else { + let data: &[u32] = &[]; + data.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for BigUint { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_seq(U32Visitor) + } +} + +struct U32Visitor; + +impl<'de> Visitor<'de> for U32Visitor { + type Value = BigUint; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a sequence of unsigned 32-bit numbers") + } + + #[cfg(not(u64_digit))] + fn visit_seq(self, mut seq: S) -> Result + where + S: SeqAccess<'de>, + { + let len = seq.size_hint().unwrap_or(0); + let mut data = Vec::with_capacity(len); + + while let Some(value) = seq.next_element::()? { + data.push(value); + } + + Ok(biguint_from_vec(data)) + } + + #[cfg(u64_digit)] + fn visit_seq(self, mut seq: S) -> Result + where + S: SeqAccess<'de>, + { + use crate::big_digit::BigDigit; + use num_integer::Integer; + + let u32_len = seq.size_hint().unwrap_or(0); + let len = u32_len.div_ceil(&2); + let mut data = Vec::with_capacity(len); + + while let Some(lo) = seq.next_element::()? { + let mut value = BigDigit::from(lo); + if let Some(hi) = seq.next_element::()? 
{ + value |= BigDigit::from(hi) << 32; + data.push(value); + } else { + data.push(value); + break; + } + } + + Ok(biguint_from_vec(data)) + } +} diff --git a/src/biguint/shift.rs b/src/biguint/shift.rs new file mode 100644 index 00000000..05964d2a --- /dev/null +++ b/src/biguint/shift.rs @@ -0,0 +1,172 @@ +use super::{biguint_from_vec, BigUint}; + +use crate::big_digit; +use crate::std_alloc::{Cow, Vec}; + +use core::mem; +use core::ops::{Shl, ShlAssign, Shr, ShrAssign}; +use num_traits::{PrimInt, Zero}; + +#[inline] +fn biguint_shl(n: Cow<'_, BigUint>, shift: T) -> BigUint { + if shift < T::zero() { + panic!("attempt to shift left with negative"); + } + if n.is_zero() { + return n.into_owned(); + } + let bits = T::from(big_digit::BITS).unwrap(); + let digits = (shift / bits).to_usize().expect("capacity overflow"); + let shift = (shift % bits).to_u8().unwrap(); + biguint_shl2(n, digits, shift) +} + +fn biguint_shl2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint { + let mut data = match digits { + 0 => n.into_owned().data, + _ => { + let len = digits.saturating_add(n.data.len() + 1); + let mut data = Vec::with_capacity(len); + data.resize(digits, 0); + data.extend(n.data.iter()); + data + } + }; + + if shift > 0 { + let mut carry = 0; + let carry_shift = big_digit::BITS as u8 - shift; + for elem in data[digits..].iter_mut() { + let new_carry = *elem >> carry_shift; + *elem = (*elem << shift) | carry; + carry = new_carry; + } + if carry != 0 { + data.push(carry); + } + } + + biguint_from_vec(data) +} + +#[inline] +fn biguint_shr(n: Cow<'_, BigUint>, shift: T) -> BigUint { + if shift < T::zero() { + panic!("attempt to shift right with negative"); + } + if n.is_zero() { + return n.into_owned(); + } + let bits = T::from(big_digit::BITS).unwrap(); + let digits = (shift / bits).to_usize().unwrap_or(core::usize::MAX); + let shift = (shift % bits).to_u8().unwrap(); + biguint_shr2(n, digits, shift) +} + +fn biguint_shr2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint { + if digits >= n.data.len() { + let mut n = n.into_owned(); + n.set_zero(); + return n; + } + let mut data = match n { + Cow::Borrowed(n) => n.data[digits..].to_vec(), + Cow::Owned(mut n) => { + n.data.drain(..digits); + n.data + } + }; + + if shift > 0 { + let mut borrow = 0; + let borrow_shift = big_digit::BITS as u8 - shift; + for elem in data.iter_mut().rev() { + let new_borrow = *elem << borrow_shift; + *elem = (*elem >> shift) | borrow; + borrow = new_borrow; + } + } + + biguint_from_vec(data) +} + +macro_rules! 
diff --git a/src/biguint/subtraction.rs b/src/biguint/subtraction.rs
new file mode 100644
index 00000000..67005175
--- /dev/null
+++ b/src/biguint/subtraction.rs
@@ -0,0 +1,312 @@
+#[cfg(not(u64_digit))]
+use super::u32_from_u128;
+use super::BigUint;
+
+use crate::big_digit::{self, BigDigit};
+use crate::UsizePromotion;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::ops::{Sub, SubAssign};
+use num_traits::{CheckedSub, Zero};
+
+#[cfg(all(use_addcarry, target_arch = "x86_64"))]
+use core::arch::x86_64 as arch;
+
+#[cfg(all(use_addcarry, target_arch = "x86"))]
+use core::arch::x86 as arch;
+
+// Subtract with borrow:
+#[cfg(all(use_addcarry, u64_digit))]
+#[inline]
+fn sbb(borrow: u8, a: u64, b: u64, out: &mut u64) -> u8 {
+    // Safety: There are absolutely no safety concerns with calling `_subborrow_u64`.
+    // It's just unsafe for API consistency with other intrinsics.
+    unsafe { arch::_subborrow_u64(borrow, a, b, out) }
+}
+
+#[cfg(all(use_addcarry, not(u64_digit)))]
+#[inline]
+fn sbb(borrow: u8, a: u32, b: u32, out: &mut u32) -> u8 {
+    // Safety: There are absolutely no safety concerns with calling `_subborrow_u32`.
+    // It's just unsafe for API consistency with other intrinsics.
+    unsafe { arch::_subborrow_u32(borrow, a, b, out) }
+}
+
+// fallback for environments where we don't have a subborrow intrinsic
+#[cfg(not(use_addcarry))]
+#[inline]
+fn sbb(borrow: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 {
+    use crate::big_digit::SignedDoubleBigDigit;
+
+    let difference = SignedDoubleBigDigit::from(a)
+        - SignedDoubleBigDigit::from(b)
+        - SignedDoubleBigDigit::from(borrow);
+    *out = difference as BigDigit;
+    u8::from(difference < 0)
+}
+
+pub(super) fn sub2(a: &mut [BigDigit], b: &[BigDigit]) {
+    let mut borrow = 0;
+
+    let len = Ord::min(a.len(), b.len());
+    let (a_lo, a_hi) = a.split_at_mut(len);
+    let (b_lo, b_hi) = b.split_at(len);
+
+    for (a, b) in a_lo.iter_mut().zip(b_lo) {
+        borrow = sbb(borrow, *a, *b, a);
+    }
+
+    if borrow != 0 {
+        for a in a_hi {
+            borrow = sbb(borrow, *a, 0, a);
+            if borrow == 0 {
+                break;
+            }
+        }
+    }
+
+    // note: we're _required_ to fail on underflow
+    assert!(
+        borrow == 0 && b_hi.iter().all(|x| *x == 0),
+        "Cannot subtract b from a because b is larger than a."
+    );
+}
+
+// Only for the Sub impl. `a` and `b` must have same length.
+#[inline]
+fn __sub2rev(a: &[BigDigit], b: &mut [BigDigit]) -> u8 {
+    debug_assert!(b.len() == a.len());
+
+    let mut borrow = 0;
+
+    for (ai, bi) in a.iter().zip(b) {
+        borrow = sbb(borrow, *ai, *bi, bi);
+    }
+
+    borrow
+}
+
+fn sub2rev(a: &[BigDigit], b: &mut [BigDigit]) {
+    debug_assert!(b.len() >= a.len());
+
+    let len = Ord::min(a.len(), b.len());
+    let (a_lo, a_hi) = a.split_at(len);
+    let (b_lo, b_hi) = b.split_at_mut(len);
+
+    let borrow = __sub2rev(a_lo, b_lo);
+
+    assert!(a_hi.is_empty());
+
+    // note: we're _required_ to fail on underflow
+    assert!(
+        borrow == 0 && b_hi.iter().all(|x| *x == 0),
+        "Cannot subtract b from a because b is larger than a."
+    );
+}
+
+forward_val_val_binop!(impl Sub for BigUint, sub);
+forward_ref_ref_binop!(impl Sub for BigUint, sub);
+forward_val_assign!(impl SubAssign for BigUint, sub_assign);
+
+impl<'a> Sub<&'a BigUint> for BigUint {
+    type Output = BigUint;
+
+    fn sub(mut self, other: &BigUint) -> BigUint {
+        self -= other;
+        self
+    }
+}
+impl<'a> SubAssign<&'a BigUint> for BigUint {
+    fn sub_assign(&mut self, other: &'a BigUint) {
+        sub2(&mut self.data[..], &other.data[..]);
+        self.normalize();
+    }
+}
+
+impl<'a> Sub<BigUint> for &'a BigUint {
+    type Output = BigUint;
+
+    fn sub(self, mut other: BigUint) -> BigUint {
+        let other_len = other.data.len();
+        if other_len < self.data.len() {
+            let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data);
+            other.data.extend_from_slice(&self.data[other_len..]);
+            if lo_borrow != 0 {
+                sub2(&mut other.data[other_len..], &[1])
+            }
+        } else {
+            sub2rev(&self.data[..], &mut other.data[..]);
+        }
+        other.normalized()
+    }
+}
+
+promote_unsigned_scalars!(impl Sub for BigUint, sub);
+promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign);
+forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigUint, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigUint, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigUint, sub);
+
+impl Sub<u32> for BigUint {
+    type Output = BigUint;
+
+    #[inline]
+    fn sub(mut self, other: u32) -> BigUint {
+        self -= other;
+        self
+    }
+}
+
+impl SubAssign<u32> for BigUint {
+    fn sub_assign(&mut self, other: u32) {
+        sub2(&mut self.data[..], &[other as BigDigit]);
+        self.normalize();
+    }
+}
+
+impl Sub<BigUint> for u32 {
+    type Output = BigUint;
+
+    #[cfg(not(u64_digit))]
+    #[inline]
+    fn sub(self, mut other: BigUint) -> BigUint {
+        if other.data.len() == 0 {
+            other.data.push(self);
+        } else {
+            sub2rev(&[self], &mut other.data[..]);
+        }
+        other.normalized()
+    }
+
+    #[cfg(u64_digit)]
+    #[inline]
+    fn sub(self, mut other: BigUint) -> BigUint {
+        if other.data.is_empty() {
+            other.data.push(self as BigDigit);
+        } else {
+            sub2rev(&[self as BigDigit], &mut other.data[..]);
+        }
+        other.normalized()
+    }
+}
+
+impl Sub<u64> for BigUint {
+    type Output = BigUint;
+
+    #[inline]
+    fn sub(mut self, other: u64) -> BigUint {
+        self -= other;
+        self
+    }
+}
+
+impl SubAssign<u64> for BigUint {
+    #[cfg(not(u64_digit))]
+    #[inline]
+    fn sub_assign(&mut self, other: u64) {
+        let (hi, lo) = big_digit::from_doublebigdigit(other);
+        sub2(&mut self.data[..], &[lo, hi]);
+        self.normalize();
+    }
+
+    #[cfg(u64_digit)]
+    #[inline]
+    fn sub_assign(&mut self, other: u64) {
+        sub2(&mut self.data[..], &[other as BigDigit]);
+        self.normalize();
+    }
+}
+
+impl Sub<BigUint> for u64 {
+    type Output = BigUint;
+
+    #[cfg(not(u64_digit))]
+    #[inline]
+    fn sub(self, mut other: BigUint) -> BigUint {
+        while other.data.len() < 2 {
+            other.data.push(0);
+        }
+
+        let (hi, lo) = big_digit::from_doublebigdigit(self);
+        sub2rev(&[lo, hi], &mut other.data[..]);
+        other.normalized()
+    }
+
+    #[cfg(u64_digit)]
+    #[inline]
+    fn sub(self, mut other: BigUint) -> BigUint {
+        if other.data.is_empty() {
+            other.data.push(self);
+        } else {
+            sub2rev(&[self], &mut other.data[..]);
+        }
+        other.normalized()
+    }
+}
+
+impl Sub<u128> for BigUint {
+    type Output = BigUint;
+
+    #[inline]
+    fn sub(mut self, other: u128) -> BigUint {
+        self -= other;
+        self
+    }
+}
+
+impl SubAssign<u128> for BigUint {
+    #[cfg(not(u64_digit))]
+    #[inline]
+    fn sub_assign(&mut self, other: u128) {
+        let (a, b, c, d) = u32_from_u128(other);
+        sub2(&mut self.data[..], &[d, c, b, a]);
+        self.normalize();
+    }
+
+    #[cfg(u64_digit)]
+    #[inline]
+    fn sub_assign(&mut self, other: u128) {
+        let (hi, lo) = big_digit::from_doublebigdigit(other);
+        sub2(&mut self.data[..], &[lo, hi]);
+        self.normalize();
+    }
+}
+
+impl Sub<BigUint> for u128 {
+    type Output = BigUint;
+
+    #[cfg(not(u64_digit))]
+    #[inline]
+    fn sub(self, mut other: BigUint) -> BigUint {
+        while other.data.len() < 4 {
+            other.data.push(0);
+        }
+
+        let (a, b, c, d) = u32_from_u128(self);
+        sub2rev(&[d, c, b, a], &mut other.data[..]);
+        other.normalized()
+    }
+
+    #[cfg(u64_digit)]
+    #[inline]
+    fn sub(self, mut other: BigUint) -> BigUint {
+        while other.data.len() < 2 {
+            other.data.push(0);
+        }
+
+        let (hi, lo) = big_digit::from_doublebigdigit(self);
+        sub2rev(&[lo, hi], &mut other.data[..]);
+        other.normalized()
+    }
+}
+
+impl CheckedSub for BigUint {
+    #[inline]
+    fn checked_sub(&self, v: &BigUint) -> Option<BigUint> {
+        match self.cmp(v) {
+            Less => None,
+            Equal => Some(Zero::zero()),
+            Greater => Some(self.sub(v)),
+        }
+    }
+}
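Because `BigUint` is unsigned, the `Sub` impls above are required to panic on underflow, while `CheckedSub` returns `None` instead. A short sketch of that behaviour using the public API (not part of this diff):

```rust
use num_bigint::BigUint;
use num_traits::CheckedSub;

fn main() {
    let a = BigUint::from(300u32);
    let b = BigUint::from(42u32);

    // Ordinary subtraction works as long as the result stays non-negative.
    assert_eq!(&a - &b, BigUint::from(258u32));

    // `b - a` would trip the underflow assert and panic, so the checked
    // variant is the safe way to probe for it.
    assert_eq!(b.checked_sub(&a), None);
    assert_eq!(a.checked_sub(&b), Some(BigUint::from(258u32)));
}
```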