From 0d24cf4dc09c686af382afe58decb285f88f554d Mon Sep 17 00:00:00 2001
From: sebcrozet
Date: Sat, 20 Oct 2018 22:26:44 +0200
Subject: [PATCH 01/25] Run rustfmt.

---
 src/base/alias_slice.rs | 2 +-
 src/base/allocator.rs | 21 +-
 src/base/blas.rs | 13 +-
 src/base/cg.rs | 25 +-
 src/base/componentwise.rs | 2 +-
 src/base/constraint.rs | 13 +-
 src/base/coordinates.rs | 2 +-
 src/base/dimension.rs | 2 +-
 src/base/edition.rs | 6 +-
 src/base/iter.rs | 31 +-
 src/base/matrix_array.rs | 12 +-
 src/base/matrix_vec.rs | 32 +-
 src/base/mod.rs | 2 +-
 src/base/ops.rs | 21 +-
 src/base/scalar.rs | 2 +-
 src/base/storage.rs | 14 +-
 src/base/swizzle.rs | 5 +-
 src/geometry/isometry.rs | 29 +-
 src/geometry/isometry_alga.rs | 13 +-
 src/geometry/mod.rs | 44 +-
 src/geometry/point.rs | 11 +-
 src/geometry/point_alga.rs | 4 +-
 src/geometry/quaternion.rs | 13 +-
 src/geometry/quaternion_alga.rs | 16 +-
 src/geometry/quaternion_ops.rs | 9 +-
 src/geometry/rotation.rs | 11 +-
 src/geometry/rotation_alga.rs | 17 +-
 src/geometry/rotation_construction.rs | 4 +-
 src/geometry/rotation_ops.rs | 8 +-
 src/geometry/rotation_specialization.rs | 16 +-
 src/geometry/similarity.rs | 24 +-
 src/geometry/similarity_alga.rs | 12 +-
 src/geometry/similarity_ops.rs | 4 +-
 src/geometry/transform.rs | 29 +-
 src/geometry/transform_alga.rs | 10 +-
 src/geometry/transform_construction.rs | 4 +-
 src/geometry/transform_ops.rs | 9 +-
 src/geometry/translation.rs | 11 +-
 src/geometry/translation_alga.rs | 17 +-
 src/geometry/translation_ops.rs | 6 +-
 src/geometry/unit_complex_alga.rs | 15 +-
 src/geometry/unit_complex_construction.rs | 2 +-
 src/linalg/bidiagonal.rs | 21 +-
 src/linalg/cholesky.rs | 21 +-
 src/linalg/full_piv_lu.rs | 21 +-
 src/linalg/hessenberg.rs | 21 +-
 src/linalg/inverse.rs | 68 ++-
 src/linalg/lu.rs | 21 +-
 src/linalg/mod.rs | 30 +-
 src/linalg/permutation_sequence.rs | 21 +-
 src/linalg/qr.rs | 35 +-
 src/linalg/schur.rs | 57 ++-
 src/linalg/svd.rs | 30 +-
 src/linalg/symmetric_eigen.rs | 21 +-
 src/linalg/symmetric_tridiagonal.rs | 21 +-
 src/sparse/cs_matrix.rs | 485 ++++++++++++++++++++++
 src/sparse/mod.rs | 3 +
 tests/core/abomonation.rs | 12 +-
 tests/core/blas.rs | 2 +-
 tests/core/conversion.rs | 17 +-
 tests/core/mod.rs | 8 +-
 tests/core/serde.rs | 17 +-
 tests/geometry/isometry.rs | 6 +-
 tests/geometry/point.rs | 39 +-
 tests/geometry/projection.rs | 9 +-
 tests/geometry/quaternion.rs | 3 +-
 tests/geometry/rotation.rs | 4 +-
 tests/geometry/similarity.rs | 4 +-
 tests/geometry/unit_complex.rs | 2 +-
 tests/lib.rs | 4 +-
 tests/linalg/balancing.rs | 2 +-
 tests/linalg/bidiagonal.rs | 2 +-
 tests/linalg/cholesky.rs | 6 +-
 tests/linalg/eigen.rs | 83 ++--
 tests/linalg/hessenberg.rs | 5 +-
 tests/linalg/mod.rs | 14 +-
 tests/linalg/qr.rs | 3 +-
 tests/linalg/solve.rs | 2 +-
 tests/linalg/svd.rs | 314 +++++++++-----
 79 files changed, 1284 insertions(+), 693 deletions(-)
 create mode 100644 src/sparse/cs_matrix.rs
 create mode 100644 src/sparse/mod.rs

diff --git a/src/base/alias_slice.rs b/src/base/alias_slice.rs
index 145193448..1b368c8e1 100644
--- a/src/base/alias_slice.rs
+++ b/src/base/alias_slice.rs
@@ -1,6 +1,6 @@
-use base::Matrix;
 use base::dimension::{Dynamic, U1, U2, U3, U4, U5, U6};
 use base::matrix_slice::{SliceStorage, SliceStorageMut};
+use base::Matrix;
 
 /*
  *
diff --git a/src/base/allocator.rs b/src/base/allocator.rs
index 61dd71eb2..5b17c1831 100644
--- a/src/base/allocator.rs
+++ b/src/base/allocator.rs
@@ -2,10 +2,10 @@ use std::any::Any; -use base::{DefaultAllocator, Scalar}; use base::constraint::{SameNumberOfColumns, SameNumberOfRows,
ShapeConstraint}; use base::dimension::{Dim, U1}; use base::storage::ContiguousStorageMut; +use base::{DefaultAllocator, Scalar}; /// A matrix allocator of a memory buffer that may contain `R::to_usize() * C::to_usize()` /// elements of type `N`. @@ -33,8 +33,9 @@ pub trait Allocator: Any + Sized { /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × /// CFrom) elements to a smaller or larger size (RTo, CTo). -pub trait Reallocator - : Allocator + Allocator { +pub trait Reallocator: + Allocator + Allocator +{ /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer /// `buf`. Data stored by `buf` are linearly copied to the output: /// @@ -57,8 +58,8 @@ pub type SameShapeC = >:: // FIXME: Bad name. /// Restricts the given number of rows and columns to be respectively the same. -pub trait SameShapeAllocator - : Allocator + Allocator, SameShapeC> +pub trait SameShapeAllocator: + Allocator + Allocator, SameShapeC> where R1: Dim, R2: Dim, @@ -78,13 +79,12 @@ where N: Scalar, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, -{ -} +{} // XXX: Bad name. /// Restricts the given number of rows to be equal. -pub trait SameShapeVectorAllocator - : Allocator + Allocator> + SameShapeAllocator +pub trait SameShapeVectorAllocator: + Allocator + Allocator> + SameShapeAllocator where R1: Dim, R2: Dim, @@ -100,5 +100,4 @@ where N: Scalar, DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, -{ -} +{} diff --git a/src/base/blas.rs b/src/base/blas.rs index 497975190..b4a5cfbe3 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -14,7 +14,6 @@ use base::storage::{Storage, StorageMut}; use base::{DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector}; impl> Vector { - /// Computes the index of the vector component with the largest value. /// /// # Examples: @@ -685,13 +684,19 @@ where // We could use matrixmultiply for large statically-sized matrices but the performance // threshold to activate it would be different from SMALL_DIM because our code optimizes // better for statically-sized matrices. - let is_dynamic = R1::is::() || C1::is::() || R2::is::() - || C2::is::() || R3::is::() + let is_dynamic = R1::is::() + || C1::is::() + || R2::is::() + || C2::is::() + || R3::is::() || C3::is::(); // Threshold determined empirically. 
const SMALL_DIM: usize = 5; - if is_dynamic && nrows1 > SMALL_DIM && ncols1 > SMALL_DIM && nrows2 > SMALL_DIM + if is_dynamic + && nrows1 > SMALL_DIM + && ncols1 > SMALL_DIM + && nrows2 > SMALL_DIM && ncols2 > SMALL_DIM { if N::is::() { diff --git a/src/base/cg.rs b/src/base/cg.rs index 84fff1a56..39509f2f9 100644 --- a/src/base/cg.rs +++ b/src/base/cg.rs @@ -7,15 +7,18 @@ use num::One; -use base::{DefaultAllocator, Matrix3, Matrix4, MatrixN, Scalar, SquareMatrix, Unit, Vector, - Vector3, VectorN}; +use base::allocator::Allocator; use base::dimension::{DimName, DimNameDiff, DimNameSub, U1}; use base::storage::{Storage, StorageMut}; -use base::allocator::Allocator; -use geometry::{Isometry, IsometryMatrix3, Orthographic3, Perspective3, Point, Point3, Rotation2, - Rotation3}; - -use alga::general::{Ring, Real}; +use base::{ + DefaultAllocator, Matrix3, Matrix4, MatrixN, Scalar, SquareMatrix, Unit, Vector, Vector3, + VectorN, +}; +use geometry::{ + Isometry, IsometryMatrix3, Orthographic3, Perspective3, Point, Point3, Rotation2, Rotation3, +}; + +use alga::general::{Real, Ring}; use alga::linear::Transformation; impl MatrixN @@ -302,7 +305,8 @@ impl> SquareMatrix SB: Storage>, DefaultAllocator: Allocator>, { - let scale = self.fixed_slice::>(D::dim() - 1, 0) + let scale = self + .fixed_slice::>(D::dim() - 1, 0) .tr_dot(&shift); let post_translation = self.fixed_slice::, DimNameDiff>(0, 0) * shift; @@ -341,9 +345,8 @@ where let transform = self.fixed_slice::, DimNameDiff>(0, 0); let translation = self.fixed_slice::, U1>(0, D::dim() - 1); let normalizer = self.fixed_slice::>(D::dim() - 1, 0); - let n = normalizer.tr_dot(&pt.coords) + unsafe { - *self.get_unchecked(D::dim() - 1, D::dim() - 1) - }; + let n = normalizer.tr_dot(&pt.coords) + + unsafe { *self.get_unchecked(D::dim() - 1, D::dim() - 1) }; if !n.is_zero() { return transform * (pt / n) + translation; diff --git a/src/base/componentwise.rs b/src/base/componentwise.rs index 949347157..5db2ebb8f 100644 --- a/src/base/componentwise.rs +++ b/src/base/componentwise.rs @@ -1,4 +1,4 @@ -// Non-conventional componentwise operators. +// Non-conventional component-wise operators. use num::{Signed, Zero}; use std::ops::{Add, Mul}; diff --git a/src/base/constraint.rs b/src/base/constraint.rs index 369841b56..d9d7aafee 100644 --- a/src/base/constraint.rs +++ b/src/base/constraint.rs @@ -8,11 +8,9 @@ pub struct ShapeConstraint; /// Constraints `C1` and `R2` to be equivalent. pub trait AreMultipliable: DimEq {} -impl AreMultipliable for ShapeConstraint -where - ShapeConstraint: DimEq, -{ -} +impl AreMultipliable for ShapeConstraint where + ShapeConstraint: DimEq +{} /// Constraints `D1` and `D2` to be equivalent. pub trait DimEq { @@ -70,8 +68,9 @@ equality_trait_decl!( /// Constraints D1 and D2 to be equivalent, where they both designate dimensions of algebraic /// entities (e.g. square matrices). -pub trait SameDimension - : SameNumberOfRows + SameNumberOfColumns { +pub trait SameDimension: + SameNumberOfRows + SameNumberOfColumns +{ /// This is either equal to `D1` or `D2`, always choosing the one (if any) which is a type-level /// constant. 
type Representative: Dim; diff --git a/src/base/coordinates.rs b/src/base/coordinates.rs index e092763e2..986b8e9d4 100644 --- a/src/base/coordinates.rs +++ b/src/base/coordinates.rs @@ -7,9 +7,9 @@ use std::mem; use std::ops::{Deref, DerefMut}; -use base::{Matrix, Scalar}; use base::dimension::{U1, U2, U3, U4, U5, U6}; use base::storage::{ContiguousStorage, ContiguousStorageMut}; +use base::{Matrix, Scalar}; /* * diff --git a/src/base/dimension.rs b/src/base/dimension.rs index c660c5a40..d694ec392 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -7,7 +7,7 @@ use std::cmp; use std::fmt::Debug; use std::ops::{Add, Div, Mul, Sub}; use typenum::{ - self, B1, Bit, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, UInt, UTerm, Unsigned, + self, Bit, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, UInt, UTerm, Unsigned, B1, }; #[cfg(feature = "serde-serialize")] diff --git a/src/base/edition.rs b/src/base/edition.rs index 585176143..649d4cdae 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -285,7 +285,8 @@ impl> Matrix { let copied_value_start = i + nremove.value(); unsafe { - let ptr_in = m.data + let ptr_in = m + .data .ptr() .offset((copied_value_start * nrows.value()) as isize); let ptr_out = m.data.ptr_mut().offset((i * nrows.value()) as isize); @@ -448,7 +449,8 @@ impl> Matrix { if ninsert.value() != 0 && i != ncols.value() { let ptr_in = res.data.ptr().offset((i * nrows.value()) as isize); - let ptr_out = res.data + let ptr_out = res + .data .ptr_mut() .offset(((i + ninsert.value()) * nrows.value()) as isize); diff --git a/src/base/iter.rs b/src/base/iter.rs index 52d492da2..fe8fd4f6b 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -3,21 +3,20 @@ use std::marker::PhantomData; use std::mem; -use base::Scalar; use base::dimension::Dim; use base::storage::{Storage, StorageMut}; +use base::Scalar; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { - /// An iterator through a dense matrix with arbitrary strides matrix. pub struct $Name<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> { - ptr: $Ptr, + ptr: $Ptr, inner_ptr: $Ptr, inner_end: $Ptr, - size: usize, // We can't use an end pointer here because a stride might be zero. - strides: (S::RStride, S::CStride), - _phantoms: PhantomData<($Ref, R, C, S)> + size: usize, // We can't use an end pointer here because a stride might be zero. + strides: (S::RStride, S::CStride), + _phantoms: PhantomData<($Ref, R, C, S)>, } // FIXME: we need to specialize for the case where the matrix storage is owned (in which @@ -25,23 +24,25 @@ macro_rules! iterator { impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, N, R, C, S> { /// Creates a new iterator for the given matrix storage. pub fn new(storage: $SRef) -> $Name<'a, N, R, C, S> { - let shape = storage.shape(); + let shape = storage.shape(); let strides = storage.strides(); let inner_offset = shape.0.value() * strides.0.value(); let ptr = storage.$ptr(); $Name { - ptr: ptr, + ptr: ptr, inner_ptr: ptr, inner_end: unsafe { ptr.offset(inner_offset as isize) }, - size: shape.0.value() * shape.1.value(), - strides: strides, - _phantoms: PhantomData + size: shape.0.value() * shape.1.value(), + strides: strides, + _phantoms: PhantomData, } } } - impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> Iterator for $Name<'a, N, R, C, S> { + impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> Iterator + for $Name<'a, N, R, C, S> + { type Item = $Ref; #[inline] @@ -82,13 +83,15 @@ macro_rules! 
iterator { } } - impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator for $Name<'a, N, R, C, S> { + impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator + for $Name<'a, N, R, C, S> + { #[inline] fn len(&self) -> usize { self.size } } - } + }; } iterator!(struct MatrixIter for Storage.ptr -> *const N, &'a N, &'a S); diff --git a/src/base/matrix_array.rs b/src/base/matrix_array.rs index 023c7c7b2..fc81176cb 100644 --- a/src/base/matrix_array.rs +++ b/src/base/matrix_array.rs @@ -107,8 +107,7 @@ where R::Value: Mul, Prod: ArrayLength, GenericArray>: Copy, -{ -} +{} impl Clone for MatrixArray where @@ -133,8 +132,7 @@ where C: DimName, R::Value: Mul, Prod: ArrayLength, -{ -} +{} impl PartialEq for MatrixArray where @@ -234,8 +232,7 @@ where R::Value: Mul, Prod: ArrayLength, DefaultAllocator: Allocator, -{ -} +{} unsafe impl ContiguousStorageMut for MatrixArray where @@ -245,8 +242,7 @@ where R::Value: Mul, Prod: ArrayLength, DefaultAllocator: Allocator, -{ -} +{} /* * diff --git a/src/base/matrix_vec.rs b/src/base/matrix_vec.rs index 8a28b26cf..90bdfe283 100644 --- a/src/base/matrix_vec.rs +++ b/src/base/matrix_vec.rs @@ -211,17 +211,13 @@ where } } -unsafe impl ContiguousStorage for MatrixVec -where - DefaultAllocator: Allocator, -{ -} +unsafe impl ContiguousStorage for MatrixVec where + DefaultAllocator: Allocator +{} -unsafe impl ContiguousStorageMut for MatrixVec -where - DefaultAllocator: Allocator, -{ -} +unsafe impl ContiguousStorageMut for MatrixVec where + DefaultAllocator: Allocator +{} unsafe impl StorageMut for MatrixVec where @@ -253,14 +249,10 @@ impl Abomonation for MatrixVec { } } -unsafe impl ContiguousStorage for MatrixVec -where - DefaultAllocator: Allocator, -{ -} +unsafe impl ContiguousStorage for MatrixVec where + DefaultAllocator: Allocator +{} -unsafe impl ContiguousStorageMut for MatrixVec -where - DefaultAllocator: Allocator, -{ -} +unsafe impl ContiguousStorageMut for MatrixVec where + DefaultAllocator: Allocator +{} diff --git a/src/base/mod.rs b/src/base/mod.rs index 7197b0049..2f5730252 100644 --- a/src/base/mod.rs +++ b/src/base/mod.rs @@ -12,7 +12,6 @@ pub mod storage; mod alias; mod alias_slice; -mod swizzle; mod cg; mod componentwise; mod construction; @@ -27,6 +26,7 @@ mod matrix_slice; mod matrix_vec; mod properties; mod scalar; +mod swizzle; mod unit; #[doc(hidden)] diff --git a/src/base/ops.rs b/src/base/ops.rs index 217ed194d..a83d51125 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -1,17 +1,19 @@ -use std::iter; -use std::ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, - SubAssign}; -use std::cmp::PartialOrd; use num::{One, Signed, Zero}; +use std::cmp::PartialOrd; +use std::iter; +use std::ops::{ + Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, +}; use alga::general::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub}; -use base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar}; +use base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; +use base::constraint::{ + AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, +}; use base::dimension::{Dim, DimMul, DimName, DimProd}; -use base::constraint::{AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, - ShapeConstraint}; use base::storage::{ContiguousStorageMut, Storage, StorageMut}; -use base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; +use base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, 
MatrixSum, Scalar}; /* * @@ -783,7 +785,8 @@ impl> Matri #[inline] pub fn amin(&self) -> N { let mut it = self.iter(); - let mut min = it.next() + let mut min = it + .next() .expect("amin: empty matrices not supported.") .abs(); diff --git a/src/base/scalar.rs b/src/base/scalar.rs index ca6da510d..47e3019c9 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -1,6 +1,6 @@ +use std::any::Any; use std::any::TypeId; use std::fmt::Debug; -use std::any::Any; /// The basic scalar type for all structures of `nalgebra`. /// diff --git a/src/base/storage.rs b/src/base/storage.rs index 63d10bcbd..bf57242e1 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -3,10 +3,10 @@ use std::fmt::Debug; use std::mem; -use base::Scalar; +use base::allocator::{Allocator, SameShapeC, SameShapeR}; use base::default_allocator::DefaultAllocator; use base::dimension::{Dim, U1}; -use base::allocator::{Allocator, SameShapeC, SameShapeR}; +use base::Scalar; /* * Aliases for allocation results. @@ -177,8 +177,9 @@ pub unsafe trait StorageMut: Storage { /// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorage - : Storage { +pub unsafe trait ContiguousStorage: + Storage +{ } /// A mutable matrix storage that is stored contiguously in memory. @@ -186,6 +187,7 @@ pub unsafe trait ContiguousStorage /// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorageMut - : ContiguousStorage + StorageMut { +pub unsafe trait ContiguousStorageMut: + ContiguousStorage + StorageMut +{ } diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index e4ae9c9c3..e1908e358 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -1,8 +1,7 @@ -use base::{Scalar, Vector, DimName, Vector2, Vector3}; +use base::{DimName, Scalar, Vector, Vector2, Vector3}; use storage::Storage; use typenum::{self, Cmp, Greater}; - macro_rules! 
impl_swizzle { ($(where $BaseDim: ident: $name: ident() -> $Result: ident[$($i: expr),*]);*) => { $( @@ -64,4 +63,4 @@ impl_swizzle!( where U2: zzx() -> Vector3[2, 2, 0]; where U2: zzy() -> Vector3[2, 2, 1]; where U2: zzz() -> Vector3[2, 2, 2] -); \ No newline at end of file +); diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 435585f7f..94bf9abe7 100644 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -6,7 +6,7 @@ use std::io::{Result as IOResult, Write}; use std::marker::PhantomData; #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -26,23 +26,19 @@ use geometry::{Point, Translation}; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "R: Serialize, + serde(bound( + serialize = "R: Serialize, DefaultAllocator: Allocator, Owned: Serialize" - ) - ) + )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "R: Deserialize<'de>, + serde(bound( + deserialize = "R: Deserialize<'de>, DefaultAllocator: Allocator, Owned: Deserialize<'de>" - ) - ) + )) )] pub struct Isometry where @@ -54,7 +50,10 @@ where pub translation: Translation, // One dummy private field just to prevent explicit construction. - #[cfg_attr(feature = "serde-serialize", serde(skip_serializing, skip_deserializing))] + #[cfg_attr( + feature = "serde-serialize", + serde(skip_serializing, skip_deserializing) + )] _noconstruct: PhantomData, } @@ -98,8 +97,7 @@ impl> + Copy> Copy for Isome where DefaultAllocator: Allocator, Owned: Copy, -{ -} +{} impl> + Clone> Clone for Isometry where @@ -200,8 +198,7 @@ impl Eq for Isometry where R: Rotation> + Eq, DefaultAllocator: Allocator, -{ -} +{} impl PartialEq for Isometry where diff --git a/src/geometry/isometry_alga.rs b/src/geometry/isometry_alga.rs index 58ddc6920..6a144550a 100644 --- a/src/geometry/isometry_alga.rs +++ b/src/geometry/isometry_alga.rs @@ -1,9 +1,12 @@ -use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, - AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative, - Real}; +use alga::general::{ + AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, + AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, +}; use alga::linear::Isometry as AlgaIsometry; -use alga::linear::{AffineTransformation, DirectIsometry, ProjectiveTransformation, Rotation, - Similarity, Transformation}; +use alga::linear::{ + AffineTransformation, DirectIsometry, ProjectiveTransformation, Rotation, Similarity, + Transformation, +}; use base::allocator::Allocator; use base::dimension::DimName; diff --git a/src/geometry/mod.rs b/src/geometry/mod.rs index f125f9ed5..af7124616 100644 --- a/src/geometry/mod.rs +++ b/src/geometry/mod.rs @@ -4,61 +4,61 @@ mod op_macros; mod point; -mod point_construction; -mod point_alias; -mod point_ops; mod point_alga; +mod point_alias; +mod point_construction; mod point_conversion; mod point_coordinates; +mod point_ops; mod rotation; -mod rotation_construction; -mod rotation_ops; mod rotation_alga; // FIXME: implement Rotation methods. 
-mod rotation_conversion; mod rotation_alias; +mod rotation_construction; +mod rotation_conversion; +mod rotation_ops; mod rotation_specialization; mod quaternion; -mod quaternion_construction; -mod quaternion_ops; mod quaternion_alga; +mod quaternion_construction; mod quaternion_conversion; mod quaternion_coordinates; +mod quaternion_ops; mod unit_complex; -mod unit_complex_construction; -mod unit_complex_ops; mod unit_complex_alga; +mod unit_complex_construction; mod unit_complex_conversion; +mod unit_complex_ops; mod translation; -mod translation_construction; -mod translation_ops; mod translation_alga; -mod translation_conversion; mod translation_alias; +mod translation_construction; +mod translation_conversion; +mod translation_ops; mod isometry; -mod isometry_construction; -mod isometry_ops; mod isometry_alga; -mod isometry_conversion; mod isometry_alias; +mod isometry_construction; +mod isometry_conversion; +mod isometry_ops; mod similarity; -mod similarity_construction; -mod similarity_ops; mod similarity_alga; -mod similarity_conversion; mod similarity_alias; +mod similarity_construction; +mod similarity_conversion; +mod similarity_ops; mod transform; -mod transform_construction; -mod transform_ops; mod transform_alga; -mod transform_conversion; mod transform_alias; +mod transform_construction; +mod transform_conversion; +mod transform_ops; mod reflection; diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 6b3e4d2d7..4f26d632a 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -7,7 +7,7 @@ use std::hash; use std::io::{Result as IOResult, Write}; #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize, Serializer, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -42,8 +42,7 @@ impl Copy for Point where DefaultAllocator: Allocator, >::Buffer: Copy, -{ -} +{} impl Clone for Point where @@ -241,11 +240,7 @@ where } } -impl Eq for Point -where - DefaultAllocator: Allocator, -{ -} +impl Eq for Point where DefaultAllocator: Allocator {} impl PartialEq for Point where diff --git a/src/geometry/point_alga.rs b/src/geometry/point_alga.rs index e3bb32e06..5673d0df4 100644 --- a/src/geometry/point_alga.rs +++ b/src/geometry/point_alga.rs @@ -1,9 +1,9 @@ use alga::general::{Field, JoinSemilattice, Lattice, MeetSemilattice, Real}; use alga::linear::{AffineSpace, EuclideanSpace}; -use base::{DefaultAllocator, Scalar, VectorN}; -use base::dimension::DimName; use base::allocator::Allocator; +use base::dimension::DimName; +use base::{DefaultAllocator, Scalar, VectorN}; use geometry::Point; diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index ccfd6af0a..60c21f8d6 100644 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -8,7 +8,7 @@ use std::io::{Result as IOResult, Write}; #[cfg(feature = "serde-serialize")] use base::storage::Owned; #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize, Serializer, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -457,9 +457,11 @@ impl UnitQuaternion { /// is not well-defined). 
#[inline] pub fn slerp(&self, other: &UnitQuaternion, t: N) -> UnitQuaternion { - Unit::new_unchecked( - Quaternion::from_vector(Unit::new_unchecked(self.coords).slerp(&Unit::new_unchecked(other.coords), t).unwrap()) - ) + Unit::new_unchecked(Quaternion::from_vector( + Unit::new_unchecked(self.coords) + .slerp(&Unit::new_unchecked(other.coords), t) + .unwrap(), + )) } /// Computes the spherical linear interpolation between two unit quaternions or returns `None` @@ -479,7 +481,8 @@ impl UnitQuaternion { t: N, epsilon: N, ) -> Option> { - Unit::new_unchecked(self.coords).try_slerp(&Unit::new_unchecked(other.coords), t, epsilon) + Unit::new_unchecked(self.coords) + .try_slerp(&Unit::new_unchecked(other.coords), t, epsilon) .map(|q| Unit::new_unchecked(Quaternion::from_vector(q.unwrap()))) } diff --git a/src/geometry/quaternion_alga.rs b/src/geometry/quaternion_alga.rs index 44fa5143a..4b926560a 100644 --- a/src/geometry/quaternion_alga.rs +++ b/src/geometry/quaternion_alga.rs @@ -1,11 +1,15 @@ use num::Zero; -use alga::general::{AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, - AbstractModule, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, - Additive, Id, Identity, Inverse, Module, Multiplicative, Real}; -use alga::linear::{AffineTransformation, DirectIsometry, FiniteDimVectorSpace, Isometry, - NormedSpace, OrthogonalTransformation, ProjectiveTransformation, Rotation, - Similarity, Transformation, VectorSpace}; +use alga::general::{ + AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule, + AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, Id, Identity, Inverse, Module, + Multiplicative, Real, +}; +use alga::linear::{ + AffineTransformation, DirectIsometry, FiniteDimVectorSpace, Isometry, NormedSpace, + OrthogonalTransformation, ProjectiveTransformation, Rotation, Similarity, Transformation, + VectorSpace, +}; use base::{Vector3, Vector4}; use geometry::{Point3, Quaternion, UnitQuaternion}; diff --git a/src/geometry/quaternion_ops.rs b/src/geometry/quaternion_ops.rs index b61442269..498a0def3 100644 --- a/src/geometry/quaternion_ops.rs +++ b/src/geometry/quaternion_ops.rs @@ -50,15 +50,16 @@ * */ -use std::ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, - SubAssign}; +use std::ops::{ + Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, +}; use alga::general::Real; -use base::{DefaultAllocator, Unit, Vector, Vector3}; -use base::storage::Storage; use base::allocator::Allocator; use base::dimension::{U1, U3, U4}; +use base::storage::Storage; +use base::{DefaultAllocator, Unit, Vector, Vector3}; use geometry::{Point3, Quaternion, Rotation, UnitQuaternion}; diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 0ceaa73bf..476c045f1 100644 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -6,7 +6,7 @@ use std::hash; use std::io::{Result as IOResult, Write}; #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize, Serializer, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize")] use base::storage::Owned; @@ -44,8 +44,7 @@ impl Copy for Rotation where DefaultAllocator: Allocator, >::Buffer: Copy, -{ -} +{} impl Clone for Rotation where @@ -187,11 +186,7 @@ where } } -impl Eq for Rotation -where - DefaultAllocator: Allocator, -{ -} +impl Eq for Rotation where DefaultAllocator: Allocator {} impl PartialEq for Rotation where diff --git 
a/src/geometry/rotation_alga.rs b/src/geometry/rotation_alga.rs index 17e92bbf3..b95a028b6 100644 --- a/src/geometry/rotation_alga.rs +++ b/src/geometry/rotation_alga.rs @@ -1,12 +1,15 @@ -use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, - AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative, - Real}; -use alga::linear::{self, AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation, - ProjectiveTransformation, Similarity, Transformation}; +use alga::general::{ + AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, + AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, +}; +use alga::linear::{ + self, AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation, + ProjectiveTransformation, Similarity, Transformation, +}; -use base::{DefaultAllocator, VectorN}; -use base::dimension::DimName; use base::allocator::Allocator; +use base::dimension::DimName; +use base::{DefaultAllocator, VectorN}; use geometry::{Point, Rotation}; diff --git a/src/geometry/rotation_construction.rs b/src/geometry/rotation_construction.rs index 14fa28b36..54cfc3c76 100644 --- a/src/geometry/rotation_construction.rs +++ b/src/geometry/rotation_construction.rs @@ -2,9 +2,9 @@ use num::{One, Zero}; use alga::general::{ClosedAdd, ClosedMul}; -use base::{DefaultAllocator, MatrixN, Scalar}; -use base::dimension::DimName; use base::allocator::Allocator; +use base::dimension::DimName; +use base::{DefaultAllocator, MatrixN, Scalar}; use geometry::Rotation; diff --git a/src/geometry/rotation_ops.rs b/src/geometry/rotation_ops.rs index 419830f22..0e5b20b70 100644 --- a/src/geometry/rotation_ops.rs +++ b/src/geometry/rotation_ops.rs @@ -17,16 +17,16 @@ * Matrix ×= Rotation */ -use std::ops::{Div, DivAssign, Index, Mul, MulAssign}; use num::{One, Zero}; +use std::ops::{Div, DivAssign, Index, Mul, MulAssign}; use alga::general::{ClosedAdd, ClosedMul}; -use base::{DefaultAllocator, Matrix, MatrixMN, Scalar, Unit, Vector, VectorN}; -use base::dimension::{Dim, DimName, U1}; +use base::allocator::Allocator; use base::constraint::{AreMultipliable, ShapeConstraint}; +use base::dimension::{Dim, DimName, U1}; use base::storage::Storage; -use base::allocator::Allocator; +use base::{DefaultAllocator, Matrix, MatrixMN, Scalar, Unit, Vector, VectorN}; use geometry::{Point, Rotation}; diff --git a/src/geometry/rotation_specialization.rs b/src/geometry/rotation_specialization.rs index 46a9627a0..d01a529d6 100644 --- a/src/geometry/rotation_specialization.rs +++ b/src/geometry/rotation_specialization.rs @@ -5,7 +5,7 @@ use quickcheck::{Arbitrary, Gen}; use alga::general::Real; use num::Zero; -use rand::distributions::{Distribution, Standard, OpenClosed01}; +use rand::distributions::{Distribution, OpenClosed01, Standard}; use rand::Rng; use std::ops::Neg; @@ -329,7 +329,7 @@ impl Rotation3 { pub fn angle(&self) -> N { ((self.matrix()[(0, 0)] + self.matrix()[(1, 1)] + self.matrix()[(2, 2)] - N::one()) / ::convert(2.0)) - .acos() + .acos() } /// The rotation axis. Returns `None` if the rotation angle is zero or PI. 
@@ -398,9 +398,15 @@ where let theta = N::two_pi() * rng.sample(OpenClosed01); let (ts, tc) = theta.sin_cos(); let a = MatrixN::::new( - tc, ts, N::zero(), - -ts, tc, N::zero(), - N::zero(), N::zero(), N::one() + tc, + ts, + N::zero(), + -ts, + tc, + N::zero(), + N::zero(), + N::zero(), + N::one(), ); // Compute a random rotation *of* Z diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index c29a02090..cbf26a69f 100644 --- a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -5,7 +5,7 @@ use std::hash; use std::io::{Result as IOResult, Write}; #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -25,25 +25,21 @@ use geometry::{Isometry, Point, Translation}; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "N: Serialize, + serde(bound( + serialize = "N: Serialize, R: Serialize, DefaultAllocator: Allocator, Owned: Serialize" - ) - ) + )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "N: Deserialize<'de>, + serde(bound( + deserialize = "N: Deserialize<'de>, R: Deserialize<'de>, DefaultAllocator: Allocator, Owned: Deserialize<'de>" - ) - ) + )) )] pub struct Similarity where @@ -89,8 +85,7 @@ impl> + Copy> Copy for Simil where DefaultAllocator: Allocator, Owned: Copy, -{ -} +{} impl> + Clone> Clone for Similarity where @@ -276,8 +271,7 @@ impl Eq for Similarity where R: Rotation> + Eq, DefaultAllocator: Allocator, -{ -} +{} impl PartialEq for Similarity where diff --git a/src/geometry/similarity_alga.rs b/src/geometry/similarity_alga.rs index 8ad0e674e..c416cad83 100644 --- a/src/geometry/similarity_alga.rs +++ b/src/geometry/similarity_alga.rs @@ -1,11 +1,13 @@ -use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, - AbstractQuasigroup, AbstractSemigroup, Identity, Inverse, Multiplicative, Real}; -use alga::linear::{AffineTransformation, ProjectiveTransformation, Rotation, Transformation}; +use alga::general::{ + AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, + AbstractSemigroup, Identity, Inverse, Multiplicative, Real, +}; use alga::linear::Similarity as AlgaSimilarity; +use alga::linear::{AffineTransformation, ProjectiveTransformation, Rotation, Transformation}; -use base::{DefaultAllocator, VectorN}; -use base::dimension::DimName; use base::allocator::Allocator; +use base::dimension::DimName; +use base::{DefaultAllocator, VectorN}; use geometry::{Point, Similarity, Translation}; diff --git a/src/geometry/similarity_ops.rs b/src/geometry/similarity_ops.rs index 92e77d087..7469c4cdb 100644 --- a/src/geometry/similarity_ops.rs +++ b/src/geometry/similarity_ops.rs @@ -3,9 +3,9 @@ use std::ops::{Div, DivAssign, Mul, MulAssign}; use alga::general::Real; use alga::linear::Rotation as AlgaRotation; -use base::{DefaultAllocator, VectorN}; -use base::dimension::{DimName, U1, U3, U4}; use base::allocator::Allocator; +use base::dimension::{DimName, U1, U3, U4}; +use base::{DefaultAllocator, VectorN}; use geometry::{Isometry, Point, Rotation, Similarity, Translation, UnitQuaternion}; diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 78a53829a..4d7f8ba18 100644 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -3,14 +3,14 @@ use std::fmt::Debug; use std::marker::PhantomData; #[cfg(feature = "serde-serialize")] -use 
serde::{Serialize, Deserialize, Serializer, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use alga::general::Real; -use base::{DefaultAllocator, MatrixN}; +use base::allocator::Allocator; use base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use base::storage::Owned; -use base::allocator::Allocator; +use base::{DefaultAllocator, MatrixN}; /// Trait implemented by phantom types identifying the projective transformation type. /// @@ -56,18 +56,15 @@ where /// Tag representing the most general (not necessarily inversible) `Transform` type. #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum TGeneral { -} +pub enum TGeneral {} /// Tag representing the most general inversible `Transform` type. #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum TProjective { -} +pub enum TProjective {} /// Tag representing an affine `Transform`. Its bottom-row is equal to `(0, 0 ... 0, 1)`. #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum TAffine { -} +pub enum TAffine {} impl TCategory for TGeneral { #[inline] @@ -104,7 +101,8 @@ impl TCategory for TAffine { DefaultAllocator: Allocator, { let last = D::dim() - 1; - mat.is_invertible() && mat[(last, last)] == N::one() + mat.is_invertible() + && mat[(last, last)] == N::one() && (0..last).all(|i| mat[(last, i)].is_zero()) } } @@ -177,8 +175,7 @@ impl + Copy, C: TCategory> Copy for Transform, DimNameSum>, Owned, DimNameSum>: Copy, -{ -} +{} impl, C: TCategory> Clone for Transform where @@ -220,11 +217,9 @@ where } } -impl, C: TCategory> Eq for Transform -where - DefaultAllocator: Allocator, DimNameSum>, -{ -} +impl, C: TCategory> Eq for Transform where + DefaultAllocator: Allocator, DimNameSum> +{} impl, C: TCategory> PartialEq for Transform where diff --git a/src/geometry/transform_alga.rs b/src/geometry/transform_alga.rs index d01e61bfe..652da373b 100644 --- a/src/geometry/transform_alga.rs +++ b/src/geometry/transform_alga.rs @@ -1,10 +1,12 @@ -use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, - AbstractQuasigroup, AbstractSemigroup, Identity, Inverse, Multiplicative, Real}; +use alga::general::{ + AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, + AbstractSemigroup, Identity, Inverse, Multiplicative, Real, +}; use alga::linear::{ProjectiveTransformation, Transformation}; -use base::{DefaultAllocator, VectorN}; -use base::dimension::{DimNameAdd, DimNameSum, U1}; use base::allocator::Allocator; +use base::dimension::{DimNameAdd, DimNameSum, U1}; +use base::{DefaultAllocator, VectorN}; use geometry::{Point, SubTCategoryOf, TCategory, TProjective, Transform}; diff --git a/src/geometry/transform_construction.rs b/src/geometry/transform_construction.rs index 6d5bb135e..1a7beba40 100644 --- a/src/geometry/transform_construction.rs +++ b/src/geometry/transform_construction.rs @@ -2,9 +2,9 @@ use num::One; use alga::general::Real; -use base::{DefaultAllocator, MatrixN}; -use base::dimension::{DimNameAdd, DimNameSum, U1}; use base::allocator::Allocator; +use base::dimension::{DimNameAdd, DimNameSum, U1}; +use base::{DefaultAllocator, MatrixN}; use geometry::{TCategory, Transform}; diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index 0e2b48264..6fc4ec32b 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -3,13 +3,14 @@ use std::ops::{Div, DivAssign, Index, IndexMut, Mul, MulAssign}; use alga::general::{ClosedAdd, ClosedMul, Real, SubsetOf}; -use base::{DefaultAllocator, MatrixN, 
Scalar, VectorN}; use base::allocator::Allocator; use base::dimension::{DimName, DimNameAdd, DimNameSum, U1, U3, U4}; +use base::{DefaultAllocator, MatrixN, Scalar, VectorN}; -use geometry::{Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, - TCategory, TCategoryMul, TGeneral, TProjective, Transform, Translation, - UnitQuaternion}; +use geometry::{ + Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, + TCategoryMul, TGeneral, TProjective, Transform, Translation, UnitQuaternion, +}; /* * diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index ddf7651cd..33a0f3493 100644 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -6,7 +6,7 @@ use std::hash; use std::io::{Result as IOResult, Write}; #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize, Serializer, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -44,8 +44,7 @@ impl Copy for Translation where DefaultAllocator: Allocator, Owned: Copy, -{ -} +{} impl Clone for Translation where @@ -153,11 +152,7 @@ where } } -impl Eq for Translation -where - DefaultAllocator: Allocator, -{ -} +impl Eq for Translation where DefaultAllocator: Allocator {} impl PartialEq for Translation where diff --git a/src/geometry/translation_alga.rs b/src/geometry/translation_alga.rs index 296d6e176..ce095cc59 100644 --- a/src/geometry/translation_alga.rs +++ b/src/geometry/translation_alga.rs @@ -1,13 +1,16 @@ -use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, - AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative, - Real}; -use alga::linear::{AffineTransformation, DirectIsometry, Isometry, ProjectiveTransformation, - Similarity, Transformation}; +use alga::general::{ + AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, + AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, +}; use alga::linear::Translation as AlgaTranslation; +use alga::linear::{ + AffineTransformation, DirectIsometry, Isometry, ProjectiveTransformation, Similarity, + Transformation, +}; -use base::{DefaultAllocator, VectorN}; -use base::dimension::DimName; use base::allocator::Allocator; +use base::dimension::DimName; +use base::{DefaultAllocator, VectorN}; use geometry::{Point, Translation}; diff --git a/src/geometry/translation_ops.rs b/src/geometry/translation_ops.rs index 639affddd..d124c763b 100644 --- a/src/geometry/translation_ops.rs +++ b/src/geometry/translation_ops.rs @@ -2,10 +2,10 @@ use std::ops::{Div, DivAssign, Mul, MulAssign}; use alga::general::{ClosedAdd, ClosedSub}; -use base::{DefaultAllocator, Scalar}; -use base::dimension::{DimName, U1}; -use base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use base::allocator::{Allocator, SameShapeAllocator}; +use base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; +use base::dimension::{DimName, U1}; +use base::{DefaultAllocator, Scalar}; use geometry::{Point, Translation}; diff --git a/src/geometry/unit_complex_alga.rs b/src/geometry/unit_complex_alga.rs index bfc992023..d464ea560 100644 --- a/src/geometry/unit_complex_alga.rs +++ b/src/geometry/unit_complex_alga.rs @@ -1,12 +1,15 @@ -use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, - AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative, - Real}; -use 
alga::linear::{AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation, - ProjectiveTransformation, Rotation, Similarity, Transformation}; +use alga::general::{ + AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, + AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, +}; +use alga::linear::{ + AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation, + ProjectiveTransformation, Rotation, Similarity, Transformation, +}; -use base::{DefaultAllocator, Vector2}; use base::allocator::Allocator; use base::dimension::U2; +use base::{DefaultAllocator, Vector2}; use geometry::{Point2, UnitComplex}; /* diff --git a/src/geometry/unit_complex_construction.rs b/src/geometry/unit_complex_construction.rs index 4b0207897..82dd2e7d8 100644 --- a/src/geometry/unit_complex_construction.rs +++ b/src/geometry/unit_complex_construction.rs @@ -3,7 +3,7 @@ use quickcheck::{Arbitrary, Gen}; use num::One; use num_complex::Complex; -use rand::distributions::{Distribution, Standard, OpenClosed01}; +use rand::distributions::{Distribution, OpenClosed01, Standard}; use rand::Rng; use alga::general::Real; diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index db340a9ce..a5fd178d1 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use alga::general::Real; use allocator::Allocator; @@ -15,31 +15,27 @@ use linalg::householder; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DimMinimum: DimSub, + serde(bound( + serialize = "DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, MatrixMN: Serialize, VectorN>: Serialize, VectorN, U1>>: Serialize" - ) - ) + )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DimMinimum: DimSub, + serde(bound( + deserialize = "DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, MatrixMN: Deserialize<'de>, VectorN>: Deserialize<'de>, VectorN, U1>>: Deserialize<'de>" - ) - ) + )) )] #[derive(Clone, Debug)] pub struct Bidiagonal, C: Dim> @@ -68,8 +64,7 @@ where MatrixMN: Copy, VectorN>: Copy, VectorN, U1>>: Copy, -{ -} +{} impl, C: Dim> Bidiagonal where diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index e6592227b..9bf3cb18b 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use alga::general::Real; @@ -13,21 +13,17 @@ use storage::{Storage, StorageMut}; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator, + serde(bound( + serialize = "DefaultAllocator: Allocator, MatrixN: Serialize" - ) - ) + )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator, + serde(bound( + deserialize = "DefaultAllocator: Allocator, MatrixN: Deserialize<'de>" - ) - ) + )) )] #[derive(Clone, Debug)] pub struct Cholesky @@ -41,8 +37,7 @@ impl Copy for Cholesky where DefaultAllocator: Allocator, MatrixN: Copy, -{ -} +{} impl> Cholesky where diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index 85079b178..a65ab5d09 100644 --- a/src/linalg/full_piv_lu.rs +++ 
b/src/linalg/full_piv_lu.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use alga::general::Real; use allocator::Allocator; @@ -15,25 +15,21 @@ use linalg::PermutationSequence; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator + + serde(bound( + serialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Serialize, PermutationSequence>: Serialize" - ) - ) + )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound( + deserialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Deserialize<'de>, PermutationSequence>: Deserialize<'de>" - ) - ) + )) )] #[derive(Clone, Debug)] pub struct FullPivLU, C: Dim> @@ -50,8 +46,7 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Copy, PermutationSequence>: Copy, -{ -} +{} impl, C: Dim> FullPivLU where diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index a831cc2e0..d575a4764 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use alga::general::Real; use allocator::Allocator; @@ -14,25 +14,21 @@ use linalg::householder; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator + + serde(bound( + serialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: Serialize, VectorN>: Serialize" - ) - ) + )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound( + deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: Deserialize<'de>, VectorN>: Deserialize<'de>" - ) - ) + )) )] #[derive(Clone, Debug)] pub struct Hessenberg> @@ -48,8 +44,7 @@ where DefaultAllocator: Allocator + Allocator>, MatrixN: Copy, VectorN>: Copy, -{ -} +{} impl> Hessenberg where diff --git a/src/linalg/inverse.rs b/src/linalg/inverse.rs index e00f4b8ba..921234d7b 100644 --- a/src/linalg/inverse.rs +++ b/src/linalg/inverse.rs @@ -1,9 +1,9 @@ use alga::general::Real; -use base::{DefaultAllocator, MatrixN, SquareMatrix}; +use base::allocator::Allocator; use base::dimension::Dim; use base::storage::{Storage, StorageMut}; -use base::allocator::Allocator; +use base::{DefaultAllocator, MatrixN, SquareMatrix}; use linalg::lu; @@ -129,52 +129,84 @@ where let m = m.data.as_slice(); out[(0, 0)] = m[5] * m[10] * m[15] - m[5] * m[11] * m[14] - m[9] * m[6] * m[15] - + m[9] * m[7] * m[14] + m[13] * m[6] * m[11] - m[13] * m[7] * m[10]; + + m[9] * m[7] * m[14] + + m[13] * m[6] * m[11] + - m[13] * m[7] * m[10]; out[(1, 0)] = -m[1] * m[10] * m[15] + m[1] * m[11] * m[14] + m[9] * m[2] * m[15] - - m[9] * m[3] * m[14] - m[13] * m[2] * m[11] + m[13] * m[3] * m[10]; + - m[9] * m[3] * m[14] + - m[13] * m[2] * m[11] + + m[13] * m[3] * m[10]; out[(2, 0)] = m[1] * m[6] * m[15] - m[1] * m[7] * m[14] - m[5] * m[2] * m[15] - + m[5] * m[3] * m[14] + m[13] * m[2] * m[7] - m[13] * m[3] * m[6]; + + m[5] * m[3] * m[14] + + m[13] * m[2] * m[7] + - m[13] * m[3] * m[6]; out[(3, 0)] = -m[1] * m[6] * m[11] + m[1] * m[7] * m[10] + m[5] * m[2] * m[11] - - m[5] * m[3] * m[10] - m[9] * m[2] * m[7] + m[9] 
* m[3] * m[6]; + - m[5] * m[3] * m[10] + - m[9] * m[2] * m[7] + + m[9] * m[3] * m[6]; out[(0, 1)] = -m[4] * m[10] * m[15] + m[4] * m[11] * m[14] + m[8] * m[6] * m[15] - - m[8] * m[7] * m[14] - m[12] * m[6] * m[11] + m[12] * m[7] * m[10]; + - m[8] * m[7] * m[14] + - m[12] * m[6] * m[11] + + m[12] * m[7] * m[10]; out[(1, 1)] = m[0] * m[10] * m[15] - m[0] * m[11] * m[14] - m[8] * m[2] * m[15] - + m[8] * m[3] * m[14] + m[12] * m[2] * m[11] - m[12] * m[3] * m[10]; + + m[8] * m[3] * m[14] + + m[12] * m[2] * m[11] + - m[12] * m[3] * m[10]; out[(2, 1)] = -m[0] * m[6] * m[15] + m[0] * m[7] * m[14] + m[4] * m[2] * m[15] - - m[4] * m[3] * m[14] - m[12] * m[2] * m[7] + m[12] * m[3] * m[6]; + - m[4] * m[3] * m[14] + - m[12] * m[2] * m[7] + + m[12] * m[3] * m[6]; out[(3, 1)] = m[0] * m[6] * m[11] - m[0] * m[7] * m[10] - m[4] * m[2] * m[11] - + m[4] * m[3] * m[10] + m[8] * m[2] * m[7] - m[8] * m[3] * m[6]; + + m[4] * m[3] * m[10] + + m[8] * m[2] * m[7] + - m[8] * m[3] * m[6]; out[(0, 2)] = m[4] * m[9] * m[15] - m[4] * m[11] * m[13] - m[8] * m[5] * m[15] - + m[8] * m[7] * m[13] + m[12] * m[5] * m[11] - m[12] * m[7] * m[9]; + + m[8] * m[7] * m[13] + + m[12] * m[5] * m[11] + - m[12] * m[7] * m[9]; out[(1, 2)] = -m[0] * m[9] * m[15] + m[0] * m[11] * m[13] + m[8] * m[1] * m[15] - - m[8] * m[3] * m[13] - m[12] * m[1] * m[11] + m[12] * m[3] * m[9]; + - m[8] * m[3] * m[13] + - m[12] * m[1] * m[11] + + m[12] * m[3] * m[9]; out[(2, 2)] = m[0] * m[5] * m[15] - m[0] * m[7] * m[13] - m[4] * m[1] * m[15] - + m[4] * m[3] * m[13] + m[12] * m[1] * m[7] - m[12] * m[3] * m[5]; + + m[4] * m[3] * m[13] + + m[12] * m[1] * m[7] + - m[12] * m[3] * m[5]; out[(0, 3)] = -m[4] * m[9] * m[14] + m[4] * m[10] * m[13] + m[8] * m[5] * m[14] - - m[8] * m[6] * m[13] - m[12] * m[5] * m[10] + m[12] * m[6] * m[9]; + - m[8] * m[6] * m[13] + - m[12] * m[5] * m[10] + + m[12] * m[6] * m[9]; out[(3, 2)] = -m[0] * m[5] * m[11] + m[0] * m[7] * m[9] + m[4] * m[1] * m[11] - - m[4] * m[3] * m[9] - m[8] * m[1] * m[7] + m[8] * m[3] * m[5]; + - m[4] * m[3] * m[9] + - m[8] * m[1] * m[7] + + m[8] * m[3] * m[5]; out[(1, 3)] = m[0] * m[9] * m[14] - m[0] * m[10] * m[13] - m[8] * m[1] * m[14] - + m[8] * m[2] * m[13] + m[12] * m[1] * m[10] - m[12] * m[2] * m[9]; + + m[8] * m[2] * m[13] + + m[12] * m[1] * m[10] + - m[12] * m[2] * m[9]; out[(2, 3)] = -m[0] * m[5] * m[14] + m[0] * m[6] * m[13] + m[4] * m[1] * m[14] - - m[4] * m[2] * m[13] - m[12] * m[1] * m[6] + m[12] * m[2] * m[5]; + - m[4] * m[2] * m[13] + - m[12] * m[1] * m[6] + + m[12] * m[2] * m[5]; out[(3, 3)] = m[0] * m[5] * m[10] - m[0] * m[6] * m[9] - m[4] * m[1] * m[10] - + m[4] * m[2] * m[9] + m[8] * m[1] * m[6] - m[8] * m[2] * m[5]; + + m[4] * m[2] * m[9] + + m[8] * m[1] * m[6] + - m[8] * m[2] * m[5]; let det = m[0] * out[(0, 0)] + m[1] * out[(0, 1)] + m[2] * out[(0, 2)] + m[3] * out[(0, 3)]; diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 7707242b8..2dccbc0b7 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use alga::general::{Field, Real}; use allocator::{Allocator, Reallocator}; @@ -15,25 +15,21 @@ use linalg::PermutationSequence; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator + + serde(bound( + serialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Serialize, PermutationSequence>: Serialize" - ) - ) 
+ )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound( + deserialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Deserialize<'de>, PermutationSequence>: Deserialize<'de>" - ) - ) + )) )] #[derive(Clone, Debug)] pub struct LU, C: Dim> @@ -49,8 +45,7 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Copy, PermutationSequence>: Copy, -{ -} +{} /// Performs a LU decomposition to overwrite `out` with the inverse of `matrix`. /// diff --git a/src/linalg/mod.rs b/src/linalg/mod.rs index 04f7e3f86..4418b2836 100644 --- a/src/linalg/mod.rs +++ b/src/linalg/mod.rs @@ -1,35 +1,35 @@ //! [Reexported at the root of this crate.] Factorization of real matrices. -mod solve; -mod determinant; -mod inverse; -pub mod householder; -pub mod givens; pub mod balancing; -mod permutation_sequence; -mod qr; -mod hessenberg; mod bidiagonal; -mod symmetric_tridiagonal; mod cholesky; -mod lu; +mod determinant; mod full_piv_lu; +pub mod givens; +mod hessenberg; +pub mod householder; +mod inverse; +mod lu; +mod permutation_sequence; +mod qr; mod schur; +mod solve; mod svd; mod symmetric_eigen; +mod symmetric_tridiagonal; //// FIXME: Not complete enough for publishing. //// This handles only cases where each eigenvalue has multiplicity one. // mod eigen; -pub use self::permutation_sequence::*; -pub use self::qr::*; -pub use self::hessenberg::*; pub use self::bidiagonal::*; pub use self::cholesky::*; -pub use self::lu::*; pub use self::full_piv_lu::*; +pub use self::hessenberg::*; +pub use self::lu::*; +pub use self::permutation_sequence::*; +pub use self::qr::*; pub use self::schur::*; pub use self::svd::*; -pub use self::symmetric_tridiagonal::*; pub use self::symmetric_eigen::*; +pub use self::symmetric_tridiagonal::*; diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index b68187518..a6f555f95 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use alga::general::ClosedNeg; use num::One; @@ -15,21 +15,17 @@ use storage::StorageMut; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator<(usize, usize), D>, + serde(bound( + serialize = "DefaultAllocator: Allocator<(usize, usize), D>, VectorN<(usize, usize), D>: Serialize" - ) - ) + )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator<(usize, usize), D>, + serde(bound( + deserialize = "DefaultAllocator: Allocator<(usize, usize), D>, VectorN<(usize, usize), D>: Deserialize<'de>" - ) - ) + )) )] #[derive(Clone, Debug)] pub struct PermutationSequence @@ -44,8 +40,7 @@ impl Copy for PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, VectorN<(usize, usize), D>: Copy, -{ -} +{} impl PermutationSequence where diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 0dea31a7e..487a06a4b 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -1,28 +1,36 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use alga::general::Real; +use allocator::{Allocator, Reallocator}; use base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Unit, VectorN}; +use constraint::{SameNumberOfRows, 
ShapeConstraint}; use dimension::{Dim, DimMin, DimMinimum, U1}; use storage::{Storage, StorageMut}; -use allocator::{Allocator, Reallocator}; -use constraint::{SameNumberOfRows, ShapeConstraint}; -use linalg::householder; use geometry::Reflection; +use linalg::householder; /// The QR decomposition of a general matrix. #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = "DefaultAllocator: Allocator + +#[cfg_attr( + feature = "serde-serialize", + serde(bound( + serialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: Serialize, - VectorN>: Serialize")))] -#[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = "DefaultAllocator: Allocator + + VectorN>: Serialize" + )) +)] +#[cfg_attr( + feature = "serde-serialize", + serde(bound( + deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: Deserialize<'de>, - VectorN>: Deserialize<'de>")))] + VectorN>: Deserialize<'de>" + )) +)] #[derive(Clone, Debug)] pub struct QR, C: Dim> where @@ -37,8 +45,7 @@ where DefaultAllocator: Allocator + Allocator>, MatrixMN: Copy, VectorN>: Copy, -{ -} +{} impl, C: Dim> QR where @@ -132,8 +139,8 @@ where ) where DimMinimum: DimMin>, - DefaultAllocator: Allocator> - + Reallocator, C>, + DefaultAllocator: + Allocator> + Reallocator, C>, { (self.q(), self.unpack_r()) } diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index fd385512b..0918bee98 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use alga::general::Real; use num_complex::Complex; @@ -19,21 +19,17 @@ use linalg::Hessenberg; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator, + serde(bound( + serialize = "DefaultAllocator: Allocator, MatrixN: Serialize" - ) - ) + )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator, + serde(bound( + deserialize = "DefaultAllocator: Allocator, MatrixN: Deserialize<'de>" - ) - ) + )) )] #[derive(Clone, Debug)] pub struct RealSchur @@ -48,8 +44,7 @@ impl Copy for RealSchur where DefaultAllocator: Allocator, MatrixN: Copy, -{ -} +{} impl RealSchur where @@ -180,10 +175,10 @@ where { let krows = cmp::min(k + 4, end + 1); let mut work = work.rows_mut(0, krows); - refl.reflect(&mut t.generic_slice_mut( - (k, k), - (U3, Dynamic::new(dim.value() - k)), - )); + refl.reflect( + &mut t + .generic_slice_mut((k, k), (U3, Dynamic::new(dim.value() - k))), + ); refl.reflect_rows( &mut t.generic_slice_mut((0, k), (Dynamic::new(krows), U3)), &mut work, @@ -214,10 +209,9 @@ where { let mut work = work.rows_mut(0, end + 1); - refl.reflect(&mut t.generic_slice_mut( - (m, m), - (U2, Dynamic::new(dim.value() - m)), - )); + refl.reflect( + &mut t.generic_slice_mut((m, m), (U2, Dynamic::new(dim.value() - m))), + ); refl.reflect_rows( &mut t.generic_slice_mut((0, m), (Dynamic::new(end + 1), U2)), &mut work, @@ -236,10 +230,9 @@ where (start, start), (U2, Dynamic::new(dim.value() - start)), )); - rot.rotate_rows(&mut t.generic_slice_mut( - (0, start), - (Dynamic::new(end + 1), U2), - )); + rot.rotate_rows( + &mut t.generic_slice_mut((0, start), (Dynamic::new(end + 1), U2)), + ); t[(end, start)] = N::zero(); if let Some(ref mut q) = q { @@ -433,9 +426,11 @@ where )); } } - None => if compute_q { - q = 
Some(MatrixN::identity_generic(dim, dim)); - }, + None => { + if compute_q { + q = Some(MatrixN::identity_generic(dim, dim)); + } + } }; Some((q, m)) @@ -558,7 +553,8 @@ where N::default_epsilon(), 0, false, - ).unwrap(); + ) + .unwrap(); if RealSchur::do_eigenvalues(&schur.1, &mut work) { Some(work) } else { @@ -581,7 +577,8 @@ where N::default_epsilon(), 0, false, - ).unwrap(); + ) + .unwrap(); let mut eig = unsafe { VectorN::new_uninitialized_generic(dim, U1) }; RealSchur::do_complex_eigenvalues(&schur.1, &mut eig); eig diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index 43e2946e5..3945af9cb 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use num_complex::Complex; use std::ops::MulAssign; @@ -20,31 +20,27 @@ use linalg::Bidiagonal; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator + + serde(bound( + serialize = "DefaultAllocator: Allocator + Allocator> + Allocator, C> + Allocator>, MatrixMN>: Serialize, MatrixMN, C>: Serialize, VectorN>: Serialize" - ) - ) + )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound( + deserialize = "DefaultAllocator: Allocator + Allocator> + Allocator, C> + Allocator>, MatrixMN>: Deserialize<'de>, MatrixMN, C>: Deserialize<'de>, VectorN>: Deserialize<'de>" - ) - ) + )) )] #[derive(Clone, Debug)] pub struct SVD, C: Dim> @@ -69,8 +65,7 @@ where MatrixMN>: Copy, MatrixMN, C>: Copy, VectorN>: Copy, -{ -} +{} impl, C: Dim> SVD where @@ -489,7 +484,8 @@ where /// right- and left- singular vectors have not been computed at construction-time. pub fn recompose(self) -> MatrixMN { let mut u = self.u.expect("SVD recomposition: U has not been computed."); - let v_t = self.v_t + let v_t = self + .v_t .expect("SVD recomposition: V^t has not been computed."); for i in 0..self.singular_values.len() { @@ -545,10 +541,12 @@ where eps >= N::zero(), "SVD solve: the epsilon must be non-negative." 
); - let u = self.u + let u = self + .u .as_ref() .expect("SVD solve: U has not been computed."); - let v_t = self.v_t + let v_t = self + .v_t .as_ref() .expect("SVD solve: V^t has not been computed."); diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index 1628f4ba6..f1e7b0265 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use num_complex::Complex; use std::ops::MulAssign; @@ -18,25 +18,21 @@ use linalg::SymmetricTridiagonal; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator + + serde(bound( + serialize = "DefaultAllocator: Allocator + Allocator, VectorN: Serialize, MatrixN: Serialize" - ) - ) + )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound( + deserialize = "DefaultAllocator: Allocator + Allocator, VectorN: Deserialize<'de>, MatrixN: Deserialize<'de>" - ) - ) + )) )] #[derive(Clone, Debug)] pub struct SymmetricEigen @@ -55,8 +51,7 @@ where DefaultAllocator: Allocator + Allocator, MatrixN: Copy, VectorN: Copy, -{ -} +{} impl SymmetricEigen where diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index a1af96d03..37d6b1949 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use alga::general::Real; use allocator::Allocator; @@ -13,25 +13,21 @@ use linalg::householder; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator + + serde(bound( + serialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: Serialize, VectorN>: Serialize" - ) - ) + )) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound( + deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: Deserialize<'de>, VectorN>: Deserialize<'de>" - ) - ) + )) )] #[derive(Clone, Debug)] pub struct SymmetricTridiagonal> @@ -47,8 +43,7 @@ where DefaultAllocator: Allocator + Allocator>, MatrixN: Copy, VectorN>: Copy, -{ -} +{} impl> SymmetricTridiagonal where diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs new file mode 100644 index 000000000..f172bbee1 --- /dev/null +++ b/src/sparse/cs_matrix.rs @@ -0,0 +1,485 @@ +use alga::general::{ClosedAdd, ClosedMul}; +use num::{One, Zero}; +use std::marker::PhantomData; +use std::ops::{Add, Mul, Range}; + +use allocator::Allocator; +use constraint::{AreMultipliable, DimEq, ShapeConstraint}; +use storage::{Storage, StorageMut}; +use {DefaultAllocator, Dim, Matrix, MatrixMN, Scalar, Vector, VectorN, U1}; + +pub trait CsStorage { + fn shape(&self) -> (R, C); + fn nvalues(&self) -> usize; + unsafe fn row_index_unchecked(&self, i: usize) -> usize; + unsafe fn get_value_unchecked(&self, i: usize) -> &N; + fn get_value(&self, i: usize) -> &N; + fn row_index(&self, i: usize) -> usize; + fn column_range(&self, j: usize) -> Range; +} + +pub trait CsStorageMut: CsStorage { + /* + /// Sets the length of this column without initializing its values and row indices. 
+ /// + /// If the given length is larger than the current one, uninitialized entries are + /// added at the end of the column `i`. This will effectively shift all the matrix entries + /// of the columns at indices `j` with `j > i`. Therefore this is a `O(n)` operation. + /// This is unsafe as the row indices on newly created components may end up being out + /// of bounds. + unsafe fn set_column_len(&mut self, i: usize, len: usize); + */ +} + +#[derive(Clone, Debug)] +pub struct CsVecStorage +where + DefaultAllocator: Allocator, +{ + shape: (R, C), + p: VectorN, + i: Vec, + vals: Vec, +} + +impl CsStorage for CsVecStorage +where + DefaultAllocator: Allocator, +{ + #[inline] + fn shape(&self) -> (R, C) { + self.shape + } + + #[inline] + fn nvalues(&self) -> usize { + self.vals.len() + } + + #[inline] + fn column_range(&self, j: usize) -> Range { + let end = if j + 1 == self.p.len() { + self.nvalues() + } else { + self.p[j + 1] + }; + + self.p[j]..end + } + + #[inline] + fn row_index(&self, i: usize) -> usize { + self.i[i] + } + + #[inline] + unsafe fn row_index_unchecked(&self, i: usize) -> usize { + *self.i.get_unchecked(i) + } + + #[inline] + unsafe fn get_value_unchecked(&self, i: usize) -> &N { + self.vals.get_unchecked(i) + } + + #[inline] + fn get_value(&self, i: usize) -> &N { + &self.vals[i] + } +} + +/* +pub struct CsSliceStorage<'a, N: Scalar, R: Dim, C: DimAdd> { + shape: (R, C), + p: VectorSlice>, + i: VectorSlice, + vals: VectorSlice, +}*/ + +/// A compressed sparse column matrix. +#[derive(Clone, Debug)] +pub struct CsMatrix = CsVecStorage> { + pub data: S, + _phantoms: PhantomData<(N, R, C)>, +} + +pub type CsVector = CsMatrix; + +impl CsMatrix +where + DefaultAllocator: Allocator, +{ + pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self { + let mut i = Vec::with_capacity(nvals); + unsafe { + i.set_len(nvals); + } + i.shrink_to_fit(); + + let mut vals = Vec::with_capacity(nvals); + unsafe { + vals.set_len(nvals); + } + vals.shrink_to_fit(); + + CsMatrix { + data: CsVecStorage { + shape: (nrows, ncols), + p: unsafe { VectorN::new_uninitialized_generic(ncols, U1) }, + i, + vals, + }, + _phantoms: PhantomData, + } + } +} + +fn cumsum(a: &mut VectorN, b: &mut VectorN) -> usize +where + DefaultAllocator: Allocator, +{ + assert!(a.len() == b.len()); + let mut sum = 0; + + for i in 0..a.len() { + b[i] = sum; + sum += a[i]; + a[i] = b[i]; + } + + sum +} + +impl> CsMatrix { + pub fn nvalues(&self) -> usize { + self.data.nvalues() + } + + pub fn transpose(&self) -> CsMatrix + where + DefaultAllocator: Allocator, + { + let (nrows, ncols) = self.data.shape(); + + let nvals = self.nvalues(); + let mut res = CsMatrix::new_uninitialized_generic(ncols, nrows, nvals); + let mut workspace = Vector::zeros_generic(nrows, U1); + + // Compute p. + for i in 0..nvals { + let row_id = self.data.row_index(i); + workspace[row_id] += 1; + } + + let _ = cumsum(&mut workspace, &mut res.data.p); + + // Fill the result. 
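+ // After the cumulative sum, `workspace[row]` holds the next write position for the
+ // entries of input row `row` (i.e. column `row` of the transposed matrix); it is
+ // advanced below as each value is copied.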
+ for j in 0..ncols.value() { + let column_idx = self.data.column_range(j); + + for vi in column_idx { + let row_id = self.data.row_index(vi); + let shift = workspace[row_id]; + + res.data.vals[shift] = *self.data.get_value(vi); + res.data.i[shift] = j; + workspace[row_id] += 1; + } + } + + res + } + + fn scatter( + &self, + j: usize, + beta: N, + timestamps: &mut [usize], + timestamp: usize, + workspace: &mut [N], + mut nz: usize, + res: &mut CsMatrix, + ) -> usize + where + N: ClosedAdd + ClosedMul, + DefaultAllocator: Allocator, + { + let column_idx = self.data.column_range(j); + + for vi in column_idx { + let i = self.data.row_index(vi); + let val = beta * *self.data.get_value(vi); + + if timestamps[i] < timestamp { + timestamps[i] = timestamp; + res.data.i[nz] = i; + nz += 1; + workspace[i] = val; + } else { + workspace[i] += val; + } + } + + nz + } +} + +/* +impl CsVector { + pub fn axpy(&mut self, alpha: N, x: CsVector, beta: N) { + // First, compute the number of non-zero entries. + let mut nnzero = 0; + + // Allocate a size large enough. + self.data.set_column_len(0, nnzero); + + // Fill with the axpy. + let mut i = self.nvalues(); + let mut j = x.nvalues(); + let mut k = nnzero - 1; + let mut rid1 = self.data.row_index(0, i - 1); + let mut rid2 = x.data.row_index(0, j - 1); + + while k > 0 { + if rid1 == rid2 { + self.data.set_row_index(0, k, rid1); + self[k] = alpha * x[j] + beta * self[k]; + i -= 1; + j -= 1; + } else if rid1 < rid2 { + self.data.set_row_index(0, k, rid1); + self[k] = beta * self[i]; + i -= 1; + } else { + self.data.set_row_index(0, k, rid2); + self[k] = alpha * x[j]; + j -= 1; + } + + k -= 1; + } + } +} +*/ + +impl> Vector { + pub fn axpy_cs(&mut self, alpha: N, x: &CsVector, beta: N) + where + S2: CsStorage, + ShapeConstraint: DimEq, + { + if beta.is_zero() { + for i in 0..x.nvalues() { + unsafe { + let k = x.data.row_index_unchecked(i); + let y = self.vget_unchecked_mut(k); + *y = alpha * *x.data.get_value_unchecked(i); + } + } + } else { + for i in 0..x.nvalues() { + unsafe { + let k = x.data.row_index_unchecked(i); + let y = self.vget_unchecked_mut(k); + *y = alpha * *x.data.get_value_unchecked(i) + beta * *y; + } + } + } + } + + /* + pub fn gemv_sparse(&mut self, alpha: N, a: &CsMatrix, x: &DVector, beta: N) + where + S2: CsStorage { + let col2 = a.column(0); + let val = unsafe { *x.vget_unchecked(0) }; + self.axpy_sparse(alpha * val, &col2, beta); + + for j in 1..ncols2 { + let col2 = a.column(j); + let val = unsafe { *x.vget_unchecked(j) }; + + self.axpy_sparse(alpha * val, &col2, N::one()); + } + } + */ +} + +impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Mul<&'b CsMatrix> + for &'a CsMatrix +where + N: Scalar + ClosedAdd + ClosedMul + Zero, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + S1: CsStorage, + S2: CsStorage, + ShapeConstraint: AreMultipliable, + DefaultAllocator: Allocator + Allocator + Allocator, +{ + type Output = CsMatrix; + + fn mul(self, rhs: &'b CsMatrix) -> CsMatrix { + let (nrows1, ncols1) = self.data.shape(); + let (nrows2, ncols2) = rhs.data.shape(); + assert_eq!( + ncols1.value(), + nrows2.value(), + "Mismatched dimensions for matrix multiplication." 
+ ); + + let mut res = + CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.nvalues() + rhs.nvalues()); + let mut timestamps = VectorN::zeros_generic(nrows1, U1); + let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; + let mut nz = 0; + + for j in 0..ncols2.value() { + res.data.p[j] = nz; + let column_idx = rhs.data.column_range(j); + let new_size_bound = nz + nrows1.value(); + res.data.i.resize(new_size_bound, 0); + res.data.vals.resize(new_size_bound, N::zero()); + + for vi in column_idx { + let i = rhs.data.row_index(vi); + nz = self.scatter( + i, + *rhs.data.get_value(vi), + timestamps.as_mut_slice(), + j + 1, + workspace.as_mut_slice(), + nz, + &mut res, + ); + } + + for p in res.data.p[j]..nz { + res.data.vals[p] = workspace[res.data.i[p]] + } + } + + res.data.i.truncate(nz); + res.data.i.shrink_to_fit(); + res.data.vals.truncate(nz); + res.data.vals.shrink_to_fit(); + res + } +} + +impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix> + for &'a CsMatrix +where + N: Scalar + ClosedAdd + ClosedMul + One, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + S1: CsStorage, + S2: CsStorage, + ShapeConstraint: DimEq + DimEq, + DefaultAllocator: Allocator + Allocator + Allocator, +{ + type Output = CsMatrix; + + fn add(self, rhs: &'b CsMatrix) -> CsMatrix { + let (nrows1, ncols1) = self.data.shape(); + let (nrows2, ncols2) = rhs.data.shape(); + assert_eq!( + (nrows1.value(), ncols1.value()), + (nrows2.value(), ncols2.value()), + "Mismatched dimensions for matrix sum." + ); + + let mut res = + CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.nvalues() + rhs.nvalues()); + let mut timestamps = VectorN::zeros_generic(nrows1, U1); + let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; + let mut nz = 0; + + for j in 0..ncols2.value() { + res.data.p[j] = nz; + + nz = self.scatter( + j, + N::one(), + timestamps.as_mut_slice(), + j + 1, + workspace.as_mut_slice(), + nz, + &mut res, + ); + + nz = rhs.scatter( + j, + N::one(), + timestamps.as_mut_slice(), + j + 1, + workspace.as_mut_slice(), + nz, + &mut res, + ); + + for p in res.data.p[j]..nz { + res.data.vals[p] = workspace[res.data.i[p]] + } + } + + res.data.i.truncate(nz); + res.data.i.shrink_to_fit(); + res.data.vals.truncate(nz); + res.data.vals.shrink_to_fit(); + res + } +} + +impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for MatrixMN +where + S: CsStorage, + DefaultAllocator: Allocator, +{ + fn from(m: CsMatrix) -> Self { + let (nrows, ncols) = m.data.shape(); + let mut res = MatrixMN::zeros_generic(nrows, ncols); + + for j in 0..ncols.value() { + let column_idx = m.data.column_range(j); + + for iv in column_idx { + let i = m.data.row_index(iv); + res[(i, j)] = *m.data.get_value(iv); + } + } + + res + } +} + +impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for CsMatrix +where + S: Storage, + DefaultAllocator: Allocator + Allocator, +{ + fn from(m: Matrix) -> Self { + let (nrows, ncols) = m.data.shape(); + let nvalues = m.iter().filter(|e| !e.is_zero()).count(); + let mut res = CsMatrix::new_uninitialized_generic(nrows, ncols, nvalues); + let mut nz = 0; + + for j in 0..ncols.value() { + let column = m.column(j); + res.data.p[j] = nz; + + for i in 0..nrows.value() { + if !column[i].is_zero() { + res.data.i[nz] = i; + res.data.vals[nz] = column[i]; + nz += 1; + } + } + } + + res + } +} diff --git a/src/sparse/mod.rs b/src/sparse/mod.rs new file mode 100644 index 000000000..f859e97fd --- /dev/null +++ b/src/sparse/mod.rs @@ -0,0 +1,3 @@ +pub use self::cs_matrix::CsMatrix; 
+ +mod cs_matrix; diff --git a/tests/core/abomonation.rs b/tests/core/abomonation.rs index ac53716b4..be3952cd6 100644 --- a/tests/core/abomonation.rs +++ b/tests/core/abomonation.rs @@ -1,9 +1,9 @@ -use rand::random; -use abomonation::{Abomonation, encode, decode}; +use abomonation::{decode, encode, Abomonation}; use na::{ - DMatrix, Matrix3x4, Point3, Translation3, Rotation3, Isometry3, Quaternion, - IsometryMatrix3, Similarity3, SimilarityMatrix3 + DMatrix, Isometry3, IsometryMatrix3, Matrix3x4, Point3, Quaternion, Rotation3, Similarity3, + SimilarityMatrix3, Translation3, }; +use rand::random; #[test] fn abomonate_dmatrix() { @@ -39,7 +39,9 @@ fn assert_encode_and_decode(original_data: T // Encode let mut bytes = Vec::new(); - unsafe { encode(&original_data, &mut bytes); } + unsafe { + encode(&original_data, &mut bytes); + } // Drop the original, so that dangling pointers are revealed by the test drop(original_data); diff --git a/tests/core/blas.rs b/tests/core/blas.rs index e37cb049e..00eac3a3f 100644 --- a/tests/core/blas.rs +++ b/tests/core/blas.rs @@ -1,7 +1,7 @@ #![cfg(feature = "arbitrary")] +use na::{DMatrix, DVector}; use std::cmp; -use na::{DVector, DMatrix}; quickcheck! { /* diff --git a/tests/core/conversion.rs b/tests/core/conversion.rs index 175c6f2bd..f8be85886 100644 --- a/tests/core/conversion.rs +++ b/tests/core/conversion.rs @@ -1,18 +1,13 @@ #![cfg(feature = "arbitrary")] use alga::linear::Transformation; use na::{ - self, + self, Affine3, Isometry3, Matrix2, Matrix2x3, Matrix2x4, Matrix2x5, Matrix2x6, Matrix3, + Matrix3x2, Matrix3x4, Matrix3x5, Matrix3x6, Matrix4, Matrix4x2, Matrix4x3, Matrix4x5, + Matrix4x6, Matrix5, Matrix5x2, Matrix5x3, Matrix5x4, Matrix5x6, Matrix6, Matrix6x2, Matrix6x3, + Matrix6x4, Matrix6x5, Point3, Projective3, Rotation3, RowVector1, RowVector2, RowVector3, + RowVector4, RowVector5, RowVector6, Similarity3, Transform3, Translation3, UnitQuaternion, Vector1, Vector2, Vector3, Vector4, Vector5, Vector6, - RowVector1, RowVector2, RowVector3, RowVector4, RowVector5, RowVector6, - Matrix2, Matrix3, Matrix4, Matrix5, Matrix6, - Matrix2x3, Matrix2x4, Matrix2x5, Matrix2x6, - Matrix3x2, Matrix3x4, Matrix3x5, Matrix3x6, - Matrix4x2, Matrix4x3, Matrix4x5, Matrix4x6, - Matrix5x2, Matrix5x3, Matrix5x4, Matrix5x6, - Matrix6x2, Matrix6x3, Matrix6x4, Matrix6x5, - Point3, Translation3, Isometry3, Similarity3, Affine3, - Projective3, Transform3, Rotation3, UnitQuaternion}; - +}; quickcheck!{ fn translation_conversion(t: Translation3, v: Vector3, p: Point3) -> bool { diff --git a/tests/core/mod.rs b/tests/core/mod.rs index e8bac456c..7e1f85919 100644 --- a/tests/core/mod.rs +++ b/tests/core/mod.rs @@ -1,10 +1,10 @@ +#[cfg(feature = "abomonation-serialize")] +mod abomonation; +mod blas; mod conversion; mod edition; mod matrix; mod matrix_slice; -mod blas; -mod serde; -#[cfg(feature = "abomonation-serialize")] -mod abomonation; #[cfg(feature = "mint")] mod mint; +mod serde; diff --git a/tests/core/serde.rs b/tests/core/serde.rs index 14456eacd..a65ca8b35 100644 --- a/tests/core/serde.rs +++ b/tests/core/serde.rs @@ -1,20 +1,11 @@ #![cfg(feature = "serde-serialize")] -use serde_json; -use rand; use na::{ - DMatrix, - Matrix3x4, - Point3, - Translation3, - Rotation3, - Isometry3, - IsometryMatrix3, - Similarity3, - SimilarityMatrix3, - Quaternion, - Unit, + DMatrix, Isometry3, IsometryMatrix3, Matrix3x4, Point3, Quaternion, Rotation3, Similarity3, + SimilarityMatrix3, Translation3, Unit, }; +use rand; +use serde_json; macro_rules! 
test_serde( ($($test: ident, $ty: ident);* $(;)*) => {$( diff --git a/tests/geometry/isometry.rs b/tests/geometry/isometry.rs index f7226bbfe..c72a2475b 100644 --- a/tests/geometry/isometry.rs +++ b/tests/geometry/isometry.rs @@ -1,10 +1,10 @@ #![cfg(feature = "arbitrary")] #![allow(non_snake_case)] -use alga::linear::{Transformation, ProjectiveTransformation}; +use alga::linear::{ProjectiveTransformation, Transformation}; use na::{ - Vector3, Point3, Rotation3, Isometry3, Translation3, UnitQuaternion, - Vector2, Point2, Rotation2, Isometry2, Translation2, UnitComplex + Isometry2, Isometry3, Point2, Point3, Rotation2, Rotation3, Translation2, Translation3, + UnitComplex, UnitQuaternion, Vector2, Vector3, }; quickcheck!( diff --git a/tests/geometry/point.rs b/tests/geometry/point.rs index 1b673f929..90c515873 100644 --- a/tests/geometry/point.rs +++ b/tests/geometry/point.rs @@ -1,6 +1,6 @@ #![cfg(feature = "arbitrary")] -use num::Zero; use na::{Point3, Vector3, Vector4}; +use num::Zero; #[test] fn point_ops() { @@ -8,20 +8,20 @@ fn point_ops() { let b = Point3::new(1.0, 2.0, 3.0); let c = Vector3::new(1.0, 2.0, 3.0); - assert_eq!( a - b, Vector3::zero()); + assert_eq!(a - b, Vector3::zero()); assert_eq!(&a - &b, Vector3::zero()); - assert_eq!( a - &b, Vector3::zero()); - assert_eq!(&a - b, Vector3::zero()); + assert_eq!(a - &b, Vector3::zero()); + assert_eq!(&a - b, Vector3::zero()); - assert_eq!( b - c, Point3::origin()); + assert_eq!(b - c, Point3::origin()); assert_eq!(&b - &c, Point3::origin()); - assert_eq!( b - &c, Point3::origin()); - assert_eq!(&b - c, Point3::origin()); + assert_eq!(b - &c, Point3::origin()); + assert_eq!(&b - c, Point3::origin()); - assert_eq!( b + c, 2.0 * a); + assert_eq!(b + c, 2.0 * a); assert_eq!(&b + &c, 2.0 * a); - assert_eq!( b + &c, 2.0 * a); - assert_eq!(&b + c, 2.0 * a); + assert_eq!(b + &c, 2.0 * a); + assert_eq!(&b + c, 2.0 * a); let mut a1 = a; let mut a2 = a; @@ -58,20 +58,19 @@ fn point_coordinates() { #[test] fn point_scale() { - let pt = Point3::new(1, 2, 3); + let pt = Point3::new(1, 2, 3); let expected = Point3::new(10, 20, 30); assert_eq!(pt * 10, expected); assert_eq!(&pt * 10, expected); assert_eq!(10 * pt, expected); assert_eq!(10 * &pt, expected); - } #[test] fn point_vector_sum() { - let pt = Point3::new(1, 2, 3); - let vec = Vector3::new(10, 20, 30); + let pt = Point3::new(1, 2, 3); + let vec = Vector3::new(10, 20, 30); let expected = Point3::new(11, 22, 33); assert_eq!(&pt + &vec, expected); @@ -82,15 +81,13 @@ fn point_vector_sum() { #[test] fn to_homogeneous() { - let a = Point3::new(1.0, 2.0, 3.0); + let a = Point3::new(1.0, 2.0, 3.0); let expected = Vector4::new(1.0, 2.0, 3.0, 1.0); assert_eq!(a.to_homogeneous(), expected); } -quickcheck!( - fn point_sub(pt1: Point3, pt2: Point3) -> bool { - let dpt = &pt2 - &pt1; - relative_eq!(pt2, pt1 + dpt, epsilon = 1.0e-7) - } -); +quickcheck!(fn point_sub(pt1: Point3, pt2: Point3) -> bool { + let dpt = &pt2 - &pt1; + relative_eq!(pt2, pt1 + dpt, epsilon = 1.0e-7) +}); diff --git a/tests/geometry/projection.rs b/tests/geometry/projection.rs index 7be0d49d9..17d04a138 100644 --- a/tests/geometry/projection.rs +++ b/tests/geometry/projection.rs @@ -1,9 +1,9 @@ -use na::{Perspective3, Orthographic3}; +use na::{Orthographic3, Perspective3}; #[test] fn perspective_inverse() { let proj = Perspective3::new(800.0 / 600.0, 3.14 / 2.0, 1.0, 1000.0); - let inv = proj.inverse(); + let inv = proj.inverse(); let id = inv * proj.unwrap(); @@ -13,17 +13,16 @@ fn perspective_inverse() { #[test] fn 
orthographic_inverse() { let proj = Orthographic3::new(1.0, 2.0, -3.0, -2.5, 10.0, 900.0); - let inv = proj.inverse(); + let inv = proj.inverse(); let id = inv * proj.unwrap(); assert!(id.is_identity(1.0e-7)); } - #[cfg(feature = "arbitrary")] mod quickcheck_tests { - use na::{Point3, Perspective3, Orthographic3}; + use na::{Orthographic3, Perspective3, Point3}; quickcheck!{ fn perspective_project_unproject(pt: Point3) -> bool { diff --git a/tests/geometry/quaternion.rs b/tests/geometry/quaternion.rs index 927835e26..c5b915549 100644 --- a/tests/geometry/quaternion.rs +++ b/tests/geometry/quaternion.rs @@ -1,8 +1,7 @@ #![cfg(feature = "arbitrary")] #![allow(non_snake_case)] -use na::{Unit, UnitQuaternion, Quaternion, Vector3, Point3, Rotation3}; - +use na::{Point3, Quaternion, Rotation3, Unit, UnitQuaternion, Vector3}; quickcheck!( /* diff --git a/tests/geometry/rotation.rs b/tests/geometry/rotation.rs index a2e91dec1..9bd3e590e 100644 --- a/tests/geometry/rotation.rs +++ b/tests/geometry/rotation.rs @@ -18,9 +18,9 @@ fn angle_3() { #[cfg(feature = "arbitrary")] mod quickcheck_tests { - use std::f64; use alga::general::Real; - use na::{self, Vector2, Vector3, Rotation2, Rotation3, Unit}; + use na::{self, Rotation2, Rotation3, Unit, Vector2, Vector3}; + use std::f64; quickcheck! { /* diff --git a/tests/geometry/similarity.rs b/tests/geometry/similarity.rs index feb8ba1b7..e9fde4667 100644 --- a/tests/geometry/similarity.rs +++ b/tests/geometry/similarity.rs @@ -1,8 +1,8 @@ #![cfg(feature = "arbitrary")] #![allow(non_snake_case)] -use alga::linear::{Transformation, ProjectiveTransformation}; -use na::{Vector3, Point3, Similarity3, Translation3, Isometry3, UnitQuaternion}; +use alga::linear::{ProjectiveTransformation, Transformation}; +use na::{Isometry3, Point3, Similarity3, Translation3, UnitQuaternion, Vector3}; quickcheck!( fn inverse_is_identity(i: Similarity3, p: Point3, v: Vector3) -> bool { diff --git a/tests/geometry/unit_complex.rs b/tests/geometry/unit_complex.rs index 65837d8b5..7da0d20c0 100644 --- a/tests/geometry/unit_complex.rs +++ b/tests/geometry/unit_complex.rs @@ -1,7 +1,7 @@ #![cfg(feature = "arbitrary")] #![allow(non_snake_case)] -use na::{Unit, UnitComplex, Vector2, Point2, Rotation2}; +use na::{Point2, Rotation2, Unit, UnitComplex, Vector2}; quickcheck!( diff --git a/tests/lib.rs b/tests/lib.rs index a2238f1e3..c32f40669 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -14,5 +14,7 @@ extern crate rand; extern crate serde_json; mod core; -mod linalg; mod geometry; +mod linalg; +#[cfg(feature = "sparse")] +mod sparse; diff --git a/tests/linalg/balancing.rs b/tests/linalg/balancing.rs index 73fd82304..401f672a4 100644 --- a/tests/linalg/balancing.rs +++ b/tests/linalg/balancing.rs @@ -2,8 +2,8 @@ use std::cmp; -use na::{DMatrix, Matrix4}; use na::balancing; +use na::{DMatrix, Matrix4}; quickcheck! { fn balancing_parlett_reinsch(n: usize) -> bool { diff --git a/tests/linalg/bidiagonal.rs b/tests/linalg/bidiagonal.rs index a96ce47c7..a7d5952fd 100644 --- a/tests/linalg/bidiagonal.rs +++ b/tests/linalg/bidiagonal.rs @@ -1,6 +1,6 @@ #![cfg(feature = "arbitrary")] -use na::{DMatrix, Matrix2, Matrix4, Matrix5x3, Matrix3x5}; +use na::{DMatrix, Matrix2, Matrix3x5, Matrix4, Matrix5x3}; quickcheck! 
{ fn bidiagonal(m: DMatrix) -> bool { diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index df213ad43..9fe086ff9 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -1,9 +1,9 @@ #![cfg(all(feature = "arbitrary", feature = "debug"))] -use std::cmp; -use na::{DMatrix, Matrix4x3, DVector, Vector4}; -use na::dimension::U4; use na::debug::RandomSDP; +use na::dimension::U4; +use na::{DMatrix, DVector, Matrix4x3, Vector4}; +use std::cmp; quickcheck! { fn cholesky(m: RandomSDP) -> bool { diff --git a/tests/linalg/eigen.rs b/tests/linalg/eigen.rs index ceb44bdfe..f609292a0 100644 --- a/tests/linalg/eigen.rs +++ b/tests/linalg/eigen.rs @@ -2,8 +2,8 @@ use na::DMatrix; #[cfg(feature = "arbitrary")] mod quickcheck_tests { - use std::cmp; use na::{DMatrix, Matrix2, Matrix3, Matrix4}; + use std::cmp; quickcheck! { fn symmetric_eigen(n: usize) -> bool { @@ -62,39 +62,58 @@ mod quickcheck_tests { // Test proposed on the issue #176 of rulinalg. #[test] fn symmetric_eigen_singular_24x24() { - let m = DMatrix::from_row_slice(24, 24, &[ - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 1.0, - 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0]); + let m = DMatrix::from_row_slice( + 24, + 24, + &[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 0.0, 0.0, + 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, + -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, + 0.0, 1.0, 1.0, 1.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 4.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, + 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, + 0.0, 0.0, 0.0, -4.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, + -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, + -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, + 0.0, + ], + ); 
let eig = m.clone().symmetric_eigen(); let recomp = eig.recompose(); - assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)); + assert!(relative_eq!( + m.lower_triangle(), + recomp.lower_triangle(), + epsilon = 1.0e-5 + )); } - // #[cfg(feature = "arbitrary")] // quickcheck! { // FIXME: full eigendecomposition is not implemented yet because of its complexity when some @@ -167,16 +186,16 @@ fn symmetric_eigen_singular_24x24() { // MatrixN: Display, // VectorN: Display { // let mv = &m * &eig.eigenvectors; -// +// // println!("eigenvalues: {}eigenvectors: {}", eig.eigenvalues, eig.eigenvectors); -// +// // let dim = m.nrows(); // for i in 0 .. dim { // let mut col = eig.eigenvectors.column_mut(i); // col *= eig.eigenvalues[i]; // } -// +// // println!("{}{:.5}{:.5}", m, mv, eig.eigenvectors); -// +// // relative_eq!(eig.eigenvectors, mv, epsilon = 1.0e-5) // } diff --git a/tests/linalg/hessenberg.rs b/tests/linalg/hessenberg.rs index 67206487b..22d62fbfa 100644 --- a/tests/linalg/hessenberg.rs +++ b/tests/linalg/hessenberg.rs @@ -1,12 +1,11 @@ #![cfg(feature = "arbitrary")] -use std::cmp; use na::{DMatrix, Matrix2, Matrix4}; +use std::cmp; #[test] fn hessenberg_simple() { - let m = Matrix2::new(1.0, 0.0, - 1.0, 3.0); + let m = Matrix2::new(1.0, 0.0, 1.0, 3.0); let hess = m.hessenberg(); let (p, h) = hess.unpack(); assert!(relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7)) diff --git a/tests/linalg/mod.rs b/tests/linalg/mod.rs index 6ce28085b..74a5e03ca 100644 --- a/tests/linalg/mod.rs +++ b/tests/linalg/mod.rs @@ -1,13 +1,13 @@ -mod inverse; -mod solve; -mod qr; +mod balancing; +mod bidiagonal; mod cholesky; +mod eigen; +mod full_piv_lu; mod hessenberg; +mod inverse; mod lu; -mod full_piv_lu; -mod bidiagonal; +mod qr; mod real_schur; +mod solve; mod svd; -mod balancing; mod tridiagonal; -mod eigen; diff --git a/tests/linalg/qr.rs b/tests/linalg/qr.rs index 394a8b42b..d7211623c 100644 --- a/tests/linalg/qr.rs +++ b/tests/linalg/qr.rs @@ -1,8 +1,7 @@ #![cfg(feature = "arbitrary")] +use na::{DMatrix, DVector, Matrix3x5, Matrix4, Matrix4x3, Matrix5x3, Vector4}; use std::cmp; -use na::{DMatrix, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5, - DVector, Vector4}; quickcheck! { fn qr(m: DMatrix) -> bool { diff --git a/tests/linalg/solve.rs b/tests/linalg/solve.rs index f960fb0ab..76dc05b51 100644 --- a/tests/linalg/solve.rs +++ b/tests/linalg/solve.rs @@ -3,7 +3,7 @@ use na::{Matrix4, Matrix4x5}; fn unzero_diagonal(a: &mut Matrix4) { - for i in 0 .. 4 { + for i in 0..4 { if a[(i, i)] < 1.0e-7 { a[(i, i)] = 1.0; } diff --git a/tests/linalg/svd.rs b/tests/linalg/svd.rs index 5b09c7c83..629b404d2 100644 --- a/tests/linalg/svd.rs +++ b/tests/linalg/svd.rs @@ -2,8 +2,10 @@ use na::{DMatrix, Matrix6}; #[cfg(feature = "arbitrary")] mod quickcheck_tests { + use na::{ + DMatrix, DVector, Matrix2, Matrix2x5, Matrix3, Matrix3x5, Matrix4, Matrix5x2, Matrix5x3, + }; use std::cmp; - use na::{DMatrix, Matrix2, Matrix3, Matrix4, Matrix5x2, Matrix5x3, Matrix2x5, Matrix3x5, DVector}; quickcheck! { fn svd(m: DMatrix) -> bool { @@ -143,31 +145,47 @@ mod quickcheck_tests { // Test proposed on the issue #176 of rulinalg. 
#[test] fn svd_singular() { - let m = DMatrix::from_row_slice(24, 24, &[ - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 1.0, - 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0]); + let m = DMatrix::from_row_slice( + 24, + 24, + &[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 0.0, 0.0, + 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, + -1.0, 0.0, 0.0, 
0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, + 0.0, 1.0, 1.0, 1.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 4.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, + 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, + 0.0, 0.0, 0.0, -4.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, + -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, + -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, + 0.0, + ], + ); let svd = m.clone().svd(true, true); let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); @@ -184,33 +202,48 @@ fn svd_singular() { // Same as the previous test but with one additional row. 
#[test] fn svd_singular_vertical() { - let m = DMatrix::from_row_slice(25, 24, &[ - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 1.0, - 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0]); - + let m = DMatrix::from_row_slice( + 25, + 24, + &[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 0.0, 0.0, + 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 
0.0, 1.0, 1.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, + -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, + 0.0, 1.0, 1.0, 1.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 4.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, + 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, + 0.0, 0.0, 0.0, -4.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, + -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, + -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, + 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, + ], + ); let svd = m.clone().svd(true, true); let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); @@ -223,31 +256,48 @@ fn svd_singular_vertical() { // Same as the previous test but with one additional column. 
#[test] fn svd_singular_horizontal() { - let m = DMatrix::from_row_slice(24, 25, &[ - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 1.0, 1.0, - 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]); + let m = DMatrix::from_row_slice( + 24, + 25, + &[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 0.0, + 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, 
0.0, 0.0, 1.0, 1.0, 1.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, + -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, -4.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, + 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, + 0.0, 0.0, -4.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, + -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, + 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, + 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + ], + ); let svd = m.clone().svd(true, true); let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); @@ -257,7 +307,6 @@ fn svd_singular_horizontal() { assert!(relative_eq!(m, &u * ds * &v_t, epsilon = 1.0e-5)); } - #[test] fn svd_zeros() { let m = DMatrix::from_element(10, 10, 0.0); @@ -283,47 +332,100 @@ fn svd_identity() { #[test] fn svd_with_delimited_subproblem() { let mut m = DMatrix::::from_element(10, 10, 0.0); - m[(0,0)] = 1.0; m[(0,1)] = 2.0; - m[(1,1)] = 0.0; m[(1,2)] = 3.0; - m[(2,2)] = 4.0; m[(2,3)] = 5.0; - m[(3,3)] = 6.0; m[(3,4)] = 0.0; - m[(4,4)] = 8.0; m[(3,5)] = 9.0; - m[(5,5)] = 10.0; m[(3,6)] = 11.0; - m[(6,6)] = 12.0; m[(3,7)] = 12.0; - m[(7,7)] = 14.0; m[(3,8)] = 13.0; - m[(8,8)] = 16.0; m[(3,9)] = 17.0; 
- m[(9,9)] = 18.0; + m[(0, 0)] = 1.0; + m[(0, 1)] = 2.0; + m[(1, 1)] = 0.0; + m[(1, 2)] = 3.0; + m[(2, 2)] = 4.0; + m[(2, 3)] = 5.0; + m[(3, 3)] = 6.0; + m[(3, 4)] = 0.0; + m[(4, 4)] = 8.0; + m[(3, 5)] = 9.0; + m[(5, 5)] = 10.0; + m[(3, 6)] = 11.0; + m[(6, 6)] = 12.0; + m[(3, 7)] = 12.0; + m[(7, 7)] = 14.0; + m[(3, 8)] = 13.0; + m[(8, 8)] = 16.0; + m[(3, 9)] = 17.0; + m[(9, 9)] = 18.0; let svd = m.clone().svd(true, true); assert!(relative_eq!(m, svd.recompose(), epsilon = 1.0e-7)); // Rectangular versions. let mut m = DMatrix::::from_element(15, 10, 0.0); - m[(0,0)] = 1.0; m[(0,1)] = 2.0; - m[(1,1)] = 0.0; m[(1,2)] = 3.0; - m[(2,2)] = 4.0; m[(2,3)] = 5.0; - m[(3,3)] = 6.0; m[(3,4)] = 0.0; - m[(4,4)] = 8.0; m[(3,5)] = 9.0; - m[(5,5)] = 10.0; m[(3,6)] = 11.0; - m[(6,6)] = 12.0; m[(3,7)] = 12.0; - m[(7,7)] = 14.0; m[(3,8)] = 13.0; - m[(8,8)] = 16.0; m[(3,9)] = 17.0; - m[(9,9)] = 18.0; + m[(0, 0)] = 1.0; + m[(0, 1)] = 2.0; + m[(1, 1)] = 0.0; + m[(1, 2)] = 3.0; + m[(2, 2)] = 4.0; + m[(2, 3)] = 5.0; + m[(3, 3)] = 6.0; + m[(3, 4)] = 0.0; + m[(4, 4)] = 8.0; + m[(3, 5)] = 9.0; + m[(5, 5)] = 10.0; + m[(3, 6)] = 11.0; + m[(6, 6)] = 12.0; + m[(3, 7)] = 12.0; + m[(7, 7)] = 14.0; + m[(3, 8)] = 13.0; + m[(8, 8)] = 16.0; + m[(3, 9)] = 17.0; + m[(9, 9)] = 18.0; let svd = m.clone().svd(true, true); assert!(relative_eq!(m, svd.recompose(), epsilon = 1.0e-7)); let svd = m.transpose().svd(true, true); - assert!(relative_eq!(m.transpose(), svd.recompose(), epsilon = 1.0e-7)); + assert!(relative_eq!( + m.transpose(), + svd.recompose(), + epsilon = 1.0e-7 + )); } #[test] fn svd_fail() { let m = Matrix6::new( - 0.9299319121545955, 0.9955870335651049, 0.8824725266413644, 0.28966880207132295, 0.06102723649846409, 0.9311880746048009, - 0.5938395242304351, 0.8398522876024204, 0.06672831951963198, 0.9941213119963099, 0.9431846038057834, 0.8159885168706427, - 0.9121962883152357, 0.6471119669367571, 0.4823309702814407, 0.6420516076705516, 0.7731203925207113, 0.7424069470756647, - 0.07311092531259344, 0.5579247949052946, 0.14518764691585773, 0.03502980663114896, 0.7991329455957719, 0.4929930019965745, - 0.12293810556077789, 0.6617084679545999, 0.9002240700227326, 0.027153062135304884, 0.3630189466989524, 0.18207502727558866, - 0.843196731466686, 0.08951878746549924, 0.7533450877576973, 0.009558876499740077, 0.9429679490873482, 0.9355764454129878); + 0.9299319121545955, + 0.9955870335651049, + 0.8824725266413644, + 0.28966880207132295, + 0.06102723649846409, + 0.9311880746048009, + 0.5938395242304351, + 0.8398522876024204, + 0.06672831951963198, + 0.9941213119963099, + 0.9431846038057834, + 0.8159885168706427, + 0.9121962883152357, + 0.6471119669367571, + 0.4823309702814407, + 0.6420516076705516, + 0.7731203925207113, + 0.7424069470756647, + 0.07311092531259344, + 0.5579247949052946, + 0.14518764691585773, + 0.03502980663114896, + 0.7991329455957719, + 0.4929930019965745, + 0.12293810556077789, + 0.6617084679545999, + 0.9002240700227326, + 0.027153062135304884, + 0.3630189466989524, + 0.18207502727558866, + 0.843196731466686, + 0.08951878746549924, + 0.7533450877576973, + 0.009558876499740077, + 0.9429679490873482, + 0.9355764454129878, + ); let svd = m.clone().svd(true, true); println!("Singular values: {}", svd.singular_values); println!("u: {:.5}", svd.u.unwrap()); From 9fa3e7a769cfac41257959bb8c22bc72690f3e20 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Sat, 20 Oct 2018 22:27:18 +0200 Subject: [PATCH 02/25] Implement CsMatrix: axpy_cs, transpose, Add and Mul. 
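
The storage follows the usual compressed sparse column layout: a vector `p` of column start offsets, the row indices `i`, and the values `vals`; the sparse product and sum both go through the dense `scatter` workspace. A rough usage sketch, mirroring the tests added under tests/sparse/ by this patch (the function name below is made up for illustration, and the new `sparse` cargo feature must be enabled):

    use na::{CsMatrix, CsVector, Matrix3x4, Matrix3x5, Matrix4x5, Vector5};

    #[test]
    fn cs_usage_sketch() {
        // Dense -> sparse conversion goes through the new `From` impls.
        let m1 = Matrix3x4::new(0.0, 1.0, 4.0, 0.0,
                                5.0, 6.0, 0.0, 8.0,
                                9.0, 10.0, 11.0, 12.0);
        let m2 = Matrix4x5::new(5.0, 6.0, 0.0, 8.0, 15.0,
                                9.0, 10.0, 11.0, 12.0, 0.0,
                                0.0, 0.0, 13.0, 0.0, 0.0,
                                0.0, 1.0, 4.0, 0.0, 14.0);
        let sm1: CsMatrix<_, _, _> = m1.into();
        let sm2: CsMatrix<_, _, _> = m2.into();

        // The sparse product is implemented on references; convert back to a
        // dense matrix to inspect or compare the result.
        assert_eq!(Matrix3x5::from(&sm1 * &sm2), m1 * m2);

        // `axpy_cs` accumulates a sparse vector into a dense one:
        // v1 <- alpha * x + beta * v1.
        let mut v1 = Vector5::new(1.0, 2.0, 3.0, 4.0, 5.0);
        let v2 = Vector5::new(10.0, 0.0, 30.0, 0.0, 50.0);
        let expected = 5.0 * v2 + 10.0 * v1;

        let x: CsVector<_, _> = v2.into();
        v1.axpy_cs(5.0, &x, 10.0);
        assert_eq!(v1, expected);
    }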
--- Cargo.toml | 1 + src/lib.rs | 12 ++++-- src/sparse/cs_matrix.rs | 7 +++- src/sparse/mod.rs | 2 +- tests/sparse/cs_construction.rs | 1 + tests/sparse/cs_conversion.rs | 18 +++++++++ tests/sparse/cs_matrix.rs | 18 +++++++++ tests/sparse/cs_ops.rs | 65 +++++++++++++++++++++++++++++++++ tests/sparse/mod.rs | 4 ++ 9 files changed, 122 insertions(+), 6 deletions(-) create mode 100644 tests/sparse/cs_construction.rs create mode 100644 tests/sparse/cs_conversion.rs create mode 100644 tests/sparse/cs_matrix.rs create mode 100644 tests/sparse/cs_ops.rs create mode 100644 tests/sparse/mod.rs diff --git a/Cargo.toml b/Cargo.toml index 87524dc46..88857c552 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,7 @@ stdweb = [ "rand/stdweb" ] arbitrary = [ "quickcheck" ] serde-serialize = [ "serde", "serde_derive", "num-complex/serde" ] abomonation-serialize = [ "abomonation" ] +sparse = [ ] debug = [ ] alloc = [ ] diff --git a/src/lib.rs b/src/lib.rs index b46e2dd66..ad81c9c83 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -81,10 +81,12 @@ an optimized set of tools for computer graphics and physics. Those features incl #![deny(non_upper_case_globals)] #![deny(unused_qualifications)] #![deny(unused_results)] -#![deny(missing_docs)] +#![warn(missing_docs)] // FIXME: deny this #![warn(incoherent_fundamental_impls)] -#![doc(html_favicon_url = "http://nalgebra.org/img/favicon.ico", - html_root_url = "http://nalgebra.org/rustdoc")] +#![doc( + html_favicon_url = "http://nalgebra.org/img/favicon.ico", + html_root_url = "http://nalgebra.org/rustdoc" +)] #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(all(feature = "alloc", not(feature = "std")), feature(alloc))] @@ -126,6 +128,8 @@ pub mod base; pub mod debug; pub mod geometry; pub mod linalg; +#[cfg(feature = "sparse")] +pub mod sparse; #[cfg(feature = "std")] #[deprecated( @@ -135,6 +139,8 @@ pub use base as core; pub use base::*; pub use geometry::*; pub use linalg::*; +#[cfg(feature = "sparse")] +pub use sparse::*; use std::cmp::{self, Ordering, PartialOrd}; diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index f172bbee1..e2ddf9ded 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -103,7 +103,7 @@ pub struct CsMatrix = CsVecStor _phantoms: PhantomData<(N, R, C)>, } -pub type CsVector = CsMatrix; +pub type CsVector> = CsMatrix; impl CsMatrix where @@ -277,11 +277,14 @@ impl> Vect } } } else { + // Needed to be sure even components not present on `x` are multiplied. 
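+            // For example, with `self = [1, 2]`, a sparse `x` storing only the
+            // entry `x[0] = 10`, `alpha = 1` and `beta = 2`, the result must be
+            // `[12, 4]`: without this pre-scaling, `self[1]` would stay at `2`
+            // instead of becoming `beta * 2 = 4`.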
+ *self *= beta; + for i in 0..x.nvalues() { unsafe { let k = x.data.row_index_unchecked(i); let y = self.vget_unchecked_mut(k); - *y = alpha * *x.data.get_value_unchecked(i) + beta * *y; + *y += alpha * *x.data.get_value_unchecked(i); } } } diff --git a/src/sparse/mod.rs b/src/sparse/mod.rs index f859e97fd..bfaabb3a9 100644 --- a/src/sparse/mod.rs +++ b/src/sparse/mod.rs @@ -1,3 +1,3 @@ -pub use self::cs_matrix::CsMatrix; +pub use self::cs_matrix::{CsMatrix, CsVector}; mod cs_matrix; diff --git a/tests/sparse/cs_construction.rs b/tests/sparse/cs_construction.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/tests/sparse/cs_construction.rs @@ -0,0 +1 @@ + diff --git a/tests/sparse/cs_conversion.rs b/tests/sparse/cs_conversion.rs new file mode 100644 index 000000000..ac0cc0f95 --- /dev/null +++ b/tests/sparse/cs_conversion.rs @@ -0,0 +1,18 @@ +#![cfg_attr(rustfmt, rustfmt_skip)] + +use na::{Matrix4x5, CsMatrix}; + +#[test] +fn cs_from_to_matrix() { + let m = Matrix4x5::new( + 5.0, 6.0, 0.0, 8.0, 15.0, + 9.0, 10.0, 11.0, 12.0, 0.0, + 0.0, 0.0, 13.0, 0.0, 0.0, + 0.0, 1.0, 4.0, 0.0, 14.0, + ); + + let cs: CsMatrix<_, _, _> = m.into(); + let m2: Matrix4x5<_> = cs.into(); + + assert_eq!(m2, m); +} diff --git a/tests/sparse/cs_matrix.rs b/tests/sparse/cs_matrix.rs new file mode 100644 index 000000000..0115f56aa --- /dev/null +++ b/tests/sparse/cs_matrix.rs @@ -0,0 +1,18 @@ +#![cfg_attr(rustfmt, rustfmt_skip)] + +use na::{Matrix4x5, Matrix5x4, CsMatrix}; + +#[test] +fn cs_transpose() { + let m = Matrix4x5::new( + 4.0, 1.0, 4.0, 0.0, 9.0, + 5.0, 6.0, 0.0, 8.0, 10.0, + 9.0, 10.0, 11.0, 12.0, 0.0, + 0.0, 0.0, 1.0, 0.0, 10.0 + ); + + let cs: CsMatrix<_, _, _> = m.into(); + let cs_transposed: Matrix5x4<_> = cs.transpose().into(); + + assert_eq!(cs_transposed, m.transpose()) +} diff --git a/tests/sparse/cs_ops.rs b/tests/sparse/cs_ops.rs new file mode 100644 index 000000000..6cee0050a --- /dev/null +++ b/tests/sparse/cs_ops.rs @@ -0,0 +1,65 @@ +#![cfg_attr(rustfmt, rustfmt_skip)] + + +use na::{Matrix3x4, Matrix4x5, Matrix3x5, CsMatrix, Vector5, CsVector}; + +#[test] +fn axpy_cs() { + let mut v1 = Vector5::new(1.0, 2.0, 3.0, 4.0, 5.0); + let v2 = Vector5::new(10.0, 0.0, 30.0, 0.0, 50.0); + let expected = 5.0 * v2 + 10.0 * v1; + + let cs: CsVector<_, _> = v2.into(); + v1.axpy_cs(5.0, &cs, 10.0); + + assert_eq!(v1, expected) +} + + +#[test] +fn cs_mat_mul() { + let m1 = Matrix3x4::new( + 0.0, 1.0, 4.0, 0.0, + 5.0, 6.0, 0.0, 8.0, + 9.0, 10.0, 11.0, 12.0, + ); + + let m2 = Matrix4x5::new( + 5.0, 6.0, 0.0, 8.0, 15.0, + 9.0, 10.0, 11.0, 12.0, 0.0, + 0.0, 0.0, 13.0, 0.0, 0.0, + 0.0, 1.0, 4.0, 0.0, 14.0, + ); + + let sm1: CsMatrix<_, _, _> = m1.into(); + let sm2: CsMatrix<_, _, _> = m2.into(); + + let mul = &sm1 * &sm2; + + assert_eq!(Matrix3x5::from(mul), m1 * m2); +} + + +#[test] +fn cs_mat_add() { + let m1 = Matrix4x5::new( + 4.0, 1.0, 4.0, 0.0, 9.0, + 5.0, 6.0, 0.0, 8.0, 10.0, + 9.0, 10.0, 11.0, 12.0, 0.0, + 0.0, 0.0, 1.0, 0.0, 10.0 + ); + + let m2 = Matrix4x5::new( + 0.0, 1.0, 4.0, 0.0, 14.0, + 5.0, 6.0, 0.0, 8.0, 15.0, + 9.0, 10.0, 11.0, 12.0, 0.0, + 0.0, 0.0, 13.0, 0.0, 0.0, + ); + + let sm1: CsMatrix<_, _, _> = m1.into(); + let sm2: CsMatrix<_, _, _> = m2.into(); + + let mul = &sm1 + &sm2; + + assert_eq!(Matrix4x5::from(mul), m1 + m2); +} diff --git a/tests/sparse/mod.rs b/tests/sparse/mod.rs new file mode 100644 index 000000000..79cdaa0ec --- /dev/null +++ b/tests/sparse/mod.rs @@ -0,0 +1,4 @@ +mod cs_construction; +mod cs_conversion; +mod cs_matrix; +mod cs_ops; From 
dc8edeceb208469bafb94c73ada00b4dae0ddb3b Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Sun, 21 Oct 2018 07:42:32 +0200 Subject: [PATCH 03/25] Use an iterator to iterate through a column entries. --- src/sparse/cs_matrix.rs | 94 ++++++++++++++++++++++++----------------- 1 file changed, 55 insertions(+), 39 deletions(-) diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index e2ddf9ded..2666425a0 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -1,21 +1,30 @@ use alga::general::{ClosedAdd, ClosedMul}; use num::{One, Zero}; +use std::iter; use std::marker::PhantomData; use std::ops::{Add, Mul, Range}; +use std::slice; use allocator::Allocator; use constraint::{AreMultipliable, DimEq, ShapeConstraint}; use storage::{Storage, StorageMut}; use {DefaultAllocator, Dim, Matrix, MatrixMN, Scalar, Vector, VectorN, U1}; -pub trait CsStorage { +// FIXME: this structure exists for now only because impl trait +// cannot be used for trait method return types. +pub trait CsStorageIter<'a, N, R, C = U1> { + type ColumnEntries: Iterator; + + fn column_entries(&'a self, j: usize) -> Self::ColumnEntries; +} + +pub trait CsStorage: for<'a> CsStorageIter<'a, N, R, C> { fn shape(&self) -> (R, C); fn nvalues(&self) -> usize; unsafe fn row_index_unchecked(&self, i: usize) -> usize; unsafe fn get_value_unchecked(&self, i: usize) -> &N; fn get_value(&self, i: usize) -> &N; fn row_index(&self, i: usize) -> usize; - fn column_range(&self, j: usize) -> Range; } pub trait CsStorageMut: CsStorage { @@ -24,10 +33,8 @@ pub trait CsStorageMut: CsStorage { /// /// If the given length is larger than the current one, uninitialized entries are /// added at the end of the column `i`. This will effectively shift all the matrix entries - /// of the columns at indices `j` with `j > i`. Therefore this is a `O(n)` operation. - /// This is unsafe as the row indices on newly created components may end up being out - /// of bounds. - unsafe fn set_column_len(&mut self, i: usize, len: usize); + /// of the columns at indices `j` with `j > i`. + fn set_column_len(&mut self, i: usize, len: usize); */ } @@ -42,20 +49,10 @@ where vals: Vec, } -impl CsStorage for CsVecStorage +impl CsVecStorage where DefaultAllocator: Allocator, { - #[inline] - fn shape(&self) -> (R, C) { - self.shape - } - - #[inline] - fn nvalues(&self) -> usize { - self.vals.len() - } - #[inline] fn column_range(&self, j: usize) -> Range { let end = if j + 1 == self.p.len() { @@ -66,6 +63,38 @@ where self.p[j]..end } +} + +impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage +where + DefaultAllocator: Allocator, +{ + type ColumnEntries = + iter::Zip>, iter::Cloned>>; + + #[inline] + fn column_entries(&'a self, j: usize) -> Self::ColumnEntries { + let rng = self.column_range(j); + self.i[rng.clone()] + .iter() + .cloned() + .zip(self.vals[rng].iter().cloned()) + } +} + +impl CsStorage for CsVecStorage +where + DefaultAllocator: Allocator, +{ + #[inline] + fn shape(&self) -> (R, C) { + self.shape + } + + #[inline] + fn nvalues(&self) -> usize { + self.vals.len() + } #[inline] fn row_index(&self, i: usize) -> usize { @@ -175,13 +204,10 @@ impl> CsMatrix { // Fill the result. 
for j in 0..ncols.value() { - let column_idx = self.data.column_range(j); - - for vi in column_idx { - let row_id = self.data.row_index(vi); + for (row_id, value) in self.data.column_entries(j) { let shift = workspace[row_id]; - res.data.vals[shift] = *self.data.get_value(vi); + res.data.vals[shift] = value; res.data.i[shift] = j; workspace[row_id] += 1; } @@ -204,19 +230,14 @@ impl> CsMatrix { N: ClosedAdd + ClosedMul, DefaultAllocator: Allocator, { - let column_idx = self.data.column_range(j); - - for vi in column_idx { - let i = self.data.row_index(vi); - let val = beta * *self.data.get_value(vi); - + for (i, val) in self.data.column_entries(j) { if timestamps[i] < timestamp { timestamps[i] = timestamp; res.data.i[nz] = i; nz += 1; - workspace[i] = val; + workspace[i] = val * beta; } else { - workspace[i] += val; + workspace[i] += val * beta; } } @@ -340,16 +361,14 @@ where for j in 0..ncols2.value() { res.data.p[j] = nz; - let column_idx = rhs.data.column_range(j); let new_size_bound = nz + nrows1.value(); res.data.i.resize(new_size_bound, 0); res.data.vals.resize(new_size_bound, N::zero()); - for vi in column_idx { - let i = rhs.data.row_index(vi); + for (i, val) in rhs.data.column_entries(j) { nz = self.scatter( i, - *rhs.data.get_value(vi), + val, timestamps.as_mut_slice(), j + 1, workspace.as_mut_slice(), @@ -447,11 +466,8 @@ where let mut res = MatrixMN::zeros_generic(nrows, ncols); for j in 0..ncols.value() { - let column_idx = m.data.column_range(j); - - for iv in column_idx { - let i = m.data.row_index(iv); - res[(i, j)] = *m.data.get_value(iv); + for (i, val) in m.data.column_entries(j) { + res[(i, j)] = val; } } From e4e5659405f49573c78b7a80565035305cb5e751 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Mon, 22 Oct 2018 17:55:13 +0200 Subject: [PATCH 04/25] Add lower triangular solve with dense right-hand-side. --- src/sparse/cs_matrix.rs | 130 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 128 insertions(+), 2 deletions(-) diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 2666425a0..43e778634 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -6,9 +6,9 @@ use std::ops::{Add, Mul, Range}; use std::slice; use allocator::Allocator; -use constraint::{AreMultipliable, DimEq, ShapeConstraint}; +use constraint::{AreMultipliable, DimEq, ShapeConstraint, SameNumberOfRows}; use storage::{Storage, StorageMut}; -use {DefaultAllocator, Dim, Matrix, MatrixMN, Scalar, Vector, VectorN, U1}; +use {Real, DefaultAllocator, Dim, Matrix, MatrixMN, Scalar, Vector, VectorN, U1}; // FIXME: this structure exists for now only because impl trait // cannot be used for trait method return types. 
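// Aside (illustrative sketch, not part of this patch): the hunk below
// implements column-oriented forward substitution. On a dense
// lower-triangular matrix the same order of operations, without the
// sparsity bookkeeping, reads:
fn solve_lower_triangular_dense(l: &[Vec<f64>], b: &mut [f64]) -> bool {
    let n = b.len();
    for j in 0..n {
        if l[j][j] == 0.0 {
            return false; // singular: zero on the diagonal
        }
        b[j] /= l[j][j];
        // Subtract x_j times the strictly lower part of column j.
        for i in (j + 1)..n {
            b[i] -= b[j] * l[i][j];
        }
    }
    true
}
// The sparse version added below does exactly this, but walks only the stored
// entries of column j and must first locate the diagonal entry among them.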
@@ -245,6 +245,132 @@ impl> CsMatrix { } } +impl> CsMatrix { + pub fn solve_lower_triangular( + &self, + b: &Matrix, + ) -> Option> + where + S2: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { + let mut b = b.clone_owned(); + if self.solve_lower_triangular_mut(&mut b) { + Some(b) + } else { + None + } + } + + pub fn tr_solve_lower_triangular( + &self, + b: &Matrix, + ) -> Option> + where + S2: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { + let mut b = b.clone_owned(); + if self.tr_solve_lower_triangular_mut(&mut b) { + Some(b) + } else { + None + } + } + + pub fn solve_lower_triangular_mut( + &self, + b: &mut Matrix, + ) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { + let (nrows, ncols) = self.data.shape(); + assert_eq!(nrows.value(), ncols.value(), "The matrix must be square."); + assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions."); + + for j2 in 0..b.ncols() { + let mut b = b.column_mut(j2); + + for j in 0..ncols.value() { + let mut column = self.data.column_entries(j); + let mut diag_found = false; + + while let Some((i, val)) = column.next() { + if i == j { + if val.is_zero() { + return false; + } + + b[j] /= val; + diag_found = true; + break; + } + } + + if !diag_found { + return false; + } + + for (i, val) in column { + b[i] -= b[j] * val; + } + } + } + + true + } + + + pub fn tr_solve_lower_triangular_mut( + &self, + b: &mut Matrix, + ) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { + let (nrows, ncols) = self.data.shape(); + assert_eq!(nrows.value(), ncols.value(), "The matrix must be square."); + assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions."); + + for j2 in 0..b.ncols() { + let mut b = b.column_mut(j2); + + for j in (0..ncols.value()).rev() { + let mut column = self.data.column_entries(j); + let mut diag = None; + + while let Some((i, val)) = column.next() { + if i == j { + if val.is_zero() { + return false; + } + + diag = Some(val); + break; + } + } + + if let Some(diag) = diag { + for (i, val) in column { + b[j] -= val * b[i]; + } + + b[j] /= diag; + } else { + return false; + } + } + } + + true + } +} + /* impl CsVector { pub fn axpy(&mut self, alpha: N, x: CsVector, beta: N) { From 34b20dc2917a9f2b945081efd3465ead7a7679b2 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 23 Oct 2018 18:18:05 +0200 Subject: [PATCH 05/25] Add lower triangular solve with sparse right-hand-side. --- src/sparse/cs_matrix.rs | 204 ++++++++++++++++++++++++++++---------- tests/sparse/cs_linalg.rs | 106 ++++++++++++++++++++ tests/sparse/mod.rs | 1 + 3 files changed, 257 insertions(+), 54 deletions(-) create mode 100644 tests/sparse/cs_linalg.rs diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 43e778634..4809fb54d 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -6,9 +6,9 @@ use std::ops::{Add, Mul, Range}; use std::slice; use allocator::Allocator; -use constraint::{AreMultipliable, DimEq, ShapeConstraint, SameNumberOfRows}; +use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; use storage::{Storage, StorageMut}; -use {Real, DefaultAllocator, Dim, Matrix, MatrixMN, Scalar, Vector, VectorN, U1}; +use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; // FIXME: this structure exists for now only because impl trait // cannot be used for trait method return types. 
@@ -20,11 +20,12 @@ pub trait CsStorageIter<'a, N, R, C = U1> { pub trait CsStorage: for<'a> CsStorageIter<'a, N, R, C> { fn shape(&self) -> (R, C); - fn nvalues(&self) -> usize; unsafe fn row_index_unchecked(&self, i: usize) -> usize; unsafe fn get_value_unchecked(&self, i: usize) -> &N; fn get_value(&self, i: usize) -> &N; fn row_index(&self, i: usize) -> usize; + fn column_range(&self, i: usize) -> Range; + fn len(&self) -> usize; } pub trait CsStorageMut: CsStorage { @@ -49,21 +50,7 @@ where vals: Vec, } -impl CsVecStorage -where - DefaultAllocator: Allocator, -{ - #[inline] - fn column_range(&self, j: usize) -> Range { - let end = if j + 1 == self.p.len() { - self.nvalues() - } else { - self.p[j + 1] - }; - - self.p[j]..end - } -} +impl CsVecStorage where DefaultAllocator: Allocator {} impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage where @@ -92,7 +79,7 @@ where } #[inline] - fn nvalues(&self) -> usize { + fn len(&self) -> usize { self.vals.len() } @@ -115,6 +102,17 @@ where fn get_value(&self, i: usize) -> &N { &self.vals[i] } + + #[inline] + fn column_range(&self, j: usize) -> Range { + let end = if j + 1 == self.p.len() { + self.len() + } else { + self.p[j + 1] + }; + + self.p[j]..end + } } /* @@ -154,7 +152,7 @@ where CsMatrix { data: CsVecStorage { shape: (nrows, ncols), - p: unsafe { VectorN::new_uninitialized_generic(ncols, U1) }, + p: VectorN::zeros_generic(ncols, U1), i, vals, }, @@ -180,8 +178,8 @@ where } impl> CsMatrix { - pub fn nvalues(&self) -> usize { - self.data.nvalues() + pub fn len(&self) -> usize { + self.data.len() } pub fn transpose(&self) -> CsMatrix @@ -190,7 +188,7 @@ impl> CsMatrix { { let (nrows, ncols) = self.data.shape(); - let nvals = self.nvalues(); + let nvals = self.len(); let mut res = CsMatrix::new_uninitialized_generic(ncols, nrows, nvals); let mut workspace = Vector::zeros_generic(nrows, U1); @@ -250,27 +248,27 @@ impl> CsMatrix { &self, b: &Matrix, ) -> Option> - where - S2: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows, + where + S2: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, { let mut b = b.clone_owned(); - if self.solve_lower_triangular_mut(&mut b) { - Some(b) - } else { - None - } + if self.solve_lower_triangular_mut(&mut b) { + Some(b) + } else { + None + } } pub fn tr_solve_lower_triangular( &self, b: &Matrix, ) -> Option> - where - S2: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows, + where + S2: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, { let mut b = b.clone_owned(); if self.tr_solve_lower_triangular_mut(&mut b) { @@ -284,9 +282,9 @@ impl> CsMatrix { &self, b: &mut Matrix, ) -> bool - where - S2: StorageMut, - ShapeConstraint: SameNumberOfRows, + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, { let (nrows, ncols) = self.data.shape(); assert_eq!(nrows.value(), ncols.value(), "The matrix must be square."); @@ -324,14 +322,13 @@ impl> CsMatrix { true } - pub fn tr_solve_lower_triangular_mut( &self, b: &mut Matrix, ) -> bool - where - S2: StorageMut, - ShapeConstraint: SameNumberOfRows, + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, { let (nrows, ncols) = self.data.shape(); assert_eq!(nrows.value(), ncols.value(), "The matrix must be square."); @@ -369,6 +366,106 @@ impl> CsMatrix { true } + + pub fn solve_lower_triangular_cs( + &self, + b: &CsVector, + ) -> Option> + where + S2: CsStorage, + DefaultAllocator: Allocator + Allocator + Allocator, + 
ShapeConstraint: SameNumberOfRows, + { + let mut reach = Vec::new(); + self.lower_triangular_reach(b, &mut reach); + let mut workspace = unsafe { VectorN::new_uninitialized_generic(b.data.shape().0, U1) }; + + for i in reach.iter().cloned() { + workspace[i] = N::zero(); + } + + for (i, val) in b.data.column_entries(0) { + workspace[i] = val; + } + + for j in reach.iter().cloned().rev() { + let mut column = self.data.column_entries(j); + let mut diag_found = false; + + while let Some((i, val)) = column.next() { + if i == j { + if val.is_zero() { + break; + } + + workspace[j] /= val; + diag_found = true; + break; + } + } + + if !diag_found { + return None; + } + + for (i, val) in column { + workspace[i] -= workspace[j] * val; + } + } + + // Copy the result into a sparse vector. + let mut result = CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len()); + + for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) { + *val = workspace[*i]; + } + + result.data.i = reach; + Some(result) + } + + fn lower_triangular_reach(&self, b: &CsVector, xi: &mut Vec) + where + S2: CsStorage, + DefaultAllocator: Allocator, + { + let mut visited = VectorN::repeat_generic(self.data.shape().1, U1, false); + let mut stack = Vec::new(); + + for i in b.data.column_range(0) { + let row_index = b.data.row_index(i); + + if !visited[row_index] { + let rng = self.data.column_range(row_index); + stack.push((row_index, rng)); + self.lower_triangular_dfs(visited.as_mut_slice(), &mut stack, xi); + } + } + } + + fn lower_triangular_dfs( + &self, + visited: &mut [bool], + stack: &mut Vec<(usize, Range)>, + xi: &mut Vec, + ) { + 'recursion: while let Some((j, rng)) = stack.pop() { + visited[j] = true; + + for i in rng.clone() { + let row_id = self.data.row_index(i); + if row_id > j && !visited[row_id] { + stack.push((j, (i + 1)..rng.end)); + + let row_id = self.data.row_index(i); + stack.push((row_id, self.data.column_range(row_id))); + continue 'recursion; + } + } + + xi.push(j) + } + } } /* @@ -381,8 +478,8 @@ impl CsVector { self.data.set_column_len(0, nnzero); // Fill with the axpy. - let mut i = self.nvalues(); - let mut j = x.nvalues(); + let mut i = self.len(); + let mut j = x.len(); let mut k = nnzero - 1; let mut rid1 = self.data.row_index(0, i - 1); let mut rid2 = x.data.row_index(0, j - 1); @@ -416,7 +513,7 @@ impl> Vect ShapeConstraint: DimEq, { if beta.is_zero() { - for i in 0..x.nvalues() { + for i in 0..x.len() { unsafe { let k = x.data.row_index_unchecked(i); let y = self.vget_unchecked_mut(k); @@ -427,7 +524,7 @@ impl> Vect // Needed to be sure even components not present on `x` are multiplied. *self *= beta; - for i in 0..x.nvalues() { + for i in 0..x.len() { unsafe { let k = x.data.row_index_unchecked(i); let y = self.vget_unchecked_mut(k); @@ -479,8 +576,7 @@ where "Mismatched dimensions for matrix multiplication." ); - let mut res = - CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.nvalues() + rhs.nvalues()); + let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); let mut timestamps = VectorN::zeros_generic(nrows1, U1); let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; let mut nz = 0; @@ -540,8 +636,7 @@ where "Mismatched dimensions for matrix sum." 
); - let mut res = - CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.nvalues() + rhs.nvalues()); + let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); let mut timestamps = VectorN::zeros_generic(nrows1, U1); let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; let mut nz = 0; @@ -582,9 +677,10 @@ where } } +use std::fmt::Debug; impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for MatrixMN where - S: CsStorage, + S: CsStorage + Debug, DefaultAllocator: Allocator, { fn from(m: CsMatrix) -> Self { @@ -608,8 +704,8 @@ where { fn from(m: Matrix) -> Self { let (nrows, ncols) = m.data.shape(); - let nvalues = m.iter().filter(|e| !e.is_zero()).count(); - let mut res = CsMatrix::new_uninitialized_generic(nrows, ncols, nvalues); + let len = m.iter().filter(|e| !e.is_zero()).count(); + let mut res = CsMatrix::new_uninitialized_generic(nrows, ncols, len); let mut nz = 0; for j in 0..ncols.value() { diff --git a/tests/sparse/cs_linalg.rs b/tests/sparse/cs_linalg.rs new file mode 100644 index 000000000..d65b633c9 --- /dev/null +++ b/tests/sparse/cs_linalg.rs @@ -0,0 +1,106 @@ +#![cfg_attr(rustfmt, rustfmt_skip)] + +use na::{CsMatrix, CsVector, Matrix5, Vector5}; + + +#[test] +fn cs_lower_triangular_solve() { + let a = Matrix5::new( + 4.0, 1.0, 4.0, 0.0, 9.0, + 5.0, 6.0, 0.0, 8.0, 10.0, + 9.0, 10.0, 11.0, 12.0, 0.0, + 0.0, -8.0, 3.0, 5.0, 9.0, + 0.0, 0.0, 1.0, 0.0, -10.0 + ); + let b = Vector5::new(1.0, 2.0, 3.0, 4.0, 5.0); + + let cs_a: CsMatrix<_, _, _> = a.into(); + + assert_eq!(cs_a.solve_lower_triangular(&b), a.solve_lower_triangular(&b)); +} + +#[test] +fn cs_tr_lower_triangular_solve() { + let a = Matrix5::new( + 4.0, 1.0, 4.0, 0.0, 9.0, + 5.0, 6.0, 0.0, 8.0, 10.0, + 9.0, 10.0, 11.0, 12.0, 0.0, + 0.0, -8.0, 3.0, 5.0, 9.0, + 0.0, 0.0, 1.0, 0.0, -10.0 + ); + let b = Vector5::new(1.0, 2.0, 3.0, 4.0, 5.0); + + let cs_a: CsMatrix<_, _, _> = a.into(); + + assert!(cs_a.tr_solve_lower_triangular(&b).is_some()); + assert_eq!(cs_a.tr_solve_lower_triangular(&b), a.tr_solve_lower_triangular(&b)); + + // Singular case. 
+ let a = Matrix5::new( + 4.0, 1.0, 4.0, 0.0, 9.0, + 5.0, 6.0, 0.0, 8.0, 10.0, + 9.0, 10.0, 0.0, 12.0, 0.0, + 0.0, -8.0, 3.0, 5.0, 9.0, + 0.0, 0.0, 1.0, 0.0, -10.0 + ); + let cs_a: CsMatrix<_, _, _> = a.into(); + + assert!(cs_a.tr_solve_lower_triangular(&b).is_none()); +} + + +#[test] +fn cs_lower_triangular_solve_cs() { + let a = Matrix5::new( + 4.0, 1.0, 4.0, 0.0, 9.0, + 5.0, 6.0, 0.0, 8.0, 10.0, + 9.0, 10.0, 11.0, 12.0, 0.0, + 0.0, -8.0, 3.0, 5.0, 9.0, + 0.0, 0.0, 1.0, 0.0, -10.0 + ); + let b1 = Vector5::zeros(); + let b2 = Vector5::new(1.0, 2.0, 3.0, 4.0, 5.0); + let b3 = Vector5::new(1.0, 0.0, 0.0, 4.0, 0.0); + let b4 = Vector5::new(0.0, 1.0, 0.0, 4.0, 5.0); + let b5 = Vector5::x(); + let b6 = Vector5::y(); + let b7 = Vector5::z(); + let b8 = Vector5::w(); + let b9 = Vector5::a(); + + let cs_a: CsMatrix<_, _, _> = a.into(); + let cs_b1: CsVector<_, _> = Vector5::zeros().into(); + let cs_b2: CsVector<_, _> = Vector5::new(1.0, 2.0, 3.0, 4.0, 5.0).into(); + let cs_b3: CsVector<_, _> = Vector5::new(1.0, 0.0, 0.0, 4.0, 0.0).into(); + let cs_b4: CsVector<_, _> = Vector5::new(0.0, 1.0, 0.0, 4.0, 5.0).into(); + let cs_b5: CsVector<_, _> = Vector5::x().into(); + let cs_b6: CsVector<_, _> = Vector5::y().into(); + let cs_b7: CsVector<_, _> = Vector5::z().into(); + let cs_b8: CsVector<_, _> = Vector5::w().into(); + let cs_b9: CsVector<_, _> = Vector5::a().into(); + + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b1).map(|v| v.into()), a.solve_lower_triangular(&b1)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b5).map(|v| v.into()), a.solve_lower_triangular(&b5)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b6).map(|v| v.into()), a.solve_lower_triangular(&b6)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b7).map(|v| v.into()), a.solve_lower_triangular(&b7)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b8).map(|v| v.into()), a.solve_lower_triangular(&b8)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b9).map(|v| v.into()), a.solve_lower_triangular(&b9)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b2).map(|v| v.into()), a.solve_lower_triangular(&b2)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b3).map(|v| v.into()), a.solve_lower_triangular(&b3)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b4).map(|v| v.into()), a.solve_lower_triangular(&b4)); + + + // Singular case. + let a = Matrix5::new( + 4.0, 1.0, 4.0, 0.0, 9.0, + 5.0, 0.0, 0.0, 8.0, 10.0, + 9.0, 10.0, 0.0, 12.0, 0.0, + 0.0, -8.0, 3.0, 5.0, 9.0, + 0.0, 0.0, 1.0, 0.0, -10.0 + ); + let cs_a: CsMatrix<_, _, _> = a.into(); + + assert!(cs_a.solve_lower_triangular_cs(&cs_b2).is_none()); + assert!(cs_a.solve_lower_triangular_cs(&cs_b3).is_none()); + assert!(cs_a.solve_lower_triangular_cs(&cs_b4).is_none()); +} diff --git a/tests/sparse/mod.rs b/tests/sparse/mod.rs index 79cdaa0ec..6c4d6d459 100644 --- a/tests/sparse/mod.rs +++ b/tests/sparse/mod.rs @@ -2,3 +2,4 @@ mod cs_construction; mod cs_conversion; mod cs_matrix; mod cs_ops; +mod cs_linalg; \ No newline at end of file From 7ecbacacda90879ded1ebd7712cbbd965b603a2d Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 30 Oct 2018 07:46:34 +0100 Subject: [PATCH 06/25] Add elimination tree computation. 
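The elimination tree links every column `i` of a symmetric matrix to
`parent(i) = min { k > i : l_ki != 0 }`, where `L` is the Cholesky factor;
`column_counts` then walks this tree through `reach` to predict the number of
entries in each column of the factor. The `elimination_tree` function below
follows the usual ancestor/path-compression construction, with
`usize::max_value()` marking a root. A dense sketch of the same loop, written
against a plain 0/1 pattern so it can be checked by hand (the function and its
argument type are illustrative, not part of the patch):

    /// `pattern[i][k]` is true iff the symmetric matrix has a stored entry at
    /// `(i, k)`. Returns `parent`, with `usize::max_value()` marking a root.
    fn etree_dense(pattern: &[Vec<bool>]) -> Vec<usize> {
        let none = usize::max_value();
        let n = pattern.len();
        let mut parent = vec![none; n];
        let mut ancestor = vec![none; n];

        for k in 0..n {
            // Only entries above the diagonal of column `k` matter.
            for i in (0..k).filter(|&i| pattern[i][k]) {
                // Walk up the tree built so far, compressing paths towards `k`.
                let mut node = i;
                while node != none && node < k {
                    let next = ancestor[node];
                    ancestor[node] = k;
                    if next == none {
                        parent[node] = k; // `node` was a root: attach it to `k`.
                    }
                    node = next;
                }
            }
        }

        parent
    }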
--- src/sparse/cs_matrix.rs | 527 +---------------------------- src/sparse/cs_matrix_analysis.rs | 184 ++++++++++ src/sparse/cs_matrix_cholesky.rs | 1 + src/sparse/cs_matrix_conversion.rs | 59 ++++ src/sparse/cs_matrix_ops.rs | 251 ++++++++++++++ src/sparse/cs_matrix_solve.rs | 235 +++++++++++++ src/sparse/mod.rs | 7 +- 7 files changed, 749 insertions(+), 515 deletions(-) create mode 100644 src/sparse/cs_matrix_analysis.rs create mode 100644 src/sparse/cs_matrix_cholesky.rs create mode 100644 src/sparse/cs_matrix_conversion.rs create mode 100644 src/sparse/cs_matrix_ops.rs create mode 100644 src/sparse/cs_matrix_solve.rs diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 4809fb54d..1a28932c0 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -14,7 +14,9 @@ use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1} // cannot be used for trait method return types. pub trait CsStorageIter<'a, N, R, C = U1> { type ColumnEntries: Iterator; + type ColumnRowIndices: Iterator; + fn column_row_indices(&'a self, j: usize) -> Self::ColumnRowIndices; fn column_entries(&'a self, j: usize) -> Self::ColumnEntries; } @@ -44,10 +46,10 @@ pub struct CsVecStorage where DefaultAllocator: Allocator, { - shape: (R, C), - p: VectorN, - i: Vec, - vals: Vec, + pub(crate) shape: (R, C), + pub(crate) p: VectorN, + pub(crate) i: Vec, + pub(crate) vals: Vec, } impl CsVecStorage where DefaultAllocator: Allocator {} @@ -58,6 +60,7 @@ where { type ColumnEntries = iter::Zip>, iter::Cloned>>; + type ColumnRowIndices = iter::Cloned>; #[inline] fn column_entries(&'a self, j: usize) -> Self::ColumnEntries { @@ -67,6 +70,12 @@ where .cloned() .zip(self.vals[rng].iter().cloned()) } + + #[inline] + fn column_row_indices(&'a self, j: usize) -> Self::ColumnRowIndices { + let rng = self.column_range(j); + self.i[rng.clone()].iter().cloned() + } } impl CsStorage for CsVecStorage @@ -213,514 +222,4 @@ impl> CsMatrix { res } - - fn scatter( - &self, - j: usize, - beta: N, - timestamps: &mut [usize], - timestamp: usize, - workspace: &mut [N], - mut nz: usize, - res: &mut CsMatrix, - ) -> usize - where - N: ClosedAdd + ClosedMul, - DefaultAllocator: Allocator, - { - for (i, val) in self.data.column_entries(j) { - if timestamps[i] < timestamp { - timestamps[i] = timestamp; - res.data.i[nz] = i; - nz += 1; - workspace[i] = val * beta; - } else { - workspace[i] += val * beta; - } - } - - nz - } -} - -impl> CsMatrix { - pub fn solve_lower_triangular( - &self, - b: &Matrix, - ) -> Option> - where - S2: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows, - { - let mut b = b.clone_owned(); - if self.solve_lower_triangular_mut(&mut b) { - Some(b) - } else { - None - } - } - - pub fn tr_solve_lower_triangular( - &self, - b: &Matrix, - ) -> Option> - where - S2: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows, - { - let mut b = b.clone_owned(); - if self.tr_solve_lower_triangular_mut(&mut b) { - Some(b) - } else { - None - } - } - - pub fn solve_lower_triangular_mut( - &self, - b: &mut Matrix, - ) -> bool - where - S2: StorageMut, - ShapeConstraint: SameNumberOfRows, - { - let (nrows, ncols) = self.data.shape(); - assert_eq!(nrows.value(), ncols.value(), "The matrix must be square."); - assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions."); - - for j2 in 0..b.ncols() { - let mut b = b.column_mut(j2); - - for j in 0..ncols.value() { - let mut column = self.data.column_entries(j); - let mut diag_found = false; - - while let 
Some((i, val)) = column.next() { - if i == j { - if val.is_zero() { - return false; - } - - b[j] /= val; - diag_found = true; - break; - } - } - - if !diag_found { - return false; - } - - for (i, val) in column { - b[i] -= b[j] * val; - } - } - } - - true - } - - pub fn tr_solve_lower_triangular_mut( - &self, - b: &mut Matrix, - ) -> bool - where - S2: StorageMut, - ShapeConstraint: SameNumberOfRows, - { - let (nrows, ncols) = self.data.shape(); - assert_eq!(nrows.value(), ncols.value(), "The matrix must be square."); - assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions."); - - for j2 in 0..b.ncols() { - let mut b = b.column_mut(j2); - - for j in (0..ncols.value()).rev() { - let mut column = self.data.column_entries(j); - let mut diag = None; - - while let Some((i, val)) = column.next() { - if i == j { - if val.is_zero() { - return false; - } - - diag = Some(val); - break; - } - } - - if let Some(diag) = diag { - for (i, val) in column { - b[j] -= val * b[i]; - } - - b[j] /= diag; - } else { - return false; - } - } - } - - true - } - - pub fn solve_lower_triangular_cs( - &self, - b: &CsVector, - ) -> Option> - where - S2: CsStorage, - DefaultAllocator: Allocator + Allocator + Allocator, - ShapeConstraint: SameNumberOfRows, - { - let mut reach = Vec::new(); - self.lower_triangular_reach(b, &mut reach); - let mut workspace = unsafe { VectorN::new_uninitialized_generic(b.data.shape().0, U1) }; - - for i in reach.iter().cloned() { - workspace[i] = N::zero(); - } - - for (i, val) in b.data.column_entries(0) { - workspace[i] = val; - } - - for j in reach.iter().cloned().rev() { - let mut column = self.data.column_entries(j); - let mut diag_found = false; - - while let Some((i, val)) = column.next() { - if i == j { - if val.is_zero() { - break; - } - - workspace[j] /= val; - diag_found = true; - break; - } - } - - if !diag_found { - return None; - } - - for (i, val) in column { - workspace[i] -= workspace[j] * val; - } - } - - // Copy the result into a sparse vector. - let mut result = CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len()); - - for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) { - *val = workspace[*i]; - } - - result.data.i = reach; - Some(result) - } - - fn lower_triangular_reach(&self, b: &CsVector, xi: &mut Vec) - where - S2: CsStorage, - DefaultAllocator: Allocator, - { - let mut visited = VectorN::repeat_generic(self.data.shape().1, U1, false); - let mut stack = Vec::new(); - - for i in b.data.column_range(0) { - let row_index = b.data.row_index(i); - - if !visited[row_index] { - let rng = self.data.column_range(row_index); - stack.push((row_index, rng)); - self.lower_triangular_dfs(visited.as_mut_slice(), &mut stack, xi); - } - } - } - - fn lower_triangular_dfs( - &self, - visited: &mut [bool], - stack: &mut Vec<(usize, Range)>, - xi: &mut Vec, - ) { - 'recursion: while let Some((j, rng)) = stack.pop() { - visited[j] = true; - - for i in rng.clone() { - let row_id = self.data.row_index(i); - if row_id > j && !visited[row_id] { - stack.push((j, (i + 1)..rng.end)); - - let row_id = self.data.row_index(i); - stack.push((row_id, self.data.column_range(row_id))); - continue 'recursion; - } - } - - xi.push(j) - } - } -} - -/* -impl CsVector { - pub fn axpy(&mut self, alpha: N, x: CsVector, beta: N) { - // First, compute the number of non-zero entries. - let mut nnzero = 0; - - // Allocate a size large enough. - self.data.set_column_len(0, nnzero); - - // Fill with the axpy. 
- let mut i = self.len(); - let mut j = x.len(); - let mut k = nnzero - 1; - let mut rid1 = self.data.row_index(0, i - 1); - let mut rid2 = x.data.row_index(0, j - 1); - - while k > 0 { - if rid1 == rid2 { - self.data.set_row_index(0, k, rid1); - self[k] = alpha * x[j] + beta * self[k]; - i -= 1; - j -= 1; - } else if rid1 < rid2 { - self.data.set_row_index(0, k, rid1); - self[k] = beta * self[i]; - i -= 1; - } else { - self.data.set_row_index(0, k, rid2); - self[k] = alpha * x[j]; - j -= 1; - } - - k -= 1; - } - } -} -*/ - -impl> Vector { - pub fn axpy_cs(&mut self, alpha: N, x: &CsVector, beta: N) - where - S2: CsStorage, - ShapeConstraint: DimEq, - { - if beta.is_zero() { - for i in 0..x.len() { - unsafe { - let k = x.data.row_index_unchecked(i); - let y = self.vget_unchecked_mut(k); - *y = alpha * *x.data.get_value_unchecked(i); - } - } - } else { - // Needed to be sure even components not present on `x` are multiplied. - *self *= beta; - - for i in 0..x.len() { - unsafe { - let k = x.data.row_index_unchecked(i); - let y = self.vget_unchecked_mut(k); - *y += alpha * *x.data.get_value_unchecked(i); - } - } - } - } - - /* - pub fn gemv_sparse(&mut self, alpha: N, a: &CsMatrix, x: &DVector, beta: N) - where - S2: CsStorage { - let col2 = a.column(0); - let val = unsafe { *x.vget_unchecked(0) }; - self.axpy_sparse(alpha * val, &col2, beta); - - for j in 1..ncols2 { - let col2 = a.column(j); - let val = unsafe { *x.vget_unchecked(j) }; - - self.axpy_sparse(alpha * val, &col2, N::one()); - } - } - */ -} - -impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Mul<&'b CsMatrix> - for &'a CsMatrix -where - N: Scalar + ClosedAdd + ClosedMul + Zero, - R1: Dim, - C1: Dim, - R2: Dim, - C2: Dim, - S1: CsStorage, - S2: CsStorage, - ShapeConstraint: AreMultipliable, - DefaultAllocator: Allocator + Allocator + Allocator, -{ - type Output = CsMatrix; - - fn mul(self, rhs: &'b CsMatrix) -> CsMatrix { - let (nrows1, ncols1) = self.data.shape(); - let (nrows2, ncols2) = rhs.data.shape(); - assert_eq!( - ncols1.value(), - nrows2.value(), - "Mismatched dimensions for matrix multiplication." - ); - - let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut timestamps = VectorN::zeros_generic(nrows1, U1); - let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; - let mut nz = 0; - - for j in 0..ncols2.value() { - res.data.p[j] = nz; - let new_size_bound = nz + nrows1.value(); - res.data.i.resize(new_size_bound, 0); - res.data.vals.resize(new_size_bound, N::zero()); - - for (i, val) in rhs.data.column_entries(j) { - nz = self.scatter( - i, - val, - timestamps.as_mut_slice(), - j + 1, - workspace.as_mut_slice(), - nz, - &mut res, - ); - } - - for p in res.data.p[j]..nz { - res.data.vals[p] = workspace[res.data.i[p]] - } - } - - res.data.i.truncate(nz); - res.data.i.shrink_to_fit(); - res.data.vals.truncate(nz); - res.data.vals.shrink_to_fit(); - res - } -} - -impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix> - for &'a CsMatrix -where - N: Scalar + ClosedAdd + ClosedMul + One, - R1: Dim, - C1: Dim, - R2: Dim, - C2: Dim, - S1: CsStorage, - S2: CsStorage, - ShapeConstraint: DimEq + DimEq, - DefaultAllocator: Allocator + Allocator + Allocator, -{ - type Output = CsMatrix; - - fn add(self, rhs: &'b CsMatrix) -> CsMatrix { - let (nrows1, ncols1) = self.data.shape(); - let (nrows2, ncols2) = rhs.data.shape(); - assert_eq!( - (nrows1.value(), ncols1.value()), - (nrows2.value(), ncols2.value()), - "Mismatched dimensions for matrix sum." 
- ); - - let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut timestamps = VectorN::zeros_generic(nrows1, U1); - let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; - let mut nz = 0; - - for j in 0..ncols2.value() { - res.data.p[j] = nz; - - nz = self.scatter( - j, - N::one(), - timestamps.as_mut_slice(), - j + 1, - workspace.as_mut_slice(), - nz, - &mut res, - ); - - nz = rhs.scatter( - j, - N::one(), - timestamps.as_mut_slice(), - j + 1, - workspace.as_mut_slice(), - nz, - &mut res, - ); - - for p in res.data.p[j]..nz { - res.data.vals[p] = workspace[res.data.i[p]] - } - } - - res.data.i.truncate(nz); - res.data.i.shrink_to_fit(); - res.data.vals.truncate(nz); - res.data.vals.shrink_to_fit(); - res - } -} - -use std::fmt::Debug; -impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for MatrixMN -where - S: CsStorage + Debug, - DefaultAllocator: Allocator, -{ - fn from(m: CsMatrix) -> Self { - let (nrows, ncols) = m.data.shape(); - let mut res = MatrixMN::zeros_generic(nrows, ncols); - - for j in 0..ncols.value() { - for (i, val) in m.data.column_entries(j) { - res[(i, j)] = val; - } - } - - res - } -} - -impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for CsMatrix -where - S: Storage, - DefaultAllocator: Allocator + Allocator, -{ - fn from(m: Matrix) -> Self { - let (nrows, ncols) = m.data.shape(); - let len = m.iter().filter(|e| !e.is_zero()).count(); - let mut res = CsMatrix::new_uninitialized_generic(nrows, ncols, len); - let mut nz = 0; - - for j in 0..ncols.value() { - let column = m.column(j); - res.data.p[j] = nz; - - for i in 0..nrows.value() { - if !column[i].is_zero() { - res.data.i[nz] = i; - res.data.vals[nz] = column[i]; - nz += 1; - } - } - } - - res - } } diff --git a/src/sparse/cs_matrix_analysis.rs b/src/sparse/cs_matrix_analysis.rs new file mode 100644 index 000000000..629904a4f --- /dev/null +++ b/src/sparse/cs_matrix_analysis.rs @@ -0,0 +1,184 @@ +use alga::general::{ClosedAdd, ClosedMul}; +use num::{One, Zero}; +use std::iter; +use std::marker::PhantomData; +use std::ops::{Add, Mul, Range}; +use std::slice; + +use allocator::Allocator; +use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; +use sparse::{CsMatrix, CsStorage, CsVector}; +use storage::{Storage, StorageMut}; +use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; + +pub struct SymbolicAnalysis { + pinv: Vec, + q: Vec, + elimination_tree: Vec, + cp: Vec, + leftmost: Vec, + m2: usize, + lnz: usize, + unz: usize, +} + +#[derive(Copy, Clone, Debug)] +pub struct EliminationTreeNode { + parent: usize, +} + +impl EliminationTreeNode { + pub fn root() -> Self { + EliminationTreeNode { + parent: usize::max_value(), + } + } + + pub fn with_parent(parent: usize) -> Self { + EliminationTreeNode { parent } + } + + pub fn is_root(&self) -> bool { + self.parent == usize::max_value() + } + + pub fn parent(&self) -> usize { + self.parent + } +} + +impl> CsMatrix { + fn elimination_tree(&self) -> Vec { + let (nrows, ncols) = self.data.shape(); + assert_eq!( + nrows.value(), + ncols.value(), + "The matrix `self` must be square to compute its elimination tree." 
+ ); + + let mut forest: Vec<_> = iter::repeat(EliminationTreeNode::root()) + .take(nrows.value()) + .collect(); + let mut ancestor: Vec<_> = iter::repeat(usize::max_value()) + .take(nrows.value()) + .collect(); + + for k in 0..nrows.value() { + for irow in self.data.column_row_indices(k) { + let mut i = irow; + + while i < k { + let i_ancestor = ancestor[i]; + ancestor[i] = k; + + if i_ancestor == usize::max_value() { + forest[i] = EliminationTreeNode::with_parent(k); + break; + } + + i = i_ancestor; + } + } + } + + forest + } + + fn reach( + &self, + j: usize, + max_j: usize, + tree: &[EliminationTreeNode], + marks: &mut Vec, + out: &mut Vec, + ) { + marks.clear(); + marks.resize(tree.len(), false); + + for irow in self.data.column_row_indices(j) { + let mut curr = irow; + while curr != usize::max_value() && curr <= max_j && !marks[curr] { + marks[curr] = true; + out.push(curr); + curr = tree[curr].parent; + } + } + } + + fn column_counts(&self, tree: &[EliminationTreeNode]) -> Vec { + let len = self.data.shape().0.value(); + let mut counts: Vec<_> = iter::repeat(0).take(len).collect(); + let mut reach = Vec::new(); + let mut marks = Vec::new(); + + for i in 0..len { + self.reach(i, i, tree, &mut marks, &mut reach); + + for j in reach.drain(..) { + counts[j] += 1; + } + } + + counts + } + + fn tree_postorder(tree: &[EliminationTreeNode]) -> Vec { + // FIXME: avoid all those allocations? + let mut first_child: Vec<_> = iter::repeat(usize::max_value()).take(tree.len()).collect(); + let mut other_children: Vec<_> = + iter::repeat(usize::max_value()).take(tree.len()).collect(); + + // Build the children list from the parent list. + // The set of children of the node `i` is given by the linked list + // starting at `first_child[i]`. The nodes of this list are then: + // { first_child[i], other_children[first_child[i]], other_children[other_children[first_child[i]], ... } + for (i, node) in tree.iter().enumerate() { + if !node.is_root() { + let brother = first_child[node.parent]; + first_child[node.parent] = i; + other_children[i] = brother; + } + } + + let mut stack = Vec::with_capacity(tree.len()); + let mut postorder = Vec::with_capacity(tree.len()); + + for (i, node) in tree.iter().enumerate() { + if node.is_root() { + Self::dfs( + i, + &mut first_child, + &other_children, + &mut stack, + &mut postorder, + ) + } + } + + postorder + } + + fn dfs( + i: usize, + first_child: &mut [usize], + other_children: &[usize], + stack: &mut Vec, + result: &mut Vec, + ) { + stack.clear(); + stack.push(i); + + while let Some(n) = stack.pop() { + let child = first_child[n]; + + if child == usize::max_value() { + // No children left. 
+ result.push(n); + } else { + stack.push(n); + stack.push(child); + first_child[n] = other_children[child]; + } + } + } +} diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/src/sparse/cs_matrix_cholesky.rs @@ -0,0 +1 @@ + diff --git a/src/sparse/cs_matrix_conversion.rs b/src/sparse/cs_matrix_conversion.rs new file mode 100644 index 000000000..90f5cde01 --- /dev/null +++ b/src/sparse/cs_matrix_conversion.rs @@ -0,0 +1,59 @@ +use alga::general::{ClosedAdd, ClosedMul}; +use num::{One, Zero}; +use std::iter; +use std::marker::PhantomData; +use std::ops::{Add, Mul, Range}; +use std::slice; + +use allocator::Allocator; +use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; +use sparse::{CsMatrix, CsStorage, CsVector}; +use storage::{Storage, StorageMut}; +use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; + +impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for MatrixMN +where + S: CsStorage, + DefaultAllocator: Allocator, +{ + fn from(m: CsMatrix) -> Self { + let (nrows, ncols) = m.data.shape(); + let mut res = MatrixMN::zeros_generic(nrows, ncols); + + for j in 0..ncols.value() { + for (i, val) in m.data.column_entries(j) { + res[(i, j)] = val; + } + } + + res + } +} + +impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for CsMatrix +where + S: Storage, + DefaultAllocator: Allocator + Allocator, +{ + fn from(m: Matrix) -> Self { + let (nrows, ncols) = m.data.shape(); + let len = m.iter().filter(|e| !e.is_zero()).count(); + let mut res = CsMatrix::new_uninitialized_generic(nrows, ncols, len); + let mut nz = 0; + + for j in 0..ncols.value() { + let column = m.column(j); + res.data.p[j] = nz; + + for i in 0..nrows.value() { + if !column[i].is_zero() { + res.data.i[nz] = i; + res.data.vals[nz] = column[i]; + nz += 1; + } + } + } + + res + } +} diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs new file mode 100644 index 000000000..b9d0a3757 --- /dev/null +++ b/src/sparse/cs_matrix_ops.rs @@ -0,0 +1,251 @@ +use alga::general::{ClosedAdd, ClosedMul}; +use num::{One, Zero}; +use std::iter; +use std::marker::PhantomData; +use std::ops::{Add, Mul, Range}; +use std::slice; + +use allocator::Allocator; +use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; +use sparse::{CsMatrix, CsStorage, CsVector}; +use storage::{Storage, StorageMut}; +use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; + +impl> CsMatrix { + fn scatter( + &self, + j: usize, + beta: N, + timestamps: &mut [usize], + timestamp: usize, + workspace: &mut [N], + mut nz: usize, + res: &mut CsMatrix, + ) -> usize + where + N: ClosedAdd + ClosedMul, + DefaultAllocator: Allocator, + { + for (i, val) in self.data.column_entries(j) { + if timestamps[i] < timestamp { + timestamps[i] = timestamp; + res.data.i[nz] = i; + nz += 1; + workspace[i] = val * beta; + } else { + workspace[i] += val * beta; + } + } + + nz + } +} + +/* +impl CsVector { + pub fn axpy(&mut self, alpha: N, x: CsVector, beta: N) { + // First, compute the number of non-zero entries. + let mut nnzero = 0; + + // Allocate a size large enough. + self.data.set_column_len(0, nnzero); + + // Fill with the axpy. 
+ let mut i = self.len(); + let mut j = x.len(); + let mut k = nnzero - 1; + let mut rid1 = self.data.row_index(0, i - 1); + let mut rid2 = x.data.row_index(0, j - 1); + + while k > 0 { + if rid1 == rid2 { + self.data.set_row_index(0, k, rid1); + self[k] = alpha * x[j] + beta * self[k]; + i -= 1; + j -= 1; + } else if rid1 < rid2 { + self.data.set_row_index(0, k, rid1); + self[k] = beta * self[i]; + i -= 1; + } else { + self.data.set_row_index(0, k, rid2); + self[k] = alpha * x[j]; + j -= 1; + } + + k -= 1; + } + } +} +*/ + +impl> Vector { + pub fn axpy_cs(&mut self, alpha: N, x: &CsVector, beta: N) + where + S2: CsStorage, + ShapeConstraint: DimEq, + { + if beta.is_zero() { + for i in 0..x.len() { + unsafe { + let k = x.data.row_index_unchecked(i); + let y = self.vget_unchecked_mut(k); + *y = alpha * *x.data.get_value_unchecked(i); + } + } + } else { + // Needed to be sure even components not present on `x` are multiplied. + *self *= beta; + + for i in 0..x.len() { + unsafe { + let k = x.data.row_index_unchecked(i); + let y = self.vget_unchecked_mut(k); + *y += alpha * *x.data.get_value_unchecked(i); + } + } + } + } + + /* + pub fn gemv_sparse(&mut self, alpha: N, a: &CsMatrix, x: &DVector, beta: N) + where + S2: CsStorage { + let col2 = a.column(0); + let val = unsafe { *x.vget_unchecked(0) }; + self.axpy_sparse(alpha * val, &col2, beta); + + for j in 1..ncols2 { + let col2 = a.column(j); + let val = unsafe { *x.vget_unchecked(j) }; + + self.axpy_sparse(alpha * val, &col2, N::one()); + } + } + */ +} + +impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Mul<&'b CsMatrix> + for &'a CsMatrix +where + N: Scalar + ClosedAdd + ClosedMul + Zero, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + S1: CsStorage, + S2: CsStorage, + ShapeConstraint: AreMultipliable, + DefaultAllocator: Allocator + Allocator + Allocator, +{ + type Output = CsMatrix; + + fn mul(self, rhs: &'b CsMatrix) -> CsMatrix { + let (nrows1, ncols1) = self.data.shape(); + let (nrows2, ncols2) = rhs.data.shape(); + assert_eq!( + ncols1.value(), + nrows2.value(), + "Mismatched dimensions for matrix multiplication." + ); + + let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); + let mut timestamps = VectorN::zeros_generic(nrows1, U1); + let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; + let mut nz = 0; + + for j in 0..ncols2.value() { + res.data.p[j] = nz; + let new_size_bound = nz + nrows1.value(); + res.data.i.resize(new_size_bound, 0); + res.data.vals.resize(new_size_bound, N::zero()); + + for (i, val) in rhs.data.column_entries(j) { + nz = self.scatter( + i, + val, + timestamps.as_mut_slice(), + j + 1, + workspace.as_mut_slice(), + nz, + &mut res, + ); + } + + for p in res.data.p[j]..nz { + res.data.vals[p] = workspace[res.data.i[p]] + } + } + + res.data.i.truncate(nz); + res.data.i.shrink_to_fit(); + res.data.vals.truncate(nz); + res.data.vals.shrink_to_fit(); + res + } +} + +impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix> + for &'a CsMatrix +where + N: Scalar + ClosedAdd + ClosedMul + One, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + S1: CsStorage, + S2: CsStorage, + ShapeConstraint: DimEq + DimEq, + DefaultAllocator: Allocator + Allocator + Allocator, +{ + type Output = CsMatrix; + + fn add(self, rhs: &'b CsMatrix) -> CsMatrix { + let (nrows1, ncols1) = self.data.shape(); + let (nrows2, ncols2) = rhs.data.shape(); + assert_eq!( + (nrows1.value(), ncols1.value()), + (nrows2.value(), ncols2.value()), + "Mismatched dimensions for matrix sum." 
+ ); + + let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); + let mut timestamps = VectorN::zeros_generic(nrows1, U1); + let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; + let mut nz = 0; + + for j in 0..ncols2.value() { + res.data.p[j] = nz; + + nz = self.scatter( + j, + N::one(), + timestamps.as_mut_slice(), + j + 1, + workspace.as_mut_slice(), + nz, + &mut res, + ); + + nz = rhs.scatter( + j, + N::one(), + timestamps.as_mut_slice(), + j + 1, + workspace.as_mut_slice(), + nz, + &mut res, + ); + + for p in res.data.p[j]..nz { + res.data.vals[p] = workspace[res.data.i[p]] + } + } + + res.data.i.truncate(nz); + res.data.i.shrink_to_fit(); + res.data.vals.truncate(nz); + res.data.vals.shrink_to_fit(); + res + } +} diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs new file mode 100644 index 000000000..fa3a77c73 --- /dev/null +++ b/src/sparse/cs_matrix_solve.rs @@ -0,0 +1,235 @@ +use alga::general::{ClosedAdd, ClosedMul}; +use num::{One, Zero}; +use std::iter; +use std::marker::PhantomData; +use std::ops::{Add, Mul, Range}; +use std::slice; + +use allocator::Allocator; +use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; +use sparse::{CsMatrix, CsStorage, CsVector}; +use storage::{Storage, StorageMut}; +use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; + +impl> CsMatrix { + pub fn solve_lower_triangular( + &self, + b: &Matrix, + ) -> Option> + where + S2: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { + let mut b = b.clone_owned(); + if self.solve_lower_triangular_mut(&mut b) { + Some(b) + } else { + None + } + } + + pub fn tr_solve_lower_triangular( + &self, + b: &Matrix, + ) -> Option> + where + S2: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { + let mut b = b.clone_owned(); + if self.tr_solve_lower_triangular_mut(&mut b) { + Some(b) + } else { + None + } + } + + pub fn solve_lower_triangular_mut( + &self, + b: &mut Matrix, + ) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { + let (nrows, ncols) = self.data.shape(); + assert_eq!(nrows.value(), ncols.value(), "The matrix must be square."); + assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions."); + + for j2 in 0..b.ncols() { + let mut b = b.column_mut(j2); + + for j in 0..ncols.value() { + let mut column = self.data.column_entries(j); + let mut diag_found = false; + + while let Some((i, val)) = column.next() { + if i == j { + if val.is_zero() { + return false; + } + + b[j] /= val; + diag_found = true; + break; + } + } + + if !diag_found { + return false; + } + + for (i, val) in column { + b[i] -= b[j] * val; + } + } + } + + true + } + + pub fn tr_solve_lower_triangular_mut( + &self, + b: &mut Matrix, + ) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { + let (nrows, ncols) = self.data.shape(); + assert_eq!(nrows.value(), ncols.value(), "The matrix must be square."); + assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions."); + + for j2 in 0..b.ncols() { + let mut b = b.column_mut(j2); + + for j in (0..ncols.value()).rev() { + let mut column = self.data.column_entries(j); + let mut diag = None; + + while let Some((i, val)) = column.next() { + if i == j { + if val.is_zero() { + return false; + } + + diag = Some(val); + break; + } + } + + if let Some(diag) = diag { + for (i, val) in column { + b[j] -= val * b[i]; + } + + b[j] /= diag; + } else { + 
return false; + } + } + } + + true + } + + pub fn solve_lower_triangular_cs( + &self, + b: &CsVector, + ) -> Option> + where + S2: CsStorage, + DefaultAllocator: Allocator + Allocator + Allocator, + ShapeConstraint: SameNumberOfRows, + { + let mut reach = Vec::new(); + self.lower_triangular_reach(b, &mut reach); + let mut workspace = unsafe { VectorN::new_uninitialized_generic(b.data.shape().0, U1) }; + + for i in reach.iter().cloned() { + workspace[i] = N::zero(); + } + + for (i, val) in b.data.column_entries(0) { + workspace[i] = val; + } + + for j in reach.iter().cloned().rev() { + let mut column = self.data.column_entries(j); + let mut diag_found = false; + + while let Some((i, val)) = column.next() { + if i == j { + if val.is_zero() { + break; + } + + workspace[j] /= val; + diag_found = true; + break; + } + } + + if !diag_found { + return None; + } + + for (i, val) in column { + workspace[i] -= workspace[j] * val; + } + } + + // Copy the result into a sparse vector. + let mut result = CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len()); + + for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) { + *val = workspace[*i]; + } + + result.data.i = reach; + Some(result) + } + + fn lower_triangular_reach(&self, b: &CsVector, xi: &mut Vec) + where + S2: CsStorage, + DefaultAllocator: Allocator, + { + let mut visited = VectorN::repeat_generic(self.data.shape().1, U1, false); + let mut stack = Vec::new(); + + for i in b.data.column_range(0) { + let row_index = b.data.row_index(i); + + if !visited[row_index] { + let rng = self.data.column_range(row_index); + stack.push((row_index, rng)); + self.lower_triangular_dfs(visited.as_mut_slice(), &mut stack, xi); + } + } + } + + fn lower_triangular_dfs( + &self, + visited: &mut [bool], + stack: &mut Vec<(usize, Range)>, + xi: &mut Vec, + ) { + 'recursion: while let Some((j, rng)) = stack.pop() { + visited[j] = true; + + for i in rng.clone() { + let row_id = self.data.row_index(i); + if row_id > j && !visited[row_id] { + stack.push((j, (i + 1)..rng.end)); + stack.push((row_id, self.data.column_range(row_id))); + continue 'recursion; + } + } + + xi.push(j) + } + } +} diff --git a/src/sparse/mod.rs b/src/sparse/mod.rs index bfaabb3a9..320d76a0d 100644 --- a/src/sparse/mod.rs +++ b/src/sparse/mod.rs @@ -1,3 +1,8 @@ -pub use self::cs_matrix::{CsMatrix, CsVector}; +pub use self::cs_matrix::{CsMatrix, CsStorage, CsStorageMut, CsVector}; mod cs_matrix; +mod cs_matrix_analysis; +mod cs_matrix_cholesky; +mod cs_matrix_conversion; +mod cs_matrix_ops; +mod cs_matrix_solve; From 9bf1d0280d53a36770b6853708dd9e23e4dd64b3 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 30 Oct 2018 17:29:32 +0100 Subject: [PATCH 07/25] Fix cholesky computation. 
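Besides the small `CsMatrix` helpers added here (`from_data`, `len`, `nrows`,
`ncols`, `shape`, `is_square`), the bulk of this commit is the
`cs_matrix_cholesky.rs` implementation and the new `tests/sparse/cs_cholesky.rs`.
Whatever the details of the fix, a factor can always be sanity-checked against
the defining identity `L * L^T = A`; a generic dense check of that identity,
not the actual test from this commit:

    use na::Matrix3;

    #[test]
    fn dense_cholesky_identity() {
        let a = Matrix3::new(
            4.0, 2.0, 0.0,
            2.0, 5.0, 1.0,
            0.0, 1.0, 3.0,
        );
        let l = a.cholesky().expect("matrix must be SPD").l();
        assert!((l * l.transpose() - a).norm() <= 1.0e-10);
    }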
--- src/sparse/cs_matrix.rs | 34 +++ src/sparse/cs_matrix_analysis.rs | 184 ------------ src/sparse/cs_matrix_cholesky.rs | 330 +++++++++++++++++++++ src/sparse/mod.rs | 6 +- tests/sparse/cs_cholesky.rs | 55 ++++ tests/sparse/{cs_linalg.rs => cs_solve.rs} | 0 tests/sparse/mod.rs | 3 +- 7 files changed, 425 insertions(+), 187 deletions(-) delete mode 100644 src/sparse/cs_matrix_analysis.rs create mode 100644 tests/sparse/cs_cholesky.rs rename tests/sparse/{cs_linalg.rs => cs_solve.rs} (100%) diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 1a28932c0..ec4cc5b0f 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -52,6 +52,15 @@ where pub(crate) vals: Vec, } +impl CsVecStorage +where + DefaultAllocator: Allocator, +{ + pub fn values(&self) -> &[N] { + &self.vals + } +} + impl CsVecStorage where DefaultAllocator: Allocator {} impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage @@ -187,10 +196,35 @@ where } impl> CsMatrix { + pub fn from_data(data: S) -> Self { + CsMatrix { + data, + _phantoms: PhantomData, + } + } + pub fn len(&self) -> usize { self.data.len() } + pub fn nrows(&self) -> usize { + self.data.shape().0.value() + } + + pub fn ncols(&self) -> usize { + self.data.shape().1.value() + } + + pub fn shape(&self) -> (usize, usize) { + let (nrows, ncols) = self.data.shape(); + (nrows.value(), ncols.value()) + } + + pub fn is_square(&self) -> bool { + let (nrows, ncols) = self.data.shape(); + nrows.value() == ncols.value() + } + pub fn transpose(&self) -> CsMatrix where DefaultAllocator: Allocator, diff --git a/src/sparse/cs_matrix_analysis.rs b/src/sparse/cs_matrix_analysis.rs deleted file mode 100644 index 629904a4f..000000000 --- a/src/sparse/cs_matrix_analysis.rs +++ /dev/null @@ -1,184 +0,0 @@ -use alga::general::{ClosedAdd, ClosedMul}; -use num::{One, Zero}; -use std::iter; -use std::marker::PhantomData; -use std::ops::{Add, Mul, Range}; -use std::slice; - -use allocator::Allocator; -use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; -use sparse::{CsMatrix, CsStorage, CsVector}; -use storage::{Storage, StorageMut}; -use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; - -pub struct SymbolicAnalysis { - pinv: Vec, - q: Vec, - elimination_tree: Vec, - cp: Vec, - leftmost: Vec, - m2: usize, - lnz: usize, - unz: usize, -} - -#[derive(Copy, Clone, Debug)] -pub struct EliminationTreeNode { - parent: usize, -} - -impl EliminationTreeNode { - pub fn root() -> Self { - EliminationTreeNode { - parent: usize::max_value(), - } - } - - pub fn with_parent(parent: usize) -> Self { - EliminationTreeNode { parent } - } - - pub fn is_root(&self) -> bool { - self.parent == usize::max_value() - } - - pub fn parent(&self) -> usize { - self.parent - } -} - -impl> CsMatrix { - fn elimination_tree(&self) -> Vec { - let (nrows, ncols) = self.data.shape(); - assert_eq!( - nrows.value(), - ncols.value(), - "The matrix `self` must be square to compute its elimination tree." 
- ); - - let mut forest: Vec<_> = iter::repeat(EliminationTreeNode::root()) - .take(nrows.value()) - .collect(); - let mut ancestor: Vec<_> = iter::repeat(usize::max_value()) - .take(nrows.value()) - .collect(); - - for k in 0..nrows.value() { - for irow in self.data.column_row_indices(k) { - let mut i = irow; - - while i < k { - let i_ancestor = ancestor[i]; - ancestor[i] = k; - - if i_ancestor == usize::max_value() { - forest[i] = EliminationTreeNode::with_parent(k); - break; - } - - i = i_ancestor; - } - } - } - - forest - } - - fn reach( - &self, - j: usize, - max_j: usize, - tree: &[EliminationTreeNode], - marks: &mut Vec, - out: &mut Vec, - ) { - marks.clear(); - marks.resize(tree.len(), false); - - for irow in self.data.column_row_indices(j) { - let mut curr = irow; - while curr != usize::max_value() && curr <= max_j && !marks[curr] { - marks[curr] = true; - out.push(curr); - curr = tree[curr].parent; - } - } - } - - fn column_counts(&self, tree: &[EliminationTreeNode]) -> Vec { - let len = self.data.shape().0.value(); - let mut counts: Vec<_> = iter::repeat(0).take(len).collect(); - let mut reach = Vec::new(); - let mut marks = Vec::new(); - - for i in 0..len { - self.reach(i, i, tree, &mut marks, &mut reach); - - for j in reach.drain(..) { - counts[j] += 1; - } - } - - counts - } - - fn tree_postorder(tree: &[EliminationTreeNode]) -> Vec { - // FIXME: avoid all those allocations? - let mut first_child: Vec<_> = iter::repeat(usize::max_value()).take(tree.len()).collect(); - let mut other_children: Vec<_> = - iter::repeat(usize::max_value()).take(tree.len()).collect(); - - // Build the children list from the parent list. - // The set of children of the node `i` is given by the linked list - // starting at `first_child[i]`. The nodes of this list are then: - // { first_child[i], other_children[first_child[i]], other_children[other_children[first_child[i]], ... } - for (i, node) in tree.iter().enumerate() { - if !node.is_root() { - let brother = first_child[node.parent]; - first_child[node.parent] = i; - other_children[i] = brother; - } - } - - let mut stack = Vec::with_capacity(tree.len()); - let mut postorder = Vec::with_capacity(tree.len()); - - for (i, node) in tree.iter().enumerate() { - if node.is_root() { - Self::dfs( - i, - &mut first_child, - &other_children, - &mut stack, - &mut postorder, - ) - } - } - - postorder - } - - fn dfs( - i: usize, - first_child: &mut [usize], - other_children: &[usize], - stack: &mut Vec, - result: &mut Vec, - ) { - stack.clear(); - stack.push(i); - - while let Some(n) = stack.pop() { - let child = first_child[n]; - - if child == usize::max_value() { - // No children left. 
- result.push(n); - } else { - stack.push(n); - stack.push(child); - first_child[n] = other_children[child]; - } - } - } -} diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 8b1378917..2826b5556 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -1 +1,331 @@ +use alga::general::{ClosedAdd, ClosedMul}; +use num::{One, Zero}; +use std::iter; +use std::marker::PhantomData; +use std::mem; +use std::ops::{Add, Mul, Range}; +use std::slice; +use allocator::Allocator; +use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; +use sparse::{CsMatrix, CsStorage, CsStorageIter, CsVecStorage, CsVector}; +use storage::{Storage, StorageMut}; +use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; + +pub struct CsCholesky +where + DefaultAllocator: Allocator + Allocator, +{ + // Non-zero pattern of the original matrix upper-triangular part. + // Unlike the original matrix, the `original_p` array does contain the last sentinel value + // equal to `original_i.len()` at the end. + original_p: Vec, + original_i: Vec, + original_len: usize, // Number of elements on the numerical value vector of the original matrix. + // Decomposition result. + l: CsMatrix, + // Used only for the pattern. + // FIXME: store only the nonzero pattern instead. + u: CsMatrix, + ok: bool, + // Workspaces. + work_x: VectorN, + work_c: VectorN, +} + +impl CsCholesky +where + DefaultAllocator: Allocator + Allocator, +{ + /// Computes the cholesky decomposition of the sparse matrix `m`. + pub fn new(m: &CsMatrix) -> Self { + let mut me = Self::new_symbolic(m); + let _ = me.decompose(&m.data.vals); + me + } + /// Perform symbolic analysis for the given matrix. + /// + /// This does not access the numerical values of `m`. + pub fn new_symbolic(m: &CsMatrix) -> Self { + assert!( + m.is_square(), + "The matrix `m` must be square to compute its elimination tree." + ); + + let (l, u) = Self::nonzero_pattern(m); + + // Workspaces. + let work_x = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) }; + let work_c = unsafe { VectorN::new_uninitialized_generic(m.data.shape().1, U1) }; + let mut original_p = m.data.p.as_slice().to_vec(); + original_p.push(m.data.i.len()); + + CsCholesky { + original_p, + original_i: m.data.i.clone(), + original_len: m.data.i.len(), + l, + u, + ok: false, + work_x, + work_c, + } + } + + pub fn l(&self) -> Option<&CsMatrix> { + if self.ok { + Some(&self.l) + } else { + None + } + } + + pub fn unwrap_l(self) -> Option> { + if self.ok { + Some(self.l) + } else { + None + } + } + + // Performs the numerical Cholesky decomposition given the set of numerical values. + pub fn decompose(&mut self, values: &[N]) -> bool { + assert!( + values.len() >= self.original_len, + "The set of values is too small." + ); + + // Reset `work_c` to the column pointers of `l`. + self.work_c.copy_from(&self.l.data.p); + + // Perform the decomposition. + for k in 0..self.l.nrows() { + // Scatter the k-th column of the original matrix with the values provided. + let column_range = self.original_p[k]..self.original_p[k + 1]; + + self.work_x[k] = N::zero(); + for p in column_range.clone() { + let irow = self.original_i[p]; + + if irow <= k { + self.work_x[irow] = values[p]; + } + } + + let mut diag = self.work_x[k]; + self.work_x[k] = N::zero(); + + // Triangular solve. 
+ for irow in self.u.data.column_row_indices(k) { + if irow >= k { + continue; + } + + let lki = self.work_x[irow] / self.l.data.vals[self.l.data.p[irow]]; + self.work_x[irow] = N::zero(); + + for p in self.l.data.p[irow] + 1..self.work_c[irow] { + self.work_x[self.l.data.i[p]] -= self.l.data.vals[p] * lki; + } + + diag -= lki * lki; + let p = self.work_c[irow]; + self.work_c[irow] += 1; + self.l.data.i[p] = k; + self.l.data.vals[p] = lki; + } + + if diag <= N::zero() { + self.ok = false; + return false; + } + + // Deal with the diagonal element. + let p = self.work_c[k]; + self.work_c[k] += 1; + self.l.data.i[p] = k; + self.l.data.vals[p] = diag.sqrt(); + } + + self.ok = true; + true + } + + fn elimination_tree>(m: &CsMatrix) -> Vec { + let nrows = m.nrows(); + let mut forest: Vec<_> = iter::repeat(usize::max_value()).take(nrows).collect(); + let mut ancestor: Vec<_> = iter::repeat(usize::max_value()).take(nrows).collect(); + + for k in 0..nrows { + for irow in m.data.column_row_indices(k) { + let mut i = irow; + + while i < k { + let i_ancestor = ancestor[i]; + ancestor[i] = k; + + if i_ancestor == usize::max_value() { + forest[i] = k; + break; + } + + i = i_ancestor; + } + } + } + + forest + } + + fn reach>( + m: &CsMatrix, + j: usize, + max_j: usize, + tree: &[usize], + marks: &mut Vec, + out: &mut Vec, + ) { + marks.clear(); + marks.resize(tree.len(), false); + + // FIXME: avoid all those allocations. + let mut tmp = Vec::new(); + let mut res = Vec::new(); + + for irow in m.data.column_row_indices(j) { + let mut curr = irow; + while curr != usize::max_value() && curr <= max_j && !marks[curr] { + marks[curr] = true; + tmp.push(curr); + curr = tree[curr]; + } + + tmp.append(&mut res); + mem::swap(&mut tmp, &mut res); + } + + out.append(&mut res); + } + + fn nonzero_pattern>( + m: &CsMatrix, + ) -> (CsMatrix, CsMatrix) { + let etree = Self::elimination_tree(m); + let (nrows, ncols) = m.data.shape(); + let mut rows = Vec::with_capacity(m.len()); + let mut cols = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) }; + let mut marks = Vec::new(); + + // NOTE: the following will actually compute the non-zero pattern of + // the transpose of l. + for i in 0..nrows.value() { + cols[i] = rows.len(); + Self::reach(m, i, i, &etree, &mut marks, &mut rows); + } + + let mut vals = Vec::with_capacity(rows.len()); + unsafe { + vals.set_len(rows.len()); + } + vals.shrink_to_fit(); + + let data = CsVecStorage { + shape: (nrows, ncols), + p: cols, + i: rows, + vals, + }; + + let u = CsMatrix::from_data(data); + // XXX: avoid this transpose. + let l = u.transpose(); + + (l, u) + } + + /* + * + * NOTE: All the following methods are untested and currently unused. + * + * + fn column_counts>( + m: &CsMatrix, + tree: &[usize], + ) -> Vec { + let len = m.data.shape().0.value(); + let mut counts: Vec<_> = iter::repeat(0).take(len).collect(); + let mut reach = Vec::new(); + let mut marks = Vec::new(); + + for i in 0..len { + Self::reach(m, i, i, tree, &mut marks, &mut reach); + + for j in reach.drain(..) { + counts[j] += 1; + } + } + + counts + } + + fn tree_postorder(tree: &[usize]) -> Vec { + // FIXME: avoid all those allocations? + let mut first_child: Vec<_> = iter::repeat(usize::max_value()).take(tree.len()).collect(); + let mut other_children: Vec<_> = + iter::repeat(usize::max_value()).take(tree.len()).collect(); + + // Build the children list from the parent list. + // The set of children of the node `i` is given by the linked list + // starting at `first_child[i]`. 
The nodes of this list are then: + // { first_child[i], other_children[first_child[i]], other_children[other_children[first_child[i]], ... } + for (i, parent) in tree.iter().enumerate() { + if *parent != usize::max_value() { + let brother = first_child[*parent]; + first_child[*parent] = i; + other_children[i] = brother; + } + } + + let mut stack = Vec::with_capacity(tree.len()); + let mut postorder = Vec::with_capacity(tree.len()); + + for (i, node) in tree.iter().enumerate() { + if *node == usize::max_value() { + Self::dfs( + i, + &mut first_child, + &other_children, + &mut stack, + &mut postorder, + ) + } + } + + postorder + } + + fn dfs( + i: usize, + first_child: &mut [usize], + other_children: &[usize], + stack: &mut Vec, + result: &mut Vec, + ) { + stack.clear(); + stack.push(i); + + while let Some(n) = stack.pop() { + let child = first_child[n]; + + if child == usize::max_value() { + // No children left. + result.push(n); + } else { + stack.push(n); + stack.push(child); + first_child[n] = other_children[child]; + } + } + } + */ +} diff --git a/src/sparse/mod.rs b/src/sparse/mod.rs index 320d76a0d..6ce898e59 100644 --- a/src/sparse/mod.rs +++ b/src/sparse/mod.rs @@ -1,7 +1,9 @@ -pub use self::cs_matrix::{CsMatrix, CsStorage, CsStorageMut, CsVector}; +pub use self::cs_matrix::{ + CsMatrix, CsStorage, CsStorageIter, CsStorageMut, CsVecStorage, CsVector, +}; +pub use self::cs_matrix_cholesky::CsCholesky; mod cs_matrix; -mod cs_matrix_analysis; mod cs_matrix_cholesky; mod cs_matrix_conversion; mod cs_matrix_ops; diff --git a/tests/sparse/cs_cholesky.rs b/tests/sparse/cs_cholesky.rs new file mode 100644 index 000000000..9c1997374 --- /dev/null +++ b/tests/sparse/cs_cholesky.rs @@ -0,0 +1,55 @@ +#![cfg_attr(rustfmt, rustfmt_skip)] + +use na::{CsMatrix, CsVector, CsCholesky, Cholesky, Matrix5, Vector5}; + +#[test] +fn cs_cholesky() { + let mut a = Matrix5::new( + 40.0, 0.0, 0.0, 0.0, 0.0, + 2.0, 60.0, 0.0, 0.0, 0.0, + 1.0, 0.0, 11.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 50.0, 0.0, + 1.0, 0.0, 0.0, 4.0, 10.0 + ); + a.fill_upper_triangle_with_lower_triangle(); + test_cholesky(a); + + let a = Matrix5::from_diagonal(&Vector5::new(40.0, 60.0, 11.0, 50.0, 10.0)); + test_cholesky(a); + + let mut a = Matrix5::new( + 40.0, 0.0, 0.0, 0.0, 0.0, + 2.0, 60.0, 0.0, 0.0, 0.0, + 1.0, 0.0, 11.0, 0.0, 0.0, + 1.0, 0.0, 0.0, 50.0, 0.0, + 0.0, 0.0, 0.0, 4.0, 10.0 + ); + a.fill_upper_triangle_with_lower_triangle(); + test_cholesky(a); + + let mut a = Matrix5::new( + 2.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 2.0, 0.0, 0.0, 0.0, + 1.0, 1.0, 2.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 2.0, 0.0, + 1.0, 1.0, 0.0, 0.0, 2.0 + ); + a.fill_upper_triangle_with_lower_triangle(); + test_cholesky(a); +} + + +fn test_cholesky(a: Matrix5) { + let cs_a: CsMatrix<_, _, _> = a.into(); + + let chol_a = Cholesky::new(a).unwrap(); + let chol_cs_a = CsCholesky::new(&cs_a); + let l = chol_a.l(); + println!("{:?}", chol_cs_a.l()); + let cs_l: Matrix5<_> = chol_cs_a.unwrap_l().unwrap().into(); + + println!("{}", l); + println!("{}", cs_l); + + assert_eq!(l, cs_l); +} diff --git a/tests/sparse/cs_linalg.rs b/tests/sparse/cs_solve.rs similarity index 100% rename from tests/sparse/cs_linalg.rs rename to tests/sparse/cs_solve.rs diff --git a/tests/sparse/mod.rs b/tests/sparse/mod.rs index 6c4d6d459..0e772c99f 100644 --- a/tests/sparse/mod.rs +++ b/tests/sparse/mod.rs @@ -1,5 +1,6 @@ +mod cs_cholesky; mod cs_construction; mod cs_conversion; mod cs_matrix; mod cs_ops; -mod cs_linalg; \ No newline at end of file +mod cs_solve; From 50d0b6492452793c72cc83e243bbf7bc3c96b176 
Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 30 Oct 2018 17:45:59 +0100 Subject: [PATCH 08/25] Avoid bound-checking on cholesky decomposition. --- src/sparse/cs_matrix_cholesky.rs | 85 ++++++++++++++++++-------------- 1 file changed, 48 insertions(+), 37 deletions(-) diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 2826b5556..3b4185a89 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -21,7 +21,6 @@ where // equal to `original_i.len()` at the end. original_p: Vec, original_i: Vec, - original_len: usize, // Number of elements on the numerical value vector of the original matrix. // Decomposition result. l: CsMatrix, // Used only for the pattern. @@ -63,7 +62,6 @@ where CsCholesky { original_p, original_i: m.data.i.clone(), - original_len: m.data.i.len(), l, u, ok: false, @@ -91,7 +89,7 @@ where // Performs the numerical Cholesky decomposition given the set of numerical values. pub fn decompose(&mut self, values: &[N]) -> bool { assert!( - values.len() >= self.original_len, + values.len() >= self.original_i.len(), "The set of values is too small." ); @@ -100,51 +98,64 @@ where // Perform the decomposition. for k in 0..self.l.nrows() { - // Scatter the k-th column of the original matrix with the values provided. - let column_range = self.original_p[k]..self.original_p[k + 1]; + unsafe { + // Scatter the k-th column of the original matrix with the values provided. + let column_range = + *self.original_p.get_unchecked(k)..*self.original_p.get_unchecked(k + 1); - self.work_x[k] = N::zero(); - for p in column_range.clone() { - let irow = self.original_i[p]; + *self.work_x.vget_unchecked_mut(k) = N::zero(); + for p in column_range.clone() { + let irow = *self.original_i.get_unchecked(p); - if irow <= k { - self.work_x[irow] = values[p]; + if irow <= k { + *self.work_x.vget_unchecked_mut(irow) = *values.get_unchecked(p); + } } - } - let mut diag = self.work_x[k]; - self.work_x[k] = N::zero(); + let mut diag = *self.work_x.vget_unchecked(k); + *self.work_x.vget_unchecked_mut(k) = N::zero(); - // Triangular solve. - for irow in self.u.data.column_row_indices(k) { - if irow >= k { - continue; - } + // Triangular solve. + for irow in self.u.data.column_row_indices(k) { + if irow >= k { + continue; + } - let lki = self.work_x[irow] / self.l.data.vals[self.l.data.p[irow]]; - self.work_x[irow] = N::zero(); + let lki = *self.work_x.vget_unchecked(irow) + / *self + .l + .data + .vals + .get_unchecked(*self.l.data.p.vget_unchecked(irow)); + *self.work_x.vget_unchecked_mut(irow) = N::zero(); + + for p in + *self.l.data.p.vget_unchecked(irow) + 1..*self.work_c.vget_unchecked(irow) + { + *self + .work_x + .vget_unchecked_mut(*self.l.data.i.get_unchecked(p)) -= + *self.l.data.vals.get_unchecked(p) * lki; + } - for p in self.l.data.p[irow] + 1..self.work_c[irow] { - self.work_x[self.l.data.i[p]] -= self.l.data.vals[p] * lki; + diag -= lki * lki; + let p = *self.work_c.vget_unchecked(irow); + *self.work_c.vget_unchecked_mut(irow) += 1; + *self.l.data.i.get_unchecked_mut(p) = k; + *self.l.data.vals.get_unchecked_mut(p) = lki; } - diag -= lki * lki; - let p = self.work_c[irow]; - self.work_c[irow] += 1; - self.l.data.i[p] = k; - self.l.data.vals[p] = lki; - } + if diag <= N::zero() { + self.ok = false; + return false; + } - if diag <= N::zero() { - self.ok = false; - return false; + // Deal with the diagonal element. 
+ let p = *self.work_c.vget_unchecked(k); + *self.work_c.vget_unchecked_mut(k) += 1; + *self.l.data.i.get_unchecked_mut(p) = k; + *self.l.data.vals.get_unchecked_mut(p) = diag.sqrt(); } - - // Deal with the diagonal element. - let p = self.work_c[k]; - self.work_c[k] += 1; - self.l.data.i[p] = k; - self.l.data.vals[p] = diag.sqrt(); } self.ok = true; From c3e8112d5ea13f1e135272e65e38a0c1570e0ddd Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Sun, 4 Nov 2018 07:10:43 +0100 Subject: [PATCH 09/25] Add implementation of the left-looking cholesky decomposition. --- src/sparse/cs_matrix.rs | 39 +++++++++++++---- src/sparse/cs_matrix_cholesky.rs | 72 +++++++++++++++++++++++++++++++- src/sparse/mod.rs | 2 +- tests/sparse/cs_cholesky.rs | 2 +- 4 files changed, 102 insertions(+), 13 deletions(-) diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index ec4cc5b0f..6403f5721 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -20,6 +20,12 @@ pub trait CsStorageIter<'a, N, R, C = U1> { fn column_entries(&'a self, j: usize) -> Self::ColumnEntries; } +pub trait CsStorageIterMut<'a, N: 'a, R, C = U1> { + type ColumnEntriesMut: Iterator; + + fn column_entries_mut(&'a mut self, j: usize) -> Self::ColumnEntriesMut; +} + pub trait CsStorage: for<'a> CsStorageIter<'a, N, R, C> { fn shape(&self) -> (R, C); unsafe fn row_index_unchecked(&self, i: usize) -> usize; @@ -30,15 +36,9 @@ pub trait CsStorage: for<'a> CsStorageIter<'a, N, R, C> { fn len(&self) -> usize; } -pub trait CsStorageMut: CsStorage { - /* - /// Sets the length of this column without initializing its values and row indices. - /// - /// If the given length is larger than the current one, uninitialized entries are - /// added at the end of the column `i`. This will effectively shift all the matrix entries - /// of the columns at indices `j` with `j > i`. - fn set_column_len(&mut self, i: usize, len: usize); - */ +pub trait CsStorageMut: + CsStorage + for<'a> CsStorageIterMut<'a, N, R, C> +{ } #[derive(Clone, Debug)] @@ -133,6 +133,27 @@ where } } +impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage +where + DefaultAllocator: Allocator, +{ + type ColumnEntriesMut = iter::Zip>, slice::IterMut<'a, N>>; + + #[inline] + fn column_entries_mut(&'a mut self, j: usize) -> Self::ColumnEntriesMut { + let rng = self.column_range(j); + self.i[rng.clone()] + .iter() + .cloned() + .zip(self.vals[rng].iter_mut()) + } +} + +impl CsStorageMut for CsVecStorage where + DefaultAllocator: Allocator +{ +} + /* pub struct CsSliceStorage<'a, N: Scalar, R: Dim, C: DimAdd> { shape: (R, C), diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 3b4185a89..01a6b0811 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -8,7 +8,7 @@ use std::slice; use allocator::Allocator; use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; -use sparse::{CsMatrix, CsStorage, CsStorageIter, CsVecStorage, CsVector}; +use sparse::{CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsVecStorage, CsVector}; use storage::{Storage, StorageMut}; use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; @@ -39,7 +39,7 @@ where /// Computes the cholesky decomposition of the sparse matrix `m`. pub fn new(m: &CsMatrix) -> Self { let mut me = Self::new_symbolic(m); - let _ = me.decompose(&m.data.vals); + let _ = me.decompose_left_looking(&m.data.vals); me } /// Perform symbolic analysis for the given matrix. 
@@ -86,6 +86,74 @@ where } } + pub fn decompose_left_looking(&mut self, values: &[N]) -> bool { + assert!( + values.len() >= self.original_i.len(), + "The set of values is too small." + ); + + let n = self.l.nrows(); + + // Reset `work_c` to the column pointers of `l`. + self.work_c.copy_from(&self.l.data.p); + + unsafe { + for k in 0..n { + // Scatter the k-th column of the original matrix with the values provided. + let range_k = + *self.original_p.get_unchecked(k)..*self.original_p.get_unchecked(k + 1); + + *self.work_x.vget_unchecked_mut(k) = N::zero(); + for p in range_k.clone() { + let irow = *self.original_i.get_unchecked(p); + + if irow >= k { + *self.work_x.vget_unchecked_mut(irow) = *values.get_unchecked(p); + } + } + + for j in self.u.data.column_row_indices(k) { + let factor = -*self + .l + .data + .vals + .get_unchecked(*self.work_c.vget_unchecked(j)); + *self.work_c.vget_unchecked_mut(j) += 1; + + if j < k { + for (z, val) in self.l.data.column_entries(j) { + if z >= k { + *self.work_x.vget_unchecked_mut(z) += val * factor; + } + } + } + } + + let diag = *self.work_x.vget_unchecked(k); + + if diag > N::zero() { + let denom = diag.sqrt(); + *self + .l + .data + .vals + .get_unchecked_mut(*self.l.data.p.vget_unchecked(k)) = denom; + + for (p, val) in self.l.data.column_entries_mut(k) { + *val = *self.work_x.vget_unchecked(p) / denom; + *self.work_x.vget_unchecked_mut(p) = N::zero(); + } + } else { + self.ok = false; + return false; + } + } + } + + self.ok = true; + true + } + // Performs the numerical Cholesky decomposition given the set of numerical values. pub fn decompose(&mut self, values: &[N]) -> bool { assert!( diff --git a/src/sparse/mod.rs b/src/sparse/mod.rs index 6ce898e59..411e133b0 100644 --- a/src/sparse/mod.rs +++ b/src/sparse/mod.rs @@ -1,5 +1,5 @@ pub use self::cs_matrix::{ - CsMatrix, CsStorage, CsStorageIter, CsStorageMut, CsVecStorage, CsVector, + CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsStorageMut, CsVecStorage, CsVector, }; pub use self::cs_matrix_cholesky::CsCholesky; diff --git a/tests/sparse/cs_cholesky.rs b/tests/sparse/cs_cholesky.rs index 9c1997374..aebefacb4 100644 --- a/tests/sparse/cs_cholesky.rs +++ b/tests/sparse/cs_cholesky.rs @@ -51,5 +51,5 @@ fn test_cholesky(a: Matrix5) { println!("{}", l); println!("{}", cs_l); - assert_eq!(l, cs_l); + assert_relative_eq!(l, cs_l); } From 748cfeea665865b40518da9d142b581b5afbf1a0 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Mon, 5 Nov 2018 16:38:43 +0100 Subject: [PATCH 10/25] Ensure the output of multiplication and triangular solve are sorted. --- src/sparse/cs_matrix.rs | 64 ++++++++++++++++++++++++++++++++ src/sparse/cs_matrix_cholesky.rs | 2 +- src/sparse/cs_matrix_ops.rs | 6 ++- src/sparse/cs_matrix_solve.rs | 52 ++++++++++++++++++++++++-- tests/sparse/cs_cholesky.rs | 36 ++++++++++++++---- tests/sparse/cs_conversion.rs | 3 +- tests/sparse/cs_matrix.rs | 8 +++- tests/sparse/cs_ops.rs | 11 +++++- tests/sparse/cs_solve.rs | 18 ++++----- 9 files changed, 173 insertions(+), 27 deletions(-) diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 6403f5721..707ea93da 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -246,6 +246,32 @@ impl> CsMatrix { nrows.value() == ncols.value() } + /// Should always return `true`. + /// + /// This method is generally used for debugging and should typically not be called in user code. + /// This checks that the row inner indices of this matrix are sorted. It takes `O(n)` time, + /// where n` is `self.len()`. 
+ /// All operations of CSC matrices on nalgebra assume, and will return, sorted indices. + /// If at any time this `is_sorted` method returns `false`, then, something went wrong + /// and an issue should be open on the nalgebra repository with details on how to reproduce + /// this. + pub fn is_sorted(&self) -> bool { + for j in 0..self.ncols() { + let mut curr = None; + for idx in self.data.column_row_indices(j) { + if let Some(curr) = curr { + if idx <= curr { + return false; + } + } + + curr = Some(idx); + } + } + + true + } + pub fn transpose(&self) -> CsMatrix where DefaultAllocator: Allocator, @@ -278,3 +304,41 @@ impl> CsMatrix { res } } + +impl CsMatrix +where + DefaultAllocator: Allocator, +{ + pub(crate) fn sort(&mut self) + where + DefaultAllocator: Allocator, + { + // Size = R + let nrows = self.data.shape().0; + let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows, U1) }; + self.sort_with_workspace(workspace.as_mut_slice()); + } + + pub(crate) fn sort_with_workspace(&mut self, workspace: &mut [N]) { + assert!( + workspace.len() >= self.nrows(), + "Workspace must be able to hold at least self.nrows() elements." + ); + + for j in 0..self.ncols() { + // Scatter the row in the workspace. + for (irow, val) in self.data.column_entries(j) { + workspace[irow] = val; + } + + // Sort the index vector. + let range = self.data.column_range(j); + self.data.i[range.clone()].sort(); + + // Permute the values too. + for (i, irow) in range.clone().zip(self.data.i[range].iter().cloned()) { + self.data.vals[i] = workspace[irow]; + } + } + } +} diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 01a6b0811..e35d7e930 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -155,7 +155,7 @@ where } // Performs the numerical Cholesky decomposition given the set of numerical values. - pub fn decompose(&mut self, values: &[N]) -> bool { + pub fn decompose_up_looking(&mut self, values: &[N]) -> bool { assert!( values.len() >= self.original_i.len(), "The set of values is too small." diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index b9d0a3757..07119c198 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -172,7 +172,11 @@ where ); } - for p in res.data.p[j]..nz { + // Keep the output sorted. + let range = res.data.p[j]..nz; + res.data.i[range.clone()].sort(); + + for p in range { res.data.vals[p] = workspace[res.data.i[p]] } } diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index fa3a77c73..3d88455bf 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -145,7 +145,10 @@ impl> CsMatrix { ShapeConstraint: SameNumberOfRows, { let mut reach = Vec::new(); + // We don't compute a postordered reach here because it will be sorted after anyway. self.lower_triangular_reach(b, &mut reach); + // We sort the reach so the result matrix has sorted indices. + reach.sort(); let mut workspace = unsafe { VectorN::new_uninitialized_generic(b.data.shape().0, U1) }; for i in reach.iter().cloned() { @@ -156,7 +159,7 @@ impl> CsMatrix { workspace[i] = val; } - for j in reach.iter().cloned().rev() { + for j in reach.iter().cloned() { let mut column = self.data.column_entries(j); let mut diag_found = false; @@ -192,8 +195,12 @@ impl> CsMatrix { Some(result) } - fn lower_triangular_reach(&self, b: &CsVector, xi: &mut Vec) - where + // Computes the reachable, post-ordered, nodes from `b`. 
+ fn lower_triangular_reach_postordered( + &self, + b: &CsVector, + xi: &mut Vec, + ) where S2: CsStorage, DefaultAllocator: Allocator, { @@ -232,4 +239,43 @@ impl> CsMatrix { xi.push(j) } } + + // Computes the nodes reachable from `b` in an arbitrary order. + fn lower_triangular_reach(&self, b: &CsVector, xi: &mut Vec) + where + S2: CsStorage, + DefaultAllocator: Allocator, + { + let mut visited = VectorN::repeat_generic(self.data.shape().1, U1, false); + let mut stack = Vec::new(); + + for irow in b.data.column_row_indices(0) { + self.lower_triangular_bfs(irow, visited.as_mut_slice(), &mut stack, xi); + } + } + + fn lower_triangular_bfs( + &self, + start: usize, + visited: &mut [bool], + stack: &mut Vec, + xi: &mut Vec, + ) { + if !visited[start] { + stack.clear(); + stack.push(start); + xi.push(start); + visited[start] = true; + + while let Some(j) = stack.pop() { + for irow in self.data.column_row_indices(j) { + if irow > j && !visited[irow] { + stack.push(irow); + xi.push(irow); + visited[irow] = true; + } + } + } + } + } } diff --git a/tests/sparse/cs_cholesky.rs b/tests/sparse/cs_cholesky.rs index aebefacb4..72a9a08fa 100644 --- a/tests/sparse/cs_cholesky.rs +++ b/tests/sparse/cs_cholesky.rs @@ -35,21 +35,41 @@ fn cs_cholesky() { 1.0, 1.0, 0.0, 0.0, 2.0 ); a.fill_upper_triangle_with_lower_triangle(); + // Test ::new, left_looking, and up_looking implementations. test_cholesky(a); } - fn test_cholesky(a: Matrix5) { + // Test ::new + test_cholesky_variant(a, 0); + // Test up-looking + test_cholesky_variant(a, 1); + // Test left-looking + test_cholesky_variant(a, 2); +} + +fn test_cholesky_variant(a: Matrix5, option: usize) { let cs_a: CsMatrix<_, _, _> = a.into(); let chol_a = Cholesky::new(a).unwrap(); - let chol_cs_a = CsCholesky::new(&cs_a); - let l = chol_a.l(); - println!("{:?}", chol_cs_a.l()); - let cs_l: Matrix5<_> = chol_cs_a.unwrap_l().unwrap().into(); + let mut chol_cs_a; + + match option { + 0 => chol_cs_a = CsCholesky::new(&cs_a), + 1 => { + chol_cs_a = CsCholesky::new_symbolic(&cs_a); + chol_cs_a.decompose_up_looking(cs_a.data.values()); + } + _ => { + chol_cs_a = CsCholesky::new_symbolic(&cs_a); + chol_cs_a.decompose_left_looking(cs_a.data.values()); + } + }; - println!("{}", l); - println!("{}", cs_l); + let l = chol_a.l(); + let cs_l = chol_cs_a.unwrap_l().unwrap(); + assert!(cs_l.is_sorted()); - assert_relative_eq!(l, cs_l); + let cs_l_mat: Matrix5<_> = cs_l.into(); + assert_relative_eq!(l, cs_l_mat); } diff --git a/tests/sparse/cs_conversion.rs b/tests/sparse/cs_conversion.rs index ac0cc0f95..8a3376366 100644 --- a/tests/sparse/cs_conversion.rs +++ b/tests/sparse/cs_conversion.rs @@ -12,7 +12,8 @@ fn cs_from_to_matrix() { ); let cs: CsMatrix<_, _, _> = m.into(); - let m2: Matrix4x5<_> = cs.into(); + assert!(cs.is_sorted()); + let m2: Matrix4x5<_> = cs.into(); assert_eq!(m2, m); } diff --git a/tests/sparse/cs_matrix.rs b/tests/sparse/cs_matrix.rs index 0115f56aa..b97260d4b 100644 --- a/tests/sparse/cs_matrix.rs +++ b/tests/sparse/cs_matrix.rs @@ -12,7 +12,11 @@ fn cs_transpose() { ); let cs: CsMatrix<_, _, _> = m.into(); - let cs_transposed: Matrix5x4<_> = cs.transpose().into(); + assert!(cs.is_sorted()); - assert_eq!(cs_transposed, m.transpose()) + let cs_transposed = cs.transpose(); + assert!(cs_transposed.is_sorted()); + + let cs_transposed_mat: Matrix5x4<_> = cs_transposed.into(); + assert_eq!(cs_transposed_mat, m.transpose()) } diff --git a/tests/sparse/cs_ops.rs b/tests/sparse/cs_ops.rs index 6cee0050a..fa98fdb3d 100644 --- a/tests/sparse/cs_ops.rs +++ 
b/tests/sparse/cs_ops.rs @@ -12,6 +12,7 @@ fn axpy_cs() { let cs: CsVector<_, _> = v2.into(); v1.axpy_cs(5.0, &cs, 10.0); + assert!(cs.is_sorted()); assert_eq!(v1, expected) } @@ -36,6 +37,9 @@ fn cs_mat_mul() { let mul = &sm1 * &sm2; + assert!(sm1.is_sorted()); + assert!(sm2.is_sorted()); + assert!(mul.is_sorted()); assert_eq!(Matrix3x5::from(mul), m1 * m2); } @@ -59,7 +63,10 @@ fn cs_mat_add() { let sm1: CsMatrix<_, _, _> = m1.into(); let sm2: CsMatrix<_, _, _> = m2.into(); - let mul = &sm1 + &sm2; + let sum = &sm1 + &sm2; - assert_eq!(Matrix4x5::from(mul), m1 + m2); + assert!(sm1.is_sorted()); + assert!(sm2.is_sorted()); + assert!(sum.is_sorted()); + assert_eq!(Matrix4x5::from(sum), m1 + m2); } diff --git a/tests/sparse/cs_solve.rs b/tests/sparse/cs_solve.rs index d65b633c9..b3415d797 100644 --- a/tests/sparse/cs_solve.rs +++ b/tests/sparse/cs_solve.rs @@ -79,15 +79,15 @@ fn cs_lower_triangular_solve_cs() { let cs_b8: CsVector<_, _> = Vector5::w().into(); let cs_b9: CsVector<_, _> = Vector5::a().into(); - assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b1).map(|v| v.into()), a.solve_lower_triangular(&b1)); - assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b5).map(|v| v.into()), a.solve_lower_triangular(&b5)); - assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b6).map(|v| v.into()), a.solve_lower_triangular(&b6)); - assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b7).map(|v| v.into()), a.solve_lower_triangular(&b7)); - assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b8).map(|v| v.into()), a.solve_lower_triangular(&b8)); - assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b9).map(|v| v.into()), a.solve_lower_triangular(&b9)); - assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b2).map(|v| v.into()), a.solve_lower_triangular(&b2)); - assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b3).map(|v| v.into()), a.solve_lower_triangular(&b3)); - assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b4).map(|v| v.into()), a.solve_lower_triangular(&b4)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b1).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b1)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b5).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b5)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b6).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b6)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b7).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b7)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b8).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b8)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b9).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b9)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b2).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b2)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b3).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b3)); + assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b4).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b4)); // Singular case. From 538e18b3e98a88020fe2c071632c4f3479e120f2 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Mon, 5 Nov 2018 16:44:59 +0100 Subject: [PATCH 11/25] Ensure the output of addition is sorted. 
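The sortedness guarantee that this part of the series establishes (and that `is_sorted` checks) comes from a per-column scatter/sort/gather pass, as in `sort_with_workspace` above and in the addition fix below. A standalone sketch of that pattern on plain slices, independent of the actual `CsVecStorage` layout:

    /// Sort one CSC column in place: scatter the values into a dense workspace
    /// keyed by row index, sort the row indices, then gather the values back in
    /// the new order. (Simplified illustration, not the crate's API.)
    fn sort_column(row_indices: &mut [usize], values: &mut [f64], workspace: &mut [f64]) {
        for (&i, &v) in row_indices.iter().zip(values.iter()) {
            workspace[i] = v;
        }
        row_indices.sort();
        for (v, &i) in values.iter_mut().zip(row_indices.iter()) {
            *v = workspace[i];
        }
    }

    fn main() {
        let mut rows = vec![3, 0, 2];
        let mut vals = vec![30.0, 1.0, 20.0];
        let mut work = vec![0.0; 4]; // length >= number of rows
        sort_column(&mut rows, &mut vals, &mut work);
        assert_eq!(rows, vec![0, 2, 3]);
        assert_eq!(vals, vec![1.0, 20.0, 30.0]);
    }
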
--- src/sparse/cs_matrix_ops.rs | 6 +++++- tests/sparse/cs_ops.rs | 4 ++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 07119c198..34a68f9b3 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -241,7 +241,11 @@ where &mut res, ); - for p in res.data.p[j]..nz { + // Keep the output sorted. + let range = res.data.p[j]..nz; + res.data.i[range.clone()].sort(); + + for p in range { res.data.vals[p] = workspace[res.data.i[p]] } } diff --git a/tests/sparse/cs_ops.rs b/tests/sparse/cs_ops.rs index fa98fdb3d..49dfc2bc4 100644 --- a/tests/sparse/cs_ops.rs +++ b/tests/sparse/cs_ops.rs @@ -47,8 +47,8 @@ fn cs_mat_mul() { #[test] fn cs_mat_add() { let m1 = Matrix4x5::new( - 4.0, 1.0, 4.0, 0.0, 9.0, - 5.0, 6.0, 0.0, 8.0, 10.0, + 4.0, 1.0, 4.0, 0.0, 0.0, + 5.0, 6.0, 0.0, 8.0, 0.0, 9.0, 10.0, 11.0, 12.0, 0.0, 0.0, 0.0, 1.0, 0.0, 10.0 ); From 383a18f08362d02ebd8a89bbf18cede29c7418c0 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 6 Nov 2018 18:27:43 +0100 Subject: [PATCH 12/25] Improve CsMatrix multiplaction performances. --- src/sparse/cs_matrix_ops.rs | 50 +++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 34a68f9b3..bf1dccdbf 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -7,7 +7,7 @@ use std::slice; use allocator::Allocator; use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; -use sparse::{CsMatrix, CsStorage, CsVector}; +use sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector}; use storage::{Storage, StorageMut}; use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; @@ -150,8 +150,7 @@ where ); let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut timestamps = VectorN::zeros_generic(nrows1, U1); - let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; + let mut workspace = VectorN::::zeros_generic(nrows1, U1); let mut nz = 0; for j in 0..ncols2.value() { @@ -160,24 +159,19 @@ where res.data.i.resize(new_size_bound, 0); res.data.vals.resize(new_size_bound, N::zero()); - for (i, val) in rhs.data.column_entries(j) { - nz = self.scatter( - i, - val, - timestamps.as_mut_slice(), - j + 1, - workspace.as_mut_slice(), - nz, - &mut res, - ); + for (i, beta) in rhs.data.column_entries(j) { + for (k, val) in self.data.column_entries(i) { + workspace[k] += val * beta; + } } - // Keep the output sorted. - let range = res.data.p[j]..nz; - res.data.i[range.clone()].sort(); - - for p in range { - res.data.vals[p] = workspace[res.data.i[p]] + for (i, val) in workspace.as_mut_slice().iter_mut().enumerate() { + if !val.is_zero() { + res.data.i[nz] = i; + res.data.vals[nz] = *val; + *val = N::zero(); + nz += 1; + } } } @@ -257,3 +251,21 @@ where res } } + +impl<'a, 'b, N, R, C, S> Mul for CsMatrix +where + N: Scalar + ClosedAdd + ClosedMul + Zero, + R: Dim, + C: Dim, + S: CsStorageMut, +{ + type Output = Self; + + fn mul(mut self, rhs: N) -> Self { + for e in self.values_mut() { + *e *= rhs + } + + self + } +} From ed07b78b972466e6a32ceb62557286062a6f69f3 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 6 Nov 2018 18:31:04 +0100 Subject: [PATCH 13/25] Add matrixmarket parser. 
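The multiplication rewrite in [PATCH 12/25] above accumulates each output column into a dense workspace and then compresses it, which is what keeps the result sorted without an explicit sort. A simplified standalone sketch of that per-column kernel, assuming the usual 0-based CSC arrays with an `ncols + 1` column-pointer sentinel (names are illustrative, not the crate's API):

    /// Compute one column of C = A * B: accumulate A[:, i] * B[i, j] for every
    /// entry (i, B[i, j]) of the j-th column of B, then compress the nonzeros.
    /// (Illustrative helper mirroring the patch, not the crate's API.)
    fn mul_column(
        a_p: &[usize], a_i: &[usize], a_v: &[f64], // A in CSC form (p has ncols + 1 entries)
        b_rows: &[usize], b_vals: &[f64],          // the j-th column of B
        workspace: &mut [f64],                     // dense, len = nrows(A), zero-initialized
        out_rows: &mut Vec<usize>, out_vals: &mut Vec<f64>,
    ) {
        for (&i, &beta) in b_rows.iter().zip(b_vals.iter()) {
            for p in a_p[i]..a_p[i + 1] {
                workspace[a_i[p]] += a_v[p] * beta;
            }
        }
        // Walking the workspace in row order yields already-sorted output indices.
        for (row, val) in workspace.iter_mut().enumerate() {
            if *val != 0.0 {
                out_rows.push(row);
                out_vals.push(*val);
                *val = 0.0; // reset the workspace for the next column
            }
        }
    }

    fn main() {
        // A = [[1, 0], [2, 3]] in CSC form; multiply by the single column b = [4, 5]^T.
        let (a_p, a_i, a_v) = (vec![0, 2, 3], vec![0, 1, 1], vec![1.0, 2.0, 3.0]);
        let (b_rows, b_vals) = (vec![0, 1], vec![4.0, 5.0]);
        let mut workspace = vec![0.0; 2];
        let (mut rows, mut vals) = (Vec::new(), Vec::new());
        mul_column(&a_p, &a_i, &a_v, &b_rows, &b_vals, &mut workspace, &mut rows, &mut vals);
        assert_eq!(rows, vec![0, 1]);
        assert_eq!(vals, vec![4.0, 23.0]); // [1*4, 2*4 + 3*5]
    }

The dense walk over the workspace costs O(nrows) per output column, but it removes both the timestamp bookkeeping and the per-column sort of the previous version while naturally emitting row indices in increasing order.
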
--- Cargo.toml | 3 + src/io/matrix_market.pest | 16 +++ src/io/matrix_market.rs | 51 ++++++++ src/io/mod.rs | 3 + src/sparse/cs_matrix.rs | 192 +++++++++++++++++++++++++---- src/sparse/cs_matrix_conversion.rs | 60 ++++++++- src/sparse/cs_utils.rs | 18 +++ src/sparse/mod.rs | 1 + tests/sparse/cs_conversion.rs | 76 +++++++++++- tests/sparse/cs_matrix_market.rs | 55 +++++++++ tests/sparse/mod.rs | 2 + 11 files changed, 449 insertions(+), 28 deletions(-) create mode 100644 src/io/matrix_market.pest create mode 100644 src/io/matrix_market.rs create mode 100644 src/io/mod.rs create mode 100644 src/sparse/cs_utils.rs create mode 100644 tests/sparse/cs_matrix_market.rs diff --git a/Cargo.toml b/Cargo.toml index 88857c552..ed76cbb8e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ abomonation-serialize = [ "abomonation" ] sparse = [ ] debug = [ ] alloc = [ ] +io = [ "pest", "pest_derive" ] [dependencies] typenum = "1.10" @@ -41,6 +42,8 @@ serde_derive = { version = "1.0", optional = true } abomonation = { version = "0.5", optional = true } mint = { version = "0.5", optional = true } quickcheck = { version = "0.6", optional = true } +pest = { version = "2.0", optional = true } +pest_derive = { version = "2.0", optional = true } [dev-dependencies] serde_json = "1.0" diff --git a/src/io/matrix_market.pest b/src/io/matrix_market.pest new file mode 100644 index 000000000..eafe5b829 --- /dev/null +++ b/src/io/matrix_market.pest @@ -0,0 +1,16 @@ +WHITESPACE = _{ " " } + +Comments = _{ "%" ~ (!NEWLINE ~ ANY)* } +Header = { "%%" ~ (!NEWLINE ~ ANY)* } +Shape = { Dimension ~ Dimension ~ Dimension } +Document = { + SOI ~ + NEWLINE ~ + Header ~ + (NEWLINE ~ Comments)* ~ + (NEWLINE ~ Shape) ~ + (NEWLINE ~ Entry?)* +} +Dimension = @{ ASCII_DIGIT+ } +Value = @{ ("+" | "-")? ~ NUMBER+ ~ ("." ~ NUMBER+)? ~ ("e" ~ ("+" | "-")? ~ NUMBER+)? } +Entry = { Dimension ~ Dimension ~ Value } \ No newline at end of file diff --git a/src/io/matrix_market.rs b/src/io/matrix_market.rs new file mode 100644 index 000000000..12fb6c559 --- /dev/null +++ b/src/io/matrix_market.rs @@ -0,0 +1,51 @@ +use std::fs; +use std::path::Path; + +use pest::Parser; +use sparse::CsMatrix; +use Real; + +#[derive(Parser)] +#[grammar = "io/matrix_market.pest"] +struct MatrixMarketParser; + +// FIXME: return an Error instead of an Option. +pub fn cs_matrix_from_matrix_market>(path: P) -> Option> { + let file = fs::read_to_string(path).ok()?; + cs_matrix_from_matrix_market_str(&file) +} + +// FIXME: return an Error instead of an Option. +pub fn cs_matrix_from_matrix_market_str(data: &str) -> Option> { + let file = MatrixMarketParser::parse(Rule::Document, data) + .unwrap() + .next()?; + let mut shape = (0, 0, 0); + let mut rows: Vec = Vec::new(); + let mut cols: Vec = Vec::new(); + let mut data: Vec = Vec::new(); + + for line in file.into_inner() { + match line.as_rule() { + Rule::Header => {} + Rule::Shape => { + let mut inner = line.into_inner(); + shape.0 = inner.next()?.as_str().parse::().ok()?; + shape.1 = inner.next()?.as_str().parse::().ok()?; + shape.2 = inner.next()?.as_str().parse::().ok()?; + } + Rule::Entry => { + let mut inner = line.into_inner(); + // NOTE: indices are 1-based. + rows.push(inner.next()?.as_str().parse::().ok()? - 1); + cols.push(inner.next()?.as_str().parse::().ok()? - 1); + data.push(::convert(inner.next()?.as_str().parse::().ok()?)); + } + _ => return None, // FIXME: return an Err instead. 
+ } + } + + Some(CsMatrix::from_triplet( + shape.0, shape.1, &rows, &cols, &data, + )) +} diff --git a/src/io/mod.rs b/src/io/mod.rs new file mode 100644 index 000000000..fd7dc5367 --- /dev/null +++ b/src/io/mod.rs @@ -0,0 +1,3 @@ +pub use self::matrix_market::*; + +mod matrix_market; diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 707ea93da..9bb03cdac 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -7,8 +7,43 @@ use std::slice; use allocator::Allocator; use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; +use sparse::cs_utils; use storage::{Storage, StorageMut}; -use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; +use { + DVector, DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, MatrixVec, Real, Scalar, Vector, + VectorN, U1, +}; + +pub struct ColumnEntries<'a, N> { + curr: usize, + i: &'a [usize], + v: &'a [N], +} + +impl<'a, N> ColumnEntries<'a, N> { + #[inline] + pub fn new(i: &'a [usize], v: &'a [N]) -> Self { + assert_eq!(i.len(), v.len()); + ColumnEntries { curr: 0, i, v } + } +} + +impl<'a, N: Copy> Iterator for ColumnEntries<'a, N> { + type Item = (usize, N); + + #[inline] + fn next(&mut self) -> Option<(usize, N)> { + if self.curr >= self.i.len() { + None + } else { + let res = Some((unsafe { *self.i.get_unchecked(self.curr) }, unsafe { + *self.v.get_unchecked(self.curr) + })); + self.curr += 1; + res + } + } +} // FIXME: this structure exists for now only because impl trait // cannot be used for trait method return types. @@ -17,12 +52,15 @@ pub trait CsStorageIter<'a, N, R, C = U1> { type ColumnRowIndices: Iterator; fn column_row_indices(&'a self, j: usize) -> Self::ColumnRowIndices; + #[inline(always)] fn column_entries(&'a self, j: usize) -> Self::ColumnEntries; } pub trait CsStorageIterMut<'a, N: 'a, R, C = U1> { + type ValuesMut: Iterator; type ColumnEntriesMut: Iterator; + fn values_mut(&'a mut self) -> Self::ValuesMut; fn column_entries_mut(&'a mut self, j: usize) -> Self::ColumnEntriesMut; } @@ -41,7 +79,7 @@ pub trait CsStorageMut: { } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub struct CsVecStorage where DefaultAllocator: Allocator, @@ -59,6 +97,12 @@ where pub fn values(&self) -> &[N] { &self.vals } + pub fn p(&self) -> &[usize] { + self.p.as_slice() + } + pub fn i(&self) -> &[usize] { + &self.i + } } impl CsVecStorage where DefaultAllocator: Allocator {} @@ -67,17 +111,13 @@ impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage< where DefaultAllocator: Allocator, { - type ColumnEntries = - iter::Zip>, iter::Cloned>>; + type ColumnEntries = ColumnEntries<'a, N>; type ColumnRowIndices = iter::Cloned>; #[inline] fn column_entries(&'a self, j: usize) -> Self::ColumnEntries { let rng = self.column_range(j); - self.i[rng.clone()] - .iter() - .cloned() - .zip(self.vals[rng].iter().cloned()) + ColumnEntries::new(&self.i[rng.clone()], &self.vals[rng]) } #[inline] @@ -137,8 +177,14 @@ impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStora where DefaultAllocator: Allocator, { + type ValuesMut = slice::IterMut<'a, N>; type ColumnEntriesMut = iter::Zip>, slice::IterMut<'a, N>>; + #[inline] + fn values_mut(&'a mut self) -> Self::ValuesMut { + self.vals.iter_mut() + } + #[inline] fn column_entries_mut(&'a mut self, j: usize) -> Self::ColumnEntriesMut { let rng = self.column_range(j); @@ -163,13 +209,18 @@ pub struct CsSliceStorage<'a, N: Scalar, R: Dim, C: DimAdd> { }*/ /// A compressed sparse column matrix. 
-#[derive(Clone, Debug)] -pub struct CsMatrix = CsVecStorage> { +#[derive(Clone, Debug, PartialEq)] +pub struct CsMatrix< + N: Scalar, + R: Dim = Dynamic, + C: Dim = Dynamic, + S: CsStorage = CsVecStorage, +> { pub data: S, _phantoms: PhantomData<(N, R, C)>, } -pub type CsVector> = CsMatrix; +pub type CsVector> = CsMatrix; impl CsMatrix where @@ -198,22 +249,66 @@ where _phantoms: PhantomData, } } -} -fn cumsum(a: &mut VectorN, b: &mut VectorN) -> usize -where - DefaultAllocator: Allocator, -{ - assert!(a.len() == b.len()); - let mut sum = 0; + pub fn from_parts_generic( + nrows: R, + ncols: C, + p: VectorN, + i: Vec, + vals: Vec, + ) -> Self + where + N: Zero + ClosedAdd, + DefaultAllocator: Allocator, + { + assert_eq!(ncols.value(), p.len(), "Invalid inptr size."); + assert_eq!(i.len(), vals.len(), "Invalid value size."); + + // Check p. + for ptr in &p { + assert!(*ptr < i.len(), "Invalid inptr value."); + } - for i in 0..a.len() { - b[i] = sum; - sum += a[i]; - a[i] = b[i]; + for ptr in p.as_slice().windows(2) { + assert!(ptr[0] <= ptr[1], "Invalid inptr ordering."); + } + + // Check i. + for i in &i { + assert!(*i < nrows.value(), "Invalid row ptr value.") + } + + let mut res = CsMatrix { + data: CsVecStorage { + shape: (nrows, ncols), + p, + i, + vals, + }, + _phantoms: PhantomData, + }; + + // Sort and remove duplicates. + res.sort(); + res.dedup(); + + res } +} - sum +impl CsMatrix { + pub fn from_parts( + nrows: usize, + ncols: usize, + p: Vec, + i: Vec, + vals: Vec, + ) -> Self { + let nrows = Dynamic::new(nrows); + let ncols = Dynamic::new(ncols); + let p = DVector::from_data(MatrixVec::new(ncols, U1, p)); + Self::from_parts_generic(nrows, ncols, p, i, vals) + } } impl> CsMatrix { @@ -288,7 +383,7 @@ impl> CsMatrix { workspace[row_id] += 1; } - let _ = cumsum(&mut workspace, &mut res.data.p); + let _ = cs_utils::cumsum(&mut workspace, &mut res.data.p); // Fill the result. for j in 0..ncols.value() { @@ -305,6 +400,13 @@ impl> CsMatrix { } } +impl> CsMatrix { + #[inline] + pub fn values_mut(&mut self) -> impl Iterator { + self.data.values_mut() + } +} + impl CsMatrix where DefaultAllocator: Allocator, @@ -341,4 +443,46 @@ where } } } + + // Remove dupliate entries on a sorted CsMatrix. + pub(crate) fn dedup(&mut self) + where + N: Zero + ClosedAdd, + { + let mut curr_i = 0; + + for j in 0..self.ncols() { + let range = self.data.column_range(j); + self.data.p[j] = curr_i; + + if range.start != range.end { + let mut value = N::zero(); + let mut irow = self.data.i[range.start]; + + for idx in range { + let curr_irow = self.data.i[idx]; + + if curr_irow == irow { + value += self.data.vals[idx]; + } else { + self.data.i[curr_i] = irow; + self.data.vals[curr_i] = value; + value = self.data.vals[idx]; + irow = curr_irow; + curr_i += 1; + } + } + + // Handle the last entry. 
+ self.data.i[curr_i] = irow; + self.data.vals[curr_i] = value; + curr_i += 1; + } + } + + self.data.i.truncate(curr_i); + self.data.i.shrink_to_fit(); + self.data.vals.truncate(curr_i); + self.data.vals.shrink_to_fit(); + } } diff --git a/src/sparse/cs_matrix_conversion.rs b/src/sparse/cs_matrix_conversion.rs index 90f5cde01..b764bf10e 100644 --- a/src/sparse/cs_matrix_conversion.rs +++ b/src/sparse/cs_matrix_conversion.rs @@ -7,9 +7,67 @@ use std::slice; use allocator::Allocator; use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; +use sparse::cs_utils; use sparse::{CsMatrix, CsStorage, CsVector}; use storage::{Storage, StorageMut}; -use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; +use {DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; + +impl<'a, N: Scalar + Zero + ClosedAdd> CsMatrix { + // FIXME: implement for dimensions other than Dynamic too. + pub fn from_triplet( + nrows: usize, + ncols: usize, + irows: &[usize], + icols: &[usize], + vals: &[N], + ) -> Self { + Self::from_triplet_generic(Dynamic::new(nrows), Dynamic::new(ncols), irows, icols, vals) + } +} + +impl<'a, N: Scalar + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix +where + DefaultAllocator: Allocator + Allocator, +{ + pub fn from_triplet_generic( + nrows: R, + ncols: C, + irows: &[usize], + icols: &[usize], + vals: &[N], + ) -> Self { + assert!(vals.len() == irows.len()); + assert!(vals.len() == icols.len()); + + let mut res = CsMatrix::new_uninitialized_generic(nrows, ncols, vals.len()); + let mut workspace = res.data.p.clone(); + + // Column count. + for j in icols.iter().cloned() { + workspace[j] += 1; + } + + let _ = cs_utils::cumsum(&mut workspace, &mut res.data.p); + + // Fill i and vals. + for ((i, j), val) in irows + .iter() + .cloned() + .zip(icols.iter().cloned()) + .zip(vals.iter().cloned()) + { + let offset = workspace[j]; + res.data.i[offset] = i; + res.data.vals[offset] = val; + workspace[j] = offset + 1; + } + + // Sort the result. 
+ res.sort(); + res.dedup(); + res + } +} impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for MatrixMN where diff --git a/src/sparse/cs_utils.rs b/src/sparse/cs_utils.rs new file mode 100644 index 000000000..a79ee4d98 --- /dev/null +++ b/src/sparse/cs_utils.rs @@ -0,0 +1,18 @@ +use allocator::Allocator; +use {DefaultAllocator, Dim, VectorN}; + +pub fn cumsum(a: &mut VectorN, b: &mut VectorN) -> usize +where + DefaultAllocator: Allocator, +{ + assert!(a.len() == b.len()); + let mut sum = 0; + + for i in 0..a.len() { + b[i] = sum; + sum += a[i]; + a[i] = b[i]; + } + + sum +} diff --git a/src/sparse/mod.rs b/src/sparse/mod.rs index 411e133b0..546507eb8 100644 --- a/src/sparse/mod.rs +++ b/src/sparse/mod.rs @@ -8,3 +8,4 @@ mod cs_matrix_cholesky; mod cs_matrix_conversion; mod cs_matrix_ops; mod cs_matrix_solve; +pub mod cs_utils; diff --git a/tests/sparse/cs_conversion.rs b/tests/sparse/cs_conversion.rs index 8a3376366..f08fe7581 100644 --- a/tests/sparse/cs_conversion.rs +++ b/tests/sparse/cs_conversion.rs @@ -1,9 +1,8 @@ -#![cfg_attr(rustfmt, rustfmt_skip)] - -use na::{Matrix4x5, CsMatrix}; +use na::{CsMatrix, DMatrix, Matrix4x5}; #[test] fn cs_from_to_matrix() { + #[cfg_attr(rustfmt, rustfmt_skip)] let m = Matrix4x5::new( 5.0, 6.0, 0.0, 8.0, 15.0, 9.0, 10.0, 11.0, 12.0, 0.0, @@ -17,3 +16,74 @@ fn cs_from_to_matrix() { let m2: Matrix4x5<_> = cs.into(); assert_eq!(m2, m); } + +#[test] +fn cs_matrix_from_triplet() { + let mut irows = vec![0, 0, 0, 0, 1, 1, 1, 1, 2, 3, 3, 3]; + let mut icols = vec![0, 1, 3, 4, 0, 1, 2, 3, 2, 1, 2, 4]; + let mut vals = vec![ + 5.0, 6.0, 8.0, 15.0, 9.0, 10.0, 11.0, 12.0, 13.0, 1.0, 4.0, 14.0, + ]; + + #[cfg_attr(rustfmt, rustfmt_skip)] + let expected = DMatrix::from_row_slice(4, 5, &[ + 5.0, 6.0, 0.0, 8.0, 15.0, + 9.0, 10.0, 11.0, 12.0, 0.0, + 0.0, 0.0, 13.0, 0.0, 0.0, + 0.0, 1.0, 4.0, 0.0, 14.0, + ]); + let cs_expected = CsMatrix::from_parts( + 4, + 5, + vec![0, 2, 5, 8, 10], + vec![0, 1, 0, 1, 3, 1, 2, 3, 0, 1, 0, 3], + vec![ + 5.0, 9.0, 6.0, 10.0, 1.0, 11.0, 13.0, 4.0, 8.0, 12.0, 15.0, 14.0, + ], + ); + + let cs_mat = CsMatrix::from_triplet(4, 5, &irows, &icols, &vals); + println!("Mat from triplet: {:?}", cs_mat); + assert!(cs_mat.is_sorted()); + assert_eq!(cs_mat, cs_expected); + + let m: DMatrix<_> = cs_mat.into(); + assert_eq!(m, expected); + + /* + * Try again with some permutations. + */ + let permutations = [(2, 5), (0, 4), (8, 10), (1, 11)]; + + for (i, j) in &permutations { + irows.swap(*i, *j); + icols.swap(*i, *j); + vals.swap(*i, *j); + } + + let cs_mat = CsMatrix::from_triplet(4, 5, &irows, &icols, &vals); + println!("Mat from triplet: {:?}", cs_mat); + assert!(cs_mat.is_sorted()); + assert_eq!(cs_mat, cs_expected); + + let m: DMatrix<_> = cs_mat.into(); + assert_eq!(m, expected); + + /* + * Try again, duplicating all entries. 
+ */ + let mut ir = irows.clone(); + let mut ic = icols.clone(); + let mut va = vals.clone(); + irows.append(&mut ir); + icols.append(&mut ic); + vals.append(&mut va); + + let cs_mat = CsMatrix::from_triplet(4, 5, &irows, &icols, &vals); + println!("Mat from triplet: {:?}", cs_mat); + assert!(cs_mat.is_sorted()); + assert_eq!(cs_mat, cs_expected * 2.0); + + let m: DMatrix<_> = cs_mat.into(); + assert_eq!(m, expected * 2.0); +} diff --git a/tests/sparse/cs_matrix_market.rs b/tests/sparse/cs_matrix_market.rs new file mode 100644 index 000000000..12414b37e --- /dev/null +++ b/tests/sparse/cs_matrix_market.rs @@ -0,0 +1,55 @@ +#![cfg_attr(rustfmt, rustfmt_skip)] + + +use na::io; +use na::DMatrix; + +#[test] +fn cs_matrix_market() { + let file_str = r#" + %%MatrixMarket matrix coordinate real general +%================================================================================= +% +% This ASCII file represents a sparse MxN matrix with L +% nonzeros in the following Matrix Market format: +% +% +----------------------------------------------+ +% |%%MatrixMarket matrix coordinate real general | <--- header line +% |% | <--+ +% |% comments | |-- 0 or more comment lines +% |% | <--+ +% | M N L | <--- rows, columns, entries +% | I1 J1 A(I1, J1) | <--+ +% | I2 J2 A(I2, J2) | | +% | I3 J3 A(I3, J3) | |-- L lines +% | . . . | | +% | IL JL A(IL, JL) | <--+ +% +----------------------------------------------+ +% +% Indices are 1-based, i.e. A(1,1) is the first element. +% +%================================================================================= + 5 5 8 + 1 1 1.000e+00 + 2 2 1.050e+01 + 3 3 1.500e-02 + 1 4 6.000e+00 + 4 2 2.505e+02 + 4 4 -2.800e+02 + 4 5 3.332e+01 + 5 5 1.200e+01 +"#; + + let cs_mat = io::cs_matrix_from_matrix_market_str(file_str).unwrap(); + println!("CS mat: {:?}", cs_mat); + let mat: DMatrix<_> = cs_mat.into(); + let expected = DMatrix::from_row_slice(5, 5, &[ + 1.0, 0.0, 0.0, 6.0, 0.0, + 0.0, 10.5, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.015, 0.0, 0.0, + 0.0, 250.5, 0.0, -280.0, 33.32, + 0.0, 0.0, 0.0, 0.0, 12.0, + ]); + + assert_eq!(mat, expected); +} diff --git a/tests/sparse/mod.rs b/tests/sparse/mod.rs index 0e772c99f..df8e7e376 100644 --- a/tests/sparse/mod.rs +++ b/tests/sparse/mod.rs @@ -2,5 +2,7 @@ mod cs_cholesky; mod cs_construction; mod cs_conversion; mod cs_matrix; +#[cfg(feature = "io")] +mod cs_matrix_market; mod cs_ops; mod cs_solve; From 8341ec2f10a5a3829061cda349f8f0719cf6e155 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 6 Nov 2018 18:32:20 +0100 Subject: [PATCH 14/25] Run rustfmt. 
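A compact sketch (not part of the patch series) of the triplet constructor exercised by the [PATCH 13/25] tests above: indices are 0-based, the result comes out with sorted row indices, and duplicate `(row, col)` pairs are summed by the dedup pass.

    use na::{CsMatrix, DMatrix};

    fn main() {
        // Two entries target (1, 1); `from_triplet` sums them during dedup.
        let rows = vec![0, 1, 1];
        let cols = vec![0, 1, 1];
        let vals = vec![1.0, 2.0, 3.0];

        let cs = CsMatrix::from_triplet(2, 2, &rows, &cols, &vals);
        assert!(cs.is_sorted());

        let dense: DMatrix<f64> = cs.into();
        assert_eq!(dense, DMatrix::from_row_slice(2, 2, &[
            1.0, 0.0,
            0.0, 5.0,
        ]));
    }
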
--- benches/core/matrix.rs | 16 +- benches/core/vector.rs | 4 +- benches/geometry/quaternion.rs | 4 +- benches/lib.rs | 4 +- benches/linalg/bidiagonal.rs | 2 +- benches/linalg/cholesky.rs | 2 +- benches/linalg/full_piv_lu.rs | 2 +- benches/linalg/hessenberg.rs | 2 +- benches/linalg/lu.rs | 2 +- benches/linalg/mod.rs | 10 +- benches/linalg/qr.rs | 2 +- benches/linalg/schur.rs | 2 +- benches/linalg/solve.rs | 2 +- benches/linalg/svd.rs | 2 +- benches/linalg/symmetric_eigen.rs | 2 +- examples/dimensional_genericity.rs | 14 +- examples/homogeneous_coordinates.rs | 2 +- examples/identity.rs | 4 +- examples/matrix_construction.rs | 11 +- examples/transform_vector_point.rs | 2 +- nalgebra-glm/src/aliases.rs | 19 +- nalgebra-glm/src/common.rs | 61 ++-- nalgebra-glm/src/constructors.rs | 200 +++++++---- nalgebra-glm/src/exponential.rs | 17 +- nalgebra-glm/src/ext/matrix_clip_space.rs | 3 +- nalgebra-glm/src/ext/matrix_projection.rs | 58 +++- nalgebra-glm/src/ext/matrix_relationnal.rs | 62 +++- nalgebra-glm/src/ext/matrix_transform.rs | 20 +- nalgebra-glm/src/ext/mod.rs | 39 ++- nalgebra-glm/src/ext/quaternion_common.rs | 4 +- nalgebra-glm/src/ext/quaternion_geometric.rs | 2 +- nalgebra-glm/src/ext/quaternion_relational.rs | 1 - nalgebra-glm/src/ext/quaternion_transform.rs | 4 +- .../src/ext/quaternion_trigonometric.rs | 2 +- nalgebra-glm/src/ext/vector_common.rs | 36 +- nalgebra-glm/src/ext/vector_relational.rs | 44 ++- nalgebra-glm/src/geometric.rs | 34 +- nalgebra-glm/src/gtc/epsilon.rs | 2 +- nalgebra-glm/src/gtc/matrix_access.rs | 35 +- nalgebra-glm/src/gtc/matrix_inverse.rs | 12 +- nalgebra-glm/src/gtc/mod.rs | 25 +- nalgebra-glm/src/gtc/quaternion.rs | 5 +- nalgebra-glm/src/gtc/type_ptr.rs | 45 +-- nalgebra-glm/src/gtx/component_wise.rs | 10 +- nalgebra-glm/src/gtx/exterior_product.rs | 4 +- .../src/gtx/handed_coordinate_space.rs | 2 +- nalgebra-glm/src/gtx/matrix_operation.rs | 4 +- nalgebra-glm/src/gtx/mod.rs | 38 +- nalgebra-glm/src/gtx/norm.rs | 18 +- nalgebra-glm/src/gtx/normal.rs | 2 +- nalgebra-glm/src/gtx/normalize_dot.rs | 8 +- nalgebra-glm/src/gtx/quaternion.rs | 19 +- nalgebra-glm/src/gtx/rotate_vector.rs | 6 +- nalgebra-glm/src/gtx/transform.rs | 5 +- nalgebra-glm/src/gtx/transform2.rs | 40 +-- nalgebra-glm/src/gtx/transform2d.rs | 2 +- nalgebra-glm/src/gtx/vector_angle.rs | 5 +- nalgebra-glm/src/gtx/vector_query.rs | 20 +- nalgebra-glm/src/lib.rs | 328 +++++++++--------- nalgebra-glm/src/matrix.rs | 32 +- nalgebra-glm/src/traits.rs | 91 +++-- nalgebra-glm/src/trigonometric.rs | 33 +- nalgebra-glm/src/vector_relational.rs | 29 +- nalgebra-lapack/benches/linalg/hessenberg.rs | 2 +- nalgebra-lapack/benches/linalg/lu.rs | 2 +- nalgebra-lapack/benches/linalg/mod.rs | 4 +- nalgebra-lapack/benches/linalg/qr.rs | 2 +- nalgebra-lapack/src/cholesky.rs | 28 +- nalgebra-lapack/src/eigen.rs | 27 +- nalgebra-lapack/src/hessenberg.rs | 25 +- nalgebra-lapack/src/lib.rs | 6 +- nalgebra-lapack/src/lu.rs | 35 +- nalgebra-lapack/src/qr.rs | 34 +- nalgebra-lapack/src/schur.rs | 24 +- nalgebra-lapack/src/svd.rs | 31 +- nalgebra-lapack/src/symmetric_eigen.rs | 27 +- nalgebra-lapack/tests/linalg/cholesky.rs | 2 +- nalgebra-lapack/tests/linalg/lu.rs | 2 +- nalgebra-lapack/tests/linalg/mod.rs | 6 +- nalgebra-lapack/tests/linalg/qr.rs | 2 +- .../tests/linalg/real_eigensystem.rs | 2 +- nalgebra-lapack/tests/linalg/real_schur.rs | 4 +- nalgebra-lapack/tests/linalg/svd.rs | 2 +- .../tests/linalg/symmetric_eigen.rs | 2 +- src/base/allocator.rs | 6 +- src/base/blas.rs | 20 +- src/base/cg.rs | 16 +- 
src/base/constraint.rs | 3 +- src/base/construction.rs | 94 ++--- src/base/construction_slice.rs | 18 +- src/base/default_allocator.rs | 30 +- src/base/dimension.rs | 14 +- src/base/edition.rs | 26 +- src/base/helper.rs | 4 +- src/base/matrix.rs | 64 ++-- src/base/matrix_alga.rs | 13 +- src/base/matrix_array.rs | 32 +- src/base/matrix_slice.rs | 18 +- src/base/matrix_vec.rs | 40 +-- src/base/ops.rs | 8 +- src/base/properties.rs | 3 +- src/base/storage.rs | 6 +- src/base/unit.rs | 11 +- src/debug/random_orthogonal.rs | 6 +- src/debug/random_sdp.rs | 6 +- src/geometry/isometry.rs | 33 +- src/geometry/isometry_construction.rs | 6 +- src/geometry/orthographic.rs | 18 +- src/geometry/perspective.rs | 15 +- src/geometry/point.rs | 29 +- src/geometry/point_alga.rs | 3 +- src/geometry/point_construction.rs | 10 +- src/geometry/point_ops.rs | 12 +- src/geometry/quaternion.rs | 26 +- src/geometry/quaternion_construction.rs | 37 +- src/geometry/rotation.rs | 23 +- src/geometry/rotation_alga.rs | 24 +- src/geometry/rotation_ops.rs | 3 +- src/geometry/rotation_specialization.rs | 13 +- src/geometry/similarity.rs | 33 +- src/geometry/transform.rs | 37 +- src/geometry/transform_construction.rs | 6 +- src/geometry/translation.rs | 34 +- src/geometry/translation_alga.rs | 24 +- src/geometry/translation_construction.rs | 6 +- src/geometry/unit_complex.rs | 3 +- src/geometry/unit_complex_alga.rs | 15 +- src/geometry/unit_complex_construction.rs | 7 +- src/geometry/unit_complex_ops.rs | 24 +- src/lib.rs | 12 +- src/linalg/balancing.rs | 8 +- src/linalg/bidiagonal.rs | 23 +- src/linalg/cholesky.rs | 24 +- src/linalg/determinant.rs | 4 +- src/linalg/full_piv_lu.rs | 38 +- src/linalg/hessenberg.rs | 36 +- src/linalg/householder.rs | 4 +- src/linalg/inverse.rs | 8 +- src/linalg/lu.rs | 49 +-- src/linalg/permutation_sequence.rs | 43 +-- src/linalg/qr.rs | 35 +- src/linalg/schur.rs | 33 +- src/linalg/svd.rs | 50 +-- src/linalg/symmetric_eigen.rs | 24 +- src/linalg/symmetric_tridiagonal.rs | 40 +-- src/sparse/cs_matrix.rs | 42 +-- src/sparse/cs_matrix_cholesky.rs | 9 +- src/sparse/cs_matrix_conversion.rs | 9 +- src/sparse/cs_matrix_solve.rs | 6 +- src/sparse/cs_utils.rs | 4 +- tests/geometry/isometry.rs | 207 ++++++----- tests/geometry/point.rs | 10 +- tests/geometry/quaternion.rs | 219 ++++++------ tests/geometry/similarity.rs | 203 ++++++----- tests/geometry/unit_complex.rs | 147 ++++---- 155 files changed, 1886 insertions(+), 2066 deletions(-) diff --git a/benches/core/matrix.rs b/benches/core/matrix.rs index 1103858a9..c323cd6ec 100644 --- a/benches/core/matrix.rs +++ b/benches/core/matrix.rs @@ -1,7 +1,7 @@ +use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, MatrixN, Vector2, Vector3, Vector4, U10}; use rand::{IsaacRng, Rng}; -use test::{self, Bencher}; -use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, MatrixN, U10, Vector2, Vector3, Vector4}; use std::ops::{Add, Div, Mul, Sub}; +use test::{self, Bencher}; #[path = "../common/macros.rs"] mod macros; @@ -189,18 +189,10 @@ fn mat_mul_mat(bench: &mut Bencher) { #[bench] fn mat100_from_fn(bench: &mut Bencher) { - bench.iter(|| { - DMatrix::from_fn(100, 100, |a, b| { - a + b - }) - }) + bench.iter(|| DMatrix::from_fn(100, 100, |a, b| a + b)) } #[bench] fn mat500_from_fn(bench: &mut Bencher) { - bench.iter(|| { - DMatrix::from_fn(500, 500, |a, b| { - a + b - }) - }) + bench.iter(|| DMatrix::from_fn(500, 500, |a, b| a + b)) } diff --git a/benches/core/vector.rs b/benches/core/vector.rs index afcc05ae1..35e25e2d3 100644 --- a/benches/core/vector.rs +++ 
b/benches/core/vector.rs @@ -1,8 +1,8 @@ +use na::{DVector, Vector2, Vector3, Vector4, VectorN}; use rand::{IsaacRng, Rng}; +use std::ops::{Add, Div, Mul, Sub}; use test::{self, Bencher}; use typenum::U10000; -use na::{DVector, Vector2, Vector3, Vector4, VectorN}; -use std::ops::{Add, Div, Mul, Sub}; #[path = "../common/macros.rs"] mod macros; diff --git a/benches/geometry/quaternion.rs b/benches/geometry/quaternion.rs index c04698c76..bc94c054e 100644 --- a/benches/geometry/quaternion.rs +++ b/benches/geometry/quaternion.rs @@ -1,7 +1,7 @@ -use rand::{IsaacRng, Rng}; -use test::{self, Bencher}; use na::{Quaternion, UnitQuaternion, Vector3}; +use rand::{IsaacRng, Rng}; use std::ops::{Add, Div, Mul, Sub}; +use test::{self, Bencher}; #[path = "../common/macros.rs"] mod macros; diff --git a/benches/lib.rs b/benches/lib.rs index cb77c4cc9..1ad3a2be5 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -6,12 +6,12 @@ extern crate rand; extern crate test; extern crate typenum; -use rand::{IsaacRng, Rng}; use na::DMatrix; +use rand::{IsaacRng, Rng}; mod core; -mod linalg; mod geometry; +mod linalg; fn reproductible_dmatrix(nrows: usize, ncols: usize) -> DMatrix { let mut rng = IsaacRng::new_unseeded(); diff --git a/benches/linalg/bidiagonal.rs b/benches/linalg/bidiagonal.rs index c3c7f060f..9e5a723ef 100644 --- a/benches/linalg/bidiagonal.rs +++ b/benches/linalg/bidiagonal.rs @@ -1,5 +1,5 @@ -use test::{self, Bencher}; use na::{Bidiagonal, DMatrix, Matrix4}; +use test::{self, Bencher}; #[path = "../common/macros.rs"] mod macros; diff --git a/benches/linalg/cholesky.rs b/benches/linalg/cholesky.rs index e9d2646cb..73c028cb5 100644 --- a/benches/linalg/cholesky.rs +++ b/benches/linalg/cholesky.rs @@ -1,5 +1,5 @@ -use test::{self, Bencher}; use na::{Cholesky, DMatrix, DVector}; +use test::{self, Bencher}; #[bench] fn cholesky_100x100(bh: &mut Bencher) { diff --git a/benches/linalg/full_piv_lu.rs b/benches/linalg/full_piv_lu.rs index 1e0a307ea..ad82b069e 100644 --- a/benches/linalg/full_piv_lu.rs +++ b/benches/linalg/full_piv_lu.rs @@ -1,5 +1,5 @@ -use test::{self, Bencher}; use na::{DMatrix, DVector, FullPivLU}; +use test::{self, Bencher}; // Without unpack. #[bench] diff --git a/benches/linalg/hessenberg.rs b/benches/linalg/hessenberg.rs index c12c6a466..427aa1fe2 100644 --- a/benches/linalg/hessenberg.rs +++ b/benches/linalg/hessenberg.rs @@ -1,5 +1,5 @@ -use test::{self, Bencher}; use na::{DMatrix, Hessenberg, Matrix4}; +use test::{self, Bencher}; #[path = "../common/macros.rs"] mod macros; diff --git a/benches/linalg/lu.rs b/benches/linalg/lu.rs index 2f83d3510..2428cf9c4 100644 --- a/benches/linalg/lu.rs +++ b/benches/linalg/lu.rs @@ -1,5 +1,5 @@ -use test::{self, Bencher}; use na::{DMatrix, DVector, LU}; +use test::{self, Bencher}; // Without unpack. 
#[bench] diff --git a/benches/linalg/mod.rs b/benches/linalg/mod.rs index c2cc4adeb..526b32ea5 100644 --- a/benches/linalg/mod.rs +++ b/benches/linalg/mod.rs @@ -1,11 +1,11 @@ -mod solve; +mod bidiagonal; mod cholesky; -mod qr; +mod full_piv_lu; mod hessenberg; -mod bidiagonal; mod lu; -mod full_piv_lu; -mod svd; +mod qr; mod schur; +mod solve; +mod svd; mod symmetric_eigen; // mod eigen; diff --git a/benches/linalg/qr.rs b/benches/linalg/qr.rs index 1a182259b..41a814ff2 100644 --- a/benches/linalg/qr.rs +++ b/benches/linalg/qr.rs @@ -1,5 +1,5 @@ -use test::{self, Bencher}; use na::{DMatrix, DVector, Matrix4, QR}; +use test::{self, Bencher}; #[path = "../common/macros.rs"] mod macros; diff --git a/benches/linalg/schur.rs b/benches/linalg/schur.rs index e62035e8f..e0e588ac9 100644 --- a/benches/linalg/schur.rs +++ b/benches/linalg/schur.rs @@ -1,5 +1,5 @@ -use test::{self, Bencher}; use na::{Matrix4, RealSchur}; +use test::{self, Bencher}; #[bench] fn schur_decompose_4x4(bh: &mut Bencher) { diff --git a/benches/linalg/solve.rs b/benches/linalg/solve.rs index 3362549af..03ec71e56 100644 --- a/benches/linalg/solve.rs +++ b/benches/linalg/solve.rs @@ -1,5 +1,5 @@ -use test::Bencher; use na::{DMatrix, DVector}; +use test::Bencher; #[bench] fn solve_l_triangular_100x100(bh: &mut Bencher) { diff --git a/benches/linalg/svd.rs b/benches/linalg/svd.rs index 62a29d3a1..47023804a 100644 --- a/benches/linalg/svd.rs +++ b/benches/linalg/svd.rs @@ -1,5 +1,5 @@ -use test::{self, Bencher}; use na::{Matrix4, SVD}; +use test::{self, Bencher}; #[bench] fn svd_decompose_4x4(bh: &mut Bencher) { diff --git a/benches/linalg/symmetric_eigen.rs b/benches/linalg/symmetric_eigen.rs index 6d2056d28..2a9058dae 100644 --- a/benches/linalg/symmetric_eigen.rs +++ b/benches/linalg/symmetric_eigen.rs @@ -1,5 +1,5 @@ -use test::{self, Bencher}; use na::{Matrix4, SymmetricEigen}; +use test::{self, Bencher}; #[bench] fn symmetric_eigen_decompose_4x4(bh: &mut Bencher) { diff --git a/examples/dimensional_genericity.rs b/examples/dimensional_genericity.rs index 2650cc64b..ca653d57d 100644 --- a/examples/dimensional_genericity.rs +++ b/examples/dimensional_genericity.rs @@ -2,15 +2,13 @@ extern crate alga; extern crate nalgebra as na; use alga::linear::FiniteDimInnerSpace; -use na::{DefaultAllocator, Real, Unit, Vector2, Vector3, VectorN}; use na::allocator::Allocator; use na::dimension::Dim; +use na::{DefaultAllocator, Real, Unit, Vector2, Vector3, VectorN}; /// Reflects a vector wrt. the hyperplane with normal `plane_normal`. fn reflect_wrt_hyperplane_with_algebraic_genericity(plane_normal: &Unit, vector: &V) -> V -where - V: FiniteDimInnerSpace + Copy, -{ +where V: FiniteDimInnerSpace + Copy { let n = plane_normal.as_ref(); // Get the underlying vector of type `V`. *vector - *n * (n.dot(vector) * na::convert(2.0)) } @@ -31,9 +29,7 @@ where /// Reflects a 2D vector wrt. the 2D line with normal `plane_normal`. fn reflect_wrt_hyperplane2(plane_normal: &Unit>, vector: &Vector2) -> Vector2 -where - N: Real, -{ +where N: Real { let n = plane_normal.as_ref(); // Get the underlying Vector2 vector - n * (n.dot(vector) * na::convert(2.0)) } @@ -41,9 +37,7 @@ where /// Reflects a 3D vector wrt. the 3D plane with normal `plane_normal`. /// /!\ This is an exact replicate of `reflect_wrt_hyperplane2, but for 3D. 
fn reflect_wrt_hyperplane3(plane_normal: &Unit>, vector: &Vector3) -> Vector3 -where - N: Real, -{ +where N: Real { let n = plane_normal.as_ref(); // Get the underlying Vector3 vector - n * (n.dot(vector) * na::convert(2.0)) } diff --git a/examples/homogeneous_coordinates.rs b/examples/homogeneous_coordinates.rs index f2d30d5ab..e06600323 100644 --- a/examples/homogeneous_coordinates.rs +++ b/examples/homogeneous_coordinates.rs @@ -2,8 +2,8 @@ extern crate approx; extern crate nalgebra as na; -use std::f32; use na::{Isometry2, Point2, Vector2}; +use std::f32; fn use_dedicated_types() { let iso = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI); diff --git a/examples/identity.rs b/examples/identity.rs index d97bed988..06d69f708 100644 --- a/examples/identity.rs +++ b/examples/identity.rs @@ -9,9 +9,7 @@ use na::{Id, Isometry3, Point3, Vector3}; * intermediate value. */ fn complicated_algorithm(v: &Vector3, t: &T, n: usize) -> Vector3 -where - T: Transformation>, -{ +where T: Transformation> { let mut result = *v; // Do lots of operations involving t. diff --git a/examples/matrix_construction.rs b/examples/matrix_construction.rs index 304228cbc..bb78458fa 100644 --- a/examples/matrix_construction.rs +++ b/examples/matrix_construction.rs @@ -48,12 +48,11 @@ fn main() { 4, 3, [ - // Components listed column-by-column. - 1.0, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 1.0, 0.0 - ].iter() - .cloned(), + // Components listed column-by-column. + 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, + ] + .iter() + .cloned(), ); assert_eq!(dm, dm1); diff --git a/examples/transform_vector_point.rs b/examples/transform_vector_point.rs index f9a2e5757..a6ec2fff2 100644 --- a/examples/transform_vector_point.rs +++ b/examples/transform_vector_point.rs @@ -2,8 +2,8 @@ extern crate approx; extern crate nalgebra as na; -use std::f32; use na::{Isometry2, Point2, Vector2}; +use std::f32; fn main() { let t = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI); diff --git a/nalgebra-glm/src/aliases.rs b/nalgebra-glm/src/aliases.rs index 9bb7edb67..206a8abe0 100644 --- a/nalgebra-glm/src/aliases.rs +++ b/nalgebra-glm/src/aliases.rs @@ -1,9 +1,7 @@ -use na::{MatrixMN, VectorN, - Matrix2, Matrix3, Matrix4, - Matrix2x3, Matrix3x2, Matrix4x2, - Matrix2x4, Matrix3x4, Matrix4x3, - Quaternion, - U1, U2, U3, U4}; +use na::{ + Matrix2, Matrix2x3, Matrix2x4, Matrix3, Matrix3x2, Matrix3x4, Matrix4, Matrix4x2, Matrix4x3, + MatrixMN, Quaternion, VectorN, U1, U2, U3, U4, +}; /// A matrix with components of type `N`. It has `R` rows, and `C` columns. /// @@ -194,13 +192,13 @@ pub type UVec3 = TVec3; /// A 4D vector with `u32` components. pub type UVec4 = TVec4; /// A 1D vector with `f32` components. -pub type Vec1 = TVec1; +pub type Vec1 = TVec1; /// A 2D vector with `f32` components. -pub type Vec2 = TVec2; +pub type Vec2 = TVec2; /// A 3D vector with `f32` components. -pub type Vec3 = TVec3; +pub type Vec3 = TVec3; /// A 4D vector with `f32` components. -pub type Vec4 = TVec4; +pub type Vec4 = TVec4; /// A 1D vector with `u64` components. pub type U64Vec1 = TVec1; @@ -270,7 +268,6 @@ pub type I8Vec3 = TVec3; /// A 4D vector with `i8` components. pub type I8Vec4 = TVec4; - /// A 2x2 matrix with components of type `N`. pub type TMat2 = Matrix2; /// A 2x2 matrix with components of type `N`. 
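The alias block above is what lets nalgebra-glm mirror GLSL's type names while staying generic underneath: `Vec3` is the `f32` instantiation of `TVec3`, `UVec3` the `u32` one, and so on. A short sketch, assuming the crate is imported as `glm` and using the `vec3` constructor reformatted later in this patch:

extern crate nalgebra_glm as glm;

fn main() {
    // GLSL-style alias: Vec3 is TVec3 with N = f32.
    let v: glm::Vec3 = glm::vec3(1.0, 2.0, 3.0);
    // The generic alias is still available for other scalar types.
    let w: glm::TVec3<f64> = glm::vec3(1.0, 2.0, 3.0);

    assert_eq!(v.x + v.y + v.z, 6.0);
    assert_eq!(w.norm(), 14.0f64.sqrt());
}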
diff --git a/nalgebra-glm/src/common.rs b/nalgebra-glm/src/common.rs index 45b94336b..e6a3dc0f4 100644 --- a/nalgebra-glm/src/common.rs +++ b/nalgebra-glm/src/common.rs @@ -1,9 +1,9 @@ -use std::mem; +use na::{self, DefaultAllocator, Real}; use num::FromPrimitive; -use na::{self, Real, DefaultAllocator}; +use std::mem; -use aliases::{TVec, TMat}; -use traits::{Number, Dimension, Alloc}; +use aliases::{TMat, TVec}; +use traits::{Alloc, Dimension, Number}; /// For each matrix or vector component `x` if `x >= 0`; otherwise, it returns `-x`. /// @@ -22,8 +22,8 @@ use traits::{Number, Dimension, Alloc}; /// /// * [`sign`](fn.sign.html) pub fn abs(x: &TMat) -> TMat - where DefaultAllocator: Alloc { - x.abs() +where DefaultAllocator: Alloc { + x.abs() } /// For each matrix or vector component returns a value equal to the nearest integer that is greater than or equal to `x`. @@ -44,7 +44,7 @@ pub fn abs(x: &TMat) -> TMat(x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|x| x.ceil()) } @@ -94,7 +94,7 @@ pub fn clamp_scalar(x: N, min_val: N, max_val: N) -> N { /// * [`clamp_scalar`](fn.clamp_scalar.html) /// * [`clamp_vec`](fn.clamp_vec.html) pub fn clamp(x: &TVec, min_val: N, max_val: N) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|x| na::clamp(x, min_val, max_val)) } @@ -125,8 +125,14 @@ pub fn clamp(x: &TVec, min_val: N, max_val: N) -> /// /// * [`clamp_scalar`](fn.clamp_scalar.html) /// * [`clamp`](fn.clamp.html) -pub fn clamp_vec(x: &TVec, min_val: &TVec, max_val: &TVec) -> TVec - where DefaultAllocator: Alloc { +pub fn clamp_vec( + x: &TVec, + min_val: &TVec, + max_val: &TVec, +) -> TVec +where + DefaultAllocator: Alloc, +{ x.zip_zip_map(min_val, max_val, |a, min, max| na::clamp(a, min, max)) } @@ -161,7 +167,7 @@ pub fn float_bits_to_int(v: f32) -> i32 { /// * [`uint_bits_to_float`](fn.uint_bits_to_float.html) /// * [`uint_bits_to_float_scalar`](fn.uint_bits_to_float_scalar.html) pub fn float_bits_to_int_vec(v: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(float_bits_to_int) } @@ -196,7 +202,7 @@ pub fn float_bits_to_uint(v: f32) -> u32 { /// * [`uint_bits_to_float`](fn.uint_bits_to_float.html) /// * [`uint_bits_to_float_scalar`](fn.uint_bits_to_float_scalar.html) pub fn float_bits_to_uint_vec(v: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(float_bits_to_uint) } @@ -217,7 +223,7 @@ pub fn float_bits_to_uint_vec(v: &TVec) -> TVec /// * [`round`](fn.round.html) /// * [`trunc`](fn.trunc.html) pub fn floor(x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|x| x.floor()) } @@ -244,7 +250,7 @@ pub fn floor(x: &TVec) -> TVec /// * [`round`](fn.round.html) /// * [`trunc`](fn.trunc.html) pub fn fract(x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|x| x.fract()) } @@ -271,7 +277,6 @@ pub fn fract(x: &TVec) -> TVec /// * [`uint_bits_to_float_scalar`](fn.uint_bits_to_float_scalar.html) pub fn int_bits_to_float(v: i32) -> f32 { f32::from_bits(v as u32) - } /// For each components of `v`, returns a floating-point value corresponding to a signed integer encoding of a floating-point value. 
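The `where`-clause reformatting above is purely cosmetic; the GLSL-style semantics of these helpers are unchanged. For reference, a small sketch of the three clamp variants touched in the hunks above (the flat `glm::` paths are assumed from the crate's re-exports):

extern crate nalgebra_glm as glm;

fn main() {
    let v = glm::vec3(-1.0f32, 0.5, 2.0);

    // Scalar bounds, applied component-wise.
    assert_eq!(glm::clamp(&v, 0.0, 1.0), glm::vec3(0.0, 0.5, 1.0));

    // Per-component bounds.
    let lo = glm::vec3(0.0f32, 0.0, 0.0);
    let hi = glm::vec3(1.0f32, 1.0, 1.5);
    assert_eq!(glm::clamp_vec(&v, &lo, &hi), glm::vec3(0.0, 0.5, 1.5));

    // Plain scalar clamp.
    assert_eq!(glm::clamp_scalar(3.0f32, 0.0, 1.0), 1.0);
}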
@@ -288,7 +293,7 @@ pub fn int_bits_to_float(v: i32) -> f32 { /// * [`uint_bits_to_float`](fn.uint_bits_to_float.html) /// * [`uint_bits_to_float_scalar`](fn.uint_bits_to_float_scalar.html) pub fn int_bits_to_float_vec(v: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(int_bits_to_float) } @@ -325,7 +330,7 @@ pub fn mix(x: N, y: N, a: N) -> N { /// /// * [`modf`](fn.modf.html) pub fn modf_vec(x: &TVec, y: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.zip_map(y, |x, y| x % y) } @@ -357,9 +362,8 @@ pub fn modf(x: N, i: N) -> N { /// * [`fract`](fn.fract.html) /// * [`trunc`](fn.trunc.html) pub fn round(x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|x| x.round()) - } //pub fn roundEven(x: &TVec) -> TVec @@ -382,14 +386,8 @@ pub fn round(x: &TVec) -> TVec /// * [`abs`](fn.abs.html) /// pub fn sign(x: &TVec) -> TVec - where DefaultAllocator: Alloc { - x.map(|x| { - if x.is_zero() { - N::zero() - } else { - x.signum() - } - }) +where DefaultAllocator: Alloc { + x.map(|x| if x.is_zero() { N::zero() } else { x.signum() }) } /// Returns 0.0 if `x <= edge0` and `1.0 if x >= edge1` and performs smooth Hermite interpolation between 0 and 1 when `edge0 < x < edge1`. @@ -414,13 +412,13 @@ pub fn step_scalar(edge: N, x: N) -> N { /// Returns 0.0 if `x[i] < edge`, otherwise it returns 1.0. pub fn step(edge: N, x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|x| step_scalar(edge, x)) } /// Returns 0.0 if `x[i] < edge[i]`, otherwise it returns 1.0. pub fn step_vec(edge: &TVec, x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { edge.zip_map(x, step_scalar) } @@ -441,7 +439,7 @@ pub fn step_vec(edge: &TVec, x: &TVec) -> T /// * [`fract`](fn.fract.html) /// * [`round`](fn.round.html) pub fn trunc(x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|x| x.trunc()) } @@ -460,7 +458,6 @@ pub fn trunc(x: &TVec) -> TVec /// * [`uint_bits_to_float`](fn.uint_bits_to_float.html) pub fn uint_bits_to_float_scalar(v: u32) -> f32 { f32::from_bits(v) - } /// For each component of `v`, returns a floating-point value corresponding to a unsigned integer encoding of a floating-point value. @@ -477,6 +474,6 @@ pub fn uint_bits_to_float_scalar(v: u32) -> f32 { /// * [`int_bits_to_float_vec`](fn.int_bits_to_float_vec.html) /// * [`uint_bits_to_float_scalar`](fn.uint_bits_to_float_scalar.html) pub fn uint_bits_to_float(v: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(uint_bits_to_float_scalar) } diff --git a/nalgebra-glm/src/constructors.rs b/nalgebra-glm/src/constructors.rs index f19ae5348..210a10d49 100644 --- a/nalgebra-glm/src/constructors.rs +++ b/nalgebra-glm/src/constructors.rs @@ -1,7 +1,8 @@ -use na::{Scalar, Real, U2, U3, U4}; -use aliases::{TMat, Qua, TVec1, TVec2, TVec3, TVec4, TMat2, TMat2x3, TMat2x4, TMat3, TMat3x2, TMat3x4, - TMat4, TMat4x2, TMat4x3}; - +use aliases::{ + Qua, TMat, TMat2, TMat2x3, TMat2x4, TMat3, TMat3x2, TMat3x4, TMat4, TMat4x2, TMat4x3, TVec1, + TVec2, TVec3, TVec4, +}; +use na::{Real, Scalar, U2, U3, U4}; /// Creates a new 1D vector. /// @@ -32,112 +33,173 @@ pub fn vec4(x: N, y: N, z: N, w: N) -> TVec4 { TVec4::new(x, y, z, w) } - /// Create a new 2x2 matrix. pub fn mat2(m11: N, m12: N, m21: N, m22: N) -> TMat2 { - TMat::::new( - m11, m12, - m21, m22, - ) + TMat::::new(m11, m12, m21, m22) } /// Create a new 2x2 matrix. 
pub fn mat2x2(m11: N, m12: N, m21: N, m22: N) -> TMat2 { - TMat::::new( - m11, m12, - m21, m22, - ) + TMat::::new(m11, m12, m21, m22) } /// Create a new 2x3 matrix. pub fn mat2x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N) -> TMat2x3 { - TMat::::new( - m11, m12, m13, - m21, m22, m23, - ) + TMat::::new(m11, m12, m13, m21, m22, m23) } /// Create a new 2x4 matrix. -pub fn mat2x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N) -> TMat2x4 { - TMat::::new( - m11, m12, m13, m14, - m21, m22, m23, m24, - ) +pub fn mat2x4( + m11: N, + m12: N, + m13: N, + m14: N, + m21: N, + m22: N, + m23: N, + m24: N, +) -> TMat2x4 +{ + TMat::::new(m11, m12, m13, m14, m21, m22, m23, m24) } /// Create a new 3x3 matrix. -pub fn mat3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N) -> TMat3 { - TMat::::new( - m11, m12, m13, - m21, m22, m23, - m31, m32, m33, - ) +pub fn mat3( + m11: N, + m12: N, + m13: N, + m21: N, + m22: N, + m23: N, + m31: N, + m32: N, + m33: N, +) -> TMat3 +{ + TMat::::new(m11, m12, m13, m21, m22, m23, m31, m32, m33) } /// Create a new 3x2 matrix. pub fn mat3x2(m11: N, m12: N, m21: N, m22: N, m31: N, m32: N) -> TMat3x2 { - TMat::::new( - m11, m12, - m21, m22, - m31, m32, - ) + TMat::::new(m11, m12, m21, m22, m31, m32) } /// Create a new 3x3 matrix. -pub fn mat3x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N) -> TMat3 { - TMat::::new( - m11, m12, m13, - m31, m32, m33, - m21, m22, m23, - ) +pub fn mat3x3( + m11: N, + m12: N, + m13: N, + m21: N, + m22: N, + m23: N, + m31: N, + m32: N, + m33: N, +) -> TMat3 +{ + TMat::::new(m11, m12, m13, m31, m32, m33, m21, m22, m23) } /// Create a new 3x4 matrix. -pub fn mat3x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N) -> TMat3x4 { - TMat::::new( - m11, m12, m13, m14, - m21, m22, m23, m24, - m31, m32, m33, m34, - ) +pub fn mat3x4( + m11: N, + m12: N, + m13: N, + m14: N, + m21: N, + m22: N, + m23: N, + m24: N, + m31: N, + m32: N, + m33: N, + m34: N, +) -> TMat3x4 +{ + TMat::::new(m11, m12, m13, m14, m21, m22, m23, m24, m31, m32, m33, m34) } /// Create a new 4x2 matrix. -pub fn mat4x2(m11: N, m12: N, m21: N, m22: N, m31: N, m32: N, m41: N, m42: N) -> TMat4x2 { - TMat::::new( - m11, m12, - m21, m22, - m31, m32, - m41, m42, - ) +pub fn mat4x2( + m11: N, + m12: N, + m21: N, + m22: N, + m31: N, + m32: N, + m41: N, + m42: N, +) -> TMat4x2 +{ + TMat::::new(m11, m12, m21, m22, m31, m32, m41, m42) } /// Create a new 4x3 matrix. -pub fn mat4x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N, m41: N, m42: N, m43: N) -> TMat4x3 { - TMat::::new( - m11, m12, m13, - m21, m22, m23, - m31, m32, m33, - m41, m42, m43, - ) +pub fn mat4x3( + m11: N, + m12: N, + m13: N, + m21: N, + m22: N, + m23: N, + m31: N, + m32: N, + m33: N, + m41: N, + m42: N, + m43: N, +) -> TMat4x3 +{ + TMat::::new(m11, m12, m13, m21, m22, m23, m31, m32, m33, m41, m42, m43) } /// Create a new 4x4 matrix. -pub fn mat4x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N, m41: N, m42: N, m43: N, m44: N) -> TMat4 { +pub fn mat4x4( + m11: N, + m12: N, + m13: N, + m14: N, + m21: N, + m22: N, + m23: N, + m24: N, + m31: N, + m32: N, + m33: N, + m34: N, + m41: N, + m42: N, + m43: N, + m44: N, +) -> TMat4 +{ TMat::::new( - m11, m12, m13, m14, - m21, m22, m23, m24, - m31, m32, m33, m34, - m41, m42, m43, m44, + m11, m12, m13, m14, m21, m22, m23, m24, m31, m32, m33, m34, m41, m42, m43, m44, ) } /// Create a new 4x4 matrix. 
-pub fn mat4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N, m41: N, m42: N, m43: N, m44: N) -> TMat4 { +pub fn mat4( + m11: N, + m12: N, + m13: N, + m14: N, + m21: N, + m22: N, + m23: N, + m24: N, + m31: N, + m32: N, + m33: N, + m34: N, + m41: N, + m42: N, + m43: N, + m44: N, +) -> TMat4 +{ TMat::::new( - m11, m12, m13, m14, - m21, m22, m23, m24, - m31, m32, m33, m34, - m41, m42, m43, m44, + m11, m12, m13, m14, m21, m22, m23, m24, m31, m32, m33, m34, m41, m42, m43, m44, ) } diff --git a/nalgebra-glm/src/exponential.rs b/nalgebra-glm/src/exponential.rs index d73db3a86..21b716f2a 100644 --- a/nalgebra-glm/src/exponential.rs +++ b/nalgebra-glm/src/exponential.rs @@ -1,5 +1,5 @@ -use na::{Real, DefaultAllocator}; use aliases::TVec; +use na::{DefaultAllocator, Real}; use traits::{Alloc, Dimension}; /// Component-wise exponential. @@ -8,7 +8,7 @@ use traits::{Alloc, Dimension}; /// /// * [`exp2`](fn.exp2.html) pub fn exp(v: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(|x| x.exp()) } @@ -18,7 +18,7 @@ pub fn exp(v: &TVec) -> TVec /// /// * [`exp`](fn.exp.html) pub fn exp2(v: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(|x| x.exp2()) } @@ -28,9 +28,8 @@ pub fn exp2(v: &TVec) -> TVec /// /// * [`sqrt`](fn.sqrt.html) pub fn inversesqrt(v: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(|x| N::one() / x.sqrt()) - } /// Component-wise logarithm. @@ -39,7 +38,7 @@ pub fn inversesqrt(v: &TVec) -> TVec /// /// * [`log2`](fn.log2.html) pub fn log(v: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(|x| x.ln()) } @@ -49,13 +48,13 @@ pub fn log(v: &TVec) -> TVec /// /// * [`log`](fn.log.html) pub fn log2(v: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(|x| x.log2()) } /// Component-wise power. pub fn pow(base: &TVec, exponent: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { base.zip_map(exponent, |b, e| b.powf(e)) } @@ -68,6 +67,6 @@ pub fn pow(base: &TVec, exponent: &TVec) -> T /// * [`inversesqrt`](fn.inversesqrt.html) /// * [`pow`](fn.pow.html) pub fn sqrt(v: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(|x| x.sqrt()) } diff --git a/nalgebra-glm/src/ext/matrix_clip_space.rs b/nalgebra-glm/src/ext/matrix_clip_space.rs index 813fe8fc6..89c137ea6 100644 --- a/nalgebra-glm/src/ext/matrix_clip_space.rs +++ b/nalgebra-glm/src/ext/matrix_clip_space.rs @@ -1,5 +1,5 @@ -use na::{Real, Orthographic3, Perspective3}; use aliases::TMat4; +use na::{Orthographic3, Perspective3, Real}; //pub fn frustum(left: N, right: N, bottom: N, top: N, near: N, far: N) -> TMat4 { // unimplemented!() @@ -90,7 +90,6 @@ pub fn ortho(left: N, right: N, bottom: N, top: N, znear: N, zfar: N) - // unimplemented!() //} - /// Creates a matrix for a perspective-view frustum based on the right handedness and OpenGL near and far clip planes definition. /// /// # Important note diff --git a/nalgebra-glm/src/ext/matrix_projection.rs b/nalgebra-glm/src/ext/matrix_projection.rs index b8d2fc8d2..d56103a69 100644 --- a/nalgebra-glm/src/ext/matrix_projection.rs +++ b/nalgebra-glm/src/ext/matrix_projection.rs @@ -1,6 +1,6 @@ use na::{self, Real, U3}; -use aliases::{TVec2, TVec3, TVec4, TMat4}; +use aliases::{TMat4, TVec2, TVec3, TVec4}; /// Define a picking region. 
/// @@ -13,11 +13,15 @@ pub fn pick_matrix(center: &TVec2, delta: &TVec2, viewport: &TVec let shift = TVec3::new( (viewport.z - (center.x - viewport.x) * na::convert(2.0)) / delta.x, (viewport.w - (center.y - viewport.y) * na::convert(2.0)) / delta.y, - N::zero() + N::zero(), ); let result = TMat4::new_translation(&shift); - result.prepend_nonuniform_scaling(&TVec3::new(viewport.z / delta.x, viewport.w / delta.y, N::one())) + result.prepend_nonuniform_scaling(&TVec3::new( + viewport.z / delta.x, + viewport.w / delta.y, + N::one(), + )) } /// Map the specified object coordinates `(obj.x, obj.y, obj.z)` into window coordinates using OpenGL near and far clip planes definition. @@ -36,7 +40,13 @@ pub fn pick_matrix(center: &TVec2, delta: &TVec2, viewport: &TVec /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn project(obj: &TVec3, model: &TMat4, proj: &TMat4, viewport: TVec4) -> TVec3 { +pub fn project( + obj: &TVec3, + model: &TMat4, + proj: &TMat4, + viewport: TVec4, +) -> TVec3 +{ project_no(obj, model, proj, viewport) } @@ -58,7 +68,13 @@ pub fn project(obj: &TVec3, model: &TMat4, proj: &TMat4, viewp /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn project_no(obj: &TVec3, model: &TMat4, proj: &TMat4, viewport: TVec4) -> TVec3 { +pub fn project_no( + obj: &TVec3, + model: &TMat4, + proj: &TMat4, + viewport: TVec4, +) -> TVec3 +{ let proj = project_zo(obj, model, proj, viewport); TVec3::new(proj.x, proj.y, proj.z * na::convert(0.5) + na::convert(0.5)) } @@ -81,7 +97,13 @@ pub fn project_no(obj: &TVec3, model: &TMat4, proj: &TMat4, vi /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn project_zo(obj: &TVec3, model: &TMat4, proj: &TMat4, viewport: TVec4) -> TVec3 { +pub fn project_zo( + obj: &TVec3, + model: &TMat4, + proj: &TMat4, + viewport: TVec4, +) -> TVec3 +{ let normalized = proj * model * TVec4::new(obj.x, obj.y, obj.z, N::one()); let scale = N::one() / normalized.w; @@ -108,7 +130,13 @@ pub fn project_zo(obj: &TVec3, model: &TMat4, proj: &TMat4, vi /// * [`project_zo`](fn.project_zo.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn unproject(win: &TVec3, model: &TMat4, proj: &TMat4, viewport: TVec4) -> TVec3 { +pub fn unproject( + win: &TVec3, + model: &TMat4, + proj: &TMat4, + viewport: TVec4, +) -> TVec3 +{ unproject_no(win, model, proj, viewport) } @@ -130,7 +158,13 @@ pub fn unproject(win: &TVec3, model: &TMat4, proj: &TMat4, vie /// * [`project_zo`](fn.project_zo.html) /// * [`unproject`](fn.unproject.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn unproject_no(win: &TVec3, model: &TMat4, proj: &TMat4, viewport: TVec4) -> TVec3 { +pub fn unproject_no( + win: &TVec3, + model: &TMat4, + proj: &TMat4, + viewport: TVec4, +) -> TVec3 +{ let _2: N = na::convert(2.0); let transform = (proj * model).try_inverse().unwrap_or_else(TMat4::zeros); let pt = TVec4::new( @@ -162,7 +196,13 @@ pub fn unproject_no(win: &TVec3, model: &TMat4, proj: &TMat4, /// * [`project_zo`](fn.project_zo.html) /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) -pub fn unproject_zo(win: &TVec3, model: &TMat4, proj: &TMat4, viewport: TVec4) -> TVec3 { +pub fn unproject_zo( + win: &TVec3, + model: &TMat4, + proj: &TMat4, + viewport: TVec4, +) -> TVec3 +{ let _2: N 
= na::convert(2.0); let transform = (proj * model).try_inverse().unwrap_or_else(TMat4::zeros); let pt = TVec4::new( diff --git a/nalgebra-glm/src/ext/matrix_relationnal.rs b/nalgebra-glm/src/ext/matrix_relationnal.rs index d5f34ae8d..80fb8b6fd 100644 --- a/nalgebra-glm/src/ext/matrix_relationnal.rs +++ b/nalgebra-glm/src/ext/matrix_relationnal.rs @@ -1,13 +1,18 @@ use na::DefaultAllocator; -use aliases::{TVec, TMat}; -use traits::{Alloc, Number, Dimension}; +use aliases::{TMat, TVec}; +use traits::{Alloc, Dimension, Number}; /// Perform a component-wise equal-to comparison of two matrices. /// /// Return a boolean vector which components value is True if this expression is satisfied per column of the matrices. -pub fn equal_columns(x: &TMat, y: &TMat) -> TVec - where DefaultAllocator: Alloc { +pub fn equal_columns( + x: &TMat, + y: &TMat, +) -> TVec +where + DefaultAllocator: Alloc, +{ let mut res = TVec::<_, C>::repeat(false); for i in 0..C::dim() { @@ -20,16 +25,28 @@ pub fn equal_columns(x: &TMat, y /// Returns the component-wise comparison of `|x - y| < epsilon`. /// /// True if this expression is satisfied. -pub fn equal_columns_eps(x: &TMat, y: &TMat, epsilon: N) -> TVec - where DefaultAllocator: Alloc { +pub fn equal_columns_eps( + x: &TMat, + y: &TMat, + epsilon: N, +) -> TVec +where + DefaultAllocator: Alloc, +{ equal_columns_eps_vec(x, y, &TVec::<_, C>::repeat(epsilon)) } /// Returns the component-wise comparison on each matrix column `|x - y| < epsilon`. /// /// True if this expression is satisfied. -pub fn equal_columns_eps_vec(x: &TMat, y: &TMat, epsilon: &TVec) -> TVec - where DefaultAllocator: Alloc { +pub fn equal_columns_eps_vec( + x: &TMat, + y: &TMat, + epsilon: &TVec, +) -> TVec +where + DefaultAllocator: Alloc, +{ let mut res = TVec::<_, C>::repeat(false); for i in 0..C::dim() { @@ -42,8 +59,13 @@ pub fn equal_columns_eps_vec(x: &TMat(x: &TMat, y: &TMat) -> TVec - where DefaultAllocator: Alloc { +pub fn not_equal_columns( + x: &TMat, + y: &TMat, +) -> TVec +where + DefaultAllocator: Alloc, +{ let mut res = TVec::<_, C>::repeat(false); for i in 0..C::dim() { @@ -56,16 +78,28 @@ pub fn not_equal_columns(x: &TMat(x: &TMat, y: &TMat, epsilon: N) -> TVec - where DefaultAllocator: Alloc { +pub fn not_equal_columns_eps( + x: &TMat, + y: &TMat, + epsilon: N, +) -> TVec +where + DefaultAllocator: Alloc, +{ not_equal_columns_eps_vec(x, y, &TVec::<_, C>::repeat(epsilon)) } /// Returns the component-wise comparison of `|x - y| >= epsilon`. /// /// True if this expression is not satisfied. -pub fn not_equal_columns_eps_vec(x: &TMat, y: &TMat, epsilon: &TVec) -> TVec - where DefaultAllocator: Alloc { +pub fn not_equal_columns_eps_vec( + x: &TMat, + y: &TMat, + epsilon: &TVec, +) -> TVec +where + DefaultAllocator: Alloc, +{ let mut res = TVec::<_, C>::repeat(false); for i in 0..C::dim() { diff --git a/nalgebra-glm/src/ext/matrix_transform.rs b/nalgebra-glm/src/ext/matrix_transform.rs index af2408fd5..fb23efc98 100644 --- a/nalgebra-glm/src/ext/matrix_transform.rs +++ b/nalgebra-glm/src/ext/matrix_transform.rs @@ -1,11 +1,11 @@ -use na::{DefaultAllocator, Real, Unit, Rotation3, Point3}; +use na::{DefaultAllocator, Point3, Real, Rotation3, Unit}; -use traits::{Dimension, Number, Alloc}; -use aliases::{TMat, TVec, TVec3, TMat4}; +use aliases::{TMat, TMat4, TVec, TVec3}; +use traits::{Alloc, Dimension, Number}; /// The identity matrix. 
pub fn identity() -> TMat - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { TMat::::identity() } @@ -38,7 +38,11 @@ pub fn look_at(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMa /// * [`look_at`](fn.look_at.html) /// * [`look_at_rh`](fn.look_at_rh.html) pub fn look_at_lh(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { - TMat::look_at_lh(&Point3::from_coordinates(*eye), &Point3::from_coordinates(*center), up) + TMat::look_at_lh( + &Point3::from_coordinates(*eye), + &Point3::from_coordinates(*center), + up, + ) } /// Build a right handed look at view matrix. @@ -54,7 +58,11 @@ pub fn look_at_lh(eye: &TVec3, center: &TVec3, up: &TVec3) -> /// * [`look_at`](fn.look_at.html) /// * [`look_at_lh`](fn.look_at_lh.html) pub fn look_at_rh(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { - TMat::look_at_rh(&Point3::from_coordinates(*eye), &Point3::from_coordinates(*center), up) + TMat::look_at_rh( + &Point3::from_coordinates(*eye), + &Point3::from_coordinates(*center), + up, + ) } /// Builds a rotation 4 * 4 matrix created from an axis vector and an angle and right-multiply it to `m`. diff --git a/nalgebra-glm/src/ext/mod.rs b/nalgebra-glm/src/ext/mod.rs index 5d8e9d5c5..6ad74ff6f 100644 --- a/nalgebra-glm/src/ext/mod.rs +++ b/nalgebra-glm/src/ext/mod.rs @@ -1,30 +1,41 @@ //! (Reexported) Additional features not specified by GLSL specification pub use self::matrix_clip_space::{ortho, perspective}; -pub use self::matrix_projection::{pick_matrix, project, project_no, project_zo, unproject, unproject_no, unproject_zo}; -pub use self::matrix_relationnal::{equal_columns, equal_columns_eps, equal_columns_eps_vec, not_equal_columns, not_equal_columns_eps, not_equal_columns_eps_vec}; -pub use self::matrix_transform::{identity, look_at, look_at_lh, rotate, scale, look_at_rh, translate, rotate_x, rotate_y, rotate_z}; +pub use self::matrix_projection::{ + pick_matrix, project, project_no, project_zo, unproject, unproject_no, unproject_zo, +}; +pub use self::matrix_relationnal::{ + equal_columns, equal_columns_eps, equal_columns_eps_vec, not_equal_columns, + not_equal_columns_eps, not_equal_columns_eps_vec, +}; +pub use self::matrix_transform::{ + identity, look_at, look_at_lh, look_at_rh, rotate, rotate_x, rotate_y, rotate_z, scale, + translate, +}; +pub use self::quaternion_common::{quat_conjugate, quat_inverse, quat_lerp, quat_slerp}; +pub use self::quaternion_geometric::{ + quat_cross, quat_dot, quat_length, quat_magnitude, quat_normalize, +}; +pub use self::quaternion_relational::{ + quat_equal, quat_equal_eps, quat_not_equal, quat_not_equal_eps, +}; +pub use self::quaternion_transform::{quat_exp, quat_log, quat_pow, quat_rotate}; +pub use self::quaternion_trigonometric::{quat_angle, quat_angle_axis, quat_axis}; pub use self::scalar_common::{max3_scalar, max4_scalar, min3_scalar, min4_scalar}; pub use self::scalar_constants::{epsilon, pi}; pub use self::vector_common::{max, max2, max3, max4, min, min2, min3, min4}; pub use self::vector_relational::{equal_eps, equal_eps_vec, not_equal_eps, not_equal_eps_vec}; -pub use self::quaternion_common::{quat_conjugate, quat_inverse, quat_lerp, quat_slerp}; -pub use self::quaternion_geometric::{quat_cross, quat_dot, quat_length, quat_magnitude, quat_normalize}; -pub use self::quaternion_relational::{quat_equal, quat_equal_eps, quat_not_equal, quat_not_equal_eps}; -pub use self::quaternion_transform::{quat_exp, quat_log, quat_pow, quat_rotate}; -pub use self::quaternion_trigonometric::{quat_angle, quat_angle_axis, quat_axis}; - mod 
matrix_clip_space; mod matrix_projection; mod matrix_relationnal; mod matrix_transform; -mod scalar_common; -mod scalar_constants; -mod vector_common; -mod vector_relational; mod quaternion_common; mod quaternion_geometric; mod quaternion_relational; mod quaternion_transform; -mod quaternion_trigonometric; \ No newline at end of file +mod quaternion_trigonometric; +mod scalar_common; +mod scalar_constants; +mod vector_common; +mod vector_relational; diff --git a/nalgebra-glm/src/ext/quaternion_common.rs b/nalgebra-glm/src/ext/quaternion_common.rs index 84af997d8..208601edf 100644 --- a/nalgebra-glm/src/ext/quaternion_common.rs +++ b/nalgebra-glm/src/ext/quaternion_common.rs @@ -31,5 +31,7 @@ pub fn quat_lerp(x: &Qua, y: &Qua, a: N) -> Qua { /// Interpolate spherically between `x` and `y`. pub fn quat_slerp(x: &Qua, y: &Qua, a: N) -> Qua { - Unit::new_normalize(*x).slerp(&Unit::new_normalize(*y), a).unwrap() + Unit::new_normalize(*x) + .slerp(&Unit::new_normalize(*y), a) + .unwrap() } diff --git a/nalgebra-glm/src/ext/quaternion_geometric.rs b/nalgebra-glm/src/ext/quaternion_geometric.rs index 88838f8ad..a67de587f 100644 --- a/nalgebra-glm/src/ext/quaternion_geometric.rs +++ b/nalgebra-glm/src/ext/quaternion_geometric.rs @@ -25,4 +25,4 @@ pub fn quat_magnitude(q: &Qua) -> N { /// Normalizes the quaternion `q`. pub fn quat_normalize(q: &Qua) -> Qua { q.normalize() -} \ No newline at end of file +} diff --git a/nalgebra-glm/src/ext/quaternion_relational.rs b/nalgebra-glm/src/ext/quaternion_relational.rs index d493defd2..e459054fb 100644 --- a/nalgebra-glm/src/ext/quaternion_relational.rs +++ b/nalgebra-glm/src/ext/quaternion_relational.rs @@ -1,6 +1,5 @@ use na::{Real, U4}; - use aliases::{Qua, TVec}; /// Component-wise equality comparison between two quaternions. diff --git a/nalgebra-glm/src/ext/quaternion_transform.rs b/nalgebra-glm/src/ext/quaternion_transform.rs index c2fedf36e..a1459269d 100644 --- a/nalgebra-glm/src/ext/quaternion_transform.rs +++ b/nalgebra-glm/src/ext/quaternion_transform.rs @@ -1,4 +1,4 @@ -use na::{Real, UnitQuaternion, Unit}; +use na::{Real, Unit, UnitQuaternion}; use aliases::{Qua, TVec3}; @@ -24,4 +24,4 @@ pub fn quat_rotate(q: &Qua, angle: N, axis: &TVec3) -> Qua { //pub fn quat_sqrt(q: &Qua) -> Qua { // unimplemented!() -//} \ No newline at end of file +//} diff --git a/nalgebra-glm/src/ext/quaternion_trigonometric.rs b/nalgebra-glm/src/ext/quaternion_trigonometric.rs index ae53eb726..6e9be0307 100644 --- a/nalgebra-glm/src/ext/quaternion_trigonometric.rs +++ b/nalgebra-glm/src/ext/quaternion_trigonometric.rs @@ -19,4 +19,4 @@ pub fn quat_axis(x: &Qua) -> TVec3 { } else { TVec3::zeros() } -} \ No newline at end of file +} diff --git a/nalgebra-glm/src/ext/vector_common.rs b/nalgebra-glm/src/ext/vector_common.rs index 4c7366518..c0318067e 100644 --- a/nalgebra-glm/src/ext/vector_common.rs +++ b/nalgebra-glm/src/ext/vector_common.rs @@ -1,7 +1,7 @@ use na::{self, DefaultAllocator}; -use traits::{Alloc, Number, Dimension}; use aliases::TVec; +use traits::{Alloc, Dimension, Number}; /// Component-wise maximum between a vector and a scalar. 
/// @@ -17,7 +17,7 @@ use aliases::TVec; /// * [`min3`](fn.min3.html) /// * [`min4`](fn.min4.html) pub fn max(a: &TVec, b: N) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { a.map(|a| na::sup(&a, &b)) } @@ -35,7 +35,7 @@ pub fn max(a: &TVec, b: N) -> TVec /// * [`min3`](fn.min3.html) /// * [`min4`](fn.min4.html) pub fn max2(a: &TVec, b: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { na::sup(a, b) } @@ -53,7 +53,7 @@ pub fn max2(a: &TVec, b: &TVec) -> TVec(a: &TVec, b: &TVec, c: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { max2(&max2(a, b), c) } @@ -70,8 +70,15 @@ pub fn max3(a: &TVec, b: &TVec, c: &TVec(a: &TVec, b: &TVec, c: &TVec, d: &TVec) -> TVec - where DefaultAllocator: Alloc { +pub fn max4( + a: &TVec, + b: &TVec, + c: &TVec, + d: &TVec, +) -> TVec +where + DefaultAllocator: Alloc, +{ max2(&max2(a, b), &max2(c, d)) } @@ -89,7 +96,7 @@ pub fn max4(a: &TVec, b: &TVec, c: &TVec(x: &TVec, y: N) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|x| na::inf(&x, &y)) } @@ -107,7 +114,7 @@ pub fn min(x: &TVec, y: N) -> TVec /// * [`min3`](fn.min3.html) /// * [`min4`](fn.min4.html) pub fn min2(x: &TVec, y: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { na::inf(x, y) } @@ -125,7 +132,7 @@ pub fn min2(x: &TVec, y: &TVec) -> TVec(a: &TVec, b: &TVec, c: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { min2(&min2(a, b), c) } @@ -142,7 +149,14 @@ pub fn min3(a: &TVec, b: &TVec, c: &TVec(a: &TVec, b: &TVec, c: &TVec, d: &TVec) -> TVec - where DefaultAllocator: Alloc { +pub fn min4( + a: &TVec, + b: &TVec, + c: &TVec, + d: &TVec, +) -> TVec +where + DefaultAllocator: Alloc, +{ min2(&min2(a, b), &min2(c, d)) } diff --git a/nalgebra-glm/src/ext/vector_relational.rs b/nalgebra-glm/src/ext/vector_relational.rs index cfd8649b8..ee418588f 100644 --- a/nalgebra-glm/src/ext/vector_relational.rs +++ b/nalgebra-glm/src/ext/vector_relational.rs @@ -1,7 +1,7 @@ -use na::{DefaultAllocator}; +use na::DefaultAllocator; -use traits::{Alloc, Number, Dimension}; use aliases::TVec; +use traits::{Alloc, Dimension, Number}; /// Component-wise approximate equality of two vectors, using a scalar epsilon. 
/// @@ -10,8 +10,14 @@ use aliases::TVec; /// * [`equal_eps_vec`](fn.equal_eps_vec.html) /// * [`not_equal_eps`](fn.not_equal_eps.html) /// * [`not_equal_eps_vec`](fn.not_equal_eps_vec.html) -pub fn equal_eps(x: &TVec, y: &TVec, epsilon: N) -> TVec - where DefaultAllocator: Alloc { +pub fn equal_eps( + x: &TVec, + y: &TVec, + epsilon: N, +) -> TVec +where + DefaultAllocator: Alloc, +{ x.zip_map(y, |x, y| abs_diff_eq!(x, y, epsilon = epsilon)) } @@ -22,8 +28,14 @@ pub fn equal_eps(x: &TVec, y: &TVec, epsilo /// * [`equal_eps`](fn.equal_eps.html) /// * [`not_equal_eps`](fn.not_equal_eps.html) /// * [`not_equal_eps_vec`](fn.not_equal_eps_vec.html) -pub fn equal_eps_vec(x: &TVec, y: &TVec, epsilon: &TVec) -> TVec - where DefaultAllocator: Alloc { +pub fn equal_eps_vec( + x: &TVec, + y: &TVec, + epsilon: &TVec, +) -> TVec +where + DefaultAllocator: Alloc, +{ x.zip_zip_map(y, epsilon, |x, y, eps| abs_diff_eq!(x, y, epsilon = eps)) } @@ -34,8 +46,14 @@ pub fn equal_eps_vec(x: &TVec, y: &TVec, ep /// * [`equal_eps`](fn.equal_eps.html) /// * [`equal_eps_vec`](fn.equal_eps_vec.html) /// * [`not_equal_eps_vec`](fn.not_equal_eps_vec.html) -pub fn not_equal_eps(x: &TVec, y: &TVec, epsilon: N) -> TVec - where DefaultAllocator: Alloc { +pub fn not_equal_eps( + x: &TVec, + y: &TVec, + epsilon: N, +) -> TVec +where + DefaultAllocator: Alloc, +{ x.zip_map(y, |x, y| abs_diff_ne!(x, y, epsilon = epsilon)) } @@ -46,7 +64,13 @@ pub fn not_equal_eps(x: &TVec, y: &TVec, ep /// * [`equal_eps`](fn.equal_eps.html) /// * [`equal_eps_vec`](fn.equal_eps_vec.html) /// * [`not_equal_eps`](fn.not_equal_eps.html) -pub fn not_equal_eps_vec(x: &TVec, y: &TVec, epsilon: &TVec) -> TVec - where DefaultAllocator: Alloc { +pub fn not_equal_eps_vec( + x: &TVec, + y: &TVec, + epsilon: &TVec, +) -> TVec +where + DefaultAllocator: Alloc, +{ x.zip_zip_map(y, epsilon, |x, y, eps| abs_diff_ne!(x, y, epsilon = eps)) } diff --git a/nalgebra-glm/src/geometric.rs b/nalgebra-glm/src/geometric.rs index 57355bbc5..998709d66 100644 --- a/nalgebra-glm/src/geometric.rs +++ b/nalgebra-glm/src/geometric.rs @@ -1,7 +1,7 @@ -use na::{Real, DefaultAllocator}; +use na::{DefaultAllocator, Real}; -use traits::{Number, Alloc, Dimension}; use aliases::{TVec, TVec3}; +use traits::{Alloc, Dimension, Number}; /// The cross product of two vectors. pub fn cross(x: &TVec3, y: &TVec3) -> TVec3 { @@ -14,19 +14,25 @@ pub fn cross(x: &TVec3, y: &TVec3) -> TVec3 { /// /// * [`distance2`](fn.distance2.html) pub fn distance(p0: &TVec, p1: &TVec) -> N - where DefaultAllocator: Alloc { - (p1 - p0).norm() +where DefaultAllocator: Alloc { + (p1 - p0).norm() } /// The dot product of two vectors. pub fn dot(x: &TVec, y: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.dot(y) } /// If `dot(nref, i) < 0.0`, return `n`, otherwise, return `-n`. 
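// A worked example of the `faceforward` convention documented just above; a
// sketch only, with the flat `glm::` paths assumed from the crate's re-exports:
//
//     let n    = glm::vec3(0.0f32, 1.0, 0.0);   // candidate normal
//     let i    = glm::vec3(0.0f32, -1.0, 0.0);  // incident direction
//     let nref = n;
//     // dot(nref, i) = -1.0 < 0.0, so the normal is returned unflipped:
//     assert_eq!(glm::faceforward(&n, &i, &nref), n);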
-pub fn faceforward(n: &TVec, i: &TVec, nref: &TVec) -> TVec - where DefaultAllocator: Alloc { +pub fn faceforward( + n: &TVec, + i: &TVec, + nref: &TVec, +) -> TVec +where + DefaultAllocator: Alloc, +{ if nref.dot(i) < N::zero() { n.clone() } else { @@ -44,7 +50,7 @@ pub fn faceforward(n: &TVec, i: &TVec, nref /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) pub fn length(x: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.norm() } @@ -58,34 +64,32 @@ pub fn length(x: &TVec) -> N /// * [`magnitude2`](fn.magnitude2.html) /// * [`nalgebra::norm`](../nalgebra/fn.norm.html) pub fn magnitude(x: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.norm() } /// Normalizes a vector. pub fn normalize(x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.normalize() } /// For the incident vector `i` and surface orientation `n`, returns the reflection direction : `result = i - 2.0 * dot(n, i) * n`. pub fn reflect_vec(i: &TVec, n: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { let _2 = N::one() + N::one(); i - n * (n.dot(i) * _2) } /// For the incident vector `i` and surface normal `n`, and the ratio of indices of refraction `eta`, return the refraction vector. pub fn refract_vec(i: &TVec, n: &TVec, eta: N) -> TVec - where DefaultAllocator: Alloc { - +where DefaultAllocator: Alloc { let ni = n.dot(i); let k = N::one() - eta * eta * (N::one() - ni * ni); if k < N::zero() { TVec::<_, D>::zeros() - } - else { + } else { i * eta - n * (eta * dot(n, i) + k.sqrt()) } } diff --git a/nalgebra-glm/src/gtc/epsilon.rs b/nalgebra-glm/src/gtc/epsilon.rs index 604ed3e4f..4fd7138a7 100644 --- a/nalgebra-glm/src/gtc/epsilon.rs +++ b/nalgebra-glm/src/gtc/epsilon.rs @@ -28,4 +28,4 @@ pub fn epsilon_not_equal(x: &TVec, y: &TVec pub fn epsilon_not_equal2>(x: N, y: N, epsilon: N) -> bool { abs_diff_ne!(x, y, epsilon = epsilon) } -*/ \ No newline at end of file +*/ diff --git a/nalgebra-glm/src/gtc/matrix_access.rs b/nalgebra-glm/src/gtc/matrix_access.rs index 2db271c11..3eabf5e26 100644 --- a/nalgebra-glm/src/gtc/matrix_access.rs +++ b/nalgebra-glm/src/gtc/matrix_access.rs @@ -1,7 +1,7 @@ -use na::{Scalar, DefaultAllocator}; +use na::{DefaultAllocator, Scalar}; +use aliases::{TMat, TVec}; use traits::{Alloc, Dimension}; -use aliases::{TVec, TMat}; /// The `index`-th column of the matrix `m`. 
/// @@ -10,8 +10,13 @@ use aliases::{TVec, TMat}; /// * [`row`](fn.row.html) /// * [`set_column`](fn.set_column.html) /// * [`set_row`](fn.set_row.html) -pub fn column(m: &TMat, index: usize) -> TVec - where DefaultAllocator: Alloc { +pub fn column( + m: &TMat, + index: usize, +) -> TVec +where + DefaultAllocator: Alloc, +{ m.column(index).into_owned() } @@ -22,8 +27,14 @@ pub fn column(m: &TMat, index: u /// * [`column`](fn.column.html) /// * [`row`](fn.row.html) /// * [`set_row`](fn.set_row.html) -pub fn set_column(m: &TMat, index: usize, x: &TVec) -> TMat - where DefaultAllocator: Alloc { +pub fn set_column( + m: &TMat, + index: usize, + x: &TVec, +) -> TMat +where + DefaultAllocator: Alloc, +{ let mut res = m.clone(); res.set_column(index, x); res @@ -37,7 +48,7 @@ pub fn set_column(m: &TMat, inde /// * [`set_column`](fn.set_column.html) /// * [`set_row`](fn.set_row.html) pub fn row(m: &TMat, index: usize) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { m.row(index).into_owned().transpose() } @@ -48,8 +59,14 @@ pub fn row(m: &TMat, index: usiz /// * [`column`](fn.column.html) /// * [`row`](fn.row.html) /// * [`set_column`](fn.set_column.html) -pub fn set_row(m: &TMat, index: usize, x: &TVec) -> TMat - where DefaultAllocator: Alloc { +pub fn set_row( + m: &TMat, + index: usize, + x: &TVec, +) -> TMat +where + DefaultAllocator: Alloc, +{ let mut res = m.clone(); res.set_row(index, &x.transpose()); res diff --git a/nalgebra-glm/src/gtc/matrix_inverse.rs b/nalgebra-glm/src/gtc/matrix_inverse.rs index 0ccfca864..4fc305e07 100644 --- a/nalgebra-glm/src/gtc/matrix_inverse.rs +++ b/nalgebra-glm/src/gtc/matrix_inverse.rs @@ -1,17 +1,19 @@ -use na::{Real, DefaultAllocator}; +use na::{DefaultAllocator, Real}; -use traits::{Alloc, Dimension}; use aliases::TMat; +use traits::{Alloc, Dimension}; /// Fast matrix inverse for affine matrix. pub fn affine_inverse(m: TMat) -> TMat - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { // FIXME: this should be optimized. m.try_inverse().unwrap_or_else(TMat::<_, D, D>::zeros) } /// Compute the transpose of the inverse of a matrix. pub fn inverse_transpose(m: TMat) -> TMat - where DefaultAllocator: Alloc { - m.try_inverse().unwrap_or_else(TMat::<_, D, D>::zeros).transpose() +where DefaultAllocator: Alloc { + m.try_inverse() + .unwrap_or_else(TMat::<_, D, D>::zeros) + .transpose() } diff --git a/nalgebra-glm/src/gtc/mod.rs b/nalgebra-glm/src/gtc/mod.rs index 2fd871b29..66db7b7a8 100644 --- a/nalgebra-glm/src/gtc/mod.rs +++ b/nalgebra-glm/src/gtc/mod.rs @@ -1,17 +1,32 @@ //! 
(Reexported) Recommended features not specified by GLSL specification //pub use self::bitfield::*; -pub use self::constants::{e, two_pi, euler, four_over_pi, golden_ratio, half_pi, ln_ln_two, ln_ten, ln_two, one, one_over_pi, one_over_root_two, one_over_two_pi, quarter_pi, root_five, root_half_pi, root_ln_four, root_pi, root_three, root_two, root_two_pi, third, three_over_two_pi, two_over_pi, two_over_root_pi, two_thirds, zero}; +pub use self::constants::{ + e, euler, four_over_pi, golden_ratio, half_pi, ln_ln_two, ln_ten, ln_two, one, one_over_pi, + one_over_root_two, one_over_two_pi, quarter_pi, root_five, root_half_pi, root_ln_four, root_pi, + root_three, root_two, root_two_pi, third, three_over_two_pi, two_over_pi, two_over_root_pi, + two_pi, two_thirds, zero, +}; //pub use self::integer::*; pub use self::matrix_access::{column, row, set_column, set_row}; pub use self::matrix_inverse::{affine_inverse, inverse_transpose}; //pub use self::packing::*; //pub use self::reciprocal::*; //pub use self::round::*; -pub use self::type_ptr::{make_mat2, make_mat2x2, make_mat2x3, make_mat2x4, make_mat3, make_mat3x2, make_mat3x3, make_mat3x4, make_mat4, make_mat4x2, make_mat4x3, make_mat4x4, make_quat, make_vec1, make_vec2, make_vec3, make_vec4, value_ptr, value_ptr_mut, vec1_to_vec2, vec1_to_vec3, vec1_to_vec4, vec2_to_vec1, vec2_to_vec2, vec2_to_vec3, vec2_to_vec4, vec3_to_vec1, vec3_to_vec2, vec3_to_vec3, vec3_to_vec4, vec4_to_vec1, vec4_to_vec2, vec4_to_vec3, vec4_to_vec4, mat2_to_mat3, mat2_to_mat4, mat3_to_mat2, mat3_to_mat4, mat4_to_mat2, mat4_to_mat3}; +pub use self::type_ptr::{ + make_mat2, make_mat2x2, make_mat2x3, make_mat2x4, make_mat3, make_mat3x2, make_mat3x3, + make_mat3x4, make_mat4, make_mat4x2, make_mat4x3, make_mat4x4, make_quat, make_vec1, make_vec2, + make_vec3, make_vec4, mat2_to_mat3, mat2_to_mat4, mat3_to_mat2, mat3_to_mat4, mat4_to_mat2, + mat4_to_mat3, value_ptr, value_ptr_mut, vec1_to_vec2, vec1_to_vec3, vec1_to_vec4, vec2_to_vec1, + vec2_to_vec2, vec2_to_vec3, vec2_to_vec4, vec3_to_vec1, vec3_to_vec2, vec3_to_vec3, + vec3_to_vec4, vec4_to_vec1, vec4_to_vec2, vec4_to_vec3, vec4_to_vec4, +}; //pub use self::ulp::*; -pub use self::quaternion::{quat_cast, quat_euler_angles, quat_greater_than, quat_greater_than_equal, quat_less_than, quat_less_than_equal, quat_look_at, quat_look_at_lh, quat_look_at_rh, quat_pitch, quat_roll, quat_yaw}; - +pub use self::quaternion::{ + quat_cast, quat_euler_angles, quat_greater_than, quat_greater_than_equal, quat_less_than, + quat_less_than_equal, quat_look_at, quat_look_at_lh, quat_look_at_rh, quat_pitch, quat_roll, + quat_yaw, +}; //mod bitfield; mod constants; @@ -24,4 +39,4 @@ mod matrix_inverse; //mod round; mod type_ptr; //mod ulp; -mod quaternion; \ No newline at end of file +mod quaternion; diff --git a/nalgebra-glm/src/gtc/quaternion.rs b/nalgebra-glm/src/gtc/quaternion.rs index 157936053..fd3e22bfb 100644 --- a/nalgebra-glm/src/gtc/quaternion.rs +++ b/nalgebra-glm/src/gtc/quaternion.rs @@ -1,7 +1,6 @@ -use na::{Real, U4, UnitQuaternion}; - -use aliases::{Qua, TVec, TVec3, TMat4}; +use na::{Real, UnitQuaternion, U4}; +use aliases::{Qua, TMat4, TVec, TVec3}; /// Euler angles of the quaternion `q` as (pitch, yaw, roll). 
pub fn quat_euler_angles(x: &Qua) -> TVec3 { diff --git a/nalgebra-glm/src/gtc/type_ptr.rs b/nalgebra-glm/src/gtc/type_ptr.rs index 96f87ef2e..93a88e786 100644 --- a/nalgebra-glm/src/gtc/type_ptr.rs +++ b/nalgebra-glm/src/gtc/type_ptr.rs @@ -1,8 +1,10 @@ -use na::{Scalar, Real, DefaultAllocator, Quaternion}; +use na::{DefaultAllocator, Quaternion, Real, Scalar}; -use traits::{Number, Alloc, Dimension}; -use aliases::{Qua, TMat, TMat2, TMat3, TMat4, TVec1, TVec2, TVec3, TVec4, - TMat2x3, TMat2x4, TMat3x2, TMat3x4, TMat4x2, TMat4x3}; +use aliases::{ + Qua, TMat, TMat2, TMat2x3, TMat2x4, TMat3, TMat3x2, TMat3x4, TMat4, TMat4x2, TMat4x3, TVec1, + TVec2, TVec3, TVec4, +}; +use traits::{Alloc, Dimension, Number}; /// Creates a 2x2 matrix from a slice arranged in column-major order. pub fn make_mat2(ptr: &[N]) -> TMat2 { @@ -69,19 +71,12 @@ pub fn mat2_to_mat3(m: &TMat2) -> TMat3 { let _0 = N::zero(); let _1 = N::one(); - TMat3::new( - m.m11, m.m12, _0, - m.m21, m.m22, _0, - _0, _0, _1 - ) + TMat3::new(m.m11, m.m12, _0, m.m21, m.m22, _0, _0, _0, _1) } /// Converts a 3x3 matrix to a 2x2 matrix. pub fn mat3_to_mat2(m: &TMat3) -> TMat2 { - TMat2::new( - m.m11, m.m12, - m.m21, m.m22 - ) + TMat2::new(m.m11, m.m12, m.m21, m.m22) } /// Converts a 3x3 matrix to a 4x4 matrix. @@ -90,19 +85,14 @@ pub fn mat3_to_mat4(m: &TMat3) -> TMat4 { let _1 = N::one(); TMat4::new( - m.m11, m.m12, m.m13, _0, - m.m21, m.m22, m.m23, _0, - m.m31, m.m32, m.m33, _0, - _0, _0, _0, _1, + m.m11, m.m12, m.m13, _0, m.m21, m.m22, m.m23, _0, m.m31, m.m32, m.m33, _0, _0, _0, _0, _1, ) } /// Converts a 4x4 matrix to a 3x3 matrix. pub fn mat4_to_mat3(m: &TMat4) -> TMat3 { TMat3::new( - m.m11, m.m12, m.m13, - m.m21, m.m22, m.m23, - m.m31, m.m32, m.m33, + m.m11, m.m12, m.m13, m.m21, m.m22, m.m23, m.m31, m.m32, m.m33, ) } @@ -112,19 +102,13 @@ pub fn mat2_to_mat4(m: &TMat2) -> TMat4 { let _1 = N::one(); TMat4::new( - m.m11, m.m12, _0, _0, - m.m21, m.m22, _0, _0, - _0, _0, _1, _0, - _0, _0, _0, _1, + m.m11, m.m12, _0, _0, m.m21, m.m22, _0, _0, _0, _0, _1, _0, _0, _0, _0, _1, ) } /// Converts a 4x4 matrix to a 2x2 matrix. pub fn mat4_to_mat2(m: &TMat4) -> TMat2 { - TMat2::new( - m.m11, m.m12, - m.m21, m.m22, - ) + TMat2::new(m.m11, m.m12, m.m21, m.m22) } /// Creates a quaternion from a slice arranged as `[x, y, z, w]`. @@ -400,13 +384,12 @@ pub fn make_vec4(ptr: &[N]) -> TVec4 { /// Converts a matrix or vector to a slice arranged in column-major order. pub fn value_ptr(x: &TMat) -> &[N] - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.as_slice() } /// Converts a matrix or vector to a mutable slice arranged in column-major order. pub fn value_ptr_mut(x: &mut TMat) -> &mut [N] - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.as_mut_slice() } - diff --git a/nalgebra-glm/src/gtx/component_wise.rs b/nalgebra-glm/src/gtx/component_wise.rs index 56d4cff4e..338f43800 100644 --- a/nalgebra-glm/src/gtx/component_wise.rs +++ b/nalgebra-glm/src/gtx/component_wise.rs @@ -1,7 +1,7 @@ use na::{self, DefaultAllocator}; -use traits::{Number, Alloc, Dimension}; use aliases::TMat; +use traits::{Alloc, Dimension, Number}; /// The sum of every component of the given matrix or vector. 
/// @@ -22,7 +22,7 @@ use aliases::TMat; /// * [`comp_min`](fn.comp_min.html) /// * [`comp_mul`](fn.comp_mul.html) pub fn comp_add(m: &TMat) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { m.iter().fold(N::zero(), |x, y| x + *y) } @@ -49,7 +49,7 @@ pub fn comp_add(m: &TMat) -> N /// * [`max3`](fn.max3.html) /// * [`max4`](fn.max4.html) pub fn comp_max(m: &TMat) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { m.iter().fold(N::min_value(), |x, y| na::sup(&x, y)) } @@ -76,7 +76,7 @@ pub fn comp_max(m: &TMat) -> N /// * [`min3`](fn.min3.html) /// * [`min4`](fn.min4.html) pub fn comp_min(m: &TMat) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { m.iter().fold(N::max_value(), |x, y| na::inf(&x, y)) } @@ -99,7 +99,7 @@ pub fn comp_min(m: &TMat) -> N /// * [`comp_max`](fn.comp_max.html) /// * [`comp_min`](fn.comp_min.html) pub fn comp_mul(m: &TMat) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { m.iter().fold(N::one(), |x, y| x * *y) } diff --git a/nalgebra-glm/src/gtx/exterior_product.rs b/nalgebra-glm/src/gtx/exterior_product.rs index 228be61e8..cc58597bf 100644 --- a/nalgebra-glm/src/gtx/exterior_product.rs +++ b/nalgebra-glm/src/gtx/exterior_product.rs @@ -1,7 +1,7 @@ -use traits::Number; use aliases::TVec2; +use traits::Number; /// The 2D perpendicular product between two vectors. pub fn cross2d(v: &TVec2, u: &TVec2) -> N { v.perp(u) -} \ No newline at end of file +} diff --git a/nalgebra-glm/src/gtx/handed_coordinate_space.rs b/nalgebra-glm/src/gtx/handed_coordinate_space.rs index e499207fc..ee1979adc 100644 --- a/nalgebra-glm/src/gtx/handed_coordinate_space.rs +++ b/nalgebra-glm/src/gtx/handed_coordinate_space.rs @@ -1,5 +1,5 @@ -use traits::Number; use aliases::TVec3; +use traits::Number; /// Returns `true` if `{a, b, c}` forms a left-handed trihedron. /// diff --git a/nalgebra-glm/src/gtx/matrix_operation.rs b/nalgebra-glm/src/gtx/matrix_operation.rs index 17ea4bb94..954607ef7 100644 --- a/nalgebra-glm/src/gtx/matrix_operation.rs +++ b/nalgebra-glm/src/gtx/matrix_operation.rs @@ -1,5 +1,7 @@ +use aliases::{ + TMat2, TMat2x3, TMat2x4, TMat3, TMat3x2, TMat3x4, TMat4, TMat4x2, TMat4x3, TVec2, TVec3, TVec4, +}; use traits::Number; -use aliases::{TVec2, TVec3, TVec4, TMat2, TMat2x3, TMat2x4, TMat3, TMat3x2, TMat3x4, TMat4, TMat4x2, TMat4x3}; /// Builds a 2x2 diagonal matrix. /// diff --git a/nalgebra-glm/src/gtx/mod.rs b/nalgebra-glm/src/gtx/mod.rs index 7dc66bda0..b06f3998d 100644 --- a/nalgebra-glm/src/gtx/mod.rs +++ b/nalgebra-glm/src/gtx/mod.rs @@ -1,25 +1,37 @@ //! (Reexported) Experimental features not specified by GLSL specification. 
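As a quick reference for the `gtx` component-wise reductions, the 2D exterior product, and the diagonal-matrix builders touched in the hunks above, a small illustrative snippet (not part of the patch; it assumes the `glm` crate alias):

```rust
extern crate nalgebra_glm as glm;

fn main() {
    let v = glm::vec4(1.0, -2.0, 3.0, -4.0);

    // Component-wise reductions work on any vector or matrix.
    assert_eq!(glm::comp_add(&v), -2.0); // 1 - 2 + 3 - 4
    assert_eq!(glm::comp_max(&v), 3.0);
    assert_eq!(glm::comp_min(&v), -4.0);
    assert_eq!(glm::comp_mul(&v), 24.0); // 1 * (-2) * 3 * (-4)

    // 2D perpendicular (exterior) product and a diagonal-matrix builder.
    let z = glm::cross2d(&glm::vec2(1.0, 0.0), &glm::vec2(0.0, 1.0));
    let d = glm::diagonal3x3(&glm::vec3(1.0, 2.0, 3.0));
    assert_eq!(z, 1.0);
    println!("{}", d);
}
```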
- pub use self::component_wise::{comp_add, comp_max, comp_min, comp_mul}; //pub use self::euler_angles::*; -pub use self::exterior_product::{cross2d}; +pub use self::exterior_product::cross2d; pub use self::handed_coordinate_space::{left_handed, right_handed}; pub use self::matrix_cross_product::{matrix_cross, matrix_cross3}; -pub use self::matrix_operation::{diagonal2x2, diagonal2x3, diagonal2x4, diagonal3x2, diagonal3x3, diagonal3x4, diagonal4x2, diagonal4x3, diagonal4x4}; +pub use self::matrix_operation::{ + diagonal2x2, diagonal2x3, diagonal2x4, diagonal3x2, diagonal3x3, diagonal3x4, diagonal4x2, + diagonal4x3, diagonal4x4, +}; pub use self::norm::{distance2, l1_distance, l1_norm, l2_distance, l2_norm, length2, magnitude2}; -pub use self::normal::{triangle_normal}; +pub use self::normal::triangle_normal; pub use self::normalize_dot::{fast_normalize_dot, normalize_dot}; +pub use self::quaternion::{ + mat3_to_quat, quat_cross_vec, quat_extract_real_component, quat_fast_mix, quat_identity, + quat_inv_cross_vec, quat_length2, quat_magnitude2, quat_rotate_vec, quat_rotate_vec3, + quat_rotation, quat_short_mix, quat_to_mat3, quat_to_mat4, to_quat, +}; pub use self::rotate_normalized_axis::{quat_rotate_normalized_axis, rotate_normalized_axis}; -pub use self::rotate_vector::{orientation, rotate_vec2, rotate_vec3, rotate_vec4, rotate_x_vec4, rotate_x_vec3, rotate_y_vec4, rotate_y_vec3, rotate_z_vec4, rotate_z_vec3, slerp}; -pub use self::transform::{rotation, scaling, translation, rotation2d, scaling2d, translation2d}; -pub use self::transform2::{proj, proj2d, reflect, reflect2d, scale_bias, scale_bias_matrix, shear2d_x, shear_x, shear_y, shear2d_y, shear_z}; +pub use self::rotate_vector::{ + orientation, rotate_vec2, rotate_vec3, rotate_vec4, rotate_x_vec3, rotate_x_vec4, + rotate_y_vec3, rotate_y_vec4, rotate_z_vec3, rotate_z_vec4, slerp, +}; +pub use self::transform::{rotation, rotation2d, scaling, scaling2d, translation, translation2d}; +pub use self::transform2::{ + proj, proj2d, reflect, reflect2d, scale_bias, scale_bias_matrix, shear2d_x, shear2d_y, shear_x, + shear_y, shear_z, +}; pub use self::transform2d::{rotate2d, scale2d, translate2d}; -pub use self::vector_angle::{angle}; -pub use self::vector_query::{are_collinear, are_collinear2d, are_orthogonal, is_comp_null, is_normalized, is_null}; -pub use self::quaternion::{quat_to_mat3, quat_rotate_vec, quat_cross_vec, mat3_to_quat, quat_extract_real_component, quat_fast_mix, quat_inv_cross_vec, quat_length2, quat_magnitude2, quat_identity, quat_rotate_vec3, quat_rotation, quat_short_mix, quat_to_mat4, to_quat}; - - +pub use self::vector_angle::angle; +pub use self::vector_query::{ + are_collinear, are_collinear2d, are_orthogonal, is_comp_null, is_normalized, is_null, +}; mod component_wise; //mod euler_angles; @@ -30,6 +42,7 @@ mod matrix_operation; mod norm; mod normal; mod normalize_dot; +mod quaternion; mod rotate_normalized_axis; mod rotate_vector; mod transform; @@ -37,4 +50,3 @@ mod transform2; mod transform2d; mod vector_angle; mod vector_query; -mod quaternion; \ No newline at end of file diff --git a/nalgebra-glm/src/gtx/norm.rs b/nalgebra-glm/src/gtx/norm.rs index 2a40c02a2..0a2875300 100644 --- a/nalgebra-glm/src/gtx/norm.rs +++ b/nalgebra-glm/src/gtx/norm.rs @@ -1,7 +1,7 @@ -use na::{Real, DefaultAllocator}; +use na::{DefaultAllocator, Real}; -use traits::{Alloc, Dimension}; use aliases::TVec; +use traits::{Alloc, Dimension}; /// The squared distance between two points. 
/// @@ -9,7 +9,7 @@ use aliases::TVec; /// /// * [`distance`](fn.distance.html) pub fn distance2(p0: &TVec, p1: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { (p1 - p0).norm_squared() } @@ -21,7 +21,7 @@ pub fn distance2(p0: &TVec, p1: &TVec) -> N /// * [`l2_distance`](fn.l2_distance.html) /// * [`l2_norm`](fn.l2_norm.html) pub fn l1_distance(x: &TVec, y: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { l1_norm(&(y - x)) } @@ -36,7 +36,7 @@ pub fn l1_distance(x: &TVec, y: &TVec) -> N /// * [`l2_distance`](fn.l2_distance.html) /// * [`l2_norm`](fn.l2_norm.html) pub fn l1_norm(v: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { ::comp_add(&v.abs()) } @@ -55,7 +55,7 @@ pub fn l1_norm(v: &TVec) -> N /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) pub fn l2_distance(x: &TVec, y: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { l2_norm(&(y - x)) } @@ -76,7 +76,7 @@ pub fn l2_distance(x: &TVec, y: &TVec) -> N /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) pub fn l2_norm(x: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.norm() } @@ -92,7 +92,7 @@ pub fn l2_norm(x: &TVec) -> N /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) pub fn length2(x: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.norm_squared() } @@ -108,7 +108,7 @@ pub fn length2(x: &TVec) -> N /// * [`magnitude`](fn.magnitude.html) /// * [`nalgebra::norm_squared`](../nalgebra/fn.norm_squared.html) pub fn magnitude2(x: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.norm_squared() } diff --git a/nalgebra-glm/src/gtx/normal.rs b/nalgebra-glm/src/gtx/normal.rs index 6ae70ab76..63fc9246e 100644 --- a/nalgebra-glm/src/gtx/normal.rs +++ b/nalgebra-glm/src/gtx/normal.rs @@ -7,4 +7,4 @@ use aliases::TVec3; /// The normal is computed as the normalized vector `cross(p2 - p1, p3 - p1)`. pub fn triangle_normal(p1: &TVec3, p2: &TVec3, p3: &TVec3) -> TVec3 { (p2 - p1).cross(&(p3 - p1)).normalize() -} \ No newline at end of file +} diff --git a/nalgebra-glm/src/gtx/normalize_dot.rs b/nalgebra-glm/src/gtx/normalize_dot.rs index 4c67aee3e..df52d2c02 100644 --- a/nalgebra-glm/src/gtx/normalize_dot.rs +++ b/nalgebra-glm/src/gtx/normalize_dot.rs @@ -1,7 +1,7 @@ -use na::{Real, DefaultAllocator}; +use na::{DefaultAllocator, Real}; -use traits::{Dimension, Alloc}; use aliases::TVec; +use traits::{Alloc, Dimension}; /// The dot product of the normalized version of `x` and `y`. /// @@ -11,7 +11,7 @@ use aliases::TVec; /// /// * [`normalize_dot`](fn.normalize_dot.html`) pub fn fast_normalize_dot(x: &TVec, y: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { // XXX: improve those. x.normalize().dot(&y.normalize()) } @@ -22,7 +22,7 @@ pub fn fast_normalize_dot(x: &TVec, y: &TVec) /// /// * [`fast_normalize_dot`](fn.fast_normalize_dot.html`) pub fn normalize_dot(x: &TVec, y: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { // XXX: improve those. 
x.normalize().dot(&y.normalize()) } diff --git a/nalgebra-glm/src/gtx/quaternion.rs b/nalgebra-glm/src/gtx/quaternion.rs index a490352d5..5762b58d3 100644 --- a/nalgebra-glm/src/gtx/quaternion.rs +++ b/nalgebra-glm/src/gtx/quaternion.rs @@ -1,4 +1,4 @@ -use na::{Real, Unit, Rotation3, UnitQuaternion, U3}; +use na::{Real, Rotation3, Unit, UnitQuaternion, U3}; use aliases::{Qua, TMat3, TMat4, TVec3, TVec4}; @@ -19,7 +19,9 @@ pub fn quat_extract_real_component(q: &Qua) -> N { /// Normalized linear interpolation between two quaternions. pub fn quat_fast_mix(x: &Qua, y: &Qua, a: N) -> Qua { - Unit::new_unchecked(*x).nlerp(&Unit::new_unchecked(*y), a).unwrap() + Unit::new_unchecked(*x) + .nlerp(&Unit::new_unchecked(*y), a) + .unwrap() } //pub fn quat_intermediate(prev: &Qua, curr: &Qua, next: &Qua) -> Qua { @@ -54,12 +56,16 @@ pub fn quat_rotate_vec(q: &Qua, v: &TVec4) -> TVec4 { /// The rotation required to align `orig` to `dest`. pub fn quat_rotation(orig: &TVec3, dest: &TVec3) -> Qua { - UnitQuaternion::rotation_between(orig, dest).unwrap_or_else(UnitQuaternion::identity).unwrap() + UnitQuaternion::rotation_between(orig, dest) + .unwrap_or_else(UnitQuaternion::identity) + .unwrap() } /// The spherical linear interpolation between two quaternions. pub fn quat_short_mix(x: &Qua, y: &Qua, a: N) -> Qua { - Unit::new_normalize(*x).slerp(&Unit::new_normalize(*y), a).unwrap() + Unit::new_normalize(*x) + .slerp(&Unit::new_normalize(*y), a) + .unwrap() } //pub fn quat_squad(q1: &Qua, q2: &Qua, s1: &Qua, s2: &Qua, h: N) -> Qua { @@ -68,7 +74,9 @@ pub fn quat_short_mix(x: &Qua, y: &Qua, a: N) -> Qua { /// Converts a quaternion to a rotation matrix. pub fn quat_to_mat3(x: &Qua) -> TMat3 { - UnitQuaternion::new_unchecked(*x).to_rotation_matrix().unwrap() + UnitQuaternion::new_unchecked(*x) + .to_rotation_matrix() + .unwrap() } /// Converts a quaternion to a rotation matrix in homogenous coordinates. @@ -87,4 +95,3 @@ pub fn to_quat(x: &TMat4) -> Qua { let rot = x.fixed_slice::(0, 0).into_owned(); mat3_to_quat(&rot) } - diff --git a/nalgebra-glm/src/gtx/rotate_vector.rs b/nalgebra-glm/src/gtx/rotate_vector.rs index 76dcdbfad..0855244d4 100644 --- a/nalgebra-glm/src/gtx/rotate_vector.rs +++ b/nalgebra-glm/src/gtx/rotate_vector.rs @@ -1,6 +1,6 @@ use na::{Real, Rotation3, Unit, UnitComplex}; -use aliases::{TVec2, TVec3, TVec4, TMat4}; +use aliases::{TMat4, TVec2, TVec3, TVec4}; /// Build the rotation matrix needed to align `normal` and `up`. pub fn orientation(normal: &TVec3, up: &TVec3) -> TMat4 { @@ -58,5 +58,7 @@ pub fn rotate_z_vec4(v: &TVec4, angle: N) -> TVec4 { /// Computes a spherical linear interpolation between the vectors `x` and `y` assumed to be normalized. pub fn slerp(x: &TVec3, y: &TVec3, a: N) -> TVec3 { - Unit::new_unchecked(*x).slerp(&Unit::new_unchecked(*y), a).unwrap() + Unit::new_unchecked(*x) + .slerp(&Unit::new_unchecked(*y), a) + .unwrap() } diff --git a/nalgebra-glm/src/gtx/transform.rs b/nalgebra-glm/src/gtx/transform.rs index 3ebc958dc..85554fe5a 100644 --- a/nalgebra-glm/src/gtx/transform.rs +++ b/nalgebra-glm/src/gtx/transform.rs @@ -1,7 +1,7 @@ -use na::{Real, Unit, Rotation2, Rotation3}; +use na::{Real, Rotation2, Rotation3, Unit}; +use aliases::{TMat3, TMat4, TVec2, TVec3}; use traits::Number; -use aliases::{TVec3, TVec2, TMat3, TMat4}; /// A rotation 4 * 4 matrix created from an axis of 3 scalars and an angle expressed in radians. 
/// @@ -42,7 +42,6 @@ pub fn translation(v: &TVec3) -> TMat4 { TMat4::new_translation(v) } - /// A rotation 3 * 3 matrix created from an angle expressed in radians. /// /// # See also: diff --git a/nalgebra-glm/src/gtx/transform2.rs b/nalgebra-glm/src/gtx/transform2.rs index 0bb19e958..fa5533f70 100644 --- a/nalgebra-glm/src/gtx/transform2.rs +++ b/nalgebra-glm/src/gtx/transform2.rs @@ -1,7 +1,7 @@ use na::{U2, U3}; +use aliases::{TMat3, TMat4, TVec2, TVec3}; use traits::Number; -use aliases::{TVec2, TVec3, TMat3, TMat4}; /// Build planar projection matrix along normal axis and right-multiply it to `m`. pub fn proj2d(m: &TMat3, normal: &TVec2) -> TMat3 { @@ -57,10 +57,7 @@ pub fn scale_bias_matrix(scale: N, bias: N) -> TMat4 { let _1 = N::one(); TMat4::new( - scale, _0, _0, bias, - _0, scale, _0, bias, - _0, _0, scale, bias, - _0, _0, _0, _1, + scale, _0, _0, bias, _0, scale, _0, bias, _0, _0, scale, bias, _0, _0, _0, _1, ) } @@ -74,11 +71,7 @@ pub fn shear2d_x(m: &TMat3, y: N) -> TMat3 { let _0 = N::zero(); let _1 = N::one(); - let shear = TMat3::new( - _1, y, _0, - _0, _1, _0, - _0, _0, _1 - ); + let shear = TMat3::new(_1, y, _0, _0, _1, _0, _0, _0, _1); m * shear } @@ -86,12 +79,7 @@ pub fn shear2d_x(m: &TMat3, y: N) -> TMat3 { pub fn shear_x(m: &TMat4, y: N, z: N) -> TMat4 { let _0 = N::zero(); let _1 = N::one(); - let shear = TMat4::new( - _1, _0, _0, _0, - y, _1, _0, _0, - z, _0, _1, _0, - _0, _0, _0, _1, - ); + let shear = TMat4::new(_1, _0, _0, _0, y, _1, _0, _0, z, _0, _1, _0, _0, _0, _0, _1); m * shear } @@ -101,11 +89,7 @@ pub fn shear2d_y(m: &TMat3, x: N) -> TMat3 { let _0 = N::zero(); let _1 = N::one(); - let shear = TMat3::new( - _1, _0, _0, - x, _1, _0, - _0, _0, _1 - ); + let shear = TMat3::new(_1, _0, _0, x, _1, _0, _0, _0, _1); m * shear } @@ -113,12 +97,7 @@ pub fn shear2d_y(m: &TMat3, x: N) -> TMat3 { pub fn shear_y(m: &TMat4, x: N, z: N) -> TMat4 { let _0 = N::zero(); let _1 = N::one(); - let shear = TMat4::new( - _1, x, _0, _0, - _0, _1, _0, _0, - _0, z, _1, _0, - _0, _0, _0, _1, - ); + let shear = TMat4::new(_1, x, _0, _0, _0, _1, _0, _0, _0, z, _1, _0, _0, _0, _0, _1); m * shear } @@ -127,12 +106,7 @@ pub fn shear_y(m: &TMat4, x: N, z: N) -> TMat4 { pub fn shear_z(m: &TMat4, x: N, y: N) -> TMat4 { let _0 = N::zero(); let _1 = N::one(); - let shear = TMat4::new( - _1, _0, x, _0, - _0, _1, y, _0, - _0, _0, _1, _0, - _0, _0, _0, _1, - ); + let shear = TMat4::new(_1, _0, x, _0, _0, _1, y, _0, _0, _0, _1, _0, _0, _0, _0, _1); m * shear } diff --git a/nalgebra-glm/src/gtx/transform2d.rs b/nalgebra-glm/src/gtx/transform2d.rs index 92aa77095..3d401f566 100644 --- a/nalgebra-glm/src/gtx/transform2d.rs +++ b/nalgebra-glm/src/gtx/transform2d.rs @@ -1,7 +1,7 @@ use na::{Real, UnitComplex}; -use traits::Number; use aliases::{TMat3, TVec2}; +use traits::Number; /// Builds a 2D rotation matrix from an angle and right-multiply it to `m`. /// diff --git a/nalgebra-glm/src/gtx/vector_angle.rs b/nalgebra-glm/src/gtx/vector_angle.rs index 22deaaf5a..1ebb39a44 100644 --- a/nalgebra-glm/src/gtx/vector_angle.rs +++ b/nalgebra-glm/src/gtx/vector_angle.rs @@ -1,12 +1,11 @@ use na::{DefaultAllocator, Real}; -use traits::{Dimension, Alloc}; use aliases::TVec; - +use traits::{Alloc, Dimension}; /// The angle between two vectors. 
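The `gtx::transform`, `gtx::transform2`, and `gtx::transform2d` helpers reformatted in these hunks all return plain homogeneous matrices, so transforms compose by matrix multiplication. A minimal sketch (illustrative only, not part of the patch; `glm` alias assumed):

```rust
extern crate nalgebra_glm as glm;

fn main() {
    // 3D: homogeneous 4x4 matrices that compose by multiplication
    // (the right-most factor is applied to a point first).
    let t = glm::translation(&glm::vec3(1.0, 0.0, 0.0));
    let r = glm::rotation(glm::half_pi::<f32>(), &glm::vec3(0.0, 0.0, 1.0));
    let s = glm::scaling(&glm::vec3(2.0, 2.0, 2.0));
    let model = t * r * s;

    // 2D: the same idea with 3x3 homogeneous matrices.
    let m2d = glm::translation2d(&glm::vec2(1.0, 2.0)) * glm::rotation2d(glm::quarter_pi::<f32>());

    println!("{} {}", model, m2d);
}
```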
pub fn angle(x: &TVec, y: &TVec) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.angle(y) } diff --git a/nalgebra-glm/src/gtx/vector_query.rs b/nalgebra-glm/src/gtx/vector_query.rs index c4517803c..f408a4b39 100644 --- a/nalgebra-glm/src/gtx/vector_query.rs +++ b/nalgebra-glm/src/gtx/vector_query.rs @@ -1,7 +1,7 @@ -use na::{Real, DefaultAllocator}; +use na::{DefaultAllocator, Real}; -use traits::{Number, Dimension, Alloc}; use aliases::{TVec, TVec2, TVec3}; +use traits::{Alloc, Dimension, Number}; /// Returns `true` if two vectors are collinear (up to an epsilon). /// @@ -22,8 +22,14 @@ pub fn are_collinear2d(v0: &TVec2, v1: &TVec2, epsilon: N) -> b } /// Returns `true` if two vectors are orthogonal (up to an epsilon). -pub fn are_orthogonal(v0: &TVec, v1: &TVec, epsilon: N) -> bool - where DefaultAllocator: Alloc { +pub fn are_orthogonal( + v0: &TVec, + v1: &TVec, + epsilon: N, +) -> bool +where + DefaultAllocator: Alloc, +{ abs_diff_eq!(v0.dot(v1), N::zero(), epsilon = epsilon) } @@ -34,18 +40,18 @@ pub fn are_orthogonal(v0: &TVec, v1: &TVec, /// Returns `true` if all the components of `v` are zero (up to an epsilon). pub fn is_comp_null(v: &TVec, epsilon: N) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(|x| abs_diff_eq!(x, N::zero(), epsilon = epsilon)) } /// Returns `true` if `v` has a magnitude of 1 (up to an epsilon). pub fn is_normalized(v: &TVec, epsilon: N) -> bool - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { abs_diff_eq!(v.norm_squared(), N::one(), epsilon = epsilon * epsilon) } /// Returns `true` if `v` is zero (up to an epsilon). pub fn is_null(v: &TVec, epsilon: N) -> bool - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { abs_diff_eq!(*v, TVec::::zeros(), epsilon = epsilon) } diff --git a/nalgebra-glm/src/lib.rs b/nalgebra-glm/src/lib.rs index dc485846c..495b61d97 100644 --- a/nalgebra-glm/src/lib.rs +++ b/nalgebra-glm/src/lib.rs @@ -1,114 +1,114 @@ /*! # nalgebra-glm − nalgebra in _easy mode_ - **nalgebra-glm** is a GLM-like interface for the **nalgebra** general-purpose linear algebra library. - [GLM](https://glm.g-truc.net) itself is a popular C++ linear algebra library essentially targeting computer graphics. Therefore - **nalgebra-glm** draws inspiration from GLM to define a nice and easy-to-use API for simple graphics application. - - All the types of **nalgebra-glm** are aliases of types from **nalgebra**. Therefore there is a complete and - seamless inter-operability between both. - - ## Getting started - First of all, you should start by taking a look at the official [GLM API documentation](http://glm.g-truc.net/0.9.9/api/index.html) - since **nalgebra-glm** implements a large subset of it. To use **nalgebra-glm** to your project, you - should add it as a dependency to your `Crates.toml`: - - ```toml - [dependencies] - nalgebra-glm = "0.1" - ``` - - Then, you should add an `extern crate` statement to your `lib.rs` or `main.rs` file. It is **strongly - recommended** to add a crate alias to `glm` as well so that you will be able to call functions of - **nalgebra-glm** using the module prefix `glm::`. For example you will write `glm::rotate(...)` instead - of the more verbose `nalgebra_glm::rotate(...)`: - - ```rust - extern crate nalgebra_glm as glm; - ``` - - ## Features overview - **nalgebra-glm** supports most linear-algebra related features of the C++ GLM library. 
Mathematically - speaking, it supports all the common transformations like rotations, translations, scaling, shearing, - and projections but operating in homogeneous coordinates. This means all the 2D transformations are - expressed as 3x3 matrices, and all the 3D transformations as 4x4 matrices. This is less computationally-efficient - and memory-efficient than nalgebra's [transformation types](https://www.nalgebra.org/points_and_transformations/#transformations), - but this has the benefit of being simpler to use. - ### Main differences compared to GLM - While **nalgebra-glm** follows the feature line of the C++ GLM library, quite a few differences - remain and they are mostly syntactic. The main ones are: - * All function names use `snake_case`, which is the Rust convention. - * All type names use `CamelCase`, which is the Rust convention. - * All function arguments, except for scalars, are all passed by-reference. - * The most generic vector and matrix types are [`TMat`](type.TMat.html) and [`TVec`](type.TVec.html) instead of `mat` and `vec`. - * Some feature are not yet implemented and should be added in the future. In particular, no packing - functions are available. - * A few features are not implemented and will never be. This includes functions related to color - spaces, and closest points computations. Other crates should be used for those. For example, closest - points computation can be handled by the [ncollide](https://ncollide.org) project. - - In addition, because Rust does not allows function overloading, all functions must be given a unique name. - Here are a few rules chosen arbitrarily for **nalgebra-glm**: - * Functions operating in 2d will usually end with the `2d` suffix, e.g., [`glm::rotate2d`](fn.rotate2d.html) is for 2D while [`glm::rotate`](fn.rotate.html) is for 3D. - * Functions operating on vectors will often end with the `_vec` suffix, possibly followed by the dimension of vector, e.g., [`glm::rotate_vec2`](fn.rotate_vec2.html). - * Every function related to quaternions start with the `quat_` prefix, e.g., [`glm::quat_dot(q1, q2)`](fn.quat_dot.html). - * All the conversion functions have unique names as described [below](#conversions). - ### Vector and matrix construction - Vectors, matrices, and quaternions can be constructed using several approaches: - * Using functions with the same name as their type in lower-case. For example [`glm::vec3(x, y, z)`](fn.vec3.html) will create a 3D vector. - * Using the `::new` constructor. For example [`Vec3::new(x, y, z)`](../nalgebra/base/type.MatrixMN.html#method.new-27) will create a 3D vector. - * Using the functions prefixed by `make_` to build a vector a matrix from a slice. For example [`glm::make_vec3(&[x, y, z])`](fn.make_vec3.html) will create a 3D vector. - Keep in mind that constructing a matrix using this type of functions require its components to be arranged in column-major order on the slice. - * Using a geometric construction function. For example [`glm::rotation(angle, axis)`](fn.rotation.html) will build a 4x4 homogeneous rotation matrix from an angle (in radians) and an axis. - * Using swizzling and conversions as described in the next sections. - ### Swizzling - Vector swizzling is a native feature of **nalgebra** itself. Therefore, you can use it with all - the vectors of **nalgebra-glm** as well. Swizzling is supported as methods and works only up to - dimension 3, i.e., you can only refer to the components `x`, `y` and `z` and can only create a - 2D or 3D vector using this technique. 
Here is some examples, assuming `v` is a vector with float - components here: - * `v.xx()` is equivalent to `glm::vec2(v.x, v.x)` and to `Vec2::new(v.x, v.x)`. - * `v.zx()` is equivalent to `glm::vec2(v.z, v.x)` and to `Vec2::new(v.z, v.x)`. - * `v.yxz()` is equivalent to `glm::vec3(v.y, v.x, v.z)` and to `Vec3::new(v.y, v.x, v.z)`. - * `v.zzy()` is equivalent to `glm::vec3(v.z, v.z, v.y)` and to `Vec3::new(v.z, v.z, v.y)`. - - Any combination of two or three components picked among `x`, `y`, and `z` will work. - ### Conversions - It is often useful to convert one algebraic type to another. There are two main approaches for converting - between types in `nalgebra-glm`: - * Using function with the form `type1_to_type2` in order to convert an instance of `type1` into an instance of `type2`. - For example [`glm::mat3_to_mat4(m)`](fn.mat3_to_mat4.html) will convert the 3x3 matrix `m` to a 4x4 matrix by appending one column on the right - and one row on the left. Those now row and columns are filled with 0 except for the diagonal element which is set to 1. - * Using one of the [`convert`](fn.convert.html), [`try_convert`](fn.try_convert.html), or [`convert_unchecked`](fn.convert_unchecked.html) functions. - These functions are directly re-exported from nalgebra and are extremely versatile: - 1. The `convert` function can convert any type (especially geometric types from nalgebra like `Isometry3`) into another algebraic type which is equivalent but more general. For example, - `let sim: Similarity3<_> = na::convert(isometry)` will convert an `Isometry3` into a `Similarity3`. - In addition, `let mat: Mat4 = glm::convert(isometry)` will convert an `Isometry3` to a 4x4 matrix. This will also convert the scalar types, - therefore: `let mat: DMat4 = glm::convert(m)` where `m: Mat4` will work. However, conversion will not work the other way round: you - can't convert a `Matrix4` to an `Isometry3` using `glm::convert` because that could cause unexpected results if the matrix does - not complies to the requirements of the isometry. - 2. If you need this kind of conversions anyway, you can use `try_convert` which will test if the object being converted complies with the algebraic requirements of the target type. - This will return `None` if the requirements are not satisfied. - 3. The `convert_unchecked` will ignore those tests and always perform the conversion, even if that breaks the invariants of the target type. - This must be used with care! This is actually the recommended method to convert between homogeneous transformations generated by `nalgebra-glm` and - specific transformation types from **nalgebra** like `Isometry3`. Just be careful you know your conversions make sense. - - ### Should I use nalgebra or nalgebra-glm? - Well that depends on your tastes and your background. **nalgebra** is more powerful overall since it allows stronger typing, - and goes much further than simple computer graphics math. However, has a bit of a learning curve for - those not used to the abstract mathematical concepts for transformations. **nalgebra-glm** however - have more straightforward functions and benefit from the various tutorials existing on the internet - for the original C++ GLM library. - - Overall, if you are already used to the C++ GLM library, or to working with homogeneous coordinates (like 4D - matrices for 3D transformations), then you will have more success with **nalgebra-glm**. 
If on the other - hand you prefer more rigorous treatments of transformations, with type-level restrictions, then go for **nalgebra**. - If you need dynamically-sized matrices, you should go for **nalgebra** as well. - - Keep in mind that **nalgebra-glm** is just a different API for **nalgebra**. So you can very well use both - and benefit from both their advantages: use **nalgebra-glm** when mathematical rigor is not that important, - and **nalgebra** itself when you need more expressive types, and more powerful linear algebra operations like - matrix factorizations and slicing. Just remember that all the **nalgebra-glm** types are just aliases to **nalgebra** types, - and keep in mind it is possible to convert, e.g., an `Isometry3` to a `Mat4` and vice-versa (see the [conversions section](#conversions)). - */ + **nalgebra-glm** is a GLM-like interface for the **nalgebra** general-purpose linear algebra library. + [GLM](https://glm.g-truc.net) itself is a popular C++ linear algebra library essentially targeting computer graphics. Therefore + **nalgebra-glm** draws inspiration from GLM to define a nice and easy-to-use API for simple graphics application. + + All the types of **nalgebra-glm** are aliases of types from **nalgebra**. Therefore there is a complete and + seamless inter-operability between both. + + ## Getting started + First of all, you should start by taking a look at the official [GLM API documentation](http://glm.g-truc.net/0.9.9/api/index.html) + since **nalgebra-glm** implements a large subset of it. To use **nalgebra-glm** to your project, you + should add it as a dependency to your `Crates.toml`: + + ```toml + [dependencies] + nalgebra-glm = "0.1" + ``` + + Then, you should add an `extern crate` statement to your `lib.rs` or `main.rs` file. It is **strongly + recommended** to add a crate alias to `glm` as well so that you will be able to call functions of + **nalgebra-glm** using the module prefix `glm::`. For example you will write `glm::rotate(...)` instead + of the more verbose `nalgebra_glm::rotate(...)`: + + ```rust + extern crate nalgebra_glm as glm; + ``` + + ## Features overview + **nalgebra-glm** supports most linear-algebra related features of the C++ GLM library. Mathematically + speaking, it supports all the common transformations like rotations, translations, scaling, shearing, + and projections but operating in homogeneous coordinates. This means all the 2D transformations are + expressed as 3x3 matrices, and all the 3D transformations as 4x4 matrices. This is less computationally-efficient + and memory-efficient than nalgebra's [transformation types](https://www.nalgebra.org/points_and_transformations/#transformations), + but this has the benefit of being simpler to use. + ### Main differences compared to GLM + While **nalgebra-glm** follows the feature line of the C++ GLM library, quite a few differences + remain and they are mostly syntactic. The main ones are: + * All function names use `snake_case`, which is the Rust convention. + * All type names use `CamelCase`, which is the Rust convention. + * All function arguments, except for scalars, are all passed by-reference. + * The most generic vector and matrix types are [`TMat`](type.TMat.html) and [`TVec`](type.TVec.html) instead of `mat` and `vec`. + * Some feature are not yet implemented and should be added in the future. In particular, no packing + functions are available. + * A few features are not implemented and will never be. 
This includes functions related to color
+ spaces, and closest points computations. Other crates should be used for those. For example, closest
+ points computation can be handled by the [ncollide](https://ncollide.org) project.
+
+ In addition, because Rust does not allow function overloading, all functions must be given a unique name.
+ Here are a few rules chosen arbitrarily for **nalgebra-glm**:
+ * Functions operating in 2d will usually end with the `2d` suffix, e.g., [`glm::rotate2d`](fn.rotate2d.html) is for 2D while [`glm::rotate`](fn.rotate.html) is for 3D.
+ * Functions operating on vectors will often end with the `_vec` suffix, possibly followed by the dimension of the vector, e.g., [`glm::rotate_vec2`](fn.rotate_vec2.html).
+ * Every function related to quaternions starts with the `quat_` prefix, e.g., [`glm::quat_dot(q1, q2)`](fn.quat_dot.html).
+ * All the conversion functions have unique names as described [below](#conversions).
+ ### Vector and matrix construction
+ Vectors, matrices, and quaternions can be constructed using several approaches:
+ * Using functions with the same name as their type in lower-case. For example [`glm::vec3(x, y, z)`](fn.vec3.html) will create a 3D vector.
+ * Using the `::new` constructor. For example [`Vec3::new(x, y, z)`](../nalgebra/base/type.MatrixMN.html#method.new-27) will create a 3D vector.
+ * Using the functions prefixed by `make_` to build a vector or a matrix from a slice. For example [`glm::make_vec3(&[x, y, z])`](fn.make_vec3.html) will create a 3D vector.
+ Keep in mind that constructing a matrix using this type of function requires its components to be arranged in column-major order on the slice.
+ * Using a geometric construction function. For example [`glm::rotation(angle, axis)`](fn.rotation.html) will build a 4x4 homogeneous rotation matrix from an angle (in radians) and an axis.
+ * Using swizzling and conversions as described in the next sections.
+ ### Swizzling
+ Vector swizzling is a native feature of **nalgebra** itself. Therefore, you can use it with all
+ the vectors of **nalgebra-glm** as well. Swizzling is supported as methods and works only up to
+ dimension 3, i.e., you can only refer to the components `x`, `y` and `z` and can only create a
+ 2D or 3D vector using this technique. Here are some examples, assuming `v` is a vector with float
+ components:
+ * `v.xx()` is equivalent to `glm::vec2(v.x, v.x)` and to `Vec2::new(v.x, v.x)`.
+ * `v.zx()` is equivalent to `glm::vec2(v.z, v.x)` and to `Vec2::new(v.z, v.x)`.
+ * `v.yxz()` is equivalent to `glm::vec3(v.y, v.x, v.z)` and to `Vec3::new(v.y, v.x, v.z)`.
+ * `v.zzy()` is equivalent to `glm::vec3(v.z, v.z, v.y)` and to `Vec3::new(v.z, v.z, v.y)`.
+
+ Any combination of two or three components picked among `x`, `y`, and `z` will work.
+ ### Conversions
+ It is often useful to convert one algebraic type to another. There are two main approaches for converting
+ between types in `nalgebra-glm`:
+ * Using functions with the form `type1_to_type2` in order to convert an instance of `type1` into an instance of `type2`.
+ For example [`glm::mat3_to_mat4(m)`](fn.mat3_to_mat4.html) will convert the 3x3 matrix `m` to a 4x4 matrix by appending one column on the right
+ and one row on the bottom. Those new row and column are filled with 0 except for the diagonal element which is set to 1.
+ * Using one of the [`convert`](fn.convert.html), [`try_convert`](fn.try_convert.html), or [`convert_unchecked`](fn.convert_unchecked.html) functions.
+ These functions are directly re-exported from nalgebra and are extremely versatile:
+ 1. The `convert` function can convert any type (especially geometric types from nalgebra like `Isometry3`) into another algebraic type which is equivalent but more general. For example,
+ `let sim: Similarity3<_> = na::convert(isometry)` will convert an `Isometry3` into a `Similarity3`.
+ In addition, `let mat: Mat4 = glm::convert(isometry)` will convert an `Isometry3` to a 4x4 matrix. This will also convert the scalar types,
+ therefore: `let mat: DMat4 = glm::convert(m)` where `m: Mat4` will work. However, conversion will not work the other way round: you
+ can't convert a `Matrix4` to an `Isometry3` using `glm::convert` because that could cause unexpected results if the matrix does
+ not comply with the requirements of the isometry.
+ 2. If you need this kind of conversion anyway, you can use `try_convert` which will test if the object being converted complies with the algebraic requirements of the target type.
+ This will return `None` if the requirements are not satisfied.
+ 3. The `convert_unchecked` function will ignore those tests and always perform the conversion, even if that breaks the invariants of the target type.
+ This must be used with care! This is actually the recommended method to convert between homogeneous transformations generated by `nalgebra-glm` and
+ specific transformation types from **nalgebra** like `Isometry3`. Just be careful you know your conversions make sense.
+
+ ### Should I use nalgebra or nalgebra-glm?
+ Well, that depends on your tastes and your background. **nalgebra** is more powerful overall since it allows stronger typing,
+ and goes much further than simple computer graphics math. However, it has a bit of a learning curve for
+ those not used to the abstract mathematical concepts for transformations. **nalgebra-glm**, however,
+ has more straightforward functions and benefits from the various tutorials existing on the internet
+ for the original C++ GLM library.
+
+ Overall, if you are already used to the C++ GLM library, or to working with homogeneous coordinates (like 4D
+ matrices for 3D transformations), then you will have more success with **nalgebra-glm**. If on the other
+ hand you prefer more rigorous treatments of transformations, with type-level restrictions, then go for **nalgebra**.
+ If you need dynamically-sized matrices, you should go for **nalgebra** as well.
+
+ Keep in mind that **nalgebra-glm** is just a different API for **nalgebra**. So you can very well use both
+ and benefit from the advantages of both: use **nalgebra-glm** when mathematical rigor is not that important,
+ and **nalgebra** itself when you need more expressive types, and more powerful linear algebra operations like
+ matrix factorizations and slicing. Just remember that all the **nalgebra-glm** types are just aliases to **nalgebra** types,
+ and keep in mind it is possible to convert, e.g., an `Isometry3` to a `Mat4` and vice-versa (see the [conversions section](#conversions)).
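To make the construction, swizzling, and conversion rules described in the crate docs above concrete, here is a small self-contained sketch. It is an editorial illustration rather than part of the patch, and it assumes both `nalgebra` and `nalgebra-glm` as dependencies with the `na`/`glm` aliases used throughout these docs; the values are arbitrary.

```rust
extern crate nalgebra as na;
extern crate nalgebra_glm as glm;

fn main() {
    // Construction: GLSL-style helpers, plus `make_*` from column-major slices.
    let v = glm::vec3(1.0f32, 2.0, 3.0);
    let m2 = glm::make_mat2(&[1.0f32, 2.0, 3.0, 4.0]);

    // Swizzling, as described above (methods up to dimension 3).
    let xy = v.xy();
    let zzy = v.zzy();

    // Conversions: `type1_to_type2` helpers and the re-exported `convert*` family.
    let m3 = glm::mat3(1.0f32, 0.0, 0.0,
                       0.0, 1.0, 0.0,
                       0.0, 0.0, 1.0);
    let m4 = glm::mat3_to_mat4(&m3);
    let iso = na::Isometry3::new(glm::vec3(1.0f32, 2.0, 3.0), glm::vec3(0.0, 0.0, 0.0));
    let iso_as_mat: glm::TMat4<f32> = glm::convert(iso);

    println!("{} {} {} {} {} {}", v, m2, xy, zzy, m4, iso_as_mat);
}
```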
+*/ #![doc(html_favicon_url = "http://nalgebra.org/img/favicon.ico")] @@ -119,68 +119,82 @@ extern crate alga; extern crate nalgebra as na; pub use aliases::*; +pub use common::{ + abs, ceil, clamp, clamp_scalar, clamp_vec, float_bits_to_int, float_bits_to_int_vec, + float_bits_to_uint, float_bits_to_uint_vec, floor, fract, int_bits_to_float, + int_bits_to_float_vec, mix, modf, modf_vec, round, sign, smoothstep, step, step_scalar, + step_vec, trunc, uint_bits_to_float, uint_bits_to_float_scalar, +}; pub use constructors::*; -pub use common::{abs, ceil, clamp, clamp_scalar, clamp_vec, float_bits_to_int, float_bits_to_int_vec, float_bits_to_uint, float_bits_to_uint_vec, floor, fract, int_bits_to_float, int_bits_to_float_vec, mix, modf, modf_vec, round, sign, smoothstep, step, step_scalar, step_vec, trunc, uint_bits_to_float, uint_bits_to_float_scalar}; -pub use geometric::{reflect_vec, cross, distance, dot, faceforward, length, magnitude, normalize, refract_vec}; -pub use matrix::{transpose, determinant, inverse, matrix_comp_mult, outer_product}; -pub use traits::{Dimension, Number, Alloc}; -pub use trigonometric::{acos, acosh, asin, asinh, atan, atan2, atanh, cos, cosh, degrees, radians, sin, sinh, tan, tanh}; -pub use vector_relational::{all, any, equal, greater_than, greater_than_equal, less_than, less_than_equal, not, not_equal}; pub use exponential::{exp, exp2, inversesqrt, log, log2, pow, sqrt}; +pub use geometric::{ + cross, distance, dot, faceforward, length, magnitude, normalize, reflect_vec, refract_vec, +}; +pub use matrix::{determinant, inverse, matrix_comp_mult, outer_product, transpose}; +pub use traits::{Alloc, Dimension, Number}; +pub use trigonometric::{ + acos, acosh, asin, asinh, atan, atan2, atanh, cos, cosh, degrees, radians, sin, sinh, tan, tanh, +}; +pub use vector_relational::{ + all, any, equal, greater_than, greater_than_equal, less_than, less_than_equal, not, not_equal, +}; -pub use gtx::{ - comp_add, comp_max, comp_min, comp_mul, - cross2d, - left_handed, right_handed, - matrix_cross, matrix_cross3, - diagonal2x2, diagonal2x3, diagonal2x4, diagonal3x2, diagonal3x3, diagonal3x4, diagonal4x2, diagonal4x3, diagonal4x4, - distance2, l1_distance, l1_norm, l2_distance, l2_norm, length2, magnitude2, - triangle_normal, - fast_normalize_dot, normalize_dot, - quat_rotate_normalized_axis, rotate_normalized_axis, - orientation, rotate_vec2, rotate_vec3, rotate_vec4, rotate_x_vec4, rotate_x_vec3, rotate_y_vec4, rotate_y_vec3, rotate_z_vec4, rotate_z_vec3, slerp, - rotation, scaling, translation, rotation2d, scaling2d, translation2d, - proj, proj2d, reflect, reflect2d, scale_bias, scale_bias_matrix, shear2d_x, shear_x, shear_y, shear2d_y, shear_z, - rotate2d, scale2d, translate2d, - angle, - are_collinear, are_collinear2d, are_orthogonal, is_comp_null, is_normalized, is_null, - quat_to_mat3, quat_rotate_vec, quat_cross_vec, mat3_to_quat, quat_extract_real_component, quat_fast_mix, quat_inv_cross_vec, quat_length2, quat_magnitude2, quat_identity, quat_rotate_vec3, quat_rotation, quat_short_mix, quat_to_mat4, to_quat +pub use ext::{ + epsilon, equal_columns, equal_columns_eps, equal_columns_eps_vec, equal_eps, equal_eps_vec, + identity, look_at, look_at_lh, look_at_rh, max, max2, max3, max3_scalar, max4, max4_scalar, + min, min2, min3, min3_scalar, min4, min4_scalar, not_equal_columns, not_equal_columns_eps, + not_equal_columns_eps_vec, not_equal_eps, not_equal_eps_vec, ortho, perspective, pi, + pick_matrix, project, project_no, project_zo, quat_angle, quat_angle_axis, quat_axis, 
+ quat_conjugate, quat_cross, quat_dot, quat_equal, quat_equal_eps, quat_exp, quat_inverse, + quat_length, quat_lerp, quat_log, quat_magnitude, quat_normalize, quat_not_equal, + quat_not_equal_eps, quat_pow, quat_rotate, quat_slerp, rotate, rotate_x, rotate_y, rotate_z, + scale, translate, unproject, unproject_no, unproject_zo, }; pub use gtc::{ - e, two_pi, euler, four_over_pi, golden_ratio, half_pi, ln_ln_two, ln_ten, ln_two, one, one_over_pi, one_over_root_two, one_over_two_pi, quarter_pi, root_five, root_half_pi, root_ln_four, root_pi, root_three, root_two, root_two_pi, third, three_over_two_pi, two_over_pi, two_over_root_pi, two_thirds, zero, - column, row, set_column, set_row, - affine_inverse, inverse_transpose, - make_mat2, make_mat2x2, make_mat2x3, make_mat2x4, make_mat3, make_mat3x2, make_mat3x3, make_mat3x4, make_mat4, make_mat4x2, make_mat4x3, make_mat4x4, make_quat, make_vec1, make_vec2, make_vec3, make_vec4, value_ptr, value_ptr_mut, vec1_to_vec2, vec1_to_vec3, vec1_to_vec4, vec2_to_vec1, vec2_to_vec2, vec2_to_vec3, vec2_to_vec4, vec3_to_vec1, vec3_to_vec2, vec3_to_vec3, vec3_to_vec4, vec4_to_vec1, vec4_to_vec2, vec4_to_vec3, vec4_to_vec4, mat2_to_mat3, mat2_to_mat4, mat3_to_mat2, mat3_to_mat4, mat4_to_mat2, mat4_to_mat3, - quat_cast, quat_euler_angles, quat_greater_than, quat_greater_than_equal, quat_less_than, quat_less_than_equal, quat_look_at, quat_look_at_lh, quat_look_at_rh, quat_pitch, quat_roll, quat_yaw + affine_inverse, column, e, euler, four_over_pi, golden_ratio, half_pi, inverse_transpose, + ln_ln_two, ln_ten, ln_two, make_mat2, make_mat2x2, make_mat2x3, make_mat2x4, make_mat3, + make_mat3x2, make_mat3x3, make_mat3x4, make_mat4, make_mat4x2, make_mat4x3, make_mat4x4, + make_quat, make_vec1, make_vec2, make_vec3, make_vec4, mat2_to_mat3, mat2_to_mat4, + mat3_to_mat2, mat3_to_mat4, mat4_to_mat2, mat4_to_mat3, one, one_over_pi, one_over_root_two, + one_over_two_pi, quarter_pi, quat_cast, quat_euler_angles, quat_greater_than, + quat_greater_than_equal, quat_less_than, quat_less_than_equal, quat_look_at, quat_look_at_lh, + quat_look_at_rh, quat_pitch, quat_roll, quat_yaw, root_five, root_half_pi, root_ln_four, + root_pi, root_three, root_two, root_two_pi, row, set_column, set_row, third, three_over_two_pi, + two_over_pi, two_over_root_pi, two_pi, two_thirds, value_ptr, value_ptr_mut, vec1_to_vec2, + vec1_to_vec3, vec1_to_vec4, vec2_to_vec1, vec2_to_vec2, vec2_to_vec3, vec2_to_vec4, + vec3_to_vec1, vec3_to_vec2, vec3_to_vec3, vec3_to_vec4, vec4_to_vec1, vec4_to_vec2, + vec4_to_vec3, vec4_to_vec4, zero, }; -pub use ext::{ - ortho, perspective, - pick_matrix, project, project_no, project_zo, unproject, unproject_no, unproject_zo, - equal_columns, equal_columns_eps, equal_columns_eps_vec, not_equal_columns, not_equal_columns_eps, not_equal_columns_eps_vec, - identity, look_at, look_at_lh, rotate, scale, look_at_rh, translate, rotate_x, rotate_y, rotate_z, - max3_scalar, max4_scalar, min3_scalar, min4_scalar, - epsilon, pi, - max, max2, max3, max4, min, min2, min3, min4, - equal_eps, equal_eps_vec, not_equal_eps, not_equal_eps_vec, - quat_conjugate, quat_inverse, quat_lerp, quat_slerp, - quat_cross, quat_dot, quat_length, quat_magnitude, quat_normalize, - quat_equal, quat_equal_eps, quat_not_equal, quat_not_equal_eps, - quat_exp, quat_log, quat_pow, quat_rotate, - quat_angle, quat_angle_axis, quat_axis +pub use gtx::{ + angle, are_collinear, are_collinear2d, are_orthogonal, comp_add, comp_max, comp_min, comp_mul, + cross2d, diagonal2x2, diagonal2x3, diagonal2x4, 
diagonal3x2, diagonal3x3, diagonal3x4, + diagonal4x2, diagonal4x3, diagonal4x4, distance2, fast_normalize_dot, is_comp_null, + is_normalized, is_null, l1_distance, l1_norm, l2_distance, l2_norm, left_handed, length2, + magnitude2, mat3_to_quat, matrix_cross, matrix_cross3, normalize_dot, orientation, proj, + proj2d, quat_cross_vec, quat_extract_real_component, quat_fast_mix, quat_identity, + quat_inv_cross_vec, quat_length2, quat_magnitude2, quat_rotate_normalized_axis, + quat_rotate_vec, quat_rotate_vec3, quat_rotation, quat_short_mix, quat_to_mat3, quat_to_mat4, + reflect, reflect2d, right_handed, rotate2d, rotate_normalized_axis, rotate_vec2, rotate_vec3, + rotate_vec4, rotate_x_vec3, rotate_x_vec4, rotate_y_vec3, rotate_y_vec4, rotate_z_vec3, + rotate_z_vec4, rotation, rotation2d, scale2d, scale_bias, scale_bias_matrix, scaling, + scaling2d, shear2d_x, shear2d_y, shear_x, shear_y, shear_z, slerp, to_quat, translate2d, + translation, translation2d, triangle_normal, }; -pub use na::{convert, convert_ref, convert_unchecked, convert_ref_unchecked, try_convert, try_convert_ref}; -pub use na::{Scalar, Real, DefaultAllocator, U1, U2, U3, U4}; +pub use na::{ + convert, convert_ref, convert_ref_unchecked, convert_unchecked, try_convert, try_convert_ref, +}; +pub use na::{DefaultAllocator, Real, Scalar, U1, U2, U3, U4}; mod aliases; -mod constructors; mod common; -mod matrix; +mod constructors; +mod exponential; mod geometric; +mod matrix; mod traits; mod trigonometric; mod vector_relational; -mod exponential; //mod integer; //mod packing; diff --git a/nalgebra-glm/src/matrix.rs b/nalgebra-glm/src/matrix.rs index b61242c94..812122484 100644 --- a/nalgebra-glm/src/matrix.rs +++ b/nalgebra-glm/src/matrix.rs @@ -1,34 +1,46 @@ -use na::{Scalar, Real, DefaultAllocator}; +use na::{DefaultAllocator, Real, Scalar}; -use traits::{Alloc, Dimension, Number}; use aliases::{TMat, TVec}; +use traits::{Alloc, Dimension, Number}; /// The determinant of the matrix `m`. pub fn determinant(m: &TMat) -> N - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { m.determinant() } /// The inverse of the matrix `m`. pub fn inverse(m: &TMat) -> TMat - where DefaultAllocator: Alloc { - m.clone().try_inverse().unwrap_or_else(TMat::::zeros) +where DefaultAllocator: Alloc { + m.clone() + .try_inverse() + .unwrap_or_else(TMat::::zeros) } /// Component-wise multiplication of two matrices. -pub fn matrix_comp_mult(x: &TMat, y: &TMat) -> TMat - where DefaultAllocator: Alloc { +pub fn matrix_comp_mult( + x: &TMat, + y: &TMat, +) -> TMat +where + DefaultAllocator: Alloc, +{ x.component_mul(y) } /// Treats the first parameter `c` as a column vector and the second parameter `r` as a row vector and does a linear algebraic matrix multiply `c * r`. -pub fn outer_product(c: &TVec, r: &TVec) -> TMat - where DefaultAllocator: Alloc { +pub fn outer_product( + c: &TVec, + r: &TVec, +) -> TMat +where + DefaultAllocator: Alloc, +{ c * r.transpose() } /// The transpose of the matrix `m`. 
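The free functions in `matrix.rs` reformatted in this hunk (`determinant`, `inverse`, `matrix_comp_mult`, `outer_product`, `transpose`) are thin wrappers over the corresponding nalgebra methods. A brief illustrative sketch (not part of the patch; `glm` alias assumed):

```rust
extern crate nalgebra_glm as glm;

fn main() {
    let m = glm::mat2(4.0, 7.0,
                      2.0, 6.0);

    assert_eq!(glm::determinant(&m), 10.0);  // 4 * 6 - 7 * 2
    let inv = glm::inverse(&m);              // falls back to the zero matrix if `m` is singular
    let t = glm::transpose(&m);
    let had = glm::matrix_comp_mult(&m, &m); // component-wise product

    // `outer_product(c, r)` is the matrix `c * transpose(r)`.
    let outer = glm::outer_product(&glm::vec3(1.0, 2.0, 3.0), &glm::vec3(4.0, 5.0, 6.0));

    println!("{} {} {} {}", inv, t, had, outer);
}
```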
pub fn transpose(x: &TMat) -> TMat - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.transpose() } diff --git a/nalgebra-glm/src/traits.rs b/nalgebra-glm/src/traits.rs index 02e62e977..3c2033c13 100644 --- a/nalgebra-glm/src/traits.rs +++ b/nalgebra-glm/src/traits.rs @@ -1,48 +1,81 @@ -use num::{Signed, FromPrimitive, Bounded}; use approx::AbsDiffEq; +use num::{Bounded, FromPrimitive, Signed}; -use alga::general::{Ring, Lattice}; -use na::{Scalar, DimName, DimMin, U1}; +use alga::general::{Lattice, Ring}; use na::allocator::Allocator; +use na::{DimMin, DimName, Scalar, U1}; /// A type-level number representing a vector, matrix row, or matrix column, dimension. pub trait Dimension: DimName + DimMin {} impl> Dimension for D {} - /// A number that can either be an integer or a float. -pub trait Number: Scalar + Ring + Lattice + AbsDiffEq + Signed + FromPrimitive + Bounded { +pub trait Number: + Scalar + Ring + Lattice + AbsDiffEq + Signed + FromPrimitive + Bounded +{ } -impl + Signed + FromPrimitive + Bounded> Number for T { +impl + Signed + FromPrimitive + Bounded> + Number for T +{ } #[doc(hidden)] pub trait Alloc: -Allocator + Allocator + Allocator + Allocator + Allocator + Allocator + Allocator + Allocator + -Allocator + Allocator + -Allocator + Allocator + -Allocator + Allocator + -Allocator + Allocator + -Allocator + Allocator + -Allocator + Allocator + -Allocator + Allocator + -Allocator + Allocator + -Allocator<(usize, usize), R> + Allocator<(usize, usize), C> + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator<(usize, usize), R> + + Allocator<(usize, usize), C> { } -impl - Alloc for T -where T: Allocator + Allocator + Allocator + Allocator + Allocator + Allocator + Allocator + Allocator + - Allocator + Allocator + - Allocator + Allocator + - Allocator + Allocator + - Allocator + Allocator + - Allocator + Allocator + - Allocator + Allocator + - Allocator + Allocator + - Allocator + Allocator + - Allocator<(usize, usize), R> + Allocator<(usize, usize), C> +impl Alloc for T where T: Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator + + Allocator<(usize, usize), R> + + Allocator<(usize, usize), C> { -} \ No newline at end of file +} diff --git a/nalgebra-glm/src/trigonometric.rs b/nalgebra-glm/src/trigonometric.rs index e9f69ae5f..cf0b26529 100644 --- a/nalgebra-glm/src/trigonometric.rs +++ b/nalgebra-glm/src/trigonometric.rs @@ -1,95 +1,94 @@ -use na::{self, Real, DefaultAllocator}; +use na::{self, DefaultAllocator, Real}; use aliases::TVec; use traits::{Alloc, Dimension}; - /// Component-wise arc-cosinus. pub fn acos(x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|e| e.acos()) } /// Component-wise hyperbolic arc-cosinus. pub fn acosh(x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|e| e.acosh()) } /// Component-wise arc-sinus. 
pub fn asin(x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|e| e.asin()) } /// Component-wise hyperbolic arc-sinus. pub fn asinh(x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|e| e.asinh()) } /// Component-wise arc-tangent of `y / x`. pub fn atan2(y: &TVec, x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { y.zip_map(x, |y, x| y.atan2(x)) } /// Component-wise arc-tangent. pub fn atan(y_over_x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { y_over_x.map(|e| e.atan()) } /// Component-wise hyperbolic arc-tangent. pub fn atanh(x: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.map(|e| e.atanh()) } /// Component-wise cosinus. pub fn cos(angle: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { angle.map(|e| e.cos()) } /// Component-wise hyperbolic cosinus. pub fn cosh(angle: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { angle.map(|e| e.cosh()) } /// Component-wise conversion from radians to degrees. pub fn degrees(radians: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { radians.map(|e| e * na::convert(180.0) / N::pi()) } /// Component-wise conversion fro degrees to radians. pub fn radians(degrees: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { degrees.map(|e| e * N::pi() / na::convert(180.0)) } /// Component-wise sinus. pub fn sin(angle: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { angle.map(|e| e.sin()) } /// Component-wise hyperbolic sinus. pub fn sinh(angle: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { angle.map(|e| e.sinh()) } /// Component-wise tangent. pub fn tan(angle: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { angle.map(|e| e.tan()) } /// Component-wise hyperbolic tangent. pub fn tanh(angle: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { angle.map(|e| e.tanh()) } diff --git a/nalgebra-glm/src/vector_relational.rs b/nalgebra-glm/src/vector_relational.rs index 24763e067..c92f69fe8 100644 --- a/nalgebra-glm/src/vector_relational.rs +++ b/nalgebra-glm/src/vector_relational.rs @@ -1,7 +1,7 @@ -use na::{DefaultAllocator}; +use na::DefaultAllocator; use aliases::TVec; -use traits::{Number, Alloc, Dimension}; +use traits::{Alloc, Dimension, Number}; /// Checks that all the vector components are `true`. 
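Since the trigonometric and vector-relational modules in these hunks are all component-wise, one short sketch covers both families (illustrative only, not part of the patch; `glm` alias assumed):

```rust
extern crate nalgebra_glm as glm;

fn main() {
    // The trigonometric helpers map over every component of a vector.
    let deg = glm::vec3(0.0f32, 90.0, 180.0);
    let rad = glm::radians(&deg);
    let s = glm::sin(&rad);

    // The relational helpers return vectors of booleans that `all`/`any`/`not` reduce or negate.
    let small = glm::less_than(&s, &glm::vec3(0.5f32, 0.5, 0.5));
    assert!(glm::any(&small));  // sin(0) and sin(pi) are below 0.5
    assert!(!glm::all(&small)); // but sin(pi/2) is not
    let not_small = glm::not(&small);

    println!("{} {} {}", rad, s, not_small);
}
```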
/// @@ -21,7 +21,7 @@ use traits::{Number, Alloc, Dimension}; /// * [`any`](fn.any.html) /// * [`not`](fn.not.html) pub fn all(v: &TVec) -> bool - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.iter().all(|x| *x) } @@ -46,7 +46,7 @@ pub fn all(v: &TVec) -> bool /// * [`all`](fn.all.html) /// * [`not`](fn.not.html) pub fn any(v: &TVec) -> bool - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.iter().any(|x| *x) } @@ -70,7 +70,7 @@ pub fn any(v: &TVec) -> bool /// * [`not`](fn.not.html) /// * [`not_equal`](fn.not_equal.html) pub fn equal(x: &TVec, y: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.zip_map(y, |x, y| x == y) } @@ -94,7 +94,7 @@ pub fn equal(x: &TVec, y: &TVec) -> TVec(x: &TVec, y: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.zip_map(y, |x, y| x > y) } @@ -117,8 +117,13 @@ pub fn greater_than(x: &TVec, y: &TVec) -> /// * [`less_than_equal`](fn.less_than_equal.html) /// * [`not`](fn.not.html) /// * [`not_equal`](fn.not_equal.html) -pub fn greater_than_equal(x: &TVec, y: &TVec) -> TVec - where DefaultAllocator: Alloc { +pub fn greater_than_equal( + x: &TVec, + y: &TVec, +) -> TVec +where + DefaultAllocator: Alloc, +{ x.zip_map(y, |x, y| x >= y) } @@ -142,7 +147,7 @@ pub fn greater_than_equal(x: &TVec, y: &TVec(x: &TVec, y: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.zip_map(y, |x, y| x < y) } @@ -166,7 +171,7 @@ pub fn less_than(x: &TVec, y: &TVec) -> TVe /// * [`not`](fn.not.html) /// * [`not_equal`](fn.not_equal.html) pub fn less_than_equal(x: &TVec, y: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.zip_map(y, |x, y| x <= y) } @@ -191,7 +196,7 @@ pub fn less_than_equal(x: &TVec, y: &TVec) /// * [`less_than_equal`](fn.less_than_equal.html) /// * [`not_equal`](fn.not_equal.html) pub fn not(v: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { v.map(|x| !x) } @@ -215,6 +220,6 @@ pub fn not(v: &TVec) -> TVec /// * [`less_than_equal`](fn.less_than_equal.html) /// * [`not`](fn.not.html) pub fn not_equal(x: &TVec, y: &TVec) -> TVec - where DefaultAllocator: Alloc { +where DefaultAllocator: Alloc { x.zip_map(y, |x, y| x != y) } diff --git a/nalgebra-lapack/benches/linalg/hessenberg.rs b/nalgebra-lapack/benches/linalg/hessenberg.rs index 90c97b8b1..d62c221ce 100644 --- a/nalgebra-lapack/benches/linalg/hessenberg.rs +++ b/nalgebra-lapack/benches/linalg/hessenberg.rs @@ -1,6 +1,6 @@ -use test::{self, Bencher}; use na::{DMatrix, Matrix4}; use nl::Hessenberg; +use test::{self, Bencher}; #[bench] fn hessenberg_decompose_100x100(bh: &mut Bencher) { diff --git a/nalgebra-lapack/benches/linalg/lu.rs b/nalgebra-lapack/benches/linalg/lu.rs index 950109785..4afd90039 100644 --- a/nalgebra-lapack/benches/linalg/lu.rs +++ b/nalgebra-lapack/benches/linalg/lu.rs @@ -1,6 +1,6 @@ -use test::{self, Bencher}; use na::{DMatrix, Matrix4}; use nl::LU; +use test::{self, Bencher}; #[bench] fn lu_decompose_100x100(bh: &mut Bencher) { diff --git a/nalgebra-lapack/benches/linalg/mod.rs b/nalgebra-lapack/benches/linalg/mod.rs index f42ec3217..e07bd3615 100644 --- a/nalgebra-lapack/benches/linalg/mod.rs +++ b/nalgebra-lapack/benches/linalg/mod.rs @@ -1,3 +1,3 @@ -mod qr; -mod lu; mod hessenberg; +mod lu; +mod qr; diff --git a/nalgebra-lapack/benches/linalg/qr.rs b/nalgebra-lapack/benches/linalg/qr.rs index 07b830d9f..c473e6f5a 100644 --- a/nalgebra-lapack/benches/linalg/qr.rs +++ 
b/nalgebra-lapack/benches/linalg/qr.rs @@ -1,6 +1,6 @@ -use test::{self, Bencher}; use na::{DMatrix, Matrix4}; use nl::QR; +use test::{self, Bencher}; #[bench] fn qr_decompose_100x100(bh: &mut Bencher) { diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs index 26d2ef921..a213e24f2 100644 --- a/nalgebra-lapack/src/cholesky.rs +++ b/nalgebra-lapack/src/cholesky.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use num::Zero; use num_complex::Complex; @@ -15,26 +15,17 @@ use lapack; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator, - MatrixN: Serialize" - ) - ) + serde(bound(serialize = "DefaultAllocator: Allocator, + MatrixN: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator, - MatrixN: Deserialize<'de>" - ) - ) + serde(bound(deserialize = "DefaultAllocator: Allocator, + MatrixN: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct Cholesky -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { l: MatrixN, } @@ -47,8 +38,7 @@ where } impl Cholesky -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Computes the cholesky decomposition of the given symmetric-definite-positive square /// matrix. @@ -124,9 +114,7 @@ where /// Solves in-place the symmetric-definite-positive linear system `self * x = b`, where `x` is /// the unknown to be determined. pub fn solve_mut(&self, b: &mut MatrixMN) -> bool - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let dim = self.l.nrows(); assert!( diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index e1c71075c..9fa40e1ba 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use num::Zero; use num_complex::Complex; @@ -19,27 +19,22 @@ use lapack; #[cfg_attr( feature = "serde-serialize", serde( - bound( - serialize = "DefaultAllocator: Allocator + Allocator, + bound(serialize = "DefaultAllocator: Allocator + Allocator, VectorN: Serialize, - MatrixN: Serialize" - ) + MatrixN: Serialize") ) )] #[cfg_attr( feature = "serde-serialize", serde( - bound( - deserialize = "DefaultAllocator: Allocator + Allocator, + bound(deserialize = "DefaultAllocator: Allocator + Allocator, VectorN: Serialize, - MatrixN: Deserialize<'de>" - ) + MatrixN: Deserialize<'de>") ) )] #[derive(Clone, Debug)] pub struct Eigen -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { /// The eigenvalues of the decomposed matrix. pub eigenvalues: VectorN, @@ -58,8 +53,7 @@ where } impl Eigen -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { /// Computes the eigenvalues and eigenvectors of the square matrix `m`. /// @@ -68,7 +62,8 @@ where mut m: MatrixN, left_eigenvectors: bool, eigenvectors: bool, - ) -> Option> { + ) -> Option> + { assert!( m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix." @@ -234,9 +229,7 @@ where /// /// Panics if the eigenvalue computation does not converge. 
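// Illustrative usage only (not part of the patch): the LAPACK-backed
// Cholesky solver whose serde bounds are condensed above. `solve_mut` is
// the in-place solve shown in this hunk; the constructor name
// `Cholesky::new` returning an `Option` is an assumption.
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::{Matrix3, Vector3};
use nl::Cholesky;

fn main() {
    // A symmetric positive-definite matrix.
    let m = Matrix3::new(4.0, 2.0, 0.0,
                         2.0, 5.0, 1.0,
                         0.0, 1.0, 3.0);
    let chol = Cholesky::new(m).expect("the matrix should be positive-definite");

    // Solve m * x = b in place; `false` would mean the factorization is unusable.
    let mut b = Vector3::new(1.0, 2.0, 3.0);
    assert!(chol.solve_mut(&mut b));
}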
pub fn complex_eigenvalues(mut m: MatrixN) -> VectorN, D> - where - DefaultAllocator: Allocator, D>, - { + where DefaultAllocator: Allocator, D> { assert!( m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix." diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index c42e349bb..57a4b2e18 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -13,30 +13,21 @@ use lapack; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: Serialize, - VectorN>: Serialize" - ) - ) + VectorN>: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: Deserialize<'de>, - VectorN>: Deserialize<'de>" - ) - ) + VectorN>: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct Hessenberg> -where - DefaultAllocator: Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator> { h: MatrixN, tau: VectorN>, @@ -51,8 +42,7 @@ where } impl> Hessenberg -where - DefaultAllocator: Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator> { /// Computes the hessenberg decomposition of the matrix `m`. pub fn new(mut m: MatrixN) -> Hessenberg { @@ -104,8 +94,7 @@ where } impl> Hessenberg -where - DefaultAllocator: Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator> { /// Computes the matrices `(Q, H)` of this decomposition. #[inline] diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs index a001dcc3a..c343ba836 100644 --- a/nalgebra-lapack/src/lib.rs +++ b/nalgebra-lapack/src/lib.rs @@ -68,8 +68,10 @@ #![deny(unused_qualifications)] #![deny(unused_results)] #![deny(missing_docs)] -#![doc(html_favicon_url = "http://nalgebra.org/img/favicon.ico", - html_root_url = "http://nalgebra.org/rustdoc")] +#![doc( + html_favicon_url = "http://nalgebra.org/img/favicon.ico", + html_root_url = "http://nalgebra.org/rustdoc" +)] extern crate alga; extern crate lapack; diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index cab7b763f..47ee912ab 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -20,30 +20,21 @@ use lapack; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: Serialize, - PermutationSequence>: Serialize" - ) - ) + PermutationSequence>: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: Deserialize<'de>, - PermutationSequence>: Deserialize<'de>" - ) - ) + PermutationSequence>: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct LU, C: Dim> -where - DefaultAllocator: Allocator> + Allocator, +where DefaultAllocator: Allocator> + Allocator { lu: MatrixMN, p: VectorN>, @@ -139,9 +130,7 @@ where /// Applies the permutation matrix to a given matrix or vector in-place. 
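// Illustrative usage only (not part of the patch): the LAPACK Hessenberg
// decomposition reformatted above. `Hessenberg::new` appears in this hunk;
// the method returning the `(Q, H)` pair is assumed to be `unpack`.
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix4;
use nl::Hessenberg;

fn main() {
    let m = Matrix4::<f64>::new_random();
    let (q, h) = Hessenberg::new(m).unpack();
    // Q is orthogonal and H is upper-Hessenberg, with m ~= Q * H * Q^T.
    assert!((q * h * q.transpose() - m).norm() < 1.0e-7);
}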
#[inline] pub fn permute(&self, rhs: &mut MatrixMN) - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let (nrows, ncols) = rhs.shape(); N::xlaswp( @@ -156,9 +145,7 @@ where } fn generic_solve_mut(&self, trans: u8, b: &mut MatrixMN) -> bool - where - DefaultAllocator: Allocator + Allocator, - { + where DefaultAllocator: Allocator + Allocator { let dim = self.lu.nrows(); assert!( @@ -246,9 +233,7 @@ where /// /// Returns `false` if no solution was found (the decomposed matrix is singular). pub fn solve_mut(&self, b: &mut MatrixMN) -> bool - where - DefaultAllocator: Allocator + Allocator, - { + where DefaultAllocator: Allocator + Allocator { self.generic_solve_mut(b'N', b) } @@ -257,9 +242,7 @@ where /// /// Returns `false` if no solution was found (the decomposed matrix is singular). pub fn solve_transpose_mut(&self, b: &mut MatrixMN) -> bool - where - DefaultAllocator: Allocator + Allocator, - { + where DefaultAllocator: Allocator + Allocator { self.generic_solve_mut(b'T', b) } diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index f41cb7ce3..1bf55644e 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use num::Zero; use num_complex::Complex; @@ -16,30 +16,21 @@ use lapack; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: Serialize, - VectorN>: Serialize" - ) - ) + VectorN>: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: Deserialize<'de>, - VectorN>: Deserialize<'de>" - ) - ) + VectorN>: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct QR, C: Dim> -where - DefaultAllocator: Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator> { qr: MatrixMN, tau: VectorN>, @@ -54,11 +45,10 @@ where } impl, C: Dim> QR -where - DefaultAllocator: Allocator +where DefaultAllocator: Allocator + Allocator> + Allocator, C> - + Allocator>, + + Allocator> { /// Computes the QR decomposition of the matrix `m`. pub fn new(mut m: MatrixMN) -> QR { @@ -105,11 +95,10 @@ where } impl, C: Dim> QR -where - DefaultAllocator: Allocator +where DefaultAllocator: Allocator + Allocator> + Allocator, C> - + Allocator>, + + Allocator> { /// Retrieves the matrices `(Q, R)` of this decompositions. 
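// Illustrative usage only (not part of the patch): the in-place LAPACK LU
// solves reformatted above (`solve_mut`, `solve_transpose_mut`); the
// constructor name `LU::new` is an assumption.
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::{Matrix3, Vector3};
use nl::LU;

fn main() {
    let m = Matrix3::new(2.0, 1.0, 0.0,
                         1.0, 3.0, 1.0,
                         0.0, 1.0, 4.0);
    let lu = LU::new(m);

    let mut b = Vector3::new(1.0, 0.0, 1.0);
    assert!(lu.solve_mut(&mut b));            // solves m * x = b in place

    let mut bt = Vector3::new(1.0, 0.0, 1.0);
    assert!(lu.solve_transpose_mut(&mut bt)); // solves m^T * x = b in place
}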
pub fn unpack( @@ -131,7 +120,8 @@ where return MatrixMN::from_element_generic(nrows, min_nrows_ncols, N::zero()); } - let mut q = self.qr + let mut q = self + .qr .generic_slice((0, 0), (nrows, min_nrows_ncols)) .into_owned(); diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index b26e464b9..27ce59279 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use num::Zero; use num_complex::Complex; @@ -19,27 +19,22 @@ use lapack; #[cfg_attr( feature = "serde-serialize", serde( - bound( - serialize = "DefaultAllocator: Allocator + Allocator, + bound(serialize = "DefaultAllocator: Allocator + Allocator, VectorN: Serialize, - MatrixN: Serialize" - ) + MatrixN: Serialize") ) )] #[cfg_attr( feature = "serde-serialize", serde( - bound( - deserialize = "DefaultAllocator: Allocator + Allocator, + bound(deserialize = "DefaultAllocator: Allocator + Allocator, VectorN: Serialize, - MatrixN: Deserialize<'de>" - ) + MatrixN: Deserialize<'de>") ) )] #[derive(Clone, Debug)] pub struct RealSchur -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { re: VectorN, im: VectorN, @@ -56,8 +51,7 @@ where } impl RealSchur -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { /// Computes the eigenvalues and real Schur form of the matrix `m`. /// @@ -152,9 +146,7 @@ where /// Computes the complex eigenvalues of the decomposed matrix. pub fn complex_eigenvalues(&self) -> VectorN, D> - where - DefaultAllocator: Allocator, D>, - { + where DefaultAllocator: Allocator, D> { let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) }; for i in 0..out.len() { diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 347f5b9ec..5883578ab 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use num::Signed; use std::cmp; @@ -15,34 +15,25 @@ use lapack; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator> + + serde(bound(serialize = "DefaultAllocator: Allocator> + Allocator + Allocator, MatrixN: Serialize, MatrixN: Serialize, - VectorN>: Serialize" - ) - ) + VectorN>: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator> + + serde(bound(serialize = "DefaultAllocator: Allocator> + Allocator + Allocator, MatrixN: Deserialize<'de>, MatrixN: Deserialize<'de>, - VectorN>: Deserialize<'de>" - ) - ) + VectorN>: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct SVD, C: Dim> -where - DefaultAllocator: Allocator + Allocator> + Allocator, +where DefaultAllocator: Allocator + Allocator> + Allocator { /// The left-singular vectors `U` of this SVD. pub u: MatrixN, // FIXME: should be MatrixMN> @@ -64,22 +55,20 @@ where /// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex`, `Complex`) /// supported by the Singular Value Decompotition. pub trait SVDScalar, C: Dim>: Scalar -where - DefaultAllocator: Allocator +where DefaultAllocator: Allocator + Allocator + Allocator> - + Allocator, + + Allocator { /// Computes the SVD decomposition of `m`. 
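// Illustrative usage only (not part of the patch): the LAPACK QR
// decomposition whose impl blocks are condensed above. `QR::new` and
// `unpack` are the items shown in these hunks; the thin `(Q, R)` shape of
// the returned factors is inferred from the surrounding bounds.
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix4x3;
use nl::QR;

fn main() {
    let m = Matrix4x3::<f64>::new_random();
    let (q, r) = QR::new(m).unpack();
    // Q has orthonormal columns and R is upper triangular, with m ~= Q * R.
    assert!((q * r - m).norm() < 1.0e-7);
}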
fn compute(m: MatrixMN) -> Option>; } impl, R: DimMin, C: Dim> SVD -where - DefaultAllocator: Allocator +where DefaultAllocator: Allocator + Allocator + Allocator> - + Allocator, + + Allocator { /// Computes the Singular Value Decomposition of `matrix`. pub fn new(m: MatrixMN) -> Option { diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index 6ec7e1e92..a06445ee6 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -1,5 +1,5 @@ #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use num::Zero; use std::ops::MulAssign; @@ -18,30 +18,21 @@ use lapack; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - serialize = "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator, VectorN: Serialize, - MatrixN: Serialize" - ) - ) + MatrixN: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde( - bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator, VectorN: Deserialize<'de>, - MatrixN: Deserialize<'de>" - ) - ) + MatrixN: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct SymmetricEigen -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { /// The eigenvectors of the decomposed matrix. pub eigenvectors: MatrixN, @@ -59,8 +50,7 @@ where } impl SymmetricEigen -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { /// Computes the eigenvalues and eigenvectors of the symmetric matrix `m`. /// @@ -89,7 +79,8 @@ where fn do_decompose( mut m: MatrixN, eigenvectors: bool, - ) -> Option<(VectorN, Option>)> { + ) -> Option<(VectorN, Option>)> + { assert!( m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix." 
diff --git a/nalgebra-lapack/tests/linalg/cholesky.rs b/nalgebra-lapack/tests/linalg/cholesky.rs index 5f011bcec..015615324 100644 --- a/nalgebra-lapack/tests/linalg/cholesky.rs +++ b/nalgebra-lapack/tests/linalg/cholesky.rs @@ -1,7 +1,7 @@ use std::cmp; -use nl::Cholesky; use na::{DMatrix, DVector, Matrix3, Matrix4, Matrix4x3, Vector4}; +use nl::Cholesky; quickcheck!{ fn cholesky(m: DMatrix) -> bool { diff --git a/nalgebra-lapack/tests/linalg/lu.rs b/nalgebra-lapack/tests/linalg/lu.rs index c601a8979..652d10329 100644 --- a/nalgebra-lapack/tests/linalg/lu.rs +++ b/nalgebra-lapack/tests/linalg/lu.rs @@ -1,7 +1,7 @@ use std::cmp; -use nl::LU; use na::{DMatrix, DVector, Matrix3x4, Matrix4, Matrix4x3, Vector4}; +use nl::LU; quickcheck!{ fn lup(m: DMatrix) -> bool { diff --git a/nalgebra-lapack/tests/linalg/mod.rs b/nalgebra-lapack/tests/linalg/mod.rs index f692fa88a..ba2283085 100644 --- a/nalgebra-lapack/tests/linalg/mod.rs +++ b/nalgebra-lapack/tests/linalg/mod.rs @@ -1,7 +1,7 @@ -mod real_eigensystem; -mod symmetric_eigen; mod cholesky; mod lu; mod qr; -mod svd; +mod real_eigensystem; mod real_schur; +mod svd; +mod symmetric_eigen; diff --git a/nalgebra-lapack/tests/linalg/qr.rs b/nalgebra-lapack/tests/linalg/qr.rs index baac445b7..ebdb9b348 100644 --- a/nalgebra-lapack/tests/linalg/qr.rs +++ b/nalgebra-lapack/tests/linalg/qr.rs @@ -1,5 +1,5 @@ -use nl::QR; use na::{DMatrix, Matrix4x3}; +use nl::QR; quickcheck!{ fn qr(m: DMatrix) -> bool { diff --git a/nalgebra-lapack/tests/linalg/real_eigensystem.rs b/nalgebra-lapack/tests/linalg/real_eigensystem.rs index 70a3d7874..f3130a545 100644 --- a/nalgebra-lapack/tests/linalg/real_eigensystem.rs +++ b/nalgebra-lapack/tests/linalg/real_eigensystem.rs @@ -1,7 +1,7 @@ use std::cmp; -use nl::Eigen; use na::{DMatrix, Matrix4}; +use nl::Eigen; quickcheck!{ fn eigensystem(n: usize) -> bool { diff --git a/nalgebra-lapack/tests/linalg/real_schur.rs b/nalgebra-lapack/tests/linalg/real_schur.rs index ad6fbb3c5..127107dd3 100644 --- a/nalgebra-lapack/tests/linalg/real_schur.rs +++ b/nalgebra-lapack/tests/linalg/real_schur.rs @@ -1,6 +1,6 @@ -use std::cmp; -use nl::RealSchur; use na::{DMatrix, Matrix4}; +use nl::RealSchur; +use std::cmp; quickcheck! { fn schur(n: usize) -> bool { diff --git a/nalgebra-lapack/tests/linalg/svd.rs b/nalgebra-lapack/tests/linalg/svd.rs index 9ab7a99ef..9f15b83ae 100644 --- a/nalgebra-lapack/tests/linalg/svd.rs +++ b/nalgebra-lapack/tests/linalg/svd.rs @@ -1,5 +1,5 @@ -use nl::SVD; use na::{DMatrix, Matrix3x4}; +use nl::SVD; quickcheck!{ fn svd(m: DMatrix) -> bool { diff --git a/nalgebra-lapack/tests/linalg/symmetric_eigen.rs b/nalgebra-lapack/tests/linalg/symmetric_eigen.rs index a1ebdfa2a..42074ada0 100644 --- a/nalgebra-lapack/tests/linalg/symmetric_eigen.rs +++ b/nalgebra-lapack/tests/linalg/symmetric_eigen.rs @@ -1,7 +1,7 @@ use std::cmp; -use nl::SymmetricEigen; use na::{DMatrix, Matrix4}; +use nl::SymmetricEigen; quickcheck!{ fn symmetric_eigen(n: usize) -> bool { diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 5b17c1831..30f08af0b 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -79,7 +79,8 @@ where N: Scalar, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, -{} +{ +} // XXX: Bad name. /// Restricts the given number of rows to be equal. 
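// Illustrative usage only (not part of the patch): the LAPACK symmetric
// eigendecomposition reformatted in the hunks above. The `eigenvectors`
// field appears in the hunk; the constructor name `SymmetricEigen::new`
// and the `eigenvalues` field are assumptions.
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix3;
use nl::SymmetricEigen;

fn main() {
    let m = Matrix3::new(2.0, 1.0, 0.0,
                         1.0, 2.0, 1.0,
                         0.0, 1.0, 2.0);
    let eig = SymmetricEigen::new(m);

    // Reconstruct m as V * diag(lambda) * V^T.
    let recomposed = eig.eigenvectors
        * Matrix3::from_diagonal(&eig.eigenvalues)
        * eig.eigenvectors.transpose();
    assert!((recomposed - m).norm() < 1.0e-7);
}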
@@ -100,4 +101,5 @@ where N: Scalar, DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, -{} +{ +} diff --git a/src/base/blas.rs b/src/base/blas.rs index b4a5cfbe3..073f519ba 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -161,8 +161,7 @@ impl> Matri } impl> Matrix -where - N: Scalar + Zero + ClosedAdd + ClosedMul, +where N: Scalar + Zero + ClosedAdd + ClosedMul { /// The dot product between two vectors or matrices (seen as vectors). /// @@ -324,9 +323,7 @@ where } fn array_axpy(y: &mut [N], a: N, x: &[N], beta: N, stride1: usize, stride2: usize, len: usize) -where - N: Scalar + Zero + ClosedAdd + ClosedMul, -{ +where N: Scalar + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { let y = y.get_unchecked_mut(i * stride1); @@ -336,9 +333,7 @@ where } fn array_ax(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len: usize) -where - N: Scalar + Zero + ClosedAdd + ClosedMul, -{ +where N: Scalar + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { *y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2); @@ -576,8 +571,7 @@ where } impl> Matrix -where - N: Scalar + Zero + ClosedAdd + ClosedMul, +where N: Scalar + Zero + ClosedAdd + ClosedMul { /// Computes `self = alpha * x * y.transpose() + beta * self`. /// @@ -817,8 +811,7 @@ where } impl> Matrix -where - N: Scalar + Zero + ClosedAdd + ClosedMul, +where N: Scalar + Zero + ClosedAdd + ClosedMul { /// Computes `self = alpha * x * y.transpose() + beta * self`, where `self` is a **symmetric** /// matrix. @@ -876,8 +869,7 @@ where } impl> SquareMatrix -where - N: Scalar + Zero + One + ClosedAdd + ClosedMul, +where N: Scalar + Zero + One + ClosedAdd + ClosedMul { /// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`. /// diff --git a/src/base/cg.rs b/src/base/cg.rs index 39509f2f9..01be4e993 100644 --- a/src/base/cg.rs +++ b/src/base/cg.rs @@ -238,9 +238,7 @@ impl> SquareMatrix /// Computes in-place the transformation equal to `self` followed by an uniform scaling factor. #[inline] pub fn append_scaling_mut(&mut self, scaling: N) - where - D: DimNameSub, - { + where D: DimNameSub { let mut to_scale = self.fixed_rows_mut::>(0); to_scale *= scaling; } @@ -248,9 +246,7 @@ impl> SquareMatrix /// Computes in-place the transformation equal to an uniform scaling factor followed by `self`. #[inline] pub fn prepend_scaling_mut(&mut self, scaling: N) - where - D: DimNameSub, - { + where D: DimNameSub { let mut to_scale = self.fixed_columns_mut::>(0); to_scale *= scaling; } @@ -319,16 +315,16 @@ impl> SquareMatrix } impl> Transformation>> for MatrixN -where - DefaultAllocator: Allocator +where DefaultAllocator: Allocator + Allocator> - + Allocator, DimNameDiff>, + + Allocator, DimNameDiff> { #[inline] fn transform_vector( &self, v: &VectorN>, - ) -> VectorN> { + ) -> VectorN> + { let transform = self.fixed_slice::, DimNameDiff>(0, 0); let normalizer = self.fixed_slice::>(D::dim() - 1, 0); let n = normalizer.tr_dot(&v); diff --git a/src/base/constraint.rs b/src/base/constraint.rs index d9d7aafee..3bd0540b6 100644 --- a/src/base/constraint.rs +++ b/src/base/constraint.rs @@ -8,8 +8,7 @@ pub struct ShapeConstraint; /// Constraints `C1` and `R2` to be equivalent. pub trait AreMultipliable: DimEq {} -impl AreMultipliable for ShapeConstraint where - ShapeConstraint: DimEq +impl AreMultipliable for ShapeConstraint where ShapeConstraint: DimEq {} /// Constraints `D1` and `D2` to be equivalent. 
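// Illustrative usage only (not part of the patch): the BLAS-style kernels
// whose `where` clauses are condensed above (`dot`, `axpy`, `ger`), shown
// on plain nalgebra types.
extern crate nalgebra as na;

use na::{Matrix2, Vector2};

fn main() {
    let x = Vector2::new(1.0, 2.0);
    let mut y = Vector2::new(10.0, 20.0);

    assert_eq!(x.dot(&y), 50.0);             // 1*10 + 2*20

    y.axpy(2.0, &x, 1.0);                    // y = 2*x + 1*y
    assert_eq!(y, Vector2::new(12.0, 24.0));

    let mut m = Matrix2::zeros();
    m.ger(1.0, &x, &y, 0.0);                 // m = 1 * x * y^T + 0 * m
    assert_eq!(m, Matrix2::new(12.0, 24.0,
                               24.0, 48.0));
}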
diff --git a/src/base/construction.rs b/src/base/construction.rs index 652b3eb60..af3714138 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -26,8 +26,7 @@ use base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vector, Ve * */ impl MatrixMN -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. @@ -55,18 +54,14 @@ where /// Creates a matrix with all its elements set to 0. #[inline] pub fn zeros_generic(nrows: R, ncols: C) -> Self - where - N: Zero, - { + where N: Zero { Self::from_element_generic(nrows, ncols, N::zero()) } /// Creates a matrix with all its elements filled by an iterator. #[inline] pub fn from_iterator_generic(nrows: R, ncols: C, iter: I) -> Self - where - I: IntoIterator, - { + where I: IntoIterator { Self::from_data(DefaultAllocator::allocate_from_iterator(nrows, ncols, iter)) } @@ -105,9 +100,7 @@ where /// coordinates. #[inline] pub fn from_fn_generic(nrows: R, ncols: C, mut f: F) -> Self - where - F: FnMut(usize, usize) -> N, - { + where F: FnMut(usize, usize) -> N { let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) }; for j in 0..ncols.value() { @@ -125,9 +118,7 @@ where /// to the identity matrix. All other entries are set to zero. #[inline] pub fn identity_generic(nrows: R, ncols: C) -> Self - where - N: Zero + One, - { + where N: Zero + One { Self::from_diagonal_element_generic(nrows, ncols, N::one()) } @@ -137,9 +128,7 @@ where /// to the identity matrix. All other entries are set to zero. #[inline] pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: N) -> Self - where - N: Zero + One, - { + where N: Zero + One { let mut res = Self::zeros_generic(nrows, ncols); for i in 0..::min(nrows.value(), ncols.value()) { @@ -155,9 +144,7 @@ where /// Panics if `elts.len()` is larger than the minimum among `nrows` and `ncols`. #[inline] pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[N]) -> Self - where - N: Zero, - { + where N: Zero { let mut res = Self::zeros_generic(nrows, ncols); assert!( elts.len() <= ::min(nrows.value(), ncols.value()), @@ -177,9 +164,7 @@ where /// not have the same dimensions. #[inline] pub fn from_rows(rows: &[Matrix]) -> Self - where - SB: Storage, - { + where SB: Storage { assert!(rows.len() > 0, "At least one row must be given."); let nrows = R::try_to_usize().unwrap_or(rows.len()); let ncols = rows[0].len(); @@ -207,9 +192,7 @@ where /// columns do not have the same dimensions. #[inline] pub fn from_columns(columns: &[Vector]) -> Self - where - SB: Storage, - { + where SB: Storage { assert!(columns.len() > 0, "At least one column must be given."); let ncols = C::try_to_usize().unwrap_or(columns.len()); let nrows = columns[0].len(); @@ -235,9 +218,7 @@ where #[inline] #[cfg(feature = "std")] pub fn new_random_generic(nrows: R, ncols: C) -> Self - where - Standard: Distribution, - { + where Standard: Distribution { Self::from_fn_generic(nrows, ncols, |_, _| rand::random()) } @@ -248,7 +229,8 @@ where ncols: C, distribution: &mut Distr, rng: &mut G, - ) -> Self { + ) -> Self + { Self::from_fn_generic(nrows, ncols, |_, _| distribution.sample(rng)) } } @@ -261,9 +243,7 @@ where /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. 
#[inline] pub fn from_diagonal>(diag: &Vector) -> Self - where - N: Zero, - { + where N: Zero { let (dim, _) = diag.data.shape(); let mut res = Self::zeros_generic(dim, dim); @@ -703,9 +683,7 @@ where /// The column vector with a 1 as its first component, and zero elsewhere. #[inline] pub fn x() -> Self - where - R::Value: Cmp, - { + where R::Value: Cmp { let mut res = Self::zeros(); unsafe { *res.vget_unchecked_mut(0) = N::one(); @@ -717,9 +695,7 @@ where /// The column vector with a 1 as its second component, and zero elsewhere. #[inline] pub fn y() -> Self - where - R::Value: Cmp, - { + where R::Value: Cmp { let mut res = Self::zeros(); unsafe { *res.vget_unchecked_mut(1) = N::one(); @@ -731,9 +707,7 @@ where /// The column vector with a 1 as its third component, and zero elsewhere. #[inline] pub fn z() -> Self - where - R::Value: Cmp, - { + where R::Value: Cmp { let mut res = Self::zeros(); unsafe { *res.vget_unchecked_mut(2) = N::one(); @@ -745,9 +719,7 @@ where /// The column vector with a 1 as its fourth component, and zero elsewhere. #[inline] pub fn w() -> Self - where - R::Value: Cmp, - { + where R::Value: Cmp { let mut res = Self::zeros(); unsafe { *res.vget_unchecked_mut(3) = N::one(); @@ -759,9 +731,7 @@ where /// The column vector with a 1 as its fifth component, and zero elsewhere. #[inline] pub fn a() -> Self - where - R::Value: Cmp, - { + where R::Value: Cmp { let mut res = Self::zeros(); unsafe { *res.vget_unchecked_mut(4) = N::one(); @@ -773,9 +743,7 @@ where /// The column vector with a 1 as its sixth component, and zero elsewhere. #[inline] pub fn b() -> Self - where - R::Value: Cmp, - { + where R::Value: Cmp { let mut res = Self::zeros(); unsafe { *res.vget_unchecked_mut(5) = N::one(); @@ -787,54 +755,42 @@ where /// The unit column vector with a 1 as its first component, and zero elsewhere. #[inline] pub fn x_axis() -> Unit - where - R::Value: Cmp, - { + where R::Value: Cmp { Unit::new_unchecked(Self::x()) } /// The unit column vector with a 1 as its second component, and zero elsewhere. #[inline] pub fn y_axis() -> Unit - where - R::Value: Cmp, - { + where R::Value: Cmp { Unit::new_unchecked(Self::y()) } /// The unit column vector with a 1 as its third component, and zero elsewhere. #[inline] pub fn z_axis() -> Unit - where - R::Value: Cmp, - { + where R::Value: Cmp { Unit::new_unchecked(Self::z()) } /// The unit column vector with a 1 as its fourth component, and zero elsewhere. #[inline] pub fn w_axis() -> Unit - where - R::Value: Cmp, - { + where R::Value: Cmp { Unit::new_unchecked(Self::w()) } /// The unit column vector with a 1 as its fifth component, and zero elsewhere. #[inline] pub fn a_axis() -> Unit - where - R::Value: Cmp, - { + where R::Value: Cmp { Unit::new_unchecked(Self::a()) } /// The unit column vector with a 1 as its sixth component, and zero elsewhere. 
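// Illustrative usage only (not part of the patch): a few of the
// constructors whose `where` clauses are condensed above, used through the
// concrete `Matrix3`, `Vector3` and `DMatrix` aliases.
extern crate nalgebra as na;

use na::{DMatrix, Matrix3, Vector3};

fn main() {
    // Identity and diagonal constructors.
    let i = Matrix3::<f64>::identity();
    let d = Matrix3::from_diagonal(&Vector3::new(1.0, 2.0, 3.0));
    assert_eq!(i * d, d);

    // Canonical axes are unit vectors; `unwrap` recovers the plain vector.
    let x = Vector3::<f64>::x_axis();
    assert_eq!(d * x.unwrap(), Vector3::new(1.0, 0.0, 0.0));

    // Dynamically-sized construction from a closure over (row, column).
    let m = DMatrix::from_fn(2, 3, |i, j| (i + 10 * j) as f64);
    assert_eq!(m[(1, 2)], 21.0);
}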
#[inline] pub fn b_axis() -> Unit - where - R::Value: Cmp, - { + where R::Value: Cmp { Unit::new_unchecked(Self::b()) } } diff --git a/src/base/construction_slice.rs b/src/base/construction_slice.rs index d7d2d1595..419720bf3 100644 --- a/src/base/construction_slice.rs +++ b/src/base/construction_slice.rs @@ -22,7 +22,8 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> ncols: C, rstride: RStride, cstride: CStride, - ) -> Self { + ) -> Self + { let data = SliceStorage::from_raw_parts( data.as_ptr().offset(start as isize), (nrows, ncols), @@ -42,7 +43,8 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> ncols: C, rstride: RStride, cstride: CStride, - ) -> Self { + ) -> Self + { // NOTE: The assertion implements the following formula, but without subtractions to avoid // underflow panics: // len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1 @@ -73,7 +75,8 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> ncols: C, rstride: RStride, cstride: CStride, - ) -> Self { + ) -> Self + { let data = SliceStorageMut::from_raw_parts( data.as_mut_ptr().offset(start as isize), (nrows, ncols), @@ -93,7 +96,8 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> ncols: C, rstride: RStride, cstride: CStride, - ) -> Self { + ) -> Self + { // NOTE: The assertion implements the following formula, but without subtractions to avoid // underflow panics: // len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1 @@ -120,7 +124,8 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { start: usize, nrows: R, ncols: C, - ) -> Self { + ) -> Self + { Self::from_slice_with_strides_generic_unchecked(data, start, nrows, ncols, U1, nrows) } @@ -145,7 +150,8 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { start: usize, nrows: R, ncols: C, - ) -> Self { + ) -> Self + { Self::from_slice_with_strides_generic_unchecked(data, start, nrows, ncols, U1, nrows) } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index e097f3a2b..6bdbd30de 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -54,7 +54,8 @@ where nrows: R, ncols: C, iter: I, - ) -> Self::Buffer { + ) -> Self::Buffer + { let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols) }; let mut count = 0; @@ -93,7 +94,8 @@ impl Allocator for DefaultAllocator { nrows: Dynamic, ncols: C, iter: I, - ) -> Self::Buffer { + ) -> Self::Buffer + { let it = iter.into_iter(); let res: Vec = it.collect(); assert!(res.len() == nrows.value() * ncols.value(), @@ -123,7 +125,8 @@ impl Allocator for DefaultAllocator { nrows: R, ncols: Dynamic, iter: I, - ) -> Self::Buffer { + ) -> Self::Buffer + { let it = iter.into_iter(); let res: Vec = it.collect(); assert!(res.len() == nrows.value() * ncols.value(), @@ -154,7 +157,8 @@ where rto: RTo, cto: CTo, buf: >::Buffer, - ) -> MatrixArray { + ) -> MatrixArray + { let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); @@ -182,7 +186,8 @@ where rto: Dynamic, cto: CTo, buf: MatrixArray, - ) -> MatrixVec { + ) -> MatrixVec + { let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); @@ -210,7 +215,8 @@ where rto: RTo, cto: Dynamic, buf: MatrixArray, - ) -> MatrixVec { + ) -> MatrixVec + { let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); @@ -233,7 +239,8 @@ impl Reallocator, - ) -> MatrixVec { + ) -> MatrixVec + { let new_buf = buf.resize(rto.value() * cto.value()); MatrixVec::new(rto, cto, 
new_buf) } @@ -248,7 +255,8 @@ impl Reallocator, - ) -> MatrixVec { + ) -> MatrixVec + { let new_buf = buf.resize(rto.value() * cto.value()); MatrixVec::new(rto, cto, new_buf) } @@ -263,7 +271,8 @@ impl Reallocator, - ) -> MatrixVec { + ) -> MatrixVec + { let new_buf = buf.resize(rto.value() * cto.value()); MatrixVec::new(rto, cto, new_buf) } @@ -278,7 +287,8 @@ impl Reallocator, - ) -> MatrixVec { + ) -> MatrixVec + { let new_buf = buf.resize(rto.value() * cto.value()); MatrixVec::new(rto, cto, new_buf) } diff --git a/src/base/dimension.rs b/src/base/dimension.rs index d694ec392..1d79d48e2 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -30,9 +30,7 @@ impl Dynamic { #[cfg(feature = "serde-serialize")] impl Serialize for Dynamic { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { + where S: Serializer { self.value.serialize(serializer) } } @@ -40,9 +38,7 @@ impl Serialize for Dynamic { #[cfg(feature = "serde-serialize")] impl<'de> Deserialize<'de> for Dynamic { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { + where D: Deserializer<'de> { usize::deserialize(deserializer).map(|x| Dynamic { value: x }) } } @@ -368,7 +364,8 @@ impl< G: Bit + Any + Debug + Copy + PartialEq + Send + Sync, > IsNotStaticOne for UInt, A>, B>, C>, D>, E>, F>, G> -{} +{ +} impl NamedDim for UInt @@ -409,4 +406,5 @@ impl IsNotStaticOne for UInt -{} +{ +} diff --git a/src/base/edition.rs b/src/base/edition.rs index 649d4cdae..0ae9c35fc 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -16,9 +16,7 @@ impl> Matrix { /// Extracts the upper triangular part of this matrix (including the diagonal). #[inline] pub fn upper_triangle(&self) -> MatrixMN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let mut res = self.clone_owned(); res.fill_lower_triangle(N::zero(), 1); @@ -28,9 +26,7 @@ impl> Matrix { /// Extracts the upper triangular part of this matrix (including the diagonal). #[inline] pub fn lower_triangle(&self) -> MatrixMN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let mut res = self.clone_owned(); res.fill_upper_triangle(N::zero(), 1); @@ -50,9 +46,7 @@ impl> Matrix { /// Fills `self` with the identity matrix. #[inline] pub fn fill_with_identity(&mut self) - where - N: Zero + One, - { + where N: Zero + One { self.fill(N::zero()); self.fill_diagonal(N::one()); } @@ -551,9 +545,7 @@ impl> Matrix { /// rows and/or columns than `self`, then the extra rows or columns are filled with `val`. #[cfg(any(feature = "std", feature = "alloc"))] pub fn resize(self, new_nrows: usize, new_ncols: usize, val: N) -> DMatrix - where - DefaultAllocator: Reallocator, - { + where DefaultAllocator: Reallocator { self.resize_generic(Dynamic::new(new_nrows), Dynamic::new(new_ncols), val) } @@ -562,9 +554,7 @@ impl> Matrix { /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more /// rows and/or columns than `self`, then the extra rows or columns are filled with `val`. 
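// Illustrative usage only (not part of the patch): the triangular
// extraction and resizing helpers reformatted above, exercised on small
// matrices.
extern crate nalgebra as na;

use na::{DMatrix, Matrix3};

fn main() {
    let m = Matrix3::new(1.0, 2.0, 3.0,
                         4.0, 5.0, 6.0,
                         7.0, 8.0, 9.0);

    // Both triangles include the diagonal, so it is counted twice in the sum.
    let upper = m.upper_triangle();
    let lower = m.lower_triangle();
    assert_eq!(upper + lower - Matrix3::from_diagonal(&m.diagonal()), m);

    // Growing a dynamic matrix fills the new entries with the given value.
    let d = DMatrix::from_element(2, 2, 1.0);
    let resized = d.resize(3, 3, 0.0);
    assert_eq!(resized.nrows(), 3);
    assert_eq!(resized[(2, 2)], 0.0);
}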
pub fn fixed_resize(self, val: N) -> MatrixMN - where - DefaultAllocator: Reallocator, - { + where DefaultAllocator: Reallocator { self.resize_generic(R2::name(), C2::name(), val) } @@ -642,7 +632,8 @@ unsafe fn compress_rows( ncols: usize, i: usize, nremove: usize, -) { +) +{ let new_nrows = nrows - nremove; if new_nrows == 0 || ncols == 0 { @@ -681,7 +672,8 @@ unsafe fn extend_rows( ncols: usize, i: usize, ninsert: usize, -) { +) +{ let new_nrows = nrows + ninsert; if new_nrows == 0 || ncols == 0 { diff --git a/src/base/helper.rs b/src/base/helper.rs index de601fb65..ef85a477b 100644 --- a/src/base/helper.rs +++ b/src/base/helper.rs @@ -18,9 +18,7 @@ pub fn reject bool, T: Arbitrary>(g: &mut G, f: F) -> T #[doc(hidden)] #[inline] pub fn reject_rand bool, T>(g: &mut G, f: F) -> T -where - Standard: Distribution, -{ +where Standard: Distribution { use std::iter; iter::repeat(()).map(|_| g.gen()).find(f).unwrap() } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index e88251212..784ae9a23 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -99,9 +99,7 @@ where S: Serialize, { fn serialize(&self, serializer: T) -> Result - where - T: Serializer, - { + where T: Serializer { self.data.serialize(serializer) } } @@ -115,9 +113,7 @@ where S: Deserialize<'de>, { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { + where D: Deserializer<'de> { S::deserialize(deserializer).map(|x| Matrix { data: x, _phantoms: PhantomData, @@ -319,9 +315,7 @@ impl> Matrix { /// Moves this matrix into one that owns its data. #[inline] pub fn into_owned(self) -> MatrixMN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { Matrix::from_data(self.data.into_owned()) } @@ -355,9 +349,7 @@ impl> Matrix { /// Clones this matrix to one that owns its data. #[inline] pub fn clone_owned(&self) -> MatrixMN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { Matrix::from_data(self.data.clone_owned()) } @@ -393,9 +385,7 @@ impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] pub fn map N2>(&self, mut f: F) -> MatrixMN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let (nrows, ncols) = self.data.shape(); let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) }; @@ -541,9 +531,7 @@ impl> Matrix { /// Transposes `self`. #[inline] pub fn transpose(&self) -> MatrixMN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let (nrows, ncols) = self.data.shape(); unsafe { @@ -666,9 +654,7 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on it. #[inline] pub fn apply N>(&mut self, mut f: F) - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let (nrows, ncols) = self.shape(); for j in 0..ncols { @@ -765,9 +751,7 @@ impl, R, C>> Matrix, R /// The conjugate transposition of `self`. #[inline] pub fn conjugate_transpose(&self) -> MatrixMN, C, R> - where - DefaultAllocator: Allocator, C, R>, - { + where DefaultAllocator: Allocator, C, R> { let (nrows, ncols) = self.data.shape(); unsafe { @@ -808,9 +792,7 @@ impl> SquareMatrix { /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. #[inline] pub fn diagonal(&self) -> VectorN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { assert!( self.is_square(), "Unable to get the diagonal of a non-square matrix." 
@@ -831,9 +813,7 @@ impl> SquareMatrix { /// Computes a trace of a square matrix, i.e., the sum of its diagonal elements. #[inline] pub fn trace(&self) -> N - where - N: Ring, - { + where N: Ring { assert!( self.is_square(), "Cannot compute the trace of non-square matrix." @@ -855,9 +835,7 @@ impl, S: Storage> Vector { /// coordinates. #[inline] pub fn to_homogeneous(&self) -> VectorN> - where - DefaultAllocator: Allocator>, - { + where DefaultAllocator: Allocator> { let len = self.len(); let hnrows = DimSum::::from_usize(len + 1); let mut res = unsafe { VectorN::::new_uninitialized_generic(hnrows, U1) }; @@ -923,7 +901,8 @@ where other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon, - ) -> bool { + ) -> bool + { self.relative_eq(other, epsilon, max_relative) } } @@ -1040,7 +1019,8 @@ impl Eq for Matrix where N: Scalar + Eq, S: Storage, -{} +{ +} impl PartialEq for Matrix where @@ -1220,8 +1200,7 @@ impl> Matrix { } impl> Vector -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`. #[inline] @@ -1311,18 +1290,14 @@ impl> Matrix { /// Returns a normalized version of this matrix. #[inline] pub fn normalize(&self) -> MatrixMN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { self / self.norm() } /// Returns a normalized version of this matrix unless its norm as smaller or equal to `eps`. #[inline] pub fn try_normalize(&self, min_norm: N) -> Option> - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let n = self.norm(); if n <= min_norm { @@ -1446,7 +1421,8 @@ where other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon, - ) -> bool { + ) -> bool + { self.as_ref() .relative_eq(other.as_ref(), epsilon, max_relative) } diff --git a/src/base/matrix_alga.rs b/src/base/matrix_alga.rs index 8593474e0..7c454986b 100644 --- a/src/base/matrix_alga.rs +++ b/src/base/matrix_alga.rs @@ -146,8 +146,7 @@ where } impl NormedSpace for MatrixMN -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn norm_squared(&self) -> N { @@ -181,8 +180,7 @@ where } impl InnerSpace for MatrixMN -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type Real = N; @@ -202,8 +200,7 @@ where // − use `x()` instead of `::canonical_basis_element` // − use `::new(x, y, z)` instead of `::from_slice` impl FiniteDimInnerSpace for MatrixMN -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn orthonormalize(vs: &mut [MatrixMN]) -> usize { @@ -236,9 +233,7 @@ where #[inline] fn orthonormal_subspace_basis(vs: &[Self], mut f: F) - where - F: FnMut(&Self) -> bool, - { + where F: FnMut(&Self) -> bool { // FIXME: is this necessary? 
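// Illustrative usage only (not part of the patch): a quick tour of the
// matrix and vector methods whose signatures are reformatted above
// (`map`, `transpose`, `trace`, `to_homogeneous`, `try_normalize`).
extern crate nalgebra as na;

use na::{Matrix2, Vector2, Vector3};

fn main() {
    let m = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);

    assert_eq!(m.map(|e| e * 2.0), Matrix2::new(2.0, 4.0, 6.0, 8.0));
    assert_eq!(m.transpose(), Matrix2::new(1.0, 3.0, 2.0, 4.0));
    assert_eq!(m.trace(), 5.0);

    // A vector gains a trailing 0 in homogeneous coordinates.
    let v = Vector2::new(1.0, 2.0);
    assert_eq!(v.to_homogeneous(), Vector3::new(1.0, 2.0, 0.0));

    // `try_normalize` refuses to normalize (near-)zero vectors.
    assert!(Vector2::new(0.0, 0.0).try_normalize(1.0e-12).is_none());
    assert!((v.try_normalize(1.0e-12).unwrap().norm() - 1.0).abs() < 1.0e-12);
}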
assert!( vs.len() <= Self::dimension(), diff --git a/src/base/matrix_array.rs b/src/base/matrix_array.rs index fc81176cb..deaa9fc31 100644 --- a/src/base/matrix_array.rs +++ b/src/base/matrix_array.rs @@ -107,7 +107,8 @@ where R::Value: Mul, Prod: ArrayLength, GenericArray>: Copy, -{} +{ +} impl Clone for MatrixArray where @@ -132,7 +133,8 @@ where C: DimName, R::Value: Mul, Prod: ArrayLength, -{} +{ +} impl PartialEq for MatrixArray where @@ -182,17 +184,13 @@ where #[inline] fn into_owned(self) -> Owned - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { self } #[inline] fn clone_owned(&self) -> Owned - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let it = self.iter().cloned(); DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it) @@ -232,7 +230,8 @@ where R::Value: Mul, Prod: ArrayLength, DefaultAllocator: Allocator, -{} +{ +} unsafe impl ContiguousStorageMut for MatrixArray where @@ -242,7 +241,8 @@ where R::Value: Mul, Prod: ArrayLength, DefaultAllocator: Allocator, -{} +{ +} /* * @@ -260,9 +260,7 @@ where Prod: ArrayLength, { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { + where S: Serializer { let mut serializer = serializer.serialize_seq(Some(R::dim() * C::dim()))?; for e in self.iter() { @@ -283,9 +281,7 @@ where Prod: ArrayLength, { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'a>, - { + where D: Deserializer<'a> { deserializer.deserialize_seq(MatrixArrayVisitor::new()) } } @@ -330,9 +326,7 @@ where #[inline] fn visit_seq(self, mut visitor: V) -> Result, V::Error> - where - V: SeqAccess<'a>, - { + where V: SeqAccess<'a> { let mut out: Self::Value = unsafe { mem::uninitialized() }; let mut curr = 0; diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 6ab3c6b68..8027c49d9 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -91,7 +91,8 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy for SliceStorage<'a, N, R, C, RStride, CStride> -{} +{ +} impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone for SliceStorage<'a, N, R, C, RStride, CStride> @@ -206,7 +207,8 @@ impl> Matrix { start: (usize, usize), shape: (usize, usize), steps: (usize, usize), - ) { + ) + { let my_shape = self.shape(); // NOTE: we don't do any subtraction to avoid underflow for zero-sized matrices. // @@ -803,7 +805,8 @@ impl> Matrix { pub fn rows_range>( &self, rows: RowRange, - ) -> MatrixSlice { + ) -> MatrixSlice + { self.slice_range(rows, ..) } @@ -812,7 +815,8 @@ impl> Matrix { pub fn columns_range>( &self, cols: ColRange, - ) -> MatrixSlice { + ) -> MatrixSlice + { self.slice_range(.., cols) } } @@ -841,7 +845,8 @@ impl> Matrix { pub fn rows_range_mut>( &mut self, rows: RowRange, - ) -> MatrixSliceMut { + ) -> MatrixSliceMut + { self.slice_range_mut(rows, ..) 
} @@ -850,7 +855,8 @@ impl> Matrix { pub fn columns_range_mut>( &mut self, cols: ColRange, - ) -> MatrixSliceMut { + ) -> MatrixSliceMut + { self.slice_range_mut(.., cols) } } diff --git a/src/base/matrix_vec.rs b/src/base/matrix_vec.rs index 90bdfe283..73b77609a 100644 --- a/src/base/matrix_vec.rs +++ b/src/base/matrix_vec.rs @@ -94,8 +94,7 @@ impl Deref for MatrixVec { * */ unsafe impl Storage for MatrixVec -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type RStride = U1; type CStride = Dynamic; @@ -122,17 +121,13 @@ where #[inline] fn into_owned(self) -> Owned - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { self } #[inline] fn clone_owned(&self) -> Owned - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { self.clone() } @@ -143,8 +138,7 @@ where } unsafe impl Storage for MatrixVec -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type RStride = U1; type CStride = R; @@ -171,17 +165,13 @@ where #[inline] fn into_owned(self) -> Owned - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { self } #[inline] fn clone_owned(&self) -> Owned - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { self.clone() } @@ -197,8 +187,7 @@ where * */ unsafe impl StorageMut for MatrixVec -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn ptr_mut(&mut self) -> *mut N { @@ -211,17 +200,14 @@ where } } -unsafe impl ContiguousStorage for MatrixVec where - DefaultAllocator: Allocator +unsafe impl ContiguousStorage for MatrixVec where DefaultAllocator: Allocator {} -unsafe impl ContiguousStorageMut for MatrixVec where - DefaultAllocator: Allocator +unsafe impl ContiguousStorageMut for MatrixVec where DefaultAllocator: Allocator {} unsafe impl StorageMut for MatrixVec -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn ptr_mut(&mut self) -> *mut N { @@ -249,10 +235,8 @@ impl Abomonation for MatrixVec { } } -unsafe impl ContiguousStorage for MatrixVec where - DefaultAllocator: Allocator +unsafe impl ContiguousStorage for MatrixVec where DefaultAllocator: Allocator {} -unsafe impl ContiguousStorageMut for MatrixVec where - DefaultAllocator: Allocator +unsafe impl ContiguousStorageMut for MatrixVec where DefaultAllocator: Allocator {} diff --git a/src/base/ops.rs b/src/base/ops.rs index a83d51125..14ec98f01 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -724,9 +724,7 @@ impl> Matrix MatrixMN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let mut res = self.clone_owned(); res.add_scalar_mut(rhs); res @@ -735,9 +733,7 @@ impl> Matrix, - { + where S: StorageMut { for e in self.iter_mut() { *e += rhs } diff --git a/src/base/properties.rs b/src/base/properties.rs index 266d22b0c..7e5015758 100644 --- a/src/base/properties.rs +++ b/src/base/properties.rs @@ -100,8 +100,7 @@ impl> Matrix { } impl> SquareMatrix -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Checks that this matrix is orthogonal and has a determinant equal to 1. #[inline] diff --git a/src/base/storage.rs b/src/base/storage.rs index bf57242e1..b96f69d08 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -105,13 +105,11 @@ pub unsafe trait Storage: Debug + Sized { /// Builds a matrix data storage that does not contain any reference. 
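// Illustrative usage only (not part of the patch): the range-based slicing
// helpers and `add_scalar` touched above, shown on a small dynamic matrix.
extern crate nalgebra as na;

use na::DMatrix;

fn main() {
    let m = DMatrix::from_fn(4, 4, |i, j| (i * 4 + j) as f64);

    // Immutable range-based slices.
    let top = m.rows_range(0..2);     // rows 0 and 1
    let right = m.columns_range(2..); // columns 2 and 3
    assert_eq!(top.nrows(), 2);
    assert_eq!(right.ncols(), 2);

    // `add_scalar` returns a new matrix with the scalar added to every entry.
    let shifted = m.add_scalar(10.0);
    assert_eq!(shifted[(0, 0)], 10.0);
    assert_eq!(shifted[(3, 3)], 25.0);
}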
fn into_owned(self) -> Owned - where - DefaultAllocator: Allocator; + where DefaultAllocator: Allocator; /// Clones this data storage to one that does not contain any reference. fn clone_owned(&self) -> Owned - where - DefaultAllocator: Allocator; + where DefaultAllocator: Allocator; } /// Trait implemented by matrix data storage that can provide a mutable access to its elements. diff --git a/src/base/unit.rs b/src/base/unit.rs index 2b466c045..1c2fa1e29 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -25,9 +25,7 @@ pub struct Unit { #[cfg(feature = "serde-serialize")] impl Serialize for Unit { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { + where S: Serializer { self.value.serialize(serializer) } } @@ -35,9 +33,7 @@ impl Serialize for Unit { #[cfg(feature = "serde-serialize")] impl<'de, T: Deserialize<'de>> Deserialize<'de> for Unit { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { + where D: Deserializer<'de> { T::deserialize(deserializer).map(|x| Unit { value: x }) } } @@ -143,8 +139,7 @@ impl AsRef for Unit { * */ impl SubsetOf for Unit -where - T::Field: RelativeEq, +where T::Field: RelativeEq { #[inline] fn to_superset(&self) -> T { diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index a699d867b..da06805ba 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -13,15 +13,13 @@ use num_complex::Complex; /// A random orthogonal matrix. #[derive(Clone, Debug)] pub struct RandomOrthogonal -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { m: MatrixN, } impl RandomOrthogonal -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Retrieve the generated matrix. pub fn unwrap(self) -> MatrixN { diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index a49f7b804..c78d1fd14 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -13,15 +13,13 @@ use debug::RandomOrthogonal; /// A random, well-conditioned, symmetric definite-positive matrix. #[derive(Clone, Debug)] pub struct RandomSDP -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { m: MatrixN, } impl RandomSDP -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Retrieve the generated matrix. pub fn unwrap(self) -> MatrixN { diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 94bf9abe7..235049d8f 100644 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -26,23 +26,18 @@ use geometry::{Point, Translation}; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "R: Serialize, + serde(bound(serialize = "R: Serialize, DefaultAllocator: Allocator, - Owned: Serialize" - )) + Owned: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "R: Deserialize<'de>, + serde(bound(deserialize = "R: Deserialize<'de>, DefaultAllocator: Allocator, - Owned: Deserialize<'de>" - )) + Owned: Deserialize<'de>")) )] pub struct Isometry -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// The pure rotational part of this isometry. 
pub rotation: R, @@ -97,11 +92,11 @@ impl> + Copy> Copy for Isome where DefaultAllocator: Allocator, Owned: Copy, -{} +{ +} impl> + Clone> Clone for Isometry -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn clone(&self) -> Self { @@ -110,8 +105,7 @@ where } impl>> Isometry -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Creates a new isometry from its rotational and translational parts. #[inline] @@ -175,8 +169,7 @@ where // This is OK since all constructors of the isometry enforce the Rotation bound already (and // explicit struct construction is prevented by the dummy ZST field). impl Isometry -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Converts this isometry into its equivalent homogeneous transformation matrix. #[inline] @@ -198,7 +191,8 @@ impl Eq for Isometry where R: Rotation> + Eq, DefaultAllocator: Allocator, -{} +{ +} impl PartialEq for Isometry where @@ -248,7 +242,8 @@ where other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon, - ) -> bool { + ) -> bool + { self.translation .relative_eq(&other.translation, epsilon, max_relative) && self diff --git a/src/geometry/isometry_construction.rs b/src/geometry/isometry_construction.rs index 9d6c88b7c..a52996886 100644 --- a/src/geometry/isometry_construction.rs +++ b/src/geometry/isometry_construction.rs @@ -20,8 +20,7 @@ use geometry::{ }; impl>> Isometry -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Creates a new identity isometry. #[inline] @@ -39,8 +38,7 @@ where } impl>> One for Isometry -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Creates a new identity isometry. #[inline] diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index d76e8c5a2..d7e519cc0 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -46,9 +46,7 @@ impl PartialEq for Orthographic3 { #[cfg(feature = "serde-serialize")] impl Serialize for Orthographic3 { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { + where S: Serializer { self.matrix.serialize(serializer) } } @@ -56,9 +54,7 @@ impl Serialize for Orthographic3 { #[cfg(feature = "serde-serialize")] impl<'a, N: Real + Deserialize<'a>> Deserialize<'a> for Orthographic3 { fn deserialize(deserializer: Des) -> Result - where - Des: Deserializer<'a>, - { + where Des: Deserializer<'a> { let matrix = Matrix4::::deserialize(deserializer)?; Ok(Orthographic3::from_matrix_unchecked(matrix)) @@ -238,9 +234,7 @@ impl Orthographic3 { /// Projects a vector. Faster than matrix multiplication. 
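// Illustrative usage only (not part of the patch): how the `Isometry` type
// reformatted above composes a rotation with a translation. The
// `Isometry3::new(translation, axisangle)` constructor is assumed;
// `to_homogeneous` is the method shown in this hunk.
extern crate nalgebra as na;

use na::{Isometry3, Point3, Vector3};
use std::f64::consts::FRAC_PI_2;

fn main() {
    // Rotate 90 degrees about the z axis, then translate by (1, 0, 0).
    let iso = Isometry3::new(Vector3::new(1.0, 0.0, 0.0), Vector3::z() * FRAC_PI_2);

    let p = iso * Point3::new(1.0, 0.0, 0.0);
    assert!((p - Point3::new(1.0, 1.0, 0.0)).norm() < 1.0e-12);

    // The homogeneous form stores the translation in its last column.
    let h = iso.to_homogeneous();
    assert_eq!(h[(0, 3)], 1.0);
}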
#[inline] pub fn project_vector(&self, p: &Vector) -> Vector3 - where - SB: Storage, - { + where SB: Storage { Vector3::new( self.matrix[(0, 0)] * p[0], self.matrix[(1, 1)] * p[1], @@ -325,8 +319,7 @@ impl Orthographic3 { } impl Distribution> for Standard -where - Standard: Distribution, +where Standard: Distribution { fn sample(&self, r: &mut R) -> Orthographic3 { let left = r.gen(); @@ -342,8 +335,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Orthographic3 -where - Matrix4: Send, +where Matrix4: Send { fn arbitrary(g: &mut G) -> Self { let left = Arbitrary::arbitrary(g); diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 1537b3276..77642554c 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -47,9 +47,7 @@ impl PartialEq for Perspective3 { #[cfg(feature = "serde-serialize")] impl Serialize for Perspective3 { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { + where S: Serializer { self.matrix.serialize(serializer) } } @@ -57,9 +55,7 @@ impl Serialize for Perspective3 { #[cfg(feature = "serde-serialize")] impl<'a, N: Real + Deserialize<'a>> Deserialize<'a> for Perspective3 { fn deserialize(deserializer: Des) -> Result - where - Des: Deserializer<'a>, - { + where Des: Deserializer<'a> { let matrix = Matrix4::::deserialize(deserializer)?; Ok(Perspective3::from_matrix_unchecked(matrix)) @@ -207,9 +203,7 @@ impl Perspective3 { /// Projects a vector. Faster than matrix multiplication. #[inline] pub fn project_vector(&self, p: &Vector) -> Vector3 - where - SB: Storage, - { + where SB: Storage { let inverse_denom = -N::one() / p[2]; Vector3::new( self.matrix[(0, 0)] * p[0] * inverse_denom, @@ -260,8 +254,7 @@ impl Perspective3 { } impl Distribution> for Standard -where - Standard: Distribution, +where Standard: Distribution { fn sample<'a, R: Rng + ?Sized>(&self, r: &'a mut R) -> Perspective3 { let znear = r.gen(); diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 4f26d632a..0530c0c46 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -21,8 +21,7 @@ use base::{DefaultAllocator, Scalar, VectorN}; #[repr(C)] #[derive(Debug)] pub struct Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// The coordinates of this point, i.e., the shift from the origin. pub coords: VectorN, @@ -42,7 +41,8 @@ impl Copy for Point where DefaultAllocator: Allocator, >::Buffer: Copy, -{} +{ +} impl Clone for Point where @@ -62,9 +62,7 @@ where >::Buffer: Serialize, { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { + where S: Serializer { self.coords.serialize(serializer) } } @@ -76,9 +74,7 @@ where >::Buffer: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result - where - Des: Deserializer<'a>, - { + where Des: Deserializer<'a> { let coords = VectorN::::deserialize(deserializer)?; Ok(Point::from_coordinates(coords)) @@ -107,8 +103,7 @@ where } impl Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Clones this point into one that owns its data. 
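// Illustrative usage only (not part of the patch): the two projection
// types reformatted above, applied with the `project_vector` method shown
// in these hunks. The constructor argument orders, (left, right, bottom,
// top, znear, zfar) and (aspect, fovy, znear, zfar), are assumptions.
extern crate nalgebra as na;

use na::{Orthographic3, Perspective3, Vector3};
use std::f64::consts::FRAC_PI_4;

fn main() {
    let ortho = Orthographic3::new(-1.0, 1.0, -1.0, 1.0, 0.1, 100.0);
    let persp = Perspective3::new(16.0 / 9.0, FRAC_PI_4, 0.1, 100.0);

    let v = Vector3::new(0.5, 0.5, -1.0);
    println!("orthographic: {}", ortho.project_vector(&v));
    println!("perspective:  {}", persp.project_vector(&v));
}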
#[inline] @@ -218,7 +213,8 @@ where other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon, - ) -> bool { + ) -> bool + { self.coords .relative_eq(&other.coords, epsilon, max_relative) } @@ -243,8 +239,7 @@ where impl Eq for Point where DefaultAllocator: Allocator {} impl PartialEq for Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn eq(&self, right: &Self) -> bool { @@ -253,8 +248,7 @@ where } impl PartialOrd for Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn partial_cmp(&self, other: &Self) -> Option { @@ -288,8 +282,7 @@ where * */ impl fmt::Display for Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { try!(write!(f, "{{")); diff --git a/src/geometry/point_alga.rs b/src/geometry/point_alga.rs index 5673d0df4..42a1088a6 100644 --- a/src/geometry/point_alga.rs +++ b/src/geometry/point_alga.rs @@ -16,8 +16,7 @@ where } impl EuclideanSpace for Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type Coordinates = VectorN; type Real = N; diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 9d6497b8b..c4f6bfc1e 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -13,8 +13,7 @@ use base::{DefaultAllocator, Scalar, VectorN}; use geometry::Point; impl Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Creates a new point with uninitialized coordinates. #[inline] @@ -25,9 +24,7 @@ where /// Creates a new point with all coordinates equal to zero. #[inline] pub fn origin() -> Self - where - N: Zero, - { + where N: Zero { Self::from_coordinates(VectorN::from_element(N::zero())) } @@ -63,8 +60,7 @@ where * */ impl Bounded for Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn max_value() -> Self { diff --git a/src/geometry/point_ops.rs b/src/geometry/point_ops.rs index 1ea8413ac..03b07c398 100644 --- a/src/geometry/point_ops.rs +++ b/src/geometry/point_ops.rs @@ -19,8 +19,7 @@ use geometry::Point; * */ impl Index for Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type Output = N; @@ -31,8 +30,7 @@ where } impl IndexMut for Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn index_mut(&mut self, i: usize) -> &mut Self::Output { @@ -46,8 +44,7 @@ where * */ impl Neg for Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type Output = Point; @@ -58,8 +55,7 @@ where } impl<'a, N: Scalar + ClosedNeg, D: DimName> Neg for &'a Point -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type Output = Point; diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 60c21f8d6..6fb70d2b7 100644 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -32,8 +32,7 @@ pub struct Quaternion { #[cfg(feature = "abomonation-serialize")] impl Abomonation for Quaternion -where - Vector4: Abomonation, +where Vector4: Abomonation { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { self.coords.entomb(writer) @@ -75,26 +74,20 @@ impl Clone for Quaternion { #[cfg(feature = "serde-serialize")] impl Serialize for Quaternion -where - Owned: Serialize, +where Owned: Serialize { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { + where S: Serializer { 
self.coords.serialize(serializer) } } #[cfg(feature = "serde-serialize")] impl<'a, N: Real> Deserialize<'a> for Quaternion -where - Owned: Deserialize<'a>, +where Owned: Deserialize<'a> { fn deserialize(deserializer: Des) -> Result - where - Des: Deserializer<'a>, - { + where Des: Deserializer<'a> { let coords = Vector4::::deserialize(deserializer)?; Ok(Quaternion::from_vector(coords)) @@ -337,7 +330,8 @@ impl> RelativeEq for Quaternion { other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon, - ) -> bool { + ) -> bool + { self.as_vector().relative_eq(other.as_vector(), epsilon, max_relative) || // Account for the double-covering of S², i.e. q = -q self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.relative_eq(&-*b, epsilon, max_relative)) @@ -480,7 +474,8 @@ impl UnitQuaternion { other: &UnitQuaternion, t: N, epsilon: N, - ) -> Option> { + ) -> Option> + { Unit::new_unchecked(self.coords) .try_slerp(&Unit::new_unchecked(other.coords), t, epsilon) .map(|q| Unit::new_unchecked(Quaternion::from_vector(q.unwrap()))) @@ -662,7 +657,8 @@ impl> RelativeEq for UnitQuaternion { other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon, - ) -> bool { + ) -> bool + { self.as_ref() .relative_eq(other.as_ref(), epsilon, max_relative) } diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs index be2c64c79..5daa0a3f4 100644 --- a/src/geometry/quaternion_construction.rs +++ b/src/geometry/quaternion_construction.rs @@ -44,9 +44,7 @@ impl Quaternion { #[inline] // FIXME: take a reference to `vector`? pub fn from_parts(scalar: N, vector: Vector) -> Self - where - SB: Storage, - { + where SB: Storage { Self::new(scalar, vector[0], vector[1], vector[2]) } @@ -55,9 +53,7 @@ impl Quaternion { /// Note that `axis` is assumed to be a unit vector. // FIXME: take a reference to `axis`? pub fn from_polar_decomposition(scale: N, theta: N, axis: Unit>) -> Self - where - SB: Storage, - { + where SB: Storage { let rot = UnitQuaternion::::from_axis_angle(&axis, theta * ::convert(2.0f64)); rot.unwrap() * scale @@ -90,8 +86,7 @@ impl Zero for Quaternion { } impl Distribution> for Standard -where - Standard: Distribution, +where Standard: Distribution { #[inline] fn sample<'a, R: Rng + ?Sized>(&self, rng: &'a mut R) -> Quaternion { @@ -101,8 +96,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Quaternion -where - Owned: Send, +where Owned: Send { #[inline] fn arbitrary(g: &mut G) -> Self { @@ -126,9 +120,7 @@ impl UnitQuaternion { /// (the rotation angle). #[inline] pub fn from_axis_angle(axis: &Unit>, angle: N) -> Self - where - SB: Storage, - { + where SB: Storage { let (sang, cang) = (angle / ::convert(2.0f64)).sin_cos(); let q = Quaternion::from_parts(cang, axis.as_ref() * sang); @@ -360,9 +352,7 @@ impl UnitQuaternion { /// If `axisangle` has a magnitude smaller than `N::default_epsilon()`, this returns the identity rotation. #[inline] pub fn new(axisangle: Vector) -> Self - where - SB: Storage, - { + where SB: Storage { let two: N = ::convert(2.0f64); let q = Quaternion::::from_parts(N::zero(), axisangle / two).exp(); Self::new_unchecked(q) @@ -373,9 +363,7 @@ impl UnitQuaternion { /// If `axisangle` has a magnitude smaller than `eps`, this returns the identity rotation. 
#[inline] pub fn new_eps(axisangle: Vector, eps: N) -> Self - where - SB: Storage, - { + where SB: Storage { let two: N = ::convert(2.0f64); let q = Quaternion::::from_parts(N::zero(), axisangle / two).exp_eps(eps); Self::new_unchecked(q) @@ -387,9 +375,7 @@ impl UnitQuaternion { /// Same as `Self::new(axisangle)`. #[inline] pub fn from_scaled_axis(axisangle: Vector) -> Self - where - SB: Storage, - { + where SB: Storage { Self::new(axisangle) } @@ -399,9 +385,7 @@ impl UnitQuaternion { /// Same as `Self::new(axisangle)`. #[inline] pub fn from_scaled_axis_eps(axisangle: Vector, eps: N) -> Self - where - SB: Storage, - { + where SB: Storage { Self::new_eps(axisangle, eps) } } @@ -414,8 +398,7 @@ impl One for UnitQuaternion { } impl Distribution> for Standard -where - OpenClosed01: Distribution, +where OpenClosed01: Distribution { /// Generate a uniformly distributed random rotation quaternion. #[inline] diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 476c045f1..7dfa56f39 100644 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -24,8 +24,7 @@ use base::{DefaultAllocator, MatrixN, Scalar}; #[repr(C)] #[derive(Debug)] pub struct Rotation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { matrix: MatrixN, } @@ -44,7 +43,8 @@ impl Copy for Rotation where DefaultAllocator: Allocator, >::Buffer: Copy, -{} +{ +} impl Clone for Rotation where @@ -85,9 +85,7 @@ where Owned: Serialize, { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { + where S: Serializer { self.matrix.serialize(serializer) } } @@ -99,9 +97,7 @@ where Owned: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result - where - Des: Deserializer<'a>, - { + where Des: Deserializer<'a> { let matrix = MatrixN::::deserialize(deserializer)?; Ok(Rotation::from_matrix_unchecked(matrix)) @@ -109,8 +105,7 @@ where } impl Rotation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// A reference to the underlying matrix representation of this rotation. 
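For reference, a minimal sketch of the `UnitQuaternion` constructors touched above; the two calls below build the same rotation from an axis-angle pair and from a scaled axis.

    use nalgebra::{UnitQuaternion, Vector3};
    use std::f64::consts::FRAC_PI_2;

    let q1 = UnitQuaternion::from_axis_angle(&Vector3::y_axis(), FRAC_PI_2);
    let q2 = UnitQuaternion::from_scaled_axis(Vector3::y() * FRAC_PI_2);
    assert!((q1.angle() - q2.angle()).abs() < 1.0e-7);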
#[inline] @@ -189,8 +184,7 @@ where impl Eq for Rotation where DefaultAllocator: Allocator {} impl PartialEq for Rotation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn eq(&self, right: &Rotation) -> bool { @@ -234,7 +228,8 @@ where other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon, - ) -> bool { + ) -> bool + { self.matrix .relative_eq(&other.matrix, epsilon, max_relative) } diff --git a/src/geometry/rotation_alga.rs b/src/geometry/rotation_alga.rs index b95a028b6..4f02bbae6 100644 --- a/src/geometry/rotation_alga.rs +++ b/src/geometry/rotation_alga.rs @@ -19,8 +19,7 @@ use geometry::{Point, Rotation}; * */ impl Identity for Rotation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn identity() -> Self { @@ -29,8 +28,7 @@ where } impl Inverse for Rotation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn inverse(&self) -> Self { @@ -44,8 +42,7 @@ where } impl AbstractMagma for Rotation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn operate(&self, rhs: &Self) -> Self { @@ -74,8 +71,7 @@ impl_multiplicative_structures!( * */ impl Transformation> for Rotation -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { #[inline] fn transform_point(&self, pt: &Point) -> Point { @@ -89,8 +85,7 @@ where } impl ProjectiveTransformation> for Rotation -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { #[inline] fn inverse_transform_point(&self, pt: &Point) -> Point { @@ -104,8 +99,7 @@ where } impl AffineTransformation> for Rotation -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { type Rotation = Self; type NonUniformScaling = Id; @@ -148,8 +142,7 @@ where } impl Similarity> for Rotation -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { type Scaling = Id; @@ -181,8 +174,7 @@ marker_impl!(Isometry, DirectIsometry, OrthogonalTransformation); /// Subgroups of the n-dimensional rotation group `SO(n)`. impl linear::Rotation> for Rotation -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { #[inline] fn powf(&self, _: N) -> Option { diff --git a/src/geometry/rotation_ops.rs b/src/geometry/rotation_ops.rs index 0e5b20b70..344d6b3f7 100644 --- a/src/geometry/rotation_ops.rs +++ b/src/geometry/rotation_ops.rs @@ -31,8 +31,7 @@ use base::{DefaultAllocator, Matrix, MatrixMN, Scalar, Unit, Vector, VectorN}; use geometry::{Point, Rotation}; impl Index<(usize, usize)> for Rotation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type Output = N; diff --git a/src/geometry/rotation_specialization.rs b/src/geometry/rotation_specialization.rs index d01a529d6..e03cd97d2 100644 --- a/src/geometry/rotation_specialization.rs +++ b/src/geometry/rotation_specialization.rs @@ -99,8 +99,7 @@ impl Rotation2 { } impl Distribution> for Standard -where - OpenClosed01: Distribution, +where OpenClosed01: Distribution { /// Generate a uniformly distributed random rotation. #[inline] @@ -111,8 +110,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Rotation2 -where - Owned: Send, +where Owned: Send { #[inline] fn arbitrary(g: &mut G) -> Self { @@ -144,9 +142,7 @@ impl Rotation3 { /// Builds a 3D rotation matrix from an axis and a rotation angle. 
pub fn from_axis_angle(axis: &Unit>, angle: N) -> Self - where - SB: Storage, - { + where SB: Storage { if angle.is_zero() { Self::identity() } else { @@ -384,8 +380,7 @@ impl Rotation3 { } impl Distribution> for Standard -where - OpenClosed01: Distribution, +where OpenClosed01: Distribution { /// Generate a uniformly distributed random rotation. #[inline] diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index cbf26a69f..6592e6ed6 100644 --- a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -25,25 +25,20 @@ use geometry::{Isometry, Point, Translation}; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "N: Serialize, + serde(bound(serialize = "N: Serialize, R: Serialize, DefaultAllocator: Allocator, - Owned: Serialize" - )) + Owned: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "N: Deserialize<'de>, + serde(bound(deserialize = "N: Deserialize<'de>, R: Deserialize<'de>, DefaultAllocator: Allocator, - Owned: Deserialize<'de>" - )) + Owned: Deserialize<'de>")) )] pub struct Similarity -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// The part of this similarity that does not include the scaling factor. pub isometry: Isometry, @@ -85,11 +80,11 @@ impl> + Copy> Copy for Simil where DefaultAllocator: Allocator, Owned: Copy, -{} +{ +} impl> + Clone> Clone for Similarity -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn clone(&self) -> Self { @@ -108,7 +103,8 @@ where translation: Translation, rotation: R, scaling: N, - ) -> Similarity { + ) -> Similarity + { Similarity::from_isometry(Isometry::from_parts(translation, rotation), scaling) } @@ -246,8 +242,7 @@ where // This is OK since all constructors of the isometry enforce the Rotation bound already (and // explicit struct construction is prevented by the private scaling factor). impl Similarity -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Converts this similarity into its equivalent homogeneous transformation matrix. 
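Usage sketch for `Similarity::from_parts` as reformatted above, assuming the usual convention that the scaling is applied first, then the rotation, then the translation (that ordering is not stated in the hunk itself).

    use nalgebra::{Point3, Similarity3, Translation3, UnitQuaternion, Vector3};
    use std::f64::consts::FRAC_PI_2;

    let sim = Similarity3::from_parts(
        Translation3::new(1.0, 2.0, 3.0),
        UnitQuaternion::from_axis_angle(&Vector3::z_axis(), FRAC_PI_2),
        2.0, // uniform scaling factor
    );
    // (1, 0, 0) -> scale -> (2, 0, 0) -> rotate about z -> (0, 2, 0) -> translate -> (1, 4, 3)
    let p = sim * Point3::new(1.0, 0.0, 0.0);
    assert!((p - Point3::new(1.0, 4.0, 3.0)).norm() < 1.0e-7);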
#[inline] @@ -271,7 +266,8 @@ impl Eq for Similarity where R: Rotation> + Eq, DefaultAllocator: Allocator, -{} +{ +} impl PartialEq for Similarity where @@ -321,7 +317,8 @@ where other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon, - ) -> bool { + ) -> bool + { self.isometry .relative_eq(&other.isometry, epsilon, max_relative) && self diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 4d7f8ba18..4326c9159 100644 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -155,8 +155,7 @@ super_tcategory_impl!( #[repr(C)] #[derive(Debug)] pub struct Transform, C: TCategory> -where - DefaultAllocator: Allocator, DimNameSum>, +where DefaultAllocator: Allocator, DimNameSum> { matrix: MatrixN>, _phantom: PhantomData, @@ -175,11 +174,11 @@ impl + Copy, C: TCategory> Copy for Transform, DimNameSum>, Owned, DimNameSum>: Copy, -{} +{ +} impl, C: TCategory> Clone for Transform -where - DefaultAllocator: Allocator, DimNameSum>, +where DefaultAllocator: Allocator, DimNameSum> { #[inline] fn clone(&self) -> Self { @@ -194,9 +193,7 @@ where Owned, DimNameSum>: Serialize, { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { + where S: Serializer { self.matrix.serialize(serializer) } } @@ -208,22 +205,18 @@ where Owned, DimNameSum>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result - where - Des: Deserializer<'a>, - { + where Des: Deserializer<'a> { let matrix = MatrixN::>::deserialize(deserializer)?; Ok(Transform::from_matrix_unchecked(matrix)) } } -impl, C: TCategory> Eq for Transform where - DefaultAllocator: Allocator, DimNameSum> +impl, C: TCategory> Eq for Transform where DefaultAllocator: Allocator, DimNameSum> {} impl, C: TCategory> PartialEq for Transform -where - DefaultAllocator: Allocator, DimNameSum>, +where DefaultAllocator: Allocator, DimNameSum> { #[inline] fn eq(&self, right: &Self) -> bool { @@ -232,8 +225,7 @@ where } impl, C: TCategory> Transform -where - DefaultAllocator: Allocator, DimNameSum>, +where DefaultAllocator: Allocator, DimNameSum> { /// Creates a new transformation from the given homogeneous matrix. The transformation category /// of `Self` is not checked to be verified by the given matrix. @@ -305,9 +297,7 @@ where /// category (it may not be invertible). #[inline] pub fn inverse(self) -> Transform - where - C: SubTCategoryOf, - { + where C: SubTCategoryOf { // FIXME: specialize for TAffine? Transform::from_matrix_unchecked(self.matrix.try_inverse().unwrap()) } @@ -323,16 +313,13 @@ where /// `TGeneral` category (it may not be invertible). #[inline] pub fn inverse_mut(&mut self) - where - C: SubTCategoryOf, - { + where C: SubTCategoryOf { let _ = self.matrix.try_inverse_mut(); } } impl> Transform -where - DefaultAllocator: Allocator, DimNameSum>, +where DefaultAllocator: Allocator, DimNameSum> { /// A mutable reference to underlying matrix. Use `.matrix_mut_unchecked` instead if this /// transformation category is not `TGeneral`. diff --git a/src/geometry/transform_construction.rs b/src/geometry/transform_construction.rs index 1a7beba40..0ab9e5f25 100644 --- a/src/geometry/transform_construction.rs +++ b/src/geometry/transform_construction.rs @@ -9,8 +9,7 @@ use base::{DefaultAllocator, MatrixN}; use geometry::{TCategory, Transform}; impl, C: TCategory> Transform -where - DefaultAllocator: Allocator, DimNameSum>, +where DefaultAllocator: Allocator, DimNameSum> { /// Creates a new identity transform. 
#[inline] @@ -20,8 +19,7 @@ where } impl, C: TCategory> One for Transform -where - DefaultAllocator: Allocator, DimNameSum>, +where DefaultAllocator: Allocator, DimNameSum> { /// Creates a new identity transform. #[inline] diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index 33a0f3493..1200d2512 100644 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -22,8 +22,7 @@ use base::{DefaultAllocator, MatrixN, Scalar, VectorN}; #[repr(C)] #[derive(Debug)] pub struct Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// The translation coordinates, i.e., how much is added to a point's coordinates when it is /// translated. @@ -44,7 +43,8 @@ impl Copy for Translation where DefaultAllocator: Allocator, Owned: Copy, -{} +{ +} impl Clone for Translation where @@ -85,9 +85,7 @@ where Owned: Serialize, { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { + where S: Serializer { self.vector.serialize(serializer) } } @@ -99,9 +97,7 @@ where Owned: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result - where - Des: Deserializer<'a>, - { + where Des: Deserializer<'a> { let matrix = VectorN::::deserialize(deserializer)?; Ok(Translation::from_vector(matrix)) @@ -109,8 +105,7 @@ where } impl Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Creates a new translation from the given vector. #[inline] @@ -121,9 +116,7 @@ where /// Inverts `self`. #[inline] pub fn inverse(&self) -> Translation - where - N: ClosedNeg, - { + where N: ClosedNeg { Translation::from_vector(-&self.vector) } @@ -145,9 +138,7 @@ where /// Inverts `self` in-place. #[inline] pub fn inverse_mut(&mut self) - where - N: ClosedNeg, - { + where N: ClosedNeg { self.vector.neg_mut() } } @@ -155,8 +146,7 @@ where impl Eq for Translation where DefaultAllocator: Allocator {} impl PartialEq for Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn eq(&self, right: &Translation) -> bool { @@ -198,7 +188,8 @@ where other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon, - ) -> bool { + ) -> bool + { self.vector .relative_eq(&other.vector, epsilon, max_relative) } @@ -226,8 +217,7 @@ where * */ impl fmt::Display for Translation -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let precision = f.precision().unwrap_or(3); diff --git a/src/geometry/translation_alga.rs b/src/geometry/translation_alga.rs index ce095cc59..896e7a8b3 100644 --- a/src/geometry/translation_alga.rs +++ b/src/geometry/translation_alga.rs @@ -20,8 +20,7 @@ use geometry::{Point, Translation}; * */ impl Identity for Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn identity() -> Self { @@ -30,8 +29,7 @@ where } impl Inverse for Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn inverse(&self) -> Self { @@ -45,8 +43,7 @@ where } impl AbstractMagma for Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn operate(&self, rhs: &Self) -> Self { @@ -75,8 +72,7 @@ impl_multiplicative_structures!( * */ impl Transformation> for Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn transform_point(&self, pt: &Point) -> Point { @@ -90,8 +86,7 @@ where } impl ProjectiveTransformation> for 
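A small illustrative example (not part of the patch) of the `Translation::inverse` method whose where-clause is collapsed above.

    use nalgebra::{Point3, Translation3};

    let t = Translation3::new(1.0, 2.0, 3.0);
    let p = Point3::new(4.0, 5.0, 6.0);
    // Applying a translation and then its inverse is the identity.
    assert_eq!(t.inverse() * (t * p), p);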
Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn inverse_transform_point(&self, pt: &Point) -> Point { @@ -105,8 +100,7 @@ where } impl AffineTransformation> for Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type Rotation = Id; type NonUniformScaling = Id; @@ -149,8 +143,7 @@ where } impl Similarity> for Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type Scaling = Id; @@ -181,8 +174,7 @@ marker_impl!(Isometry, DirectIsometry); /// Subgroups of the n-dimensional translation group `T(n)`. impl AlgaTranslation> for Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn to_vector(&self) -> VectorN { diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index df97872ab..cc67507d7 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -16,8 +16,7 @@ use base::{DefaultAllocator, Scalar, VectorN}; use geometry::Translation; impl Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Creates a new square identity rotation of the given `dimension`. #[inline] @@ -27,8 +26,7 @@ where } impl One for Translation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn one() -> Self { diff --git a/src/geometry/unit_complex.rs b/src/geometry/unit_complex.rs index 9e91b1f13..63ddf80fd 100644 --- a/src/geometry/unit_complex.rs +++ b/src/geometry/unit_complex.rs @@ -155,7 +155,8 @@ impl RelativeEq for UnitComplex { other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon, - ) -> bool { + ) -> bool + { self.re.relative_eq(&other.re, epsilon, max_relative) && self.im.relative_eq(&other.im, epsilon, max_relative) } diff --git a/src/geometry/unit_complex_alga.rs b/src/geometry/unit_complex_alga.rs index d464ea560..59b11903a 100644 --- a/src/geometry/unit_complex_alga.rs +++ b/src/geometry/unit_complex_alga.rs @@ -59,8 +59,7 @@ impl_structures!( ); impl Transformation> for UnitComplex -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn transform_point(&self, pt: &Point2) -> Point2 { @@ -74,8 +73,7 @@ where } impl ProjectiveTransformation> for UnitComplex -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn inverse_transform_point(&self, pt: &Point2) -> Point2 { @@ -91,8 +89,7 @@ where } impl AffineTransformation> for UnitComplex -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type Rotation = Self; type NonUniformScaling = Id; @@ -135,8 +132,7 @@ where } impl Similarity> for UnitComplex -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type Scaling = Id; @@ -166,8 +162,7 @@ macro_rules! marker_impl( marker_impl!(Isometry, DirectIsometry, OrthogonalTransformation); impl Rotation> for UnitComplex -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn powf(&self, n: N) -> Option { diff --git a/src/geometry/unit_complex_construction.rs b/src/geometry/unit_complex_construction.rs index 82dd2e7d8..8e4f45fec 100644 --- a/src/geometry/unit_complex_construction.rs +++ b/src/geometry/unit_complex_construction.rs @@ -71,9 +71,7 @@ impl UnitComplex { /// Builds the unit complex number from the corresponding 2D rotation matrix. 
#[inline] pub fn from_rotation_matrix(rotmat: &Rotation) -> Self - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { Self::new_unchecked(Complex::new(rotmat[(0, 0)], rotmat[(1, 0)])) } @@ -152,8 +150,7 @@ impl One for UnitComplex { } impl Distribution> for Standard -where - OpenClosed01: Distribution, +where OpenClosed01: Distribution { /// Generate a uniformly distributed random `UnitComplex`. #[inline] diff --git a/src/geometry/unit_complex_ops.rs b/src/geometry/unit_complex_ops.rs index 3d737f757..6456733c6 100644 --- a/src/geometry/unit_complex_ops.rs +++ b/src/geometry/unit_complex_ops.rs @@ -332,8 +332,7 @@ impl<'b, N: Real> DivAssign<&'b UnitComplex> for UnitComplex { // UnitComplex ×= Rotation impl MulAssign> for UnitComplex -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn mul_assign(&mut self, rhs: Rotation) { @@ -342,8 +341,7 @@ where } impl<'b, N: Real> MulAssign<&'b Rotation> for UnitComplex -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn mul_assign(&mut self, rhs: &'b Rotation) { @@ -353,8 +351,7 @@ where // UnitComplex ÷= Rotation impl DivAssign> for UnitComplex -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn div_assign(&mut self, rhs: Rotation) { @@ -363,8 +360,7 @@ where } impl<'b, N: Real> DivAssign<&'b Rotation> for UnitComplex -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn div_assign(&mut self, rhs: &'b Rotation) { @@ -374,8 +370,7 @@ where // Rotation ×= UnitComplex impl MulAssign> for Rotation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn mul_assign(&mut self, rhs: UnitComplex) { @@ -384,8 +379,7 @@ where } impl<'b, N: Real> MulAssign<&'b UnitComplex> for Rotation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn mul_assign(&mut self, rhs: &'b UnitComplex) { @@ -395,8 +389,7 @@ where // Rotation ÷= UnitComplex impl DivAssign> for Rotation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn div_assign(&mut self, rhs: UnitComplex) { @@ -405,8 +398,7 @@ where } impl<'b, N: Real> DivAssign<&'b UnitComplex> for Rotation -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn div_assign(&mut self, rhs: &'b UnitComplex) { diff --git a/src/lib.rs b/src/lib.rs index ad81c9c83..05665575e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -123,10 +123,18 @@ extern crate alloc; #[cfg(not(feature = "std"))] extern crate core as std; +#[cfg(feature = "io")] +extern crate pest; +#[macro_use] +#[cfg(feature = "io")] +extern crate pest_derive; + pub mod base; #[cfg(feature = "debug")] pub mod debug; pub mod geometry; +#[cfg(feature = "io")] +pub mod io; pub mod linalg; #[cfg(feature = "sparse")] pub mod sparse; @@ -224,9 +232,7 @@ pub fn dimension() -> usize { /// The range must not be empty. 
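Sketch of `UnitComplex::from_rotation_matrix`, shown above; `Rotation2::new` and `UnitComplex::angle` are assumed from the crate's 2D rotation API and are not part of these hunks.

    use nalgebra::{Rotation2, UnitComplex};
    use std::f64::consts::FRAC_PI_3;

    let rot = Rotation2::new(FRAC_PI_3);
    let uc = UnitComplex::from_rotation_matrix(&rot);
    assert!((uc.angle() - FRAC_PI_3).abs() < 1.0e-7);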
#[inline] pub fn wrap(mut val: T, min: T, max: T) -> T -where - T: Copy + PartialOrd + AdditiveGroup, -{ +where T: Copy + PartialOrd + AdditiveGroup { assert!(min < max, "Invalid wrapping bounds."); let width = max - min; diff --git a/src/linalg/balancing.rs b/src/linalg/balancing.rs index 54c325cf2..5db113bae 100644 --- a/src/linalg/balancing.rs +++ b/src/linalg/balancing.rs @@ -13,9 +13,7 @@ use base::{DefaultAllocator, MatrixN, VectorN}; /// /// See https://arxiv.org/pdf/1401.5766.pdf pub fn balance_parlett_reinsch(m: &mut MatrixN) -> VectorN -where - DefaultAllocator: Allocator + Allocator, -{ +where DefaultAllocator: Allocator + Allocator { assert!(m.is_square(), "Unable to balance a non-square matrix."); let dim = m.data.shape().0; @@ -67,9 +65,7 @@ where /// Computes in-place `D * m * D.inverse()`, where `D` is the matrix with diagonal `d`. pub fn unbalance(m: &mut MatrixN, d: &VectorN) -where - DefaultAllocator: Allocator + Allocator, -{ +where DefaultAllocator: Allocator + Allocator { assert!(m.is_square(), "Unable to unbalance a non-square matrix."); assert_eq!(m.nrows(), d.len(), "Unbalancing: mismatched dimensions."); diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index a5fd178d1..5e2f66092 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -15,27 +15,23 @@ use linalg::householder; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DimMinimum: DimSub, + serde(bound(serialize = "DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, MatrixMN: Serialize, VectorN>: Serialize, - VectorN, U1>>: Serialize" - )) + VectorN, U1>>: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DimMinimum: DimSub, + serde(bound(deserialize = "DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, MatrixMN: Deserialize<'de>, VectorN>: Deserialize<'de>, - VectorN, U1>>: Deserialize<'de>" - )) + VectorN, U1>>: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct Bidiagonal, C: Dim> @@ -64,7 +60,8 @@ where MatrixMN: Copy, VectorN>: Copy, VectorN, U1>>: Copy, -{} +{ +} impl, C: Dim> Bidiagonal where @@ -212,9 +209,7 @@ where // FIXME: code duplication with householder::assemble_q. // Except that we are returning a rectangular matrix here. pub fn u(&self) -> MatrixMN> - where - DefaultAllocator: Allocator>, - { + where DefaultAllocator: Allocator> { let (nrows, ncols) = self.uv.data.shape(); let mut res = Matrix::identity_generic(nrows, nrows.min(ncols)); @@ -235,9 +230,7 @@ where /// Computes the orthogonal matrix `V` of this `U * D * V` decomposition. 
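Illustrative use of the free `wrap` function reformatted above, assuming it wraps a value into the half-open range `[min, max)` as its documentation suggests.

    use nalgebra::wrap;

    assert_eq!(wrap(3.0, 0.0, 2.0), 1.0);   // above the range: shifted down by the width
    assert_eq!(wrap(-1.0, 0.0, 2.0), 1.0);  // below the range: shifted up by the width
    assert_eq!(wrap(1.5, 0.0, 2.0), 1.5);   // already inside: unchanged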
pub fn v_t(&self) -> MatrixMN, C> - where - DefaultAllocator: Allocator, C>, - { + where DefaultAllocator: Allocator, C> { let (nrows, ncols) = self.uv.data.shape(); let min_nrows_ncols = nrows.min(ncols); diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 9bf3cb18b..b75ba33a4 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -13,22 +13,17 @@ use storage::{Storage, StorageMut}; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator, - MatrixN: Serialize" - )) + serde(bound(serialize = "DefaultAllocator: Allocator, + MatrixN: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator, - MatrixN: Deserialize<'de>" - )) + serde(bound(deserialize = "DefaultAllocator: Allocator, + MatrixN: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct Cholesky -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { chol: MatrixN, } @@ -37,11 +32,11 @@ impl Copy for Cholesky where DefaultAllocator: Allocator, MatrixN: Copy, -{} +{ +} impl> Cholesky -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Attempts to compute the Cholesky decomposition of `matrix`. /// @@ -147,8 +142,7 @@ where } impl, S: Storage> SquareMatrix -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { /// Attempts to compute the Cholesky decomposition of this matrix. /// diff --git a/src/linalg/determinant.rs b/src/linalg/determinant.rs index 56a21c382..100a21e73 100644 --- a/src/linalg/determinant.rs +++ b/src/linalg/determinant.rs @@ -13,9 +13,7 @@ impl, S: Storage> SquareMatrix N - where - DefaultAllocator: Allocator + Allocator<(usize, usize), D>, - { + where DefaultAllocator: Allocator + Allocator<(usize, usize), D> { assert!( self.is_square(), "Unable to compute the determinant of a non-square matrix." diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index a65ab5d09..43cc20bae 100644 --- a/src/linalg/full_piv_lu.rs +++ b/src/linalg/full_piv_lu.rs @@ -15,26 +15,21 @@ use linalg::PermutationSequence; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Serialize, - PermutationSequence>: Serialize" - )) + PermutationSequence>: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Deserialize<'de>, - PermutationSequence>: Deserialize<'de>" - )) + PermutationSequence>: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct FullPivLU, C: Dim> -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum> { lu: MatrixMN, p: PermutationSequence>, @@ -46,11 +41,11 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Copy, PermutationSequence>: Copy, -{} +{ +} impl, C: Dim> FullPivLU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum> { /// Computes the LU decomposition with full pivoting of `matrix`. 
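Usage sketch for the Cholesky decomposition touched above; the `.cholesky()` entry point on square matrices and `Cholesky::solve` are assumed from the crate's public API.

    use nalgebra::{Matrix3, Vector3};

    // A symmetric positive-definite matrix.
    let m = Matrix3::new(4.0, 2.0, 0.0,
                         2.0, 5.0, 1.0,
                         0.0, 1.0, 3.0);
    let chol = m.cholesky().expect("matrix is not symmetric positive-definite");
    let b = Vector3::new(1.0, 2.0, 3.0);
    let x = chol.solve(&b);
    assert!((m * x - b).norm() < 1.0e-10);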
/// @@ -108,9 +103,7 @@ where /// The lower triangular matrix of this decomposition. #[inline] pub fn l(&self) -> MatrixMN> - where - DefaultAllocator: Allocator>, - { + where DefaultAllocator: Allocator> { let (nrows, ncols) = self.lu.data.shape(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(N::zero(), 1); @@ -121,9 +114,7 @@ where /// The upper triangular matrix of this decomposition. #[inline] pub fn u(&self) -> MatrixMN, C> - where - DefaultAllocator: Allocator, C>, - { + where DefaultAllocator: Allocator, C> { let (nrows, ncols) = self.lu.data.shape(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -150,8 +141,7 @@ where MatrixMN, C>, PermutationSequence>, ) - where - DefaultAllocator: Allocator> + Allocator, C>, + where DefaultAllocator: Allocator> + Allocator, C> { // Use reallocation for either l or u. let l = self.l(); @@ -164,8 +154,7 @@ where } impl> FullPivLU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), D>, +where DefaultAllocator: Allocator + Allocator<(usize, usize), D> { /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. /// @@ -270,8 +259,7 @@ where } impl, C: Dim, S: Storage> Matrix -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum> { /// Computes the LU decomposition with full pivoting of `matrix`. /// diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index d575a4764..9a525edb2 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -14,26 +14,21 @@ use linalg::householder; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: Serialize, - VectorN>: Serialize" - )) + VectorN>: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: Deserialize<'de>, - VectorN>: Deserialize<'de>" - )) + VectorN>: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct Hessenberg> -where - DefaultAllocator: Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator> { hess: MatrixN, subdiag: VectorN>, @@ -44,11 +39,11 @@ where DefaultAllocator: Allocator + Allocator>, MatrixN: Copy, VectorN>: Copy, -{} +{ +} impl> Hessenberg -where - DefaultAllocator: Allocator + Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator + Allocator> { /// Computes the Hessenberg decomposition using householder reflections. pub fn new(hess: MatrixN) -> Self { @@ -95,9 +90,7 @@ where /// hessenberg matrix. #[inline] pub fn unpack(self) -> (MatrixN, MatrixN) - where - ShapeConstraint: DimEq>, - { + where ShapeConstraint: DimEq> { let q = self.q(); (q, self.unpack_h()) @@ -106,9 +99,7 @@ where /// Retrieves the upper trapezoidal submatrix `H` of this decomposition. #[inline] pub fn unpack_h(mut self) -> MatrixN - where - ShapeConstraint: DimEq>, - { + where ShapeConstraint: DimEq> { let dim = self.hess.nrows(); self.hess.fill_lower_triangle(N::zero(), 2); self.hess @@ -123,9 +114,7 @@ where /// This is less efficient than `.unpack_h()` as it allocates a new matrix. 
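A minimal sketch of the full-pivoting LU solver whose hunks appear above, assuming the `.full_piv_lu()` entry point on matrices (the method name itself is not visible in the diff).

    use nalgebra::{Matrix3, Vector3};

    let m = Matrix3::new(1.0, 2.0, 3.0,
                         0.0, 1.0, 4.0,
                         5.0, 6.0, 0.0);
    let lu = m.full_piv_lu();
    let b = Vector3::new(6.0, 5.0, 11.0);
    // `solve` returns `None` when the matrix is singular.
    let x = lu.solve(&b).expect("matrix is not invertible");
    assert!((m * x - b).norm() < 1.0e-10);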
#[inline] pub fn h(&self) -> MatrixN - where - ShapeConstraint: DimEq>, - { + where ShapeConstraint: DimEq> { let dim = self.hess.nrows(); let mut res = self.hess.clone(); res.fill_lower_triangle(N::zero(), 2); @@ -146,8 +135,7 @@ where } impl, S: Storage> SquareMatrix -where - DefaultAllocator: Allocator + Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator + Allocator> { /// Computes the Hessenberg decomposition of this matrix using householder reflections. pub fn hessenberg(self) -> Hessenberg { diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index 22799adb7..09c230916 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -105,9 +105,7 @@ pub fn clear_row_unchecked( /// matrices. #[doc(hidden)] pub fn assemble_q(m: &MatrixN) -> MatrixN -where - DefaultAllocator: Allocator, -{ +where DefaultAllocator: Allocator { assert!(m.is_square()); let dim = m.data.shape().0; diff --git a/src/linalg/inverse.rs b/src/linalg/inverse.rs index 921234d7b..08cebd2a9 100644 --- a/src/linalg/inverse.rs +++ b/src/linalg/inverse.rs @@ -11,9 +11,7 @@ impl> SquareMatrix { /// Attempts to invert this matrix. #[inline] pub fn try_inverse(self) -> Option> - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let mut me = self.into_owned(); if me.try_inverse_mut() { Some(me) @@ -28,9 +26,7 @@ impl> SquareMatrix { /// inversion fails. #[inline] pub fn try_inverse_mut(&mut self) -> bool - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { assert!(self.is_square(), "Unable to invert a non-square matrix."); let dim = self.shape().0; diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 2dccbc0b7..d150852ac 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -15,26 +15,21 @@ use linalg::PermutationSequence; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Serialize, - PermutationSequence>: Serialize" - )) + PermutationSequence>: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Deserialize<'de>, - PermutationSequence>: Deserialize<'de>" - )) + PermutationSequence>: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct LU, C: Dim> -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum> { lu: MatrixMN, p: PermutationSequence>, @@ -45,7 +40,8 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: Copy, PermutationSequence>: Copy, -{} +{ +} /// Performs a LU decomposition to overwrite `out` with the inverse of `matrix`. /// @@ -88,8 +84,7 @@ where } impl, C: Dim> LU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum> { /// Computes the LU decomposition with partial (row) pivoting of `matrix`. pub fn new(mut matrix: MatrixMN) -> Self { @@ -131,9 +126,7 @@ where /// The lower triangular matrix of this decomposition. 
#[inline] pub fn l(&self) -> MatrixMN> - where - DefaultAllocator: Allocator>, - { + where DefaultAllocator: Allocator> { let (nrows, ncols) = self.lu.data.shape(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(N::zero(), 1); @@ -148,9 +141,7 @@ where MatrixMN>, PermutationSequence>, ) - where - DefaultAllocator: Reallocator>, - { + where DefaultAllocator: Reallocator> { let (nrows, ncols) = self.lu.data.shape(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), N::zero()); m.fill_upper_triangle(N::zero(), 1); @@ -161,9 +152,7 @@ where /// The lower triangular matrix of this decomposition. #[inline] pub fn l_unpack(self) -> MatrixMN> - where - DefaultAllocator: Reallocator>, - { + where DefaultAllocator: Reallocator> { let (nrows, ncols) = self.lu.data.shape(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), N::zero()); m.fill_upper_triangle(N::zero(), 1); @@ -174,9 +163,7 @@ where /// The upper triangular matrix of this decomposition. #[inline] pub fn u(&self) -> MatrixMN, C> - where - DefaultAllocator: Allocator, C>, - { + where DefaultAllocator: Allocator, C> { let (nrows, ncols) = self.lu.data.shape(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -196,11 +183,9 @@ where MatrixMN>, MatrixMN, C>, ) - where - DefaultAllocator: Allocator> + where DefaultAllocator: Allocator> + Allocator, C> - + Reallocator>, - { + + Reallocator> { // Use reallocation for either l or u. let u = self.u(); let (l, p) = self.l_unpack_with_p(); @@ -210,8 +195,7 @@ where } impl> LU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), D>, +where DefaultAllocator: Allocator + Allocator<(usize, usize), D> { /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. /// @@ -382,8 +366,7 @@ pub fn gauss_step_swap( } impl, C: Dim, S: Storage> Matrix -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum> { /// Computes the LU decomposition with partial (row) pivoting of `matrix`. 
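Sketch of `LU::unpack` and `inv_permute_rows` as reformatted above, assuming the usual partial-pivoting relation `P * M = L * U`, so the original matrix is recovered by applying the inverse row permutation to `L * U`.

    use nalgebra::Matrix3;

    let m = Matrix3::new(2.0, 1.0, 1.0,
                         1.0, 3.0, 2.0,
                         1.0, 0.0, 4.0);
    let (p, l, u) = m.lu().unpack();
    let mut reconstructed = l * u;
    p.inv_permute_rows(&mut reconstructed);
    assert!((reconstructed - m).norm() < 1.0e-10);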
pub fn lu(self) -> LU { diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index a6f555f95..bba40b8fb 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -15,22 +15,17 @@ use storage::StorageMut; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator<(usize, usize), D>, - VectorN<(usize, usize), D>: Serialize" - )) + serde(bound(serialize = "DefaultAllocator: Allocator<(usize, usize), D>, + VectorN<(usize, usize), D>: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator<(usize, usize), D>, - VectorN<(usize, usize), D>: Deserialize<'de>" - )) + serde(bound(deserialize = "DefaultAllocator: Allocator<(usize, usize), D>, + VectorN<(usize, usize), D>: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct PermutationSequence -where - DefaultAllocator: Allocator<(usize, usize), D>, +where DefaultAllocator: Allocator<(usize, usize), D> { len: usize, ipiv: VectorN<(usize, usize), D>, @@ -40,11 +35,11 @@ impl Copy for PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, VectorN<(usize, usize), D>: Copy, -{} +{ +} impl PermutationSequence -where - DefaultAllocator: Allocator<(usize, usize), D>, +where DefaultAllocator: Allocator<(usize, usize), D> { /// Creates a new statically-allocated sequence of `D` identity permutations. #[inline] @@ -55,8 +50,7 @@ where #[cfg(any(feature = "std", feature = "alloc"))] impl PermutationSequence -where - DefaultAllocator: Allocator<(usize, usize), Dynamic>, +where DefaultAllocator: Allocator<(usize, usize), Dynamic> { /// Creates a new dynamically-allocated sequence of `n` identity permutations. #[inline] @@ -66,8 +60,7 @@ where } impl PermutationSequence -where - DefaultAllocator: Allocator<(usize, usize), D>, +where DefaultAllocator: Allocator<(usize, usize), D> { /// Creates a new sequence of D identity permutations. #[inline] @@ -97,9 +90,7 @@ where /// Applies this sequence of permutations to the rows of `rhs`. #[inline] pub fn permute_rows(&self, rhs: &mut Matrix) - where - S2: StorageMut, - { + where S2: StorageMut { for i in self.ipiv.rows_range(..self.len).iter() { rhs.swap_rows(i.0, i.1) } @@ -107,8 +98,10 @@ where /// Applies this sequence of permutations in reverse to the rows of `rhs`. #[inline] - pub fn inv_permute_rows(&self, rhs: &mut Matrix) - where + pub fn inv_permute_rows( + &self, + rhs: &mut Matrix, + ) where S2: StorageMut, { for i in 0..self.len { @@ -119,8 +112,10 @@ where /// Applies this sequence of permutations to the columns of `rhs`. 
#[inline] - pub fn permute_columns(&self, rhs: &mut Matrix) - where + pub fn permute_columns( + &self, + rhs: &mut Matrix, + ) where S2: StorageMut, { for i in self.ipiv.rows_range(..self.len).iter() { diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 487a06a4b..3ec53c1d7 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -15,26 +15,21 @@ use linalg::householder; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: Serialize, - VectorN>: Serialize" - )) + VectorN>: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: Deserialize<'de>, - VectorN>: Deserialize<'de>" - )) + VectorN>: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct QR, C: Dim> -where - DefaultAllocator: Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator> { qr: MatrixMN, diag: VectorN>, @@ -45,11 +40,11 @@ where DefaultAllocator: Allocator + Allocator>, MatrixMN: Copy, VectorN>: Copy, -{} +{ +} impl, C: Dim> QR -where - DefaultAllocator: Allocator + Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator + Allocator> { /// Computes the QR decomposition using householder reflections. pub fn new(mut matrix: MatrixMN) -> Self { @@ -108,9 +103,7 @@ where /// Computes the orthogonal matrix `Q` of this decomposition. pub fn q(&self) -> MatrixMN> - where - DefaultAllocator: Allocator>, - { + where DefaultAllocator: Allocator> { let (nrows, ncols) = self.qr.data.shape(); // NOTE: we could build the identity matrix and call q_mul on it. @@ -153,9 +146,7 @@ where /// Multiplies the provided matrix by the transpose of the `Q` matrix of this decomposition. pub fn q_tr_mul(&self, rhs: &mut Matrix) // FIXME: do we need a static constraint on the number of rows of rhs? - where - S2: StorageMut, - { + where S2: StorageMut { let dim = self.diag.len(); for i in 0..dim { @@ -169,8 +160,7 @@ where } impl> QR -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. /// @@ -302,8 +292,7 @@ where } impl, C: Dim, S: Storage> Matrix -where - DefaultAllocator: Allocator + Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator + Allocator> { /// Computes the QR decomposition of this matrix. 
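Illustrative use of the QR decomposition reformatted above; `QR::q()` appears in the hunk while `QR::r()` is assumed from the public API.

    use nalgebra::Matrix3;

    let m = Matrix3::new(2.0, -1.0, 0.0,
                         -1.0, 2.0, -1.0,
                         0.0, -1.0, 2.0);
    let qr = m.qr();
    let (q, r) = (qr.q(), qr.r());
    // Q is orthogonal and Q * R reconstructs the input.
    assert!((q * r - m).norm() < 1.0e-10);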
pub fn qr(self) -> QR { diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index 0918bee98..cb6c637f0 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -19,22 +19,17 @@ use linalg::Hessenberg; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator, - MatrixN: Serialize" - )) + serde(bound(serialize = "DefaultAllocator: Allocator, + MatrixN: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator, - MatrixN: Deserialize<'de>" - )) + serde(bound(deserialize = "DefaultAllocator: Allocator, + MatrixN: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct RealSchur -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { q: MatrixN, t: MatrixN, @@ -44,7 +39,8 @@ impl Copy for RealSchur where DefaultAllocator: Allocator, MatrixN: Copy, -{} +{ +} impl RealSchur where @@ -86,7 +82,8 @@ where eps: N, max_niter: usize, compute_q: bool, - ) -> Option<(Option>, MatrixN)> { + ) -> Option<(Option>, MatrixN)> + { assert!( m.is_square(), "Unable to compute the eigenvectors and eigenvalues of a non-square matrix." @@ -290,9 +287,7 @@ where /// Computes the complex eigenvalues of the decomposed matrix. fn do_complex_eigenvalues(t: &MatrixN, out: &mut VectorN, D>) - where - DefaultAllocator: Allocator, D>, - { + where DefaultAllocator: Allocator, D> { let dim = t.nrows(); let mut m = 0; @@ -390,9 +385,7 @@ where /// Computes the complex eigenvalues of the decomposed matrix. pub fn complex_eigenvalues(&self) -> VectorN, D> - where - DefaultAllocator: Allocator, D>, - { + where DefaultAllocator: Allocator, D> { let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) }; Self::do_complex_eigenvalues(&self.t, &mut out); out @@ -565,9 +558,7 @@ where /// Computes the eigenvalues of this matrix. pub fn complex_eigenvalues(&self) -> VectorN, D> // FIXME: add balancing? - where - DefaultAllocator: Allocator, D>, - { + where DefaultAllocator: Allocator, D> { let dim = self.data.shape().0; let mut work = unsafe { VectorN::new_uninitialized_generic(dim, U1) }; diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index 3945af9cb..e3e42d5b4 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -20,34 +20,33 @@ use linalg::Bidiagonal; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator + + serde( + bound(serialize = "DefaultAllocator: Allocator + Allocator> + Allocator, C> + Allocator>, MatrixMN>: Serialize, MatrixMN, C>: Serialize, - VectorN>: Serialize" - )) + VectorN>: Serialize") + ) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator + + serde( + bound(deserialize = "DefaultAllocator: Allocator + Allocator> + Allocator, C> + Allocator>, MatrixMN>: Deserialize<'de>, MatrixMN, C>: Deserialize<'de>, - VectorN>: Deserialize<'de>" - )) + VectorN>: Deserialize<'de>") + ) )] #[derive(Clone, Debug)] pub struct SVD, C: Dim> -where - DefaultAllocator: Allocator, C> +where DefaultAllocator: Allocator, C> + Allocator> - + Allocator>, + + Allocator> { /// The left-singular vectors `U` of this SVD. 
pub u: Option>>, @@ -65,7 +64,8 @@ where MatrixMN>: Copy, MatrixMN, C>: Copy, VectorN>: Copy, -{} +{ +} impl, C: Dim> SVD where @@ -99,7 +99,8 @@ where compute_v: bool, eps: N, max_niter: usize, - ) -> Option { + ) -> Option + { assert!( matrix.len() != 0, "Cannot compute the SVD of an empty matrix." @@ -287,7 +288,8 @@ where m22: N, compute_u: bool, compute_v: bool, - ) -> (Option>, Vector2, Option>) { + ) -> (Option>, Vector2, Option>) + { let two: N = ::convert(2.0f64); let half: N = ::convert(0.5f64); @@ -342,7 +344,8 @@ where v_t: &mut Option, C>>, end: usize, eps: N, - ) -> (usize, usize) { + ) -> (usize, usize) + { let mut n = end; while n > 0 { @@ -406,7 +409,8 @@ where v_t: &mut Option, C>>, i: usize, end: usize, - ) { + ) + { let mut v = Vector2::new(b.off_diagonal[i], b.diagonal[i + 1]); b.off_diagonal[i] = N::zero(); @@ -440,7 +444,8 @@ where u: &mut Option>>, v_t: &mut Option, C>>, i: usize, - ) { + ) + { let mut v = Vector2::new(b.diagonal[i], b.off_diagonal[i]); b.off_diagonal[i] = N::zero(); @@ -502,9 +507,7 @@ where /// Panics if the right- and left- singular vectors have not been computed at /// construction-time. pub fn pseudo_inverse(mut self, eps: N) -> MatrixMN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { assert!( eps >= N::zero(), "SVD pseudo inverse: the epsilon must be non-negative." @@ -601,7 +604,8 @@ where compute_v: bool, eps: N, max_niter: usize, - ) -> Option> { + ) -> Option> + { SVD::try_new(self.into_owned(), compute_u, compute_v, eps, max_niter) } @@ -622,9 +626,7 @@ where /// /// All singular values below `eps` are considered equal to 0. pub fn pseudo_inverse(self, eps: N) -> MatrixMN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { SVD::new(self.clone_owned(), true, true).pseudo_inverse(eps) } } diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index f1e7b0265..bf4ac8c25 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -18,26 +18,21 @@ use linalg::SymmetricTridiagonal; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator, VectorN: Serialize, - MatrixN: Serialize" - )) + MatrixN: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator, VectorN: Deserialize<'de>, - MatrixN: Deserialize<'de>" - )) + MatrixN: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct SymmetricEigen -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { /// The eigenvectors of the decomposed matrix. pub eigenvectors: MatrixN, @@ -51,11 +46,11 @@ where DefaultAllocator: Allocator + Allocator, MatrixN: Copy, VectorN: Copy, -{} +{ +} impl SymmetricEigen -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { /// Computes the eigendecomposition of the given symmetric matrix. /// @@ -306,8 +301,7 @@ pub fn wilkinson_shift(tmm: N, tnn: N, tmn: N) -> N { * */ impl, S: Storage> SquareMatrix -where - DefaultAllocator: Allocator + Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator + Allocator> { /// Computes the eigendecomposition of this symmetric matrix. 
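Usage sketch for the SVD code above: `pseudo_inverse` at this revision consumes the decomposition and returns the matrix directly, so the Moore-Penrose identity `M * M⁺ * M ≈ M` can be checked as follows (the `.svd(compute_u, compute_v)` entry point is assumed).

    use nalgebra::Matrix3;

    let m = Matrix3::new(2.0, -1.0, 0.0,
                         -1.0, 2.0, -1.0,
                         0.0, -1.0, 2.0);
    let svd = m.svd(true, true);
    let pinv = svd.pseudo_inverse(1.0e-10);
    assert!((m * pinv * m - m).norm() < 1.0e-7);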
/// diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index 37d6b1949..e1ed5ba52 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -13,26 +13,21 @@ use linalg::householder; #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: Serialize, - VectorN>: Serialize" - )) + VectorN>: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: Deserialize<'de>, - VectorN>: Deserialize<'de>" - )) + VectorN>: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct SymmetricTridiagonal> -where - DefaultAllocator: Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator> { tri: MatrixN, off_diagonal: VectorN>, @@ -43,11 +38,11 @@ where DefaultAllocator: Allocator + Allocator>, MatrixN: Copy, VectorN>: Copy, -{} +{ +} impl> SymmetricTridiagonal -where - DefaultAllocator: Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator> { /// Computes the tridiagonalization of the symmetric matrix `m`. /// @@ -100,9 +95,7 @@ where /// Retrieve the orthogonal transformation, diagonal, and off diagonal elements of this /// decomposition. pub fn unpack(self) -> (MatrixN, VectorN, VectorN>) - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let diag = self.diagonal(); let q = self.q(); @@ -111,9 +104,7 @@ where /// Retrieve the diagonal, and off diagonal elements of this decomposition. pub fn unpack_tridiagonal(self) -> (VectorN, VectorN>) - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let diag = self.diagonal(); (diag, self.off_diagonal) @@ -121,17 +112,13 @@ where /// The diagonal components of this decomposition. pub fn diagonal(&self) -> VectorN - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { self.tri.diagonal() } /// The off-diagonal components of this decomposition. pub fn off_diagonal(&self) -> &VectorN> - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { &self.off_diagonal } @@ -156,8 +143,7 @@ where } impl, S: Storage> SquareMatrix -where - DefaultAllocator: Allocator + Allocator>, +where DefaultAllocator: Allocator + Allocator> { /// Computes the tridiagonalization of this symmetric matrix. 
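Sketch of the symmetric eigendecomposition whose impl block ends above; the `eigenvectors` and `eigenvalues` fields come from the hunks, while `.symmetric_eigen()` and `Matrix3::from_diagonal` are assumed from the public API.

    use nalgebra::Matrix3;

    let m = Matrix3::new(2.0, 1.0, 0.0,
                         1.0, 2.0, 1.0,
                         0.0, 1.0, 2.0);
    let eig = m.symmetric_eigen();
    // Reconstruct M as Q * diag(eigenvalues) * Q^T.
    let reconstructed = eig.eigenvectors
        * Matrix3::from_diagonal(&eig.eigenvalues)
        * eig.eigenvectors.transpose();
    assert!((reconstructed - m).norm() < 1.0e-7);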
/// diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 9bb03cdac..a02e8613d 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -81,8 +81,7 @@ pub trait CsStorageMut: #[derive(Clone, Debug, PartialEq)] pub struct CsVecStorage -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { pub(crate) shape: (R, C), pub(crate) p: VectorN, @@ -91,8 +90,7 @@ where } impl CsVecStorage -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { pub fn values(&self) -> &[N] { &self.vals @@ -108,8 +106,7 @@ where impl CsVecStorage where DefaultAllocator: Allocator {} impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type ColumnEntries = ColumnEntries<'a, N>; type ColumnRowIndices = iter::Cloned>; @@ -128,8 +125,7 @@ where } impl CsStorage for CsVecStorage -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { #[inline] fn shape(&self) -> (R, C) { @@ -174,8 +170,7 @@ where } impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { type ValuesMut = slice::IterMut<'a, N>; type ColumnEntriesMut = iter::Zip>, slice::IterMut<'a, N>>; @@ -195,10 +190,8 @@ where } } -impl CsStorageMut for CsVecStorage where - DefaultAllocator: Allocator -{ -} +impl CsStorageMut for CsVecStorage where DefaultAllocator: Allocator +{} /* pub struct CsSliceStorage<'a, N: Scalar, R: Dim, C: DimAdd> { @@ -223,8 +216,7 @@ pub struct CsMatrix< pub type CsVector> = CsMatrix; impl CsMatrix -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self { let mut i = Vec::with_capacity(nvals); @@ -303,7 +295,8 @@ impl CsMatrix { p: Vec, i: Vec, vals: Vec, - ) -> Self { + ) -> Self + { let nrows = Dynamic::new(nrows); let ncols = Dynamic::new(ncols); let p = DVector::from_data(MatrixVec::new(ncols, U1, p)); @@ -368,9 +361,7 @@ impl> CsMatrix { } pub fn transpose(&self) -> CsMatrix - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { let (nrows, ncols) = self.data.shape(); let nvals = self.len(); @@ -408,13 +399,10 @@ impl> CsMatrix { } impl CsMatrix -where - DefaultAllocator: Allocator, +where DefaultAllocator: Allocator { pub(crate) fn sort(&mut self) - where - DefaultAllocator: Allocator, - { + where DefaultAllocator: Allocator { // Size = R let nrows = self.data.shape().0; let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows, U1) }; @@ -446,9 +434,7 @@ where // Remove dupliate entries on a sorted CsMatrix. pub(crate) fn dedup(&mut self) - where - N: Zero + ClosedAdd, - { + where N: Zero + ClosedAdd { let mut curr_i = 0; for j in 0..self.ncols() { diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index e35d7e930..000332e7c 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -13,8 +13,7 @@ use storage::{Storage, StorageMut}; use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; pub struct CsCholesky -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { // Non-zero pattern of the original matrix upper-triangular part. 
// Unlike the original matrix, the `original_p` array does contain the last sentinel value @@ -33,8 +32,7 @@ where } impl CsCholesky -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { /// Computes the cholesky decomposition of the sparse matrix `m`. pub fn new(m: &CsMatrix) -> Self { @@ -263,7 +261,8 @@ where tree: &[usize], marks: &mut Vec, out: &mut Vec, - ) { + ) + { marks.clear(); marks.resize(tree.len(), false); diff --git a/src/sparse/cs_matrix_conversion.rs b/src/sparse/cs_matrix_conversion.rs index b764bf10e..f37ef61dd 100644 --- a/src/sparse/cs_matrix_conversion.rs +++ b/src/sparse/cs_matrix_conversion.rs @@ -20,14 +20,14 @@ impl<'a, N: Scalar + Zero + ClosedAdd> CsMatrix { irows: &[usize], icols: &[usize], vals: &[N], - ) -> Self { + ) -> Self + { Self::from_triplet_generic(Dynamic::new(nrows), Dynamic::new(ncols), irows, icols, vals) } } impl<'a, N: Scalar + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix -where - DefaultAllocator: Allocator + Allocator, +where DefaultAllocator: Allocator + Allocator { pub fn from_triplet_generic( nrows: R, @@ -35,7 +35,8 @@ where irows: &[usize], icols: &[usize], vals: &[N], - ) -> Self { + ) -> Self + { assert!(vals.len() == irows.len()); assert!(vals.len() == icols.len()); diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index 3d88455bf..9cd25def7 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -223,7 +223,8 @@ impl> CsMatrix { visited: &mut [bool], stack: &mut Vec<(usize, Range)>, xi: &mut Vec, - ) { + ) + { 'recursion: while let Some((j, rng)) = stack.pop() { visited[j] = true; @@ -260,7 +261,8 @@ impl> CsMatrix { visited: &mut [bool], stack: &mut Vec, xi: &mut Vec, - ) { + ) + { if !visited[start] { stack.clear(); stack.push(start); diff --git a/src/sparse/cs_utils.rs b/src/sparse/cs_utils.rs index a79ee4d98..3c5db43e4 100644 --- a/src/sparse/cs_utils.rs +++ b/src/sparse/cs_utils.rs @@ -2,9 +2,7 @@ use allocator::Allocator; use {DefaultAllocator, Dim, VectorN}; pub fn cumsum(a: &mut VectorN, b: &mut VectorN) -> usize -where - DefaultAllocator: Allocator, -{ +where DefaultAllocator: Allocator { assert!(a.len() == b.len()); let mut sum = 0; diff --git a/tests/geometry/isometry.rs b/tests/geometry/isometry.rs index c72a2475b..936f4a099 100644 --- a/tests/geometry/isometry.rs +++ b/tests/geometry/isometry.rs @@ -25,27 +25,35 @@ quickcheck!( let viewmatrix = Isometry3::look_at_rh(&eye, &target, &up); let origin = Point3::origin(); - relative_eq!(viewmatrix * eye, origin, epsilon = 1.0e-7) && - relative_eq!((viewmatrix * (target - eye)).normalize(), -Vector3::z(), epsilon = 1.0e-7) + relative_eq!(viewmatrix * eye, origin, epsilon = 1.0e-7) + && relative_eq!( + (viewmatrix * (target - eye)).normalize(), + -Vector3::z(), + epsilon = 1.0e-7 + ) } fn observer_frame_3(eye: Point3, target: Point3, up: Vector3) -> bool { let observer = Isometry3::new_observer_frame(&eye, &target, &up); let origin = Point3::origin(); - relative_eq!(observer * origin, eye, epsilon = 1.0e-7) && - relative_eq!(observer * Vector3::z(), (target - eye).normalize(), epsilon = 1.0e-7) + relative_eq!(observer * origin, eye, epsilon = 1.0e-7) + && relative_eq!( + observer * Vector3::z(), + (target - eye).normalize(), + epsilon = 1.0e-7 + ) } fn inverse_is_identity(i: Isometry3, p: Point3, v: Vector3) -> bool { let ii = i.inverse(); - relative_eq!(i * ii, Isometry3::identity(), epsilon = 1.0e-7) && - relative_eq!(ii * i, Isometry3::identity(), epsilon = 1.0e-7) && - 
relative_eq!((i * ii) * p, p, epsilon = 1.0e-7) && - relative_eq!((ii * i) * p, p, epsilon = 1.0e-7) && - relative_eq!((i * ii) * v, v, epsilon = 1.0e-7) && - relative_eq!((ii * i) * v, v, epsilon = 1.0e-7) + relative_eq!(i * ii, Isometry3::identity(), epsilon = 1.0e-7) + && relative_eq!(ii * i, Isometry3::identity(), epsilon = 1.0e-7) + && relative_eq!((i * ii) * p, p, epsilon = 1.0e-7) + && relative_eq!((ii * i) * p, p, epsilon = 1.0e-7) + && relative_eq!((i * ii) * v, v, epsilon = 1.0e-7) + && relative_eq!((ii * i) * v, v, epsilon = 1.0e-7) } fn inverse_is_parts_inversion(t: Translation3, r: UnitQuaternion) -> bool { @@ -54,14 +62,29 @@ quickcheck!( } fn multiply_equals_alga_transform(i: Isometry3, v: Vector3, p: Point3) -> bool { - i * v == i.transform_vector(&v) && - i * p == i.transform_point(&p) && - relative_eq!(i.inverse() * v, i.inverse_transform_vector(&v), epsilon = 1.0e-7) && - relative_eq!(i.inverse() * p, i.inverse_transform_point(&p), epsilon = 1.0e-7) + i * v == i.transform_vector(&v) + && i * p == i.transform_point(&p) + && relative_eq!( + i.inverse() * v, + i.inverse_transform_vector(&v), + epsilon = 1.0e-7 + ) + && relative_eq!( + i.inverse() * p, + i.inverse_transform_point(&p), + epsilon = 1.0e-7 + ) } - fn composition2(i: Isometry2, uc: UnitComplex, r: Rotation2, - t: Translation2, v: Vector2, p: Point2) -> bool { + fn composition2( + i: Isometry2, + uc: UnitComplex, + r: Rotation2, + t: Translation2, + v: Vector2, + p: Point2, + ) -> bool + { // (rotation × translation) * point = rotation × (translation * point) relative_eq!((uc * t) * v, uc * v, epsilon = 1.0e-7) && relative_eq!((r * t) * v, r * v, epsilon = 1.0e-7) && @@ -91,8 +114,15 @@ quickcheck!( relative_eq!((i * t) * p, i * (t * p), epsilon = 1.0e-7) } - fn composition3(i: Isometry3, uq: UnitQuaternion, r: Rotation3, - t: Translation3, v: Vector3, p: Point3) -> bool { + fn composition3( + i: Isometry3, + uq: UnitQuaternion, + r: Rotation3, + t: Translation3, + v: Vector3, + p: Point3, + ) -> bool + { // (rotation × translation) * point = rotation × (translation * point) relative_eq!((uq * t) * v, uq * v, epsilon = 1.0e-7) && relative_eq!((r * t) * v, r * v, epsilon = 1.0e-7) && @@ -122,11 +152,18 @@ quickcheck!( relative_eq!((i * t) * p, i * (t * p), epsilon = 1.0e-7) } - fn all_op_exist(i: Isometry3, uq: UnitQuaternion, t: Translation3, - v: Vector3, p: Point3, r: Rotation3) -> bool { - let iMi = i * i; + fn all_op_exist( + i: Isometry3, + uq: UnitQuaternion, + t: Translation3, + v: Vector3, + p: Point3, + r: Rotation3, + ) -> bool + { + let iMi = i * i; let iMuq = i * uq; - let iDi = i / i; + let iDi = i / i; let iDuq = i / uq; let iMp = i * p; @@ -135,13 +172,13 @@ quickcheck!( let iMt = i * t; let tMi = t * i; - let tMr = t * r; + let tMr = t * r; let tMuq = t * uq; let uqMi = uq * i; let uqDi = uq / i; - let rMt = r * t; + let rMt = r * t; let uqMt = uq * t; let mut iMt1 = i; @@ -174,75 +211,57 @@ quickcheck!( iDuq1 /= uq; iDuq2 /= &uq; - iMt == iMt1 && - iMt == iMt2 && - - iMi == iMi1 && - iMi == iMi2 && - - iMuq == iMuq1 && - iMuq == iMuq2 && - - iDi == iDi1 && - iDi == iDi2 && - - iDuq == iDuq1 && - iDuq == iDuq2 && - - iMi == &i * &i && - iMi == i * &i && - iMi == &i * i && - - iMuq == &i * &uq && - iMuq == i * &uq && - iMuq == &i * uq && - - iDi == &i / &i && - iDi == i / &i && - iDi == &i / i && - - iDuq == &i / &uq && - iDuq == i / &uq && - iDuq == &i / uq && - - iMp == &i * &p && - iMp == i * &p && - iMp == &i * p && - - iMv == &i * &v && - iMv == i * &v && - iMv == &i * v && - - iMt == &i * &t 
&& - iMt == i * &t && - iMt == &i * t && - - tMi == &t * &i && - tMi == t * &i && - tMi == &t * i && - - tMr == &t * &r && - tMr == t * &r && - tMr == &t * r && - - tMuq == &t * &uq && - tMuq == t * &uq && - tMuq == &t * uq && - - uqMi == &uq * &i && - uqMi == uq * &i && - uqMi == &uq * i && - - uqDi == &uq / &i && - uqDi == uq / &i && - uqDi == &uq / i && - - rMt == &r * &t && - rMt == r * &t && - rMt == &r * t && - - uqMt == &uq * &t && - uqMt == uq * &t && - uqMt == &uq * t + iMt == iMt1 + && iMt == iMt2 + && iMi == iMi1 + && iMi == iMi2 + && iMuq == iMuq1 + && iMuq == iMuq2 + && iDi == iDi1 + && iDi == iDi2 + && iDuq == iDuq1 + && iDuq == iDuq2 + && iMi == &i * &i + && iMi == i * &i + && iMi == &i * i + && iMuq == &i * &uq + && iMuq == i * &uq + && iMuq == &i * uq + && iDi == &i / &i + && iDi == i / &i + && iDi == &i / i + && iDuq == &i / &uq + && iDuq == i / &uq + && iDuq == &i / uq + && iMp == &i * &p + && iMp == i * &p + && iMp == &i * p + && iMv == &i * &v + && iMv == i * &v + && iMv == &i * v + && iMt == &i * &t + && iMt == i * &t + && iMt == &i * t + && tMi == &t * &i + && tMi == t * &i + && tMi == &t * i + && tMr == &t * &r + && tMr == t * &r + && tMr == &t * r + && tMuq == &t * &uq + && tMuq == t * &uq + && tMuq == &t * uq + && uqMi == &uq * &i + && uqMi == uq * &i + && uqMi == &uq * i + && uqDi == &uq / &i + && uqDi == uq / &i + && uqDi == &uq / i + && rMt == &r * &t + && rMt == r * &t + && rMt == &r * t + && uqMt == &uq * &t + && uqMt == uq * &t + && uqMt == &uq * t } ); diff --git a/tests/geometry/point.rs b/tests/geometry/point.rs index 90c515873..ad990048b 100644 --- a/tests/geometry/point.rs +++ b/tests/geometry/point.rs @@ -87,7 +87,9 @@ fn to_homogeneous() { assert_eq!(a.to_homogeneous(), expected); } -quickcheck!(fn point_sub(pt1: Point3, pt2: Point3) -> bool { - let dpt = &pt2 - &pt1; - relative_eq!(pt2, pt1 + dpt, epsilon = 1.0e-7) -}); +quickcheck!( + fn point_sub(pt1: Point3, pt2: Point3) -> bool { + let dpt = &pt2 - &pt1; + relative_eq!(pt2, pt1 + dpt, epsilon = 1.0e-7) + } +); diff --git a/tests/geometry/quaternion.rs b/tests/geometry/quaternion.rs index c5b915549..7d903450d 100644 --- a/tests/geometry/quaternion.rs +++ b/tests/geometry/quaternion.rs @@ -10,15 +10,15 @@ quickcheck!( * */ fn from_euler_angles(r: f64, p: f64, y: f64) -> bool { - let roll = UnitQuaternion::from_euler_angles(r, 0.0, 0.0); + let roll = UnitQuaternion::from_euler_angles(r, 0.0, 0.0); let pitch = UnitQuaternion::from_euler_angles(0.0, p, 0.0); - let yaw = UnitQuaternion::from_euler_angles(0.0, 0.0, y); + let yaw = UnitQuaternion::from_euler_angles(0.0, 0.0, y); let rpy = UnitQuaternion::from_euler_angles(r, p, y); - let rroll = roll.to_rotation_matrix(); + let rroll = roll.to_rotation_matrix(); let rpitch = pitch.to_rotation_matrix(); - let ryaw = yaw.to_rotation_matrix(); + let ryaw = yaw.to_rotation_matrix(); relative_eq!(rroll[(0, 0)], 1.0, epsilon = 1.0e-7) && // rotation wrt. x axis. relative_eq!(rpitch[(1, 1)], 1.0, epsilon = 1.0e-7) && // rotation wrt. y axis. @@ -29,22 +29,24 @@ quickcheck!( fn to_euler_angles(r: f64, p: f64, y: f64) -> bool { let rpy = UnitQuaternion::from_euler_angles(r, p, y); let (roll, pitch, yaw) = rpy.to_euler_angles(); - relative_eq!(UnitQuaternion::from_euler_angles(roll, pitch, yaw), rpy, epsilon = 1.0e-7) + relative_eq!( + UnitQuaternion::from_euler_angles(roll, pitch, yaw), + rpy, + epsilon = 1.0e-7 + ) } - /* * * From/to rotation matrix. 
* */ fn unit_quaternion_rotation_conversion(q: UnitQuaternion) -> bool { - let r = q.to_rotation_matrix(); + let r = q.to_rotation_matrix(); let qq = UnitQuaternion::from_rotation_matrix(&r); let rr = qq.to_rotation_matrix(); - relative_eq!(q, qq, epsilon = 1.0e-7) && - relative_eq!(r, rr, epsilon = 1.0e-7) + relative_eq!(q, qq, epsilon = 1.0e-7) && relative_eq!(r, rr, epsilon = 1.0e-7) } /* @@ -52,23 +54,26 @@ quickcheck!( * Point/Vector transformation. * */ - fn unit_quaternion_transformation(q: UnitQuaternion, v: Vector3, p: Point3) -> bool { + fn unit_quaternion_transformation( + q: UnitQuaternion, + v: Vector3, + p: Point3, + ) -> bool + { let r = q.to_rotation_matrix(); let rv = r * v; let rp = r * p; - relative_eq!( q * v, rv, epsilon = 1.0e-7) && - relative_eq!( q * &v, rv, epsilon = 1.0e-7) && - relative_eq!(&q * v, rv, epsilon = 1.0e-7) && - relative_eq!(&q * &v, rv, epsilon = 1.0e-7) && - - relative_eq!( q * p, rp, epsilon = 1.0e-7) && - relative_eq!( q * &p, rp, epsilon = 1.0e-7) && - relative_eq!(&q * p, rp, epsilon = 1.0e-7) && - relative_eq!(&q * &p, rp, epsilon = 1.0e-7) + relative_eq!(q * v, rv, epsilon = 1.0e-7) + && relative_eq!(q * &v, rv, epsilon = 1.0e-7) + && relative_eq!(&q * v, rv, epsilon = 1.0e-7) + && relative_eq!(&q * &v, rv, epsilon = 1.0e-7) + && relative_eq!(q * p, rp, epsilon = 1.0e-7) + && relative_eq!(q * &p, rp, epsilon = 1.0e-7) + && relative_eq!(&q * p, rp, epsilon = 1.0e-7) + && relative_eq!(&q * &p, rp, epsilon = 1.0e-7) } - /* * * Inversion. @@ -76,15 +81,14 @@ quickcheck!( */ fn unit_quaternion_inv(q: UnitQuaternion) -> bool { let iq = q.inverse(); - relative_eq!(&iq * &q, UnitQuaternion::identity(), epsilon = 1.0e-7) && - relative_eq!( iq * &q, UnitQuaternion::identity(), epsilon = 1.0e-7) && - relative_eq!(&iq * q, UnitQuaternion::identity(), epsilon = 1.0e-7) && - relative_eq!( iq * q, UnitQuaternion::identity(), epsilon = 1.0e-7) && - - relative_eq!(&q * &iq, UnitQuaternion::identity(), epsilon = 1.0e-7) && - relative_eq!( q * &iq, UnitQuaternion::identity(), epsilon = 1.0e-7) && - relative_eq!(&q * iq, UnitQuaternion::identity(), epsilon = 1.0e-7) && - relative_eq!( q * iq, UnitQuaternion::identity(), epsilon = 1.0e-7) + relative_eq!(&iq * &q, UnitQuaternion::identity(), epsilon = 1.0e-7) + && relative_eq!(iq * &q, UnitQuaternion::identity(), epsilon = 1.0e-7) + && relative_eq!(&iq * q, UnitQuaternion::identity(), epsilon = 1.0e-7) + && relative_eq!(iq * q, UnitQuaternion::identity(), epsilon = 1.0e-7) + && relative_eq!(&q * &iq, UnitQuaternion::identity(), epsilon = 1.0e-7) + && relative_eq!(q * &iq, UnitQuaternion::identity(), epsilon = 1.0e-7) + && relative_eq!(&q * iq, UnitQuaternion::identity(), epsilon = 1.0e-7) + && relative_eq!(q * iq, UnitQuaternion::identity(), epsilon = 1.0e-7) } /* @@ -116,28 +120,34 @@ quickcheck!( // Test that all operators (incl. all combinations of references) work. // See the top comment on `geometry/quaternion_ops.rs` for details on which operations are // supported. 
- fn all_op_exist(q: Quaternion, uq: UnitQuaternion, - v: Vector3, p: Point3, r: Rotation3, - s: f64) -> bool { + fn all_op_exist( + q: Quaternion, + uq: UnitQuaternion, + v: Vector3, + p: Point3, + r: Rotation3, + s: f64, + ) -> bool + { let uv = Unit::new_normalize(v); let qpq = q + q; let qmq = q - q; let qMq = q * q; - let mq = -q; + let mq = -q; let qMs = q * s; let qDs = q / s; let sMq = s * q; let uqMuq = uq * uq; - let uqMr = uq * r; - let rMuq = r * uq; + let uqMr = uq * r; + let rMuq = r * uq; let uqDuq = uq / uq; - let uqDr = uq / r; - let rDuq = r / uq; + let uqDr = uq / r; + let rDuq = r / uq; - let uqMp = uq * p; - let uqMv = uq * v; + let uqMp = uq * p; + let uqMv = uq * v; let uqMuv = uq * uv; let mut qMs1 = q; @@ -186,81 +196,60 @@ quickcheck!( uqDr1 /= r; uqDr2 /= &r; - qMs1 == qMs && - - qMq1 == qMq && - qMq1 == qMq2 && - - qpq1 == qpq && - qpq1 == qpq2 && - - qmq1 == qmq && - qmq1 == qmq2 && - - uqMuq1 == uqMuq && - uqMuq1 == uqMuq2 && - - uqMr1 == uqMr && - uqMr1 == uqMr2 && - - uqDuq1 == uqDuq && - uqDuq1 == uqDuq2 && - - uqDr1 == uqDr && - uqDr1 == uqDr2 && - - qpq == &q + &q && - qpq == q + &q && - qpq == &q + q && - - qmq == &q - &q && - qmq == q - &q && - qmq == &q - q && - - qMq == &q * &q && - qMq == q * &q && - qMq == &q * q && - - mq == -&q && - - qMs == &q * s && - qDs == &q / s && - sMq == s * &q && - - uqMuq == &uq * &uq && - uqMuq == uq * &uq && - uqMuq == &uq * uq && - - uqMr == &uq * &r && - uqMr == uq * &r && - uqMr == &uq * r && - - rMuq == &r * &uq && - rMuq == r * &uq && - rMuq == &r * uq && - - uqDuq == &uq / &uq && - uqDuq == uq / &uq && - uqDuq == &uq / uq && - - uqDr == &uq / &r && - uqDr == uq / &r && - uqDr == &uq / r && - - rDuq == &r / &uq && - rDuq == r / &uq && - rDuq == &r / uq && - - uqMp == &uq * &p && - uqMp == uq * &p && - uqMp == &uq * p && - - uqMv == &uq * &v && - uqMv == uq * &v && - uqMv == &uq * v && - - uqMuv == &uq * &uv && - uqMuv == uq * &uv && - uqMuv == &uq * uv + qMs1 == qMs + && qMq1 == qMq + && qMq1 == qMq2 + && qpq1 == qpq + && qpq1 == qpq2 + && qmq1 == qmq + && qmq1 == qmq2 + && uqMuq1 == uqMuq + && uqMuq1 == uqMuq2 + && uqMr1 == uqMr + && uqMr1 == uqMr2 + && uqDuq1 == uqDuq + && uqDuq1 == uqDuq2 + && uqDr1 == uqDr + && uqDr1 == uqDr2 + && qpq == &q + &q + && qpq == q + &q + && qpq == &q + q + && qmq == &q - &q + && qmq == q - &q + && qmq == &q - q + && qMq == &q * &q + && qMq == q * &q + && qMq == &q * q + && mq == -&q + && qMs == &q * s + && qDs == &q / s + && sMq == s * &q + && uqMuq == &uq * &uq + && uqMuq == uq * &uq + && uqMuq == &uq * uq + && uqMr == &uq * &r + && uqMr == uq * &r + && uqMr == &uq * r + && rMuq == &r * &uq + && rMuq == r * &uq + && rMuq == &r * uq + && uqDuq == &uq / &uq + && uqDuq == uq / &uq + && uqDuq == &uq / uq + && uqDr == &uq / &r + && uqDr == uq / &r + && uqDr == &uq / r + && rDuq == &r / &uq + && rDuq == r / &uq + && rDuq == &r / uq + && uqMp == &uq * &p + && uqMp == uq * &p + && uqMp == &uq * p + && uqMv == &uq * &v + && uqMv == uq * &v + && uqMv == &uq * v + && uqMuv == &uq * &uv + && uqMuv == uq * &uv + && uqMuv == &uq * uv } ); diff --git a/tests/geometry/similarity.rs b/tests/geometry/similarity.rs index e9fde4667..68b86943d 100644 --- a/tests/geometry/similarity.rs +++ b/tests/geometry/similarity.rs @@ -8,33 +8,57 @@ quickcheck!( fn inverse_is_identity(i: Similarity3, p: Point3, v: Vector3) -> bool { let ii = i.inverse(); - relative_eq!(i * ii, Similarity3::identity(), epsilon = 1.0e-7) && - relative_eq!(ii * i, Similarity3::identity(), epsilon = 1.0e-7) && - relative_eq!((i * ii) 
* p, p, epsilon = 1.0e-7) && - relative_eq!((ii * i) * p, p, epsilon = 1.0e-7) && - relative_eq!((i * ii) * v, v, epsilon = 1.0e-7) && - relative_eq!((ii * i) * v, v, epsilon = 1.0e-7) + relative_eq!(i * ii, Similarity3::identity(), epsilon = 1.0e-7) + && relative_eq!(ii * i, Similarity3::identity(), epsilon = 1.0e-7) + && relative_eq!((i * ii) * p, p, epsilon = 1.0e-7) + && relative_eq!((ii * i) * p, p, epsilon = 1.0e-7) + && relative_eq!((i * ii) * v, v, epsilon = 1.0e-7) + && relative_eq!((ii * i) * v, v, epsilon = 1.0e-7) } - fn inverse_is_parts_inversion(t: Translation3, r: UnitQuaternion, scaling: f64) -> bool { + fn inverse_is_parts_inversion( + t: Translation3, + r: UnitQuaternion, + scaling: f64, + ) -> bool + { if relative_eq!(scaling, 0.0) { true - } - else { + } else { let s = Similarity3::from_isometry(t * r, scaling); s.inverse() == Similarity3::from_scaling(1.0 / scaling) * r.inverse() * t.inverse() } } - fn multiply_equals_alga_transform(s: Similarity3, v: Vector3, p: Point3) -> bool { - s * v == s.transform_vector(&v) && - s * p == s.transform_point(&p) && - relative_eq!(s.inverse() * v, s.inverse_transform_vector(&v), epsilon = 1.0e-7) && - relative_eq!(s.inverse() * p, s.inverse_transform_point(&p), epsilon = 1.0e-7) + fn multiply_equals_alga_transform( + s: Similarity3, + v: Vector3, + p: Point3, + ) -> bool + { + s * v == s.transform_vector(&v) + && s * p == s.transform_point(&p) + && relative_eq!( + s.inverse() * v, + s.inverse_transform_vector(&v), + epsilon = 1.0e-7 + ) + && relative_eq!( + s.inverse() * p, + s.inverse_transform_point(&p), + epsilon = 1.0e-7 + ) } - fn composition(i: Isometry3, uq: UnitQuaternion, - t: Translation3, v: Vector3, p: Point3, scaling: f64) -> bool { + fn composition( + i: Isometry3, + uq: UnitQuaternion, + t: Translation3, + v: Vector3, + p: Point3, + scaling: f64, + ) -> bool + { if relative_eq!(scaling, 0.0) { return true; } @@ -122,11 +146,18 @@ quickcheck!( relative_eq!((s * i * t) * p, scaling * (i * (t * p)), epsilon = 1.0e-7) } - fn all_op_exist(s: Similarity3, i: Isometry3, uq: UnitQuaternion, - t: Translation3, v: Vector3, p: Point3) -> bool { - let sMs = s * s; + fn all_op_exist( + s: Similarity3, + i: Isometry3, + uq: UnitQuaternion, + t: Translation3, + v: Vector3, + p: Point3, + ) -> bool + { + let sMs = s * s; let sMuq = s * uq; - let sDs = s / s; + let sDs = s / s; let sDuq = s / uq; let sMp = s * p; @@ -186,81 +217,61 @@ quickcheck!( sDi1 /= i; sDi2 /= &i; - sMt == sMt1 && - sMt == sMt2 && - - sMs == sMs1 && - sMs == sMs2 && - - sMuq == sMuq1 && - sMuq == sMuq2 && - - sMi == sMi1 && - sMi == sMi2 && - - sDs == sDs1 && - sDs == sDs2 && - - sDuq == sDuq1 && - sDuq == sDuq2 && - - sDi == sDi1 && - sDi == sDi2 && - - sMs == &s * &s && - sMs == s * &s && - sMs == &s * s && - - sMuq == &s * &uq && - sMuq == s * &uq && - sMuq == &s * uq && - - sDs == &s / &s && - sDs == s / &s && - sDs == &s / s && - - sDuq == &s / &uq && - sDuq == s / &uq && - sDuq == &s / uq && - - sMp == &s * &p && - sMp == s * &p && - sMp == &s * p && - - sMv == &s * &v && - sMv == s * &v && - sMv == &s * v && - - sMt == &s * &t && - sMt == s * &t && - sMt == &s * t && - - tMs == &t * &s && - tMs == t * &s && - tMs == &t * s && - - uqMs == &uq * &s && - uqMs == uq * &s && - uqMs == &uq * s && - - uqDs == &uq / &s && - uqDs == uq / &s && - uqDs == &uq / s && - - sMi == &s * &i && - sMi == s * &i && - sMi == &s * i && - - sDi == &s / &i && - sDi == s / &i && - sDi == &s / i && - - iMs == &i * &s && - iMs == i * &s && - iMs == &i * s && - - iDs == &i / &s && - 
iDs == i / &s && - iDs == &i / s + sMt == sMt1 + && sMt == sMt2 + && sMs == sMs1 + && sMs == sMs2 + && sMuq == sMuq1 + && sMuq == sMuq2 + && sMi == sMi1 + && sMi == sMi2 + && sDs == sDs1 + && sDs == sDs2 + && sDuq == sDuq1 + && sDuq == sDuq2 + && sDi == sDi1 + && sDi == sDi2 + && sMs == &s * &s + && sMs == s * &s + && sMs == &s * s + && sMuq == &s * &uq + && sMuq == s * &uq + && sMuq == &s * uq + && sDs == &s / &s + && sDs == s / &s + && sDs == &s / s + && sDuq == &s / &uq + && sDuq == s / &uq + && sDuq == &s / uq + && sMp == &s * &p + && sMp == s * &p + && sMp == &s * p + && sMv == &s * &v + && sMv == s * &v + && sMv == &s * v + && sMt == &s * &t + && sMt == s * &t + && sMt == &s * t + && tMs == &t * &s + && tMs == t * &s + && tMs == &t * s + && uqMs == &uq * &s + && uqMs == uq * &s + && uqMs == &uq * s + && uqDs == &uq / &s + && uqDs == uq / &s + && uqDs == &uq / s + && sMi == &s * &i + && sMi == s * &i + && sMi == &s * i + && sDi == &s / &i + && sDi == s / &i + && sDi == &s / i + && iMs == &i * &s + && iMs == i * &s + && iMs == &i * s + && iDs == &i / &s + && iDs == i / &s + && iDs == &i / s } ); diff --git a/tests/geometry/unit_complex.rs b/tests/geometry/unit_complex.rs index 7da0d20c0..88988aa87 100644 --- a/tests/geometry/unit_complex.rs +++ b/tests/geometry/unit_complex.rs @@ -4,19 +4,17 @@ use na::{Point2, Rotation2, Unit, UnitComplex, Vector2}; quickcheck!( - /* * * From/to rotation matrix. * */ fn unit_complex_rotation_conversion(c: UnitComplex) -> bool { - let r = c.to_rotation_matrix(); + let r = c.to_rotation_matrix(); let cc = UnitComplex::from_rotation_matrix(&r); let rr = cc.to_rotation_matrix(); - relative_eq!(c, cc, epsilon = 1.0e-7) && - relative_eq!(r, rr, epsilon = 1.0e-7) + relative_eq!(c, cc, epsilon = 1.0e-7) && relative_eq!(r, rr, epsilon = 1.0e-7) } /* @@ -25,19 +23,18 @@ quickcheck!( * */ fn unit_complex_transformation(c: UnitComplex, v: Vector2, p: Point2) -> bool { - let r = c.to_rotation_matrix(); + let r = c.to_rotation_matrix(); let rv = r * v; let rp = r * p; - relative_eq!( c * v, rv, epsilon = 1.0e-7) && - relative_eq!( c * &v, rv, epsilon = 1.0e-7) && - relative_eq!(&c * v, rv, epsilon = 1.0e-7) && - relative_eq!(&c * &v, rv, epsilon = 1.0e-7) && - - relative_eq!( c * p, rp, epsilon = 1.0e-7) && - relative_eq!( c * &p, rp, epsilon = 1.0e-7) && - relative_eq!(&c * p, rp, epsilon = 1.0e-7) && - relative_eq!(&c * &p, rp, epsilon = 1.0e-7) + relative_eq!(c * v, rv, epsilon = 1.0e-7) + && relative_eq!(c * &v, rv, epsilon = 1.0e-7) + && relative_eq!(&c * v, rv, epsilon = 1.0e-7) + && relative_eq!(&c * &v, rv, epsilon = 1.0e-7) + && relative_eq!(c * p, rp, epsilon = 1.0e-7) + && relative_eq!(c * &p, rp, epsilon = 1.0e-7) + && relative_eq!(&c * p, rp, epsilon = 1.0e-7) + && relative_eq!(&c * &p, rp, epsilon = 1.0e-7) } /* @@ -47,15 +44,14 @@ quickcheck!( */ fn unit_complex_inv(c: UnitComplex) -> bool { let iq = c.inverse(); - relative_eq!(&iq * &c, UnitComplex::identity(), epsilon = 1.0e-7) && - relative_eq!( iq * &c, UnitComplex::identity(), epsilon = 1.0e-7) && - relative_eq!(&iq * c, UnitComplex::identity(), epsilon = 1.0e-7) && - relative_eq!( iq * c, UnitComplex::identity(), epsilon = 1.0e-7) && - - relative_eq!(&c * &iq, UnitComplex::identity(), epsilon = 1.0e-7) && - relative_eq!( c * &iq, UnitComplex::identity(), epsilon = 1.0e-7) && - relative_eq!(&c * iq, UnitComplex::identity(), epsilon = 1.0e-7) && - relative_eq!( c * iq, UnitComplex::identity(), epsilon = 1.0e-7) + relative_eq!(&iq * &c, UnitComplex::identity(), epsilon = 1.0e-7) + && 
relative_eq!(iq * &c, UnitComplex::identity(), epsilon = 1.0e-7) + && relative_eq!(&iq * c, UnitComplex::identity(), epsilon = 1.0e-7) + && relative_eq!(iq * c, UnitComplex::identity(), epsilon = 1.0e-7) + && relative_eq!(&c * &iq, UnitComplex::identity(), epsilon = 1.0e-7) + && relative_eq!(c * &iq, UnitComplex::identity(), epsilon = 1.0e-7) + && relative_eq!(&c * iq, UnitComplex::identity(), epsilon = 1.0e-7) + && relative_eq!(c * iq, UnitComplex::identity(), epsilon = 1.0e-7) } /* @@ -66,25 +62,30 @@ quickcheck!( fn unit_complex_mul_vector(c: UnitComplex, v: Vector2, p: Point2) -> bool { let r = c.to_rotation_matrix(); - relative_eq!(c * v, r * v, epsilon = 1.0e-7) && - relative_eq!(c * p, r * p, epsilon = 1.0e-7) + relative_eq!(c * v, r * v, epsilon = 1.0e-7) && relative_eq!(c * p, r * p, epsilon = 1.0e-7) } // Test that all operators (incl. all combinations of references) work. // See the top comment on `geometry/quaternion_ops.rs` for details on which operations are // supported. - fn all_op_exist(uc: UnitComplex, v: Vector2, p: Point2, r: Rotation2) -> bool { + fn all_op_exist( + uc: UnitComplex, + v: Vector2, + p: Point2, + r: Rotation2, + ) -> bool + { let uv = Unit::new_normalize(v); let ucMuc = uc * uc; - let ucMr = uc * r; - let rMuc = r * uc; + let ucMr = uc * r; + let rMuc = r * uc; let ucDuc = uc / uc; - let ucDr = uc / r; - let rDuc = r / uc; + let ucDr = uc / r; + let rDuc = r / uc; - let ucMp = uc * p; - let ucMv = uc * v; + let ucMp = uc * p; + let ucMv = uc * v; let ucMuv = uc * uv; let mut ucMuc1 = uc; @@ -111,52 +112,40 @@ quickcheck!( ucDr1 /= r; ucDr2 /= &r; - ucMuc1 == ucMuc && - ucMuc1 == ucMuc2 && - - ucMr1 == ucMr && - ucMr1 == ucMr2 && - - ucDuc1 == ucDuc && - ucDuc1 == ucDuc2 && - - ucDr1 == ucDr && - ucDr1 == ucDr2 && - - ucMuc == &uc * &uc && - ucMuc == uc * &uc && - ucMuc == &uc * uc && - - ucMr == &uc * &r && - ucMr == uc * &r && - ucMr == &uc * r && - - rMuc == &r * &uc && - rMuc == r * &uc && - rMuc == &r * uc && - - ucDuc == &uc / &uc && - ucDuc == uc / &uc && - ucDuc == &uc / uc && - - ucDr == &uc / &r && - ucDr == uc / &r && - ucDr == &uc / r && - - rDuc == &r / &uc && - rDuc == r / &uc && - rDuc == &r / uc && - - ucMp == &uc * &p && - ucMp == uc * &p && - ucMp == &uc * p && - - ucMv == &uc * &v && - ucMv == uc * &v && - ucMv == &uc * v && - - ucMuv == &uc * &uv && - ucMuv == uc * &uv && - ucMuv == &uc * uv + ucMuc1 == ucMuc + && ucMuc1 == ucMuc2 + && ucMr1 == ucMr + && ucMr1 == ucMr2 + && ucDuc1 == ucDuc + && ucDuc1 == ucDuc2 + && ucDr1 == ucDr + && ucDr1 == ucDr2 + && ucMuc == &uc * &uc + && ucMuc == uc * &uc + && ucMuc == &uc * uc + && ucMr == &uc * &r + && ucMr == uc * &r + && ucMr == &uc * r + && rMuc == &r * &uc + && rMuc == r * &uc + && rMuc == &r * uc + && ucDuc == &uc / &uc + && ucDuc == uc / &uc + && ucDuc == &uc / uc + && ucDr == &uc / &r + && ucDr == uc / &r + && ucDr == &uc / r + && rDuc == &r / &uc + && rDuc == r / &uc + && rDuc == &r / uc + && ucMp == &uc * &p + && ucMp == uc * &p + && ucMp == &uc * p + && ucMv == &uc * &v + && ucMv == uc * &v + && ucMv == &uc * v + && ucMuv == &uc * &uv + && ucMuv == uc * &uv + && ucMuv == &uc * uv } ); From 1866d59c4a5b0d8a8d867eba2fc3df3e04a08056 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 6 Nov 2018 18:32:31 +0100 Subject: [PATCH 15/25] Add rustfmt.toml. 
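For context (not part of the original commit message): the `where_single_line = true` option added below is what produces the collapsed `where` clauses seen throughout the reformatted hunks in this series. Roughly, the two forms look like this (illustrative only, mirroring the hunks above):

// Multi-line form, without the option:
impl<N: Scalar, R: Dim, C: Dim> CsVecStorage<N, R, C>
where
    DefaultAllocator: Allocator<usize, C>,
{ /* ... */ }

// Single-line form, with `where_single_line = true` and a single short bound:
impl<N: Scalar, R: Dim, C: Dim> CsVecStorage<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{ /* ... */ }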
--- rustfmt.toml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 rustfmt.toml diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..8dc8e61ae --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,3 @@ +unstable_features = true +indent_style = "Block" +where_single_line = true \ No newline at end of file From f43ab963ad259c03f3ecc03d7158200da403ad27 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 6 Nov 2018 19:25:07 +0100 Subject: [PATCH 16/25] Fix matrix market grammar. --- src/io/matrix_market.pest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io/matrix_market.pest b/src/io/matrix_market.pest index eafe5b829..e024ec57d 100644 --- a/src/io/matrix_market.pest +++ b/src/io/matrix_market.pest @@ -5,7 +5,7 @@ Header = { "%%" ~ (!NEWLINE ~ ANY)* } Shape = { Dimension ~ Dimension ~ Dimension } Document = { SOI ~ - NEWLINE ~ + NEWLINE* ~ Header ~ (NEWLINE ~ Comments)* ~ (NEWLINE ~ Shape) ~ From b4b66bddd28e73475e89507f16c1610485d8d21f Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 6 Nov 2018 19:25:27 +0100 Subject: [PATCH 17/25] Add comment about cs matrix multiplication implementation. --- src/sparse/cs_matrix_ops.rs | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index bf1dccdbf..5ebf099a2 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -175,6 +175,41 @@ where } } + // NOTE: the following has a lower complexity, but is slower in many cases, likely because + // of branching inside of the inner loop. + // + // let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); + // let mut timestamps = VectorN::zeros_generic(nrows1, U1); + // let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; + // let mut nz = 0; + // + // for j in 0..ncols2.value() { + // res.data.p[j] = nz; + // let new_size_bound = nz + nrows1.value(); + // res.data.i.resize(new_size_bound, 0); + // res.data.vals.resize(new_size_bound, N::zero()); + // + // for (i, val) in rhs.data.column_entries(j) { + // nz = self.scatter( + // i, + // val, + // timestamps.as_mut_slice(), + // j + 1, + // workspace.as_mut_slice(), + // nz, + // &mut res, + // ); + // } + // + // // Keep the output sorted. + // let range = res.data.p[j]..nz; + // res.data.i[range.clone()].sort(); + // + // for p in range { + // res.data.vals[p] = workspace[res.data.i[p]] + // } + // } + res.data.i.truncate(nz); res.data.i.shrink_to_fit(); res.data.vals.truncate(nz); From cae2be5cad025868811a3cc29cbbd6cb08fb9e21 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Sat, 22 Dec 2018 18:03:32 +0100 Subject: [PATCH 18/25] Add .min and .max. --- Cargo.toml | 2 +- src/base/ops.rs | 42 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ed76cbb8e..7e2e21dd9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nalgebra" -version = "0.16.5" +version = "0.16.12" authors = [ "Sébastien Crozet " ] description = "Linear algebra library with transformations and statically-sized or dynamically-sized matrices." diff --git a/src/base/ops.rs b/src/base/ops.rs index 14ec98f01..594c3da4b 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -761,7 +761,7 @@ where } impl> Matrix { - /// Returns the absolute value of the coefficient with the largest absolute value. + /// Returns the absolute value of the component with the largest absolute value. 
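// A quick sketch (not from the patch) contrasting the new `.max()`/`.min()`
// with the absolute-value variants documented here; values are arbitrary.
use na::Matrix2;

let m = Matrix2::new(1.0, -3.5,
                     2.0,  0.5);
assert_eq!(m.max(), 2.0);   // largest component
assert_eq!(m.min(), -3.5);  // smallest component
assert_eq!(m.amax(), 3.5);  // largest absolute value
assert_eq!(m.amin(), 0.5);  // smallest absolute value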
#[inline] pub fn amax(&self) -> N { let mut max = N::zero(); @@ -777,7 +777,7 @@ impl> Matri max } - /// Returns the absolute value of the coefficient with the smallest absolute value. + /// Returns the absolute value of the component with the smallest absolute value. #[inline] pub fn amin(&self) -> N { let mut it = self.iter(); @@ -796,4 +796,42 @@ impl> Matri min } + + /// Returns the component with the largest value. + #[inline] + pub fn max(&self) -> N { + let mut it = self.iter(); + let mut max = it + .next() + .expect("max: empty matrices not supported."); + + for e in it { + let ae = e; + + if ae > max { + max = ae; + } + } + + *max + } + + /// Returns the component with the smallest value. + #[inline] + pub fn min(&self) -> N { + let mut it = self.iter(); + let mut min = it + .next() + .expect("min: empty matrices not supported."); + + for e in it { + let ae = e; + + if ae < min { + min = ae; + } + } + + *min + } } From a14d8a4cb211489dd0d6ca494bc0937603352b9a Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Wed, 26 Dec 2018 15:18:03 +0100 Subject: [PATCH 19/25] Add coordinates access to translations. Fix #509. --- src/geometry/mod.rs | 1 + src/geometry/translation_coordinates.rs | 44 +++++++++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 src/geometry/translation_coordinates.rs diff --git a/src/geometry/mod.rs b/src/geometry/mod.rs index af7124616..dd0599592 100644 --- a/src/geometry/mod.rs +++ b/src/geometry/mod.rs @@ -37,6 +37,7 @@ mod translation_alga; mod translation_alias; mod translation_construction; mod translation_conversion; +mod translation_coordinates; mod translation_ops; mod isometry; diff --git a/src/geometry/translation_coordinates.rs b/src/geometry/translation_coordinates.rs new file mode 100644 index 000000000..01207a32e --- /dev/null +++ b/src/geometry/translation_coordinates.rs @@ -0,0 +1,44 @@ +use std::mem; +use std::ops::{Deref, DerefMut}; + +use base::allocator::Allocator; +use base::coordinates::{X, XY, XYZ, XYZW, XYZWA, XYZWAB}; +use base::dimension::{U1, U2, U3, U4, U5, U6}; +use base::{DefaultAllocator, Scalar}; + +use geometry::Translation; + +/* + * + * Give coordinates to Translation{1 .. 6} + * + */ + +macro_rules! deref_impl( + ($D: ty, $Target: ident $(, $comps: ident)*) => { + impl Deref for Translation + where DefaultAllocator: Allocator { + type Target = $Target; + + #[inline] + fn deref(&self) -> &Self::Target { + unsafe { mem::transmute(self) } + } + } + + impl DerefMut for Translation + where DefaultAllocator: Allocator { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { mem::transmute(self) } + } + } + } +); + +deref_impl!(U1, X, x); +deref_impl!(U2, XY, x, y); +deref_impl!(U3, XYZ, x, y, z); +deref_impl!(U4, XYZW, x, y, z, w); +deref_impl!(U5, XYZWA, x, y, z, w, a); +deref_impl!(U6, XYZWAB, x, y, z, w, a, b); From 9fbdedb9422bcc11658c8d72cb138ec32e7618ad Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Mon, 28 Jan 2019 11:22:57 +0100 Subject: [PATCH 20/25] Implement ContiguousStorage for some matrix slices. 
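As a brief aside (not part of either commit), the coordinate access added for translations in the previous patch makes the following sketch possible:

use na::Translation3;

let mut t = Translation3::new(1.0, 2.0, 3.0);
assert_eq!(t.x, 1.0); // fields provided by the new `Deref` impls
t.y += 10.0;          // and `DerefMut` allows in-place updates
assert_eq!(t.y, 12.0);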
--- src/base/matrix_slice.rs | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 8027c49d9..d26fffb65 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -4,9 +4,9 @@ use std::slice; use base::allocator::Allocator; use base::default_allocator::DefaultAllocator; -use base::dimension::{Dim, DimName, Dynamic, U1}; +use base::dimension::{Dim, DimName, Dynamic, U1, IsNotStaticOne}; use base::iter::MatrixIter; -use base::storage::{Owned, Storage, StorageMut}; +use base::storage::{Owned, Storage, StorageMut, ContiguousStorage, ContiguousStorageMut}; use base::{Matrix, Scalar}; macro_rules! slice_storage_impl( @@ -147,8 +147,6 @@ macro_rules! storage_impl( } } - - #[inline] fn into_owned(self) -> Owned where DefaultAllocator: Allocator { @@ -200,6 +198,14 @@ unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu } } +unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, N, R, U1, U1, CStride> { } +unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, N, R, U1, U1, CStride> { } +unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut for SliceStorageMut<'a, N, R, U1, U1, CStride> { } + +unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorage<'a, N, R, C, U1, R> { } +unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, N, R, C, U1, R> { } +unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, N, R, C, U1, R> { } + impl> Matrix { #[inline] fn assert_slice_index( @@ -860,3 +866,25 @@ impl> Matrix { self.slice_range_mut(.., cols) } } + + +impl<'a, N, R, C, RStride, CStride> From> +for MatrixSlice<'a, N, R, C, RStride, CStride> + where + N: Scalar, + R: Dim, + C: Dim, + RStride: Dim, + CStride: Dim, +{ + fn from(slice_mut: MatrixSliceMut<'a, N, R, C, RStride, CStride>) -> Self { + let data = SliceStorage { + ptr: slice_mut.data.ptr, + shape: slice_mut.data.shape, + strides: slice_mut.data.strides, + _phantoms: PhantomData, + }; + + unsafe { Matrix::from_data_statically_unchecked(data) } + } +} \ No newline at end of file From a7ab61f9745dd21ea7dce5b713b0fcff36341fe8 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 29 Jan 2019 12:03:48 +0100 Subject: [PATCH 21/25] Add horizontal and vertical resizing for dynamic matrices and vectors. --- src/base/edition.rs | 77 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/src/base/edition.rs b/src/base/edition.rs index 0ae9c35fc..74acd5d9a 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -1,6 +1,7 @@ use num::{One, Zero}; use std::cmp; use std::ptr; +use std::mem; use base::allocator::{Allocator, Reallocator}; use base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; @@ -549,6 +550,29 @@ impl> Matrix { self.resize_generic(Dynamic::new(new_nrows), Dynamic::new(new_ncols), val) } + /// Resizes this matrix vertically, i.e., so that it contains `new_nrows` rows while keeping the same number of columns. + /// + /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more + /// rows than `self`, then the extra rows are filled with `val`. 
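// Usage sketch (not from the patch) for the vertical resize documented here,
// on an arbitrary dynamically-sized matrix.
use na::DMatrix;

let m = DMatrix::from_element(2, 3, 1.0);
let grown = m.resize_vertically(4, 0.0);
assert_eq!(grown.shape(), (4, 3));
assert_eq!(grown[(3, 0)], 0.0); // the two extra rows hold the fill value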
+ #[cfg(any(feature = "std", feature = "alloc"))] + pub fn resize_vertically(self, new_nrows: usize, val: N) -> MatrixMN + where DefaultAllocator: Reallocator { + let ncols = self.data.shape().1; + self.resize_generic(Dynamic::new(new_nrows), ncols, val) + } + + /// Resizes this matrix horizontally, i.e., so that it contains `new_ncolumns` columns while keeping the same number of columns. + /// + /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more + /// columns than `self`, then the extra columns are filled with `val`. + #[cfg(any(feature = "std", feature = "alloc"))] + pub fn resize_horizontally(self, new_ncols: usize, val: N) -> MatrixMN + where DefaultAllocator: Reallocator { + let nrows = self.data.shape().0; + self.resize_generic(nrows, Dynamic::new(new_ncols), val) + } + + /// Resizes this matrix so that it contains `R2::value()` rows and `C2::value()` columns. /// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -626,6 +650,59 @@ impl> Matrix { } } +impl DMatrix { + /// Resizes this matrix in-place. + /// + /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more + /// rows and/or columns than `self`, then the extra rows or columns are filled with `val`. + /// + /// Defined only for owned fully-dynamic matrices, i.e., `DMatrix`. + #[cfg(any(feature = "std", feature = "alloc"))] + pub fn resize_mut(&mut self, new_nrows: usize, new_ncols: usize, val: N) + where DefaultAllocator: Reallocator { + let placeholder = unsafe { Self::new_uninitialized(0, 0) }; + let old = mem::replace(self, placeholder); + let new = old.resize(new_nrows, new_ncols, val); + let _ = mem::replace(self, new); + } +} + +impl MatrixMN + where DefaultAllocator: Allocator { + /// Changes the number of rows of this matrix in-place. + /// + /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more + /// rows than `self`, then the extra rows are filled with `val`. + /// + /// Defined only for owned matrices with a dynamic number of rows (for example, `DVector`). + #[cfg(any(feature = "std", feature = "alloc"))] + pub fn resize_vertically_mut(&mut self, new_nrows: usize, val: N) + where DefaultAllocator: Reallocator { + let placeholder = unsafe { Self::new_uninitialized_generic(Dynamic::new(0), self.data.shape().1) }; + let old = mem::replace(self, placeholder); + let new = old.resize_vertically(new_nrows, val); + let _ = mem::replace(self, new); + } +} + +impl MatrixMN + where DefaultAllocator: Allocator { + /// Changes the number of column of this matrix in-place. + /// + /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more + /// columns than `self`, then the extra columns are filled with `val`. + /// + /// Defined only for owned matrices with a dynamic number of columns (for example, `DVector`). 
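// Sketch (not from the patch) of the in-place variants on owned dynamic
// types: `resize_mut` on a `DMatrix` and `resize_vertically_mut` on a
// `DVector`; values are arbitrary.
use na::{DMatrix, DVector};

let mut m = DMatrix::from_element(2, 2, 1.0);
m.resize_mut(3, 4, 0.0); // now 3x4, new entries set to 0.0
assert_eq!(m.shape(), (3, 4));

let mut v = DVector::from_element(2, 5.0);
v.resize_vertically_mut(4, 0.0); // now a 4-element column vector
assert_eq!(v.len(), 4);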
+ #[cfg(any(feature = "std", feature = "alloc"))] + pub fn resize_horizontally_mut(&mut self, new_ncols: usize, val: N) + where DefaultAllocator: Reallocator { + let placeholder = unsafe { Self::new_uninitialized_generic(self.data.shape().0, Dynamic::new(0)) }; + let old = mem::replace(self, placeholder); + let new = old.resize_horizontally(new_ncols, val); + let _ = mem::replace(self, new); + } +} + unsafe fn compress_rows( data: &mut [N], nrows: usize, From 13f76efe36ef5a2fbb5abf241bb52c0e30d185fc Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 29 Jan 2019 12:04:23 +0100 Subject: [PATCH 22/25] Add simple constructors for pure-translation and pure-rotation isometries. --- src/geometry/isometry_construction.rs | 38 ++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/src/geometry/isometry_construction.rs b/src/geometry/isometry_construction.rs index a52996886..a60b0f634 100644 --- a/src/geometry/isometry_construction.rs +++ b/src/geometry/isometry_construction.rs @@ -16,7 +16,7 @@ use base::{DefaultAllocator, Vector2, Vector3}; use geometry::{ Isometry, Point, Point3, Rotation, Rotation2, Rotation3, Translation, UnitComplex, - UnitQuaternion, + UnitQuaternion, Translation2, Translation3 }; impl>> Isometry @@ -89,6 +89,18 @@ impl Isometry> { Rotation::::new(angle), ) } + + /// Creates a new isometry from the given translation coordinates. + #[inline] + pub fn translation(x: N, y: N) -> Self { + Self::new(Vector2::new(x, y), N::zero()) + } + + /// Creates a new isometry from the given rotation angle. + #[inline] + pub fn rotation(angle: N) -> Self { + Self::new(Vector2::zeros(), angle) + } } impl Isometry> { @@ -100,6 +112,18 @@ impl Isometry> { UnitComplex::from_angle(angle), ) } + + /// Creates a new isometry from the given translation coordinates. + #[inline] + pub fn translation(x: N, y: N) -> Self { + Self::from_parts(Translation2::new(x, y), UnitComplex::identity()) + } + + /// Creates a new isometry from the given rotation angle. + #[inline] + pub fn rotation(angle: N) -> Self { + Self::new(Vector2::zeros(), angle) + } } // 3D rotation. @@ -114,6 +138,18 @@ macro_rules! isometry_construction_impl( $RotId::<$($RotParams),*>::from_scaled_axis(axisangle)) } + /// Creates a new isometry from the given translation coordinates. + #[inline] + pub fn translation(x: N, y: N, z: N) -> Self { + Self::from_parts(Translation3::new(x, y, z), $RotId::identity()) + } + + /// Creates a new isometry from the given rotation angle. + #[inline] + pub fn rotation(axisangle: Vector3) -> Self { + Self::new(Vector3::zeros(), axisangle) + } + /// Creates an isometry that corresponds to the local frame of an observer standing at the /// point `eye` and looking toward `target`. /// From ce8879c37ad0f823b15bbc80e79ce26cabc795c0 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Sun, 3 Feb 2019 14:18:55 +0100 Subject: [PATCH 23/25] Add all the missing docs. 
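For reference, a small sketch (not part of either commit) of the pure-translation and pure-rotation isometry constructors introduced in the previous patch:

use na::{Isometry2, Isometry3, Point2, Point3, Vector3};

let t = Isometry2::translation(1.0, 2.0);
assert_eq!(t * Point2::origin(), Point2::new(1.0, 2.0));

let r = Isometry3::rotation(Vector3::y() * std::f64::consts::PI);
let p = r * Point3::new(1.0, 0.0, 0.0);
assert!((p.x + 1.0).abs() < 1.0e-7); // half a turn about the y axis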
--- ci/build.sh | 2 +- src/io/matrix_market.rs | 2 + src/io/mod.rs | 4 +- src/sparse/cs_matrix.rs | 69 ++++++++++++++++++++++++------ src/sparse/cs_matrix_cholesky.rs | 19 ++++---- src/sparse/cs_matrix_conversion.rs | 18 +++----- src/sparse/cs_matrix_ops.rs | 12 +++--- src/sparse/cs_matrix_solve.rs | 18 ++++---- src/sparse/mod.rs | 4 +- 9 files changed, 95 insertions(+), 53 deletions(-) diff --git a/ci/build.sh b/ci/build.sh index 4c5c4d1b7..550c9a69a 100755 --- a/ci/build.sh +++ b/ci/build.sh @@ -11,7 +11,7 @@ if [ -z "$NO_STD" ]; then cargo build --verbose -p nalgebra --features "serde-serialize"; cargo build --verbose -p nalgebra --features "abomonation-serialize"; cargo build --verbose -p nalgebra --features "debug"; - cargo build --verbose -p nalgebra --features "debug arbitrary mint serde-serialize abomonation-serialize"; + cargo build --verbose -p nalgebra --all-features else cargo build -p nalgebra-lapack; fi diff --git a/src/io/matrix_market.rs b/src/io/matrix_market.rs index 12fb6c559..f4919cd44 100644 --- a/src/io/matrix_market.rs +++ b/src/io/matrix_market.rs @@ -10,12 +10,14 @@ use Real; struct MatrixMarketParser; // FIXME: return an Error instead of an Option. +/// Parses a Matrix Market file at the given path, and returns the corresponding sparse matrix. pub fn cs_matrix_from_matrix_market>(path: P) -> Option> { let file = fs::read_to_string(path).ok()?; cs_matrix_from_matrix_market_str(&file) } // FIXME: return an Error instead of an Option. +/// Parses a Matrix Market file described by the given string, and returns the corresponding sparse matrix. pub fn cs_matrix_from_matrix_market_str(data: &str) -> Option> { let file = MatrixMarketParser::parse(Rule::Document, data) .unwrap() diff --git a/src/io/mod.rs b/src/io/mod.rs index fd7dc5367..1b172b200 100644 --- a/src/io/mod.rs +++ b/src/io/mod.rs @@ -1,3 +1,5 @@ -pub use self::matrix_market::*; +//! Parsers for various matrix formats. + +pub use self::matrix_market::{cs_matrix_from_matrix_market, cs_matrix_from_matrix_market_str}; mod matrix_market; diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index a02e8613d..7ddc4debc 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -1,17 +1,14 @@ -use alga::general::{ClosedAdd, ClosedMul}; -use num::{One, Zero}; +use alga::general::ClosedAdd; +use num::Zero; use std::iter; use std::marker::PhantomData; -use std::ops::{Add, Mul, Range}; +use std::ops::Range; use std::slice; use allocator::Allocator; -use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; use sparse::cs_utils; -use storage::{Storage, StorageMut}; use { - DVector, DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, MatrixVec, Real, Scalar, Vector, - VectorN, U1, + DefaultAllocator, Dim, Dynamic, Scalar, Vector, VectorN, U1 }; pub struct ColumnEntries<'a, N> { @@ -47,38 +44,66 @@ impl<'a, N: Copy> Iterator for ColumnEntries<'a, N> { // FIXME: this structure exists for now only because impl trait // cannot be used for trait method return types. +/// Trait for iterable compressed-column matrix storage. pub trait CsStorageIter<'a, N, R, C = U1> { + /// Iterator through all the rows of a specific columns. + /// + /// The elements are given as a tuple (row_index, value). type ColumnEntries: Iterator; + /// Iterator through the row indices of a specific column. type ColumnRowIndices: Iterator; + /// Iterates through all the row indices of the j-th column. 
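// Sketch (not from the patch) of the Matrix Market parser documented in
// `src/io/matrix_market.rs` above; it assumes the `io` feature is enabled
// and that the function is reachable as `na::io::cs_matrix_from_matrix_market_str`.
// The literal below is a tiny, hand-written example file.
use na::io::cs_matrix_from_matrix_market_str;

let mm = "%%MatrixMarket matrix coordinate real general
2 2 3
1 1 1.0
2 2 2.0
1 2 3.0";
if let Some(m) = cs_matrix_from_matrix_market_str::<f64>(mm) {
    assert_eq!(m.shape(), (2, 2));
}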
fn column_row_indices(&'a self, j: usize) -> Self::ColumnRowIndices; #[inline(always)] + /// Iterates through all the entries of the j-th column. fn column_entries(&'a self, j: usize) -> Self::ColumnEntries; } +/// Trait for mutably iterable compressed-column sparse matrix storage. pub trait CsStorageIterMut<'a, N: 'a, R, C = U1> { + /// Mutable iterator through all the values of the sparse matrix. type ValuesMut: Iterator; + /// Mutable iterator through all the rows of a specific columns. + /// + /// The elements are given as a tuple (row_index, value). type ColumnEntriesMut: Iterator; + /// A mutable iterator through the values buffer of the sparse matrix. fn values_mut(&'a mut self) -> Self::ValuesMut; + /// Iterates mutably through all the entries of the j-th column. fn column_entries_mut(&'a mut self, j: usize) -> Self::ColumnEntriesMut; } +/// Trait for compressed column sparse matrix storage. pub trait CsStorage: for<'a> CsStorageIter<'a, N, R, C> { + /// The shape of the stored matrix. fn shape(&self) -> (R, C); + /// Retrieve the i-th row index of the underlying row index buffer. + /// + /// No bound-checking is performed. unsafe fn row_index_unchecked(&self, i: usize) -> usize; + /// The i-th value on the contiguous value buffer of this storage. + /// + /// No bound-checking is performed. unsafe fn get_value_unchecked(&self, i: usize) -> &N; + /// The i-th value on the contiguous value buffer of this storage. fn get_value(&self, i: usize) -> &N; + /// Retrieve the i-th row index of the underlying row index buffer. fn row_index(&self, i: usize) -> usize; + /// The value indices for the `i`-th column. fn column_range(&self, i: usize) -> Range; + /// The size of the value buffer (i.e. the entries known as possibly being non-zero). fn len(&self) -> usize; } +/// Trait for compressed column sparse matrix mutable storage. pub trait CsStorageMut: CsStorage + for<'a> CsStorageIterMut<'a, N, R, C> { } +/// A storage of column-compressed sparse matrix based on a Vec. #[derive(Clone, Debug, PartialEq)] pub struct CsVecStorage where DefaultAllocator: Allocator @@ -92,12 +117,17 @@ where DefaultAllocator: Allocator impl CsVecStorage where DefaultAllocator: Allocator { + /// The value buffer of this storage. pub fn values(&self) -> &[N] { &self.vals } + + /// The column shifts buffer. pub fn p(&self) -> &[usize] { self.p.as_slice() } + + /// The row index buffers. pub fn i(&self) -> &[usize] { &self.i } @@ -209,15 +239,18 @@ pub struct CsMatrix< C: Dim = Dynamic, S: CsStorage = CsVecStorage, > { - pub data: S, + pub(crate) data: S, _phantoms: PhantomData<(N, R, C)>, } +/// A column compressed sparse vector. pub type CsVector> = CsMatrix; impl CsMatrix where DefaultAllocator: Allocator { + /// Creates a new compressed sparse column matrix with the specified dimension and + /// `nvals` possible non-zero values. 
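// Construction sketch (not from the patch): most user code builds a sparse
// matrix from triplets rather than through the uninitialized constructor.
// Assumes the `sparse` feature; `from_triplet` is documented further below.
use na::sparse::CsMatrix;

let m = CsMatrix::from_triplet(
    3, 3,                  // nrows, ncols
    &[0, 1, 2, 0],         // row indices
    &[0, 1, 2, 2],         // column indices
    &[1.0, 2.0, 3.0, 4.0], // values
);
assert_eq!(m.shape(), (3, 3));
assert_eq!(m.len(), 4); // number of explicitly stored entries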
pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self { let mut i = Vec::with_capacity(nvals); unsafe { @@ -242,7 +275,8 @@ where DefaultAllocator: Allocator } } - pub fn from_parts_generic( + /* + pub(crate) fn from_parts_generic( nrows: R, ncols: C, p: VectorN, @@ -285,11 +319,12 @@ where DefaultAllocator: Allocator res.dedup(); res - } + }*/ } +/* impl CsMatrix { - pub fn from_parts( + pub(crate) fn from_parts( nrows: usize, ncols: usize, p: Vec, @@ -299,36 +334,42 @@ impl CsMatrix { { let nrows = Dynamic::new(nrows); let ncols = Dynamic::new(ncols); - let p = DVector::from_data(MatrixVec::new(ncols, U1, p)); + let p = DVector::from_data(VecStorage::new(ncols, U1, p)); Self::from_parts_generic(nrows, ncols, p, i, vals) } } +*/ impl> CsMatrix { - pub fn from_data(data: S) -> Self { + pub(crate) fn from_data(data: S) -> Self { CsMatrix { data, _phantoms: PhantomData, } } + /// The size of the data buffer. pub fn len(&self) -> usize { self.data.len() } + /// The number of rows of this matrix. pub fn nrows(&self) -> usize { self.data.shape().0.value() } + /// The number of rows of this matrix. pub fn ncols(&self) -> usize { self.data.shape().1.value() } + /// The shape of this matrix. pub fn shape(&self) -> (usize, usize) { let (nrows, ncols) = self.data.shape(); (nrows.value(), ncols.value()) } + /// Whether this matrix is square or not. pub fn is_square(&self) -> bool { let (nrows, ncols) = self.data.shape(); nrows.value() == ncols.value() @@ -360,6 +401,7 @@ impl> CsMatrix { true } + /// Computes the transpose of this sparse matrix. pub fn transpose(&self) -> CsMatrix where DefaultAllocator: Allocator { let (nrows, ncols) = self.data.shape(); @@ -392,6 +434,7 @@ impl> CsMatrix { } impl> CsMatrix { + /// Iterator through all the mutable values of this sparse matrix. #[inline] pub fn values_mut(&mut self) -> impl Iterator { self.data.values_mut() diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 000332e7c..5d834ef21 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -1,17 +1,11 @@ -use alga::general::{ClosedAdd, ClosedMul}; -use num::{One, Zero}; use std::iter; -use std::marker::PhantomData; use std::mem; -use std::ops::{Add, Mul, Range}; -use std::slice; use allocator::Allocator; -use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; -use sparse::{CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsVecStorage, CsVector}; -use storage::{Storage, StorageMut}; -use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; +use sparse::{CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsVecStorage}; +use {DefaultAllocator, Dim, Real, VectorN, U1}; +/// The cholesky decomposition of a column compressed sparse matrix. pub struct CsCholesky where DefaultAllocator: Allocator + Allocator { @@ -68,6 +62,7 @@ where DefaultAllocator: Allocator + Allocator } } + /// The lower-triangular matrix of the cholesky decomposition. pub fn l(&self) -> Option<&CsMatrix> { if self.ok { Some(&self.l) @@ -76,6 +71,7 @@ where DefaultAllocator: Allocator + Allocator } } + /// Extracts the lower-triangular matrix of the cholesky decomposition. pub fn unwrap_l(self) -> Option> { if self.ok { Some(self.l) @@ -84,6 +80,8 @@ where DefaultAllocator: Allocator + Allocator } } + /// Perform a numerical left-looking cholesky decomposition of a matrix with the same structure as the + /// one used to initialize `self`, but with different non-zero values provided by `values`. 
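// Sketch (not from the patch): factor an arbitrary 2x2 symmetric
// positive-definite sparse matrix; assumes `CsCholesky` is re-exported from
// the `sparse` module alongside `CsMatrix`.
use na::sparse::{CsCholesky, CsMatrix};

let m = CsMatrix::from_triplet(
    2, 2,
    &[0, 0, 1, 1],
    &[0, 1, 0, 1],
    &[4.0, 1.0, 1.0, 3.0],
);
let chol = CsCholesky::new(&m);
if let Some(l) = chol.l() {
    // Lower-triangular factor of the decomposition.
    assert_eq!(l.shape(), (2, 2));
}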
pub fn decompose_left_looking(&mut self, values: &[N]) -> bool { assert!( values.len() >= self.original_i.len(), @@ -152,7 +150,8 @@ where DefaultAllocator: Allocator + Allocator true } - // Performs the numerical Cholesky decomposition given the set of numerical values. + /// Perform a numerical up-looking cholesky decomposition of a matrix with the same structure as the + /// one used to initialize `self`, but with different non-zero values provided by `values`. pub fn decompose_up_looking(&mut self, values: &[N]) -> bool { assert!( values.len() >= self.original_i.len(), diff --git a/src/sparse/cs_matrix_conversion.rs b/src/sparse/cs_matrix_conversion.rs index f37ef61dd..0017340f0 100644 --- a/src/sparse/cs_matrix_conversion.rs +++ b/src/sparse/cs_matrix_conversion.rs @@ -1,19 +1,14 @@ -use alga::general::{ClosedAdd, ClosedMul}; -use num::{One, Zero}; -use std::iter; -use std::marker::PhantomData; -use std::ops::{Add, Mul, Range}; -use std::slice; +use alga::general::ClosedAdd; +use num::Zero; use allocator::Allocator; -use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; use sparse::cs_utils; -use sparse::{CsMatrix, CsStorage, CsVector}; -use storage::{Storage, StorageMut}; -use {DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; +use sparse::{CsMatrix, CsStorage}; +use storage::Storage; +use {DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, Scalar}; impl<'a, N: Scalar + Zero + ClosedAdd> CsMatrix { - // FIXME: implement for dimensions other than Dynamic too. + /// Creates a column-compressed sparse matrix from a sparse matrix in triplet form. pub fn from_triplet( nrows: usize, ncols: usize, @@ -29,6 +24,7 @@ impl<'a, N: Scalar + Zero + ClosedAdd> CsMatrix { impl<'a, N: Scalar + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix where DefaultAllocator: Allocator + Allocator { + /// Creates a column-compressed sparse matrix from a sparse matrix in triplet form. pub fn from_triplet_generic( nrows: R, ncols: C, diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 5ebf099a2..478048478 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -1,15 +1,12 @@ use alga::general::{ClosedAdd, ClosedMul}; use num::{One, Zero}; -use std::iter; -use std::marker::PhantomData; -use std::ops::{Add, Mul, Range}; -use std::slice; +use std::ops::{Add, Mul}; use allocator::Allocator; -use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; +use constraint::{AreMultipliable, DimEq, ShapeConstraint}; use sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector}; -use storage::{Storage, StorageMut}; -use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; +use storage::StorageMut; +use {DefaultAllocator, Dim, Scalar, Vector, VectorN, U1}; impl> CsMatrix { fn scatter( @@ -80,6 +77,7 @@ impl CsVector { */ impl> Vector { + /// Perform a sparse axpy operation: `self = alpha * x + beta * self` operation. 
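// Sketch (not from the patch) of the sparse axpy documented here, i.e.
// `self = alpha * x + beta * self`; it assumes the dense-to-sparse `From`
// conversion provided by `cs_matrix_conversion.rs`. Values are arbitrary.
use na::sparse::CsVector;
use na::{Vector3, U3};

let x: CsVector<f64, U3> = Vector3::new(1.0, 0.0, 2.0).into();
let mut y = Vector3::new(10.0, 10.0, 10.0);
y.axpy_cs(2.0, &x, 1.0); // y <- 2 * x + y
assert_eq!(y, Vector3::new(12.0, 10.0, 14.0));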
pub fn axpy_cs(&mut self, alpha: N, x: &CsVector, beta: N) where S2: CsStorage, diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index 9cd25def7..2a13188e6 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -1,17 +1,11 @@ -use alga::general::{ClosedAdd, ClosedMul}; -use num::{One, Zero}; -use std::iter; -use std::marker::PhantomData; -use std::ops::{Add, Mul, Range}; -use std::slice; - use allocator::Allocator; -use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; +use constraint::{SameNumberOfRows, ShapeConstraint}; use sparse::{CsMatrix, CsStorage, CsVector}; use storage::{Storage, StorageMut}; -use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}; +use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, VectorN, U1}; impl> CsMatrix { + /// Solve a lower-triangular system with a dense right-hand-side. pub fn solve_lower_triangular( &self, b: &Matrix, @@ -29,6 +23,7 @@ impl> CsMatrix { } } + /// Solve a lower-triangular system with `self` transposed and a dense right-hand-side. pub fn tr_solve_lower_triangular( &self, b: &Matrix, @@ -46,6 +41,7 @@ impl> CsMatrix { } } + /// Solve in-place a lower-triangular system with a dense right-hand-side. pub fn solve_lower_triangular_mut( &self, b: &mut Matrix, @@ -90,6 +86,7 @@ impl> CsMatrix { true } + /// Solve a lower-triangular system with `self` transposed and a dense right-hand-side. pub fn tr_solve_lower_triangular_mut( &self, b: &mut Matrix, @@ -135,6 +132,7 @@ impl> CsMatrix { true } + /// Solve a lower-triangular system with a sparse right-hand-side. pub fn solve_lower_triangular_cs( &self, b: &CsVector, @@ -195,6 +193,7 @@ impl> CsMatrix { Some(result) } + /* // Computes the reachable, post-ordered, nodes from `b`. fn lower_triangular_reach_postordered( &self, @@ -240,6 +239,7 @@ impl> CsMatrix { xi.push(j) } } + */ // Computes the nodes reachable from `b` in an arbitrary order. fn lower_triangular_reach(&self, b: &CsVector, xi: &mut Vec) diff --git a/src/sparse/mod.rs b/src/sparse/mod.rs index 546507eb8..5df2d75dd 100644 --- a/src/sparse/mod.rs +++ b/src/sparse/mod.rs @@ -1,3 +1,5 @@ +//! Sparse matrices. + pub use self::cs_matrix::{ CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsStorageMut, CsVecStorage, CsVector, }; @@ -8,4 +10,4 @@ mod cs_matrix_cholesky; mod cs_matrix_conversion; mod cs_matrix_ops; mod cs_matrix_solve; -pub mod cs_utils; +pub(crate) mod cs_utils; From 7be7fc87763ac26d0e2a2732a833b10f215646ac Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Sun, 3 Feb 2019 15:16:50 +0100 Subject: [PATCH 24/25] Fix compilation with no-std. 
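As a usage note for the sparse triangular solvers documented in the previous commit (not part of either commit; the values below are arbitrary):

use na::sparse::CsMatrix;
use na::Vector2;

// Lower-triangular L = [[2, 0], [1, 3]]; solve L * x = [2, 7].
let l = CsMatrix::from_triplet(2, 2, &[0, 1, 1], &[0, 0, 1], &[2.0, 1.0, 3.0]);
let b = Vector2::new(2.0, 7.0);
let x = l.solve_lower_triangular(&b).expect("the diagonal has no zero entry here");
assert_eq!(x, Vector2::new(1.0, 2.0));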
--- src/base/conversion.rs | 4 +++- src/base/edition.rs | 19 +++++++++++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 2e5ca9df0..fcb6907da 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -12,8 +12,10 @@ use typenum::Prod; use base::allocator::{Allocator, SameShapeAllocator}; use base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use base::dimension::{ - Dim, DimName, Dynamic, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9, + Dim, DimName, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9, }; +#[cfg(any(feature = "std", feature = "alloc"))] +use base::dimension::Dynamic; use base::iter::{MatrixIter, MatrixIterMut}; use base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; #[cfg(any(feature = "std", feature = "alloc"))] diff --git a/src/base/edition.rs b/src/base/edition.rs index 82e5d7eae..32e2c1aa7 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -1,14 +1,18 @@ use num::{One, Zero}; use std::cmp; use std::ptr; +#[cfg(any(feature = "std", feature = "alloc"))] use std::iter::ExactSizeIterator; +#[cfg(any(feature = "std", feature = "alloc"))] use std::mem; use base::allocator::{Allocator, Reallocator}; use base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use base::dimension::{ - Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimName, DimSub, DimSum, Dynamic, U1, + Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimName, DimSub, DimSum, U1, }; +#[cfg(any(feature = "std", feature = "alloc"))] +use base::dimension::Dynamic; use base::storage::{Storage, StorageMut}; #[cfg(any(feature = "std", feature = "alloc"))] use base::DMatrix; @@ -36,6 +40,7 @@ impl> Matrix { } /// Creates a new matrix by extracting the given set of rows from `self`. + #[cfg(any(feature = "std", feature = "alloc"))] pub fn select_rows<'a, I>(&self, irows: I) -> MatrixMN where I: IntoIterator, I::IntoIter: ExactSizeIterator + Clone, @@ -66,6 +71,7 @@ impl> Matrix { } /// Creates a new matrix by extracting the given set of columns from `self`. + #[cfg(any(feature = "std", feature = "alloc"))] pub fn select_columns<'a, I>(&self, icols: I) -> MatrixMN where I: IntoIterator, I::IntoIter: ExactSizeIterator, @@ -296,6 +302,7 @@ impl> Matrix { /// Removes `n` consecutive columns from this matrix, starting with the `i`-th (included). #[inline] + #[cfg(any(feature = "std", feature = "alloc"))] pub fn remove_columns(self, i: usize, n: usize) -> MatrixMN where C: DimSub, @@ -378,6 +385,7 @@ impl> Matrix { /// Removes `n` consecutive rows from this matrix, starting with the `i`-th (included). #[inline] + #[cfg(any(feature = "std", feature = "alloc"))] pub fn remove_rows(self, i: usize, n: usize) -> MatrixMN where R: DimSub, @@ -455,6 +463,7 @@ impl> Matrix { /// Inserts `n` columns filled with `val` starting at the `i-th` position. #[inline] + #[cfg(any(feature = "std", feature = "alloc"))] pub fn insert_columns(self, i: usize, n: usize, val: N) -> MatrixMN where C: DimAdd, @@ -532,6 +541,7 @@ impl> Matrix { /// Inserts `n` rows filled with `val` starting at the `i-th` position. #[inline] + #[cfg(any(feature = "std", feature = "alloc"))] pub fn insert_rows(self, i: usize, n: usize, val: N) -> MatrixMN where R: DimAdd, @@ -697,6 +707,7 @@ impl> Matrix { } } +#[cfg(any(feature = "std", feature = "alloc"))] impl DMatrix { /// Resizes this matrix in-place. 
/// @@ -704,7 +715,6 @@ impl DMatrix { /// rows and/or columns than `self`, then the extra rows or columns are filled with `val`. /// /// Defined only for owned fully-dynamic matrices, i.e., `DMatrix`. - #[cfg(any(feature = "std", feature = "alloc"))] pub fn resize_mut(&mut self, new_nrows: usize, new_ncols: usize, val: N) where DefaultAllocator: Reallocator { let placeholder = unsafe { Self::new_uninitialized(0, 0) }; @@ -714,6 +724,7 @@ impl DMatrix { } } +#[cfg(any(feature = "std", feature = "alloc"))] impl MatrixMN where DefaultAllocator: Allocator { /// Changes the number of rows of this matrix in-place. @@ -732,6 +743,7 @@ impl MatrixMN } } +#[cfg(any(feature = "std", feature = "alloc"))] impl MatrixMN where DefaultAllocator: Allocator { /// Changes the number of column of this matrix in-place. @@ -830,6 +842,7 @@ unsafe fn extend_rows( /// Extend the number of columns of the `Matrix` with elements from /// a given iterator. +#[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where N: Scalar, @@ -877,6 +890,7 @@ where /// Extend the number of rows of the `Vector` with elements from /// a given iterator. +#[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where N: Scalar, @@ -897,6 +911,7 @@ where } } +#[cfg(any(feature = "std", feature = "alloc"))] impl Extend> for Matrix where N: Scalar, From 08f31837a8e093dad6ab538e244d1d748c4bb5f3 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Sun, 3 Feb 2019 15:45:25 +0100 Subject: [PATCH 25/25] Update to alga 0.8. --- Cargo.toml | 6 +++--- nalgebra-glm/Cargo.toml | 2 +- nalgebra-lapack/Cargo.toml | 4 ++-- src/base/matrix_alga.rs | 8 ++++---- src/geometry/isometry.rs | 2 +- src/geometry/isometry_alga.rs | 8 ++++---- src/geometry/isometry_ops.rs | 4 ++-- src/geometry/quaternion_alga.rs | 12 ++++++------ src/geometry/rotation_alga.rs | 8 ++++---- src/geometry/similarity_alga.rs | 8 ++++---- src/geometry/similarity_ops.rs | 4 ++-- src/geometry/transform_alga.rs | 12 ++++++------ src/geometry/translation_alga.rs | 8 ++++---- src/geometry/unit_complex_alga.rs | 8 ++++---- src/lib.rs | 6 +++--- 15 files changed, 50 insertions(+), 50 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 048ec5576..4672dae23 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,13 +35,13 @@ rand = { version = "0.6", default-features = false } num-traits = { version = "0.2", default-features = false } num-complex = { version = "0.2", default-features = false } approx = { version = "0.3", default-features = false } -alga = { version = "0.7", default-features = false } +alga = { version = "0.8", default-features = false } matrixmultiply = { version = "0.2", optional = true } serde = { version = "1.0", optional = true } serde_derive = { version = "1.0", optional = true } abomonation = { version = "0.7", optional = true } mint = { version = "0.5", optional = true } -quickcheck = { version = "0.7", optional = true } +quickcheck = { version = "0.8", optional = true } pest = { version = "2.0", optional = true } pest_derive = { version = "2.0", optional = true } @@ -50,4 +50,4 @@ serde_json = "1.0" rand_xorshift = "0.1" [workspace] -members = [ "nalgebra-lapack", "nalgebra-glm" ] +members = [ "nalgebra-lapack", "nalgebra-glm" ] \ No newline at end of file diff --git a/nalgebra-glm/Cargo.toml b/nalgebra-glm/Cargo.toml index 0ba77cd6a..508c1c7d8 100644 --- a/nalgebra-glm/Cargo.toml +++ b/nalgebra-glm/Cargo.toml @@ -23,5 +23,5 @@ abomonation-serialize = [ "nalgebra/abomonation-serialize" ] [dependencies] num-traits = { version = "0.2", 
default-features = false } approx = { version = "0.3", default-features = false } -alga = { version = "0.7", default-features = false } +alga = { version = "0.8", default-features = false } nalgebra = { path = "..", version = "^0.16.13", default-features = false } diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml index c59211ab7..939217ff8 100644 --- a/nalgebra-lapack/Cargo.toml +++ b/nalgebra-lapack/Cargo.toml @@ -25,7 +25,7 @@ intel-mkl = ["lapack-src/intel-mkl"] nalgebra = { version = "0.16", path = ".." } num-traits = "0.2" num-complex = { version = "0.2", default-features = false } -alga = { version = "0.7", default-features = false } +alga = { version = "0.8", default-features = false } serde = { version = "1.0", optional = true } serde_derive = { version = "1.0", optional = true } lapack = { version = "0.16", default-features = false } @@ -34,6 +34,6 @@ lapack-src = { version = "0.2", default-features = false } [dev-dependencies] nalgebra = { version = "0.16", path = "..", features = [ "arbitrary" ] } -quickcheck = "0.7" +quickcheck = "0.8" approx = "0.3" rand = "0.6" diff --git a/src/base/matrix_alga.rs b/src/base/matrix_alga.rs index 7c454986b..427cdc840 100644 --- a/src/base/matrix_alga.rs +++ b/src/base/matrix_alga.rs @@ -6,7 +6,7 @@ use num::{One, Zero}; use alga::general::{ AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, ClosedAdd, ClosedMul, - ClosedNeg, Field, Identity, Inverse, JoinSemilattice, Lattice, MeetSemilattice, Module, + ClosedNeg, Field, Identity, TwoSidedInverse, JoinSemilattice, Lattice, MeetSemilattice, Module, Multiplicative, Real, RingCommutative, }; use alga::linear::{ @@ -45,18 +45,18 @@ where } } -impl Inverse for MatrixMN +impl TwoSidedInverse for MatrixMN where N: Scalar + ClosedNeg, DefaultAllocator: Allocator, { #[inline] - fn inverse(&self) -> MatrixMN { + fn two_sided_inverse(&self) -> MatrixMN { -self } #[inline] - fn inverse_mut(&mut self) { + fn two_sided_inverse_mut(&mut self) { *self = -self.clone() } } diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 1814efb53..1097be473 100644 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -166,7 +166,7 @@ where DefaultAllocator: Allocator /// ``` #[inline] pub fn inverse_mut(&mut self) { - self.rotation.inverse_mut(); + self.rotation.two_sided_inverse_mut(); self.translation.inverse_mut(); self.translation.vector = self.rotation.transform_vector(&self.translation.vector); } diff --git a/src/geometry/isometry_alga.rs b/src/geometry/isometry_alga.rs index 7decea8e7..fee8b63dc 100644 --- a/src/geometry/isometry_alga.rs +++ b/src/geometry/isometry_alga.rs @@ -1,6 +1,6 @@ use alga::general::{ AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, + AbstractSemigroup, Id, Identity, TwoSidedInverse, Multiplicative, Real, }; use alga::linear::Isometry as AlgaIsometry; use alga::linear::{ @@ -30,18 +30,18 @@ where } } -impl Inverse for Isometry +impl TwoSidedInverse for Isometry where R: Rotation>, DefaultAllocator: Allocator, { #[inline] - fn inverse(&self) -> Self { + fn two_sided_inverse(&self) -> Self { self.inverse() } #[inline] - fn inverse_mut(&mut self) { + fn two_sided_inverse_mut(&mut self) { self.inverse_mut() } } diff --git a/src/geometry/isometry_ops.rs b/src/geometry/isometry_ops.rs index a3608a630..6a4b921ea 100644 --- a/src/geometry/isometry_ops.rs +++ 
b/src/geometry/isometry_ops.rs @@ -200,8 +200,8 @@ isometry_binop_assign_impl_all!( DivAssign, div_assign; self: Isometry, rhs: R; // FIXME: don't invert explicitly? - [val] => *self *= rhs.inverse(); - [ref] => *self *= rhs.inverse(); + [val] => *self *= rhs.two_sided_inverse(); + [ref] => *self *= rhs.two_sided_inverse(); ); // Isometry × R diff --git a/src/geometry/quaternion_alga.rs b/src/geometry/quaternion_alga.rs index fe1a33b8d..529d27f48 100644 --- a/src/geometry/quaternion_alga.rs +++ b/src/geometry/quaternion_alga.rs @@ -2,7 +2,7 @@ use num::Zero; use alga::general::{ AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule, - AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, Id, Identity, Inverse, Module, + AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, Id, Identity, TwoSidedInverse, Module, Multiplicative, Real, }; use alga::linear::{ @@ -42,9 +42,9 @@ impl AbstractMagma for Quaternion { } } -impl Inverse for Quaternion { +impl TwoSidedInverse for Quaternion { #[inline] - fn inverse(&self) -> Self { + fn two_sided_inverse(&self) -> Self { -self } } @@ -173,14 +173,14 @@ impl AbstractMagma for UnitQuaternion { } } -impl Inverse for UnitQuaternion { +impl TwoSidedInverse for UnitQuaternion { #[inline] - fn inverse(&self) -> Self { + fn two_sided_inverse(&self) -> Self { self.inverse() } #[inline] - fn inverse_mut(&mut self) { + fn two_sided_inverse_mut(&mut self) { self.inverse_mut() } } diff --git a/src/geometry/rotation_alga.rs b/src/geometry/rotation_alga.rs index b3bf7477c..18c47b417 100644 --- a/src/geometry/rotation_alga.rs +++ b/src/geometry/rotation_alga.rs @@ -1,6 +1,6 @@ use alga::general::{ AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, + AbstractSemigroup, Id, Identity, TwoSidedInverse, Multiplicative, Real, }; use alga::linear::{ self, AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation, @@ -27,16 +27,16 @@ where DefaultAllocator: Allocator } } -impl Inverse for Rotation +impl TwoSidedInverse for Rotation where DefaultAllocator: Allocator { #[inline] - fn inverse(&self) -> Self { + fn two_sided_inverse(&self) -> Self { self.transpose() } #[inline] - fn inverse_mut(&mut self) { + fn two_sided_inverse_mut(&mut self) { self.transpose_mut() } } diff --git a/src/geometry/similarity_alga.rs b/src/geometry/similarity_alga.rs index c416cad83..e8a6b1544 100644 --- a/src/geometry/similarity_alga.rs +++ b/src/geometry/similarity_alga.rs @@ -1,6 +1,6 @@ use alga::general::{ AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Identity, Inverse, Multiplicative, Real, + AbstractSemigroup, Identity, TwoSidedInverse, Multiplicative, Real, }; use alga::linear::Similarity as AlgaSimilarity; use alga::linear::{AffineTransformation, ProjectiveTransformation, Rotation, Transformation}; @@ -27,18 +27,18 @@ where } } -impl Inverse for Similarity +impl TwoSidedInverse for Similarity where R: Rotation>, DefaultAllocator: Allocator, { #[inline] - fn inverse(&self) -> Self { + fn two_sided_inverse(&self) -> Self { self.inverse() } #[inline] - fn inverse_mut(&mut self) { + fn two_sided_inverse_mut(&mut self) { self.inverse_mut() } } diff --git a/src/geometry/similarity_ops.rs b/src/geometry/similarity_ops.rs index 57fdc05fc..081e51330 100644 --- a/src/geometry/similarity_ops.rs +++ b/src/geometry/similarity_ops.rs @@ -222,8 +222,8 @@ similarity_binop_assign_impl_all!( 
DivAssign, div_assign; self: Similarity, rhs: R; // FIXME: don't invert explicitly? - [val] => *self *= rhs.inverse(); - [ref] => *self *= rhs.inverse(); + [val] => *self *= rhs.two_sided_inverse(); + [ref] => *self *= rhs.two_sided_inverse(); ); // Similarity × R diff --git a/src/geometry/transform_alga.rs b/src/geometry/transform_alga.rs index 652da373b..c5ba675b5 100644 --- a/src/geometry/transform_alga.rs +++ b/src/geometry/transform_alga.rs @@ -1,6 +1,6 @@ use alga::general::{ AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Identity, Inverse, Multiplicative, Real, + AbstractSemigroup, Identity, TwoSidedInverse, Multiplicative, Real, }; use alga::linear::{ProjectiveTransformation, Transformation}; @@ -26,18 +26,18 @@ where } } -impl, C> Inverse for Transform +impl, C> TwoSidedInverse for Transform where C: SubTCategoryOf, DefaultAllocator: Allocator, DimNameSum>, { #[inline] - fn inverse(&self) -> Self { + fn two_sided_inverse(&self) -> Self { self.clone().inverse() } #[inline] - fn inverse_mut(&mut self) { + fn two_sided_inverse_mut(&mut self) { self.inverse_mut() } } @@ -116,12 +116,12 @@ where { #[inline] fn inverse_transform_point(&self, pt: &Point) -> Point { - self.inverse() * pt + self.two_sided_inverse() * pt } #[inline] fn inverse_transform_vector(&self, v: &VectorN) -> VectorN { - self.inverse() * v + self.two_sided_inverse() * v } } diff --git a/src/geometry/translation_alga.rs b/src/geometry/translation_alga.rs index 24aa28d29..fdd240145 100644 --- a/src/geometry/translation_alga.rs +++ b/src/geometry/translation_alga.rs @@ -1,6 +1,6 @@ use alga::general::{ AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, + AbstractSemigroup, Id, Identity, TwoSidedInverse, Multiplicative, Real, }; use alga::linear::Translation as AlgaTranslation; use alga::linear::{ @@ -28,16 +28,16 @@ where DefaultAllocator: Allocator } } -impl Inverse for Translation +impl TwoSidedInverse for Translation where DefaultAllocator: Allocator { #[inline] - fn inverse(&self) -> Self { + fn two_sided_inverse(&self) -> Self { self.inverse() } #[inline] - fn inverse_mut(&mut self) { + fn two_sided_inverse_mut(&mut self) { self.inverse_mut() } } diff --git a/src/geometry/unit_complex_alga.rs b/src/geometry/unit_complex_alga.rs index 59b11903a..21b956d99 100644 --- a/src/geometry/unit_complex_alga.rs +++ b/src/geometry/unit_complex_alga.rs @@ -1,6 +1,6 @@ use alga::general::{ AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, + AbstractSemigroup, Id, Identity, TwoSidedInverse, Multiplicative, Real, }; use alga::linear::{ AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation, @@ -31,14 +31,14 @@ impl AbstractMagma for UnitComplex { } } -impl Inverse for UnitComplex { +impl TwoSidedInverse for UnitComplex { #[inline] - fn inverse(&self) -> Self { + fn two_sided_inverse(&self) -> Self { self.inverse() } #[inline] - fn inverse_mut(&mut self) { + fn two_sided_inverse_mut(&mut self) { self.inverse_mut() } } diff --git a/src/lib.rs b/src/lib.rs index 157764599..f92cd36a5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -153,7 +153,7 @@ pub use sparse::*; use std::cmp::{self, Ordering, PartialOrd}; use alga::general::{ - Additive, AdditiveGroup, Identity, Inverse, JoinSemilattice, Lattice, MeetSemilattice, + Additive, AdditiveGroup, Identity, TwoSidedInverse, 
JoinSemilattice, Lattice, MeetSemilattice, Multiplicative, SupersetOf, }; use alga::linear::SquareMatrix as AlgaSquareMatrix; @@ -427,8 +427,8 @@ pub fn try_inverse(m: &M) -> Option { /// /// * [`try_inverse`](fn.try_inverse.html) #[inline] -pub fn inverse>(m: &M) -> M { - m.inverse() +pub fn inverse>(m: &M) -> M { + m.two_sided_inverse() } /*
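For downstream code, the visible effect of the alga 0.8 upgrade is the `Inverse` → `TwoSidedInverse` rename used throughout this patch: the trait impls now provide `two_sided_inverse`/`two_sided_inverse_mut`, while nalgebra's inherent `inverse` methods and the free `nalgebra::inverse` function keep their names (only the trait bound changes). A small calling sketch, assuming a build of nalgebra with this patch applied and its default `std` feature (the angle `0.7` is arbitrary):

    use alga::general::{Multiplicative, TwoSidedInverse};
    use nalgebra::Rotation2;

    fn main() {
        let r = Rotation2::new(0.7_f64);

        // The alga trait method formerly named `inverse` is now `two_sided_inverse`.
        let a = <Rotation2<f64> as TwoSidedInverse<Multiplicative>>::two_sided_inverse(&r);

        // The free function keeps its name; only its bound moved to `TwoSidedInverse`.
        let b = nalgebra::inverse(&r);

        // Inherent methods such as `Rotation2::inverse` are untouched by this patch.
        assert_eq!(a, r.inverse());
        assert_eq!(b, r.inverse());
    }

Since only the alga trait surface is renamed, code that calls the inherent methods should compile unchanged; code that names the alga trait or its methods directly needs the same mechanical rename applied throughout the diff above.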