From 2af4dc0d06f4a46d9e11de04aa70f472e3357619 Mon Sep 17 00:00:00 2001
From: PBerger
Date: Wed, 10 Oct 2012 23:15:41 +0200
Subject: [PATCH] Updated eigen to HEAD version

---
 resources/3rdparty/eigen/.hg_archival.txt | 7 +-
 resources/3rdparty/eigen/.hgtags | 1 -
 resources/3rdparty/eigen/.krazy | 3 -
 resources/3rdparty/eigen/COPYING.LGPL | 655 ++++--
 resources/3rdparty/eigen/COPYING.README | 3 +
 resources/3rdparty/eigen/Eigen/Core | 40 +-
 resources/3rdparty/eigen/Eigen/Eigenvalues | 2 +
 resources/3rdparty/eigen/Eigen/MetisSupport | 26 +
 .../3rdparty/eigen/Eigen/OrderingMethods | 2 +-
 resources/3rdparty/eigen/Eigen/SparseLU | 17 +
 .../3rdparty/eigen/Eigen/src/Cholesky/LDLT.h | 7 +
 .../Eigen/src/CholmodSupport/CholmodSupport.h | 24 +-
 .../3rdparty/eigen/Eigen/src/Core/Array.h | 24 +-
 .../eigen/Eigen/src/Core/ArrayWrapper.h | 64 +-
 .../eigen/Eigen/src/Core/AssignEvaluator.h | 755 +++++++
 .../eigen/Eigen/src/Core/Assign_MKL.h | 2 +-
 .../3rdparty/eigen/Eigen/src/Core/Block.h | 40 +-
 .../eigen/Eigen/src/Core/CoreEvaluators.h | 1299 ++++++++++++
 .../eigen/Eigen/src/Core/CwiseBinaryOp.h | 18 +-
 .../eigen/Eigen/src/Core/CwiseNullaryOp.h | 96 +-
 .../eigen/Eigen/src/Core/CwiseUnaryOp.h | 8 +-
 .../3rdparty/eigen/Eigen/src/Core/DenseBase.h | 28 +-
 .../eigen/Eigen/src/Core/DenseCoeffsBase.h | 12 +-
 .../eigen/Eigen/src/Core/DenseStorage.h | 83 +-
 .../3rdparty/eigen/Eigen/src/Core/Diagonal.h | 37 +-
 .../eigen/Eigen/src/Core/DiagonalMatrix.h | 16 +-
 .../eigen/Eigen/src/Core/DiagonalProduct.h | 4 +-
 resources/3rdparty/eigen/Eigen/src/Core/Dot.h | 4 +-
 .../3rdparty/eigen/Eigen/src/Core/Functors.h | 64 +-
 .../3rdparty/eigen/Eigen/src/Core/Fuzzy.h | 18 +-
 .../eigen/Eigen/src/Core/GeneralProduct.h | 2 +-
 resources/3rdparty/eigen/Eigen/src/Core/Map.h | 8 +-
 .../3rdparty/eigen/Eigen/src/Core/MapBase.h | 50 +-
 .../eigen/Eigen/src/Core/MathFunctions.h | 47 +
 .../eigen/Eigen/src/Core/MatrixBase.h | 28 +-
 .../3rdparty/eigen/Eigen/src/Core/NoAlias.h | 5 +
 .../eigen/Eigen/src/Core/PermutationMatrix.h | 22 +-
 .../eigen/Eigen/src/Core/PlainObjectBase.h | 131 +-
 .../3rdparty/eigen/Eigen/src/Core/Product.h | 45 +-
 .../eigen/Eigen/src/Core/ProductBase.h | 12 +-
 .../eigen/Eigen/src/Core/ProductEvaluators.h | 411 ++++
 .../3rdparty/eigen/Eigen/src/Core/Random.h | 4 +-
 resources/3rdparty/eigen/Eigen/src/Core/Ref.h | 254 +++
 .../3rdparty/eigen/Eigen/src/Core/Replicate.h | 28 +-
 .../3rdparty/eigen/Eigen/src/Core/Select.h | 8 +-
 .../eigen/Eigen/src/Core/StableNorm.h | 1 -
 .../3rdparty/eigen/Eigen/src/Core/Swap.h | 34 +-
 .../3rdparty/eigen/Eigen/src/Core/Transpose.h | 28 +-
 .../eigen/Eigen/src/Core/Transpositions.h | 18 +-
 .../eigen/Eigen/src/Core/TriangularMatrix.h | 7 +-
 .../eigen/Eigen/src/Core/VectorBlock.h | 24 +-
 .../3rdparty/eigen/Eigen/src/Core/Visitor.h | 12 +-
 .../Eigen/src/Core/arch/NEON/PacketMath.h | 39 +-
 .../Eigen/src/Core/arch/SSE/MathFunctions.h | 78 +-
 .../Eigen/src/Core/arch/SSE/PacketMath.h | 8 +-
 .../Core/products/GeneralBlockPanelKernel.h | 2 +-
 .../src/Core/products/GeneralMatrixVector.h | 24 +-
 .../products/TriangularMatrixMatrix_MKL.h | 20 +-
 .../products/TriangularMatrixVector_MKL.h | 8 +-
 .../eigen/Eigen/src/Core/util/Constants.h | 11 +-
 .../Eigen/src/Core/util/ForwardDeclarations.h | 4 +-
 .../eigen/Eigen/src/Core/util/Macros.h | 14 +-
 .../eigen/Eigen/src/Core/util/StaticAssert.h | 3 +-
 .../eigen/Eigen/src/Core/util/XprHelper.h | 25 +-
 .../src/Eigen2Support/Geometry/AlignedBox.h | 2 +-
 .../src/Eigen2Support/Geometry/AngleAxis.h | 2 +-
 .../src/Eigen2Support/Geometry/Hyperplane.h | 2 +-
 .../Eigen2Support/Geometry/ParametrizedLine.h | 2 +-
 .../src/Eigen2Support/Geometry/Quaternion.h | 2 +-
 .../src/Eigen2Support/Geometry/Rotation2D.h | 2 +-
 .../src/Eigen2Support/Geometry/RotationBase.h | 2 +-
 .../src/Eigen2Support/Geometry/Scaling.h | 2 +-
 .../src/Eigen2Support/Geometry/Transform.h | 2 +-
 .../src/Eigen2Support/Geometry/Translation.h | 2 +-
 .../Eigen/src/Eigen2Support/LeastSquares.h | 2 +-
 .../eigen/Eigen/src/Eigen2Support/SVD.h | 2 +-
 .../src/Eigenvalues/ComplexEigenSolver.h | 18 +-
 .../Eigen/src/Eigenvalues/ComplexSchur.h | 58 +-
 .../Eigen/src/Eigenvalues/ComplexSchur_MKL.h | 2 +-
 .../eigen/Eigen/src/Eigenvalues/EigenSolver.h | 21 +-
 .../src/Eigenvalues/GeneralizedEigenSolver.h | 339 +++
 .../eigen/Eigen/src/Eigenvalues/RealQZ.h | 618 ++++++
 .../eigen/Eigen/src/Eigenvalues/RealSchur.h | 50 +-
 .../src/Eigenvalues/SelfAdjointEigenSolver.h | 11 +-
 .../Eigenvalues/SelfAdjointEigenSolver_MKL.h | 2 +-
 .../eigen/Eigen/src/Geometry/AngleAxis.h | 4 +-
 .../eigen/Eigen/src/Geometry/Hyperplane.h | 4 +-
 .../Eigen/src/Geometry/ParametrizedLine.h | 4 +-
 .../eigen/Eigen/src/Geometry/Quaternion.h | 4 +-
 .../eigen/Eigen/src/Geometry/Rotation2D.h | 6 +-
 .../eigen/Eigen/src/Geometry/Scaling.h | 6 +-
 .../eigen/Eigen/src/Geometry/Transform.h | 18 +-
 .../eigen/Eigen/src/Geometry/Umeyama.h | 25 +-
 .../src/IterativeLinearSolvers/BiCGSTAB.h | 6 +-
 .../IterativeLinearSolvers/IncompleteLUT.h | 101 +-
 .../3rdparty/eigen/Eigen/src/Jacobi/Jacobi.h | 26 +-
 .../Eigen/src/MetisSupport/CMakeLists.txt | 6 +
 .../Eigen/src/MetisSupport/MetisSupport.h | 138 ++
 .../Eigen/src/OrderingMethods/Eigen_Colamd.h | 1849 +++++++++++++++++
 .../Eigen/src/OrderingMethods/Ordering.h | 158 ++
 .../Eigen/src/QR/ColPivHouseholderQR_MKL.h | 2 +-
 .../eigen/Eigen/src/QR/HouseholderQR.h | 8 +
 .../3rdparty/eigen/Eigen/src/SVD/JacobiSVD.h | 14 +-
 .../eigen/Eigen/src/SVD/JacobiSVD_MKL.h | 2 +-
 .../Eigen/src/SparseCore/CompressedStorage.h | 8 +-
 .../Eigen/src/SparseCore/SparseDenseProduct.h | 14 +-
 .../eigen/Eigen/src/SparseCore/SparseMatrix.h | 149 +-
 .../Eigen/src/SparseCore/SparseMatrixBase.h | 10 +-
 .../Eigen/src/SparseCore/SparseProduct.h | 4 +-
 .../src/SparseCore/SparseSelfAdjointView.h | 8 +-
 .../SparseSparseProductWithPruning.h | 10 +-
 .../Eigen/src/SparseCore/SparseTranspose.h | 2 +-
 .../eigen/Eigen/src/SparseCore/SparseUtil.h | 5 +-
 .../eigen/Eigen/src/SparseCore/SparseVector.h | 2 +-
 .../eigen/Eigen/src/SparseCore/SparseView.h | 2 +-
 .../eigen/Eigen/src/SparseLU/CMakeLists.txt | 6 +
 .../eigen/Eigen/src/SparseLU/SparseLU.h | 630 ++++++
 .../eigen/Eigen/src/SparseLU/SparseLUBase.h | 74 +
 .../Eigen/src/SparseLU/SparseLU_Coletree.h | 180 ++
 .../Eigen/src/SparseLU/SparseLU_Matrix.h | 313 +++
 .../Eigen/src/SparseLU/SparseLU_Memory.h | 204 ++
 .../Eigen/src/SparseLU/SparseLU_Structs.h | 103 +
 .../eigen/Eigen/src/SparseLU/SparseLU_Utils.h | 75 +
 .../Eigen/src/SparseLU/SparseLU_column_bmod.h | 162 ++
 .../Eigen/src/SparseLU/SparseLU_column_dfs.h | 164 ++
 .../src/SparseLU/SparseLU_copy_to_ucol.h | 100 +
 .../src/SparseLU/SparseLU_heap_relax_snode.h | 119 ++
 .../Eigen/src/SparseLU/SparseLU_kernel_bmod.h | 109 +
 .../Eigen/src/SparseLU/SparseLU_panel_bmod.h | 204 ++
 .../Eigen/src/SparseLU/SparseLU_panel_dfs.h | 247 +++
 .../Eigen/src/SparseLU/SparseLU_pivotL.h | 125 ++
 .../Eigen/src/SparseLU/SparseLU_pruneL.h | 129 ++
 .../Eigen/src/SparseLU/SparseLU_relax_snode.h | 73 +
 .../Eigen/src/SparseLU/SparseLU_snode_bmod.h | 72 +
 .../Eigen/src/SparseLU/SparseLU_snode_dfs.h | 95 +
 .../Eigen/src/SuperLUSupport/SuperLUSupport.h | 7 +-
 .../Eigen/src/plugins/ArrayCwiseBinaryOps.h | 6 +-
 .../Eigen/src/plugins/ArrayCwiseUnaryOps.h | 1 +
 resources/3rdparty/eigen/bench/bench_gemm.cpp | 2 +-
 .../eigen/bench/spbench/CMakeLists.txt | 13 +
 .../eigen/bench/spbench/sp_solver.cpp | 125 ++
 .../3rdparty/eigen/bench/spbench/spbench.dtd | 31 +
 .../eigen/bench/spbench/spbenchsolver.cpp | 15 +-
 .../eigen/bench/spbench/spbenchsolver.h | 475 +++--
 .../eigen/bench/spbench/spbenchstyle.h | 94 +
 .../eigen/bench/spbench/test_sparseLU.cpp | 93 +
 resources/3rdparty/eigen/blas/CMakeLists.txt | 8 +-
 .../3rdparty/eigen/blas/GeneralRank1Update.h | 44 +
 .../eigen/blas/PackedSelfadjointProduct.h | 54 +
 .../eigen/blas/PackedTriangularMatrixVector.h | 79 +
 .../eigen/blas/PackedTriangularSolverVector.h | 88 +
 resources/3rdparty/eigen/blas/Rank2Update.h | 57 +
 resources/3rdparty/eigen/blas/chpr.f | 220 --
 resources/3rdparty/eigen/blas/chpr2.f | 255 ---
 resources/3rdparty/eigen/blas/common.h | 13 +-
 resources/3rdparty/eigen/blas/ctpmv.f | 329 ---
 resources/3rdparty/eigen/blas/ctpsv.f | 332 ---
 resources/3rdparty/eigen/blas/double.cpp | 14 +
 resources/3rdparty/eigen/blas/dspr.f | 202 --
 resources/3rdparty/eigen/blas/dspr2.f | 233 ---
 resources/3rdparty/eigen/blas/dtpmv.f | 293 ---
 resources/3rdparty/eigen/blas/dtpsv.f | 296 ---
 .../3rdparty/eigen/blas/level2_cplx_impl.h | 178 +-
 resources/3rdparty/eigen/blas/level2_impl.h | 145 +-
 .../3rdparty/eigen/blas/level2_real_impl.h | 210 +-
 resources/3rdparty/eigen/blas/level3_impl.h | 2 +
 resources/3rdparty/eigen/blas/single.cpp | 3 +
 resources/3rdparty/eigen/blas/sspr.f | 202 --
 resources/3rdparty/eigen/blas/sspr2.f | 233 ---
 resources/3rdparty/eigen/blas/stpmv.f | 293 ---
 resources/3rdparty/eigen/blas/stpsv.f | 296 ---
 .../3rdparty/eigen/blas/testing/dblat1.f | 476 ++++-
 .../3rdparty/eigen/blas/testing/sblat1.f | 430 +++-
 resources/3rdparty/eigen/blas/zhpr.f | 220 --
 resources/3rdparty/eigen/blas/zhpr2.f | 255 ---
 resources/3rdparty/eigen/blas/ztpmv.f | 329 ---
 resources/3rdparty/eigen/blas/ztpsv.f | 332 ---
 .../3rdparty/eigen/cmake/FindMetis.cmake | 3 +-
 .../3rdparty/eigen/cmake/FindUmfpack.cmake | 5 +-
 .../3rdparty/eigen/debug/gdb/printers.py | 4 +-
 .../3rdparty/eigen/doc/C09_TutorialSparse.dox | 6 +-
 resources/3rdparty/eigen/doc/CMakeLists.txt | 4 +-
 .../3rdparty/eigen/doc/D01_StlContainers.dox | 4 +-
 resources/3rdparty/eigen/doc/Doxyfile.in | 6 +-
 .../3rdparty/eigen/doc/I02_HiPerformance.dox | 2 +-
 .../3rdparty/eigen/doc/I10_Assertions.dox | 107 +-
 .../eigen/doc/I17_SparseLinearSystems.dox | 110 +
 .../3rdparty/eigen/doc/QuickReference.dox | 2 +-
 .../eigen/doc/eigendoxy_footer.html.in | 18 +-
 .../eigen/doc/examples/CMakeLists.txt | 2 -
 .../examples/QuickStart_example2_dynamic.cpp | 6 +-
 .../examples/QuickStart_example2_fixed.cpp | 6 +-
 .../doc/snippets/GeneralizedEigenSolver.cpp | 7 +
 .../snippets/HouseholderQR_householderQ.cpp | 7 +
 .../eigen/doc/snippets/RealQZ_compute.cpp | 17 +
 .../eigen/doc/special_examples/CMakeLists.txt | 1 +
 resources/3rdparty/eigen/test/CMakeLists.txt | 5 +-
 .../3rdparty/eigen/test/array_for_matrix.cpp | 37 +
 resources/3rdparty/eigen/test/cholesky.cpp | 14 +
 resources/3rdparty/eigen/test/diagonal.cpp | 9 +-
 .../3rdparty/eigen/test/diagonalmatrices.cpp | 9 +
 .../eigen/test/eigensolver_complex.cpp | 11 +
 .../test/eigensolver_generalized_real.cpp | 63 +
 .../eigen/test/eigensolver_generic.cpp | 13 +-
 resources/3rdparty/eigen/test/evaluators.cpp | 321 +++
 .../3rdparty/eigen/test/product_extra.cpp | 32 +-
 resources/3rdparty/eigen/test/real_qz.cpp | 69 +
 .../3rdparty/eigen/test/schur_complex.cpp | 19 +-
 resources/3rdparty/eigen/test/schur_real.cpp | 21 +-
 .../3rdparty/eigen/test/sparse_basic.cpp | 41 +
 resources/3rdparty/eigen/test/sparse_solver.h | 4 +-
 .../3rdparty/eigen/test/sparse_vector.cpp | 5 +
 resources/3rdparty/eigen/test/sparselu.cpp | 43 +
 .../eigen/unsupported/Eigen/IterativeSolvers | 1 +
 .../eigen/unsupported/Eigen/MatrixFunctions | 72 +-
 .../src/IterativeSolvers/IncompleteCholesky.h | 221 ++
 .../src/MatrixFunctions/MatrixExponential.h | 47 +-
 .../src/MatrixFunctions/MatrixFunction.h | 2 +-
 .../src/MatrixFunctions/MatrixLogarithm.h | 85 +-
 .../Eigen/src/MatrixFunctions/MatrixPower.h | 386 ++++
 .../src/MatrixFunctions/MatrixPowerBase.h | 359 ++++
 .../src/MatrixFunctions/MatrixSquareRoot.h | 10 +-
 .../src/SparseExtra/MatrixMarketIterator.h | 13 +-
 .../unsupported/Eigen/src/Splines/Spline.h | 15 +
 .../unsupported/doc/examples/MatrixPower.cpp | 16 +
 .../doc/examples/MatrixPower_optimal.cpp | 17 +
 .../eigen/unsupported/test/CMakeLists.txt | 1 +
 .../unsupported/test/matrix_exponential.cpp | 12 +-
 .../eigen/unsupported/test/matrix_functions.h | 47 +
 .../eigen/unsupported/test/matrix_power.cpp | 136 ++
 .../unsupported/test/matrix_square_root.cpp | 33 +-
 231 files changed, 15251 insertions(+), 5934 deletions(-)
 delete mode 100644 resources/3rdparty/eigen/.krazy
 create mode 100644 resources/3rdparty/eigen/Eigen/MetisSupport
 create mode 100644 resources/3rdparty/eigen/Eigen/SparseLU
 create mode 100644 resources/3rdparty/eigen/Eigen/src/Core/AssignEvaluator.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/Core/CoreEvaluators.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/Core/ProductEvaluators.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Ref.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/Eigenvalues/RealQZ.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/MetisSupport/CMakeLists.txt
 create mode 100644 resources/3rdparty/eigen/Eigen/src/MetisSupport/MetisSupport.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/OrderingMethods/Ordering.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/CMakeLists.txt
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLUBase.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_Coletree.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_Matrix.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_Memory.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_Structs.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_Utils.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_panel_dfs.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_pruneL.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_relax_snode.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_snode_bmod.h
 create mode 100644 resources/3rdparty/eigen/Eigen/src/SparseLU/SparseLU_snode_dfs.h
 create mode 100644 resources/3rdparty/eigen/bench/spbench/sp_solver.cpp
 create mode 100644 resources/3rdparty/eigen/bench/spbench/spbench.dtd
 create mode 100644 resources/3rdparty/eigen/bench/spbench/spbenchstyle.h
 create mode 100644 resources/3rdparty/eigen/bench/spbench/test_sparseLU.cpp
 create mode 100644 resources/3rdparty/eigen/blas/GeneralRank1Update.h
 create mode 100644 resources/3rdparty/eigen/blas/PackedSelfadjointProduct.h
 create mode 100644 resources/3rdparty/eigen/blas/PackedTriangularMatrixVector.h
 create mode 100644 resources/3rdparty/eigen/blas/PackedTriangularSolverVector.h
 create mode 100644 resources/3rdparty/eigen/blas/Rank2Update.h
 delete mode 100644 resources/3rdparty/eigen/blas/chpr.f
 delete mode 100644 resources/3rdparty/eigen/blas/chpr2.f
 delete mode 100644 resources/3rdparty/eigen/blas/ctpmv.f
 delete mode 100644 resources/3rdparty/eigen/blas/ctpsv.f
 delete mode 100644 resources/3rdparty/eigen/blas/dspr.f
 delete mode 100644 resources/3rdparty/eigen/blas/dspr2.f
 delete mode 100644 resources/3rdparty/eigen/blas/dtpmv.f
 delete mode 100644 resources/3rdparty/eigen/blas/dtpsv.f
 delete mode 100644 resources/3rdparty/eigen/blas/sspr.f
 delete mode 100644 resources/3rdparty/eigen/blas/sspr2.f
 delete mode 100644 resources/3rdparty/eigen/blas/stpmv.f
 delete mode 100644 resources/3rdparty/eigen/blas/stpsv.f
 delete mode 100644 resources/3rdparty/eigen/blas/zhpr.f
 delete mode 100644 resources/3rdparty/eigen/blas/zhpr2.f
 delete mode 100644 resources/3rdparty/eigen/blas/ztpmv.f
 delete mode 100644 resources/3rdparty/eigen/blas/ztpsv.f
 create mode 100644 resources/3rdparty/eigen/doc/I17_SparseLinearSystems.dox
 create mode 100644 resources/3rdparty/eigen/doc/snippets/GeneralizedEigenSolver.cpp
 create mode 100644 resources/3rdparty/eigen/doc/snippets/HouseholderQR_householderQ.cpp
 create mode 100644 resources/3rdparty/eigen/doc/snippets/RealQZ_compute.cpp
 create mode 100644 resources/3rdparty/eigen/test/eigensolver_generalized_real.cpp
 create mode 100644 resources/3rdparty/eigen/test/evaluators.cpp
 create mode 100644 resources/3rdparty/eigen/test/real_qz.cpp
 create mode 100644 resources/3rdparty/eigen/test/sparselu.cpp
 create mode 100644 resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
 create mode 100644 resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
 create mode 100644 resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixPowerBase.h
 create mode 100644 resources/3rdparty/eigen/unsupported/doc/examples/MatrixPower.cpp
 create mode 100644 resources/3rdparty/eigen/unsupported/doc/examples/MatrixPower_optimal.cpp
 create mode 100644 resources/3rdparty/eigen/unsupported/test/matrix_functions.h
 create mode 100644 resources/3rdparty/eigen/unsupported/test/matrix_power.cpp

diff --git a/resources/3rdparty/eigen/.hg_archival.txt b/resources/3rdparty/eigen/.hg_archival.txt
index 5a08f7257..ed93eed54 100644
--- a/resources/3rdparty/eigen/.hg_archival.txt
+++ b/resources/3rdparty/eigen/.hg_archival.txt
@@ -1,4 +1,5 @@
 repo: 8a21fd850624c931e448cbcfb38168cb2717c790
-node: 43d9075b23ef596ddf396101956d06f446fc0765
-branch: 3.1
-tag: 
3.1.1 +node: 5945cb388ded120eb6dd3a1dfd2766b8e83237a4 +branch: default +latesttag: 3.1.0-rc2 +latesttagdistance: 147 diff --git a/resources/3rdparty/eigen/.hgtags b/resources/3rdparty/eigen/.hgtags index 4068c28b2..cbbcebdae 100644 --- a/resources/3rdparty/eigen/.hgtags +++ b/resources/3rdparty/eigen/.hgtags @@ -20,4 +20,3 @@ a810d5dbab47acfe65b3350236efdd98f67d4d8a 3.1.0-alpha1 920fc730b5930daae0a6dbe296d60ce2e3808215 3.1.0-beta1 8383e883ebcc6f14695ff0b5e20bb631abab43fb 3.1.0-rc1 bf4cb8c934fa3a79f45f1e629610f0225e93e493 3.1.0-rc2 -ca142d0540d3384180c5082d24ef056bd3c354b6 3.1.0 diff --git a/resources/3rdparty/eigen/.krazy b/resources/3rdparty/eigen/.krazy deleted file mode 100644 index d719866a6..000000000 --- a/resources/3rdparty/eigen/.krazy +++ /dev/null @@ -1,3 +0,0 @@ -SKIP /disabled/ -SKIP /bench/ -SKIP /build/ diff --git a/resources/3rdparty/eigen/COPYING.LGPL b/resources/3rdparty/eigen/COPYING.LGPL index 0e4fa8aaf..4362b4915 100644 --- a/resources/3rdparty/eigen/COPYING.LGPL +++ b/resources/3rdparty/eigen/COPYING.LGPL @@ -1,165 +1,502 @@ GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 + Version 2.1, February 1999 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. 
- - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. 
If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. 
These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. 
For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. 
+ + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. 
+ + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) 
+ + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. 
Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. 
If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. 
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/resources/3rdparty/eigen/COPYING.README b/resources/3rdparty/eigen/COPYING.README
index 1d706784d..de5b63215 100644
--- a/resources/3rdparty/eigen/COPYING.README
+++ b/resources/3rdparty/eigen/COPYING.README
@@ -5,6 +5,9 @@ Eigen is primarily MPL2 licensed. See COPYING.MPL2 and these links:
 Some files contain third-party code under BSD or LGPL licenses, whence
 the other COPYING.* files here.
 
+All the LGPL code is either LGPL 2.1-only, or LGPL 2.1-or-later.
+For this reason, the COPYING.LGPL file contains the LGPL 2.1 text.
+
 If you want to guarantee that the Eigen code that you are #including
 is licensed under the MPL2 and possibly more permissive licenses (like
 BSD), #define this preprocessor symbol:
diff --git a/resources/3rdparty/eigen/Eigen/Core b/resources/3rdparty/eigen/Eigen/Core
index d48017022..502a4fc55 100644
--- a/resources/3rdparty/eigen/Eigen/Core
+++ b/resources/3rdparty/eigen/Eigen/Core
@@ -87,19 +87,25 @@
 // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too.
 // notice that since these are C headers, the extern "C" is theoretically needed anyways.
 extern "C" {
-  #include <emmintrin.h>
-  #include <xmmintrin.h>
-  #ifdef EIGEN_VECTORIZE_SSE3
-  #include <pmmintrin.h>
-  #endif
-  #ifdef EIGEN_VECTORIZE_SSSE3
-  #include <tmmintrin.h>
-  #endif
-  #ifdef EIGEN_VECTORIZE_SSE4_1
-  #include <smmintrin.h>
-  #endif
-  #ifdef EIGEN_VECTORIZE_SSE4_2
-  #include <nmmintrin.h>
+  // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
+  // Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus:
+  #ifdef __INTEL_COMPILER
+    #include <immintrin.h>
+  #else
+    #include <emmintrin.h>
+    #include <xmmintrin.h>
+    #ifdef EIGEN_VECTORIZE_SSE3
+    #include <pmmintrin.h>
+    #endif
+    #ifdef EIGEN_VECTORIZE_SSSE3
+    #include <tmmintrin.h>
+    #endif
+    #ifdef EIGEN_VECTORIZE_SSE4_1
+    #include <smmintrin.h>
+    #endif
+    #ifdef EIGEN_VECTORIZE_SSE4_2
+    #include <nmmintrin.h>
+    #endif
   #endif
 } // end extern "C"
 #elif defined __ALTIVEC__
@@ -297,6 +303,7 @@ using std::ptrdiff_t;
 #include "src/Core/Map.h"
 #include "src/Core/Block.h"
 #include "src/Core/VectorBlock.h"
+#include "src/Core/Ref.h"
 #include "src/Core/Transpose.h"
 #include "src/Core/DiagonalMatrix.h"
 #include "src/Core/Diagonal.h"
@@ -340,6 +347,13 @@ using std::ptrdiff_t;
 #include "src/Core/ArrayBase.h"
 #include "src/Core/ArrayWrapper.h"
 
+#ifdef EIGEN_ENABLE_EVALUATORS
+#include "src/Core/Product.h"
+#include "src/Core/CoreEvaluators.h"
+#include "src/Core/AssignEvaluator.h"
+#include "src/Core/ProductEvaluators.h"
+#endif
+
 #ifdef EIGEN_USE_BLAS
 #include "src/Core/products/GeneralMatrixMatrix_MKL.h"
 #include "src/Core/products/GeneralMatrixVector_MKL.h"
diff --git a/resources/3rdparty/eigen/Eigen/Eigenvalues b/resources/3rdparty/eigen/Eigen/Eigenvalues
index af99ccd1f..53c5a73a2 100644
--- a/resources/3rdparty/eigen/Eigen/Eigenvalues
+++ b/resources/3rdparty/eigen/Eigen/Eigenvalues
@@ -33,6 +33,8 @@
 #include "src/Eigenvalues/HessenbergDecomposition.h"
 #include "src/Eigenvalues/ComplexSchur.h"
 #include "src/Eigenvalues/ComplexEigenSolver.h"
+#include "src/Eigenvalues/RealQZ.h"
+#include "src/Eigenvalues/GeneralizedEigenSolver.h"
 #include "src/Eigenvalues/MatrixBaseEigenvalues.h"
 #ifdef EIGEN_USE_LAPACKE
 #include "src/Eigenvalues/RealSchur_MKL.h"
diff --git a/resources/3rdparty/eigen/Eigen/MetisSupport b/resources/3rdparty/eigen/Eigen/MetisSupport
new file mode 100644
index 000000000..a44086ad9
--- /dev/null
+++ b/resources/3rdparty/eigen/Eigen/MetisSupport
@@ -0,0 +1,26 @@
+#ifndef EIGEN_METISSUPPORT_MODULE_H
+#define EIGEN_METISSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+extern "C" {
+#include <metis.h>
+}
+
+
+/** \ingroup Support_modules
+  * \defgroup MetisSupport_Module MetisSupport module
+  *
+  * \code
+  * #include <Eigen/MetisSupport>
+  * \endcode
+  */
+
+
+#include "src/MetisSupport/MetisSupport.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_METISSUPPORT_MODULE_H
diff --git a/resources/3rdparty/eigen/Eigen/OrderingMethods b/resources/3rdparty/eigen/Eigen/OrderingMethods
index 1e2d87452..bb43220e8 100644
--- a/resources/3rdparty/eigen/Eigen/OrderingMethods
+++ b/resources/3rdparty/eigen/Eigen/OrderingMethods
@@ -17,7 +17,7 @@
   */
 
 #include "src/OrderingMethods/Amd.h"
-
+#include "src/OrderingMethods/Ordering.h"
 #include "src/Core/util/ReenableStupidWarnings.h"
 
 #endif // EIGEN_ORDERINGMETHODS_MODULE_H
diff --git a/resources/3rdparty/eigen/Eigen/SparseLU b/resources/3rdparty/eigen/Eigen/SparseLU
new file mode 100644
index 000000000..452bc9f83
--- /dev/null
+++ b/resources/3rdparty/eigen/Eigen/SparseLU
@@ -0,0 +1,17 @@
+#ifndef EIGEN_SPARSELU_MODULE_H
+#define EIGEN_SPARSELU_MODULE_H
+
+#include "SparseCore"
+
+
+/** \ingroup Sparse_modules
+  * \defgroup SparseLU_Module SparseLU module
+  *
+  */
+
+// Ordering interface
+#include "OrderingMethods"
+
+#include "src/SparseLU/SparseLU.h"
+
+#endif // EIGEN_SPARSELU_MODULE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Cholesky/LDLT.h b/resources/3rdparty/eigen/Eigen/src/Cholesky/LDLT.h
index 68e54b1d4..a73a9c19f 100644
--- a/resources/3rdparty/eigen/Eigen/src/Cholesky/LDLT.h
+++ b/resources/3rdparty/eigen/Eigen/src/Cholesky/LDLT.h
b/resources/3rdparty/eigen/Eigen/src/Cholesky/LDLT.h @@ -281,6 +281,13 @@ template<> struct ldlt_inplace if(sign) *sign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1; } + else if(sign) + { + // LDLT is not guaranteed to work for indefinite matrices, but let's try to get the sign right + int newSign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0; + if(newSign != *sign) + *sign = 0; + } // Finish early if the matrix is not full rank. if(biggest_in_corner < cutoff) diff --git a/resources/3rdparty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h b/resources/3rdparty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h index 37f142150..b38821807 100644 --- a/resources/3rdparty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +++ b/resources/3rdparty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h @@ -173,6 +173,7 @@ class CholmodBase : internal::noncopyable CholmodBase(const MatrixType& matrix) : m_cholmodFactor(0), m_info(Success), m_isInitialized(false) { + m_shiftOffset[0] = m_shiftOffset[1] = RealScalar(0.0); cholmod_start(&m_cholmod); compute(matrix); } @@ -269,9 +270,10 @@ class CholmodBase : internal::noncopyable { eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView()); - cholmod_factorize(&A, m_cholmodFactor, &m_cholmod); + cholmod_factorize_p(&A, m_shiftOffset, 0, 0, m_cholmodFactor, &m_cholmod); - this->m_info = Success; + // If the factorization failed, minor is the column at which it did. On success minor == n. + this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue); m_factorizationIsOk = true; } @@ -286,6 +288,7 @@ class CholmodBase : internal::noncopyable { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); const Index size = m_cholmodFactor->n; + EIGEN_UNUSED_VARIABLE(size); eigen_assert(size==b.rows()); // note: cd stands for Cholmod Dense @@ -321,6 +324,22 @@ class CholmodBase : internal::noncopyable } #endif // EIGEN_PARSED_BY_DOXYGEN + + /** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization. + * + * During the numerical factorization, an offset term is added to the diagonal coefficients:\n + * \c d_ii = \a offset + \c d_ii + * + * The default is \a offset=0. + * + * \returns a reference to \c *this. + */ + Derived& setShift(const RealScalar& offset) + { + m_shiftOffset[0] = offset; + return derived(); + } + template void dumpMemory(Stream& s) {} @@ -328,6 +347,7 @@ class CholmodBase : internal::noncopyable protected: mutable cholmod_common m_cholmod; cholmod_factor* m_cholmodFactor; + RealScalar m_shiftOffset[2]; mutable ComputationInfo m_info; bool m_isInitialized; int m_factorizationIsOk; diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Array.h b/resources/3rdparty/eigen/Eigen/src/Core/Array.h index aaa389978..539e1d22b 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/Array.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/Array.h @@ -142,10 +142,10 @@ class Array #ifndef EIGEN_PARSED_BY_DOXYGEN template - EIGEN_STRONG_INLINE Array(const T0& x, const T1& y) + EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1) { Base::_check_template_params(); - this->template _init2(x, y); + this->template _init2(val0, val1); } #else /** constructs an uninitialized matrix with \a rows rows and \a cols columns. @@ -155,27 +155,27 @@ class Array * Matrix() instead. 
*/ Array(Index rows, Index cols); /** constructs an initialized 2D vector with given coefficients */ - Array(const Scalar& x, const Scalar& y); + Array(const Scalar& val0, const Scalar& val1); #endif /** constructs an initialized 3D vector with given coefficients */ - EIGEN_STRONG_INLINE Array(const Scalar& x, const Scalar& y, const Scalar& z) + EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3) - m_storage.data()[0] = x; - m_storage.data()[1] = y; - m_storage.data()[2] = z; + m_storage.data()[0] = val0; + m_storage.data()[1] = val1; + m_storage.data()[2] = val2; } /** constructs an initialized 4D vector with given coefficients */ - EIGEN_STRONG_INLINE Array(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w) + EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4) - m_storage.data()[0] = x; - m_storage.data()[1] = y; - m_storage.data()[2] = z; - m_storage.data()[3] = w; + m_storage.data()[0] = val0; + m_storage.data()[1] = val1; + m_storage.data()[2] = val2; + m_storage.data()[3] = val3; } explicit Array(const Scalar *data); diff --git a/resources/3rdparty/eigen/Eigen/src/Core/ArrayWrapper.h b/resources/3rdparty/eigen/Eigen/src/Core/ArrayWrapper.h index 87af7fda9..1e021b0b9 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/ArrayWrapper.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/ArrayWrapper.h @@ -58,19 +58,19 @@ class ArrayWrapper : public ArrayBase > inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } inline const Scalar* data() const { return m_expression.data(); } - inline CoeffReturnType coeff(Index row, Index col) const + inline CoeffReturnType coeff(Index rowId, Index colId) const { - return m_expression.coeff(row, col); + return m_expression.coeff(rowId, colId); } - inline Scalar& coeffRef(Index row, Index col) + inline Scalar& coeffRef(Index rowId, Index colId) { - return m_expression.const_cast_derived().coeffRef(row, col); + return m_expression.const_cast_derived().coeffRef(rowId, colId); } - inline const Scalar& coeffRef(Index row, Index col) const + inline const Scalar& coeffRef(Index rowId, Index colId) const { - return m_expression.const_cast_derived().coeffRef(row, col); + return m_expression.const_cast_derived().coeffRef(rowId, colId); } inline CoeffReturnType coeff(Index index) const @@ -89,15 +89,15 @@ class ArrayWrapper : public ArrayBase > } template - inline const PacketScalar packet(Index row, Index col) const + inline const PacketScalar packet(Index rowId, Index colId) const { - return m_expression.template packet(row, col); + return m_expression.template packet(rowId, colId); } template - inline void writePacket(Index row, Index col, const PacketScalar& x) + inline void writePacket(Index rowId, Index colId, const PacketScalar& val) { - m_expression.const_cast_derived().template writePacket(row, col, x); + m_expression.const_cast_derived().template writePacket(rowId, colId, val); } template @@ -107,9 +107,9 @@ class ArrayWrapper : public ArrayBase > } template - inline void writePacket(Index index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& val) { - m_expression.const_cast_derived().template writePacket(index, x); + m_expression.const_cast_derived().template writePacket(index, val); } template @@ -121,6 +121,13 @@ class 
ArrayWrapper : public ArrayBase > return m_expression; } + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index) */ + void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); } + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index,Index)*/ + void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); } + protected: NestedExpressionType m_expression; }; @@ -161,7 +168,7 @@ class MatrixWrapper : public MatrixBase > typedef typename internal::nested::type NestedExpressionType; - inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {} + inline MatrixWrapper(ExpressionType& a_matrix) : m_expression(a_matrix) {} inline Index rows() const { return m_expression.rows(); } inline Index cols() const { return m_expression.cols(); } @@ -171,19 +178,19 @@ class MatrixWrapper : public MatrixBase > inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } inline const Scalar* data() const { return m_expression.data(); } - inline CoeffReturnType coeff(Index row, Index col) const + inline CoeffReturnType coeff(Index rowId, Index colId) const { - return m_expression.coeff(row, col); + return m_expression.coeff(rowId, colId); } - inline Scalar& coeffRef(Index row, Index col) + inline Scalar& coeffRef(Index rowId, Index colId) { - return m_expression.const_cast_derived().coeffRef(row, col); + return m_expression.const_cast_derived().coeffRef(rowId, colId); } - inline const Scalar& coeffRef(Index row, Index col) const + inline const Scalar& coeffRef(Index rowId, Index colId) const { - return m_expression.derived().coeffRef(row, col); + return m_expression.derived().coeffRef(rowId, colId); } inline CoeffReturnType coeff(Index index) const @@ -202,15 +209,15 @@ class MatrixWrapper : public MatrixBase > } template - inline const PacketScalar packet(Index row, Index col) const + inline const PacketScalar packet(Index rowId, Index colId) const { - return m_expression.template packet(row, col); + return m_expression.template packet(rowId, colId); } template - inline void writePacket(Index row, Index col, const PacketScalar& x) + inline void writePacket(Index rowId, Index colId, const PacketScalar& val) { - m_expression.const_cast_derived().template writePacket(row, col, x); + m_expression.const_cast_derived().template writePacket(rowId, colId, val); } template @@ -220,9 +227,9 @@ class MatrixWrapper : public MatrixBase > } template - inline void writePacket(Index index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& val) { - m_expression.const_cast_derived().template writePacket(index, x); + m_expression.const_cast_derived().template writePacket(index, val); } const typename internal::remove_all::type& @@ -231,6 +238,13 @@ class MatrixWrapper : public MatrixBase > return m_expression; } + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index) */ + void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); } + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index,Index)*/ + void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); } + protected: NestedExpressionType m_expression; }; diff --git a/resources/3rdparty/eigen/Eigen/src/Core/AssignEvaluator.h b/resources/3rdparty/eigen/Eigen/src/Core/AssignEvaluator.h new file mode 100644 index 000000000..5e134c83a --- /dev/null +++ 
b/resources/3rdparty/eigen/Eigen/src/Core/AssignEvaluator.h @@ -0,0 +1,755 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Benoit Jacob +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2011-2012 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ASSIGN_EVALUATOR_H +#define EIGEN_ASSIGN_EVALUATOR_H + +namespace Eigen { + +// This implementation is based on Assign.h + +namespace internal { + +/*************************************************************************** +* Part 1 : the logic deciding a strategy for traversal and unrolling * +***************************************************************************/ + +// copy_using_evaluator_traits is based on assign_traits + +template +struct copy_using_evaluator_traits +{ +public: + enum { + DstIsAligned = Derived::Flags & AlignedBit, + DstHasDirectAccess = Derived::Flags & DirectAccessBit, + SrcIsAligned = OtherDerived::Flags & AlignedBit, + JointAlignment = bool(DstIsAligned) && bool(SrcIsAligned) ? Aligned : Unaligned, + SrcEvalBeforeAssign = (evaluator_traits::HasEvalTo == 1) + }; + +private: + enum { + InnerSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::SizeAtCompileTime) + : int(Derived::Flags)&RowMajorBit ? int(Derived::ColsAtCompileTime) + : int(Derived::RowsAtCompileTime), + InnerMaxSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::MaxSizeAtCompileTime) + : int(Derived::Flags)&RowMajorBit ? int(Derived::MaxColsAtCompileTime) + : int(Derived::MaxRowsAtCompileTime), + MaxSizeAtCompileTime = Derived::SizeAtCompileTime, + PacketSize = packet_traits::size + }; + + enum { + StorageOrdersAgree = (int(Derived::IsRowMajor) == int(OtherDerived::IsRowMajor)), + MightVectorize = StorageOrdersAgree + && (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit), + MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0 + && int(DstIsAligned) && int(SrcIsAligned), + MayLinearize = StorageOrdersAgree && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit), + MayLinearVectorize = MightVectorize && MayLinearize && DstHasDirectAccess + && (DstIsAligned || MaxSizeAtCompileTime == Dynamic), + /* If the destination isn't aligned, we have to do runtime checks and we don't unroll, + so it's only good for large enough sizes. */ + MaySliceVectorize = MightVectorize && DstHasDirectAccess + && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=3*PacketSize) + /* slice vectorization can be slow, so we only want it if the slices are big, which is + indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block + in a fixed-size matrix */ + }; + +public: + enum { + Traversal = int(SrcEvalBeforeAssign) ? int(AllAtOnceTraversal) + : int(MayInnerVectorize) ? int(InnerVectorizedTraversal) + : int(MayLinearVectorize) ? int(LinearVectorizedTraversal) + : int(MaySliceVectorize) ? int(SliceVectorizedTraversal) + : int(MayLinearize) ? int(LinearTraversal) + : int(DefaultTraversal), + Vectorized = int(Traversal) == InnerVectorizedTraversal + || int(Traversal) == LinearVectorizedTraversal + || int(Traversal) == SliceVectorizedTraversal + }; + +private: + enum { + UnrollingLimit = EIGEN_UNROLLING_LIMIT * (Vectorized ? 
int(PacketSize) : 1), + MayUnrollCompletely = int(Derived::SizeAtCompileTime) != Dynamic + && int(OtherDerived::CoeffReadCost) != Dynamic + && int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit), + MayUnrollInner = int(InnerSize) != Dynamic + && int(OtherDerived::CoeffReadCost) != Dynamic + && int(InnerSize) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit) + }; + +public: + enum { + Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal)) + ? ( + int(MayUnrollCompletely) ? int(CompleteUnrolling) + : int(MayUnrollInner) ? int(InnerUnrolling) + : int(NoUnrolling) + ) + : int(Traversal) == int(LinearVectorizedTraversal) + ? ( bool(MayUnrollCompletely) && bool(DstIsAligned) ? int(CompleteUnrolling) + : int(NoUnrolling) ) + : int(Traversal) == int(LinearTraversal) + ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) + : int(NoUnrolling) ) + : int(NoUnrolling) + }; + +#ifdef EIGEN_DEBUG_ASSIGN + static void debug() + { + EIGEN_DEBUG_VAR(DstIsAligned) + EIGEN_DEBUG_VAR(SrcIsAligned) + EIGEN_DEBUG_VAR(JointAlignment) + EIGEN_DEBUG_VAR(InnerSize) + EIGEN_DEBUG_VAR(InnerMaxSize) + EIGEN_DEBUG_VAR(PacketSize) + EIGEN_DEBUG_VAR(StorageOrdersAgree) + EIGEN_DEBUG_VAR(MightVectorize) + EIGEN_DEBUG_VAR(MayLinearize) + EIGEN_DEBUG_VAR(MayInnerVectorize) + EIGEN_DEBUG_VAR(MayLinearVectorize) + EIGEN_DEBUG_VAR(MaySliceVectorize) + EIGEN_DEBUG_VAR(Traversal) + EIGEN_DEBUG_VAR(UnrollingLimit) + EIGEN_DEBUG_VAR(MayUnrollCompletely) + EIGEN_DEBUG_VAR(MayUnrollInner) + EIGEN_DEBUG_VAR(Unrolling) + } +#endif +}; + +/*************************************************************************** +* Part 2 : meta-unrollers +***************************************************************************/ + +/************************ +*** Default traversal *** +************************/ + +template +struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling +{ + typedef typename DstEvaluatorType::XprType DstXprType; + + enum { + outer = Index / DstXprType::InnerSizeAtCompileTime, + inner = Index % DstXprType::InnerSizeAtCompileTime + }; + + EIGEN_STRONG_INLINE static void run(DstEvaluatorType &dstEvaluator, + SrcEvaluatorType &srcEvaluator) + { + dstEvaluator.copyCoeffByOuterInner(outer, inner, srcEvaluator); + copy_using_evaluator_DefaultTraversal_CompleteUnrolling + + ::run(dstEvaluator, srcEvaluator); + } +}; + +template +struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling +{ + EIGEN_STRONG_INLINE static void run(DstEvaluatorType&, SrcEvaluatorType&) { } +}; + +template +struct copy_using_evaluator_DefaultTraversal_InnerUnrolling +{ + EIGEN_STRONG_INLINE static void run(DstEvaluatorType &dstEvaluator, + SrcEvaluatorType &srcEvaluator, + int outer) + { + dstEvaluator.copyCoeffByOuterInner(outer, Index, srcEvaluator); + copy_using_evaluator_DefaultTraversal_InnerUnrolling + + ::run(dstEvaluator, srcEvaluator, outer); + } +}; + +template +struct copy_using_evaluator_DefaultTraversal_InnerUnrolling +{ + EIGEN_STRONG_INLINE static void run(DstEvaluatorType&, SrcEvaluatorType&, int) { } +}; + +/*********************** +*** Linear traversal *** +***********************/ + +template +struct copy_using_evaluator_LinearTraversal_CompleteUnrolling +{ + EIGEN_STRONG_INLINE static void run(DstEvaluatorType &dstEvaluator, + SrcEvaluatorType &srcEvaluator) + { + dstEvaluator.copyCoeff(Index, srcEvaluator); + copy_using_evaluator_LinearTraversal_CompleteUnrolling + + ::run(dstEvaluator, srcEvaluator); + } +}; + +template 
+struct copy_using_evaluator_LinearTraversal_CompleteUnrolling +{ + EIGEN_STRONG_INLINE static void run(DstEvaluatorType&, SrcEvaluatorType&) { } +}; + +/************************** +*** Inner vectorization *** +**************************/ + +template +struct copy_using_evaluator_innervec_CompleteUnrolling +{ + typedef typename DstEvaluatorType::XprType DstXprType; + typedef typename SrcEvaluatorType::XprType SrcXprType; + + enum { + outer = Index / DstXprType::InnerSizeAtCompileTime, + inner = Index % DstXprType::InnerSizeAtCompileTime, + JointAlignment = copy_using_evaluator_traits::JointAlignment + }; + + EIGEN_STRONG_INLINE static void run(DstEvaluatorType &dstEvaluator, + SrcEvaluatorType &srcEvaluator) + { + dstEvaluator.template copyPacketByOuterInner(outer, inner, srcEvaluator); + enum { NextIndex = Index + packet_traits::size }; + copy_using_evaluator_innervec_CompleteUnrolling + + ::run(dstEvaluator, srcEvaluator); + } +}; + +template +struct copy_using_evaluator_innervec_CompleteUnrolling +{ + EIGEN_STRONG_INLINE static void run(DstEvaluatorType&, SrcEvaluatorType&) { } +}; + +template +struct copy_using_evaluator_innervec_InnerUnrolling +{ + EIGEN_STRONG_INLINE static void run(DstEvaluatorType &dstEvaluator, + SrcEvaluatorType &srcEvaluator, + int outer) + { + dstEvaluator.template copyPacketByOuterInner(outer, Index, srcEvaluator); + typedef typename DstEvaluatorType::XprType DstXprType; + enum { NextIndex = Index + packet_traits::size }; + copy_using_evaluator_innervec_InnerUnrolling + + ::run(dstEvaluator, srcEvaluator, outer); + } +}; + +template +struct copy_using_evaluator_innervec_InnerUnrolling +{ + EIGEN_STRONG_INLINE static void run(DstEvaluatorType&, SrcEvaluatorType&, int) { } +}; + +/*************************************************************************** +* Part 3 : implementation of all cases +***************************************************************************/ + +// copy_using_evaluator_impl is based on assign_impl + +template::Traversal, + int Unrolling = copy_using_evaluator_traits::Unrolling> +struct copy_using_evaluator_impl; + +/************************ +*** Default traversal *** +************************/ + +template +struct copy_using_evaluator_impl +{ + static void run(DstXprType& dst, const SrcXprType& src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + typedef typename DstXprType::Index Index; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + for(Index outer = 0; outer < dst.outerSize(); ++outer) { + for(Index inner = 0; inner < dst.innerSize(); ++inner) { + dstEvaluator.copyCoeffByOuterInner(outer, inner, srcEvaluator); + } + } + } +}; + +template +struct copy_using_evaluator_impl +{ + EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + copy_using_evaluator_DefaultTraversal_CompleteUnrolling + + ::run(dstEvaluator, srcEvaluator); + } +}; + +template +struct copy_using_evaluator_impl +{ + typedef typename DstXprType::Index Index; + EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + const Index outerSize = dst.outerSize(); 
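      // Loop over the outer dimension at run time; the InnerUnrolling
      // meta-unroller above expands the inner loop (0 .. InnerSizeAtCompileTime)
      // into straight-line copyCoeffByOuterInner() calls for each outer index.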
+ for(Index outer = 0; outer < outerSize; ++outer) + copy_using_evaluator_DefaultTraversal_InnerUnrolling + + ::run(dstEvaluator, srcEvaluator, outer); + } +}; + +/*************************** +*** Linear vectorization *** +***************************/ + +template +struct unaligned_copy_using_evaluator_impl +{ + // if IsAligned = true, then do nothing + template + static EIGEN_STRONG_INLINE void run(const SrcEvaluatorType&, DstEvaluatorType&, + typename SrcEvaluatorType::Index, typename SrcEvaluatorType::Index) {} +}; + +template <> +struct unaligned_copy_using_evaluator_impl +{ + // MSVC must not inline this functions. If it does, it fails to optimize the + // packet access path. +#ifdef _MSC_VER + template + static EIGEN_DONT_INLINE void run(DstEvaluatorType &dstEvaluator, + const SrcEvaluatorType &srcEvaluator, + typename DstEvaluatorType::Index start, + typename DstEvaluatorType::Index end) +#else + template + static EIGEN_STRONG_INLINE void run(DstEvaluatorType &dstEvaluator, + const SrcEvaluatorType &srcEvaluator, + typename DstEvaluatorType::Index start, + typename DstEvaluatorType::Index end) +#endif + { + for (typename DstEvaluatorType::Index index = start; index < end; ++index) + dstEvaluator.copyCoeff(index, srcEvaluator); + } +}; + +template +struct copy_using_evaluator_impl +{ + EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + typedef typename DstXprType::Index Index; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + const Index size = dst.size(); + typedef packet_traits PacketTraits; + enum { + packetSize = PacketTraits::size, + dstIsAligned = int(copy_using_evaluator_traits::DstIsAligned), + dstAlignment = PacketTraits::AlignedOnScalar ? Aligned : dstIsAligned, + srcAlignment = copy_using_evaluator_traits::JointAlignment + }; + const Index alignedStart = dstIsAligned ? 
0 : first_aligned(&dstEvaluator.coeffRef(0), size); + const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize; + + unaligned_copy_using_evaluator_impl::run(dstEvaluator, srcEvaluator, 0, alignedStart); + + for(Index index = alignedStart; index < alignedEnd; index += packetSize) + { + dstEvaluator.template copyPacket(index, srcEvaluator); + } + + unaligned_copy_using_evaluator_impl<>::run(dstEvaluator, srcEvaluator, alignedEnd, size); + } +}; + +template +struct copy_using_evaluator_impl +{ + typedef typename DstXprType::Index Index; + EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + enum { size = DstXprType::SizeAtCompileTime, + packetSize = packet_traits::size, + alignedSize = (size/packetSize)*packetSize }; + + copy_using_evaluator_innervec_CompleteUnrolling + + ::run(dstEvaluator, srcEvaluator); + copy_using_evaluator_DefaultTraversal_CompleteUnrolling + + ::run(dstEvaluator, srcEvaluator); + } +}; + +/************************** +*** Inner vectorization *** +**************************/ + +template +struct copy_using_evaluator_impl +{ + inline static void run(DstXprType &dst, const SrcXprType &src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + typedef typename DstXprType::Index Index; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + const Index innerSize = dst.innerSize(); + const Index outerSize = dst.outerSize(); + const Index packetSize = packet_traits::size; + for(Index outer = 0; outer < outerSize; ++outer) + for(Index inner = 0; inner < innerSize; inner+=packetSize) { + dstEvaluator.template copyPacketByOuterInner(outer, inner, srcEvaluator); + } + } +}; + +template +struct copy_using_evaluator_impl +{ + EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + copy_using_evaluator_innervec_CompleteUnrolling + + ::run(dstEvaluator, srcEvaluator); + } +}; + +template +struct copy_using_evaluator_impl +{ + typedef typename DstXprType::Index Index; + EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + const Index outerSize = dst.outerSize(); + for(Index outer = 0; outer < outerSize; ++outer) + copy_using_evaluator_innervec_InnerUnrolling + + ::run(dstEvaluator, srcEvaluator, outer); + } +}; + +/*********************** +*** Linear traversal *** +***********************/ + +template +struct copy_using_evaluator_impl +{ + inline static void run(DstXprType &dst, const SrcXprType &src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + typedef typename DstXprType::Index Index; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + const Index size = dst.size(); + for(Index i = 0; i < size; ++i) + dstEvaluator.copyCoeff(i, srcEvaluator); + } +}; + +template +struct copy_using_evaluator_impl +{ + EIGEN_STRONG_INLINE static void run(DstXprType &dst, 
const SrcXprType &src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + copy_using_evaluator_LinearTraversal_CompleteUnrolling + + ::run(dstEvaluator, srcEvaluator); + } +}; + +/************************** +*** Slice vectorization *** +***************************/ + +template +struct copy_using_evaluator_impl +{ + inline static void run(DstXprType &dst, const SrcXprType &src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + typedef typename DstXprType::Index Index; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + typedef packet_traits PacketTraits; + enum { + packetSize = PacketTraits::size, + alignable = PacketTraits::AlignedOnScalar, + dstAlignment = alignable ? Aligned : int(copy_using_evaluator_traits::DstIsAligned) + }; + const Index packetAlignedMask = packetSize - 1; + const Index innerSize = dst.innerSize(); + const Index outerSize = dst.outerSize(); + const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0; + Index alignedStart = ((!alignable) || copy_using_evaluator_traits::DstIsAligned) ? 0 + : first_aligned(&dstEvaluator.coeffRef(0,0), innerSize); + + for(Index outer = 0; outer < outerSize; ++outer) + { + const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask); + // do the non-vectorizable part of the assignment + for(Index inner = 0; inner(outer, inner, srcEvaluator); + } + + // do the non-vectorizable part of the assignment + for(Index inner = alignedEnd; inner((alignedStart+alignedStep)%packetSize, innerSize); + } + } +}; + +/**************************** +*** All-at-once traversal *** +****************************/ + +template +struct copy_using_evaluator_impl +{ + inline static void run(DstXprType &dst, const SrcXprType &src) + { + typedef typename evaluator::type DstEvaluatorType; + typedef typename evaluator::type SrcEvaluatorType; + typedef typename DstXprType::Index Index; + + DstEvaluatorType dstEvaluator(dst); + SrcEvaluatorType srcEvaluator(src); + + // Evaluate rhs in temporary to prevent aliasing problems in a = a * a; + // TODO: Do not pass the xpr object to evalTo() + srcEvaluator.evalTo(dstEvaluator, dst); + } +}; + +/*************************************************************************** +* Part 4 : Entry points +***************************************************************************/ + +// Based on DenseBase::LazyAssign() + +template class StorageBase, typename SrcXprType> +EIGEN_STRONG_INLINE +const DstXprType& copy_using_evaluator(const NoAlias& dst, + const EigenBase& src) +{ + return noalias_copy_using_evaluator(dst.expression(), src.derived()); +} + +template::AssumeAliasing> +struct AddEvalIfAssumingAliasing; + +template +struct AddEvalIfAssumingAliasing +{ + static const XprType& run(const XprType& xpr) + { + return xpr; + } +}; + +template +struct AddEvalIfAssumingAliasing +{ + static const EvalToTemp run(const XprType& xpr) + { + return EvalToTemp(xpr); + } +}; + +template +EIGEN_STRONG_INLINE +const DstXprType& copy_using_evaluator(const EigenBase& dst, const EigenBase& src) +{ + return noalias_copy_using_evaluator(dst.const_cast_derived(), + AddEvalIfAssumingAliasing::run(src.derived())); +} + +template +EIGEN_STRONG_INLINE +const DstXprType& noalias_copy_using_evaluator(const PlainObjectBase& dst, const EigenBase& src) +{ 
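  // Resize the destination to match the source (unless EIGEN_NO_AUTOMATIC_RESIZING
  // is defined, in which case only assert that the sizes already agree), then
  // delegate to copy_using_evaluator_without_resizing() for the actual copy.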
+#ifdef EIGEN_DEBUG_ASSIGN + internal::copy_using_evaluator_traits::debug(); +#endif +#ifdef EIGEN_NO_AUTOMATIC_RESIZING + eigen_assert((dst.size()==0 || (IsVectorAtCompileTime ? (dst.size() == src.size()) + : (dst.rows() == src.rows() && dst.cols() == src.cols()))) + && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined"); +#else + dst.const_cast_derived().resizeLike(src.derived()); +#endif + return copy_using_evaluator_without_resizing(dst.const_cast_derived(), src.derived()); +} + +template +EIGEN_STRONG_INLINE +const DstXprType& noalias_copy_using_evaluator(const EigenBase& dst, const EigenBase& src) +{ + return copy_using_evaluator_without_resizing(dst.const_cast_derived(), src.derived()); +} + +template +const DstXprType& copy_using_evaluator_without_resizing(const DstXprType& dst, const SrcXprType& src) +{ +#ifdef EIGEN_DEBUG_ASSIGN + internal::copy_using_evaluator_traits::debug(); +#endif + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); + copy_using_evaluator_impl::run(const_cast(dst), src); + return dst; +} + +// Based on DenseBase::swap() +// TODO: Chech whether we need to do something special for swapping two +// Arrays or Matrices. + +template +void swap_using_evaluator(const DstXprType& dst, const SrcXprType& src) +{ + copy_using_evaluator(SwapWrapper(const_cast(dst)), src); +} + +// Based on MatrixBase::operator+= (in CwiseBinaryOp.h) +template +void add_assign_using_evaluator(const MatrixBase& dst, const MatrixBase& src) +{ + typedef typename DstXprType::Scalar Scalar; + SelfCwiseBinaryOp, DstXprType, SrcXprType> tmp(dst.const_cast_derived()); + copy_using_evaluator(tmp, src.derived()); +} + +// Based on ArrayBase::operator+= +template +void add_assign_using_evaluator(const ArrayBase& dst, const ArrayBase& src) +{ + typedef typename DstXprType::Scalar Scalar; + SelfCwiseBinaryOp, DstXprType, SrcXprType> tmp(dst.const_cast_derived()); + copy_using_evaluator(tmp, src.derived()); +} + +// TODO: Add add_assign_using_evaluator for EigenBase ? 
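// A usage sketch for these entry points, assuming EIGEN_ENABLE_EVALUATORS is
// defined so that this file is compiled in; they mirror the corresponding
// assignment operators:
//
//   Eigen::MatrixXd A(3,3), B = Eigen::MatrixXd::Random(3,3);
//   Eigen::internal::copy_using_evaluator(A, B);        // A = B
//   Eigen::internal::add_assign_using_evaluator(A, B);  // A += B
//   // subtract_assign_using_evaluator and the ArrayBase variants below
//   // follow the same pattern.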
+ +template +void subtract_assign_using_evaluator(const MatrixBase& dst, const MatrixBase& src) +{ + typedef typename DstXprType::Scalar Scalar; + SelfCwiseBinaryOp, DstXprType, SrcXprType> tmp(dst.const_cast_derived()); + copy_using_evaluator(tmp, src.derived()); +} + +template +void subtract_assign_using_evaluator(const ArrayBase& dst, const ArrayBase& src) +{ + typedef typename DstXprType::Scalar Scalar; + SelfCwiseBinaryOp, DstXprType, SrcXprType> tmp(dst.const_cast_derived()); + copy_using_evaluator(tmp, src.derived()); +} + +template +void multiply_assign_using_evaluator(const ArrayBase& dst, const ArrayBase& src) +{ + typedef typename DstXprType::Scalar Scalar; + SelfCwiseBinaryOp, DstXprType, SrcXprType> tmp(dst.const_cast_derived()); + copy_using_evaluator(tmp, src.derived()); +} + +template +void divide_assign_using_evaluator(const ArrayBase& dst, const ArrayBase& src) +{ + typedef typename DstXprType::Scalar Scalar; + SelfCwiseBinaryOp, DstXprType, SrcXprType> tmp(dst.const_cast_derived()); + copy_using_evaluator(tmp, src.derived()); +} + + +} // namespace internal + +} // end namespace Eigen + +#endif // EIGEN_ASSIGN_EVALUATOR_H diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Assign_MKL.h b/resources/3rdparty/eigen/Eigen/src/Core/Assign_MKL.h index 428c6367b..7772951b9 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/Assign_MKL.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/Assign_MKL.h @@ -210,7 +210,7 @@ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sqrt, Sqrt) EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr) // The vm*powx functions are not avaibale in the windows version of MKL. -#ifdef _WIN32 +#ifndef _WIN32 EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmspowx_, float, float) EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdpowx_, double, double) EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcpowx_, scomplex, MKL_Complex8) diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Block.h b/resources/3rdparty/eigen/Eigen/src/Core/Block.h index 5f29cb3d1..9c3f9acb6 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/Block.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/Block.h @@ -124,27 +124,27 @@ template= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows() - && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols()); + eigen_assert(a_startRow >= 0 && BlockRows >= 1 && a_startRow + BlockRows <= xpr.rows() + && a_startCol >= 0 && BlockCols >= 1 && a_startCol + BlockCols <= xpr.cols()); } /** Dynamic-size constructor */ inline Block(XprType& xpr, - Index startRow, Index startCol, + Index a_startRow, Index a_startCol, Index blockRows, Index blockCols) - : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), + : m_xpr(xpr), m_startRow(a_startRow), m_startCol(a_startCol), m_blockRows(blockRows), m_blockCols(blockCols) { eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); - eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows() - && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols()); + eigen_assert(a_startRow >= 0 && blockRows >= 0 && a_startRow + blockRows <= xpr.rows() + && a_startCol >= 0 && blockCols >= 0 && a_startCol + blockCols <= xpr.cols()); } EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) @@ -152,22 +152,22 @@ template - inline PacketScalar packet(Index row, Index col) const + inline PacketScalar packet(Index rowId, Index colId) const { return m_xpr.template packet - (row + m_startRow.value(), col + m_startCol.value()); + (rowId + 
m_startRow.value(), colId + m_startCol.value()); } template - inline void writePacket(Index row, Index col, const PacketScalar& x) + inline void writePacket(Index rowId, Index colId, const PacketScalar& val) { m_xpr.const_cast_derived().template writePacket - (row + m_startRow.value(), col + m_startCol.value(), x); + (rowId + m_startRow.value(), colId + m_startCol.value(), val); } template @@ -215,11 +215,11 @@ template - inline void writePacket(Index index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& val) { m_xpr.const_cast_derived().template writePacket (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), - m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), x); + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val); } #ifdef EIGEN_PARSED_BY_DOXYGEN diff --git a/resources/3rdparty/eigen/Eigen/src/Core/CoreEvaluators.h b/resources/3rdparty/eigen/Eigen/src/Core/CoreEvaluators.h new file mode 100644 index 000000000..cca01251c --- /dev/null +++ b/resources/3rdparty/eigen/Eigen/src/Core/CoreEvaluators.h @@ -0,0 +1,1299 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Benoit Jacob +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2011-2012 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifndef EIGEN_COREEVALUATORS_H +#define EIGEN_COREEVALUATORS_H + +namespace Eigen { + +namespace internal { + +// evaluator_traits contains traits for evaluator_impl + +template +struct evaluator_traits +{ + // 1 if evaluator_impl::evalTo() exists + // 0 if evaluator_impl allows coefficient-based access + static const int HasEvalTo = 0; + + // 1 if assignment A = B assumes aliasing when B is of type T and thus B needs to be evaluated into a + // temporary; 0 if not. 
+ static const int AssumeAliasing = 0; +}; + +// expression class for evaluating nested expression to a temporary + +template +class EvalToTemp; + +// evaluator::type is type of evaluator for T +// evaluator::nestedType is type of evaluator if T is nested inside another evaluator + +template +struct evaluator_impl +{ }; + +template::HasEvalTo> +struct evaluator_nested_type; + +template +struct evaluator_nested_type +{ + typedef evaluator_impl type; +}; + +template +struct evaluator_nested_type +{ + typedef evaluator_impl > type; +}; + +template +struct evaluator +{ + typedef evaluator_impl type; + typedef typename evaluator_nested_type::type nestedType; +}; + +// TODO: Think about const-correctness + +template +struct evaluator + : evaluator +{ }; + +// ---------- base class for all writable evaluators ---------- + +template +struct evaluator_impl_base +{ + typedef typename ExpressionType::Index Index; + + template + void copyCoeff(Index row, Index col, const OtherEvaluatorType& other) + { + derived().coeffRef(row, col) = other.coeff(row, col); + } + + template + void copyCoeffByOuterInner(Index outer, Index inner, const OtherEvaluatorType& other) + { + Index row = rowIndexByOuterInner(outer, inner); + Index col = colIndexByOuterInner(outer, inner); + derived().copyCoeff(row, col, other); + } + + template + void copyCoeff(Index index, const OtherEvaluatorType& other) + { + derived().coeffRef(index) = other.coeff(index); + } + + template + void copyPacket(Index row, Index col, const OtherEvaluatorType& other) + { + derived().template writePacket(row, col, + other.template packet(row, col)); + } + + template + void copyPacketByOuterInner(Index outer, Index inner, const OtherEvaluatorType& other) + { + Index row = rowIndexByOuterInner(outer, inner); + Index col = colIndexByOuterInner(outer, inner); + derived().template copyPacket(row, col, other); + } + + template + void copyPacket(Index index, const OtherEvaluatorType& other) + { + derived().template writePacket(index, + other.template packet(index)); + } + + Index rowIndexByOuterInner(Index outer, Index inner) const + { + return int(ExpressionType::RowsAtCompileTime) == 1 ? 0 + : int(ExpressionType::ColsAtCompileTime) == 1 ? inner + : int(ExpressionType::Flags)&RowMajorBit ? outer + : inner; + } + + Index colIndexByOuterInner(Index outer, Index inner) const + { + return int(ExpressionType::ColsAtCompileTime) == 1 ? 0 + : int(ExpressionType::RowsAtCompileTime) == 1 ? inner + : int(ExpressionType::Flags)&RowMajorBit ? inner + : outer; + } + + evaluator_impl& derived() + { + return *static_cast*>(this); + } +}; + +// -------------------- Matrix and Array -------------------- +// +// evaluator_impl is a common base class for the +// Matrix and Array evaluators. + +template +struct evaluator_impl > + : evaluator_impl_base +{ + typedef PlainObjectBase PlainObjectType; + + enum { + IsRowMajor = PlainObjectType::IsRowMajor, + IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime, + RowsAtCompileTime = PlainObjectType::RowsAtCompileTime, + ColsAtCompileTime = PlainObjectType::ColsAtCompileTime + }; + + evaluator_impl(const PlainObjectType& m) + : m_data(m.data()), m_outerStride(IsVectorAtCompileTime ? 
0 : m.outerStride()) + { } + + typedef typename PlainObjectType::Index Index; + typedef typename PlainObjectType::Scalar Scalar; + typedef typename PlainObjectType::CoeffReturnType CoeffReturnType; + typedef typename PlainObjectType::PacketScalar PacketScalar; + typedef typename PlainObjectType::PacketReturnType PacketReturnType; + + CoeffReturnType coeff(Index row, Index col) const + { + if (IsRowMajor) + return m_data[row * m_outerStride.value() + col]; + else + return m_data[row + col * m_outerStride.value()]; + } + + CoeffReturnType coeff(Index index) const + { + return m_data[index]; + } + + Scalar& coeffRef(Index row, Index col) + { + if (IsRowMajor) + return const_cast(m_data)[row * m_outerStride.value() + col]; + else + return const_cast(m_data)[row + col * m_outerStride.value()]; + } + + Scalar& coeffRef(Index index) + { + return const_cast(m_data)[index]; + } + + template + PacketReturnType packet(Index row, Index col) const + { + if (IsRowMajor) + return ploadt(m_data + row * m_outerStride.value() + col); + else + return ploadt(m_data + row + col * m_outerStride.value()); + } + + template + PacketReturnType packet(Index index) const + { + return ploadt(m_data + index); + } + + template + void writePacket(Index row, Index col, const PacketScalar& x) + { + if (IsRowMajor) + return pstoret + (const_cast(m_data) + row * m_outerStride.value() + col, x); + else + return pstoret + (const_cast(m_data) + row + col * m_outerStride.value(), x); + } + + template + void writePacket(Index index, const PacketScalar& x) + { + return pstoret(const_cast(m_data) + index, x); + } + +protected: + const Scalar *m_data; + + // We do not need to know the outer stride for vectors + variable_if_dynamic m_outerStride; +}; + +template +struct evaluator_impl > + : evaluator_impl > > +{ + typedef Matrix XprType; + + evaluator_impl(const XprType& m) + : evaluator_impl >(m) + { } +}; + +template +struct evaluator_impl > + : evaluator_impl > > +{ + typedef Array XprType; + + evaluator_impl(const XprType& m) + : evaluator_impl >(m) + { } +}; + +// -------------------- EvalToTemp -------------------- + +template +struct traits > + : public traits +{ }; + +template +class EvalToTemp + : public dense_xpr_base >::type +{ + public: + + typedef typename dense_xpr_base::type Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp) + + EvalToTemp(const ArgType& arg) + : m_arg(arg) + { } + + const ArgType& arg() const + { + return m_arg; + } + + Index rows() const + { + return m_arg.rows(); + } + + Index cols() const + { + return m_arg.cols(); + } + + private: + const ArgType& m_arg; +}; + +template +struct evaluator_impl > +{ + typedef EvalToTemp XprType; + typedef typename ArgType::PlainObject PlainObject; + + evaluator_impl(const XprType& xpr) + : m_result(xpr.rows(), xpr.cols()), m_resultImpl(m_result) + { + copy_using_evaluator_without_resizing(m_result, xpr.arg()); + } + + // This constructor is used when nesting an EvalTo evaluator in another evaluator + evaluator_impl(const ArgType& arg) + : m_result(arg.rows(), arg.cols()), m_resultImpl(m_result) + { + copy_using_evaluator_without_resizing(m_result, arg); + } + + typedef typename PlainObject::Index Index; + typedef typename PlainObject::Scalar Scalar; + typedef typename PlainObject::CoeffReturnType CoeffReturnType; + typedef typename PlainObject::PacketScalar PacketScalar; + typedef typename PlainObject::PacketReturnType PacketReturnType; + + // All other functions are forwarded to m_resultImpl + + CoeffReturnType coeff(Index row, Index col) const + { + return 
m_resultImpl.coeff(row, col); + } + + CoeffReturnType coeff(Index index) const + { + return m_resultImpl.coeff(index); + } + + Scalar& coeffRef(Index row, Index col) + { + return m_resultImpl.coeffRef(row, col); + } + + Scalar& coeffRef(Index index) + { + return m_resultImpl.coeffRef(index); + } + + template + PacketReturnType packet(Index row, Index col) const + { + return m_resultImpl.packet(row, col); + } + + template + PacketReturnType packet(Index index) const + { + return m_resultImpl.packet(index); + } + + template + void writePacket(Index row, Index col, const PacketScalar& x) + { + m_resultImpl.writePacket(row, col, x); + } + + template + void writePacket(Index index, const PacketScalar& x) + { + m_resultImpl.writePacket(index, x); + } + +protected: + PlainObject m_result; + typename evaluator::nestedType m_resultImpl; +}; + +// -------------------- Transpose -------------------- + +template +struct evaluator_impl > + : evaluator_impl_base > +{ + typedef Transpose XprType; + + evaluator_impl(const XprType& t) : m_argImpl(t.nestedExpression()) {} + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketScalar PacketScalar; + typedef typename XprType::PacketReturnType PacketReturnType; + + CoeffReturnType coeff(Index row, Index col) const + { + return m_argImpl.coeff(col, row); + } + + CoeffReturnType coeff(Index index) const + { + return m_argImpl.coeff(index); + } + + Scalar& coeffRef(Index row, Index col) + { + return m_argImpl.coeffRef(col, row); + } + + typename XprType::Scalar& coeffRef(Index index) + { + return m_argImpl.coeffRef(index); + } + + template + PacketReturnType packet(Index row, Index col) const + { + return m_argImpl.template packet(col, row); + } + + template + PacketReturnType packet(Index index) const + { + return m_argImpl.template packet(index); + } + + template + void writePacket(Index row, Index col, const PacketScalar& x) + { + m_argImpl.template writePacket(col, row, x); + } + + template + void writePacket(Index index, const PacketScalar& x) + { + m_argImpl.template writePacket(index, x); + } + +protected: + typename evaluator::nestedType m_argImpl; +}; + +// -------------------- CwiseNullaryOp -------------------- + +template +struct evaluator_impl > +{ + typedef CwiseNullaryOp XprType; + + evaluator_impl(const XprType& n) + : m_functor(n.functor()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketScalar PacketScalar; + + CoeffReturnType coeff(Index row, Index col) const + { + return m_functor(row, col); + } + + CoeffReturnType coeff(Index index) const + { + return m_functor(index); + } + + template + PacketScalar packet(Index row, Index col) const + { + return m_functor.packetOp(row, col); + } + + template + PacketScalar packet(Index index) const + { + return m_functor.packetOp(index); + } + +protected: + const NullaryOp m_functor; +}; + +// -------------------- CwiseUnaryOp -------------------- + +template +struct evaluator_impl > +{ + typedef CwiseUnaryOp XprType; + + evaluator_impl(const XprType& op) + : m_functor(op.functor()), + m_argImpl(op.nestedExpression()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketScalar PacketScalar; + + CoeffReturnType coeff(Index row, Index col) const + { + return m_functor(m_argImpl.coeff(row, col)); + } + 
+ CoeffReturnType coeff(Index index) const + { + return m_functor(m_argImpl.coeff(index)); + } + + template + PacketScalar packet(Index row, Index col) const + { + return m_functor.packetOp(m_argImpl.template packet(row, col)); + } + + template + PacketScalar packet(Index index) const + { + return m_functor.packetOp(m_argImpl.template packet(index)); + } + +protected: + const UnaryOp m_functor; + typename evaluator::nestedType m_argImpl; +}; + +// -------------------- CwiseBinaryOp -------------------- + +template +struct evaluator_impl > +{ + typedef CwiseBinaryOp XprType; + + evaluator_impl(const XprType& xpr) + : m_functor(xpr.functor()), + m_lhsImpl(xpr.lhs()), + m_rhsImpl(xpr.rhs()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketScalar PacketScalar; + + CoeffReturnType coeff(Index row, Index col) const + { + return m_functor(m_lhsImpl.coeff(row, col), m_rhsImpl.coeff(row, col)); + } + + CoeffReturnType coeff(Index index) const + { + return m_functor(m_lhsImpl.coeff(index), m_rhsImpl.coeff(index)); + } + + template + PacketScalar packet(Index row, Index col) const + { + return m_functor.packetOp(m_lhsImpl.template packet(row, col), + m_rhsImpl.template packet(row, col)); + } + + template + PacketScalar packet(Index index) const + { + return m_functor.packetOp(m_lhsImpl.template packet(index), + m_rhsImpl.template packet(index)); + } + +protected: + const BinaryOp m_functor; + typename evaluator::nestedType m_lhsImpl; + typename evaluator::nestedType m_rhsImpl; +}; + +// -------------------- CwiseUnaryView -------------------- + +template +struct evaluator_impl > + : evaluator_impl_base > +{ + typedef CwiseUnaryView XprType; + + evaluator_impl(const XprType& op) + : m_unaryOp(op.functor()), + m_argImpl(op.nestedExpression()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + CoeffReturnType coeff(Index row, Index col) const + { + return m_unaryOp(m_argImpl.coeff(row, col)); + } + + CoeffReturnType coeff(Index index) const + { + return m_unaryOp(m_argImpl.coeff(index)); + } + + Scalar& coeffRef(Index row, Index col) + { + return m_unaryOp(m_argImpl.coeffRef(row, col)); + } + + Scalar& coeffRef(Index index) + { + return m_unaryOp(m_argImpl.coeffRef(index)); + } + +protected: + const UnaryOp m_unaryOp; + typename evaluator::nestedType m_argImpl; +}; + +// -------------------- Map -------------------- + +template +struct evaluator_impl > + : evaluator_impl_base +{ + typedef MapBase MapType; + typedef Derived XprType; + + typedef typename XprType::PointerType PointerType; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketScalar PacketScalar; + typedef typename XprType::PacketReturnType PacketReturnType; + + evaluator_impl(const XprType& map) + : m_data(const_cast(map.data())), + m_rowStride(map.rowStride()), + m_colStride(map.colStride()) + { } + + enum { + RowsAtCompileTime = XprType::RowsAtCompileTime + }; + + CoeffReturnType coeff(Index row, Index col) const + { + return m_data[col * m_colStride + row * m_rowStride]; + } + + CoeffReturnType coeff(Index index) const + { + return coeff(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? 
index : 0); + } + + Scalar& coeffRef(Index row, Index col) + { + return m_data[col * m_colStride + row * m_rowStride]; + } + + Scalar& coeffRef(Index index) + { + return coeffRef(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0); + } + + template + PacketReturnType packet(Index row, Index col) const + { + PointerType ptr = m_data + row * m_rowStride + col * m_colStride; + return internal::ploadt(ptr); + } + + template + PacketReturnType packet(Index index) const + { + return packet(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0); + } + + template + void writePacket(Index row, Index col, const PacketScalar& x) + { + PointerType ptr = m_data + row * m_rowStride + col * m_colStride; + return internal::pstoret(ptr, x); + } + + template + void writePacket(Index index, const PacketScalar& x) + { + return writePacket(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0, + x); + } + +protected: + PointerType m_data; + int m_rowStride; + int m_colStride; +}; + +template +struct evaluator_impl > + : public evaluator_impl > > +{ + typedef Map XprType; + + evaluator_impl(const XprType& map) + : evaluator_impl >(map) + { } +}; + +// -------------------- Block -------------------- + +template +struct evaluator_impl > + : evaluator_impl_base > +{ + typedef Block XprType; + + evaluator_impl(const XprType& block) + : m_argImpl(block.nestedExpression()), + m_startRow(block.startRow()), + m_startCol(block.startCol()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketScalar PacketScalar; + typedef typename XprType::PacketReturnType PacketReturnType; + + enum { + RowsAtCompileTime = XprType::RowsAtCompileTime + }; + + CoeffReturnType coeff(Index row, Index col) const + { + return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col); + } + + CoeffReturnType coeff(Index index) const + { + return coeff(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0); + } + + Scalar& coeffRef(Index row, Index col) + { + return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col); + } + + Scalar& coeffRef(Index index) + { + return coeffRef(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0); + } + + template + PacketReturnType packet(Index row, Index col) const + { + return m_argImpl.template packet(m_startRow.value() + row, m_startCol.value() + col); + } + + template + PacketReturnType packet(Index index) const + { + return packet(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0); + } + + template + void writePacket(Index row, Index col, const PacketScalar& x) + { + return m_argImpl.template writePacket(m_startRow.value() + row, m_startCol.value() + col, x); + } + + template + void writePacket(Index index, const PacketScalar& x) + { + return writePacket(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0, + x); + } + +protected: + typename evaluator::nestedType m_argImpl; + const variable_if_dynamic m_startRow; + const variable_if_dynamic m_startCol; +}; + +// TODO: This evaluator does not actually use the child evaluator; +// all action is via the data() as returned by the Block expression. 
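// That is, the direct-access specialization below reuses the MapBase evaluator
// on the block's data() pointer instead of indexing through the nested
// expression's evaluator.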
+ +template +struct evaluator_impl > + : evaluator_impl > > +{ + typedef Block XprType; + + evaluator_impl(const XprType& block) + : evaluator_impl >(block) + { } +}; + + +// -------------------- Select -------------------- + +template +struct evaluator_impl > +{ + typedef Select XprType; + + evaluator_impl(const XprType& select) + : m_conditionImpl(select.conditionMatrix()), + m_thenImpl(select.thenMatrix()), + m_elseImpl(select.elseMatrix()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + CoeffReturnType coeff(Index row, Index col) const + { + if (m_conditionImpl.coeff(row, col)) + return m_thenImpl.coeff(row, col); + else + return m_elseImpl.coeff(row, col); + } + + CoeffReturnType coeff(Index index) const + { + if (m_conditionImpl.coeff(index)) + return m_thenImpl.coeff(index); + else + return m_elseImpl.coeff(index); + } + +protected: + typename evaluator::nestedType m_conditionImpl; + typename evaluator::nestedType m_thenImpl; + typename evaluator::nestedType m_elseImpl; +}; + + +// -------------------- Replicate -------------------- + +template +struct evaluator_impl > +{ + typedef Replicate XprType; + + evaluator_impl(const XprType& replicate) + : m_argImpl(replicate.nestedExpression()), + m_rows(replicate.nestedExpression().rows()), + m_cols(replicate.nestedExpression().cols()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketReturnType PacketReturnType; + + CoeffReturnType coeff(Index row, Index col) const + { + // try to avoid using modulo; this is a pure optimization strategy + const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 + : RowFactor==1 ? row + : row % m_rows.value(); + const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 + : ColFactor==1 ? col + : col % m_cols.value(); + + return m_argImpl.coeff(actual_row, actual_col); + } + + template + PacketReturnType packet(Index row, Index col) const + { + const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 + : RowFactor==1 ? row + : row % m_rows.value(); + const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 + : ColFactor==1 ? col + : col % m_cols.value(); + + return m_argImpl.template packet(actual_row, actual_col); + } + +protected: + typename evaluator::nestedType m_argImpl; + const variable_if_dynamic m_rows; + const variable_if_dynamic m_cols; +}; + + +// -------------------- PartialReduxExpr -------------------- +// +// This is a wrapper around the expression object. +// TODO: Find out how to write a proper evaluator without duplicating +// the row() and col() member functions. + +template< typename ArgType, typename MemberOp, int Direction> +struct evaluator_impl > +{ + typedef PartialReduxExpr XprType; + + evaluator_impl(const XprType expr) + : m_expr(expr) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + CoeffReturnType coeff(Index row, Index col) const + { + return m_expr.coeff(row, col); + } + + CoeffReturnType coeff(Index index) const + { + return m_expr.coeff(index); + } + +protected: + const XprType m_expr; +}; + + +// -------------------- MatrixWrapper and ArrayWrapper -------------------- +// +// evaluator_impl_wrapper_base is a common base class for the +// MatrixWrapper and ArrayWrapper evaluators. 
+ +template +struct evaluator_impl_wrapper_base + : evaluator_impl_base +{ + typedef typename remove_all::type ArgType; + + evaluator_impl_wrapper_base(const ArgType& arg) : m_argImpl(arg) {} + + typedef typename ArgType::Index Index; + typedef typename ArgType::Scalar Scalar; + typedef typename ArgType::CoeffReturnType CoeffReturnType; + typedef typename ArgType::PacketScalar PacketScalar; + typedef typename ArgType::PacketReturnType PacketReturnType; + + CoeffReturnType coeff(Index row, Index col) const + { + return m_argImpl.coeff(row, col); + } + + CoeffReturnType coeff(Index index) const + { + return m_argImpl.coeff(index); + } + + Scalar& coeffRef(Index row, Index col) + { + return m_argImpl.coeffRef(row, col); + } + + Scalar& coeffRef(Index index) + { + return m_argImpl.coeffRef(index); + } + + template + PacketReturnType packet(Index row, Index col) const + { + return m_argImpl.template packet(row, col); + } + + template + PacketReturnType packet(Index index) const + { + return m_argImpl.template packet(index); + } + + template + void writePacket(Index row, Index col, const PacketScalar& x) + { + m_argImpl.template writePacket(row, col, x); + } + + template + void writePacket(Index index, const PacketScalar& x) + { + m_argImpl.template writePacket(index, x); + } + +protected: + typename evaluator::nestedType m_argImpl; +}; + +template +struct evaluator_impl > + : evaluator_impl_wrapper_base > +{ + typedef MatrixWrapper XprType; + + evaluator_impl(const XprType& wrapper) + : evaluator_impl_wrapper_base >(wrapper.nestedExpression()) + { } +}; + +template +struct evaluator_impl > + : evaluator_impl_wrapper_base > +{ + typedef ArrayWrapper XprType; + + evaluator_impl(const XprType& wrapper) + : evaluator_impl_wrapper_base >(wrapper.nestedExpression()) + { } +}; + + +// -------------------- Reverse -------------------- + +// defined in Reverse.h: +template struct reverse_packet_cond; + +template +struct evaluator_impl > + : evaluator_impl_base > +{ + typedef Reverse XprType; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketScalar PacketScalar; + typedef typename XprType::PacketReturnType PacketReturnType; + + enum { + PacketSize = internal::packet_traits::size, + IsRowMajor = XprType::IsRowMajor, + IsColMajor = !IsRowMajor, + ReverseRow = (Direction == Vertical) || (Direction == BothDirections), + ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), + OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, + OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1, + ReversePacket = (Direction == BothDirections) + || ((Direction == Vertical) && IsColMajor) + || ((Direction == Horizontal) && IsRowMajor) + }; + typedef internal::reverse_packet_cond reverse_packet; + + evaluator_impl(const XprType& reverse) + : m_argImpl(reverse.nestedExpression()), + m_rows(ReverseRow ? reverse.nestedExpression().rows() : 0), + m_cols(ReverseCol ? reverse.nestedExpression().cols() : 0) + { } + + CoeffReturnType coeff(Index row, Index col) const + { + return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row, + ReverseCol ? m_cols.value() - col - 1 : col); + } + + CoeffReturnType coeff(Index index) const + { + return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1); + } + + Scalar& coeffRef(Index row, Index col) + { + return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row, + ReverseCol ? 
m_cols.value() - col - 1 : col); + } + + Scalar& coeffRef(Index index) + { + return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1); + } + + template + PacketScalar packet(Index row, Index col) const + { + return reverse_packet::run(m_argImpl.template packet( + ReverseRow ? m_rows.value() - row - OffsetRow : row, + ReverseCol ? m_cols.value() - col - OffsetCol : col)); + } + + template + PacketScalar packet(Index index) const + { + return preverse(m_argImpl.template packet(m_rows.value() * m_cols.value() - index - PacketSize)); + } + + template + void writePacket(Index row, Index col, const PacketScalar& x) + { + m_argImpl.template writePacket( + ReverseRow ? m_rows.value() - row - OffsetRow : row, + ReverseCol ? m_cols.value() - col - OffsetCol : col, + reverse_packet::run(x)); + } + + template + void writePacket(Index index, const PacketScalar& x) + { + m_argImpl.template writePacket + (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x)); + } + +protected: + typename evaluator::nestedType m_argImpl; + + // If we do not reverse rows, then we do not need to know the number of rows; same for columns + const variable_if_dynamic m_rows; + const variable_if_dynamic m_cols; +}; + + +// -------------------- Diagonal -------------------- + +template +struct evaluator_impl > + : evaluator_impl_base > +{ + typedef Diagonal XprType; + + evaluator_impl(const XprType& diagonal) + : m_argImpl(diagonal.nestedExpression()), + m_index(diagonal.index()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + CoeffReturnType coeff(Index row, Index) const + { + return m_argImpl.coeff(row + rowOffset(), row + colOffset()); + } + + CoeffReturnType coeff(Index index) const + { + return m_argImpl.coeff(index + rowOffset(), index + colOffset()); + } + + Scalar& coeffRef(Index row, Index) + { + return m_argImpl.coeffRef(row + rowOffset(), row + colOffset()); + } + + Scalar& coeffRef(Index index) + { + return m_argImpl.coeffRef(index + rowOffset(), index + colOffset()); + } + +protected: + typename evaluator::nestedType m_argImpl; + const internal::variable_if_dynamicindex m_index; + +private: + EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); } + EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value() > 0 ? 
m_index.value() : 0; } +}; + + +// ---------- SwapWrapper ---------- + +template +struct evaluator_impl > + : evaluator_impl_base > +{ + typedef SwapWrapper XprType; + + evaluator_impl(const XprType& swapWrapper) + : m_argImpl(swapWrapper.expression()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::Packet Packet; + + // This function and the next one are needed by assign to correctly align loads/stores + // TODO make Assign use .data() + Scalar& coeffRef(Index row, Index col) + { + return m_argImpl.coeffRef(row, col); + } + + inline Scalar& coeffRef(Index index) + { + return m_argImpl.coeffRef(index); + } + + template + void copyCoeff(Index row, Index col, const OtherEvaluatorType& other) + { + OtherEvaluatorType& nonconst_other = const_cast(other); + Scalar tmp = m_argImpl.coeff(row, col); + m_argImpl.coeffRef(row, col) = nonconst_other.coeff(row, col); + nonconst_other.coeffRef(row, col) = tmp; + } + + template + void copyCoeff(Index index, const OtherEvaluatorType& other) + { + OtherEvaluatorType& nonconst_other = const_cast(other); + Scalar tmp = m_argImpl.coeff(index); + m_argImpl.coeffRef(index) = nonconst_other.coeff(index); + nonconst_other.coeffRef(index) = tmp; + } + + template + void copyPacket(Index row, Index col, const OtherEvaluatorType& other) + { + OtherEvaluatorType& nonconst_other = const_cast(other); + Packet tmp = m_argImpl.template packet(row, col); + m_argImpl.template writePacket + (row, col, nonconst_other.template packet(row, col)); + nonconst_other.template writePacket(row, col, tmp); + } + + template + void copyPacket(Index index, const OtherEvaluatorType& other) + { + OtherEvaluatorType& nonconst_other = const_cast(other); + Packet tmp = m_argImpl.template packet(index); + m_argImpl.template writePacket + (index, nonconst_other.template packet(index)); + nonconst_other.template writePacket(index, tmp); + } + +protected: + typename evaluator::nestedType m_argImpl; +}; + + +// ---------- SelfCwiseBinaryOp ---------- + +template +struct evaluator_impl > + : evaluator_impl_base > +{ + typedef SelfCwiseBinaryOp XprType; + + evaluator_impl(const XprType& selfCwiseBinaryOp) + : m_argImpl(selfCwiseBinaryOp.expression()), + m_functor(selfCwiseBinaryOp.functor()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::Packet Packet; + + // This function and the next one are needed by assign to correctly align loads/stores + // TODO make Assign use .data() + Scalar& coeffRef(Index row, Index col) + { + return m_argImpl.coeffRef(row, col); + } + + inline Scalar& coeffRef(Index index) + { + return m_argImpl.coeffRef(index); + } + + template + void copyCoeff(Index row, Index col, const OtherEvaluatorType& other) + { + Scalar& tmp = m_argImpl.coeffRef(row, col); + tmp = m_functor(tmp, other.coeff(row, col)); + } + + template + void copyCoeff(Index index, const OtherEvaluatorType& other) + { + Scalar& tmp = m_argImpl.coeffRef(index); + tmp = m_functor(tmp, other.coeff(index)); + } + + template + void copyPacket(Index row, Index col, const OtherEvaluatorType& other) + { + const Packet res = m_functor.packetOp(m_argImpl.template packet(row, col), + other.template packet(row, col)); + m_argImpl.template writePacket(row, col, res); + } + + template + void copyPacket(Index index, const OtherEvaluatorType& other) + { + const Packet res = m_functor.packetOp(m_argImpl.template packet(index), + other.template packet(index)); + m_argImpl.template 
writePacket(index, res); + } + +protected: + typename evaluator::nestedType m_argImpl; + const BinaryOp m_functor; +}; + + +} // namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COREEVALUATORS_H diff --git a/resources/3rdparty/eigen/Eigen/src/Core/CwiseBinaryOp.h b/resources/3rdparty/eigen/Eigen/src/Core/CwiseBinaryOp.h index 1b93af31b..686c2afa3 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/CwiseBinaryOp.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/CwiseBinaryOp.h @@ -122,13 +122,13 @@ class CwiseBinaryOp : internal::no_assignment_operator, typedef typename internal::remove_reference::type _LhsNested; typedef typename internal::remove_reference::type _RhsNested; - EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp()) - : m_lhs(lhs), m_rhs(rhs), m_functor(func) + EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp()) + : m_lhs(aLhs), m_rhs(aRhs), m_functor(func) { EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar); // require the sizes to match EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs) - eigen_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols()); + eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols()); } EIGEN_STRONG_INLINE Index rows() const { @@ -169,17 +169,17 @@ class CwiseBinaryOpImpl typedef typename internal::dense_xpr_base >::type Base; EIGEN_DENSE_PUBLIC_INTERFACE( Derived ) - EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const + EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const { - return derived().functor()(derived().lhs().coeff(row, col), - derived().rhs().coeff(row, col)); + return derived().functor()(derived().lhs().coeff(rowId, colId), + derived().rhs().coeff(rowId, colId)); } template - EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const + EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const { - return derived().functor().packetOp(derived().lhs().template packet(row, col), - derived().rhs().template packet(row, col)); + return derived().functor().packetOp(derived().lhs().template packet(rowId, colId), + derived().rhs().template packet(rowId, colId)); } EIGEN_STRONG_INLINE const Scalar coeff(Index index) const diff --git a/resources/3rdparty/eigen/Eigen/src/Core/CwiseNullaryOp.h b/resources/3rdparty/eigen/Eigen/src/Core/CwiseNullaryOp.h index 2635a62b0..edd2bed46 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/CwiseNullaryOp.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/CwiseNullaryOp.h @@ -54,27 +54,27 @@ class CwiseNullaryOp : internal::no_assignment_operator, typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp) - CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp()) - : m_rows(rows), m_cols(cols), m_functor(func) + CwiseNullaryOp(Index nbRows, Index nbCols, const NullaryOp& func = NullaryOp()) + : m_rows(nbRows), m_cols(nbCols), m_functor(func) { - eigen_assert(rows >= 0 - && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) - && cols >= 0 - && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); + eigen_assert(nbRows >= 0 + && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == nbRows) + && nbCols >= 0 + && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == nbCols)); } EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); } EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); } - EIGEN_STRONG_INLINE 
const Scalar coeff(Index rows, Index cols) const + EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const { - return m_functor(rows, cols); + return m_functor(rowId, colId); } template - EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const + EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const { - return m_functor.packetOp(row, col); + return m_functor.packetOp(rowId, colId); } EIGEN_STRONG_INLINE const Scalar coeff(Index index) const @@ -295,11 +295,11 @@ DenseBase::LinSpaced(const Scalar& low, const Scalar& high) /** \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ template bool DenseBase::isApproxToConstant -(const Scalar& value, RealScalar prec) const +(const Scalar& val, const RealScalar& prec) const { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) - if(!internal::isApprox(this->coeff(i, j), value, prec)) + if(!internal::isApprox(this->coeff(i, j), val, prec)) return false; return true; } @@ -309,9 +309,9 @@ bool DenseBase::isApproxToConstant * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ template bool DenseBase::isConstant -(const Scalar& value, RealScalar prec) const +(const Scalar& val, const RealScalar& prec) const { - return isApproxToConstant(value, prec); + return isApproxToConstant(val, prec); } /** Alias for setConstant(): sets all coefficients in this expression to \a value. @@ -319,9 +319,9 @@ bool DenseBase::isConstant * \sa setConstant(), Constant(), class CwiseNullaryOp */ template -EIGEN_STRONG_INLINE void DenseBase::fill(const Scalar& value) +EIGEN_STRONG_INLINE void DenseBase::fill(const Scalar& val) { - setConstant(value); + setConstant(val); } /** Sets all coefficients in this expression to \a value. @@ -329,9 +329,9 @@ EIGEN_STRONG_INLINE void DenseBase::fill(const Scalar& value) * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes() */ template -EIGEN_STRONG_INLINE Derived& DenseBase::setConstant(const Scalar& value) +EIGEN_STRONG_INLINE Derived& DenseBase::setConstant(const Scalar& val) { - return derived() = Constant(rows(), cols(), value); + return derived() = Constant(rows(), cols(), val); } /** Resizes to the given \a size, and sets all coefficients in this expression to the given \a value. @@ -345,10 +345,10 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setConstant(const Scalar& value */ template EIGEN_STRONG_INLINE Derived& -PlainObjectBase::setConstant(Index size, const Scalar& value) +PlainObjectBase::setConstant(Index size, const Scalar& val) { resize(size); - return setConstant(value); + return setConstant(val); } /** Resizes to the given size, and sets all coefficients in this expression to the given \a value. 
@@ -364,10 +364,10 @@ PlainObjectBase::setConstant(Index size, const Scalar& value) */ template EIGEN_STRONG_INLINE Derived& -PlainObjectBase::setConstant(Index rows, Index cols, const Scalar& value) +PlainObjectBase::setConstant(Index nbRows, Index nbCols, const Scalar& val) { - resize(rows, cols); - return setConstant(value); + resize(nbRows, nbCols); + return setConstant(val); } /** @@ -384,10 +384,10 @@ PlainObjectBase::setConstant(Index rows, Index cols, const Scalar& valu * \sa CwiseNullaryOp */ template -EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(Index size, const Scalar& low, const Scalar& high) +EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return derived() = Derived::NullaryExpr(size, internal::linspaced_op(low,high,size)); + return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op(low,high,newSize)); } /** @@ -425,9 +425,9 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(const Scalar& low, */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType -DenseBase::Zero(Index rows, Index cols) +DenseBase::Zero(Index nbRows, Index nbCols) { - return Constant(rows, cols, Scalar(0)); + return Constant(nbRows, nbCols, Scalar(0)); } /** \returns an expression of a zero vector. @@ -479,7 +479,7 @@ DenseBase::Zero() * \sa class CwiseNullaryOp, Zero() */ template -bool DenseBase::isZero(RealScalar prec) const +bool DenseBase::isZero(const RealScalar& prec) const { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) @@ -512,9 +512,9 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setZero() */ template EIGEN_STRONG_INLINE Derived& -PlainObjectBase::setZero(Index size) +PlainObjectBase::setZero(Index newSize) { - resize(size); + resize(newSize); return setConstant(Scalar(0)); } @@ -530,9 +530,9 @@ PlainObjectBase::setZero(Index size) */ template EIGEN_STRONG_INLINE Derived& -PlainObjectBase::setZero(Index rows, Index cols) +PlainObjectBase::setZero(Index nbRows, Index nbCols) { - resize(rows, cols); + resize(nbRows, nbCols); return setConstant(Scalar(0)); } @@ -554,9 +554,9 @@ PlainObjectBase::setZero(Index rows, Index cols) */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType -DenseBase::Ones(Index rows, Index cols) +DenseBase::Ones(Index nbRows, Index nbCols) { - return Constant(rows, cols, Scalar(1)); + return Constant(nbRows, nbCols, Scalar(1)); } /** \returns an expression of a vector where all coefficients equal one. @@ -577,9 +577,9 @@ DenseBase::Ones(Index rows, Index cols) */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType -DenseBase::Ones(Index size) +DenseBase::Ones(Index newSize) { - return Constant(size, Scalar(1)); + return Constant(newSize, Scalar(1)); } /** \returns an expression of a fixed-size matrix or vector where all coefficients equal one. 
@@ -609,7 +609,7 @@ DenseBase::Ones() */ template bool DenseBase::isOnes -(RealScalar prec) const +(const RealScalar& prec) const { return isApproxToConstant(Scalar(1), prec); } @@ -638,9 +638,9 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setOnes() */ template EIGEN_STRONG_INLINE Derived& -PlainObjectBase::setOnes(Index size) +PlainObjectBase::setOnes(Index newSize) { - resize(size); + resize(newSize); return setConstant(Scalar(1)); } @@ -656,9 +656,9 @@ PlainObjectBase::setOnes(Index size) */ template EIGEN_STRONG_INLINE Derived& -PlainObjectBase::setOnes(Index rows, Index cols) +PlainObjectBase::setOnes(Index nbRows, Index nbCols) { - resize(rows, cols); + resize(nbRows, nbCols); return setConstant(Scalar(1)); } @@ -680,9 +680,9 @@ PlainObjectBase::setOnes(Index rows, Index cols) */ template EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType -MatrixBase::Identity(Index rows, Index cols) +MatrixBase::Identity(Index nbRows, Index nbCols) { - return DenseBase::NullaryExpr(rows, cols, internal::scalar_identity_op()); + return DenseBase::NullaryExpr(nbRows, nbCols, internal::scalar_identity_op()); } /** \returns an expression of the identity matrix (not necessarily square). @@ -714,7 +714,7 @@ MatrixBase::Identity() */ template bool MatrixBase::isIdentity -(RealScalar prec) const +(const RealScalar& prec) const { for(Index j = 0; j < cols(); ++j) { @@ -785,9 +785,9 @@ EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity() * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity() */ template -EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity(Index rows, Index cols) +EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity(Index nbRows, Index nbCols) { - derived().resize(rows, cols); + derived().resize(nbRows, nbCols); return setIdentity(); } @@ -798,10 +798,10 @@ EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity(Index rows, Index * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template -EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index size, Index i) +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index newSize, Index i) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return BasisReturnType(SquareMatrixType::Identity(size,size), i); + return BasisReturnType(SquareMatrixType::Identity(newSize,newSize), i); } /** \returns an expression of the i-th unit (basis) vector. 
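
Usage sketch (not part of the patch): the public entry points backed by CwiseNullaryOp whose parameters the hunks above merely rename (nbRows, nbCols, newSize, val); their observable behaviour is unchanged by this patch.

    // Illustrative usage only; assumes Eigen is on the include path.
    #include <Eigen/Dense>
    #include <cassert>
    #include <cmath>

    int main() {
      Eigen::MatrixXd m = Eigen::MatrixXd::Constant(2, 3, 1.5);  // Constant / setConstant
      assert(m.isApproxToConstant(1.5));

      Eigen::VectorXd v(5);
      v.setLinSpaced(5, 0.0, 1.0);                 // evenly spaced 0, 0.25, ..., 1
      assert(std::abs(v(4) - 1.0) < 1e-12);

      Eigen::Matrix3d id = Eigen::Matrix3d::Identity();
      assert(id.isIdentity());

      Eigen::Vector4d e2 = Eigen::Vector4d::Unit(2);  // third basis vector
      assert(e2(2) == 1.0 && e2.sum() == 1.0);
      return 0;
    }
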
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/CwiseUnaryOp.h b/resources/3rdparty/eigen/Eigen/src/Core/CwiseUnaryOp.h index 063355ae5..f2de749f9 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/CwiseUnaryOp.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/CwiseUnaryOp.h @@ -98,15 +98,15 @@ class CwiseUnaryOpImpl typedef typename internal::dense_xpr_base >::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Derived) - EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const + EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const { - return derived().functor()(derived().nestedExpression().coeff(row, col)); + return derived().functor()(derived().nestedExpression().coeff(rowId, colId)); } template - EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const + EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const { - return derived().functor().packetOp(derived().nestedExpression().template packet(row, col)); + return derived().functor().packetOp(derived().nestedExpression().template packet(rowId, colId)); } EIGEN_STRONG_INLINE const Scalar coeff(Index index) const diff --git a/resources/3rdparty/eigen/Eigen/src/Core/DenseBase.h b/resources/3rdparty/eigen/Eigen/src/Core/DenseBase.h index 1cc0314ef..8dc593174 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/DenseBase.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/DenseBase.h @@ -204,21 +204,21 @@ template class DenseBase * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. */ - void resize(Index size) + void resize(Index newSize) { - EIGEN_ONLY_USED_FOR_DEBUG(size); - eigen_assert(size == this->size() + EIGEN_ONLY_USED_FOR_DEBUG(newSize); + eigen_assert(newSize == this->size() && "DenseBase::resize() does not actually allow to resize."); } /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. 
*/ - void resize(Index rows, Index cols) + void resize(Index nbRows, Index nbCols) { - EIGEN_ONLY_USED_FOR_DEBUG(rows); - EIGEN_ONLY_USED_FOR_DEBUG(cols); - eigen_assert(rows == this->rows() && cols == this->cols() + EIGEN_ONLY_USED_FOR_DEBUG(nbRows); + EIGEN_ONLY_USED_FOR_DEBUG(nbCols); + eigen_assert(nbRows == this->rows() && nbCols == this->cols() && "DenseBase::resize() does not actually allow to resize."); } @@ -348,17 +348,17 @@ template class DenseBase template bool isApprox(const DenseBase& other, - RealScalar prec = NumTraits::dummy_precision()) const; + const RealScalar& prec = NumTraits::dummy_precision()) const; bool isMuchSmallerThan(const RealScalar& other, - RealScalar prec = NumTraits::dummy_precision()) const; + const RealScalar& prec = NumTraits::dummy_precision()) const; template bool isMuchSmallerThan(const DenseBase& other, - RealScalar prec = NumTraits::dummy_precision()) const; + const RealScalar& prec = NumTraits::dummy_precision()) const; - bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits::dummy_precision()) const; - bool isConstant(const Scalar& value, RealScalar prec = NumTraits::dummy_precision()) const; - bool isZero(RealScalar prec = NumTraits::dummy_precision()) const; - bool isOnes(RealScalar prec = NumTraits::dummy_precision()) const; + bool isApproxToConstant(const Scalar& value, const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isConstant(const Scalar& value, const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isZero(const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isOnes(const RealScalar& prec = NumTraits::dummy_precision()) const; inline Derived& operator*=(const Scalar& other); inline Derived& operator/=(const Scalar& other); diff --git a/resources/3rdparty/eigen/Eigen/src/Core/DenseCoeffsBase.h b/resources/3rdparty/eigen/Eigen/src/Core/DenseCoeffsBase.h index 72704c2d7..3c890f215 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/DenseCoeffsBase.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/DenseCoeffsBase.h @@ -427,22 +427,22 @@ class DenseCoeffsBase : public DenseCoeffsBase EIGEN_STRONG_INLINE void writePacket - (Index row, Index col, const typename internal::packet_traits::type& x) + (Index row, Index col, const typename internal::packet_traits::type& val) { eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); - derived().template writePacket(row,col,x); + derived().template writePacket(row,col,val); } /** \internal */ template EIGEN_STRONG_INLINE void writePacketByOuterInner - (Index outer, Index inner, const typename internal::packet_traits::type& x) + (Index outer, Index inner, const typename internal::packet_traits::type& val) { writePacket(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner), - x); + val); } /** \internal @@ -456,10 +456,10 @@ class DenseCoeffsBase : public DenseCoeffsBase EIGEN_STRONG_INLINE void writePacket - (Index index, const typename internal::packet_traits::type& x) + (Index index, const typename internal::packet_traits::type& val) { eigen_internal_assert(index >= 0 && index < size()); - derived().template writePacket(index,x); + derived().template writePacket(index,val); } #ifndef EIGEN_PARSED_BY_DOXYGEN diff --git a/resources/3rdparty/eigen/Eigen/src/Core/DenseStorage.h b/resources/3rdparty/eigen/Eigen/src/Core/DenseStorage.h index 1fc2daf2c..9d34ec934 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/DenseStorage.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/DenseStorage.h @@ -35,8 +35,16 
@@ template struct plain_array { EIGEN_USER_ALIGN16 T array[Size]; - plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf) } - plain_array(constructor_without_unaligned_array_assert) {} + + plain_array() + { + EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf); + EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); + } + + plain_array(constructor_without_unaligned_array_assert) + { + EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); + } }; template @@ -135,13 +152,13 @@ template class DenseStorage class DenseStorage class DenseStorage class DenseStorage(size)), m_rows(rows), m_cols(cols) + inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols) + : m_data(internal::conditional_aligned_new_auto(size)), m_rows(nbRows), m_cols(nbCols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); } inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } inline DenseIndex rows(void) const {return m_rows;} inline DenseIndex cols(void) const {return m_cols;} - inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex cols) + inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*m_cols); - m_rows = rows; - m_cols = cols; + m_rows = nbRows; + m_cols = nbCols; } - void resize(DenseIndex size, DenseIndex rows, DenseIndex cols) + void resize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols) { if(size != m_rows*m_cols) { @@ -219,8 +236,8 @@ template class DenseStorage class DenseStorage(size)), m_cols(cols) + inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto(size)), m_cols(nbCols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Rows*m_cols); } inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } static inline DenseIndex rows(void) {return _Rows;} inline DenseIndex cols(void) const {return m_cols;} - inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols) + inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, _Rows*m_cols); - m_cols = cols; + m_cols = nbCols; } - EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex cols) + EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex nbCols) { if(size != _Rows*m_cols) { @@ -256,7 +273,7 @@ template class DenseStorage class DenseStorage(size)), m_rows(rows) + inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(nbRows) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Cols*m_rows); } inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } inline DenseIndex rows(void) const {return m_rows;} static inline DenseIndex cols(void) {return _Cols;} - inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex) + inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex) { m_data = 
internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*_Cols); - m_rows = rows; + m_rows = nbRows; } - EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex rows, DenseIndex) + EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex nbRows, DenseIndex) { if(size != m_rows*_Cols) { @@ -292,7 +309,7 @@ template class DenseStorage > typedef typename remove_reference::type _MatrixTypeNested; typedef typename MatrixType::StorageKind StorageKind; enum { - RowsAtCompileTime = (int(DiagIndex) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic - : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), - MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), + RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic + : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), + MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), ColsAtCompileTime = 1, MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic - : DiagIndex == Dynamic ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime, + : DiagIndex == DynamicIndex ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), @@ -61,15 +61,16 @@ struct traits > }; } -template class Diagonal - : public internal::dense_xpr_base< Diagonal >::type +template class Diagonal + : public internal::dense_xpr_base< Diagonal >::type { public: + enum { DiagIndex = _DiagIndex }; typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) - inline Diagonal(MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {} + inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) @@ -113,20 +114,20 @@ template class Diagonal return m_matrix.coeff(row+rowOffset(), row+colOffset()); } - inline Scalar& coeffRef(Index index) + inline Scalar& coeffRef(Index idx) { EIGEN_STATIC_ASSERT_LVALUE(MatrixType) - return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset()); + return m_matrix.const_cast_derived().coeffRef(idx+rowOffset(), idx+colOffset()); } - inline const Scalar& coeffRef(Index index) const + inline const Scalar& coeffRef(Index idx) const { - return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset()); + return m_matrix.const_cast_derived().coeffRef(idx+rowOffset(), idx+colOffset()); } - inline CoeffReturnType coeff(Index index) const + inline CoeffReturnType coeff(Index idx) const { - return m_matrix.coeff(index+rowOffset(), index+colOffset()); + return m_matrix.coeff(idx+rowOffset(), idx+colOffset()); } const typename internal::remove_all::type& @@ -142,7 +143,7 @@ template class Diagonal protected: typename MatrixType::Nested m_matrix; - const internal::variable_if_dynamic m_index; + const internal::variable_if_dynamicindex m_index; private: // some compilers may fail to optimize std::max etc in case of compile-time constants... 
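
Usage sketch (not part of the patch) of what the Diagonal index bookkeeping above amounts to in the public API: entry i of diagonal(k) refers to coefficient (i, i+k) for k >= 0 and (i-k, i) for k < 0, which is exactly the rowOffset()/colOffset() split stored via variable_if_dynamicindex.

    // Illustrative usage only; assumes Eigen is on the include path.
    #include <Eigen/Dense>
    #include <cassert>

    int main() {
      Eigen::Matrix4d m;
      for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
          m(r, c) = 10 * r + c;

      assert(m.diagonal(1)(2) == m(2, 3));   // super-diagonal: (i, i+1)
      assert(m.diagonal(-2)(0) == m(2, 0));  // sub-diagonal:   (i+2, i)
      m.diagonal(-2)(0) = -1.0;              // diagonals are writable lvalues
      assert(m(2, 0) == -1.0);
      return 0;
    }
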
@@ -189,18 +190,18 @@ MatrixBase::diagonal() const * * \sa MatrixBase::diagonal(), class Diagonal */ template -inline typename MatrixBase::template DiagonalIndexReturnType::Type +inline typename MatrixBase::template DiagonalIndexReturnType::Type MatrixBase::diagonal(Index index) { - return typename DiagonalIndexReturnType::Type(derived(), index); + return typename DiagonalIndexReturnType::Type(derived(), index); } /** This is the const version of diagonal(Index). */ template -inline typename MatrixBase::template ConstDiagonalIndexReturnType::Type +inline typename MatrixBase::template ConstDiagonalIndexReturnType::Type MatrixBase::diagonal(Index index) const { - return typename ConstDiagonalIndexReturnType::Type(derived(), index); + return typename ConstDiagonalIndexReturnType::Type(derived(), index); } /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this diff --git a/resources/3rdparty/eigen/Eigen/src/Core/DiagonalMatrix.h b/resources/3rdparty/eigen/Eigen/src/Core/DiagonalMatrix.h index 88190da68..da0264b0e 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/DiagonalMatrix.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/DiagonalMatrix.h @@ -20,6 +20,7 @@ class DiagonalBase : public EigenBase public: typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; typedef typename DiagonalVectorType::Scalar Scalar; + typedef typename DiagonalVectorType::RealScalar RealScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; @@ -65,6 +66,17 @@ class DiagonalBase : public EigenBase return diagonal().cwiseInverse(); } + inline const DiagonalWrapper, const DiagonalVectorType> > + operator*(const Scalar& scalar) const + { + return diagonal() * scalar; + } + friend inline const DiagonalWrapper, const DiagonalVectorType> > + operator*(const Scalar& scalar, const DiagonalBase& other) + { + return other.diagonal() * scalar; + } + #ifdef EIGEN2_SUPPORT template bool isApprox(const DiagonalBase& other, typename NumTraits::Real precision = NumTraits::dummy_precision()) const @@ -238,7 +250,7 @@ class DiagonalWrapper #endif /** Constructor from expression of diagonal coefficients to wrap. */ - inline DiagonalWrapper(DiagonalVectorType& diagonal) : m_diagonal(diagonal) {} + inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {} /** \returns a const reference to the wrapped expression of diagonal coefficients. 
*/ const DiagonalVectorType& diagonal() const { return m_diagonal; } @@ -272,7 +284,7 @@ MatrixBase::asDiagonal() const * \sa asDiagonal() */ template -bool MatrixBase::isDiagonal(RealScalar prec) const +bool MatrixBase::isDiagonal(const RealScalar& prec) const { if(cols() != rows()) return false; RealScalar maxAbsOnDiagonal = static_cast(-1); diff --git a/resources/3rdparty/eigen/Eigen/src/Core/DiagonalProduct.h b/resources/3rdparty/eigen/Eigen/src/Core/DiagonalProduct.h index 598c6b3e1..8c7b2d978 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/DiagonalProduct.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/DiagonalProduct.h @@ -103,9 +103,9 @@ class DiagonalProduct : internal::no_assignment_operator, template template inline const DiagonalProduct -MatrixBase::operator*(const DiagonalBase &diagonal) const +MatrixBase::operator*(const DiagonalBase &a_diagonal) const { - return DiagonalProduct(derived(), diagonal.derived()); + return DiagonalProduct(derived(), a_diagonal.derived()); } /** \returns the diagonal matrix product of \c *this by the matrix \a matrix. diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Dot.h b/resources/3rdparty/eigen/Eigen/src/Core/Dot.h index ae9274e36..a7a18c939 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/Dot.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/Dot.h @@ -223,7 +223,7 @@ MatrixBase::lpNorm() const template template bool MatrixBase::isOrthogonal -(const MatrixBase& other, RealScalar prec) const +(const MatrixBase& other, const RealScalar& prec) const { typename internal::nested::type nested(derived()); typename internal::nested::type otherNested(other.derived()); @@ -242,7 +242,7 @@ bool MatrixBase::isOrthogonal * Output: \verbinclude MatrixBase_isUnitary.out */ template -bool MatrixBase::isUnitary(RealScalar prec) const +bool MatrixBase::isUnitary(const RealScalar& prec) const { typename Derived::Nested nested(derived()); for(Index i = 0; i < cols(); ++i) diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Functors.h b/resources/3rdparty/eigen/Eigen/src/Core/Functors.h index 278c46c6b..09388972a 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/Functors.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/Functors.h @@ -204,21 +204,28 @@ struct functor_traits > { * * \sa class CwiseBinaryOp, Cwise::operator/() */ -template struct scalar_quotient_op { +template struct scalar_quotient_op { + enum { + // TODO vectorize mixed product + Vectorizable = is_same::value && packet_traits::HasDiv && packet_traits::HasDiv + }; + typedef typename scalar_product_traits::ReturnType result_type; EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op) - EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a / b; } + EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const { return internal::pdiv(a,b); } }; -template -struct functor_traits > { +template +struct functor_traits > { enum { - Cost = 2 * NumTraits::MulCost, - PacketAccess = packet_traits::HasDiv + Cost = (NumTraits::MulCost + NumTraits::MulCost), // rough estimate! + PacketAccess = scalar_quotient_op::Vectorizable }; }; + + /** \internal * \brief Template functor to compute the and of two booleans * @@ -447,7 +454,7 @@ struct functor_traits > * indeed it seems better to declare m_other as a Packet and do the pset1() once * in the constructor. 
However, in practice: * - GCC does not like m_other as a Packet and generate a load every time it needs it - * - on the other hand GCC is able to moves the pset1() away the loop :) + * - on the other hand GCC is able to moves the pset1() outside the loop :) * - simpler code ;) * (ICC and gcc 4.4 seems to perform well in both cases, the issue is visible with y = a*x + b*y) */ @@ -478,33 +485,6 @@ template struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; -template -struct scalar_quotient1_impl { - typedef typename packet_traits::type Packet; - // FIXME default copy constructors seems bugged with std::complex<> - EIGEN_STRONG_INLINE scalar_quotient1_impl(const scalar_quotient1_impl& other) : m_other(other.m_other) { } - EIGEN_STRONG_INLINE scalar_quotient1_impl(const Scalar& other) : m_other(static_cast(1) / other) {} - EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; } - EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const - { return internal::pmul(a, pset1(m_other)); } - const Scalar m_other; -}; -template -struct functor_traits > -{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; - -template -struct scalar_quotient1_impl { - // FIXME default copy constructors seems bugged with std::complex<> - EIGEN_STRONG_INLINE scalar_quotient1_impl(const scalar_quotient1_impl& other) : m_other(other.m_other) { } - EIGEN_STRONG_INLINE scalar_quotient1_impl(const Scalar& other) : m_other(other) {} - EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; } - typename add_const_on_value_type::Nested>::type m_other; -}; -template -struct functor_traits > -{ enum { Cost = 2 * NumTraits::MulCost, PacketAccess = false }; }; - /** \internal * \brief Template functor to divide a scalar by a fixed other one * @@ -514,14 +494,19 @@ struct functor_traits > * \sa class CwiseUnaryOp, MatrixBase::operator/ */ template -struct scalar_quotient1_op : scalar_quotient1_impl::IsInteger > { - EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other) - : scalar_quotient1_impl::IsInteger >(other) {} +struct scalar_quotient1_op { + typedef typename packet_traits::type Packet; + // FIXME default copy constructors seems bugged with std::complex<> + EIGEN_STRONG_INLINE scalar_quotient1_op(const scalar_quotient1_op& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other) : m_other(other) {} + EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; } + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return internal::pdiv(a, pset1(m_other)); } + typename add_const_on_value_type::Nested>::type m_other; }; template struct functor_traits > -: functor_traits::IsInteger> > -{}; +{ enum { Cost = 2 * NumTraits::MulCost, PacketAccess = packet_traits::HasDiv }; }; // nullary functors @@ -660,6 +645,7 @@ template struct functor_has_linear_access struct functor_allows_mixing_real_and_complex { enum { ret = 0 }; }; template struct functor_allows_mixing_real_and_complex > { enum { ret = 1 }; }; template struct functor_allows_mixing_real_and_complex > { enum { ret = 1 }; }; +template struct functor_allows_mixing_real_and_complex > { enum { ret = 1 }; }; /** \internal diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Fuzzy.h b/resources/3rdparty/eigen/Eigen/src/Core/Fuzzy.h index d74edcfdb..8fb9a01dd 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/Fuzzy.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/Fuzzy.h 
@@ -19,7 +19,7 @@ namespace internal template::IsInteger> struct isApprox_selector { - static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec) + static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) { using std::min; typename internal::nested::type nested(x); @@ -31,7 +31,7 @@ struct isApprox_selector template struct isApprox_selector { - static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar) + static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&) { return x.matrix() == y.matrix(); } @@ -40,7 +40,7 @@ struct isApprox_selector template::IsInteger> struct isMuchSmallerThan_object_selector { - static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec) + static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) { return x.cwiseAbs2().sum() <= abs2(prec) * y.cwiseAbs2().sum(); } @@ -49,7 +49,7 @@ struct isMuchSmallerThan_object_selector template struct isMuchSmallerThan_object_selector { - static bool run(const Derived& x, const OtherDerived&, typename Derived::RealScalar) + static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&) { return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); } @@ -58,7 +58,7 @@ struct isMuchSmallerThan_object_selector template::IsInteger> struct isMuchSmallerThan_scalar_selector { - static bool run(const Derived& x, const typename Derived::RealScalar& y, typename Derived::RealScalar prec) + static bool run(const Derived& x, const typename Derived::RealScalar& y, const typename Derived::RealScalar& prec) { return x.cwiseAbs2().sum() <= abs2(prec * y); } @@ -67,7 +67,7 @@ struct isMuchSmallerThan_scalar_selector template struct isMuchSmallerThan_scalar_selector { - static bool run(const Derived& x, const typename Derived::RealScalar&, typename Derived::RealScalar) + static bool run(const Derived& x, const typename Derived::RealScalar&, const typename Derived::RealScalar&) { return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); } @@ -97,7 +97,7 @@ template template bool DenseBase::isApprox( const DenseBase& other, - RealScalar prec + const RealScalar& prec ) const { return internal::isApprox_selector::run(derived(), other.derived(), prec); @@ -119,7 +119,7 @@ bool DenseBase::isApprox( template bool DenseBase::isMuchSmallerThan( const typename NumTraits::Real& other, - RealScalar prec + const RealScalar& prec ) const { return internal::isMuchSmallerThan_scalar_selector::run(derived(), other, prec); @@ -139,7 +139,7 @@ template template bool DenseBase::isMuchSmallerThan( const DenseBase& other, - RealScalar prec + const RealScalar& prec ) const { return internal::isMuchSmallerThan_object_selector::run(derived(), other.derived(), prec); diff --git a/resources/3rdparty/eigen/Eigen/src/Core/GeneralProduct.h b/resources/3rdparty/eigen/Eigen/src/Core/GeneralProduct.h index bfc2a67b1..9abc7b286 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/GeneralProduct.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/GeneralProduct.h @@ -311,7 +311,7 @@ class GeneralProduct typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; - GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) + GeneralProduct(const Lhs& a_lhs, const Rhs& a_rhs) : Base(a_lhs,a_rhs) { // EIGEN_STATIC_ASSERT((internal::is_same::value), // 
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Map.h b/resources/3rdparty/eigen/Eigen/src/Core/Map.h index 15a19226e..2b0a44697 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/Map.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/Map.h @@ -148,8 +148,8 @@ template class Ma * \param size the size of the vector expression * \param stride optional Stride object, passing the strides. */ - inline Map(PointerArgType data, Index size, const StrideType& stride = StrideType()) - : Base(cast_to_pointer_type(data), size), m_stride(stride) + inline Map(PointerArgType dataPtr, Index a_size, const StrideType& a_stride = StrideType()) + : Base(cast_to_pointer_type(dataPtr), a_size), m_stride(a_stride) { PlainObjectType::Base::_check_template_params(); } @@ -161,8 +161,8 @@ template class Ma * \param cols the number of columns of the matrix expression * \param stride optional Stride object, passing the strides. */ - inline Map(PointerArgType data, Index rows, Index cols, const StrideType& stride = StrideType()) - : Base(cast_to_pointer_type(data), rows, cols), m_stride(stride) + inline Map(PointerArgType dataPtr, Index nbRows, Index nbCols, const StrideType& a_stride = StrideType()) + : Base(cast_to_pointer_type(dataPtr), nbRows, nbCols), m_stride(a_stride) { PlainObjectType::Base::_check_template_params(); } diff --git a/resources/3rdparty/eigen/Eigen/src/Core/MapBase.h b/resources/3rdparty/eigen/Eigen/src/Core/MapBase.h index a388d61ea..6876de588 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/MapBase.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/MapBase.h @@ -87,9 +87,9 @@ template class MapBase */ inline const Scalar* data() const { return m_data; } - inline const Scalar& coeff(Index row, Index col) const + inline const Scalar& coeff(Index rowId, Index colId) const { - return m_data[col * colStride() + row * rowStride()]; + return m_data[colId * colStride() + rowId * rowStride()]; } inline const Scalar& coeff(Index index) const @@ -98,9 +98,9 @@ template class MapBase return m_data[index * innerStride()]; } - inline const Scalar& coeffRef(Index row, Index col) const + inline const Scalar& coeffRef(Index rowId, Index colId) const { - return this->m_data[col * colStride() + row * rowStride()]; + return this->m_data[colId * colStride() + rowId * rowStride()]; } inline const Scalar& coeffRef(Index index) const @@ -110,10 +110,10 @@ template class MapBase } template - inline PacketScalar packet(Index row, Index col) const + inline PacketScalar packet(Index rowId, Index colId) const { return internal::ploadt - (m_data + (col * colStride() + row * rowStride())); + (m_data + (colId * colStride() + rowId * rowStride())); } template @@ -123,29 +123,29 @@ template class MapBase return internal::ploadt(m_data + index * innerStride()); } - inline MapBase(PointerType data) : m_data(data), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) + inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) checkSanity(); } - inline MapBase(PointerType data, Index size) - : m_data(data), - m_rows(RowsAtCompileTime == Dynamic ? size : Index(RowsAtCompileTime)), - m_cols(ColsAtCompileTime == Dynamic ? size : Index(ColsAtCompileTime)) + inline MapBase(PointerType dataPtr, Index vecSize) + : m_data(dataPtr), + m_rows(RowsAtCompileTime == Dynamic ? 
vecSize : Index(RowsAtCompileTime)), + m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime)) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - eigen_assert(size >= 0); - eigen_assert(data == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == size); + eigen_assert(vecSize >= 0); + eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize); checkSanity(); } - inline MapBase(PointerType data, Index rows, Index cols) - : m_data(data), m_rows(rows), m_cols(cols) + inline MapBase(PointerType dataPtr, Index nbRows, Index nbCols) + : m_data(dataPtr), m_rows(nbRows), m_cols(nbCols) { - eigen_assert( (data == 0) - || ( rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) - && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols))); + eigen_assert( (dataPtr == 0) + || ( nbRows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == nbRows) + && nbCols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == nbCols))); checkSanity(); } @@ -210,23 +210,23 @@ template class MapBase } template - inline void writePacket(Index row, Index col, const PacketScalar& x) + inline void writePacket(Index row, Index col, const PacketScalar& val) { internal::pstoret - (this->m_data + (col * colStride() + row * rowStride()), x); + (this->m_data + (col * colStride() + row * rowStride()), val); } template - inline void writePacket(Index index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& val) { EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) internal::pstoret - (this->m_data + index * innerStride(), x); + (this->m_data + index * innerStride(), val); } - explicit inline MapBase(PointerType data) : Base(data) {} - inline MapBase(PointerType data, Index size) : Base(data, size) {} - inline MapBase(PointerType data, Index rows, Index cols) : Base(data, rows, cols) {} + explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {} + inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {} + inline MapBase(PointerType dataPtr, Index nbRows, Index nbCols) : Base(dataPtr, nbRows, nbCols) {} Derived& operator=(const MapBase& other) { diff --git a/resources/3rdparty/eigen/Eigen/src/Core/MathFunctions.h b/resources/3rdparty/eigen/Eigen/src/Core/MathFunctions.h index 05e913f2f..5b57c2ff2 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/MathFunctions.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/MathFunctions.h @@ -519,6 +519,53 @@ inline EIGEN_MATHFUNC_RETVAL(atan2, Scalar) atan2(const Scalar& x, const Scalar& return EIGEN_MATHFUNC_IMPL(atan2, Scalar)::run(x, y); } +/**************************************************************************** +* Implementation of atanh2 * +****************************************************************************/ + +template +struct atanh2_default_impl +{ + typedef Scalar retval; + typedef typename NumTraits::Real RealScalar; + static inline Scalar run(const Scalar& x, const Scalar& y) + { + using std::abs; + using std::log; + using std::sqrt; + Scalar z = x / y; + if (abs(z) > sqrt(NumTraits::epsilon())) + return RealScalar(0.5) * log((y + x) / (y - x)); + else + return z + z*z*z / RealScalar(3); + } +}; + +template +struct atanh2_default_impl +{ + static inline Scalar run(const Scalar&, const Scalar&) + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + return Scalar(0); + } +}; + +template +struct atanh2_impl : atanh2_default_impl::IsInteger> {}; + +template +struct atanh2_retval +{ + typedef Scalar type; +}; + +template +inline 
EIGEN_MATHFUNC_RETVAL(atanh2, Scalar) atanh2(const Scalar& x, const Scalar& y) +{ + return EIGEN_MATHFUNC_IMPL(atanh2, Scalar)::run(x, y); +} + /**************************************************************************** * Implementation of pow * ****************************************************************************/ diff --git a/resources/3rdparty/eigen/Eigen/src/Core/MatrixBase.h b/resources/3rdparty/eigen/Eigen/src/Core/MatrixBase.h index c1e0ed132..521bba18a 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/MatrixBase.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/MatrixBase.h @@ -162,6 +162,9 @@ template class MatrixBase #ifndef EIGEN_PARSED_BY_DOXYGEN template Derived& lazyAssign(const ProductBase& other); + + template + Derived& lazyAssign(const MatrixPowerProductBase& other); #endif // not EIGEN_PARSED_BY_DOXYGEN template @@ -224,11 +227,11 @@ template class MatrixBase // Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations. // On the other hand they confuse MSVC8... #if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later - typename MatrixBase::template DiagonalIndexReturnType::Type diagonal(Index index); - typename MatrixBase::template ConstDiagonalIndexReturnType::Type diagonal(Index index) const; + typename MatrixBase::template DiagonalIndexReturnType::Type diagonal(Index index); + typename MatrixBase::template ConstDiagonalIndexReturnType::Type diagonal(Index index) const; #else - typename DiagonalIndexReturnType::Type diagonal(Index index); - typename ConstDiagonalIndexReturnType::Type diagonal(Index index) const; + typename DiagonalIndexReturnType::Type diagonal(Index index); + typename ConstDiagonalIndexReturnType::Type diagonal(Index index) const; #endif #ifdef EIGEN2_SUPPORT @@ -237,7 +240,7 @@ template class MatrixBase // huuuge hack. make Eigen2's matrix.part() work in eigen3. Problem: Diagonal is now a class template instead // of an integer constant. Solution: overload the part() method template wrt template parameters list. 
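
Standalone numerical check (illustrative only, not part of the patch) of the atanh2 helper added in the MathFunctions.h hunk above: atanh2(x, y) evaluates atanh(x/y), using 0.5*log((y+x)/(y-x)) when |x/y| is large enough and the truncated series z + z^3/3 otherwise. atanh2_ref below simply restates that formula outside of Eigen.

    // Illustrative sketch, not part of the patch.
    #include <cassert>
    #include <cmath>
    #include <limits>

    double atanh2_ref(double x, double y) {
      const double z = x / y;
      if (std::abs(z) > std::sqrt(std::numeric_limits<double>::epsilon()))
        return 0.5 * std::log((y + x) / (y - x));
      return z + z * z * z / 3.0;  // series branch for tiny ratios
    }

    int main() {
      // Closed-form branch: agrees with std::atanh of the ratio.
      assert(std::abs(atanh2_ref(0.3, 1.0) - std::atanh(0.3)) < 1e-12);
      // Series branch: for |x/y| ~ 1e-9 the cubic term is far below double
      // precision, so the result is indistinguishable from x/y itself.
      assert(std::abs(atanh2_ref(1e-9, 1.0) - 1e-9) < 1e-24);
      return 0;
    }
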
- template class U> + template class U> const DiagonalWrapper part() const { return diagonal().asDiagonal(); } #endif // EIGEN2_SUPPORT @@ -255,7 +258,7 @@ template class MatrixBase template typename ConstSelfAdjointViewReturnType::Type selfadjointView() const; const SparseView sparseView(const Scalar& m_reference = Scalar(0), - typename NumTraits::Real m_epsilon = NumTraits::dummy_precision()) const; + const typename NumTraits::Real& m_epsilon = NumTraits::dummy_precision()) const; static const IdentityReturnType Identity(); static const IdentityReturnType Identity(Index rows, Index cols); static const BasisReturnType Unit(Index size, Index i); @@ -271,16 +274,16 @@ template class MatrixBase Derived& setIdentity(); Derived& setIdentity(Index rows, Index cols); - bool isIdentity(RealScalar prec = NumTraits::dummy_precision()) const; - bool isDiagonal(RealScalar prec = NumTraits::dummy_precision()) const; + bool isIdentity(const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isDiagonal(const RealScalar& prec = NumTraits::dummy_precision()) const; - bool isUpperTriangular(RealScalar prec = NumTraits::dummy_precision()) const; - bool isLowerTriangular(RealScalar prec = NumTraits::dummy_precision()) const; + bool isUpperTriangular(const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isLowerTriangular(const RealScalar& prec = NumTraits::dummy_precision()) const; template bool isOrthogonal(const MatrixBase& other, - RealScalar prec = NumTraits::dummy_precision()) const; - bool isUnitary(RealScalar prec = NumTraits::dummy_precision()) const; + const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isUnitary(const RealScalar& prec = NumTraits::dummy_precision()) const; /** \returns true if each coefficients of \c *this and \a other are all exactly equal. * \warning When using floating point scalar values you probably should rather use a @@ -454,6 +457,7 @@ template class MatrixBase const MatrixFunctionReturnValue sin() const; const MatrixSquareRootReturnValue sqrt() const; const MatrixLogarithmReturnValue log() const; + const MatrixPowerReturnValue pow(RealScalar p) const; #ifdef EIGEN2_SUPPORT template diff --git a/resources/3rdparty/eigen/Eigen/src/Core/NoAlias.h b/resources/3rdparty/eigen/Eigen/src/Core/NoAlias.h index ecb3fa285..0112c865b 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/NoAlias.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/NoAlias.h @@ -82,6 +82,11 @@ class NoAlias { return m_expression.derived() -= CoeffBasedProduct(other.lhs(), other.rhs()); } #endif + ExpressionType& expression() const + { + return m_expression; + } + protected: ExpressionType& m_expression; }; diff --git a/resources/3rdparty/eigen/Eigen/src/Core/PermutationMatrix.h b/resources/3rdparty/eigen/Eigen/src/Core/PermutationMatrix.h index bc29f8142..86b63ea14 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/PermutationMatrix.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/PermutationMatrix.h @@ -139,9 +139,9 @@ class PermutationBase : public EigenBase /** Resizes to given size. */ - inline void resize(Index size) + inline void resize(Index newSize) { - indices().resize(size); + indices().resize(newSize); } /** Sets *this to be the identity permutation matrix */ @@ -153,9 +153,9 @@ class PermutationBase : public EigenBase /** Sets *this to be the identity permutation matrix of given size. 
*/ - void setIdentity(Index size) + void setIdentity(Index newSize) { - resize(size); + resize(newSize); setIdentity(); } @@ -317,7 +317,7 @@ class PermutationMatrix : public PermutationBase - explicit inline PermutationMatrix(const MatrixBase& indices) : m_indices(indices) + explicit inline PermutationMatrix(const MatrixBase& a_indices) : m_indices(a_indices) {} /** Convert the Transpositions \a tr to a permutation matrix */ @@ -406,12 +406,12 @@ class Map, typedef typename IndicesType::Scalar Index; #endif - inline Map(const Index* indices) - : m_indices(indices) + inline Map(const Index* indicesPtr) + : m_indices(indicesPtr) {} - inline Map(const Index* indices, Index size) - : m_indices(indices,size) + inline Map(const Index* indicesPtr, Index size) + : m_indices(indicesPtr,size) {} /** Copies the other permutation into *this */ @@ -490,8 +490,8 @@ class PermutationWrapper : public PermutationBase -EIGEN_ALWAYS_INLINE void check_rows_cols_for_overflow(Index rows, Index cols) -{ - // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242 - // we assume Index is signed - Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed - bool error = (rows < 0 || cols < 0) ? true - : (rows == 0 || cols == 0) ? false - : (rows > max_index / cols); - if (error) - throw_std_bad_alloc(); -} +template struct check_rows_cols_for_overflow { + template + static EIGEN_ALWAYS_INLINE void run(Index, Index) + { + } +}; + +template<> struct check_rows_cols_for_overflow { + template + static EIGEN_ALWAYS_INLINE void run(Index rows, Index cols) + { + // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242 + // we assume Index is signed + Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed + bool error = (rows == 0 || cols == 0) ? 
false + : (rows > max_index / cols); + if (error) + throw_std_bad_alloc(); + } +}; template struct conservative_resize_like_impl; @@ -119,12 +127,12 @@ class PlainObjectBase : public internal::dense_xpr_base::type EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); } - EIGEN_STRONG_INLINE const Scalar& coeff(Index row, Index col) const + EIGEN_STRONG_INLINE const Scalar& coeff(Index rowId, Index colId) const { if(Flags & RowMajorBit) - return m_storage.data()[col + row * m_storage.cols()]; + return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major - return m_storage.data()[row + col * m_storage.rows()]; + return m_storage.data()[rowId + colId * m_storage.rows()]; } EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const @@ -132,12 +140,12 @@ class PlainObjectBase : public internal::dense_xpr_base::type return m_storage.data()[index]; } - EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) + EIGEN_STRONG_INLINE Scalar& coeffRef(Index rowId, Index colId) { if(Flags & RowMajorBit) - return m_storage.data()[col + row * m_storage.cols()]; + return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major - return m_storage.data()[row + col * m_storage.rows()]; + return m_storage.data()[rowId + colId * m_storage.rows()]; } EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) @@ -145,12 +153,12 @@ class PlainObjectBase : public internal::dense_xpr_base::type return m_storage.data()[index]; } - EIGEN_STRONG_INLINE const Scalar& coeffRef(Index row, Index col) const + EIGEN_STRONG_INLINE const Scalar& coeffRef(Index rowId, Index colId) const { if(Flags & RowMajorBit) - return m_storage.data()[col + row * m_storage.cols()]; + return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major - return m_storage.data()[row + col * m_storage.rows()]; + return m_storage.data()[rowId + colId * m_storage.rows()]; } EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const @@ -160,12 +168,12 @@ class PlainObjectBase : public internal::dense_xpr_base::type /** \internal */ template - EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const + EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const { return internal::ploadt (m_storage.data() + (Flags & RowMajorBit - ? col + row * m_storage.cols() - : row + col * m_storage.rows())); + ? colId + rowId * m_storage.cols() + : rowId + colId * m_storage.rows())); } /** \internal */ @@ -177,19 +185,19 @@ class PlainObjectBase : public internal::dense_xpr_base::type /** \internal */ template - EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketScalar& x) + EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val) { internal::pstoret (m_storage.data() + (Flags & RowMajorBit - ? col + row * m_storage.cols() - : row + col * m_storage.rows()), x); + ? 
colId + rowId * m_storage.cols() + : rowId + colId * m_storage.rows()), val); } /** \internal */ template - EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& x) + EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val) { - internal::pstoret(m_storage.data() + index, x); + internal::pstoret(m_storage.data() + index, val); } /** \returns a const pointer to the data array of this matrix */ @@ -216,17 +224,22 @@ class PlainObjectBase : public internal::dense_xpr_base::type * * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t) */ - EIGEN_STRONG_INLINE void resize(Index rows, Index cols) - { + EIGEN_STRONG_INLINE void resize(Index nbRows, Index nbCols) + { + eigen_assert( EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,nbRows==RowsAtCompileTime) + && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,nbCols==ColsAtCompileTime) + && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,nbRows<=MaxRowsAtCompileTime) + && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,nbCols<=MaxColsAtCompileTime) + && nbRows>=0 && nbCols>=0 && "Invalid sizes when resizing a matrix or array."); + internal::check_rows_cols_for_overflow::run(nbRows, nbCols); #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO - internal::check_rows_cols_for_overflow(rows, cols); - Index size = rows*cols; + Index size = nbRows*nbCols; bool size_changed = size != this->size(); - m_storage.resize(size, rows, cols); + m_storage.resize(size, nbRows, nbCols); if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED #else - internal::check_rows_cols_for_overflow(rows, cols); - m_storage.resize(rows*cols, rows, cols); + internal::check_rows_cols_for_overflow::run(nbRows, nbCols); + m_storage.resize(nbRows*nbCols, nbRows, nbCols); #endif } @@ -244,7 +257,7 @@ class PlainObjectBase : public internal::dense_xpr_base::type inline void resize(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase) - eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size); + eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime==Dynamic || size<=MaxSizeAtCompileTime)) || SizeAtCompileTime == size) && size>=0); #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO bool size_changed = size != this->size(); #endif @@ -265,9 +278,9 @@ class PlainObjectBase : public internal::dense_xpr_base::type * * \sa resize(Index,Index) */ - inline void resize(NoChange_t, Index cols) + inline void resize(NoChange_t, Index nbCols) { - resize(rows(), cols); + resize(rows(), nbCols); } /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange @@ -278,9 +291,9 @@ class PlainObjectBase : public internal::dense_xpr_base::type * * \sa resize(Index,Index) */ - inline void resize(Index rows, NoChange_t) + inline void resize(Index nbRows, NoChange_t) { - resize(rows, cols()); + resize(nbRows, cols()); } /** Resizes \c *this to have the same dimensions as \a other. 
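Illustration only, not part of the diff: the strengthened assertions added to resize() above reject sizes that contradict the compile-time dimensions; a minimal sketch of valid calls:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A;                 // both dimensions Dynamic
      A.resize(3, 4);                    // passes the new size checks
      A.resize(Eigen::NoChange, 6);      // keep 3 rows, switch to 6 columns

      Eigen::Matrix3d B;
      B.resize(3, 3);                    // allowed: matches the fixed size
      // B.resize(2, 2);                 // would now trigger the eigen_assert added above
      return 0;
    }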
@@ -294,7 +307,7 @@ class PlainObjectBase : public internal::dense_xpr_base::type EIGEN_STRONG_INLINE void resizeLike(const EigenBase& _other) { const OtherDerived& other = _other.derived(); - internal::check_rows_cols_for_overflow(other.rows(), other.cols()); + internal::check_rows_cols_for_overflow::run(other.rows(), other.cols()); const Index othersize = other.rows()*other.cols(); if(RowsAtCompileTime == 1) { @@ -318,9 +331,9 @@ class PlainObjectBase : public internal::dense_xpr_base::type * Matrices are resized relative to the top-left element. In case values need to be * appended to the matrix they will be uninitialized. */ - EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols) + EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, Index nbCols) { - internal::conservative_resize_like_impl::run(*this, rows, cols); + internal::conservative_resize_like_impl::run(*this, nbRows, nbCols); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. @@ -330,10 +343,10 @@ class PlainObjectBase : public internal::dense_xpr_base::type * * In case the matrix is growing, new rows will be uninitialized. */ - EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t) + EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, NoChange_t) { // Note: see the comment in conservativeResize(Index,Index) - conservativeResize(rows, cols()); + conservativeResize(nbRows, cols()); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. @@ -343,10 +356,10 @@ class PlainObjectBase : public internal::dense_xpr_base::type * * In case the matrix is growing, new columns will be uninitialized. */ - EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols) + EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index nbCols) { // Note: see the comment in conservativeResize(Index,Index) - conservativeResize(rows(), cols); + conservativeResize(rows(), nbCols); } /** Resizes the vector to \a size while retaining old values. 
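Illustration only, not part of the diff: a minimal sketch of the conservativeResize() overloads whose parameters are renamed above:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Constant(2, 2, 1.0);
      A.conservativeResize(3, 4);                // old 2x2 block kept, appended cells uninitialized
      A.conservativeResize(Eigen::NoChange, 2);  // drop columns, keep all rows
      return 0;
    }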
@@ -416,8 +429,8 @@ class PlainObjectBase : public internal::dense_xpr_base::type } #endif - EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols) - : m_storage(size, rows, cols) + EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols) + : m_storage(a_size, nbRows, nbCols) { // _check_template_params(); // EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED @@ -439,7 +452,7 @@ class PlainObjectBase : public internal::dense_xpr_base::type : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) { _check_template_params(); - internal::check_rows_cols_for_overflow(other.derived().rows(), other.derived().cols()); + internal::check_rows_cols_for_overflow::run(other.derived().rows(), other.derived().cols()); Base::operator=(other.derived()); } @@ -600,23 +613,19 @@ class PlainObjectBase : public internal::dense_xpr_base::type } template - EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if::type* = 0) + EIGEN_STRONG_INLINE void _init2(Index nbRows, Index nbCols, typename internal::enable_if::type* = 0) { EIGEN_STATIC_ASSERT(bool(NumTraits::IsInteger) && bool(NumTraits::IsInteger), FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED) - eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) - && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); - internal::check_rows_cols_for_overflow(rows, cols); - m_storage.resize(rows*cols,rows,cols); - EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + resize(nbRows,nbCols); } template - EIGEN_STRONG_INLINE void _init2(const Scalar& x, const Scalar& y, typename internal::enable_if::type* = 0) + EIGEN_STRONG_INLINE void _init2(const Scalar& val0, const Scalar& val1, typename internal::enable_if::type* = 0) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2) - m_storage.data()[0] = x; - m_storage.data()[1] = y; + m_storage.data()[0] = val0; + m_storage.data()[1] = val1; } template @@ -665,7 +674,7 @@ struct internal::conservative_resize_like_impl if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows (!Derived::IsRowMajor && _this.rows() == rows) ) // column-major and we change only the number of columns { - internal::check_rows_cols_for_overflow(rows, cols); + internal::check_rows_cols_for_overflow::run(rows, cols); _this.derived().m_storage.conservativeResize(rows*cols,rows,cols); } else diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Product.h b/resources/3rdparty/eigen/Eigen/src/Core/Product.h index 30aa8943b..314851d2e 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/Product.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/Product.h @@ -3,13 +3,15 @@ // // Copyright (C) 2008-2011 Gael Guennebaud // -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at http://mozilla.org/MPL/2.0/. +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
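Illustration only, not part of the diff: the _init2 helpers above dispatch the two-argument constructor on whether both arguments are integers and the type has a dynamic size; a minimal sketch of the two interpretations:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A(3, 4);      // integers + dynamic size: read as (rows, cols)
      Eigen::Vector2d v(1.0, 2.0);  // fixed size 2: read as the two coefficients
      return 0;
    }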
#ifndef EIGEN_PRODUCT_H #define EIGEN_PRODUCT_H +namespace Eigen { + template class Product; template class ProductImpl; @@ -25,25 +27,16 @@ template class ProductImpl; * */ +// Use ProductReturnType to get correct traits, in particular vectorization flags namespace internal { template struct traits > -{ - typedef MatrixXpr XprKind; - typedef typename remove_all::type LhsCleaned; - typedef typename remove_all::type RhsCleaned; - typedef typename scalar_product_traits::Scalar, typename traits::Scalar>::ReturnType Scalar; - typedef typename promote_storage_type::StorageKind, - typename traits::StorageKind>::ret StorageKind; - typedef typename promote_index_type::Index, - typename traits::Index>::type Index; + : traits::Type> +{ + // We want A+B*C to be of type Product and not Product + // TODO: This flag should eventually go in a separate evaluator traits class enum { - RowsAtCompileTime = LhsCleaned::RowsAtCompileTime, - ColsAtCompileTime = RhsCleaned::ColsAtCompileTime, - MaxRowsAtCompileTime = LhsCleaned::MaxRowsAtCompileTime, - MaxColsAtCompileTime = RhsCleaned::MaxColsAtCompileTime, - Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0), // TODO should be no storage order - CoeffReadCost = 0 // TODO CoeffReadCost should not be part of the expression traits + Flags = traits::Type>::Flags & ~EvalBeforeNestingBit }; }; } // end namespace internal @@ -95,4 +88,20 @@ class ProductImpl : public internal::dense_xpr_base +const Product +prod(const Lhs& lhs, const Rhs& rhs) +{ + return Product(lhs,rhs); +} + +} // end namespace Eigen + #endif // EIGEN_PRODUCT_H diff --git a/resources/3rdparty/eigen/Eigen/src/Core/ProductBase.h b/resources/3rdparty/eigen/Eigen/src/Core/ProductBase.h index ec12e5c9f..9748167a5 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/ProductBase.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/ProductBase.h @@ -87,10 +87,10 @@ class ProductBase : public MatrixBase typedef typename Base::PlainObject PlainObject; - ProductBase(const Lhs& lhs, const Rhs& rhs) - : m_lhs(lhs), m_rhs(rhs) + ProductBase(const Lhs& a_lhs, const Rhs& a_rhs) + : m_lhs(a_lhs), m_rhs(a_rhs) { - eigen_assert(lhs.cols() == rhs.rows() + eigen_assert(a_lhs.cols() == a_rhs.rows() && "invalid matrix product" && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } @@ -201,7 +201,7 @@ operator*(const ProductBase& prod, typename Derived::Scalar x) template typename internal::enable_if::value, const ScaledProduct >::type -operator*(const ProductBase& prod, typename Derived::RealScalar x) +operator*(const ProductBase& prod, const typename Derived::RealScalar& x) { return ScaledProduct(prod.derived(), x); } @@ -213,7 +213,7 @@ operator*(typename Derived::Scalar x,const ProductBase& prod) template typename internal::enable_if::value, const ScaledProduct >::type -operator*(typename Derived::RealScalar x,const ProductBase& prod) +operator*(const typename Derived::RealScalar& x,const ProductBase& prod) { return ScaledProduct(prod.derived(), x); } namespace internal { @@ -254,7 +254,7 @@ class ScaledProduct inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); } template - inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { m_prod.derived().scaleAndAddTo(dst,alpha * m_alpha); } + inline void scaleAndAddTo(Dest& dst,Scalar a_alpha) const { m_prod.derived().scaleAndAddTo(dst,a_alpha * m_alpha); } const Scalar& alpha() const { return m_alpha; } diff --git a/resources/3rdparty/eigen/Eigen/src/Core/ProductEvaluators.h 
b/resources/3rdparty/eigen/Eigen/src/Core/ProductEvaluators.h new file mode 100644 index 000000000..0c0570e44 --- /dev/null +++ b/resources/3rdparty/eigen/Eigen/src/Core/ProductEvaluators.h @@ -0,0 +1,411 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2011 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifndef EIGEN_PRODUCTEVALUATORS_H +#define EIGEN_PRODUCTEVALUATORS_H + +namespace Eigen { + +namespace internal { + +// We can evaluate the product either all at once, like GeneralProduct and its evalTo() function, or +// traverse the matrix coefficient by coefficient, like CoeffBasedProduct. Use the existing logic +// in ProductReturnType to decide. + +template +struct product_evaluator_dispatcher; + +template +struct evaluator_impl > + : product_evaluator_dispatcher, typename ProductReturnType::Type> +{ + typedef Product XprType; + typedef product_evaluator_dispatcher::Type> Base; + + evaluator_impl(const XprType& xpr) : Base(xpr) + { } +}; + +template +struct product_evaluator_traits_dispatcher; + +template +struct evaluator_traits > + : product_evaluator_traits_dispatcher, typename ProductReturnType::Type> +{ + static const int AssumeAliasing = 1; +}; + +// Case 1: Evaluate all at once +// +// We can view the GeneralProduct class as a part of the product evaluator. +// Four sub-cases: InnerProduct, OuterProduct, GemmProduct and GemvProduct. +// InnerProduct is special because GeneralProduct does not have an evalTo() method in this case. + +template +struct product_evaluator_traits_dispatcher, GeneralProduct > +{ + static const int HasEvalTo = 0; +}; + +template +struct product_evaluator_dispatcher, GeneralProduct > + : public evaluator::PlainObject>::type +{ + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + typedef typename evaluator::type evaluator_base; + + // TODO: Computation is too early (?) + product_evaluator_dispatcher(const XprType& xpr) : evaluator_base(m_result) + { + m_result.coeffRef(0,0) = (xpr.lhs().transpose().cwiseProduct(xpr.rhs())).sum(); + } + +protected: + PlainObject m_result; +}; + +// For the other three subcases, simply call the evalTo() method of GeneralProduct +// TODO: GeneralProduct should take evaluators, not expression objects. + +template +struct product_evaluator_traits_dispatcher, GeneralProduct > +{ + static const int HasEvalTo = 1; +}; + +template +struct product_evaluator_dispatcher, GeneralProduct > +{ + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + typedef typename evaluator::type evaluator_base; + + product_evaluator_dispatcher(const XprType& xpr) : m_xpr(xpr) + { } + + template + void evalTo(DstEvaluatorType /* not used */, DstXprType& dst) + { + dst.resize(m_xpr.rows(), m_xpr.cols()); + GeneralProduct(m_xpr.lhs(), m_xpr.rhs()).evalTo(dst); + } + +protected: + const XprType& m_xpr; +}; + +// Case 2: Evaluate coeff by coeff +// +// This is mostly taken from CoeffBasedProduct.h +// The main difference is that we add an extra argument to the etor_product_*_impl::run() function +// for the inner dimension of the product, because evaluator object do not know their size. 
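Illustration only, not part of the diff: the two evaluation strategies described in the comments above correspond roughly to a plain product, which is evaluated all at once into its destination, and lazyProduct(), which is computed coefficient by coefficient; which path a given product actually takes is decided by ProductReturnType and the evaluator traits, so this is only an intuition sketch:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(64, 64);
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(64, 64);
      Eigen::MatrixXd C = A * B;             // large product: evaluated at once (GEMM-style evalTo())

      Eigen::Matrix2d a, b;
      a.setRandom();
      b.setRandom();
      Eigen::Matrix2d c = a.lazyProduct(b);  // small product: coefficient-by-coefficient
      return 0;
    }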
+ +template +struct etor_product_coeff_impl; + +template +struct etor_product_packet_impl; + +template +struct product_evaluator_traits_dispatcher, CoeffBasedProduct > +{ + static const int HasEvalTo = 0; +}; + +template +struct product_evaluator_dispatcher, CoeffBasedProduct > + : evaluator_impl_base > +{ + typedef Product XprType; + typedef CoeffBasedProduct CoeffBasedProductType; + + product_evaluator_dispatcher(const XprType& xpr) + : m_lhsImpl(xpr.lhs()), + m_rhsImpl(xpr.rhs()), + m_innerDim(xpr.lhs().cols()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketScalar PacketScalar; + typedef typename XprType::PacketReturnType PacketReturnType; + + // Everything below here is taken from CoeffBasedProduct.h + + enum { + RowsAtCompileTime = traits::RowsAtCompileTime, + PacketSize = packet_traits::size, + InnerSize = traits::InnerSize, + CoeffReadCost = traits::CoeffReadCost, + Unroll = CoeffReadCost != Dynamic && CoeffReadCost <= EIGEN_UNROLLING_LIMIT, + CanVectorizeInner = traits::CanVectorizeInner + }; + + typedef typename evaluator::type LhsEtorType; + typedef typename evaluator::type RhsEtorType; + typedef etor_product_coeff_impl CoeffImpl; + + const CoeffReturnType coeff(Index row, Index col) const + { + Scalar res; + CoeffImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res); + return res; + } + + /* Allow index-based non-packet access. It is impossible though to allow index-based packed access, + * which is why we don't set the LinearAccessBit. + */ + const CoeffReturnType coeff(Index index) const + { + Scalar res; + const Index row = RowsAtCompileTime == 1 ? 0 : index; + const Index col = RowsAtCompileTime == 1 ? 
index : 0; + CoeffImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res); + return res; + } + + template + const PacketReturnType packet(Index row, Index col) const + { + PacketScalar res; + typedef etor_product_packet_impl PacketImpl; + PacketImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res); + return res; + } + +protected: + typename evaluator::type m_lhsImpl; + typename evaluator::type m_rhsImpl; + + // TODO: Get rid of m_innerDim if known at compile time + Index m_innerDim; +}; + +/*************************************************************************** +* Normal product .coeff() implementation (with meta-unrolling) +***************************************************************************/ + +/************************************** +*** Scalar path - no vectorization *** +**************************************/ + +template +struct etor_product_coeff_impl +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, RetScalar &res) + { + etor_product_coeff_impl::run(row, col, lhs, rhs, innerDim, res); + res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col); + } +}; + +template +struct etor_product_coeff_impl +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, RetScalar &res) + { + res = lhs.coeff(row, 0) * rhs.coeff(0, col); + } +}; + +template +struct etor_product_coeff_impl +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, RetScalar& res) + { + eigen_assert(innerDim>0 && "you are using a non initialized matrix"); + res = lhs.coeff(row, 0) * rhs.coeff(0, col); + for(Index i = 1; i < innerDim; ++i) + res += lhs.coeff(row, i) * rhs.coeff(i, col); + } +}; + +/******************************************* +*** Scalar path with inner vectorization *** +*******************************************/ + +template +struct etor_product_coeff_vectorized_unroller +{ + typedef typename Lhs::Index Index; + enum { PacketSize = packet_traits::size }; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, typename Lhs::PacketScalar &pres) + { + etor_product_coeff_vectorized_unroller::run(row, col, lhs, rhs, innerDim, pres); + pres = padd(pres, pmul( lhs.template packet(row, UnrollingIndex) , rhs.template packet(UnrollingIndex, col) )); + } +}; + +template +struct etor_product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet> +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::PacketScalar &pres) + { + pres = pmul(lhs.template packet(row, 0) , rhs.template packet(0, col)); + } +}; + +template +struct etor_product_coeff_impl +{ + typedef typename Lhs::PacketScalar Packet; + typedef typename Lhs::Index Index; + enum { PacketSize = packet_traits::size }; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, RetScalar &res) + { + Packet pres; + etor_product_coeff_vectorized_unroller::run(row, col, lhs, rhs, innerDim, pres); + etor_product_coeff_impl::run(row, col, lhs, rhs, innerDim, res); + res = predux(pres); + } +}; + +template +struct etor_product_coeff_vectorized_dyn_selector +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, 
const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res) + { + res = lhs.row(row).transpose().cwiseProduct(rhs.col(col)).sum(); + } +}; + +// NOTE the 3 following specializations are because taking .col(0) on a vector is a bit slower +// NOTE maybe they are now useless since we have a specialization for Block +template +struct etor_product_coeff_vectorized_dyn_selector +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res) + { + res = lhs.transpose().cwiseProduct(rhs.col(col)).sum(); + } +}; + +template +struct etor_product_coeff_vectorized_dyn_selector +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res) + { + res = lhs.row(row).transpose().cwiseProduct(rhs).sum(); + } +}; + +template +struct etor_product_coeff_vectorized_dyn_selector +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index /*row*/, Index /*col*/, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res) + { + res = lhs.transpose().cwiseProduct(rhs).sum(); + } +}; + +template +struct etor_product_coeff_impl +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, typename Lhs::Scalar &res) + { + etor_product_coeff_vectorized_dyn_selector::run(row, col, lhs, rhs, innerDim, res); + } +}; + +/******************* +*** Packet path *** +*******************/ + +template +struct etor_product_packet_impl +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) + { + etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); + res = pmadd(pset1(lhs.coeff(row, UnrollingIndex)), rhs.template packet(UnrollingIndex, col), res); + } +}; + +template +struct etor_product_packet_impl +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) + { + etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); + res = pmadd(lhs.template packet(row, UnrollingIndex), pset1(rhs.coeff(UnrollingIndex, col)), res); + } +}; + +template +struct etor_product_packet_impl +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) + { + res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); + } +}; + +template +struct etor_product_packet_impl +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) + { + res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); + } +}; + +template +struct etor_product_packet_impl +{ + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) + { + eigen_assert(innerDim>0 && "you are using a non initialized matrix"); + res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); + for(Index i = 1; i < innerDim; ++i) + res = pmadd(pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); + } +}; + +template +struct etor_product_packet_impl +{ + typedef typename 
Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) + { + eigen_assert(innerDim>0 && "you are using a non initialized matrix"); + res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); + for(Index i = 1; i < innerDim; ++i) + res = pmadd(lhs.template packet(row, i), pset1(rhs.coeff(i, col)), res); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PRODUCT_EVALUATORS_H diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Random.h b/resources/3rdparty/eigen/Eigen/src/Core/Random.h index a9f7f4346..bba99fc7c 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/Random.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/Random.h @@ -141,9 +141,9 @@ PlainObjectBase::setRandom(Index size) */ template EIGEN_STRONG_INLINE Derived& -PlainObjectBase::setRandom(Index rows, Index cols) +PlainObjectBase::setRandom(Index nbRows, Index nbCols) { - resize(rows, cols); + resize(nbRows, nbCols); return setRandom(); } diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Ref.h b/resources/3rdparty/eigen/Eigen/src/Core/Ref.h new file mode 100644 index 000000000..9c409eecf --- /dev/null +++ b/resources/3rdparty/eigen/Eigen/src/Core/Ref.h @@ -0,0 +1,254 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_REF_H +#define EIGEN_REF_H + +namespace Eigen { + +template class RefBase; +template,OuterStride<> >::type > class Ref; + +/** \class Ref + * \ingroup Core_Module + * + * \brief A matrix or vector expression mapping an existing expressions + * + * \tparam PlainObjectType the equivalent matrix type of the mapped data + * \tparam Options specifies whether the pointer is \c #Aligned, or \c #Unaligned. + * The default is \c #Unaligned. + * \tparam StrideType optionally specifies strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1), + * but accept a variable outer stride (leading dimension). + * This can be overridden by specifying strides. + * The type passed here must be a specialization of the Stride template, see examples below. + * + * This class permits to write non template functions taking Eigen's object as parameters while limiting the number of copies. + * A Ref<> object can represent either a const expression or a l-value: + * \code + * // in-out argument: + * void foo1(Ref x); + * + * // read-only const argument: + * void foo2(const Ref& x); + * \endcode + * + * In the in-out case, the input argument must satisfies the constraints of the actual Ref<> type, otherwise a compilation issue will be triggered. + * By default, a Ref can reference any dense vector expression of float having a contiguous memory layout. + * Likewise, a Ref can reference any column major dense matrix expression of float whose column's elements are contiguously stored with + * the possibility to have a constant space inbetween each column, i.e.: the inner stride mmust be equal to 1, but the outer-stride (or leading dimension), + * can be greater than the number of rows. + * + * In the const case, if the input expression does not match the above requirement, then it is evaluated into a temporary before being passed to the function. 
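Illustration only, not part of the diff (the function names scale and sumOf are made up for this sketch): the two calling conventions described above, an in-out Ref and a read-only const Ref, look like this in practice, complementing the upstream \code samples that follow:

    #include <Eigen/Dense>

    // In-out: the argument must already have a matching layout (inner stride 1).
    void scale(Eigen::Ref<Eigen::VectorXf> x) { x *= 2.0f; }

    // Read-only: non-matching expressions are evaluated into a temporary first.
    float sumOf(const Eigen::Ref<const Eigen::VectorXf>& x) { return x.sum(); }

    int main()
    {
      Eigen::MatrixXf A = Eigen::MatrixXf::Random(4, 4);
      Eigen::VectorXf v = Eigen::VectorXf::Random(4);
      scale(v);                   // plain vector: referenced directly
      scale(A.col(1));            // contiguous column: referenced directly, modifies A
      float s = sumOf(2.0f * v);  // expression: copied into a hidden temporary
      (void)s;
      return 0;
    }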
+ * Here are some examples: + * \code + * MatrixXf A; + * VectorXf a; + * foo1(a.head()); // OK + * foo1(A.col()); // OK + * foo1(A.row()); // compilation error because here innerstride!=1 + * foo2(A.row()); // The row is copied into a contiguous temporary + * foo2(2*a); // The expression is evaluated into a temporary + * foo2(A.col().segment(2,4)); // No temporary + * \endcode + * + * The range of inputs that can be referenced without temporary can be enlarged using the last two template parameter. + * Here is an example accepting an innerstride!=1: + * \code + * // in-out argument: + * void foo3(Ref > x); + * foo3(A.row()); // OK + * \endcode + * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involved more + * expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one might propose to overloads internally calling a + * template function, e.g.: + * \code + * // in the .h: + * void foo(const Ref& A); + * void foo(const Ref >& A); + * + * // in the .cpp: + * template void foo_impl(const TypeOfA& A) { + * ... // crazy code goes here + * } + * void foo(const Ref& A) { foo_impl(A); } + * void foo(const Ref >& A) { foo_impl(A); } + * \endcode + * + * + * \sa PlainObjectBase::Map(), \ref TopicStorageOrders + */ + +namespace internal { + +template +struct traits > + : public traits > +{ + typedef _PlainObjectType PlainObjectType; + typedef _StrideType StrideType; + enum { + Options = _Options + }; + + template struct match { + enum { + HasDirectAccess = internal::has_direct_access::ret, + StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)), + InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic) + || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime) + || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1), + OuterStrideMatch = Derived::IsVectorAtCompileTime + || int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime), + AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits::Flags&AlignedBit)==AlignedBit), + MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch + }; + typedef typename internal::conditional::type type; + }; + +}; + +template +struct traits > : public traits {}; + +} + +template class RefBase + : public MapBase +{ + typedef typename internal::traits::PlainObjectType PlainObjectType; + typedef typename internal::traits::StrideType StrideType; + +public: + + typedef MapBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(RefBase) + + inline Index innerStride() const + { + return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; + } + + inline Index outerStride() const + { + return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer() + : IsVectorAtCompileTime ? this->size() + : int(Flags)&RowMajorBit ? 
this->cols() + : this->rows(); + } + + RefBase() + : Base(0,RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime), + // Stride<> does not allow default ctor for Dynamic strides, so let' initialize it with dummy values: + m_stride(StrideType::OuterStrideAtCompileTime==Dynamic?0:StrideType::OuterStrideAtCompileTime, + StrideType::InnerStrideAtCompileTime==Dynamic?0:StrideType::InnerStrideAtCompileTime) + {} + +protected: + + typedef Stride StrideBase; + + template + void construct(Expression& expr) + { + if(PlainObjectType::RowsAtCompileTime==1) + { + eigen_assert(expr.rows()==1 || expr.cols()==1); + ::new (static_cast(this)) Base(expr.data(), 1, expr.size()); + } + else if(PlainObjectType::ColsAtCompileTime==1) + { + eigen_assert(expr.rows()==1 || expr.cols()==1); + ::new (static_cast(this)) Base(expr.data(), expr.size(), 1); + } + else + ::new (static_cast(this)) Base(expr.data(), expr.rows(), expr.cols()); + ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(), + StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride()); + } + + StrideBase m_stride; +}; + + +template class Ref + : public RefBase > +{ + typedef internal::traits Traits; + public: + + typedef RefBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Ref) + + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + inline Ref(PlainObjectBase& expr, + typename internal::enable_if::MatchAtCompileTime),Derived>::type* = 0) + { + Base::construct(expr); + } + template + inline Ref(const DenseBase& expr, + typename internal::enable_if::value&&bool(Traits::template match::MatchAtCompileTime)),Derived>::type* = 0, + int = Derived::ThisConstantIsPrivateInPlainObjectBase) + #else + template + inline Ref(DenseBase& expr) + #endif + { + Base::construct(expr.const_cast_derived()); + } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref) + +}; + +// this is the const ref version +template class Ref + : public RefBase > +{ + typedef internal::traits Traits; + public: + + typedef RefBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Ref) + + template + inline Ref(const DenseBase& expr) + { +// std::cout << match_helper::HasDirectAccess << "," << match_helper::OuterStrideMatch << "," << match_helper::InnerStrideMatch << "\n"; +// std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n"; +// std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n"; + construct(expr.derived(), typename Traits::template match::type()); + } + + protected: + + template + void construct(const Expression& expr,internal::true_type) + { + Base::construct(expr); + } + + template + void construct(const Expression& expr, internal::false_type) + { +// std::cout << "Ref: copy\n"; + m_object = expr; + Base::construct(m_object); + } + + protected: + PlainObjectType m_object; +}; + +} // end namespace Eigen + +#endif // EIGEN_REF_H diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Replicate.h b/resources/3rdparty/eigen/Eigen/src/Core/Replicate.h index b61fdc29e..dde86a834 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/Replicate.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/Replicate.h @@ -70,8 +70,8 @@ template class Replicate EIGEN_DENSE_PUBLIC_INTERFACE(Replicate) template - inline explicit Replicate(const OriginalMatrixType& matrix) - : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor) + inline explicit Replicate(const OriginalMatrixType& a_matrix) + : m_matrix(a_matrix), m_rowFactor(RowFactor), 
m_colFactor(ColFactor) { EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) @@ -79,8 +79,8 @@ template class Replicate } template - inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor) - : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) + inline Replicate(const OriginalMatrixType& a_matrix, Index rowFactor, Index colFactor) + : m_matrix(a_matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) { EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) @@ -89,27 +89,27 @@ template class Replicate inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); } inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); } - inline Scalar coeff(Index row, Index col) const + inline Scalar coeff(Index rowId, Index colId) const { // try to avoid using modulo; this is a pure optimization strategy const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 - : RowFactor==1 ? row - : row%m_matrix.rows(); + : RowFactor==1 ? rowId + : rowId%m_matrix.rows(); const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 - : ColFactor==1 ? col - : col%m_matrix.cols(); + : ColFactor==1 ? colId + : colId%m_matrix.cols(); return m_matrix.coeff(actual_row, actual_col); } template - inline PacketScalar packet(Index row, Index col) const + inline PacketScalar packet(Index rowId, Index colId) const { const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 - : RowFactor==1 ? row - : row%m_matrix.rows(); + : RowFactor==1 ? rowId + : rowId%m_matrix.rows(); const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 - : ColFactor==1 ? col - : col%m_matrix.cols(); + : ColFactor==1 ? colId + : colId%m_matrix.cols(); return m_matrix.template packet(actual_row, actual_col); } diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Select.h b/resources/3rdparty/eigen/Eigen/src/Core/Select.h index 2bf6e91d0..7ee8f23ba 100644 --- a/resources/3rdparty/eigen/Eigen/src/Core/Select.h +++ b/resources/3rdparty/eigen/Eigen/src/Core/Select.h @@ -60,10 +60,10 @@ class Select : internal::no_assignment_operator, typedef typename internal::dense_xpr_base