From f5fd37f992e0df679dda1ad0bd41ec18dcb4d31b Mon Sep 17 00:00:00 2001
From: PBerger <philipp.berger@rwth-aachen.de>
Date: Mon, 19 Nov 2012 01:34:09 +0100
Subject: [PATCH] Updated Eigen to 3.1.2 (5097c01bcdc4)

---
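A minimal sanity check for the bundled headers (illustrative sketch only, assuming the include path is pointed at resources/3rdParty/eigen): Eigen's own version macros, defined in Eigen/src/Core/util/Macros.h below, should report 3.1.2 after this update.

    // check_eigen_version.cpp -- compile with the bundled headers, e.g.
    //   g++ -I resources/3rdParty/eigen check_eigen_version.cpp
    // (the include path above is an assumption about this repo's layout)
    #include <iostream>
    #include <Eigen/Core>

    int main() {
        std::cout << EIGEN_WORLD_VERSION << "."
                  << EIGEN_MAJOR_VERSION << "."
                  << EIGEN_MINOR_VERSION << std::endl;  // expected: 3.1.2
        return 0;
    }
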
 resources/3rdParty/eigen/.hg_archival.txt     |    4 +
 resources/{3rdparty => 3rdParty}/eigen/.hgeol |    0
 .../{3rdparty => 3rdParty}/eigen/.hgignore    |    0
 resources/3rdParty/eigen/.hgtags              |   24 +
 resources/3rdParty/eigen/.krazy               |    3 +
 .../eigen/CMakeLists.txt                      |    0
 .../{3rdparty => 3rdParty}/eigen/COPYING.BSD  |    0
 .../{3rdparty => 3rdParty}/eigen/COPYING.GPL  |    0
 .../{3rdparty => 3rdParty}/eigen/COPYING.LGPL |    0
 .../eigen/COPYING.MINPACK                     |    0
 .../{3rdparty => 3rdParty}/eigen/COPYING.MPL2 |    0
 .../eigen/COPYING.README                      |    0
 .../eigen/CTestConfig.cmake                   |    0
 .../eigen/CTestCustom.cmake.in                |    0
 .../{3rdparty => 3rdParty}/eigen/Eigen/Array  |    0
 .../eigen/Eigen/CMakeLists.txt                |    0
 .../eigen/Eigen/Cholesky                      |    0
 .../eigen/Eigen/CholmodSupport                |    0
 resources/3rdParty/eigen/Eigen/Core           |  366 +++++
 .../{3rdparty => 3rdParty}/eigen/Eigen/Dense  |    0
 .../{3rdparty => 3rdParty}/eigen/Eigen/Eigen  |    0
 .../eigen/Eigen/Eigen2Support                 |    0
 resources/3rdParty/eigen/Eigen/Eigenvalues    |   46 +
 .../eigen/Eigen/Geometry                      |    0
 .../eigen/Eigen/Householder                   |    0
 .../eigen/Eigen/IterativeLinearSolvers        |    0
 .../{3rdparty => 3rdParty}/eigen/Eigen/Jacobi |    0
 .../{3rdparty => 3rdParty}/eigen/Eigen/LU     |    0
 .../eigen/Eigen/LeastSquares                  |    0
 .../3rdParty/eigen/Eigen/OrderingMethods      |   23 +
 .../eigen/Eigen/PaStiXSupport                 |    0
 .../eigen/Eigen/PardisoSupport                |    0
 .../{3rdparty => 3rdParty}/eigen/Eigen/QR     |    0
 .../eigen/Eigen/QtAlignedMalloc               |    0
 .../{3rdparty => 3rdParty}/eigen/Eigen/SVD    |    0
 .../{3rdparty => 3rdParty}/eigen/Eigen/Sparse |    0
 .../eigen/Eigen/SparseCholesky                |    0
 .../eigen/Eigen/SparseCore                    |    0
 .../eigen/Eigen/StdDeque                      |    0
 .../eigen/Eigen/StdList                       |    0
 .../eigen/Eigen/StdVector                     |    0
 .../eigen/Eigen/SuperLUSupport                |    0
 .../eigen/Eigen/UmfPackSupport                |    0
 .../eigen/Eigen/src/CMakeLists.txt            |    0
 .../eigen/Eigen/src/Cholesky/CMakeLists.txt   |    0
 .../3rdParty/eigen/Eigen/src/Cholesky/LDLT.h  |  592 +++++++
 .../eigen/Eigen/src/Cholesky/LLT.h            |    0
 .../eigen/Eigen/src/Cholesky/LLT_MKL.h        |    0
 .../Eigen/src/CholmodSupport/CMakeLists.txt   |    0
 .../Eigen/src/CholmodSupport/CholmodSupport.h |  579 +++++++
 .../3rdParty/eigen/Eigen/src/Core/Array.h     |  308 ++++
 .../eigen/Eigen/src/Core/ArrayBase.h          |    0
 .../eigen/Eigen/src/Core/ArrayWrapper.h       |  254 +++
 .../eigen/Eigen/src/Core/Assign.h             |    0
 .../eigen/Eigen/src/Core/Assign_MKL.h         |  224 +++
 .../eigen/Eigen/src/Core/BandMatrix.h         |    0
 .../3rdParty/eigen/Eigen/src/Core/Block.h     |  357 ++++
 .../eigen/Eigen/src/Core/BooleanRedux.h       |    0
 .../eigen/Eigen/src/Core/CMakeLists.txt       |    0
 .../eigen/Eigen/src/Core/CommaInitializer.h   |  141 ++
 .../eigen/Eigen/src/Core/CwiseBinaryOp.h      |  229 +++
 .../eigen/Eigen/src/Core/CwiseNullaryOp.h     |  864 ++++++++++
 .../eigen/Eigen/src/Core/CwiseUnaryOp.h       |  126 ++
 .../eigen/Eigen/src/Core/CwiseUnaryView.h     |    0
 .../3rdParty/eigen/Eigen/src/Core/DenseBase.h |  533 ++++++
 .../eigen/Eigen/src/Core/DenseCoeffsBase.h    |  754 +++++++++
 .../eigen/Eigen/src/Core/DenseStorage.h       |  303 ++++
 .../3rdParty/eigen/Eigen/src/Core/Diagonal.h  |  236 +++
 .../eigen/Eigen/src/Core/DiagonalMatrix.h     |  307 ++++
 .../eigen/Eigen/src/Core/DiagonalProduct.h    |  123 ++
 resources/3rdParty/eigen/Eigen/src/Core/Dot.h |  261 +++
 .../eigen/Eigen/src/Core/EigenBase.h          |    0
 .../eigen/Eigen/src/Core/Flagged.h            |    0
 .../eigen/Eigen/src/Core/ForceAlignedAccess.h |    0
 .../3rdParty/eigen/Eigen/src/Core/Functors.h  |  967 +++++++++++
 .../3rdParty/eigen/Eigen/src/Core/Fuzzy.h     |  150 ++
 .../eigen/Eigen/src/Core/GeneralProduct.h     |  613 +++++++
 .../eigen/Eigen/src/Core/GenericPacketMath.h  |    0
 .../eigen/Eigen/src/Core/GlobalFunctions.h    |    0
 .../eigen/Eigen/src/Core/IO.h                 |    0
 resources/3rdParty/eigen/Eigen/src/Core/Map.h |  192 +++
 .../3rdParty/eigen/Eigen/src/Core/MapBase.h   |  242 +++
 .../eigen/Eigen/src/Core/MathFunctions.h      |  842 ++++++++++
 .../eigen/Eigen/src/Core/Matrix.h             |    0
 .../eigen/Eigen/src/Core/MatrixBase.h         |  511 ++++++
 .../eigen/Eigen/src/Core/NestByValue.h        |    0
 .../3rdParty/eigen/Eigen/src/Core/NoAlias.h   |  125 ++
 .../eigen/Eigen/src/Core/NumTraits.h          |    0
 .../eigen/Eigen/src/Core/PermutationMatrix.h  |  687 ++++++++
 .../eigen/Eigen/src/Core/PlainObjectBase.h    |  767 +++++++++
 .../3rdParty/eigen/Eigen/src/Core/Product.h   |   98 ++
 .../eigen/Eigen/src/Core/ProductBase.h        |  278 ++++
 .../3rdParty/eigen/Eigen/src/Core/Random.h    |  152 ++
 .../eigen/Eigen/src/Core/Redux.h              |    0
 .../3rdParty/eigen/Eigen/src/Core/Replicate.h |  177 ++
 .../eigen/Eigen/src/Core/ReturnByValue.h      |    0
 .../eigen/Eigen/src/Core/Reverse.h            |    0
 .../3rdParty/eigen/Eigen/src/Core/Select.h    |  162 ++
 .../eigen/Eigen/src/Core/SelfAdjointView.h    |    0
 .../eigen/Eigen/src/Core/SelfCwiseBinaryOp.h  |    0
 .../eigen/Eigen/src/Core/SolveTriangular.h    |    0
 .../eigen/Eigen/src/Core/StableNorm.h         |    0
 .../eigen/Eigen/src/Core/Stride.h             |    0
 .../3rdParty/eigen/Eigen/src/Core/Swap.h      |  126 ++
 .../3rdParty/eigen/Eigen/src/Core/Transpose.h |  414 +++++
 .../eigen/Eigen/src/Core/Transpositions.h     |  436 +++++
 .../eigen/Eigen/src/Core/TriangularMatrix.h   |  828 ++++++++++
 .../eigen/Eigen/src/Core/VectorBlock.h        |  284 ++++
 .../eigen/Eigen/src/Core/VectorwiseOp.h       |    0
 .../3rdParty/eigen/Eigen/src/Core/Visitor.h   |  237 +++
 .../src/Core/arch/AltiVec/CMakeLists.txt      |    0
 .../Eigen/src/Core/arch/AltiVec/Complex.h     |    0
 .../Eigen/src/Core/arch/AltiVec/PacketMath.h  |    0
 .../eigen/Eigen/src/Core/arch/CMakeLists.txt  |    0
 .../src/Core/arch/Default/CMakeLists.txt      |    0
 .../Eigen/src/Core/arch/Default/Settings.h    |    0
 .../Eigen/src/Core/arch/NEON/CMakeLists.txt   |    0
 .../eigen/Eigen/src/Core/arch/NEON/Complex.h  |    0
 .../Eigen/src/Core/arch/NEON/PacketMath.h     |  424 +++++
 .../Eigen/src/Core/arch/SSE/CMakeLists.txt    |    0
 .../eigen/Eigen/src/Core/arch/SSE/Complex.h   |    0
 .../Eigen/src/Core/arch/SSE/MathFunctions.h   |  384 +++++
 .../Eigen/src/Core/arch/SSE/PacketMath.h      |  632 ++++++++
 .../Eigen/src/Core/products/CMakeLists.txt    |    0
 .../src/Core/products/CoeffBasedProduct.h     |    0
 .../Core/products/GeneralBlockPanelKernel.h   | 1319 +++++++++++++++
 .../src/Core/products/GeneralMatrixMatrix.h   |    0
 .../products/GeneralMatrixMatrixTriangular.h  |    0
 .../GeneralMatrixMatrixTriangular_MKL.h       |    0
 .../Core/products/GeneralMatrixMatrix_MKL.h   |    0
 .../src/Core/products/GeneralMatrixVector.h   |  552 +++++++
 .../Core/products/GeneralMatrixVector_MKL.h   |    0
 .../Eigen/src/Core/products/Parallelizer.h    |    0
 .../Core/products/SelfadjointMatrixMatrix.h   |    0
 .../products/SelfadjointMatrixMatrix_MKL.h    |    0
 .../Core/products/SelfadjointMatrixVector.h   |    0
 .../products/SelfadjointMatrixVector_MKL.h    |    0
 .../src/Core/products/SelfadjointProduct.h    |    0
 .../Core/products/SelfadjointRank2Update.h    |    0
 .../Core/products/TriangularMatrixMatrix.h    |    0
 .../products/TriangularMatrixMatrix_MKL.h     |    0
 .../Core/products/TriangularMatrixVector.h    |    0
 .../products/TriangularMatrixVector_MKL.h     |    0
 .../Core/products/TriangularSolverMatrix.h    |    0
 .../products/TriangularSolverMatrix_MKL.h     |    0
 .../Core/products/TriangularSolverVector.h    |    0
 .../eigen/Eigen/src/Core/util/BlasUtil.h      |    0
 .../eigen/Eigen/src/Core/util/CMakeLists.txt  |    0
 .../eigen/Eigen/src/Core/util/Constants.h     |  431 +++++
 .../src/Core/util/DisableStupidWarnings.h     |    0
 .../Eigen/src/Core/util/ForwardDeclarations.h |  298 ++++
 .../eigen/Eigen/src/Core/util/MKL_support.h   |    0
 .../eigen/Eigen/src/Core/util/Macros.h        |  410 +++++
 .../eigen/Eigen/src/Core/util/Memory.h        |  952 +++++++++++
 .../eigen/Eigen/src/Core/util/Meta.h          |    0
 .../eigen/Eigen/src/Core/util/NonMPL2.h       |    0
 .../src/Core/util/ReenableStupidWarnings.h    |    0
 .../eigen/Eigen/src/Core/util/StaticAssert.h  |  205 +++
 .../eigen/Eigen/src/Core/util/XprHelper.h     |  447 +++++
 .../eigen/Eigen/src/Eigen2Support/Block.h     |    0
 .../Eigen/src/Eigen2Support/CMakeLists.txt    |    0
 .../eigen/Eigen/src/Eigen2Support/Cwise.h     |    0
 .../Eigen/src/Eigen2Support/CwiseOperators.h  |    0
 .../src/Eigen2Support/Geometry/AlignedBox.h   |  159 ++
 .../Eigen/src/Eigen2Support/Geometry/All.h    |    0
 .../src/Eigen2Support/Geometry/AngleAxis.h    |  214 +++
 .../src/Eigen2Support/Geometry/CMakeLists.txt |    0
 .../src/Eigen2Support/Geometry/Hyperplane.h   |  254 +++
 .../Eigen2Support/Geometry/ParametrizedLine.h |  141 ++
 .../src/Eigen2Support/Geometry/Quaternion.h   |  495 ++++++
 .../src/Eigen2Support/Geometry/Rotation2D.h   |  145 ++
 .../src/Eigen2Support/Geometry/RotationBase.h |  123 ++
 .../src/Eigen2Support/Geometry/Scaling.h      |  167 ++
 .../src/Eigen2Support/Geometry/Transform.h    |  786 +++++++++
 .../src/Eigen2Support/Geometry/Translation.h  |  184 +++
 .../eigen/Eigen/src/Eigen2Support/LU.h        |    0
 .../eigen/Eigen/src/Eigen2Support/Lazy.h      |    0
 .../Eigen/src/Eigen2Support/LeastSquares.h    |  170 ++
 .../eigen/Eigen/src/Eigen2Support/Macros.h    |    0
 .../Eigen/src/Eigen2Support/MathFunctions.h   |    0
 .../eigen/Eigen/src/Eigen2Support/Memory.h    |    0
 .../eigen/Eigen/src/Eigen2Support/Meta.h      |    0
 .../eigen/Eigen/src/Eigen2Support/Minor.h     |    0
 .../eigen/Eigen/src/Eigen2Support/QR.h        |    0
 .../eigen/Eigen/src/Eigen2Support/SVD.h       |  638 ++++++++
 .../src/Eigen2Support/TriangularSolver.h      |    0
 .../Eigen/src/Eigen2Support/VectorBlock.h     |    0
 .../Eigen/src/Eigenvalues/CMakeLists.txt      |    0
 .../src/Eigenvalues/ComplexEigenSolver.h      |  319 ++++
 .../Eigen/src/Eigenvalues/ComplexSchur.h      |  398 +++++
 .../Eigen/src/Eigenvalues/ComplexSchur_MKL.h  |    0
 .../eigen/Eigen/src/Eigenvalues/EigenSolver.h |  579 +++++++
 .../GeneralizedSelfAdjointEigenSolver.h       |    0
 .../src/Eigenvalues/HessenbergDecomposition.h |    0
 .../src/Eigenvalues/MatrixBaseEigenvalues.h   |    0
 .../eigen/Eigen/src/Eigenvalues/RealSchur.h   |  466 ++++++
 .../Eigen/src/Eigenvalues/RealSchur_MKL.h     |    0
 .../src/Eigenvalues/SelfAdjointEigenSolver.h  |    0
 .../Eigenvalues/SelfAdjointEigenSolver_MKL.h  |    0
 .../src/Eigenvalues/Tridiagonalization.h      |    0
 .../eigen/Eigen/src/Geometry/AlignedBox.h     |  375 +++++
 .../eigen/Eigen/src/Geometry/AngleAxis.h      |  230 +++
 .../eigen/Eigen/src/Geometry/CMakeLists.txt   |    0
 .../eigen/Eigen/src/Geometry/EulerAngles.h    |    0
 .../eigen/Eigen/src/Geometry/Homogeneous.h    |    0
 .../eigen/Eigen/src/Geometry/Hyperplane.h     |  269 +++
 .../eigen/Eigen/src/Geometry/OrthoMethods.h   |    0
 .../Eigen/src/Geometry/ParametrizedLine.h     |  195 +++
 .../eigen/Eigen/src/Geometry/Quaternion.h     |  778 +++++++++
 .../eigen/Eigen/src/Geometry/Rotation2D.h     |  154 ++
 .../eigen/Eigen/src/Geometry/RotationBase.h   |    0
 .../eigen/Eigen/src/Geometry/Scaling.h        |  166 ++
 .../eigen/Eigen/src/Geometry/Transform.h      | 1440 +++++++++++++++++
 .../eigen/Eigen/src/Geometry/Translation.h    |    0
 .../eigen/Eigen/src/Geometry/Umeyama.h        |  172 ++
 .../Eigen/src/Geometry/arch/CMakeLists.txt    |    0
 .../Eigen/src/Geometry/arch/Geometry_SSE.h    |    0
 .../Eigen/src/Householder/BlockHouseholder.h  |    0
 .../Eigen/src/Householder/CMakeLists.txt      |    0
 .../eigen/Eigen/src/Householder/Householder.h |    0
 .../src/Householder/HouseholderSequence.h     |    0
 .../BasicPreconditioners.h                    |    0
 .../src/IterativeLinearSolvers/BiCGSTAB.h     |  254 +++
 .../src/IterativeLinearSolvers/CMakeLists.txt |    0
 .../ConjugateGradient.h                       |    0
 .../IterativeLinearSolvers/IncompleteLUT.h    |  466 ++++++
 .../IterativeSolverBase.h                     |    0
 .../eigen/Eigen/src/Jacobi/CMakeLists.txt     |    0
 .../3rdParty/eigen/Eigen/src/Jacobi/Jacobi.h  |  420 +++++
 .../eigen/Eigen/src/LU/CMakeLists.txt         |    0
 .../eigen/Eigen/src/LU/Determinant.h          |    0
 .../eigen/Eigen/src/LU/FullPivLU.h            |    0
 .../eigen/Eigen/src/LU/Inverse.h              |    0
 .../eigen/Eigen/src/LU/PartialPivLU.h         |    0
 .../eigen/Eigen/src/LU/PartialPivLU_MKL.h     |    0
 .../eigen/Eigen/src/LU/arch/CMakeLists.txt    |    0
 .../eigen/Eigen/src/LU/arch/Inverse_SSE.h     |    0
 .../eigen/Eigen/src/OrderingMethods/Amd.h     |    0
 .../Eigen/src/OrderingMethods/CMakeLists.txt  |    0
 .../Eigen/src/PaStiXSupport/CMakeLists.txt    |    0
 .../Eigen/src/PaStiXSupport/PaStiXSupport.h   |    0
 .../Eigen/src/PardisoSupport/CMakeLists.txt   |    0
 .../Eigen/src/PardisoSupport/PardisoSupport.h |  615 +++++++
 .../eigen/Eigen/src/QR/CMakeLists.txt         |    0
 .../eigen/Eigen/src/QR/ColPivHouseholderQR.h  |    0
 .../Eigen/src/QR/ColPivHouseholderQR_MKL.h    |    0
 .../eigen/Eigen/src/QR/FullPivHouseholderQR.h |    0
 .../eigen/Eigen/src/QR/HouseholderQR.h        |  343 ++++
 .../eigen/Eigen/src/QR/HouseholderQR_MKL.h    |    0
 .../eigen/Eigen/src/SVD/CMakeLists.txt        |    0
 .../3rdParty/eigen/Eigen/src/SVD/JacobiSVD.h  |  867 ++++++++++
 .../eigen/Eigen/src/SVD/JacobiSVD_MKL.h       |    0
 .../Eigen/src/SVD/UpperBidiagonalization.h    |    0
 .../Eigen/src/SparseCholesky/CMakeLists.txt   |    0
 .../src/SparseCholesky/SimplicialCholesky.h   |    0
 .../eigen/Eigen/src/SparseCore/AmbiVector.h   |    0
 .../eigen/Eigen/src/SparseCore/CMakeLists.txt |    0
 .../Eigen/src/SparseCore/CompressedStorage.h  |  233 +++
 .../ConservativeSparseSparseProduct.h         |    0
 .../Eigen/src/SparseCore/CoreIterators.h      |    0
 .../Eigen/src/SparseCore/MappedSparseMatrix.h |    0
 .../eigen/Eigen/src/SparseCore/SparseAssign.h |    0
 .../eigen/Eigen/src/SparseCore/SparseBlock.h  |    0
 .../src/SparseCore/SparseCwiseBinaryOp.h      |    0
 .../Eigen/src/SparseCore/SparseCwiseUnaryOp.h |    0
 .../Eigen/src/SparseCore/SparseDenseProduct.h |  300 ++++
 .../src/SparseCore/SparseDiagonalProduct.h    |  192 +++
 .../eigen/Eigen/src/SparseCore/SparseDot.h    |    0
 .../eigen/Eigen/src/SparseCore/SparseFuzzy.h  |    0
 .../eigen/Eigen/src/SparseCore/SparseMatrix.h | 1134 +++++++++++++
 .../Eigen/src/SparseCore/SparseMatrixBase.h   |  458 ++++++
 .../Eigen/src/SparseCore/SparsePermutation.h  |    0
 .../Eigen/src/SparseCore/SparseProduct.h      |  186 +++
 .../eigen/Eigen/src/SparseCore/SparseRedux.h  |    0
 .../src/SparseCore/SparseSelfAdjointView.h    |  480 ++++++
 .../SparseSparseProductWithPruning.h          |  149 ++
 .../Eigen/src/SparseCore/SparseTranspose.h    |   61 +
 .../src/SparseCore/SparseTriangularView.h     |    0
 .../eigen/Eigen/src/SparseCore/SparseUtil.h   |    0
 .../eigen/Eigen/src/SparseCore/SparseVector.h |  398 +++++
 .../eigen/Eigen/src/SparseCore/SparseView.h   |   98 ++
 .../Eigen/src/SparseCore/TriangularSolver.h   |    0
 .../eigen/Eigen/src/StlSupport/CMakeLists.txt |    0
 .../eigen/Eigen/src/StlSupport/StdDeque.h     |    0
 .../eigen/Eigen/src/StlSupport/StdList.h      |    0
 .../eigen/Eigen/src/StlSupport/StdVector.h    |    0
 .../eigen/Eigen/src/StlSupport/details.h      |    0
 .../Eigen/src/SuperLUSupport/CMakeLists.txt   |    0
 .../Eigen/src/SuperLUSupport/SuperLUSupport.h | 1025 ++++++++++++
 .../Eigen/src/UmfPackSupport/CMakeLists.txt   |    0
 .../Eigen/src/UmfPackSupport/UmfPackSupport.h |    0
 .../eigen/Eigen/src/misc/CMakeLists.txt       |    0
 .../eigen/Eigen/src/misc/Image.h              |    0
 .../eigen/Eigen/src/misc/Kernel.h             |    0
 .../eigen/Eigen/src/misc/Solve.h              |    0
 .../eigen/Eigen/src/misc/SparseSolve.h        |    0
 .../eigen/Eigen/src/misc/blas.h               |    0
 .../Eigen/src/plugins/ArrayCwiseBinaryOps.h   |    0
 .../Eigen/src/plugins/ArrayCwiseUnaryOps.h    |    0
 .../eigen/Eigen/src/plugins/BlockMethods.h    |    0
 .../eigen/Eigen/src/plugins/CMakeLists.txt    |    0
 .../Eigen/src/plugins/CommonCwiseBinaryOps.h  |    0
 .../Eigen/src/plugins/CommonCwiseUnaryOps.h   |    0
 .../Eigen/src/plugins/MatrixCwiseBinaryOps.h  |    0
 .../Eigen/src/plugins/MatrixCwiseUnaryOps.h   |    0
 .../{3rdparty => 3rdParty}/eigen/INSTALL      |    0
 .../eigen/bench/BenchSparseUtil.h             |    0
 .../eigen/bench/BenchTimer.h                  |    0
 .../eigen/bench/BenchUtil.h                   |    0
 .../eigen/bench/README.txt                    |    0
 .../eigen/bench/basicbench.cxxlist            |    0
 .../eigen/bench/basicbenchmark.cpp            |    0
 .../eigen/bench/basicbenchmark.h              |    0
 .../eigen/bench/benchBlasGemm.cpp             |    0
 .../eigen/bench/benchCholesky.cpp             |    0
 .../eigen/bench/benchEigenSolver.cpp          |    0
 .../eigen/bench/benchFFT.cpp                  |    0
 .../eigen/bench/benchVecAdd.cpp               |    0
 resources/3rdParty/eigen/bench/bench_gemm.cpp |  271 ++++
 .../eigen/bench/bench_multi_compilers.sh      |    0
 .../eigen/bench/bench_norm.cpp                |    0
 .../eigen/bench/bench_reverse.cpp             |    0
 .../eigen/bench/bench_sum.cpp                 |    0
 .../eigen/bench/bench_unrolling               |    0
 .../eigen/bench/benchmark.cpp                 |    0
 .../eigen/bench/benchmarkSlice.cpp            |    0
 .../eigen/bench/benchmarkX.cpp                |    0
 .../eigen/bench/benchmarkXcwise.cpp           |    0
 .../eigen/bench/benchmark_suite               |    0
 .../eigen/bench/btl/CMakeLists.txt            |    0
 .../eigen/bench/btl/COPYING                   |    0
 .../eigen/bench/btl/README                    |    0
 .../bench/btl/actions/action_aat_product.hh   |    0
 .../bench/btl/actions/action_ata_product.hh   |    0
 .../bench/btl/actions/action_atv_product.hh   |    0
 .../eigen/bench/btl/actions/action_axpby.hh   |    0
 .../eigen/bench/btl/actions/action_axpy.hh    |    0
 .../bench/btl/actions/action_cholesky.hh      |    0
 .../eigen/bench/btl/actions/action_ger.hh     |    0
 .../bench/btl/actions/action_hessenberg.hh    |    0
 .../bench/btl/actions/action_lu_decomp.hh     |    0
 .../bench/btl/actions/action_lu_solve.hh      |    0
 .../actions/action_matrix_matrix_product.hh   |    0
 .../action_matrix_matrix_product_bis.hh       |    0
 .../actions/action_matrix_vector_product.hh   |    0
 .../bench/btl/actions/action_partial_lu.hh    |    0
 .../eigen/bench/btl/actions/action_rot.hh     |    0
 .../eigen/bench/btl/actions/action_symv.hh    |    0
 .../eigen/bench/btl/actions/action_syr2.hh    |    0
 .../bench/btl/actions/action_trisolve.hh      |    0
 .../btl/actions/action_trisolve_matrix.hh     |    0
 .../eigen/bench/btl/actions/action_trmm.hh    |    0
 .../eigen/bench/btl/actions/basic_actions.hh  |    0
 .../eigen/bench/btl/cmake/FindACML.cmake      |    0
 .../eigen/bench/btl/cmake/FindATLAS.cmake     |    0
 .../eigen/bench/btl/cmake/FindBlitz.cmake     |    0
 .../eigen/bench/btl/cmake/FindCBLAS.cmake     |    0
 .../eigen/bench/btl/cmake/FindGMM.cmake       |    0
 .../eigen/bench/btl/cmake/FindGOTO.cmake      |    0
 .../eigen/bench/btl/cmake/FindGOTO2.cmake     |    0
 .../eigen/bench/btl/cmake/FindMKL.cmake       |    0
 .../eigen/bench/btl/cmake/FindMTL4.cmake      |    0
 .../cmake/FindPackageHandleStandardArgs.cmake |    0
 .../eigen/bench/btl/cmake/FindTvmet.cmake     |    0
 .../cmake/MacroOptionalAddSubdirectory.cmake  |    0
 .../eigen/bench/btl/data/CMakeLists.txt       |    0
 .../eigen/bench/btl/data/action_settings.txt  |    0
 .../bench/btl/data/gnuplot_common_settings.hh |    0
 .../eigen/bench/btl/data/go_mean              |    0
 .../eigen/bench/btl/data/mean.cxx             |    0
 .../eigen/bench/btl/data/mk_gnuplot_script.sh |    0
 .../eigen/bench/btl/data/mk_mean_script.sh    |    0
 .../eigen/bench/btl/data/mk_new_gnuplot.sh    |    0
 .../bench/btl/data/perlib_plot_settings.txt   |    0
 .../eigen/bench/btl/data/regularize.cxx       |    0
 .../eigen/bench/btl/data/smooth.cxx           |    0
 .../eigen/bench/btl/data/smooth_all.sh        |    0
 .../eigen/bench/btl/generic_bench/bench.hh    |    0
 .../btl/generic_bench/bench_parameter.hh      |    0
 .../eigen/bench/btl/generic_bench/btl.hh      |    0
 .../btl/generic_bench/init/init_function.hh   |    0
 .../btl/generic_bench/init/init_matrix.hh     |    0
 .../btl/generic_bench/init/init_vector.hh     |    0
 .../btl/generic_bench/static/bench_static.hh  |    0
 .../static/intel_bench_fixed_size.hh          |    0
 .../static/static_size_generator.hh           |    0
 .../generic_bench/timers/STL_perf_analyzer.hh |    0
 .../btl/generic_bench/timers/STL_timer.hh     |    0
 .../timers/mixed_perf_analyzer.hh             |    0
 .../timers/portable_perf_analyzer.hh          |    0
 .../timers/portable_perf_analyzer_old.hh      |    0
 .../generic_bench/timers/portable_timer.hh    |    0
 .../generic_bench/timers/x86_perf_analyzer.hh |    0
 .../btl/generic_bench/timers/x86_timer.hh     |    0
 .../btl/generic_bench/utils/size_lin_log.hh   |    0
 .../bench/btl/generic_bench/utils/size_log.hh |    0
 .../bench/btl/generic_bench/utils/utilities.h |    0
 .../bench/btl/generic_bench/utils/xy_file.hh  |    0
 .../eigen/bench/btl/libs/BLAS/CMakeLists.txt  |    0
 .../eigen/bench/btl/libs/BLAS/blas.h          |    0
 .../bench/btl/libs/BLAS/blas_interface.hh     |    0
 .../btl/libs/BLAS/blas_interface_impl.hh      |    0
 .../bench/btl/libs/BLAS/c_interface_base.h    |    0
 .../eigen/bench/btl/libs/BLAS/main.cpp        |    0
 .../eigen/bench/btl/libs/STL/CMakeLists.txt   |    0
 .../eigen/bench/btl/libs/STL/STL_interface.hh |    0
 .../eigen/bench/btl/libs/STL/main.cpp         |    0
 .../eigen/bench/btl/libs/blitz/CMakeLists.txt |    0
 .../libs/blitz/blitz_LU_solve_interface.hh    |    0
 .../bench/btl/libs/blitz/blitz_interface.hh   |    0
 .../eigen/bench/btl/libs/blitz/btl_blitz.cpp  |    0
 .../bench/btl/libs/blitz/btl_tiny_blitz.cpp   |    0
 .../btl/libs/blitz/tiny_blitz_interface.hh    |    0
 .../bench/btl/libs/eigen2/CMakeLists.txt      |    0
 .../bench/btl/libs/eigen2/btl_tiny_eigen2.cpp |    0
 .../bench/btl/libs/eigen2/eigen2_interface.hh |    0
 .../eigen/bench/btl/libs/eigen2/main_adv.cpp  |    0
 .../bench/btl/libs/eigen2/main_linear.cpp     |    0
 .../bench/btl/libs/eigen2/main_matmat.cpp     |    0
 .../bench/btl/libs/eigen2/main_vecmat.cpp     |    0
 .../bench/btl/libs/eigen3/CMakeLists.txt      |    0
 .../bench/btl/libs/eigen3/btl_tiny_eigen3.cpp |    0
 .../bench/btl/libs/eigen3/eigen3_interface.hh |    0
 .../eigen/bench/btl/libs/eigen3/main_adv.cpp  |    0
 .../bench/btl/libs/eigen3/main_linear.cpp     |    0
 .../bench/btl/libs/eigen3/main_matmat.cpp     |    0
 .../bench/btl/libs/eigen3/main_vecmat.cpp     |    0
 .../eigen/bench/btl/libs/gmm/CMakeLists.txt   |    0
 .../btl/libs/gmm/gmm_LU_solve_interface.hh    |    0
 .../eigen/bench/btl/libs/gmm/gmm_interface.hh |    0
 .../eigen/bench/btl/libs/gmm/main.cpp         |    0
 .../eigen/bench/btl/libs/mtl4/.kdbgrc.main    |    0
 .../eigen/bench/btl/libs/mtl4/CMakeLists.txt  |    0
 .../eigen/bench/btl/libs/mtl4/main.cpp        |    0
 .../btl/libs/mtl4/mtl4_LU_solve_interface.hh  |    0
 .../bench/btl/libs/mtl4/mtl4_interface.hh     |    0
 .../eigen/bench/btl/libs/tvmet/CMakeLists.txt |    0
 .../eigen/bench/btl/libs/tvmet/main.cpp       |    0
 .../bench/btl/libs/tvmet/tvmet_interface.hh   |    0
 .../eigen/bench/btl/libs/ublas/CMakeLists.txt |    0
 .../eigen/bench/btl/libs/ublas/main.cpp       |    0
 .../bench/btl/libs/ublas/ublas_interface.hh   |    0
 .../eigen/bench/check_cache_queries.cpp       |    0
 .../eigen/bench/eig33.cpp                     |    0
 .../eigen/bench/geometry.cpp                  |    0
 .../eigen/bench/product_threshold.cpp         |    0
 .../eigen/bench/quat_slerp.cpp                |    0
 .../eigen/bench/quatmul.cpp                   |    0
 .../eigen/bench/sparse_cholesky.cpp           |    0
 .../eigen/bench/sparse_dense_product.cpp      |    0
 .../eigen/bench/sparse_lu.cpp                 |    0
 .../eigen/bench/sparse_product.cpp            |    0
 .../eigen/bench/sparse_randomsetter.cpp       |    0
 .../eigen/bench/sparse_setter.cpp             |    0
 .../eigen/bench/sparse_transpose.cpp          |    0
 .../eigen/bench/sparse_trisolver.cpp          |    0
 .../eigen/bench/spbench/CMakeLists.txt        |   65 +
 .../eigen/bench/spbench/spbenchsolver.cpp     |   90 ++
 .../eigen/bench/spbench/spbenchsolver.h       |  533 ++++++
 .../eigen/bench/spmv.cpp                      |    0
 .../eigen/bench/vdw_new.cpp                   |    0
 .../eigen/blas/BandTriangularSolver.h         |    0
 resources/3rdParty/eigen/blas/CMakeLists.txt  |   57 +
 .../eigen/blas/README.txt                     |    0
 .../{3rdparty => 3rdParty}/eigen/blas/chbmv.f |    0
 .../{3rdparty => 3rdParty}/eigen/blas/chpmv.f |    0
 resources/3rdParty/eigen/blas/chpr.f          |  220 +++
 resources/3rdParty/eigen/blas/chpr2.f         |  255 +++
 resources/3rdParty/eigen/blas/common.h        |  140 ++
 .../eigen/blas/complex_double.cpp             |    0
 .../eigen/blas/complex_single.cpp             |    0
 .../eigen/blas/complexdots.f                  |    0
 .../{3rdparty => 3rdParty}/eigen/blas/ctbmv.f |    0
 resources/3rdParty/eigen/blas/ctpmv.f         |  329 ++++
 resources/3rdParty/eigen/blas/ctpsv.f         |  332 ++++
 resources/3rdParty/eigen/blas/double.cpp      |   19 +
 .../{3rdparty => 3rdParty}/eigen/blas/drotm.f |    0
 .../eigen/blas/drotmg.f                       |    0
 .../{3rdparty => 3rdParty}/eigen/blas/dsbmv.f |    0
 .../{3rdparty => 3rdParty}/eigen/blas/dspmv.f |    0
 resources/3rdParty/eigen/blas/dspr.f          |  202 +++
 resources/3rdParty/eigen/blas/dspr2.f         |  233 +++
 .../{3rdparty => 3rdParty}/eigen/blas/dtbmv.f |    0
 resources/3rdParty/eigen/blas/dtpmv.f         |  293 ++++
 resources/3rdParty/eigen/blas/dtpsv.f         |  296 ++++
 .../eigen/blas/level1_cplx_impl.h             |    0
 .../eigen/blas/level1_impl.h                  |    0
 .../eigen/blas/level1_real_impl.h             |    0
 .../3rdParty/eigen/blas/level2_cplx_impl.h    |  270 ++++
 resources/3rdParty/eigen/blas/level2_impl.h   |  457 ++++++
 .../3rdParty/eigen/blas/level2_real_impl.h    |  210 +++
 resources/3rdParty/eigen/blas/level3_impl.h   |  632 ++++++++
 .../{3rdparty => 3rdParty}/eigen/blas/lsame.f |    0
 resources/3rdParty/eigen/blas/single.cpp      |   19 +
 .../{3rdparty => 3rdParty}/eigen/blas/srotm.f |    0
 .../eigen/blas/srotmg.f                       |    0
 .../{3rdparty => 3rdParty}/eigen/blas/ssbmv.f |    0
 .../{3rdparty => 3rdParty}/eigen/blas/sspmv.f |    0
 resources/3rdParty/eigen/blas/sspr.f          |  202 +++
 resources/3rdParty/eigen/blas/sspr2.f         |  233 +++
 .../{3rdparty => 3rdParty}/eigen/blas/stbmv.f |    0
 resources/3rdParty/eigen/blas/stpmv.f         |  293 ++++
 resources/3rdParty/eigen/blas/stpsv.f         |  296 ++++
 .../eigen/blas/testing/CMakeLists.txt         |    0
 .../eigen/blas/testing/cblat1.f               |    0
 .../eigen/blas/testing/cblat2.dat             |    0
 .../eigen/blas/testing/cblat2.f               |    0
 .../eigen/blas/testing/cblat3.dat             |    0
 .../eigen/blas/testing/cblat3.f               |    0
 .../3rdParty/eigen/blas/testing/dblat1.f      |  769 +++++++++
 .../eigen/blas/testing/dblat2.dat             |    0
 .../eigen/blas/testing/dblat2.f               |    0
 .../eigen/blas/testing/dblat3.dat             |    0
 .../eigen/blas/testing/dblat3.f               |    0
 .../eigen/blas/testing/runblastest.sh         |    0
 .../3rdParty/eigen/blas/testing/sblat1.f      |  769 +++++++++
 .../eigen/blas/testing/sblat2.dat             |    0
 .../eigen/blas/testing/sblat2.f               |    0
 .../eigen/blas/testing/sblat3.dat             |    0
 .../eigen/blas/testing/sblat3.f               |    0
 .../eigen/blas/testing/zblat1.f               |    0
 .../eigen/blas/testing/zblat2.dat             |    0
 .../eigen/blas/testing/zblat2.f               |    0
 .../eigen/blas/testing/zblat3.dat             |    0
 .../eigen/blas/testing/zblat3.f               |    0
 .../eigen/blas/xerbla.cpp                     |    0
 .../{3rdparty => 3rdParty}/eigen/blas/zhbmv.f |    0
 .../{3rdparty => 3rdParty}/eigen/blas/zhpmv.f |    0
 resources/3rdParty/eigen/blas/zhpr.f          |  220 +++
 resources/3rdParty/eigen/blas/zhpr2.f         |  255 +++
 .../{3rdparty => 3rdParty}/eigen/blas/ztbmv.f |    0
 resources/3rdParty/eigen/blas/ztpmv.f         |  329 ++++
 resources/3rdParty/eigen/blas/ztpsv.f         |  332 ++++
 .../cmake/CMakeDetermineVSServicePack.cmake   |    0
 .../eigen/cmake/EigenConfigureTesting.cmake   |    0
 .../eigen/cmake/EigenDetermineOSVersion.cmake |    0
 .../eigen/cmake/EigenTesting.cmake            |    0
 .../eigen/cmake/FindAdolc.cmake               |    0
 .../eigen/cmake/FindBLAS.cmake                |    0
 .../eigen/cmake/FindCholmod.cmake             |    0
 .../eigen/cmake/FindEigen2.cmake              |    0
 .../eigen/cmake/FindEigen3.cmake              |    0
 .../eigen/cmake/FindFFTW.cmake                |    0
 .../eigen/cmake/FindGLEW.cmake                |    0
 .../eigen/cmake/FindGMP.cmake                 |    0
 .../eigen/cmake/FindGSL.cmake                 |    0
 .../eigen/cmake/FindGoogleHash.cmake          |    0
 .../eigen/cmake/FindLAPACK.cmake              |    0
 .../eigen/cmake/FindMPFR.cmake                |    0
 .../3rdParty/eigen/cmake/FindMetis.cmake      |   24 +
 .../eigen/cmake/FindPastix.cmake              |    0
 .../eigen/cmake/FindScotch.cmake              |    0
 .../eigen/cmake/FindStandardMathLibrary.cmake |    0
 .../eigen/cmake/FindSuperLU.cmake             |    0
 .../eigen/cmake/FindUmfpack.cmake             |    0
 .../eigen/cmake/RegexUtils.cmake              |    0
 .../eigen/cmake/language_support.cmake        |    0
 .../eigen/debug/gdb/__init__.py               |    0
 .../eigen/debug/gdb/printers.py               |    0
 .../eigen/debug/msvc/eigen_autoexp_part.dat   |    0
 .../eigen/demos/CMakeLists.txt                |    0
 .../eigen/demos/mandelbrot/CMakeLists.txt     |    0
 .../eigen/demos/mandelbrot/README             |    0
 .../eigen/demos/mandelbrot/mandelbrot.cpp     |    0
 .../eigen/demos/mandelbrot/mandelbrot.h       |    0
 .../eigen/demos/mix_eigen_and_c/README        |    0
 .../demos/mix_eigen_and_c/binary_library.cpp  |    0
 .../demos/mix_eigen_and_c/binary_library.h    |    0
 .../eigen/demos/mix_eigen_and_c/example.c     |    0
 .../eigen/demos/opengl/CMakeLists.txt         |   28 +
 .../eigen/demos/opengl/README                 |    0
 .../eigen/demos/opengl/camera.cpp             |    0
 .../eigen/demos/opengl/camera.h               |    0
 .../eigen/demos/opengl/gpuhelper.cpp          |    0
 .../eigen/demos/opengl/gpuhelper.h            |    0
 .../eigen/demos/opengl/icosphere.cpp          |    0
 .../eigen/demos/opengl/icosphere.h            |    0
 .../eigen/demos/opengl/quaternion_demo.cpp    |    0
 .../eigen/demos/opengl/quaternion_demo.h      |    0
 .../eigen/demos/opengl/trackball.cpp          |    0
 .../eigen/demos/opengl/trackball.h            |    0
 .../eigen/doc/A05_PortingFrom2To3.dox         |    0
 .../eigen/doc/A10_Eigen2SupportModes.dox      |    0
 .../eigen/doc/AsciiQuickReference.txt         |    0
 .../eigen/doc/B01_Experimental.dox            |    0
 .../eigen/doc/C00_QuickStartGuide.dox         |    0
 .../eigen/doc/C01_TutorialMatrixClass.dox     |    0
 .../doc/C02_TutorialMatrixArithmetic.dox      |    0
 .../eigen/doc/C03_TutorialArrayClass.dox      |    0
 .../eigen/doc/C04_TutorialBlockOperations.dox |    0
 .../C05_TutorialAdvancedInitialization.dox    |    0
 .../eigen/doc/C06_TutorialLinearAlgebra.dox   |    0
 ...TutorialReductionsVisitorsBroadcasting.dox |    0
 .../eigen/doc/C08_TutorialGeometry.dox        |    0
 .../3rdParty/eigen/doc/C09_TutorialSparse.dox |  455 ++++++
 .../eigen/doc/C10_TutorialMapClass.dox        |    0
 .../eigen/doc/CMakeLists.txt                  |    0
 .../3rdParty/eigen/doc/D01_StlContainers.dox  |   65 +
 .../eigen/doc/D03_WrongStackAlignment.dox     |    0
 .../eigen/doc/D07_PassingByValue.dox          |    0
 .../doc/D09_StructHavingEigenMembers.dox      |    0
 .../eigen/doc/D11_UnalignedArrayAssert.dox    |    0
 .../eigen/doc/Doxyfile.in                     |    0
 .../eigen/doc/Eigen_Silly_Professor_64x64.png |  Bin
 .../eigen/doc/I00_CustomizingEigen.dox        |    0
 .../eigen/doc/I01_TopicLazyEvaluation.dox     |    0
 .../3rdParty/eigen/doc/I02_HiPerformance.dox  |  128 ++
 .../eigen/doc/I03_InsideEigenExample.dox      |    0
 .../eigen/doc/I05_FixedSizeVectorizable.dox   |    0
 .../doc/I06_TopicEigenExpressionTemplates.dox |    0
 .../eigen/doc/I07_TopicScalarTypes.dox        |    0
 .../eigen/doc/I08_Resizing.dox                |    0
 .../eigen/doc/I09_Vectorization.dox           |    0
 .../3rdParty/eigen/doc/I10_Assertions.dox     |   13 +
 .../eigen/doc/I11_Aliasing.dox                |    0
 .../eigen/doc/I12_ClassHierarchy.dox          |    0
 .../doc/I13_FunctionsTakingEigenTypes.dox     |    0
 .../eigen/doc/I14_PreprocessorDirectives.dox  |    0
 .../eigen/doc/I15_StorageOrders.dox           |    0
 .../eigen/doc/I16_TemplateKeyword.dox         |    0
 .../eigen/doc/Overview.dox                    |    0
 .../eigen/doc/QuickReference.dox              |    0
 .../eigen/doc/SparseQuickReference.dox        |    0
 .../doc/TopicLinearAlgebraDecompositions.dox  |    0
 .../eigen/doc/TopicMultithreading.dox         |    0
 .../doc/TutorialSparse_example_details.dox    |    0
 .../eigen/doc/UsingIntelMKL.dox               |    0
 .../eigen/doc/eigendoxy.css                   |    0
 .../eigen/doc/eigendoxy_footer.html.in        |    0
 .../eigen/doc/eigendoxy_header.html.in        |    0
 .../eigen/doc/eigendoxy_tabs.css              |    0
 .../eigen/doc/examples/.krazy                 |    0
 .../eigen/doc/examples/CMakeLists.txt         |    0
 .../doc/examples/DenseBase_middleCols_int.cpp |    0
 .../doc/examples/DenseBase_middleRows_int.cpp |    0
 .../DenseBase_template_int_middleCols.cpp     |    0
 .../DenseBase_template_int_middleRows.cpp     |    0
 .../doc/examples/MatrixBase_cwise_const.cpp   |    0
 .../eigen/doc/examples/QuickStart_example.cpp |    0
 .../examples/QuickStart_example2_dynamic.cpp  |    0
 .../examples/QuickStart_example2_fixed.cpp    |    0
 .../doc/examples/TemplateKeyword_flexible.cpp |    0
 .../doc/examples/TemplateKeyword_simple.cpp   |    0
 .../examples/TutorialLinAlgComputeTwice.cpp   |    0
 .../TutorialLinAlgExComputeSolveError.cpp     |    0
 ...torialLinAlgExSolveColPivHouseholderQR.cpp |    0
 .../examples/TutorialLinAlgExSolveLDLT.cpp    |    0
 .../TutorialLinAlgInverseDeterminant.cpp      |    0
 .../examples/TutorialLinAlgRankRevealing.cpp  |    0
 .../doc/examples/TutorialLinAlgSVDSolve.cpp   |    0
 .../TutorialLinAlgSelfAdjointEigenSolver.cpp  |    0
 .../examples/TutorialLinAlgSetThreshold.cpp   |    0
 .../Tutorial_ArrayClass_accessors.cpp         |    0
 .../examples/Tutorial_ArrayClass_addition.cpp |    0
 .../Tutorial_ArrayClass_cwise_other.cpp       |    0
 .../examples/Tutorial_ArrayClass_interop.cpp  |    0
 .../Tutorial_ArrayClass_interop_matrix.cpp    |    0
 .../doc/examples/Tutorial_ArrayClass_mult.cpp |    0
 ...orial_BlockOperations_block_assignment.cpp |    0
 .../Tutorial_BlockOperations_colrow.cpp       |    0
 .../Tutorial_BlockOperations_corner.cpp       |    0
 .../Tutorial_BlockOperations_print_block.cpp  |    0
 .../Tutorial_BlockOperations_vector.cpp       |    0
 .../doc/examples/Tutorial_PartialLU_solve.cpp |    0
 ...ionsVisitorsBroadcasting_broadcast_1nn.cpp |    0
 ...sVisitorsBroadcasting_broadcast_simple.cpp |    0
 ...sBroadcasting_broadcast_simple_rowwise.cpp |    0
 ...ReductionsVisitorsBroadcasting_colwise.cpp |    0
 ...ReductionsVisitorsBroadcasting_maxnorm.cpp |    0
 ...nsVisitorsBroadcasting_reductions_bool.cpp |    0
 ...nsVisitorsBroadcasting_reductions_norm.cpp |    0
 ...ReductionsVisitorsBroadcasting_rowwise.cpp |    0
 ...eductionsVisitorsBroadcasting_visitors.cpp |    0
 .../Tutorial_simple_example_dynamic_size.cpp  |    0
 .../Tutorial_simple_example_fixed_size.cpp    |    0
 .../eigen/doc/examples/class_Block.cpp        |    0
 .../doc/examples/class_CwiseBinaryOp.cpp      |    0
 .../eigen/doc/examples/class_CwiseUnaryOp.cpp |    0
 .../examples/class_CwiseUnaryOp_ptrfun.cpp    |    0
 .../eigen/doc/examples/class_FixedBlock.cpp   |    0
 .../doc/examples/class_FixedVectorBlock.cpp   |    0
 .../eigen/doc/examples/class_VectorBlock.cpp  |    0
 .../examples/function_taking_eigenbase.cpp    |    0
 .../doc/examples/tut_arithmetic_add_sub.cpp   |    0
 .../doc/examples/tut_arithmetic_dot_cross.cpp |    0
 .../examples/tut_arithmetic_matrix_mul.cpp    |    0
 .../examples/tut_arithmetic_redux_basic.cpp   |    0
 .../tut_arithmetic_scalar_mul_div.cpp         |    0
 .../tut_matrix_coefficient_accessors.cpp      |    0
 .../eigen/doc/examples/tut_matrix_resize.cpp  |    0
 .../examples/tut_matrix_resize_fixed_size.cpp |    0
 .../eigen/doc/snippets/.krazy                 |    0
 .../doc/snippets/AngleAxis_mimic_euler.cpp    |    0
 .../eigen/doc/snippets/CMakeLists.txt         |    0
 .../snippets/ColPivHouseholderQR_solve.cpp    |    0
 .../snippets/ComplexEigenSolver_compute.cpp   |    0
 .../ComplexEigenSolver_eigenvalues.cpp        |    0
 .../ComplexEigenSolver_eigenvectors.cpp       |    0
 .../doc/snippets/ComplexSchur_compute.cpp     |    0
 .../doc/snippets/ComplexSchur_matrixT.cpp     |    0
 .../doc/snippets/ComplexSchur_matrixU.cpp     |    0
 .../eigen/doc/snippets/Cwise_abs.cpp          |    0
 .../eigen/doc/snippets/Cwise_abs2.cpp         |    0
 .../eigen/doc/snippets/Cwise_acos.cpp         |    0
 .../eigen/doc/snippets/Cwise_boolean_and.cpp  |    0
 .../eigen/doc/snippets/Cwise_boolean_or.cpp   |    0
 .../eigen/doc/snippets/Cwise_cos.cpp          |    0
 .../eigen/doc/snippets/Cwise_cube.cpp         |    0
 .../eigen/doc/snippets/Cwise_equal_equal.cpp  |    0
 .../eigen/doc/snippets/Cwise_exp.cpp          |    0
 .../eigen/doc/snippets/Cwise_greater.cpp      |    0
 .../doc/snippets/Cwise_greater_equal.cpp      |    0
 .../eigen/doc/snippets/Cwise_inverse.cpp      |    0
 .../eigen/doc/snippets/Cwise_less.cpp         |    0
 .../eigen/doc/snippets/Cwise_less_equal.cpp   |    0
 .../eigen/doc/snippets/Cwise_log.cpp          |    0
 .../eigen/doc/snippets/Cwise_max.cpp          |    0
 .../eigen/doc/snippets/Cwise_min.cpp          |    0
 .../eigen/doc/snippets/Cwise_minus.cpp        |    0
 .../eigen/doc/snippets/Cwise_minus_equal.cpp  |    0
 .../eigen/doc/snippets/Cwise_not_equal.cpp    |    0
 .../eigen/doc/snippets/Cwise_plus.cpp         |    0
 .../eigen/doc/snippets/Cwise_plus_equal.cpp   |    0
 .../eigen/doc/snippets/Cwise_pow.cpp          |    0
 .../eigen/doc/snippets/Cwise_product.cpp      |    0
 .../eigen/doc/snippets/Cwise_quotient.cpp     |    0
 .../eigen/doc/snippets/Cwise_sin.cpp          |    0
 .../eigen/doc/snippets/Cwise_slash_equal.cpp  |    0
 .../eigen/doc/snippets/Cwise_sqrt.cpp         |    0
 .../eigen/doc/snippets/Cwise_square.cpp       |    0
 .../eigen/doc/snippets/Cwise_tan.cpp          |    0
 .../eigen/doc/snippets/Cwise_times_equal.cpp  |    0
 .../doc/snippets/DenseBase_LinSpaced.cpp      |    0
 .../doc/snippets/DenseBase_LinSpaced_seq.cpp  |    0
 .../doc/snippets/DenseBase_setLinSpaced.cpp   |    0
 .../doc/snippets/DirectionWise_replicate.cpp  |    0
 .../snippets/DirectionWise_replicate_int.cpp  |    0
 .../EigenSolver_EigenSolver_MatrixType.cpp    |    0
 .../doc/snippets/EigenSolver_compute.cpp      |    0
 .../doc/snippets/EigenSolver_eigenvalues.cpp  |    0
 .../doc/snippets/EigenSolver_eigenvectors.cpp |    0
 .../EigenSolver_pseudoEigenvectors.cpp        |    0
 .../snippets/FullPivHouseholderQR_solve.cpp   |    0
 .../eigen/doc/snippets/FullPivLU_image.cpp    |    0
 .../eigen/doc/snippets/FullPivLU_kernel.cpp   |    0
 .../eigen/doc/snippets/FullPivLU_solve.cpp    |    0
 .../HessenbergDecomposition_compute.cpp       |    0
 .../HessenbergDecomposition_matrixH.cpp       |    0
 .../HessenbergDecomposition_packedMatrix.cpp  |    0
 .../doc/snippets/HouseholderQR_solve.cpp      |    0
 ...ouseholderSequence_HouseholderSequence.cpp |    0
 .../eigen/doc/snippets/IOFormat.cpp           |    0
 .../eigen/doc/snippets/JacobiSVD_basic.cpp    |    0
 .../eigen/doc/snippets/Jacobi_makeGivens.cpp  |    0
 .../eigen/doc/snippets/Jacobi_makeJacobi.cpp  |    0
 .../eigen/doc/snippets/LLT_example.cpp        |    0
 .../eigen/doc/snippets/LLT_solve.cpp          |    0
 .../eigen/doc/snippets/Map_general_stride.cpp |    0
 .../eigen/doc/snippets/Map_inner_stride.cpp   |    0
 .../eigen/doc/snippets/Map_outer_stride.cpp   |    0
 .../eigen/doc/snippets/Map_placement_new.cpp  |    0
 .../eigen/doc/snippets/Map_simple.cpp         |    0
 .../eigen/doc/snippets/MatrixBase_adjoint.cpp |    0
 .../eigen/doc/snippets/MatrixBase_all.cpp     |    0
 .../eigen/doc/snippets/MatrixBase_array.cpp   |    0
 .../doc/snippets/MatrixBase_array_const.cpp   |    0
 .../doc/snippets/MatrixBase_asDiagonal.cpp    |    0
 .../doc/snippets/MatrixBase_block_int_int.cpp |    0
 .../MatrixBase_block_int_int_int_int.cpp      |    0
 .../MatrixBase_bottomLeftCorner_int_int.cpp   |    0
 .../MatrixBase_bottomRightCorner_int_int.cpp  |    0
 .../snippets/MatrixBase_bottomRows_int.cpp    |    0
 .../eigen/doc/snippets/MatrixBase_cast.cpp    |    0
 .../eigen/doc/snippets/MatrixBase_col.cpp     |    0
 .../eigen/doc/snippets/MatrixBase_colwise.cpp |    0
 ...trixBase_computeInverseAndDetWithCheck.cpp |    0
 .../MatrixBase_computeInverseWithCheck.cpp    |    0
 .../doc/snippets/MatrixBase_cwiseAbs.cpp      |    0
 .../doc/snippets/MatrixBase_cwiseAbs2.cpp     |    0
 .../doc/snippets/MatrixBase_cwiseEqual.cpp    |    0
 .../doc/snippets/MatrixBase_cwiseInverse.cpp  |    0
 .../doc/snippets/MatrixBase_cwiseMax.cpp      |    0
 .../doc/snippets/MatrixBase_cwiseMin.cpp      |    0
 .../doc/snippets/MatrixBase_cwiseNotEqual.cpp |    0
 .../doc/snippets/MatrixBase_cwiseProduct.cpp  |    0
 .../doc/snippets/MatrixBase_cwiseQuotient.cpp |    0
 .../doc/snippets/MatrixBase_cwiseSqrt.cpp     |    0
 .../doc/snippets/MatrixBase_diagonal.cpp      |    0
 .../doc/snippets/MatrixBase_diagonal_int.cpp  |    0
 .../MatrixBase_diagonal_template_int.cpp      |    0
 .../doc/snippets/MatrixBase_eigenvalues.cpp   |    0
 .../eigen/doc/snippets/MatrixBase_end_int.cpp |    0
 .../eigen/doc/snippets/MatrixBase_eval.cpp    |    0
 .../eigen/doc/snippets/MatrixBase_extract.cpp |    0
 .../MatrixBase_fixedBlock_int_int.cpp         |    0
 .../doc/snippets/MatrixBase_identity.cpp      |    0
 .../snippets/MatrixBase_identity_int_int.cpp  |    0
 .../eigen/doc/snippets/MatrixBase_inverse.cpp |    0
 .../doc/snippets/MatrixBase_isDiagonal.cpp    |    0
 .../doc/snippets/MatrixBase_isIdentity.cpp    |    0
 .../eigen/doc/snippets/MatrixBase_isOnes.cpp  |    0
 .../doc/snippets/MatrixBase_isOrthogonal.cpp  |    0
 .../doc/snippets/MatrixBase_isUnitary.cpp     |    0
 .../eigen/doc/snippets/MatrixBase_isZero.cpp  |    0
 .../doc/snippets/MatrixBase_leftCols_int.cpp  |    0
 .../eigen/doc/snippets/MatrixBase_marked.cpp  |    0
 .../eigen/doc/snippets/MatrixBase_noalias.cpp |    0
 .../eigen/doc/snippets/MatrixBase_ones.cpp    |    0
 .../doc/snippets/MatrixBase_ones_int.cpp      |    0
 .../doc/snippets/MatrixBase_ones_int_int.cpp  |    0
 .../doc/snippets/MatrixBase_operatorNorm.cpp  |    0
 .../eigen/doc/snippets/MatrixBase_part.cpp    |    0
 .../eigen/doc/snippets/MatrixBase_prod.cpp    |    0
 .../eigen/doc/snippets/MatrixBase_random.cpp  |    0
 .../doc/snippets/MatrixBase_random_int.cpp    |    0
 .../snippets/MatrixBase_random_int_int.cpp    |    0
 .../doc/snippets/MatrixBase_replicate.cpp     |    0
 .../snippets/MatrixBase_replicate_int_int.cpp |    0
 .../eigen/doc/snippets/MatrixBase_reverse.cpp |    0
 .../doc/snippets/MatrixBase_rightCols_int.cpp |    0
 .../eigen/doc/snippets/MatrixBase_row.cpp     |    0
 .../eigen/doc/snippets/MatrixBase_rowwise.cpp |    0
 .../snippets/MatrixBase_segment_int_int.cpp   |    0
 .../eigen/doc/snippets/MatrixBase_select.cpp  |    0
 .../eigen/doc/snippets/MatrixBase_set.cpp     |    0
 .../doc/snippets/MatrixBase_setIdentity.cpp   |    0
 .../eigen/doc/snippets/MatrixBase_setOnes.cpp |    0
 .../doc/snippets/MatrixBase_setRandom.cpp     |    0
 .../eigen/doc/snippets/MatrixBase_setZero.cpp |    0
 .../doc/snippets/MatrixBase_start_int.cpp     |    0
 .../MatrixBase_template_int_bottomRows.cpp    |    0
 .../snippets/MatrixBase_template_int_end.cpp  |    0
 ...Base_template_int_int_bottomLeftCorner.cpp |    0
 ...ase_template_int_int_bottomRightCorner.cpp |    0
 ...rixBase_template_int_int_topLeftCorner.cpp |    0
 ...ixBase_template_int_int_topRightCorner.cpp |    0
 .../MatrixBase_template_int_leftCols.cpp      |    0
 .../MatrixBase_template_int_rightCols.cpp     |    0
 .../MatrixBase_template_int_segment.cpp       |    0
 .../MatrixBase_template_int_start.cpp         |    0
 .../MatrixBase_template_int_topRows.cpp       |    0
 .../MatrixBase_topLeftCorner_int_int.cpp      |    0
 .../MatrixBase_topRightCorner_int_int.cpp     |    0
 .../doc/snippets/MatrixBase_topRows_int.cpp   |    0
 .../doc/snippets/MatrixBase_transpose.cpp     |    0
 .../eigen/doc/snippets/MatrixBase_zero.cpp    |    0
 .../doc/snippets/MatrixBase_zero_int.cpp      |    0
 .../doc/snippets/MatrixBase_zero_int_int.cpp  |    0
 .../snippets/Matrix_resize_NoChange_int.cpp   |    0
 .../eigen/doc/snippets/Matrix_resize_int.cpp  |    0
 .../snippets/Matrix_resize_int_NoChange.cpp   |    0
 .../doc/snippets/Matrix_resize_int_int.cpp    |    0
 .../doc/snippets/Matrix_setConstant_int.cpp   |    0
 .../snippets/Matrix_setConstant_int_int.cpp   |    0
 .../snippets/Matrix_setIdentity_int_int.cpp   |    0
 .../eigen/doc/snippets/Matrix_setOnes_int.cpp |    0
 .../doc/snippets/Matrix_setOnes_int_int.cpp   |    0
 .../doc/snippets/Matrix_setRandom_int.cpp     |    0
 .../doc/snippets/Matrix_setRandom_int_int.cpp |    0
 .../eigen/doc/snippets/Matrix_setZero_int.cpp |    0
 .../doc/snippets/Matrix_setZero_int_int.cpp   |    0
 .../eigen/doc/snippets/PartialPivLU_solve.cpp |    0
 .../eigen/doc/snippets/PartialRedux_count.cpp |    0
 .../doc/snippets/PartialRedux_maxCoeff.cpp    |    0
 .../doc/snippets/PartialRedux_minCoeff.cpp    |    0
 .../eigen/doc/snippets/PartialRedux_norm.cpp  |    0
 .../eigen/doc/snippets/PartialRedux_prod.cpp  |    0
 .../doc/snippets/PartialRedux_squaredNorm.cpp |    0
 .../eigen/doc/snippets/PartialRedux_sum.cpp   |    0
 .../RealSchur_RealSchur_MatrixType.cpp        |    0
 .../eigen/doc/snippets/RealSchur_compute.cpp  |    0
 ...ointEigenSolver_SelfAdjointEigenSolver.cpp |    0
 ...lver_SelfAdjointEigenSolver_MatrixType.cpp |    0
 ...ver_SelfAdjointEigenSolver_MatrixType2.cpp |    0
 ...fAdjointEigenSolver_compute_MatrixType.cpp |    0
 ...AdjointEigenSolver_compute_MatrixType2.cpp |    0
 .../SelfAdjointEigenSolver_eigenvalues.cpp    |    0
 .../SelfAdjointEigenSolver_eigenvectors.cpp   |    0
 ...AdjointEigenSolver_operatorInverseSqrt.cpp |    0
 .../SelfAdjointEigenSolver_operatorSqrt.cpp   |    0
 .../snippets/SelfAdjointView_eigenvalues.cpp  |    0
 .../snippets/SelfAdjointView_operatorNorm.cpp |    0
 .../doc/snippets/TopicAliasing_block.cpp      |    0
 .../snippets/TopicAliasing_block_correct.cpp  |    0
 .../doc/snippets/TopicAliasing_cwise.cpp      |    0
 .../doc/snippets/TopicAliasing_mult1.cpp      |    0
 .../doc/snippets/TopicAliasing_mult2.cpp      |    0
 .../doc/snippets/TopicAliasing_mult3.cpp      |    0
 .../snippets/TopicStorageOrders_example.cpp   |    0
 ...lization_Tridiagonalization_MatrixType.cpp |    0
 .../snippets/Tridiagonalization_compute.cpp   |    0
 .../Tridiagonalization_decomposeInPlace.cpp   |    0
 .../snippets/Tridiagonalization_diagonal.cpp  |    0
 ...iagonalization_householderCoefficients.cpp |    0
 .../Tridiagonalization_packedMatrix.cpp       |    0
 .../Tutorial_AdvancedInitialization_Block.cpp |    0
 ..._AdvancedInitialization_CommaTemporary.cpp |    0
 .../Tutorial_AdvancedInitialization_Join.cpp  |    0
 ...orial_AdvancedInitialization_LinSpaced.cpp |    0
 ...orial_AdvancedInitialization_ThreeWays.cpp |    0
 .../Tutorial_AdvancedInitialization_Zero.cpp  |    0
 .../doc/snippets/Tutorial_Map_rowmajor.cpp    |    0
 .../eigen/doc/snippets/Tutorial_Map_using.cpp |    0
 .../doc/snippets/Tutorial_commainit_01.cpp    |    0
 .../doc/snippets/Tutorial_commainit_01b.cpp   |    0
 .../doc/snippets/Tutorial_commainit_02.cpp    |    0
 .../Tutorial_solve_matrix_inverse.cpp         |    0
 .../snippets/Tutorial_solve_multiple_rhs.cpp  |    0
 .../Tutorial_solve_reuse_decomposition.cpp    |    0
 .../doc/snippets/Tutorial_solve_singular.cpp  |    0
 .../snippets/Tutorial_solve_triangular.cpp    |    0
 .../Tutorial_solve_triangular_inplace.cpp     |    0
 .../eigen/doc/snippets/Vectorwise_reverse.cpp |    0
 .../eigen/doc/snippets/class_FullPivLU.cpp    |    0
 .../eigen/doc/snippets/compile_snippet.cpp.in |    0
 .../snippets/tut_arithmetic_redux_minmax.cpp  |    0
 .../tut_arithmetic_transpose_aliasing.cpp     |    0
 .../tut_arithmetic_transpose_conjugate.cpp    |    0
 .../tut_arithmetic_transpose_inplace.cpp      |    0
 .../tut_matrix_assignment_resizing.cpp        |    0
 .../eigen/doc/special_examples/CMakeLists.txt |    0
 .../Tutorial_sparse_example.cpp               |    0
 .../Tutorial_sparse_example_details.cpp       |    0
 .../eigen/doc/tutorial.cpp                    |    0
 .../{3rdparty => 3rdParty}/eigen/eigen3.pc.in |    0
 .../eigen/failtest/CMakeLists.txt             |    0
 .../block_nonconst_ctor_on_const_xpr_0.cpp    |    0
 .../block_nonconst_ctor_on_const_xpr_1.cpp    |    0
 .../block_nonconst_ctor_on_const_xpr_2.cpp    |    0
 .../block_on_const_type_actually_const_0.cpp  |    0
 .../block_on_const_type_actually_const_1.cpp  |    0
 .../const_qualified_block_method_retval_0.cpp |    0
 .../const_qualified_block_method_retval_1.cpp |    0
 ...const_qualified_diagonal_method_retval.cpp |    0
 ...onst_qualified_transpose_method_retval.cpp |    0
 .../diagonal_nonconst_ctor_on_const_xpr.cpp   |    0
 .../diagonal_on_const_type_actually_const.cpp |    0
 .../eigen/failtest/failtest_sanity_check.cpp  |    0
 .../map_nonconst_ctor_on_const_ptr_0.cpp      |    0
 .../map_nonconst_ctor_on_const_ptr_1.cpp      |    0
 .../map_nonconst_ctor_on_const_ptr_2.cpp      |    0
 .../map_nonconst_ctor_on_const_ptr_3.cpp      |    0
 .../map_nonconst_ctor_on_const_ptr_4.cpp      |    0
 .../map_on_const_type_actually_const_0.cpp    |    0
 .../map_on_const_type_actually_const_1.cpp    |    0
 .../transpose_nonconst_ctor_on_const_xpr.cpp  |    0
 ...transpose_on_const_type_actually_const.cpp |    0
 .../eigen/lapack/CMakeLists.txt               |    0
 .../eigen/lapack/cholesky.cpp                 |    0
 .../eigen/lapack/complex_double.cpp           |    0
 .../eigen/lapack/complex_single.cpp           |    0
 .../eigen/lapack/double.cpp                   |    0
 .../eigen/lapack/eigenvalues.cpp              |    0
 .../eigen/lapack/lapack_common.h              |    0
 .../eigen/lapack/lu.cpp                       |    0
 .../eigen/lapack/single.cpp                   |    0
 .../eigen/scripts/CMakeLists.txt              |    0
 .../eigen/scripts/buildtests.in               |    0
 .../eigen/scripts/check.in                    |    0
 .../eigen/scripts/debug.in                    |    0
 .../eigen/scripts/eigen_gen_credits.cpp       |    0
 .../3rdParty/eigen/scripts/eigen_gen_docs     |   22 +
 .../eigen/scripts/release.in                  |    0
 .../eigen/scripts/relicense.py                |    0
 .../eigen/signature_of_eigen3_matrix_library  |    0
 resources/3rdParty/eigen/test/CMakeLists.txt  |  243 +++
 .../eigen/test/adjoint.cpp                    |    0
 .../eigen/test/array.cpp                      |    0
 .../eigen/test/array_for_matrix.cpp           |    0
 .../eigen/test/array_replicate.cpp            |    0
 .../eigen/test/array_reverse.cpp              |    0
 .../eigen/test/bandmatrix.cpp                 |    0
 .../eigen/test/basicstuff.cpp                 |    0
 .../eigen/test/bicgstab.cpp                   |    0
 .../eigen/test/block.cpp                      |    0
 resources/3rdParty/eigen/test/cholesky.cpp    |  310 ++++
 .../eigen/test/cholmod_support.cpp            |    0
 .../eigen/test/commainitializer.cpp           |    0
 .../eigen/test/conjugate_gradient.cpp         |    0
 .../eigen/test/conservative_resize.cpp        |    0
 .../eigen/test/corners.cpp                    |    0
 .../eigen/test/cwiseop.cpp                    |    0
 .../eigen/test/determinant.cpp                |    0
 resources/3rdParty/eigen/test/diagonal.cpp    |   83 +
 .../eigen/test/diagonalmatrices.cpp           |    0
 .../eigen/test/dontalign.cpp                  |    0
 .../eigen/test/dynalloc.cpp                   |    0
 .../eigen/test/eigen2/CMakeLists.txt          |    0
 .../eigen/test/eigen2/eigen2_adjoint.cpp      |    0
 .../eigen/test/eigen2/eigen2_alignedbox.cpp   |    0
 .../eigen/test/eigen2/eigen2_array.cpp        |    0
 .../eigen/test/eigen2/eigen2_basicstuff.cpp   |    0
 .../eigen/test/eigen2/eigen2_bug_132.cpp      |    0
 .../eigen/test/eigen2/eigen2_cholesky.cpp     |    0
 .../test/eigen2/eigen2_commainitializer.cpp   |    0
 .../eigen/test/eigen2/eigen2_cwiseop.cpp      |    0
 .../eigen/test/eigen2/eigen2_determinant.cpp  |    0
 .../eigen/test/eigen2/eigen2_dynalloc.cpp     |    0
 .../eigen/test/eigen2/eigen2_eigensolver.cpp  |    0
 .../test/eigen2/eigen2_first_aligned.cpp      |    0
 .../eigen/test/eigen2/eigen2_geometry.cpp     |    0
 .../eigen2_geometry_with_eigen2_prefix.cpp    |    0
 .../eigen/test/eigen2/eigen2_hyperplane.cpp   |    0
 .../eigen/test/eigen2/eigen2_inverse.cpp      |    0
 .../test/eigen2/eigen2_linearstructure.cpp    |    0
 .../eigen/test/eigen2/eigen2_lu.cpp           |    0
 .../eigen/test/eigen2/eigen2_map.cpp          |    0
 .../eigen/test/eigen2/eigen2_meta.cpp         |    0
 .../eigen/test/eigen2/eigen2_miscmatrices.cpp |    0
 .../eigen/test/eigen2/eigen2_mixingtypes.cpp  |    0
 .../eigen/test/eigen2/eigen2_newstdvector.cpp |    0
 .../eigen/test/eigen2/eigen2_nomalloc.cpp     |    0
 .../eigen/test/eigen2/eigen2_packetmath.cpp   |    0
 .../test/eigen2/eigen2_parametrizedline.cpp   |    0
 .../test/eigen2/eigen2_prec_inverse_4x4.cpp   |    0
 .../test/eigen2/eigen2_product_large.cpp      |    0
 .../test/eigen2/eigen2_product_small.cpp      |    0
 .../eigen/test/eigen2/eigen2_qr.cpp           |    0
 .../eigen/test/eigen2/eigen2_qtvector.cpp     |    0
 .../eigen/test/eigen2/eigen2_regression.cpp   |    0
 .../eigen/test/eigen2/eigen2_sizeof.cpp       |    0
 .../eigen/test/eigen2/eigen2_smallvectors.cpp |    0
 .../eigen/test/eigen2/eigen2_sparse_basic.cpp |    0
 .../test/eigen2/eigen2_sparse_product.cpp     |    0
 .../test/eigen2/eigen2_sparse_solvers.cpp     |    0
 .../test/eigen2/eigen2_sparse_vector.cpp      |    0
 .../eigen/test/eigen2/eigen2_stdvector.cpp    |    0
 .../eigen/test/eigen2/eigen2_submatrices.cpp  |    0
 .../eigen/test/eigen2/eigen2_sum.cpp          |    0
 .../eigen/test/eigen2/eigen2_svd.cpp          |    0
 .../eigen/test/eigen2/eigen2_swap.cpp         |    0
 .../eigen/test/eigen2/eigen2_triangular.cpp   |    0
 .../test/eigen2/eigen2_unalignedassert.cpp    |    0
 .../eigen/test/eigen2/eigen2_visitor.cpp      |    0
 .../eigen/test/eigen2/gsl_helper.h            |    0
 .../eigen/test/eigen2/main.h                  |    0
 .../eigen/test/eigen2/product.h               |    0
 .../eigen/test/eigen2/runtest.sh              |    0
 .../eigen/test/eigen2/sparse.h                |    0
 .../eigen/test/eigen2/testsuite.cmake         |    0
 .../eigen/test/eigen2support.cpp              |    0
 .../eigen/test/eigensolver_complex.cpp        |  115 ++
 .../eigen/test/eigensolver_generic.cpp        |  115 ++
 .../eigen/test/eigensolver_selfadjoint.cpp    |    0
 .../eigen/test/exceptions.cpp                 |    0
 .../eigen/test/first_aligned.cpp              |    0
 .../eigen/test/geo_alignedbox.cpp             |    0
 .../eigen/test/geo_eulerangles.cpp            |    0
 .../eigen/test/geo_homogeneous.cpp            |    0
 .../eigen/test/geo_hyperplane.cpp             |    0
 .../eigen/test/geo_orthomethods.cpp           |    0
 .../eigen/test/geo_parametrizedline.cpp       |    0
 .../eigen/test/geo_quaternion.cpp             |    0
 .../eigen/test/geo_transformations.cpp        |    0
 .../eigen/test/hessenberg.cpp                 |    0
 .../eigen/test/householder.cpp                |    0
 .../eigen/test/integer_types.cpp              |    0
 .../eigen/test/inverse.cpp                    |    0
 .../eigen/test/jacobi.cpp                     |    0
 .../eigen/test/jacobisvd.cpp                  |    0
 .../eigen/test/linearstructure.cpp            |    0
 .../{3rdparty => 3rdParty}/eigen/test/lu.cpp  |    0
 .../{3rdparty => 3rdParty}/eigen/test/main.h  |    0
 .../{3rdparty => 3rdParty}/eigen/test/map.cpp |    0
 .../eigen/test/mapstaticmethods.cpp           |    0
 .../eigen/test/mapstride.cpp                  |    0
 .../eigen/test/meta.cpp                       |    0
 .../eigen/test/miscmatrices.cpp               |    0
 .../eigen/test/mixingtypes.cpp                |    0
 .../eigen/test/nesting_ops.cpp                |    0
 .../eigen/test/nomalloc.cpp                   |    0
 .../eigen/test/nullary.cpp                    |    0
 .../eigen/test/packetmath.cpp                 |    0
 .../eigen/test/pardiso_support.cpp            |    0
 .../eigen/test/pastix_support.cpp             |    0
 .../eigen/test/permutationmatrices.cpp        |    0
 .../eigen/test/prec_inverse_4x4.cpp           |    0
 .../eigen/test/product.h                      |    0
 .../eigen/test/product_extra.cpp              |    0
 .../eigen/test/product_large.cpp              |    0
 .../eigen/test/product_mmtr.cpp               |    0
 .../eigen/test/product_notemporary.cpp        |    0
 .../eigen/test/product_selfadjoint.cpp        |    0
 .../eigen/test/product_small.cpp              |    0
 .../eigen/test/product_symm.cpp               |    0
 .../eigen/test/product_syrk.cpp               |    0
 .../eigen/test/product_trmm.cpp               |    0
 .../eigen/test/product_trmv.cpp               |    0
 .../eigen/test/product_trsolve.cpp            |    0
 .../{3rdparty => 3rdParty}/eigen/test/qr.cpp  |    0
 .../eigen/test/qr_colpivoting.cpp             |    0
 .../eigen/test/qr_fullpivoting.cpp            |    0
 .../eigen/test/qtvector.cpp                   |    0
 .../eigen/test/redux.cpp                      |    0
 .../eigen/test/resize.cpp                     |    0
 .../eigen/test/runtest.sh                     |    0
 .../3rdParty/eigen/test/schur_complex.cpp     |   74 +
 resources/3rdParty/eigen/test/schur_real.cpp  |   93 ++
 .../eigen/test/selfadjoint.cpp                |    0
 .../eigen/test/simplicial_cholesky.cpp        |    0
 .../eigen/test/sizeof.cpp                     |    0
 .../eigen/test/sizeoverflow.cpp               |    0
 .../eigen/test/smallvectors.cpp               |    0
 .../eigen/test/sparse.h                       |    0
 .../3rdParty/eigen/test/sparse_basic.cpp      |  401 +++++
 .../eigen/test/sparse_permutations.cpp        |    0
 .../eigen/test/sparse_product.cpp             |    0
 resources/3rdParty/eigen/test/sparse_solver.h |  309 ++++
 .../eigen/test/sparse_solvers.cpp             |    0
 .../eigen/test/sparse_vector.cpp              |    0
 .../eigen/test/stable_norm.cpp                |    0
 .../eigen/test/stddeque.cpp                   |    0
 .../eigen/test/stdlist.cpp                    |    0
 .../eigen/test/stdvector.cpp                  |    0
 .../eigen/test/stdvector_overload.cpp         |    0
 .../eigen/test/superlu_support.cpp            |    0
 .../eigen/test/swap.cpp                       |    0
 .../eigen/test/testsuite.cmake                |    0
 .../eigen/test/triangular.cpp                 |    0
 .../eigen/test/umeyama.cpp                    |    0
 .../eigen/test/umfpack_support.cpp            |    0
 .../eigen/test/unalignedassert.cpp            |    0
 .../eigen/test/unalignedcount.cpp             |    0
 .../eigen/test/upperbidiagonalization.cpp     |    0
 .../eigen/test/vectorization_logic.cpp        |    0
 .../eigen/test/vectorwiseop.cpp               |    0
 .../eigen/test/visitor.cpp                    |    0
 .../eigen/test/zerosized.cpp                  |    0
 .../eigen/unsupported/CMakeLists.txt          |    0
 .../eigen/unsupported/Eigen/AdolcForward      |    0
 .../eigen/unsupported/Eigen/AlignedVector3    |    0
 .../eigen/unsupported/Eigen/AutoDiff          |    0
 .../eigen/unsupported/Eigen/BVH               |    0
 .../eigen/unsupported/Eigen/CMakeLists.txt    |    0
 .../eigen/unsupported/Eigen/FFT               |    0
 .../eigen/unsupported/Eigen/IterativeSolvers  |   40 +
 .../eigen/unsupported/Eigen/KroneckerProduct  |    0
 .../eigen/unsupported/Eigen/MPRealSupport     |    0
 .../eigen/unsupported/Eigen/MatrixFunctions   |  380 +++++
 .../eigen/unsupported/Eigen/MoreVectorization |    0
 .../unsupported/Eigen/NonLinearOptimization   |    0
 .../eigen/unsupported/Eigen/NumericalDiff     |    0
 .../eigen/unsupported/Eigen/OpenGLSupport     |    0
 .../eigen/unsupported/Eigen/Polynomials       |    0
 .../eigen/unsupported/Eigen/Skyline           |    0
 .../eigen/unsupported/Eigen/SparseExtra       |    0
 .../eigen/unsupported/Eigen/Splines           |    0
 .../Eigen/src/AutoDiff/AutoDiffJacobian.h     |    0
 .../Eigen/src/AutoDiff/AutoDiffScalar.h       |    0
 .../Eigen/src/AutoDiff/AutoDiffVector.h       |    0
 .../Eigen/src/AutoDiff/CMakeLists.txt         |    0
 .../unsupported/Eigen/src/BVH/BVAlgorithms.h  |    0
 .../unsupported/Eigen/src/BVH/CMakeLists.txt  |    0
 .../eigen/unsupported/Eigen/src/BVH/KdBVH.h   |    0
 .../unsupported/Eigen/src/CMakeLists.txt      |    0
 .../unsupported/Eigen/src/FFT/CMakeLists.txt  |    0
 .../unsupported/Eigen/src/FFT/ei_fftw_impl.h  |    0
 .../Eigen/src/FFT/ei_kissfft_impl.h           |    0
 .../Eigen/src/IterativeSolvers/CMakeLists.txt |    0
 .../IterativeSolvers/ConstrainedConjGrad.h    |    0
 .../Eigen/src/IterativeSolvers/GMRES.h        |    0
 .../Eigen/src/IterativeSolvers/IncompleteLU.h |    0
 .../IterativeSolvers/IterationController.h    |    0
 .../Eigen/src/IterativeSolvers/Scaling.h      |    0
 .../Eigen/src/KroneckerProduct/CMakeLists.txt |    0
 .../KroneckerProduct/KroneckerTensorProduct.h |    0
 .../Eigen/src/MatrixFunctions/CMakeLists.txt  |    0
 .../src/MatrixFunctions/MatrixExponential.h   |  454 ++++++
 .../src/MatrixFunctions/MatrixFunction.h      |  590 +++++++
 .../MatrixFunctions/MatrixFunctionAtomic.h    |    0
 .../src/MatrixFunctions/MatrixLogarithm.h     |  495 ++++++
 .../src/MatrixFunctions/MatrixSquareRoot.h    |  484 ++++++
 .../Eigen/src/MatrixFunctions/StemFunction.h  |    0
 .../src/MoreVectorization/CMakeLists.txt      |    0
 .../src/MoreVectorization/MathFunctions.h     |    0
 .../src/NonLinearOptimization/CMakeLists.txt  |    0
 .../HybridNonLinearSolver.h                   |    0
 .../LevenbergMarquardt.h                      |    0
 .../Eigen/src/NonLinearOptimization/chkder.h  |    0
 .../Eigen/src/NonLinearOptimization/covar.h   |    0
 .../Eigen/src/NonLinearOptimization/dogleg.h  |    0
 .../Eigen/src/NonLinearOptimization/fdjac1.h  |    0
 .../Eigen/src/NonLinearOptimization/lmpar.h   |    0
 .../Eigen/src/NonLinearOptimization/qrsolv.h  |    0
 .../Eigen/src/NonLinearOptimization/r1mpyq.h  |    0
 .../Eigen/src/NonLinearOptimization/r1updt.h  |    0
 .../Eigen/src/NonLinearOptimization/rwupdt.h  |    0
 .../Eigen/src/NumericalDiff/CMakeLists.txt    |    0
 .../Eigen/src/NumericalDiff/NumericalDiff.h   |    0
 .../Eigen/src/Polynomials/CMakeLists.txt      |    0
 .../Eigen/src/Polynomials/Companion.h         |    0
 .../Eigen/src/Polynomials/PolynomialSolver.h  |    0
 .../Eigen/src/Polynomials/PolynomialUtils.h   |    0
 .../Eigen/src/Skyline/CMakeLists.txt          |    0
 .../Eigen/src/Skyline/SkylineInplaceLU.h      |    0
 .../Eigen/src/Skyline/SkylineMatrix.h         |    0
 .../Eigen/src/Skyline/SkylineMatrixBase.h     |    0
 .../Eigen/src/Skyline/SkylineProduct.h        |    0
 .../Eigen/src/Skyline/SkylineStorage.h        |    0
 .../Eigen/src/Skyline/SkylineUtil.h           |    0
 .../SparseExtra/BlockOfDynamicSparseMatrix.h  |    0
 .../Eigen/src/SparseExtra/CMakeLists.txt      |    0
 .../src/SparseExtra/DynamicSparseMatrix.h     |    0
 .../Eigen/src/SparseExtra/MarketIO.h          |    0
 .../src/SparseExtra/MatrixMarketIterator.h    |  221 +++
 .../Eigen/src/SparseExtra/RandomSetter.h      |    0
 .../Eigen/src/Splines/CMakeLists.txt          |    0
 .../unsupported/Eigen/src/Splines/Spline.h    |  464 ++++++
 .../Eigen/src/Splines/SplineFitting.h         |    0
 .../unsupported/Eigen/src/Splines/SplineFwd.h |    0
 .../eigen/unsupported/README.txt              |    0
 .../eigen/unsupported/doc/CMakeLists.txt      |    0
 .../eigen/unsupported/doc/Doxyfile.in         |    0
 .../eigen/unsupported/doc/Overview.dox        |    0
 .../unsupported/doc/examples/BVH_Example.cpp  |    0
 .../unsupported/doc/examples/CMakeLists.txt   |    0
 .../eigen/unsupported/doc/examples/FFT.cpp    |    0
 .../doc/examples/MatrixExponential.cpp        |    0
 .../doc/examples/MatrixFunction.cpp           |    0
 .../doc/examples/MatrixLogarithm.cpp          |    0
 .../unsupported/doc/examples/MatrixSine.cpp   |    0
 .../unsupported/doc/examples/MatrixSinh.cpp   |    0
 .../doc/examples/MatrixSquareRoot.cpp         |    0
 .../doc/examples/PolynomialSolver1.cpp        |    0
 .../doc/examples/PolynomialUtils1.cpp         |    0
 .../unsupported/doc/snippets/CMakeLists.txt   |    0
 .../eigen/unsupported/test/BVH.cpp            |    0
 .../eigen/unsupported/test/CMakeLists.txt     |   87 +
 .../eigen/unsupported/test/FFT.cpp            |    0
 .../eigen/unsupported/test/FFTW.cpp           |    0
 .../test/NonLinearOptimization.cpp            |    0
 .../eigen/unsupported/test/NumericalDiff.cpp  |    0
 .../eigen/unsupported/test/alignedvector3.cpp |    0
 .../eigen/unsupported/test/autodiff.cpp       |    0
 .../eigen/unsupported/test/forward_adolc.cpp  |    0
 .../eigen/unsupported/test/gmres.cpp          |    0
 .../unsupported/test/kronecker_product.cpp    |    0
 .../unsupported/test/matrix_exponential.cpp   |  149 ++
 .../unsupported/test/matrix_function.cpp      |    0
 .../unsupported/test/matrix_square_root.cpp   |   62 +
 .../eigen/unsupported/test/mpreal/dlmalloc.c  |    0
 .../eigen/unsupported/test/mpreal/dlmalloc.h  |    0
 .../eigen/unsupported/test/mpreal/mpreal.cpp  |    0
 .../eigen/unsupported/test/mpreal/mpreal.h    |    0
 .../eigen/unsupported/test/mpreal_support.cpp |    0
 .../eigen/unsupported/test/openglsupport.cpp  |    0
 .../unsupported/test/polynomialsolver.cpp     |    0
 .../unsupported/test/polynomialutils.cpp      |    0
 .../eigen/unsupported/test/sparse_extra.cpp   |    0
 .../eigen/unsupported/test/splines.cpp        |    0
 resources/3rdparty/eigen/.hg_archival.txt     |    5 -
 resources/3rdparty/eigen/.hgtags              |   22 -
 resources/3rdparty/eigen/Eigen/Core           |  380 -----
 resources/3rdparty/eigen/Eigen/Eigenvalues    |   48 -
 .../3rdparty/eigen/Eigen/OrderingMethods      |   23 -
 .../3rdparty/eigen/Eigen/src/Cholesky/LDLT.h  |  599 -------
 .../Eigen/src/CholmodSupport/CholmodSupport.h |  599 -------
 .../3rdparty/eigen/Eigen/src/Core/Array.h     |  308 ----
 .../eigen/Eigen/src/Core/ArrayWrapper.h       |  254 ---
 .../eigen/Eigen/src/Core/Assign_MKL.h         |  224 ---
 .../3rdparty/eigen/Eigen/src/Core/Block.h     |  357 ----
 .../eigen/Eigen/src/Core/CommaInitializer.h   |  139 --
 .../eigen/Eigen/src/Core/CwiseBinaryOp.h      |  229 ---
 .../eigen/Eigen/src/Core/CwiseNullaryOp.h     |  864 ----------
 .../eigen/Eigen/src/Core/CwiseUnaryOp.h       |  126 --
 .../3rdparty/eigen/Eigen/src/Core/DenseBase.h |  533 ------
 .../eigen/Eigen/src/Core/DenseCoeffsBase.h    |  754 ---------
 .../eigen/Eigen/src/Core/DenseStorage.h       |  320 ----
 .../3rdparty/eigen/Eigen/src/Core/Diagonal.h  |  237 ---
 .../eigen/Eigen/src/Core/DiagonalMatrix.h     |  307 ----
 .../eigen/Eigen/src/Core/DiagonalProduct.h    |  123 --
 resources/3rdparty/eigen/Eigen/src/Core/Dot.h |  261 ---
 .../3rdparty/eigen/Eigen/src/Core/Functors.h  |  975 -----------
 .../3rdparty/eigen/Eigen/src/Core/Fuzzy.h     |  150 --
 .../eigen/Eigen/src/Core/GeneralProduct.h     |  613 -------
 resources/3rdparty/eigen/Eigen/src/Core/Map.h |  192 ---
 .../3rdparty/eigen/Eigen/src/Core/MapBase.h   |  242 ---
 .../eigen/Eigen/src/Core/MathFunctions.h      |  889 ----------
 .../eigen/Eigen/src/Core/MatrixBase.h         |  515 ------
 .../3rdparty/eigen/Eigen/src/Core/NoAlias.h   |  130 --
 .../eigen/Eigen/src/Core/PermutationMatrix.h  |  687 --------
 .../eigen/Eigen/src/Core/PlainObjectBase.h    |  776 ---------
 .../3rdparty/eigen/Eigen/src/Core/Product.h   |  107 --
 .../eigen/Eigen/src/Core/ProductBase.h        |  278 ----
 .../3rdparty/eigen/Eigen/src/Core/Random.h    |  152 --
 .../3rdparty/eigen/Eigen/src/Core/Replicate.h |  177 --
 .../3rdparty/eigen/Eigen/src/Core/Select.h    |  162 --
 .../3rdparty/eigen/Eigen/src/Core/Swap.h      |  126 --
 .../3rdparty/eigen/Eigen/src/Core/Transpose.h |  414 -----
 .../eigen/Eigen/src/Core/Transpositions.h     |  436 -----
 .../eigen/Eigen/src/Core/TriangularMatrix.h   |  828 ----------
 .../eigen/Eigen/src/Core/VectorBlock.h        |  284 ----
 .../3rdparty/eigen/Eigen/src/Core/Visitor.h   |  237 ---
 .../Eigen/src/Core/arch/NEON/PacketMath.h     |  407 -----
 .../Eigen/src/Core/arch/SSE/MathFunctions.h   |  460 ------
 .../Eigen/src/Core/arch/SSE/PacketMath.h      |  636 --------
 .../Core/products/GeneralBlockPanelKernel.h   | 1319 ---------------
 .../src/Core/products/GeneralMatrixVector.h   |  554 -------
 .../eigen/Eigen/src/Core/util/Constants.h     |  438 -----
 .../Eigen/src/Core/util/ForwardDeclarations.h |  300 ----
 .../eigen/Eigen/src/Core/util/Macros.h        |  410 -----
 .../eigen/Eigen/src/Core/util/Memory.h        |  952 -----------
 .../eigen/Eigen/src/Core/util/StaticAssert.h  |  206 ---
 .../eigen/Eigen/src/Core/util/XprHelper.h     |  468 ------
 .../src/Eigen2Support/Geometry/AlignedBox.h   |  159 --
 .../src/Eigen2Support/Geometry/AngleAxis.h    |  214 ---
 .../src/Eigen2Support/Geometry/Hyperplane.h   |  254 ---
 .../Eigen2Support/Geometry/ParametrizedLine.h |  141 --
 .../src/Eigen2Support/Geometry/Quaternion.h   |  495 ------
 .../src/Eigen2Support/Geometry/Rotation2D.h   |  145 --
 .../src/Eigen2Support/Geometry/RotationBase.h |  123 --
 .../src/Eigen2Support/Geometry/Scaling.h      |  167 --
 .../src/Eigen2Support/Geometry/Transform.h    |  786 ---------
 .../src/Eigen2Support/Geometry/Translation.h  |  184 ---
 .../Eigen/src/Eigen2Support/LeastSquares.h    |  170 --
 .../eigen/Eigen/src/Eigen2Support/SVD.h       |  638 --------
 .../src/Eigenvalues/ComplexEigenSolver.h      |  333 ----
 .../Eigen/src/Eigenvalues/ComplexSchur.h      |  426 -----
 .../eigen/Eigen/src/Eigenvalues/EigenSolver.h |  594 -------
 .../eigen/Eigen/src/Eigenvalues/RealSchur.h   |  492 ------
 .../eigen/Eigen/src/Geometry/AlignedBox.h     |  375 -----
 .../eigen/Eigen/src/Geometry/AngleAxis.h      |  230 ---
 .../eigen/Eigen/src/Geometry/Hyperplane.h     |  269 ---
 .../Eigen/src/Geometry/ParametrizedLine.h     |  195 ---
 .../eigen/Eigen/src/Geometry/Quaternion.h     |  778 ---------
 .../eigen/Eigen/src/Geometry/Rotation2D.h     |  154 --
 .../eigen/Eigen/src/Geometry/Scaling.h        |  166 --
 .../eigen/Eigen/src/Geometry/Transform.h      | 1440 -----------------
 .../eigen/Eigen/src/Geometry/Umeyama.h        |  177 --
 .../src/IterativeLinearSolvers/BiCGSTAB.h     |  256 ---
 .../IterativeLinearSolvers/IncompleteLUT.h    |  465 ------
 .../3rdparty/eigen/Eigen/src/Jacobi/Jacobi.h  |  424 -----
 .../Eigen/src/PardisoSupport/PardisoSupport.h |  614 -------
 .../eigen/Eigen/src/QR/HouseholderQR.h        |  351 ----
 .../3rdparty/eigen/Eigen/src/SVD/JacobiSVD.h  |  869 ----------
 .../Eigen/src/SparseCore/CompressedStorage.h  |  233 ---
 .../Eigen/src/SparseCore/SparseDenseProduct.h |  300 ----
 .../src/SparseCore/SparseDiagonalProduct.h    |  184 ---
 .../eigen/Eigen/src/SparseCore/SparseMatrix.h | 1209 --------------
 .../Eigen/src/SparseCore/SparseMatrixBase.h   |  458 ------
 .../Eigen/src/SparseCore/SparseProduct.h      |  186 ---
 .../src/SparseCore/SparseSelfAdjointView.h    |  480 ------
 .../SparseSparseProductWithPruning.h          |  149 --
 .../Eigen/src/SparseCore/SparseTranspose.h    |   61 -
 .../eigen/Eigen/src/SparseCore/SparseVector.h |  398 -----
 .../eigen/Eigen/src/SparseCore/SparseView.h   |   98 --
 .../Eigen/src/SuperLUSupport/SuperLUSupport.h | 1026 ------------
 resources/3rdparty/eigen/bench/bench_gemm.cpp |  271 ----
 .../eigen/bench/spbench/CMakeLists.txt        |   78 -
 .../eigen/bench/spbench/spbenchsolver.cpp     |   87 -
 .../eigen/bench/spbench/spbenchsolver.h       |  554 -------
 resources/3rdparty/eigen/blas/CMakeLists.txt  |   57 -
 resources/3rdparty/eigen/blas/common.h        |  145 --
 resources/3rdparty/eigen/blas/double.cpp      |   33 -
 .../3rdparty/eigen/blas/level2_cplx_impl.h    |  394 -----
 resources/3rdparty/eigen/blas/level2_impl.h   |  524 ------
 .../3rdparty/eigen/blas/level2_real_impl.h    |  370 -----
 resources/3rdparty/eigen/blas/level3_impl.h   |  634 --------
 resources/3rdparty/eigen/blas/single.cpp      |   22 -
 .../3rdparty/eigen/blas/testing/dblat1.f      | 1065 ------------
 .../3rdparty/eigen/blas/testing/sblat1.f      | 1021 ------------
 .../3rdparty/eigen/cmake/FindMetis.cmake      |   25 -
 .../eigen/demos/opengl/CMakeLists.txt         |   20 -
 .../3rdparty/eigen/doc/C09_TutorialSparse.dox |  455 ------
 .../3rdparty/eigen/doc/D01_StlContainers.dox  |   65 -
 .../3rdparty/eigen/doc/I02_HiPerformance.dox  |  128 --
 .../3rdparty/eigen/doc/I10_Assertions.dox     |  114 --
 .../3rdparty/eigen/scripts/eigen_gen_docs     |   22 -
 resources/3rdparty/eigen/test/CMakeLists.txt  |  246 ---
 resources/3rdparty/eigen/test/cholesky.cpp    |  324 ----
 resources/3rdparty/eigen/test/diagonal.cpp    |   80 -
 .../eigen/test/eigensolver_complex.cpp        |  126 --
 .../eigen/test/eigensolver_generic.cpp        |  126 --
 .../3rdparty/eigen/test/schur_complex.cpp     |   91 --
 resources/3rdparty/eigen/test/schur_real.cpp  |  112 --
 .../3rdparty/eigen/test/sparse_basic.cpp      |  436 -----
 resources/3rdparty/eigen/test/sparse_solver.h |  309 ----
 .../eigen/unsupported/Eigen/IterativeSolvers  |   41 -
 .../eigen/unsupported/Eigen/MatrixFunctions   |  446 -----
 .../src/MatrixFunctions/MatrixExponential.h   |  451 ------
 .../src/MatrixFunctions/MatrixFunction.h      |  590 -------
 .../src/MatrixFunctions/MatrixLogarithm.h     |  486 ------
 .../src/MatrixFunctions/MatrixSquareRoot.h    |  484 ------
 .../src/SparseExtra/MatrixMarketIterator.h    |  232 ---
 .../unsupported/Eigen/src/Splines/Spline.h    |  479 ------
 .../eigen/unsupported/test/CMakeLists.txt     |   88 -
 .../unsupported/test/matrix_exponential.cpp   |  141 --
 .../unsupported/test/matrix_square_root.cpp   |   31 -
 1388 files changed, 52220 insertions(+), 49501 deletions(-)
 create mode 100644 resources/3rdParty/eigen/.hg_archival.txt
 rename resources/{3rdparty => 3rdParty}/eigen/.hgeol (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/.hgignore (100%)
 create mode 100644 resources/3rdParty/eigen/.hgtags
 create mode 100644 resources/3rdParty/eigen/.krazy
 rename resources/{3rdparty => 3rdParty}/eigen/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/COPYING.BSD (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/COPYING.GPL (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/COPYING.LGPL (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/COPYING.MINPACK (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/COPYING.MPL2 (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/COPYING.README (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/CTestConfig.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/CTestCustom.cmake.in (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/Array (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/Cholesky (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/CholmodSupport (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/Core
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/Dense (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/Eigen (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/Eigen2Support (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/Eigenvalues
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/Geometry (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/Householder (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/IterativeLinearSolvers (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/Jacobi (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/LU (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/LeastSquares (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/OrderingMethods
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/PaStiXSupport (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/PardisoSupport (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/QR (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/QtAlignedMalloc (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/SVD (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/Sparse (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/SparseCholesky (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/SparseCore (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/StdDeque (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/StdList (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/StdVector (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/SuperLUSupport (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/UmfPackSupport (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Cholesky/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Cholesky/LDLT.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Cholesky/LLT.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Cholesky/LLT_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/CholmodSupport/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Array.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/ArrayBase.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/ArrayWrapper.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/Assign.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Assign_MKL.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/BandMatrix.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Block.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/BooleanRedux.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/CommaInitializer.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/CwiseBinaryOp.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/CwiseNullaryOp.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/CwiseUnaryOp.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/CwiseUnaryView.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/DenseBase.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/DenseCoeffsBase.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/DenseStorage.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Diagonal.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/DiagonalMatrix.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/DiagonalProduct.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Dot.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/EigenBase.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/Flagged.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/ForceAlignedAccess.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Functors.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Fuzzy.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/GeneralProduct.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/GenericPacketMath.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/GlobalFunctions.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/IO.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Map.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/MapBase.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/MathFunctions.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/Matrix.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/MatrixBase.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/NestByValue.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/NoAlias.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/NumTraits.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/PermutationMatrix.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/PlainObjectBase.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Product.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/ProductBase.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Random.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/Redux.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Replicate.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/ReturnByValue.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/Reverse.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Select.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/SelfAdjointView.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/SolveTriangular.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/StableNorm.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/Stride.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Swap.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Transpose.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Transpositions.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/TriangularMatrix.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/VectorBlock.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/VectorwiseOp.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/Visitor.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/arch/AltiVec/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/arch/AltiVec/Complex.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/arch/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/arch/Default/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/arch/Default/Settings.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/arch/NEON/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/arch/NEON/Complex.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/arch/NEON/PacketMath.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/arch/SSE/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/arch/SSE/Complex.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/PacketMath.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/CoeffBasedProduct.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixVector.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/GeneralMatrixVector_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/Parallelizer.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/SelfadjointProduct.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/TriangularMatrixVector.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/TriangularMatrixVector_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/products/TriangularSolverVector.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/util/BlasUtil.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/util/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/util/Constants.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/util/DisableStupidWarnings.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/util/ForwardDeclarations.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/util/MKL_support.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/util/Macros.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/util/Memory.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/util/Meta.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/util/NonMPL2.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/util/StaticAssert.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Core/util/XprHelper.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/Block.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/Cwise.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/CwiseOperators.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/Geometry/All.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Transform.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Translation.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/LU.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/Lazy.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/LeastSquares.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/Macros.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/MathFunctions.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/Memory.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/Meta.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/Minor.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/QR.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigen2Support/SVD.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/TriangularSolver.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigen2Support/VectorBlock.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigenvalues/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigenvalues/ComplexSchur.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigenvalues/EigenSolver.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Eigenvalues/RealSchur.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Geometry/AlignedBox.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Geometry/AngleAxis.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Geometry/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Geometry/EulerAngles.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Geometry/Homogeneous.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Geometry/Hyperplane.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Geometry/OrthoMethods.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Geometry/ParametrizedLine.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Geometry/Quaternion.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Geometry/Rotation2D.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Geometry/RotationBase.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Geometry/Scaling.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Geometry/Transform.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Geometry/Translation.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Geometry/Umeyama.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Geometry/arch/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Householder/BlockHouseholder.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Householder/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Householder/Householder.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Householder/HouseholderSequence.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/IterativeLinearSolvers/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/Jacobi/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/Jacobi/Jacobi.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/LU/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/LU/Determinant.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/LU/FullPivLU.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/LU/Inverse.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/LU/PartialPivLU.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/LU/PartialPivLU_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/LU/arch/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/LU/arch/Inverse_SSE.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/OrderingMethods/Amd.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/OrderingMethods/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/PaStiXSupport/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/PardisoSupport/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/PardisoSupport/PardisoSupport.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/QR/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/QR/ColPivHouseholderQR.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/QR/FullPivHouseholderQR.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/QR/HouseholderQR.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/QR/HouseholderQR_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SVD/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SVD/JacobiSVD.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SVD/JacobiSVD_MKL.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SVD/UpperBidiagonalization.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCholesky/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/AmbiVector.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SparseCore/CompressedStorage.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/CoreIterators.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/SparseAssign.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/SparseBlock.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SparseCore/SparseDenseProduct.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/SparseDot.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/SparseFuzzy.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SparseCore/SparseMatrix.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SparseCore/SparseMatrixBase.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/SparsePermutation.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SparseCore/SparseProduct.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/SparseRedux.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SparseCore/SparseTranspose.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/SparseTriangularView.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/SparseUtil.h (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SparseCore/SparseVector.h
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SparseCore/SparseView.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SparseCore/TriangularSolver.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/StlSupport/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/StlSupport/StdDeque.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/StlSupport/StdList.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/StlSupport/StdVector.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/StlSupport/details.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/SuperLUSupport/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/UmfPackSupport/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/misc/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/misc/Image.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/misc/Kernel.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/misc/Solve.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/misc/SparseSolve.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/misc/blas.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/plugins/BlockMethods.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/plugins/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/INSTALL (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/BenchSparseUtil.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/BenchTimer.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/BenchUtil.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/README.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/basicbench.cxxlist (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/basicbenchmark.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/basicbenchmark.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/benchBlasGemm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/benchCholesky.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/benchEigenSolver.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/benchFFT.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/benchVecAdd.cpp (100%)
 create mode 100644 resources/3rdParty/eigen/bench/bench_gemm.cpp
 rename resources/{3rdparty => 3rdParty}/eigen/bench/bench_multi_compilers.sh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/bench_norm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/bench_reverse.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/bench_sum.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/bench_unrolling (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/benchmark.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/benchmarkSlice.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/benchmarkX.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/benchmarkXcwise.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/benchmark_suite (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/COPYING (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/README (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_aat_product.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_ata_product.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_atv_product.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_axpby.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_axpy.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_cholesky.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_ger.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_hessenberg.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_lu_decomp.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_lu_solve.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_matrix_matrix_product.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_matrix_matrix_product_bis.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_matrix_vector_product.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_partial_lu.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_rot.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_symv.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_syr2.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_trisolve.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_trisolve_matrix.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/action_trmm.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/actions/basic_actions.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/FindACML.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/FindATLAS.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/FindBlitz.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/FindCBLAS.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/FindGMM.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/FindGOTO.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/FindGOTO2.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/FindMKL.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/FindMTL4.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/FindPackageHandleStandardArgs.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/FindTvmet.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/cmake/MacroOptionalAddSubdirectory.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/action_settings.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/gnuplot_common_settings.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/go_mean (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/mean.cxx (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/mk_gnuplot_script.sh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/mk_mean_script.sh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/mk_new_gnuplot.sh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/perlib_plot_settings.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/regularize.cxx (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/smooth.cxx (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/data/smooth_all.sh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/bench.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/bench_parameter.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/btl.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/init/init_function.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/init/init_matrix.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/init/init_vector.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/static/bench_static.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/static/intel_bench_fixed_size.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/static/static_size_generator.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/timers/STL_perf_analyzer.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/timers/STL_timer.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/timers/mixed_perf_analyzer.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/timers/portable_perf_analyzer.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/timers/portable_perf_analyzer_old.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/timers/portable_timer.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/timers/x86_perf_analyzer.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/timers/x86_timer.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/utils/size_lin_log.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/utils/size_log.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/utils/utilities.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/generic_bench/utils/xy_file.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/BLAS/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/BLAS/blas.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/BLAS/blas_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/BLAS/blas_interface_impl.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/BLAS/c_interface_base.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/BLAS/main.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/STL/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/STL/STL_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/STL/main.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/blitz/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/blitz/blitz_LU_solve_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/blitz/blitz_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/blitz/btl_blitz.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/blitz/btl_tiny_blitz.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/blitz/tiny_blitz_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen2/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen2/btl_tiny_eigen2.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen2/eigen2_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen2/main_adv.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen2/main_linear.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen2/main_matmat.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen2/main_vecmat.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen3/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen3/btl_tiny_eigen3.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen3/eigen3_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen3/main_adv.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen3/main_linear.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen3/main_matmat.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/eigen3/main_vecmat.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/gmm/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/gmm/gmm_LU_solve_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/gmm/gmm_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/gmm/main.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/mtl4/.kdbgrc.main (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/mtl4/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/mtl4/main.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/mtl4/mtl4_LU_solve_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/mtl4/mtl4_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/tvmet/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/tvmet/main.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/tvmet/tvmet_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/ublas/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/ublas/main.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/btl/libs/ublas/ublas_interface.hh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/check_cache_queries.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/eig33.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/geometry.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/product_threshold.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/quat_slerp.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/quatmul.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/sparse_cholesky.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/sparse_dense_product.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/sparse_lu.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/sparse_product.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/sparse_randomsetter.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/sparse_setter.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/sparse_transpose.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/sparse_trisolver.cpp (100%)
 create mode 100644 resources/3rdParty/eigen/bench/spbench/CMakeLists.txt
 create mode 100644 resources/3rdParty/eigen/bench/spbench/spbenchsolver.cpp
 create mode 100644 resources/3rdParty/eigen/bench/spbench/spbenchsolver.h
 rename resources/{3rdparty => 3rdParty}/eigen/bench/spmv.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/bench/vdw_new.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/BandTriangularSolver.h (100%)
 create mode 100644 resources/3rdParty/eigen/blas/CMakeLists.txt
 rename resources/{3rdparty => 3rdParty}/eigen/blas/README.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/chbmv.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/chpmv.f (100%)
 create mode 100644 resources/3rdParty/eigen/blas/chpr.f
 create mode 100644 resources/3rdParty/eigen/blas/chpr2.f
 create mode 100644 resources/3rdParty/eigen/blas/common.h
 rename resources/{3rdparty => 3rdParty}/eigen/blas/complex_double.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/complex_single.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/complexdots.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/ctbmv.f (100%)
 create mode 100644 resources/3rdParty/eigen/blas/ctpmv.f
 create mode 100644 resources/3rdParty/eigen/blas/ctpsv.f
 create mode 100644 resources/3rdParty/eigen/blas/double.cpp
 rename resources/{3rdparty => 3rdParty}/eigen/blas/drotm.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/drotmg.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/dsbmv.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/dspmv.f (100%)
 create mode 100644 resources/3rdParty/eigen/blas/dspr.f
 create mode 100644 resources/3rdParty/eigen/blas/dspr2.f
 rename resources/{3rdparty => 3rdParty}/eigen/blas/dtbmv.f (100%)
 create mode 100644 resources/3rdParty/eigen/blas/dtpmv.f
 create mode 100644 resources/3rdParty/eigen/blas/dtpsv.f
 rename resources/{3rdparty => 3rdParty}/eigen/blas/level1_cplx_impl.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/level1_impl.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/level1_real_impl.h (100%)
 create mode 100644 resources/3rdParty/eigen/blas/level2_cplx_impl.h
 create mode 100644 resources/3rdParty/eigen/blas/level2_impl.h
 create mode 100644 resources/3rdParty/eigen/blas/level2_real_impl.h
 create mode 100644 resources/3rdParty/eigen/blas/level3_impl.h
 rename resources/{3rdparty => 3rdParty}/eigen/blas/lsame.f (100%)
 create mode 100644 resources/3rdParty/eigen/blas/single.cpp
 rename resources/{3rdparty => 3rdParty}/eigen/blas/srotm.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/srotmg.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/ssbmv.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/sspmv.f (100%)
 create mode 100644 resources/3rdParty/eigen/blas/sspr.f
 create mode 100644 resources/3rdParty/eigen/blas/sspr2.f
 rename resources/{3rdparty => 3rdParty}/eigen/blas/stbmv.f (100%)
 create mode 100644 resources/3rdParty/eigen/blas/stpmv.f
 create mode 100644 resources/3rdParty/eigen/blas/stpsv.f
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/cblat1.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/cblat2.dat (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/cblat2.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/cblat3.dat (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/cblat3.f (100%)
 create mode 100644 resources/3rdParty/eigen/blas/testing/dblat1.f
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/dblat2.dat (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/dblat2.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/dblat3.dat (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/dblat3.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/runblastest.sh (100%)
 create mode 100644 resources/3rdParty/eigen/blas/testing/sblat1.f
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/sblat2.dat (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/sblat2.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/sblat3.dat (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/sblat3.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/zblat1.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/zblat2.dat (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/zblat2.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/zblat3.dat (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/testing/zblat3.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/xerbla.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/zhbmv.f (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/blas/zhpmv.f (100%)
 create mode 100644 resources/3rdParty/eigen/blas/zhpr.f
 create mode 100644 resources/3rdParty/eigen/blas/zhpr2.f
 rename resources/{3rdparty => 3rdParty}/eigen/blas/ztbmv.f (100%)
 create mode 100644 resources/3rdParty/eigen/blas/ztpmv.f
 create mode 100644 resources/3rdParty/eigen/blas/ztpsv.f
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/CMakeDetermineVSServicePack.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/EigenConfigureTesting.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/EigenDetermineOSVersion.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/EigenTesting.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindAdolc.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindBLAS.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindCholmod.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindEigen2.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindEigen3.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindFFTW.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindGLEW.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindGMP.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindGSL.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindGoogleHash.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindLAPACK.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindMPFR.cmake (100%)
 create mode 100644 resources/3rdParty/eigen/cmake/FindMetis.cmake
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindPastix.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindScotch.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindStandardMathLibrary.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindSuperLU.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/FindUmfpack.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/RegexUtils.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/cmake/language_support.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/debug/gdb/__init__.py (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/debug/gdb/printers.py (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/debug/msvc/eigen_autoexp_part.dat (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/mandelbrot/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/mandelbrot/README (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/mandelbrot/mandelbrot.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/mandelbrot/mandelbrot.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/mix_eigen_and_c/README (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/mix_eigen_and_c/binary_library.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/mix_eigen_and_c/binary_library.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/mix_eigen_and_c/example.c (100%)
 create mode 100644 resources/3rdParty/eigen/demos/opengl/CMakeLists.txt
 rename resources/{3rdparty => 3rdParty}/eigen/demos/opengl/README (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/opengl/camera.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/opengl/camera.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/opengl/gpuhelper.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/opengl/gpuhelper.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/opengl/icosphere.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/opengl/icosphere.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/opengl/quaternion_demo.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/opengl/quaternion_demo.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/opengl/trackball.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/demos/opengl/trackball.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/A05_PortingFrom2To3.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/A10_Eigen2SupportModes.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/AsciiQuickReference.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/B01_Experimental.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/C00_QuickStartGuide.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/C01_TutorialMatrixClass.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/C02_TutorialMatrixArithmetic.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/C03_TutorialArrayClass.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/C04_TutorialBlockOperations.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/C05_TutorialAdvancedInitialization.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/C06_TutorialLinearAlgebra.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/C08_TutorialGeometry.dox (100%)
 create mode 100644 resources/3rdParty/eigen/doc/C09_TutorialSparse.dox
 rename resources/{3rdparty => 3rdParty}/eigen/doc/C10_TutorialMapClass.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/doc/D01_StlContainers.dox
 rename resources/{3rdparty => 3rdParty}/eigen/doc/D03_WrongStackAlignment.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/D07_PassingByValue.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/D09_StructHavingEigenMembers.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/D11_UnalignedArrayAssert.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/Doxyfile.in (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/Eigen_Silly_Professor_64x64.png (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I00_CustomizingEigen.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I01_TopicLazyEvaluation.dox (100%)
 create mode 100644 resources/3rdParty/eigen/doc/I02_HiPerformance.dox
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I03_InsideEigenExample.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I05_FixedSizeVectorizable.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I06_TopicEigenExpressionTemplates.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I07_TopicScalarTypes.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I08_Resizing.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I09_Vectorization.dox (100%)
 create mode 100644 resources/3rdParty/eigen/doc/I10_Assertions.dox
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I11_Aliasing.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I12_ClassHierarchy.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I13_FunctionsTakingEigenTypes.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I14_PreprocessorDirectives.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I15_StorageOrders.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/I16_TemplateKeyword.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/Overview.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/QuickReference.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/SparseQuickReference.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/TopicLinearAlgebraDecompositions.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/TopicMultithreading.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/TutorialSparse_example_details.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/UsingIntelMKL.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/eigendoxy.css (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/eigendoxy_footer.html.in (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/eigendoxy_header.html.in (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/eigendoxy_tabs.css (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/.krazy (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/DenseBase_middleCols_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/DenseBase_middleRows_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/DenseBase_template_int_middleCols.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/DenseBase_template_int_middleRows.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/MatrixBase_cwise_const.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/QuickStart_example.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/QuickStart_example2_dynamic.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/QuickStart_example2_fixed.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/TemplateKeyword_flexible.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/TemplateKeyword_simple.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/TutorialLinAlgComputeTwice.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/TutorialLinAlgExComputeSolveError.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/TutorialLinAlgExSolveColPivHouseholderQR.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/TutorialLinAlgExSolveLDLT.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/TutorialLinAlgInverseDeterminant.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/TutorialLinAlgRankRevealing.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/TutorialLinAlgSVDSolve.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/TutorialLinAlgSelfAdjointEigenSolver.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/TutorialLinAlgSetThreshold.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ArrayClass_accessors.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ArrayClass_addition.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ArrayClass_cwise_other.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ArrayClass_interop.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ArrayClass_interop_matrix.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ArrayClass_mult.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_BlockOperations_block_assignment.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_BlockOperations_colrow.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_BlockOperations_corner.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_BlockOperations_print_block.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_BlockOperations_vector.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_PartialLU_solve.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_colwise.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_rowwise.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_simple_example_dynamic_size.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/Tutorial_simple_example_fixed_size.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/class_Block.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/class_CwiseBinaryOp.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/class_CwiseUnaryOp.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/class_CwiseUnaryOp_ptrfun.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/class_FixedBlock.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/class_FixedVectorBlock.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/class_VectorBlock.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/function_taking_eigenbase.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/tut_arithmetic_add_sub.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/tut_arithmetic_dot_cross.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/tut_arithmetic_matrix_mul.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/tut_arithmetic_redux_basic.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/tut_arithmetic_scalar_mul_div.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/tut_matrix_coefficient_accessors.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/tut_matrix_resize.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/examples/tut_matrix_resize_fixed_size.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/.krazy (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/AngleAxis_mimic_euler.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/ColPivHouseholderQR_solve.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/ComplexEigenSolver_compute.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/ComplexEigenSolver_eigenvalues.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/ComplexEigenSolver_eigenvectors.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/ComplexSchur_compute.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/ComplexSchur_matrixT.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/ComplexSchur_matrixU.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_abs.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_abs2.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_acos.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_boolean_and.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_boolean_or.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_cos.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_cube.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_equal_equal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_exp.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_greater.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_greater_equal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_inverse.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_less.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_less_equal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_log.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_max.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_min.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_minus.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_minus_equal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_not_equal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_plus.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_plus_equal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_pow.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_product.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_quotient.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_sin.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_slash_equal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_sqrt.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_square.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_tan.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Cwise_times_equal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/DenseBase_LinSpaced.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/DenseBase_LinSpaced_seq.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/DenseBase_setLinSpaced.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/DirectionWise_replicate.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/DirectionWise_replicate_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/EigenSolver_EigenSolver_MatrixType.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/EigenSolver_compute.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/EigenSolver_eigenvalues.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/EigenSolver_eigenvectors.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/EigenSolver_pseudoEigenvectors.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/FullPivHouseholderQR_solve.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/FullPivLU_image.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/FullPivLU_kernel.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/FullPivLU_solve.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/HessenbergDecomposition_compute.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/HessenbergDecomposition_matrixH.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/HessenbergDecomposition_packedMatrix.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/HouseholderQR_solve.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/HouseholderSequence_HouseholderSequence.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/IOFormat.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/JacobiSVD_basic.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Jacobi_makeGivens.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Jacobi_makeJacobi.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/LLT_example.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/LLT_solve.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Map_general_stride.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Map_inner_stride.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Map_outer_stride.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Map_placement_new.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Map_simple.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_adjoint.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_all.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_array.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_array_const.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_asDiagonal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_block_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_block_int_int_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_bottomLeftCorner_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_bottomRightCorner_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_bottomRows_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_cast.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_col.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_colwise.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_computeInverseAndDetWithCheck.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_computeInverseWithCheck.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_cwiseAbs.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_cwiseAbs2.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_cwiseEqual.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_cwiseInverse.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_cwiseMax.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_cwiseMin.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_cwiseNotEqual.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_cwiseProduct.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_cwiseQuotient.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_cwiseSqrt.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_diagonal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_diagonal_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_diagonal_template_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_eigenvalues.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_end_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_eval.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_extract.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_fixedBlock_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_identity.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_identity_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_inverse.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_isDiagonal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_isIdentity.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_isOnes.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_isOrthogonal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_isUnitary.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_isZero.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_leftCols_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_marked.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_noalias.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_ones.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_ones_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_ones_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_operatorNorm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_part.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_prod.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_random.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_random_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_random_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_replicate.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_replicate_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_reverse.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_rightCols_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_row.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_rowwise.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_segment_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_select.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_set.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_setIdentity.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_setOnes.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_setRandom.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_setZero.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_start_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_template_int_bottomRows.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_template_int_end.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_template_int_int_bottomLeftCorner.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_template_int_int_bottomRightCorner.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_template_int_int_topLeftCorner.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_template_int_int_topRightCorner.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_template_int_leftCols.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_template_int_rightCols.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_template_int_segment.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_template_int_start.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_template_int_topRows.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_topLeftCorner_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_topRightCorner_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_topRows_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_transpose.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_zero.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_zero_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/MatrixBase_zero_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_resize_NoChange_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_resize_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_resize_int_NoChange.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_resize_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_setConstant_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_setConstant_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_setIdentity_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_setOnes_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_setOnes_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_setRandom_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_setRandom_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_setZero_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Matrix_setZero_int_int.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/PartialPivLU_solve.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/PartialRedux_count.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/PartialRedux_maxCoeff.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/PartialRedux_minCoeff.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/PartialRedux_norm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/PartialRedux_prod.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/PartialRedux_squaredNorm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/PartialRedux_sum.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/RealSchur_RealSchur_MatrixType.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/RealSchur_compute.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType2.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvalues.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/SelfAdjointEigenSolver_operatorInverseSqrt.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/SelfAdjointEigenSolver_operatorSqrt.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/SelfAdjointView_eigenvalues.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/SelfAdjointView_operatorNorm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/TopicAliasing_block.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/TopicAliasing_block_correct.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/TopicAliasing_cwise.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/TopicAliasing_mult1.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/TopicAliasing_mult2.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/TopicAliasing_mult3.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/TopicStorageOrders_example.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tridiagonalization_Tridiagonalization_MatrixType.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tridiagonalization_compute.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tridiagonalization_decomposeInPlace.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tridiagonalization_diagonal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tridiagonalization_householderCoefficients.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tridiagonalization_packedMatrix.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_AdvancedInitialization_Block.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_AdvancedInitialization_CommaTemporary.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_AdvancedInitialization_Join.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_AdvancedInitialization_LinSpaced.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_AdvancedInitialization_ThreeWays.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_AdvancedInitialization_Zero.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_Map_rowmajor.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_Map_using.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_commainit_01.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_commainit_01b.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_commainit_02.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_solve_matrix_inverse.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_solve_multiple_rhs.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_solve_reuse_decomposition.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_solve_singular.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_solve_triangular.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Tutorial_solve_triangular_inplace.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/Vectorwise_reverse.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/class_FullPivLU.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/compile_snippet.cpp.in (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/tut_arithmetic_redux_minmax.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/tut_arithmetic_transpose_aliasing.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/tut_arithmetic_transpose_conjugate.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/tut_arithmetic_transpose_inplace.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/snippets/tut_matrix_assignment_resizing.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/special_examples/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/special_examples/Tutorial_sparse_example.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/special_examples/Tutorial_sparse_example_details.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/doc/tutorial.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/eigen3.pc.in (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/block_nonconst_ctor_on_const_xpr_0.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/block_nonconst_ctor_on_const_xpr_1.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/block_nonconst_ctor_on_const_xpr_2.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/block_on_const_type_actually_const_0.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/block_on_const_type_actually_const_1.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/const_qualified_block_method_retval_0.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/const_qualified_block_method_retval_1.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/const_qualified_diagonal_method_retval.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/const_qualified_transpose_method_retval.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/diagonal_nonconst_ctor_on_const_xpr.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/diagonal_on_const_type_actually_const.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/failtest_sanity_check.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/map_nonconst_ctor_on_const_ptr_0.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/map_nonconst_ctor_on_const_ptr_1.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/map_nonconst_ctor_on_const_ptr_2.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/map_nonconst_ctor_on_const_ptr_3.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/map_nonconst_ctor_on_const_ptr_4.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/map_on_const_type_actually_const_0.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/map_on_const_type_actually_const_1.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/transpose_nonconst_ctor_on_const_xpr.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/failtest/transpose_on_const_type_actually_const.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/lapack/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/lapack/cholesky.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/lapack/complex_double.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/lapack/complex_single.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/lapack/double.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/lapack/eigenvalues.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/lapack/lapack_common.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/lapack/lu.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/lapack/single.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/scripts/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/scripts/buildtests.in (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/scripts/check.in (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/scripts/debug.in (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/scripts/eigen_gen_credits.cpp (100%)
 create mode 100644 resources/3rdParty/eigen/scripts/eigen_gen_docs
 rename resources/{3rdparty => 3rdParty}/eigen/scripts/release.in (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/scripts/relicense.py (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/signature_of_eigen3_matrix_library (100%)
 create mode 100644 resources/3rdParty/eigen/test/CMakeLists.txt
 rename resources/{3rdparty => 3rdParty}/eigen/test/adjoint.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/array.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/array_for_matrix.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/array_replicate.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/array_reverse.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/bandmatrix.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/basicstuff.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/bicgstab.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/block.cpp (100%)
 create mode 100644 resources/3rdParty/eigen/test/cholesky.cpp
 rename resources/{3rdparty => 3rdParty}/eigen/test/cholmod_support.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/commainitializer.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/conjugate_gradient.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/conservative_resize.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/corners.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/cwiseop.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/determinant.cpp (100%)
 create mode 100644 resources/3rdParty/eigen/test/diagonal.cpp
 rename resources/{3rdparty => 3rdParty}/eigen/test/diagonalmatrices.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/dontalign.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/dynalloc.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_adjoint.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_alignedbox.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_array.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_basicstuff.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_bug_132.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_cholesky.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_commainitializer.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_cwiseop.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_determinant.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_dynalloc.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_eigensolver.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_first_aligned.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_geometry.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_geometry_with_eigen2_prefix.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_hyperplane.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_inverse.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_linearstructure.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_lu.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_map.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_meta.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_miscmatrices.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_mixingtypes.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_newstdvector.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_nomalloc.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_packetmath.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_parametrizedline.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_prec_inverse_4x4.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_product_large.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_product_small.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_qr.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_qtvector.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_regression.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_sizeof.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_smallvectors.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_sparse_basic.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_sparse_product.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_sparse_solvers.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_sparse_vector.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_stdvector.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_submatrices.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_sum.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_svd.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_swap.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_triangular.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_unalignedassert.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/eigen2_visitor.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/gsl_helper.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/main.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/product.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/runtest.sh (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/sparse.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2/testsuite.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigen2support.cpp (100%)
 create mode 100644 resources/3rdParty/eigen/test/eigensolver_complex.cpp
 create mode 100644 resources/3rdParty/eigen/test/eigensolver_generic.cpp
 rename resources/{3rdparty => 3rdParty}/eigen/test/eigensolver_selfadjoint.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/exceptions.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/first_aligned.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/geo_alignedbox.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/geo_eulerangles.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/geo_homogeneous.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/geo_hyperplane.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/geo_orthomethods.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/geo_parametrizedline.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/geo_quaternion.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/geo_transformations.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/hessenberg.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/householder.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/integer_types.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/inverse.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/jacobi.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/jacobisvd.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/linearstructure.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/lu.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/main.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/map.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/mapstaticmethods.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/mapstride.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/meta.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/miscmatrices.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/mixingtypes.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/nesting_ops.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/nomalloc.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/nullary.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/packetmath.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/pardiso_support.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/pastix_support.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/permutationmatrices.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/prec_inverse_4x4.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product_extra.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product_large.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product_mmtr.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product_notemporary.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product_selfadjoint.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product_small.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product_symm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product_syrk.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product_trmm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product_trmv.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/product_trsolve.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/qr.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/qr_colpivoting.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/qr_fullpivoting.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/qtvector.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/redux.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/resize.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/runtest.sh (100%)
 create mode 100644 resources/3rdParty/eigen/test/schur_complex.cpp
 create mode 100644 resources/3rdParty/eigen/test/schur_real.cpp
 rename resources/{3rdparty => 3rdParty}/eigen/test/selfadjoint.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/simplicial_cholesky.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/sizeof.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/sizeoverflow.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/smallvectors.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/sparse.h (100%)
 create mode 100644 resources/3rdParty/eigen/test/sparse_basic.cpp
 rename resources/{3rdparty => 3rdParty}/eigen/test/sparse_permutations.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/sparse_product.cpp (100%)
 create mode 100644 resources/3rdParty/eigen/test/sparse_solver.h
 rename resources/{3rdparty => 3rdParty}/eigen/test/sparse_solvers.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/sparse_vector.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/stable_norm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/stddeque.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/stdlist.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/stdvector.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/stdvector_overload.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/superlu_support.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/swap.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/testsuite.cmake (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/triangular.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/umeyama.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/umfpack_support.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/unalignedassert.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/unalignedcount.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/upperbidiagonalization.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/vectorization_logic.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/vectorwiseop.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/visitor.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/test/zerosized.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/AdolcForward (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/AlignedVector3 (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/AutoDiff (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/BVH (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/FFT (100%)
 create mode 100644 resources/3rdParty/eigen/unsupported/Eigen/IterativeSolvers
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/KroneckerProduct (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/MPRealSupport (100%)
 create mode 100644 resources/3rdParty/eigen/unsupported/Eigen/MatrixFunctions
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/MoreVectorization (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/NonLinearOptimization (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/NumericalDiff (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/OpenGLSupport (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/Polynomials (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/Skyline (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/SparseExtra (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/Splines (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/AutoDiff/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/BVH/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/BVH/KdBVH.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/FFT/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/FFT/ei_fftw_impl.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/IterativeSolvers/Scaling.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
 create mode 100644 resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h (100%)
 create mode 100644 resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
 create mode 100644 resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/MatrixFunctions/StemFunction.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/MoreVectorization/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/MoreVectorization/MathFunctions.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/chkder.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/covar.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/r1updt.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NumericalDiff/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Polynomials/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Polynomials/Companion.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Polynomials/PolynomialSolver.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Polynomials/PolynomialUtils.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Skyline/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Skyline/SkylineMatrix.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Skyline/SkylineProduct.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Skyline/SkylineUtil.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/SparseExtra/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h (100%)
 create mode 100644 resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Splines/CMakeLists.txt (100%)
 create mode 100644 resources/3rdParty/eigen/unsupported/Eigen/src/Splines/Spline.h
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Splines/SplineFitting.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/Eigen/src/Splines/SplineFwd.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/README.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/Doxyfile.in (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/Overview.dox (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/examples/BVH_Example.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/examples/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/examples/FFT.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/examples/MatrixExponential.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/examples/MatrixFunction.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/examples/MatrixLogarithm.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/examples/MatrixSine.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/examples/MatrixSinh.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/examples/MatrixSquareRoot.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/examples/PolynomialSolver1.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/examples/PolynomialUtils1.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/doc/snippets/CMakeLists.txt (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/BVH.cpp (100%)
 create mode 100644 resources/3rdParty/eigen/unsupported/test/CMakeLists.txt
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/FFT.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/FFTW.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/NonLinearOptimization.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/NumericalDiff.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/alignedvector3.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/autodiff.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/forward_adolc.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/gmres.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/kronecker_product.cpp (100%)
 create mode 100644 resources/3rdParty/eigen/unsupported/test/matrix_exponential.cpp
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/matrix_function.cpp (100%)
 create mode 100644 resources/3rdParty/eigen/unsupported/test/matrix_square_root.cpp
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/mpreal/dlmalloc.c (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/mpreal/dlmalloc.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/mpreal/mpreal.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/mpreal/mpreal.h (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/mpreal_support.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/openglsupport.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/polynomialsolver.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/polynomialutils.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/sparse_extra.cpp (100%)
 rename resources/{3rdparty => 3rdParty}/eigen/unsupported/test/splines.cpp (100%)
 delete mode 100644 resources/3rdparty/eigen/.hg_archival.txt
 delete mode 100644 resources/3rdparty/eigen/.hgtags
 delete mode 100644 resources/3rdparty/eigen/Eigen/Core
 delete mode 100644 resources/3rdparty/eigen/Eigen/Eigenvalues
 delete mode 100644 resources/3rdparty/eigen/Eigen/OrderingMethods
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Cholesky/LDLT.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Array.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/ArrayWrapper.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Assign_MKL.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Block.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/CommaInitializer.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/CwiseBinaryOp.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/CwiseNullaryOp.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/CwiseUnaryOp.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/DenseBase.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/DenseCoeffsBase.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/DenseStorage.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Diagonal.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/DiagonalMatrix.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/DiagonalProduct.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Dot.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Functors.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Fuzzy.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/GeneralProduct.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Map.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/MapBase.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/MathFunctions.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/MatrixBase.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/NoAlias.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/PermutationMatrix.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/PlainObjectBase.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Product.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/ProductBase.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Random.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Replicate.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Select.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Swap.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Transpose.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Transpositions.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/TriangularMatrix.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/VectorBlock.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/Visitor.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/arch/NEON/PacketMath.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/PacketMath.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixVector.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/util/Constants.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/util/ForwardDeclarations.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/util/Macros.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/util/Memory.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/util/StaticAssert.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Core/util/XprHelper.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Transform.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Translation.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/LeastSquares.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigen2Support/SVD.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigenvalues/ComplexSchur.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigenvalues/EigenSolver.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Eigenvalues/RealSchur.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Geometry/AlignedBox.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Geometry/AngleAxis.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Geometry/Hyperplane.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Geometry/ParametrizedLine.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Geometry/Quaternion.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Geometry/Rotation2D.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Geometry/Scaling.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Geometry/Transform.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Geometry/Umeyama.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/Jacobi/Jacobi.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/PardisoSupport/PardisoSupport.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/QR/HouseholderQR.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SVD/JacobiSVD.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SparseCore/CompressedStorage.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SparseCore/SparseDenseProduct.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SparseCore/SparseMatrix.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SparseCore/SparseMatrixBase.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SparseCore/SparseProduct.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SparseCore/SparseTranspose.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SparseCore/SparseVector.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SparseCore/SparseView.h
 delete mode 100644 resources/3rdparty/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h
 delete mode 100644 resources/3rdparty/eigen/bench/bench_gemm.cpp
 delete mode 100644 resources/3rdparty/eigen/bench/spbench/CMakeLists.txt
 delete mode 100644 resources/3rdparty/eigen/bench/spbench/spbenchsolver.cpp
 delete mode 100644 resources/3rdparty/eigen/bench/spbench/spbenchsolver.h
 delete mode 100644 resources/3rdparty/eigen/blas/CMakeLists.txt
 delete mode 100644 resources/3rdparty/eigen/blas/common.h
 delete mode 100644 resources/3rdparty/eigen/blas/double.cpp
 delete mode 100644 resources/3rdparty/eigen/blas/level2_cplx_impl.h
 delete mode 100644 resources/3rdparty/eigen/blas/level2_impl.h
 delete mode 100644 resources/3rdparty/eigen/blas/level2_real_impl.h
 delete mode 100644 resources/3rdparty/eigen/blas/level3_impl.h
 delete mode 100644 resources/3rdparty/eigen/blas/single.cpp
 delete mode 100644 resources/3rdparty/eigen/blas/testing/dblat1.f
 delete mode 100644 resources/3rdparty/eigen/blas/testing/sblat1.f
 delete mode 100644 resources/3rdparty/eigen/cmake/FindMetis.cmake
 delete mode 100644 resources/3rdparty/eigen/demos/opengl/CMakeLists.txt
 delete mode 100644 resources/3rdparty/eigen/doc/C09_TutorialSparse.dox
 delete mode 100644 resources/3rdparty/eigen/doc/D01_StlContainers.dox
 delete mode 100644 resources/3rdparty/eigen/doc/I02_HiPerformance.dox
 delete mode 100644 resources/3rdparty/eigen/doc/I10_Assertions.dox
 delete mode 100644 resources/3rdparty/eigen/scripts/eigen_gen_docs
 delete mode 100644 resources/3rdparty/eigen/test/CMakeLists.txt
 delete mode 100644 resources/3rdparty/eigen/test/cholesky.cpp
 delete mode 100644 resources/3rdparty/eigen/test/diagonal.cpp
 delete mode 100644 resources/3rdparty/eigen/test/eigensolver_complex.cpp
 delete mode 100644 resources/3rdparty/eigen/test/eigensolver_generic.cpp
 delete mode 100644 resources/3rdparty/eigen/test/schur_complex.cpp
 delete mode 100644 resources/3rdparty/eigen/test/schur_real.cpp
 delete mode 100644 resources/3rdparty/eigen/test/sparse_basic.cpp
 delete mode 100644 resources/3rdparty/eigen/test/sparse_solver.h
 delete mode 100644 resources/3rdparty/eigen/unsupported/Eigen/IterativeSolvers
 delete mode 100644 resources/3rdparty/eigen/unsupported/Eigen/MatrixFunctions
 delete mode 100644 resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
 delete mode 100644 resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
 delete mode 100644 resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
 delete mode 100644 resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
 delete mode 100644 resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
 delete mode 100644 resources/3rdparty/eigen/unsupported/Eigen/src/Splines/Spline.h
 delete mode 100644 resources/3rdparty/eigen/unsupported/test/CMakeLists.txt
 delete mode 100644 resources/3rdparty/eigen/unsupported/test/matrix_exponential.cpp
 delete mode 100644 resources/3rdparty/eigen/unsupported/test/matrix_square_root.cpp

diff --git a/resources/3rdParty/eigen/.hg_archival.txt b/resources/3rdParty/eigen/.hg_archival.txt
new file mode 100644
index 000000000..89488d417
--- /dev/null
+++ b/resources/3rdParty/eigen/.hg_archival.txt
@@ -0,0 +1,4 @@
+repo: 8a21fd850624c931e448cbcfb38168cb2717c790
+node: 5097c01bcdc4dc59c4c85b620e9a0d9825f5d6a5
+branch: 3.1
+tag: 3.1.2
diff --git a/resources/3rdparty/eigen/.hgeol b/resources/3rdParty/eigen/.hgeol
similarity index 100%
rename from resources/3rdparty/eigen/.hgeol
rename to resources/3rdParty/eigen/.hgeol
diff --git a/resources/3rdparty/eigen/.hgignore b/resources/3rdParty/eigen/.hgignore
similarity index 100%
rename from resources/3rdparty/eigen/.hgignore
rename to resources/3rdParty/eigen/.hgignore
diff --git a/resources/3rdParty/eigen/.hgtags b/resources/3rdParty/eigen/.hgtags
new file mode 100644
index 000000000..4e0539d1d
--- /dev/null
+++ b/resources/3rdParty/eigen/.hgtags
@@ -0,0 +1,24 @@
+2db9468678c6480c9633b6272ff0e3599d1e11a3 2.0-beta3
+375224817dce669b6fa31d920d4c895a63fabf32 2.0-beta1
+3b8120f077865e2a072e10f5be33e1d942b83a06 2.0-rc1
+19dfc0e7666bcee26f7a49eb42f39a0280a3485e 2.0-beta5
+7a7d8a9526f003ffa2430dfb0c2c535b5add3023 2.0-beta4
+7d14ad088ac23769c349518762704f0257f6a39b 2.0.1
+b9d48561579fd7d4c05b2aa42235dc9de6484bf2 2.0-beta6
+e17630a40408243cb1a51ad0fe3a99beb75b7450 before-hg-migration
+eda654d4cda2210ce80719addcf854773e6dec5a 2.0.0
+ee9a7c468a9e73fab12f38f02bac24b07f29ed71 2.0-beta2
+d49097c25d8049e730c254a2fed725a240ce4858 after-hg-migration
+655348878731bcb5d9bbe0854077b052e75e5237 actual-start-from-scratch
+12a658962d4e6dfdc9a1c350fe7b69e36e70675c 3.0-beta1
+5c4180ad827b3f869b13b1d82f5a6ce617d6fcee 3.0-beta2
+7ae24ca6f3891d5ac58ddc7db60ad413c8d6ec35 3.0-beta3
+c40708b9088d622567fecc9208ad4a426621d364 3.0-beta4
+b6456624eae74f49ae8683d8e7b2882a2ca0342a 3.0-rc1
+a810d5dbab47acfe65b3350236efdd98f67d4d8a 3.1.0-alpha1
+304c88ca3affc16dd0b008b1104873986edd77af 3.1.0-alpha2
+920fc730b5930daae0a6dbe296d60ce2e3808215 3.1.0-beta1
+8383e883ebcc6f14695ff0b5e20bb631abab43fb 3.1.0-rc1
+bf4cb8c934fa3a79f45f1e629610f0225e93e493 3.1.0-rc2
+ca142d0540d3384180c5082d24ef056bd3c354b6 3.1.0
+43d9075b23ef596ddf396101956d06f446fc0765 3.1.1
diff --git a/resources/3rdParty/eigen/.krazy b/resources/3rdParty/eigen/.krazy
new file mode 100644
index 000000000..d719866a6
--- /dev/null
+++ b/resources/3rdParty/eigen/.krazy
@@ -0,0 +1,3 @@
+SKIP /disabled/
+SKIP /bench/
+SKIP /build/
diff --git a/resources/3rdparty/eigen/CMakeLists.txt b/resources/3rdParty/eigen/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/CMakeLists.txt
rename to resources/3rdParty/eigen/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/COPYING.BSD b/resources/3rdParty/eigen/COPYING.BSD
similarity index 100%
rename from resources/3rdparty/eigen/COPYING.BSD
rename to resources/3rdParty/eigen/COPYING.BSD
diff --git a/resources/3rdparty/eigen/COPYING.GPL b/resources/3rdParty/eigen/COPYING.GPL
similarity index 100%
rename from resources/3rdparty/eigen/COPYING.GPL
rename to resources/3rdParty/eigen/COPYING.GPL
diff --git a/resources/3rdparty/eigen/COPYING.LGPL b/resources/3rdParty/eigen/COPYING.LGPL
similarity index 100%
rename from resources/3rdparty/eigen/COPYING.LGPL
rename to resources/3rdParty/eigen/COPYING.LGPL
diff --git a/resources/3rdparty/eigen/COPYING.MINPACK b/resources/3rdParty/eigen/COPYING.MINPACK
similarity index 100%
rename from resources/3rdparty/eigen/COPYING.MINPACK
rename to resources/3rdParty/eigen/COPYING.MINPACK
diff --git a/resources/3rdparty/eigen/COPYING.MPL2 b/resources/3rdParty/eigen/COPYING.MPL2
similarity index 100%
rename from resources/3rdparty/eigen/COPYING.MPL2
rename to resources/3rdParty/eigen/COPYING.MPL2
diff --git a/resources/3rdparty/eigen/COPYING.README b/resources/3rdParty/eigen/COPYING.README
similarity index 100%
rename from resources/3rdparty/eigen/COPYING.README
rename to resources/3rdParty/eigen/COPYING.README
diff --git a/resources/3rdparty/eigen/CTestConfig.cmake b/resources/3rdParty/eigen/CTestConfig.cmake
similarity index 100%
rename from resources/3rdparty/eigen/CTestConfig.cmake
rename to resources/3rdParty/eigen/CTestConfig.cmake
diff --git a/resources/3rdparty/eigen/CTestCustom.cmake.in b/resources/3rdParty/eigen/CTestCustom.cmake.in
similarity index 100%
rename from resources/3rdparty/eigen/CTestCustom.cmake.in
rename to resources/3rdParty/eigen/CTestCustom.cmake.in
diff --git a/resources/3rdparty/eigen/Eigen/Array b/resources/3rdParty/eigen/Eigen/Array
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/Array
rename to resources/3rdParty/eigen/Eigen/Array
diff --git a/resources/3rdparty/eigen/Eigen/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/Cholesky b/resources/3rdParty/eigen/Eigen/Cholesky
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/Cholesky
rename to resources/3rdParty/eigen/Eigen/Cholesky
diff --git a/resources/3rdparty/eigen/Eigen/CholmodSupport b/resources/3rdParty/eigen/Eigen/CholmodSupport
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/CholmodSupport
rename to resources/3rdParty/eigen/Eigen/CholmodSupport
diff --git a/resources/3rdParty/eigen/Eigen/Core b/resources/3rdParty/eigen/Eigen/Core
new file mode 100644
index 000000000..d48017022
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/Core
@@ -0,0 +1,366 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CORE_H
+#define EIGEN_CORE_H
+
+// first thing Eigen does: stop the compiler from committing suicide
+#include "src/Core/util/DisableStupidWarnings.h"
+
+// then include this file where all our macros are defined. It's really important to do it first because
+// it's where we do all the alignment settings (platform detection and honoring the user's will if he
+// defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization.
+#include "src/Core/util/Macros.h"
+
+#include <complex>
+
+// this include file manages BLAS and MKL related macros
+// and inclusion of their respective header files
+#include "src/Core/util/MKL_support.h"
+
+// if alignment is disabled, then disable vectorization. Note: EIGEN_ALIGN is the proper check, it takes into
+// account both the user's will (EIGEN_DONT_ALIGN) and our own platform checks
+#if !EIGEN_ALIGN
+  #ifndef EIGEN_DONT_VECTORIZE
+    #define EIGEN_DONT_VECTORIZE
+  #endif
+#endif
+
+#ifdef _MSC_VER
+  #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled
+  #if (_MSC_VER >= 1500) // 2008 or later
+    // Remember that usage of defined() in a #define is undefined by the standard.
+    // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
+    #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || defined(_M_X64)
+      #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
+    #endif
+  #endif
+#else
+  // Remember that usage of defined() in a #define is undefined by the standard
+  #if (defined __SSE2__) && ( (!defined __GNUC__) || EIGEN_GNUC_AT_LEAST(4,2) )
+    #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC
+  #endif
+#endif
+
+#ifndef EIGEN_DONT_VECTORIZE
+
+  #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)
+
+    // Defines symbols for compile-time detection of which instructions are
+    // used.
+    // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used
+    #define EIGEN_VECTORIZE
+    #define EIGEN_VECTORIZE_SSE
+    #define EIGEN_VECTORIZE_SSE2
+
+    // Detect sse3/ssse3/sse4:
+    // gcc and icc defines __SSE3__, ...
+    // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you
+    // want to force the use of those instructions with msvc.
+    #ifdef __SSE3__
+      #define EIGEN_VECTORIZE_SSE3
+    #endif
+    #ifdef __SSSE3__
+      #define EIGEN_VECTORIZE_SSSE3
+    #endif
+    #ifdef __SSE4_1__
+      #define EIGEN_VECTORIZE_SSE4_1
+    #endif
+    #ifdef __SSE4_2__
+      #define EIGEN_VECTORIZE_SSE4_2
+    #endif
+
+    // include files
+
+    // This extern "C" works around a MINGW-w64 compilation issue
+    // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354
+    // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do).
+    // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations
+    // with conflicting linkage.  The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know;
+    // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too.
+    // notice that since these are C headers, the extern "C" is theoretically needed anyway.
+    extern "C" {
+      #include <emmintrin.h>
+      #include <xmmintrin.h>
+      #ifdef  EIGEN_VECTORIZE_SSE3
+      #include <pmmintrin.h>
+      #endif
+      #ifdef EIGEN_VECTORIZE_SSSE3
+      #include <tmmintrin.h>
+      #endif
+      #ifdef EIGEN_VECTORIZE_SSE4_1
+      #include <smmintrin.h>
+      #endif
+      #ifdef EIGEN_VECTORIZE_SSE4_2
+      #include <nmmintrin.h>
+      #endif
+    } // end extern "C"
+  #elif defined __ALTIVEC__
+    #define EIGEN_VECTORIZE
+    #define EIGEN_VECTORIZE_ALTIVEC
+    #include <altivec.h>
+    // We need to #undef all these ugly tokens defined in <altivec.h>
+    // => use __vector instead of vector
+    #undef bool
+    #undef vector
+    #undef pixel
+  #elif defined  __ARM_NEON__
+    #define EIGEN_VECTORIZE
+    #define EIGEN_VECTORIZE_NEON
+    #include <arm_neon.h>
+  #endif
+#endif
+
+#if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE)
+  #define EIGEN_HAS_OPENMP
+#endif
+
+#ifdef EIGEN_HAS_OPENMP
+#include <omp.h>
+#endif
+
+// MSVC for windows mobile does not have the errno.h file
+#if !(defined(_MSC_VER) && defined(_WIN32_WCE)) && !defined(__ARMCC_VERSION)
+#define EIGEN_HAS_ERRNO
+#endif
+
+#ifdef EIGEN_HAS_ERRNO
+#include <cerrno>
+#endif
+#include <cstddef>
+#include <cstdlib>
+#include <cmath>
+#include <cassert>
+#include <functional>
+#include <iosfwd>
+#include <cstring>
+#include <string>
+#include <limits>
+#include <climits> // for CHAR_BIT
+// for min/max:
+#include <algorithm>
+
+// for outputting debug info
+#ifdef EIGEN_DEBUG_ASSIGN
+#include <iostream>
+#endif
+
+// required for __cpuid, needs to be included after cmath
+#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64))
+  #include <intrin.h>
+#endif
+
+#if defined(_CPPUNWIND) || defined(__EXCEPTIONS)
+  #define EIGEN_EXCEPTIONS
+#endif
+
+#ifdef EIGEN_EXCEPTIONS
+  #include <new>
+#endif
+
+/** \brief Namespace containing all symbols from the %Eigen library. */
+namespace Eigen {
+
+inline static const char *SimdInstructionSetsInUse(void) {
+#if defined(EIGEN_VECTORIZE_SSE4_2)
+  return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
+#elif defined(EIGEN_VECTORIZE_SSE4_1)
+  return "SSE, SSE2, SSE3, SSSE3, SSE4.1";
+#elif defined(EIGEN_VECTORIZE_SSSE3)
+  return "SSE, SSE2, SSE3, SSSE3";
+#elif defined(EIGEN_VECTORIZE_SSE3)
+  return "SSE, SSE2, SSE3";
+#elif defined(EIGEN_VECTORIZE_SSE2)
+  return "SSE, SSE2";
+#elif defined(EIGEN_VECTORIZE_ALTIVEC)
+  return "AltiVec";
+#elif defined(EIGEN_VECTORIZE_NEON)
+  return "ARM NEON";
+#else
+  return "None";
+#endif
+}
+
+} // end namespace Eigen
+
+#define STAGE10_FULL_EIGEN2_API             10
+#define STAGE20_RESOLVE_API_CONFLICTS       20
+#define STAGE30_FULL_EIGEN3_API             30
+#define STAGE40_FULL_EIGEN3_STRICTNESS      40
+#define STAGE99_NO_EIGEN2_SUPPORT           99
+
+#if   defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS
+  #define EIGEN2_SUPPORT
+  #define EIGEN2_SUPPORT_STAGE STAGE40_FULL_EIGEN3_STRICTNESS
+#elif defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API
+  #define EIGEN2_SUPPORT
+  #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API
+#elif defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS
+  #define EIGEN2_SUPPORT
+  #define EIGEN2_SUPPORT_STAGE STAGE20_RESOLVE_API_CONFLICTS
+#elif defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API
+  #define EIGEN2_SUPPORT
+  #define EIGEN2_SUPPORT_STAGE STAGE10_FULL_EIGEN2_API
+#elif defined EIGEN2_SUPPORT
+  // default to stage 3, that's what it has always meant
+  #define EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API
+  #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API
+#else
+  #define EIGEN2_SUPPORT_STAGE STAGE99_NO_EIGEN2_SUPPORT
+#endif
+
+#ifdef EIGEN2_SUPPORT
+#undef minor
+#endif
+
+// we use size_t frequently and we'll never remember to prepend it with std:: every time just to
+// ensure QNX/QCC support
+using std::size_t;
+// gcc 4.6.0 wants std:: for ptrdiff_t 
+using std::ptrdiff_t;
+
+/** \defgroup Core_Module Core module
+  * This is the main module of Eigen providing dense matrix and vector support
+  * (both fixed and dynamic size) with all the features corresponding to a BLAS library
+  * and much more...
+  *
+  * \code
+  * #include <Eigen/Core>
+  * \endcode
+  */
+
+/** \defgroup Support_modules Support modules [category]
+  * Category of modules which add support for external libraries.
+  */
+
+#include "src/Core/util/Constants.h"
+#include "src/Core/util/ForwardDeclarations.h"
+#include "src/Core/util/Meta.h"
+#include "src/Core/util/XprHelper.h"
+#include "src/Core/util/StaticAssert.h"
+#include "src/Core/util/Memory.h"
+
+#include "src/Core/NumTraits.h"
+#include "src/Core/MathFunctions.h"
+#include "src/Core/GenericPacketMath.h"
+
+#if defined EIGEN_VECTORIZE_SSE
+  #include "src/Core/arch/SSE/PacketMath.h"
+  #include "src/Core/arch/SSE/MathFunctions.h"
+  #include "src/Core/arch/SSE/Complex.h"
+#elif defined EIGEN_VECTORIZE_ALTIVEC
+  #include "src/Core/arch/AltiVec/PacketMath.h"
+  #include "src/Core/arch/AltiVec/Complex.h"
+#elif defined EIGEN_VECTORIZE_NEON
+  #include "src/Core/arch/NEON/PacketMath.h"
+  #include "src/Core/arch/NEON/Complex.h"
+#endif
+
+#include "src/Core/arch/Default/Settings.h"
+
+#include "src/Core/Functors.h"
+#include "src/Core/DenseCoeffsBase.h"
+#include "src/Core/DenseBase.h"
+#include "src/Core/MatrixBase.h"
+#include "src/Core/EigenBase.h"
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874
+                                // at least confirmed with Doxygen 1.5.5 and 1.5.6
+  #include "src/Core/Assign.h"
+#endif
+
+#include "src/Core/util/BlasUtil.h"
+#include "src/Core/DenseStorage.h"
+#include "src/Core/NestByValue.h"
+#include "src/Core/ForceAlignedAccess.h"
+#include "src/Core/ReturnByValue.h"
+#include "src/Core/NoAlias.h"
+#include "src/Core/PlainObjectBase.h"
+#include "src/Core/Matrix.h"
+#include "src/Core/Array.h"
+#include "src/Core/CwiseBinaryOp.h"
+#include "src/Core/CwiseUnaryOp.h"
+#include "src/Core/CwiseNullaryOp.h"
+#include "src/Core/CwiseUnaryView.h"
+#include "src/Core/SelfCwiseBinaryOp.h"
+#include "src/Core/Dot.h"
+#include "src/Core/StableNorm.h"
+#include "src/Core/MapBase.h"
+#include "src/Core/Stride.h"
+#include "src/Core/Map.h"
+#include "src/Core/Block.h"
+#include "src/Core/VectorBlock.h"
+#include "src/Core/Transpose.h"
+#include "src/Core/DiagonalMatrix.h"
+#include "src/Core/Diagonal.h"
+#include "src/Core/DiagonalProduct.h"
+#include "src/Core/PermutationMatrix.h"
+#include "src/Core/Transpositions.h"
+#include "src/Core/Redux.h"
+#include "src/Core/Visitor.h"
+#include "src/Core/Fuzzy.h"
+#include "src/Core/IO.h"
+#include "src/Core/Swap.h"
+#include "src/Core/CommaInitializer.h"
+#include "src/Core/Flagged.h"
+#include "src/Core/ProductBase.h"
+#include "src/Core/GeneralProduct.h"
+#include "src/Core/TriangularMatrix.h"
+#include "src/Core/SelfAdjointView.h"
+#include "src/Core/products/GeneralBlockPanelKernel.h"
+#include "src/Core/products/Parallelizer.h"
+#include "src/Core/products/CoeffBasedProduct.h"
+#include "src/Core/products/GeneralMatrixVector.h"
+#include "src/Core/products/GeneralMatrixMatrix.h"
+#include "src/Core/SolveTriangular.h"
+#include "src/Core/products/GeneralMatrixMatrixTriangular.h"
+#include "src/Core/products/SelfadjointMatrixVector.h"
+#include "src/Core/products/SelfadjointMatrixMatrix.h"
+#include "src/Core/products/SelfadjointProduct.h"
+#include "src/Core/products/SelfadjointRank2Update.h"
+#include "src/Core/products/TriangularMatrixVector.h"
+#include "src/Core/products/TriangularMatrixMatrix.h"
+#include "src/Core/products/TriangularSolverMatrix.h"
+#include "src/Core/products/TriangularSolverVector.h"
+#include "src/Core/BandMatrix.h"
+
+#include "src/Core/BooleanRedux.h"
+#include "src/Core/Select.h"
+#include "src/Core/VectorwiseOp.h"
+#include "src/Core/Random.h"
+#include "src/Core/Replicate.h"
+#include "src/Core/Reverse.h"
+#include "src/Core/ArrayBase.h"
+#include "src/Core/ArrayWrapper.h"
+
+#ifdef EIGEN_USE_BLAS
+#include "src/Core/products/GeneralMatrixMatrix_MKL.h"
+#include "src/Core/products/GeneralMatrixVector_MKL.h"
+#include "src/Core/products/GeneralMatrixMatrixTriangular_MKL.h"
+#include "src/Core/products/SelfadjointMatrixMatrix_MKL.h"
+#include "src/Core/products/SelfadjointMatrixVector_MKL.h"
+#include "src/Core/products/TriangularMatrixMatrix_MKL.h"
+#include "src/Core/products/TriangularMatrixVector_MKL.h"
+#include "src/Core/products/TriangularSolverMatrix_MKL.h"
+#endif // EIGEN_USE_BLAS
+
+#ifdef EIGEN_USE_MKL_VML
+#include "src/Core/Assign_MKL.h"
+#endif
+
+#include "src/Core/GlobalFunctions.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#ifdef EIGEN2_SUPPORT
+#include "Eigen2Support"
+#endif
+
+#endif // EIGEN_CORE_H
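For orientation (illustrative only, not part of the upstream patch): the Eigen::SimdInstructionSetsInUse() helper defined in the header above reports which EIGEN_VECTORIZE_* macros the detection logic ended up setting. A minimal sketch, assuming Eigen 3.1.2 is on the include path:

    #include <iostream>
    #include <Eigen/Core>

    int main()
    {
      // Prints e.g. "SSE, SSE2" on a typical x86-64 build, "ARM NEON" when
      // __ARM_NEON__ is defined, or "None" when EIGEN_DONT_VECTORIZE (or
      // EIGEN_DONT_ALIGN, which disables vectorization as noted above) is
      // defined before including Eigen/Core.
      std::cout << Eigen::SimdInstructionSetsInUse() << std::endl;
      return 0;
    }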
diff --git a/resources/3rdparty/eigen/Eigen/Dense b/resources/3rdParty/eigen/Eigen/Dense
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/Dense
rename to resources/3rdParty/eigen/Eigen/Dense
diff --git a/resources/3rdparty/eigen/Eigen/Eigen b/resources/3rdParty/eigen/Eigen/Eigen
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/Eigen
rename to resources/3rdParty/eigen/Eigen/Eigen
diff --git a/resources/3rdparty/eigen/Eigen/Eigen2Support b/resources/3rdParty/eigen/Eigen/Eigen2Support
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/Eigen2Support
rename to resources/3rdParty/eigen/Eigen/Eigen2Support
diff --git a/resources/3rdParty/eigen/Eigen/Eigenvalues b/resources/3rdParty/eigen/Eigen/Eigenvalues
new file mode 100644
index 000000000..af99ccd1f
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/Eigenvalues
@@ -0,0 +1,46 @@
+#ifndef EIGEN_EIGENVALUES_MODULE_H
+#define EIGEN_EIGENVALUES_MODULE_H
+
+#include "Core"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include "Cholesky"
+#include "Jacobi"
+#include "Householder"
+#include "LU"
+#include "Geometry"
+
+/** \defgroup Eigenvalues_Module Eigenvalues module
+  *
+  *
+  *
+  * This module mainly provides various eigenvalue solvers.
+  * This module also provides some MatrixBase methods, including:
+  *  - MatrixBase::eigenvalues(),
+  *  - MatrixBase::operatorNorm()
+  *
+  * \code
+  * #include <Eigen/Eigenvalues>
+  * \endcode
+  */
+
+#include "src/Eigenvalues/Tridiagonalization.h"
+#include "src/Eigenvalues/RealSchur.h"
+#include "src/Eigenvalues/EigenSolver.h"
+#include "src/Eigenvalues/SelfAdjointEigenSolver.h"
+#include "src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h"
+#include "src/Eigenvalues/HessenbergDecomposition.h"
+#include "src/Eigenvalues/ComplexSchur.h"
+#include "src/Eigenvalues/ComplexEigenSolver.h"
+#include "src/Eigenvalues/MatrixBaseEigenvalues.h"
+#ifdef EIGEN_USE_LAPACKE
+#include "src/Eigenvalues/RealSchur_MKL.h"
+#include "src/Eigenvalues/ComplexSchur_MKL.h"
+#include "src/Eigenvalues/SelfAdjointEigenSolver_MKL.h"
+#endif
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_EIGENVALUES_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
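A short usage sketch for the module above (illustrative only, not part of the patch); the solver class is one of those included by this header, and the two MatrixBase helpers are the ones named in the Doxygen block:

    #include <iostream>
    #include <Eigen/Eigenvalues>

    int main()
    {
      Eigen::Matrix3d A;
      A << 2, 1, 0,
           1, 2, 1,
           0, 1, 2;   // symmetric, so a self-adjoint solver applies

      Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(A);
      std::cout << "eigenvalues:\n" << es.eigenvalues() << "\n";

      // Convenience methods this module adds to MatrixBase
      std::cout << "A.eigenvalues():\n" << A.eigenvalues() << "\n";
      std::cout << "A.operatorNorm(): " << A.operatorNorm() << "\n";
      return 0;
    }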
diff --git a/resources/3rdparty/eigen/Eigen/Geometry b/resources/3rdParty/eigen/Eigen/Geometry
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/Geometry
rename to resources/3rdParty/eigen/Eigen/Geometry
diff --git a/resources/3rdparty/eigen/Eigen/Householder b/resources/3rdParty/eigen/Eigen/Householder
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/Householder
rename to resources/3rdParty/eigen/Eigen/Householder
diff --git a/resources/3rdparty/eigen/Eigen/IterativeLinearSolvers b/resources/3rdParty/eigen/Eigen/IterativeLinearSolvers
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/IterativeLinearSolvers
rename to resources/3rdParty/eigen/Eigen/IterativeLinearSolvers
diff --git a/resources/3rdparty/eigen/Eigen/Jacobi b/resources/3rdParty/eigen/Eigen/Jacobi
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/Jacobi
rename to resources/3rdParty/eigen/Eigen/Jacobi
diff --git a/resources/3rdparty/eigen/Eigen/LU b/resources/3rdParty/eigen/Eigen/LU
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/LU
rename to resources/3rdParty/eigen/Eigen/LU
diff --git a/resources/3rdparty/eigen/Eigen/LeastSquares b/resources/3rdParty/eigen/Eigen/LeastSquares
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/LeastSquares
rename to resources/3rdParty/eigen/Eigen/LeastSquares
diff --git a/resources/3rdParty/eigen/Eigen/OrderingMethods b/resources/3rdParty/eigen/Eigen/OrderingMethods
new file mode 100644
index 000000000..1e2d87452
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/OrderingMethods
@@ -0,0 +1,23 @@
+#ifndef EIGEN_ORDERINGMETHODS_MODULE_H
+#define EIGEN_ORDERINGMETHODS_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+/** \ingroup Sparse_modules
+  * \defgroup OrderingMethods_Module OrderingMethods module
+  *
+  * This module is currently for internal use only.
+  *
+  *
+  * \code
+  * #include <Eigen/OrderingMethods>
+  * \endcode
+  */
+
+#include "src/OrderingMethods/Amd.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_ORDERINGMETHODS_MODULE_H
diff --git a/resources/3rdparty/eigen/Eigen/PaStiXSupport b/resources/3rdParty/eigen/Eigen/PaStiXSupport
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/PaStiXSupport
rename to resources/3rdParty/eigen/Eigen/PaStiXSupport
diff --git a/resources/3rdparty/eigen/Eigen/PardisoSupport b/resources/3rdParty/eigen/Eigen/PardisoSupport
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/PardisoSupport
rename to resources/3rdParty/eigen/Eigen/PardisoSupport
diff --git a/resources/3rdparty/eigen/Eigen/QR b/resources/3rdParty/eigen/Eigen/QR
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/QR
rename to resources/3rdParty/eigen/Eigen/QR
diff --git a/resources/3rdparty/eigen/Eigen/QtAlignedMalloc b/resources/3rdParty/eigen/Eigen/QtAlignedMalloc
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/QtAlignedMalloc
rename to resources/3rdParty/eigen/Eigen/QtAlignedMalloc
diff --git a/resources/3rdparty/eigen/Eigen/SVD b/resources/3rdParty/eigen/Eigen/SVD
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/SVD
rename to resources/3rdParty/eigen/Eigen/SVD
diff --git a/resources/3rdparty/eigen/Eigen/Sparse b/resources/3rdParty/eigen/Eigen/Sparse
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/Sparse
rename to resources/3rdParty/eigen/Eigen/Sparse
diff --git a/resources/3rdparty/eigen/Eigen/SparseCholesky b/resources/3rdParty/eigen/Eigen/SparseCholesky
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/SparseCholesky
rename to resources/3rdParty/eigen/Eigen/SparseCholesky
diff --git a/resources/3rdparty/eigen/Eigen/SparseCore b/resources/3rdParty/eigen/Eigen/SparseCore
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/SparseCore
rename to resources/3rdParty/eigen/Eigen/SparseCore
diff --git a/resources/3rdparty/eigen/Eigen/StdDeque b/resources/3rdParty/eigen/Eigen/StdDeque
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/StdDeque
rename to resources/3rdParty/eigen/Eigen/StdDeque
diff --git a/resources/3rdparty/eigen/Eigen/StdList b/resources/3rdParty/eigen/Eigen/StdList
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/StdList
rename to resources/3rdParty/eigen/Eigen/StdList
diff --git a/resources/3rdparty/eigen/Eigen/StdVector b/resources/3rdParty/eigen/Eigen/StdVector
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/StdVector
rename to resources/3rdParty/eigen/Eigen/StdVector
diff --git a/resources/3rdparty/eigen/Eigen/SuperLUSupport b/resources/3rdParty/eigen/Eigen/SuperLUSupport
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/SuperLUSupport
rename to resources/3rdParty/eigen/Eigen/SuperLUSupport
diff --git a/resources/3rdparty/eigen/Eigen/UmfPackSupport b/resources/3rdParty/eigen/Eigen/UmfPackSupport
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/UmfPackSupport
rename to resources/3rdParty/eigen/Eigen/UmfPackSupport
diff --git a/resources/3rdparty/eigen/Eigen/src/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/Cholesky/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Cholesky/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Cholesky/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Cholesky/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/Eigen/src/Cholesky/LDLT.h b/resources/3rdParty/eigen/Eigen/src/Cholesky/LDLT.h
new file mode 100644
index 000000000..68e54b1d4
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Cholesky/LDLT.h
@@ -0,0 +1,592 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Keir Mierle <mierle@gmail.com>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2011 Timothy E. Holy <tim.holy@gmail.com >
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LDLT_H
+#define EIGEN_LDLT_H
+
+namespace Eigen { 
+
+namespace internal {
+template<typename MatrixType, int UpLo> struct LDLT_Traits;
+}
+
+/** \ingroup Cholesky_Module
+  *
+  * \class LDLT
+  *
+  * \brief Robust Cholesky decomposition of a matrix with pivoting
+  *
+  * \param MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition
+  * \param UpLo the triangular part that will be used for the decomposition: Lower (default) or Upper.
+  *             The other triangular part won't be read.
+  *
+  * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite
+  * matrix \f$ A \f$ such that \f$ A =  P^TLDL^*P \f$, where P is a permutation matrix, L
+  * is lower triangular with a unit diagonal and D is a diagonal matrix.
+  *
+  * The decomposition uses pivoting to ensure stability, so that L will have
+  * zeros in the bottom right rank(A) - n submatrix. Avoiding the square root
+  * on D also stabilizes the computation.
+  *
+  * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky
+  * decomposition to determine whether a system of equations has a solution.
+  *
+  * \sa MatrixBase::ldlt(), class LLT
+  */
+template<typename _MatrixType, int _UpLo> class LDLT
+{
+  public:
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options & ~RowMajorBit, // these are the options for the TmpMatrixType, we need a ColMajor matrix here!
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+      UpLo = _UpLo
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Scalar, RowsAtCompileTime, 1, Options, MaxRowsAtCompileTime, 1> TmpMatrixType;
+
+    typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
+    typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
+
+    typedef internal::LDLT_Traits<MatrixType,UpLo> Traits;
+
+    /** \brief Default Constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via LDLT::compute(const MatrixType&).
+      */
+    LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {}
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa LDLT()
+      */
+    LDLT(Index size)
+      : m_matrix(size, size),
+        m_transpositions(size),
+        m_temporary(size),
+        m_isInitialized(false)
+    {}
+
+    /** \brief Constructor with decomposition
+      *
+      * This calculates the decomposition for the input \a matrix.
+      * \sa LDLT(Index size)
+      */
+    LDLT(const MatrixType& matrix)
+      : m_matrix(matrix.rows(), matrix.cols()),
+        m_transpositions(matrix.rows()),
+        m_temporary(matrix.rows()),
+        m_isInitialized(false)
+    {
+      compute(matrix);
+    }
+
+    /** Clear any existing decomposition
+     * \sa rankUpdate(w,sigma)
+     */
+    void setZero()
+    {
+      m_isInitialized = false;
+    }
+
+    /** \returns a view of the upper triangular matrix U */
+    inline typename Traits::MatrixU matrixU() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return Traits::getU(m_matrix);
+    }
+
+    /** \returns a view of the lower triangular matrix L */
+    inline typename Traits::MatrixL matrixL() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return Traits::getL(m_matrix);
+    }
+
+    /** \returns the permutation matrix P as a transposition sequence.
+      */
+    inline const TranspositionType& transpositionsP() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return m_transpositions;
+    }
+
+    /** \returns the coefficients of the diagonal matrix D */
+    inline Diagonal<const MatrixType> vectorD() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return m_matrix.diagonal();
+    }
+
+    /** \returns true if the matrix is positive (semidefinite) */
+    inline bool isPositive() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return m_sign == 1;
+    }
+    
+    #ifdef EIGEN2_SUPPORT
+    inline bool isPositiveDefinite() const
+    {
+      return isPositive();
+    }
+    #endif
+
+    /** \returns true if the matrix is negative (semidefinite) */
+    inline bool isNegative(void) const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return m_sign == -1;
+    }
+
+    /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> .
+      *
+      * \note_about_checking_solutions
+      *
+      * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$
+      * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, 
+      * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then
+      * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the
+      * least-squares solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
+      * computes the least-squares solution of \f$ A x = b \f$ if \f$ A \f$ is singular.
+      *
+      * \sa MatrixBase::ldlt()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<LDLT, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      eigen_assert(m_matrix.rows()==b.rows()
+                && "LDLT::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<LDLT, Rhs>(*this, b.derived());
+    }
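+    // Illustrative sketch (matrix and vector values are hypothetical): solving a
+    // self-adjoint system with the decomposition computed above.
+    //
+    //   Eigen::Matrix2d A;
+    //   A << 2, 1,
+    //        1, 3;                                  // symmetric positive definite
+    //   Eigen::Vector2d b(1, 2);
+    //   Eigen::LDLT<Eigen::Matrix2d> ldlt(A);
+    //   Eigen::Vector2d x = ldlt.solve(b);          // solves A x = b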
+
+    #ifdef EIGEN2_SUPPORT
+    template<typename OtherDerived, typename ResultType>
+    bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
+    {
+      *result = this->solve(b);
+      return true;
+    }
+    #endif
+
+    template<typename Derived>
+    bool solveInPlace(MatrixBase<Derived> &bAndX) const;
+
+    LDLT& compute(const MatrixType& matrix);
+
+    template <typename Derived>
+    LDLT& rankUpdate(const MatrixBase<Derived>& w,RealScalar alpha=1);
+
+    /** \returns the internal LDLT decomposition matrix
+      *
+      * TODO: document the storage layout
+      */
+    inline const MatrixType& matrixLDLT() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return m_matrix;
+    }
+
+    MatrixType reconstructedMatrix() const;
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return Success;
+    }
+
+  protected:
+
+    /** \internal
+      * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
+      * The strict upper part is used during the decomposition, the strict lower
+      * part corresponds to the coefficients of L (its diagonal is equal to 1 and
+      * is not stored), and the diagonal entries correspond to D.
+      */
+    MatrixType m_matrix;
+    TranspositionType m_transpositions;
+    TmpMatrixType m_temporary;
+    int m_sign;
+    bool m_isInitialized;
+};
+
+namespace internal {
+
+template<int UpLo> struct ldlt_inplace;
+
+template<> struct ldlt_inplace<Lower>
+{
+  template<typename MatrixType, typename TranspositionType, typename Workspace>
+  static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+  {
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    eigen_assert(mat.rows()==mat.cols());
+    const Index size = mat.rows();
+
+    if (size <= 1)
+    {
+      transpositions.setIdentity();
+      if(sign)
+        *sign = real(mat.coeff(0,0))>0 ? 1:-1;
+      return true;
+    }
+
+    RealScalar cutoff(0), biggest_in_corner;
+
+    for (Index k = 0; k < size; ++k)
+    {
+      // Find largest diagonal element
+      Index index_of_biggest_in_corner;
+      biggest_in_corner = mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
+      index_of_biggest_in_corner += k;
+
+      if(k == 0)
+      {
+        // The biggest overall is the point of reference to which further diagonals
+        // are compared; if any diagonal is negligible compared
+        // to the largest overall, the algorithm bails.
+        cutoff = abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);
+
+        if(sign)
+          *sign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1;
+      }
+
+      // Finish early if the matrix is not full rank.
+      if(biggest_in_corner < cutoff)
+      {
+        for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
+        break;
+      }
+
+      transpositions.coeffRef(k) = index_of_biggest_in_corner;
+      if(k != index_of_biggest_in_corner)
+      {
+        // apply the transposition while taking care to consider only
+        // the lower triangular part
+        Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element
+        mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k));
+        mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s));
+        std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner));
+        for(int i=k+1;i<index_of_biggest_in_corner;++i)
+        {
+          Scalar tmp = mat.coeffRef(i,k);
+          mat.coeffRef(i,k) = conj(mat.coeffRef(index_of_biggest_in_corner,i));
+          mat.coeffRef(index_of_biggest_in_corner,i) = conj(tmp);
+        }
+        if(NumTraits<Scalar>::IsComplex)
+          mat.coeffRef(index_of_biggest_in_corner,k) = conj(mat.coeff(index_of_biggest_in_corner,k));
+      }
+
+      // partition the matrix:
+      //       A00 |  -  |  -
+      // lu  = A10 | A11 |  -
+      //       A20 | A21 | A22
+      Index rs = size - k - 1;
+      Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1);
+      Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
+      Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);
+
+      if(k>0)
+      {
+        temp.head(k) = mat.diagonal().head(k).asDiagonal() * A10.adjoint();
+        mat.coeffRef(k,k) -= (A10 * temp.head(k)).value();
+        if(rs>0)
+          A21.noalias() -= A20 * temp.head(k);
+      }
+      if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
+        A21 /= mat.coeffRef(k,k);
+    }
+
+    return true;
+  }
+
+  // Reference for the algorithm: Davis and Hager, "Multiple Rank
+  // Modifications of a Sparse Cholesky Factorization" (Algorithm 1)
+  // Trivial rearrangements of their computations (Timothy E. Holy)
+  // allow their algorithm to work for rank-1 updates even if the
+  // original matrix is not of full rank.
+  // Here only rank-1 updates are implemented, to reduce the
+  // requirement for intermediate storage and improve accuracy
+  template<typename MatrixType, typename WDerived>
+  static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, typename MatrixType::RealScalar sigma=1)
+  {
+    using internal::isfinite;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    const Index size = mat.rows();
+    eigen_assert(mat.cols() == size && w.size()==size);
+
+    RealScalar alpha = 1;
+
+    // Apply the update
+    for (Index j = 0; j < size; j++)
+    {
+      // Check for termination due to an original decomposition of low-rank
+      if (!(isfinite)(alpha))
+        break;
+
+      // Update the diagonal terms
+      RealScalar dj = real(mat.coeff(j,j));
+      Scalar wj = w.coeff(j);
+      RealScalar swj2 = sigma*abs2(wj);
+      RealScalar gamma = dj*alpha + swj2;
+
+      mat.coeffRef(j,j) += swj2/alpha;
+      alpha += swj2/dj;
+
+
+      // Update the terms of L
+      Index rs = size-j-1;
+      w.tail(rs) -= wj * mat.col(j).tail(rs);
+      if(gamma != 0)
+        mat.col(j).tail(rs) += (sigma*conj(wj)/gamma)*w.tail(rs);
+    }
+    return true;
+  }
+
+  template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
+  static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, typename MatrixType::RealScalar sigma=1)
+  {
+    // Apply the permutation to the input w
+    tmp = transpositions * w;
+
+    return ldlt_inplace<Lower>::updateInPlace(mat,tmp,sigma);
+  }
+};
+
+template<> struct ldlt_inplace<Upper>
+{
+  template<typename MatrixType, typename TranspositionType, typename Workspace>
+  static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+  {
+    Transpose<MatrixType> matt(mat);
+    return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
+  }
+
+  template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
+  static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, typename MatrixType::RealScalar sigma=1)
+  {
+    Transpose<MatrixType> matt(mat);
+    return ldlt_inplace<Lower>::update(matt, transpositions, tmp, w.conjugate(), sigma);
+  }
+};
+
+template<typename MatrixType> struct LDLT_Traits<MatrixType,Lower>
+{
+  typedef const TriangularView<const MatrixType, UnitLower> MatrixL;
+  typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitUpper> MatrixU;
+  static inline MatrixL getL(const MatrixType& m) { return m; }
+  static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+};
+
+template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
+{
+  typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitLower> MatrixL;
+  typedef const TriangularView<const MatrixType, UnitUpper> MatrixU;
+  static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); }
+  static inline MatrixU getU(const MatrixType& m) { return m; }
+};
+
+} // end namespace internal
+
+/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix
+  */
+template<typename MatrixType, int _UpLo>
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
+{
+  eigen_assert(a.rows()==a.cols());
+  const Index size = a.rows();
+
+  m_matrix = a;
+
+  m_transpositions.resize(size);
+  m_isInitialized = false;
+  m_temporary.resize(size);
+
+  internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign);
+
+  m_isInitialized = true;
+  return *this;
+}
+
+/** Update the LDLT decomposition:  given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T.
+ * \param w a vector to be incorporated into the decomposition.
+ * \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column vectors. Optional; default value is +1.
+ * \sa setZero()
+  */
+template<typename MatrixType, int _UpLo>
+template<typename Derived>
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w,typename NumTraits<typename MatrixType::Scalar>::Real sigma)
+{
+  const Index size = w.rows();
+  if (m_isInitialized)
+  {
+    eigen_assert(m_matrix.rows()==size);
+  }
+  else
+  {    
+    m_matrix.resize(size,size);
+    m_matrix.setZero();
+    m_transpositions.resize(size);
+    for (Index i = 0; i < size; i++)
+      m_transpositions.coeffRef(i) = i;
+    m_temporary.resize(size);
+    m_sign = sigma>=0 ? 1 : -1;
+    m_isInitialized = true;
+  }
+
+  internal::ldlt_inplace<UpLo>::update(m_matrix, m_transpositions, m_temporary, w, sigma);
+
+  return *this;
+}
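+// Illustrative sketch (values are hypothetical): a rank-1 update, i.e. obtaining
+// the decomposition of A + w w^T from an existing decomposition of A.
+//
+//   Eigen::Matrix2d A;
+//   A << 4, 1,
+//        1, 3;
+//   Eigen::Vector2d w(1, 1);
+//   Eigen::LDLT<Eigen::Matrix2d> ldlt(A);
+//   ldlt.rankUpdate(w, 1.0);                        // now factors A + w w^T
+//   Eigen::Vector2d x = ldlt.solve(Eigen::Vector2d(1, 0));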
+
+namespace internal {
+template<typename _MatrixType, int _UpLo, typename Rhs>
+struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
+  : solve_retval_base<LDLT<_MatrixType,_UpLo>, Rhs>
+{
+  typedef LDLT<_MatrixType,_UpLo> LDLTType;
+  EIGEN_MAKE_SOLVE_HELPERS(LDLTType,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    eigen_assert(rhs().rows() == dec().matrixLDLT().rows());
+    // dst = P b
+    dst = dec().transpositionsP() * rhs();
+
+    // dst = L^-1 (P b)
+    dec().matrixL().solveInPlace(dst);
+
+    // dst = D^-1 (L^-1 P b)
+    // more precisely, use pseudo-inverse of D (see bug 241)
+    using std::abs;
+    using std::max;
+    typedef typename LDLTType::MatrixType MatrixType;
+    typedef typename LDLTType::Scalar Scalar;
+    typedef typename LDLTType::RealScalar RealScalar;
+    const Diagonal<const MatrixType> vectorD = dec().vectorD();
+    RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits<Scalar>::epsilon(),
+                                 RealScalar(1) / NumTraits<RealScalar>::highest()); // motivated by LAPACK's xGELSS
+    for (Index i = 0; i < vectorD.size(); ++i) {
+      if(abs(vectorD(i)) > tolerance)
+        dst.row(i) /= vectorD(i);
+      else
+        dst.row(i).setZero();
+    }
+
+    // dst = L^-T (D^-1 L^-1 P b)
+    dec().matrixU().solveInPlace(dst);
+
+    // dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b
+    dst = dec().transpositionsP().transpose() * dst;
+  }
+};
+}
+
+/** \internal use x = ldlt_object.solve(x);
+  *
+  * This is the \em in-place version of solve().
+  *
+  * \param bAndX represents both the right-hand side matrix b and result x.
+  *
+  * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD.
+  *
+  * This version avoids a copy when the right hand side matrix b is not
+  * needed anymore.
+  *
+  * \sa LDLT::solve(), MatrixBase::ldlt()
+  */
+template<typename MatrixType,int _UpLo>
+template<typename Derived>
+bool LDLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const
+{
+  eigen_assert(m_isInitialized && "LDLT is not initialized.");
+  const Index size = m_matrix.rows();
+  eigen_assert(size == bAndX.rows());
+
+  bAndX = this->solve(bAndX);
+
+  return true;
+}
+
+/** \returns the matrix represented by the decomposition,
+ * i.e., it returns the product: P^T L D L^* P.
+ * This function is provided for debug purpose. */
+template<typename MatrixType, int _UpLo>
+MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const
+{
+  eigen_assert(m_isInitialized && "LDLT is not initialized.");
+  const Index size = m_matrix.rows();
+  MatrixType res(size,size);
+
+  // P
+  res.setIdentity();
+  res = transpositionsP() * res;
+  // L^* P
+  res = matrixU() * res;
+  // D(L^*P)
+  res = vectorD().asDiagonal() * res;
+  // L(DL^*P)
+  res = matrixL() * res;
+  // P^T (LDL^*P)
+  res = transpositionsP().transpose() * res;
+
+  return res;
+}
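+// Illustrative sketch (values are hypothetical): using reconstructedMatrix() as a
+// quick numerical sanity check of the factorization.
+//
+//   Eigen::Matrix2d A;
+//   A << 2, 1,
+//        1, 3;
+//   Eigen::LDLT<Eigen::Matrix2d> ldlt(A);
+//   double err = (ldlt.reconstructedMatrix() - A).norm();   // expected to be ~0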
+
+/** \cholesky_module
+  * \returns the Cholesky decomposition with full pivoting without square root of \c *this
+  */
+template<typename MatrixType, unsigned int UpLo>
+inline const LDLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
+SelfAdjointView<MatrixType, UpLo>::ldlt() const
+{
+  return LDLT<PlainObject,UpLo>(m_matrix);
+}
+
+/** \cholesky_module
+  * \returns the Cholesky decomposition with full pivoting without square root of \c *this
+  */
+template<typename Derived>
+inline const LDLT<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::ldlt() const
+{
+  return LDLT<PlainObject>(derived());
+}
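+// Illustrative sketch (values are hypothetical): the convenience one-liner via
+// MatrixBase::ldlt() defined above.
+//
+//   Eigen::Matrix2d A;
+//   A << 2, 1,
+//        1, 3;
+//   Eigen::Vector2d x = A.ldlt().solve(Eigen::Vector2d(1, 0));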
+
+} // end namespace Eigen
+
+#endif // EIGEN_LDLT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Cholesky/LLT.h b/resources/3rdParty/eigen/Eigen/src/Cholesky/LLT.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Cholesky/LLT.h
rename to resources/3rdParty/eigen/Eigen/src/Cholesky/LLT.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Cholesky/LLT_MKL.h b/resources/3rdParty/eigen/Eigen/src/Cholesky/LLT_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Cholesky/LLT_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Cholesky/LLT_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/CholmodSupport/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/CholmodSupport/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/CholmodSupport/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/CholmodSupport/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h b/resources/3rdParty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h
new file mode 100644
index 000000000..37f142150
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h
@@ -0,0 +1,579 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CHOLMODSUPPORT_H
+#define EIGEN_CHOLMODSUPPORT_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Scalar, typename CholmodType>
+void cholmod_configure_matrix(CholmodType& mat)
+{
+  if (internal::is_same<Scalar,float>::value)
+  {
+    mat.xtype = CHOLMOD_REAL;
+    mat.dtype = CHOLMOD_SINGLE;
+  }
+  else if (internal::is_same<Scalar,double>::value)
+  {
+    mat.xtype = CHOLMOD_REAL;
+    mat.dtype = CHOLMOD_DOUBLE;
+  }
+  else if (internal::is_same<Scalar,std::complex<float> >::value)
+  {
+    mat.xtype = CHOLMOD_COMPLEX;
+    mat.dtype = CHOLMOD_SINGLE;
+  }
+  else if (internal::is_same<Scalar,std::complex<double> >::value)
+  {
+    mat.xtype = CHOLMOD_COMPLEX;
+    mat.dtype = CHOLMOD_DOUBLE;
+  }
+  else
+  {
+    eigen_assert(false && "Scalar type not supported by CHOLMOD");
+  }
+}
+
+} // namespace internal
+
+/** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object.
+  * Note that the data are shared.
+  */
+template<typename _Scalar, int _Options, typename _Index>
+cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
+{
+  typedef SparseMatrix<_Scalar,_Options,_Index> MatrixType;
+  cholmod_sparse res;
+  res.nzmax   = mat.nonZeros();
+  res.nrow    = mat.rows();
+  res.ncol    = mat.cols();
+  res.p       = mat.outerIndexPtr();
+  res.i       = mat.innerIndexPtr();
+  res.x       = mat.valuePtr();
+  res.sorted  = 1;
+  if(mat.isCompressed())
+  {
+    res.packed  = 1;
+  }
+  else
+  {
+    res.packed  = 0;
+    res.nz = mat.innerNonZeroPtr();
+  }
+
+  res.dtype   = 0;
+  res.stype   = -1;
+  
+  if (internal::is_same<_Index,int>::value)
+  {
+    res.itype = CHOLMOD_INT;
+  }
+  else
+  {
+    eigen_assert(false && "Index type different than int is not supported yet");
+  }
+
+  // setup res.xtype
+  internal::cholmod_configure_matrix<_Scalar>(res);
+  
+  res.stype = 0;
+  
+  return res;
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+const cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat)
+{
+  cholmod_sparse res = viewAsCholmod(mat.const_cast_derived());
+  return res;
+}
+
+/** Returns a view of the Eigen sparse matrix \a mat as a Cholmod sparse matrix.
+  * The data are not copied but shared. */
+template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo>
+cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat)
+{
+  cholmod_sparse res = viewAsCholmod(mat.matrix().const_cast_derived());
+  
+  if(UpLo==Upper) res.stype =  1;
+  if(UpLo==Lower) res.stype = -1;
+
+  return res;
+}
+
+/** Returns a view of the Eigen \b dense matrix \a mat as a Cholmod dense matrix.
+  * The data are not copied but shared. */
+template<typename Derived>
+cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat)
+{
+  EIGEN_STATIC_ASSERT((internal::traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+  typedef typename Derived::Scalar Scalar;
+
+  cholmod_dense res;
+  res.nrow   = mat.rows();
+  res.ncol   = mat.cols();
+  res.nzmax  = res.nrow * res.ncol;
+  res.d      = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride();
+  res.x      = mat.derived().data();
+  res.z      = 0;
+
+  internal::cholmod_configure_matrix<Scalar>(res);
+
+  return res;
+}
+
+/** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix.
+  * The data are not copied but shared. */
+template<typename Scalar, int Flags, typename Index>
+MappedSparseMatrix<Scalar,Flags,Index> viewAsEigen(cholmod_sparse& cm)
+{
+  return MappedSparseMatrix<Scalar,Flags,Index>
+         (cm.nrow, cm.ncol, reinterpret_cast<Index*>(cm.p)[cm.ncol],
+          reinterpret_cast<Index*>(cm.p), reinterpret_cast<Index*>(cm.i),reinterpret_cast<Scalar*>(cm.x) );
+}
+
+enum CholmodMode {
+  CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt
+};
+
+
+/** \ingroup CholmodSupport_Module
+  * \class CholmodBase
+  * \brief The base class for the direct Cholesky factorization of Cholmod
+  * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT
+  */
+template<typename _MatrixType, int _UpLo, typename Derived>
+class CholmodBase : internal::noncopyable
+{
+  public:
+    typedef _MatrixType MatrixType;
+    enum { UpLo = _UpLo };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef MatrixType CholMatrixType;
+    typedef typename MatrixType::Index Index;
+
+  public:
+
+    CholmodBase()
+      : m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
+    {
+      cholmod_start(&m_cholmod);
+    }
+
+    CholmodBase(const MatrixType& matrix)
+      : m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
+    {
+      cholmod_start(&m_cholmod);
+      compute(matrix);
+    }
+
+    ~CholmodBase()
+    {
+      if(m_cholmodFactor)
+        cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+      cholmod_finish(&m_cholmod);
+    }
+    
+    inline Index cols() const { return m_cholmodFactor->n; }
+    inline Index rows() const { return m_cholmodFactor->n; }
+    
+    Derived& derived() { return *static_cast<Derived*>(this); }
+    const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+
+    /** Computes the sparse Cholesky decomposition of \a matrix */
+    Derived& compute(const MatrixType& matrix)
+    {
+      analyzePattern(matrix);
+      factorize(matrix);
+      return derived();
+    }
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<CholmodBase, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "LLT is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<CholmodBase, Rhs>(*this, b.derived());
+    }
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<CholmodBase, Rhs>
+    solve(const SparseMatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "LLT is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::sparse_solve_retval<CholmodBase, Rhs>(*this, b.derived());
+    }
+    
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems that share the same structure.
+      * 
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& matrix)
+    {
+      if(m_cholmodFactor)
+      {
+        cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+        m_cholmodFactor = 0;
+      }
+      cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
+      m_cholmodFactor = cholmod_analyze(&A, &m_cholmod);
+      
+      this->m_isInitialized = true;
+      this->m_info = Success;
+      m_analysisIsOk = true;
+      m_factorizationIsOk = false;
+    }
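+    // Illustrative sketch (A1, A2 and b are hypothetical objects with identical
+    // sparsity patterns): analyze once, then factorize and solve repeatedly.
+    //
+    //   Eigen::CholmodSupernodalLLT<Eigen::SparseMatrix<double> > solver;
+    //   solver.analyzePattern(A1);
+    //   solver.factorize(A1);
+    //   Eigen::VectorXd x1 = solver.solve(b);
+    //   solver.factorize(A2);                    // same pattern as A1, different values
+    //   Eigen::VectorXd x2 = solver.solve(b);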
+    
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& matrix)
+    {
+      eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+      cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
+      cholmod_factorize(&A, m_cholmodFactor, &m_cholmod);
+      
+      this->m_info = Success;
+      m_factorizationIsOk = true;
+    }
+    
+    /** Returns a reference to the Cholmod configuration structure, giving full control over the performed operations.
+     *  See the Cholmod user guide for details. */
+    cholmod_common& cholmod() { return m_cholmod; }
+    
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal */
+    template<typename Rhs,typename Dest>
+    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+    {
+      eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+      const Index size = m_cholmodFactor->n;
+      eigen_assert(size==b.rows());
+
+      // note: cd stands for Cholmod Dense
+      cholmod_dense b_cd = viewAsCholmod(b.const_cast_derived());
+      cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod);
+      if(!x_cd)
+      {
+        this->m_info = NumericalIssue;
+        return; // do not dereference a null result below
+      }
+      // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
+      dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
+      cholmod_free_dense(&x_cd, &m_cholmod);
+    }
+    
+    /** \internal */
+    template<typename RhsScalar, int RhsOptions, typename RhsIndex, typename DestScalar, int DestOptions, typename DestIndex>
+    void _solve(const SparseMatrix<RhsScalar,RhsOptions,RhsIndex> &b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+    {
+      eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+      const Index size = m_cholmodFactor->n;
+      eigen_assert(size==b.rows());
+
+      // note: cs stands for Cholmod Sparse
+      cholmod_sparse b_cs = viewAsCholmod(b);
+      cholmod_sparse* x_cs = cholmod_spsolve(CHOLMOD_A, m_cholmodFactor, &b_cs, &m_cholmod);
+      if(!x_cs)
+      {
+        this->m_info = NumericalIssue;
+        return; // do not dereference a null result below
+      }
+      // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
+      dest = viewAsEigen<DestScalar,DestOptions,DestIndex>(*x_cs);
+      cholmod_free_sparse(&x_cs, &m_cholmod);
+    }
+    #endif // EIGEN_PARSED_BY_DOXYGEN
+    
+    template<typename Stream>
+    void dumpMemory(Stream& s)
+    {}
+    
+  protected:
+    mutable cholmod_common m_cholmod;
+    cholmod_factor* m_cholmodFactor;
+    mutable ComputationInfo m_info;
+    bool m_isInitialized;
+    int m_factorizationIsOk;
+    int m_analysisIsOk;
+};
+
+/** \ingroup CholmodSupport_Module
+  * \class CholmodSimplicialLLT
+  * \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod
+  *
+  * This class allows solving A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization
+  * using the Cholmod library.
+  * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest.
+  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+  * X and B can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  *
+  * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non-compressed.
+  *
+  * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLLT
+  */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> >
+{
+    typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base;
+    using Base::m_cholmod;
+    
+  public:
+    
+    typedef _MatrixType MatrixType;
+    
+    CholmodSimplicialLLT() : Base() { init(); }
+
+    CholmodSimplicialLLT(const MatrixType& matrix) : Base()
+    {
+      init();
+      compute(matrix);
+    }
+
+    ~CholmodSimplicialLLT() {}
+  protected:
+    void init()
+    {
+      m_cholmod.final_asis = 0;
+      m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+      m_cholmod.final_ll = 1;
+    }
+};
+
+
+/** \ingroup CholmodSupport_Module
+  * \class CholmodSimplicialLDLT
+  * \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod
+  *
+  * This class allows solving A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization
+  * using the Cholmod library.
+  * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical interest.
+  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+  * X and B can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  *
+  * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non-compressed.
+  *
+  * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLDLT
+  */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> >
+{
+    typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base;
+    using Base::m_cholmod;
+    
+  public:
+    
+    typedef _MatrixType MatrixType;
+    
+    CholmodSimplicialLDLT() : Base() { init(); }
+
+    CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
+    {
+      init();
+      compute(matrix);
+    }
+
+    ~CholmodSimplicialLDLT() {}
+  protected:
+    void init()
+    {
+      m_cholmod.final_asis = 1;
+      m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+    }
+};
+
+/** \ingroup CholmodSupport_Module
+  * \class CholmodSupernodalLLT
+  * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod
+  *
+  * This class allows solving A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization
+  * using the Cholmod library.
+  * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high-order 2D FEM.
+  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+  * X and B can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  *
+  * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non-compressed.
+  *
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> >
+{
+    typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base;
+    using Base::m_cholmod;
+    
+  public:
+    
+    typedef _MatrixType MatrixType;
+    
+    CholmodSupernodalLLT() : Base() { init(); }
+
+    CholmodSupernodalLLT(const MatrixType& matrix) : Base()
+    {
+      init();
+      compute(matrix);
+    }
+
+    ~CholmodSupernodalLLT() {}
+  protected:
+    void init()
+    {
+      m_cholmod.final_asis = 1;
+      m_cholmod.supernodal = CHOLMOD_SUPERNODAL;
+    }
+};
+
+/** \ingroup CholmodSupport_Module
+  * \class CholmodDecomposition
+  * \brief A general Cholesky factorization and solver based on Cholmod
+  *
+  * This class allows solving A.X = B sparse linear problems via an LL^T or LDL^T Cholesky factorization
+  * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+  * X and B can be either dense or sparse.
+  *
+  * This variant permits changing the underlying Cholesky method at runtime.
+  * On the other hand, it does not provide access to the result of the factorization.
+  * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  *
+  * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non-compressed.
+  *
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> >
+{
+    typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base;
+    using Base::m_cholmod;
+    
+  public:
+    
+    typedef _MatrixType MatrixType;
+    
+    CholmodDecomposition() : Base() { init(); }
+
+    CholmodDecomposition(const MatrixType& matrix) : Base()
+    {
+      init();
+      compute(matrix);
+    }
+
+    ~CholmodDecomposition() {}
+    
+    void setMode(CholmodMode mode)
+    {
+      switch(mode)
+      {
+        case CholmodAuto:
+          m_cholmod.final_asis = 1;
+          m_cholmod.supernodal = CHOLMOD_AUTO;
+          break;
+        case CholmodSimplicialLLt:
+          m_cholmod.final_asis = 0;
+          m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+          m_cholmod.final_ll = 1;
+          break;
+        case CholmodSupernodalLLt:
+          m_cholmod.final_asis = 1;
+          m_cholmod.supernodal = CHOLMOD_SUPERNODAL;
+          break;
+        case CholmodLDLt:
+          m_cholmod.final_asis = 1;
+          m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+          break;
+        default:
+          break;
+      }
+    }
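+    // Illustrative sketch (A and b are hypothetical): selecting the factorization
+    // kind at runtime before computing.
+    //
+    //   Eigen::CholmodDecomposition<Eigen::SparseMatrix<double> > chol;
+    //   chol.setMode(Eigen::CholmodSupernodalLLt);
+    //   chol.compute(A);
+    //   Eigen::VectorXd x = chol.solve(b);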
+  protected:
+    void init()
+    {
+      m_cholmod.final_asis = 1;
+      m_cholmod.supernodal = CHOLMOD_AUTO;
+    }
+};
+
+namespace internal {
+  
+template<typename _MatrixType, int _UpLo, typename Derived, typename Rhs>
+struct solve_retval<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+  : solve_retval_base<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+{
+  typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+template<typename _MatrixType, int _UpLo, typename Derived, typename Rhs>
+struct sparse_solve_retval<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+  : sparse_solve_retval_base<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+{
+  typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CHOLMODSUPPORT_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Array.h b/resources/3rdParty/eigen/Eigen/src/Core/Array.h
new file mode 100644
index 000000000..aaa389978
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Array.h
@@ -0,0 +1,308 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ARRAY_H
+#define EIGEN_ARRAY_H
+
+namespace Eigen {
+
+/** \class Array 
+  * \ingroup Core_Module
+  *
+  * \brief General-purpose arrays with easy API for coefficient-wise operations
+  *
+  * The %Array class is very similar to the Matrix class. It provides
+  * general-purpose one- and two-dimensional arrays. The difference between the
+  * %Array and the %Matrix class is primarily in the API: the API for the
+  * %Array class provides easy access to coefficient-wise operations, while the
+  * API for the %Matrix class provides easy access to linear-algebra
+  * operations.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN.
+  *
+  * \sa \ref TutorialArrayClass, \ref TopicClassHierarchy
+  */
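+// Illustrative sketch (values are hypothetical): Array gives coefficient-wise
+// semantics, in contrast to the linear-algebra semantics of Matrix.
+//
+//   Eigen::Array22f a, b;
+//   a << 1, 2, 3, 4;
+//   b << 5, 6, 7, 8;
+//   Eigen::Array22f c = a * b;                       // coefficient-wise product
+//   Eigen::Matrix2f m = a.matrix() * b.matrix();     // ordinary matrix product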
+namespace internal {
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct traits<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > : traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+{
+  typedef ArrayXpr XprKind;
+  typedef ArrayBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > XprBase;
+};
+}
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+class Array
+  : public PlainObjectBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+{
+  public:
+
+    typedef PlainObjectBase<Array> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Array)
+
+    enum { Options = _Options };
+    typedef typename Base::PlainObject PlainObject;
+
+  protected:
+    template <typename Derived, typename OtherDerived, bool IsVector>
+    friend struct internal::conservative_resize_like_impl;
+
+    using Base::m_storage;
+
+  public:
+
+    using Base::base;
+    using Base::coeff;
+    using Base::coeffRef;
+
+    /**
+      * The usage of
+      *   using Base::operator=;
+      * fails on MSVC. Since the code below works with both GCC and MSVC, we skip
+      * the 'using' declaration here. This applies only to operator=.
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Array& operator=(const EigenBase<OtherDerived> &other)
+    {
+      return Base::operator=(other);
+    }
+
+    /** Copies the value of the expression \a other into \c *this with automatic resizing.
+      *
+      * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
+      * it will be initialized.
+      *
+      * Note that copying a row-vector into a vector (and conversely) is allowed.
+      * The resizing, if any, is then done in the appropriate way so that row-vectors
+      * remain row-vectors and vectors remain vectors.
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Array& operator=(const ArrayBase<OtherDerived>& other)
+    {
+      return Base::_set(other);
+    }
+
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    EIGEN_STRONG_INLINE Array& operator=(const Array& other)
+    {
+      return Base::_set(other);
+    }
+
+    /** Default constructor.
+      *
+      * For fixed-size matrices, does nothing.
+      *
+      * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix
+      * is called a null matrix. This constructor is the unique way to create null matrices: resizing
+      * a matrix to 0 is not supported.
+      *
+      * \sa resize(Index,Index)
+      */
+    EIGEN_STRONG_INLINE explicit Array() : Base()
+    {
+      Base::_check_template_params();
+      EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    // FIXME is it still needed ??
+    /** \internal */
+    Array(internal::constructor_without_unaligned_array_assert)
+      : Base(internal::constructor_without_unaligned_array_assert())
+    {
+      Base::_check_template_params();
+      EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+    }
+#endif
+
+    /** Constructs a vector or row-vector with given dimension. \only_for_vectors
+      *
+      * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
+      * it is redundant to pass the dimension here, so it makes more sense to use the default
+      * constructor Array() instead.
+      */
+    EIGEN_STRONG_INLINE explicit Array(Index dim)
+      : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
+    {
+      Base::_check_template_params();
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Array)
+      eigen_assert(dim >= 0);
+      eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim);
+      EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename T0, typename T1>
+    EIGEN_STRONG_INLINE Array(const T0& x, const T1& y)
+    {
+      Base::_check_template_params();
+      this->template _init2<T0,T1>(x, y);
+    }
+    #else
+    /** constructs an uninitialized matrix with \a rows rows and \a cols columns.
+      *
+      * This is useful for dynamic-size matrices. For fixed-size matrices,
+      * it is redundant to pass these parameters, so one should use the default constructor
+      * Array() instead. */
+    Array(Index rows, Index cols);
+    /** constructs an initialized 2D vector with given coefficients */
+    Array(const Scalar& x, const Scalar& y);
+    #endif
+
+    /** constructs an initialized 3D vector with given coefficients */
+    EIGEN_STRONG_INLINE Array(const Scalar& x, const Scalar& y, const Scalar& z)
+    {
+      Base::_check_template_params();
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3)
+      m_storage.data()[0] = x;
+      m_storage.data()[1] = y;
+      m_storage.data()[2] = z;
+    }
+    /** constructs an initialized 4D vector with given coefficients */
+    EIGEN_STRONG_INLINE Array(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w)
+    {
+      Base::_check_template_params();
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4)
+      m_storage.data()[0] = x;
+      m_storage.data()[1] = y;
+      m_storage.data()[2] = z;
+      m_storage.data()[3] = w;
+    }
+
+    explicit Array(const Scalar *data);
+
+    /** Constructor copying the value of the expression \a other */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Array(const ArrayBase<OtherDerived>& other)
+             : Base(other.rows() * other.cols(), other.rows(), other.cols())
+    {
+      Base::_check_template_params();
+      Base::_set_noalias(other);
+    }
+    /** Copy constructor */
+    EIGEN_STRONG_INLINE Array(const Array& other)
+            : Base(other.rows() * other.cols(), other.rows(), other.cols())
+    {
+      Base::_check_template_params();
+      Base::_set_noalias(other);
+    }
+    /** Copy constructor with in-place evaluation */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Array(const ReturnByValue<OtherDerived>& other)
+    {
+      Base::_check_template_params();
+      Base::resize(other.rows(), other.cols());
+      other.evalTo(*this);
+    }
+
+    /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Array(const EigenBase<OtherDerived> &other)
+      : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
+    {
+      Base::_check_template_params();
+      Base::resize(other.rows(), other.cols());
+      *this = other;
+    }
+
+    /** Override MatrixBase::swap() since for dynamic-sized matrices of same type it is enough to swap the
+      * data pointers.
+      */
+    template<typename OtherDerived>
+    void swap(ArrayBase<OtherDerived> const & other)
+    { this->_swap(other.derived()); }
+
+    inline Index innerStride() const { return 1; }
+    inline Index outerStride() const { return this->innerSize(); }
+
+    #ifdef EIGEN_ARRAY_PLUGIN
+    #include EIGEN_ARRAY_PLUGIN
+    #endif
+
+  private:
+
+    template<typename MatrixType, typename OtherDerived, bool SwapPointers>
+    friend struct internal::matrix_swap_impl;
+};
+
+/** \defgroup arraytypedefs Global array typedefs
+  * \ingroup Core_Module
+  *
+  * Eigen defines several typedef shortcuts for most common 1D and 2D array types.
+  *
+  * The general patterns are the following:
+  *
+  * \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2, \c 3, \c 4 for fixed-size square arrays or \c X for dynamic size,
+  * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd
+  * for complex double.
+  *
+  * For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size array of floats.
+  *
+  * There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is
+  * a fixed-size 1D array of 4 complex floats.
+  *
+  * \sa class Array
+  */
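+// Illustrative sketch of the naming pattern described above (variable names are
+// hypothetical):
+//
+//   Eigen::Array33d fixed3x3;                 // fixed-size 3x3 array of double
+//   Eigen::ArrayXXf dyn(2, 5);                // dynamic-size 2D array of float
+//   Eigen::Array4cf cvec;                     // fixed-size 1D array of 4 complex<float>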
+
+#define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)   \
+/** \ingroup arraytypedefs */                                    \
+typedef Array<Type, Size, Size> Array##SizeSuffix##SizeSuffix##TypeSuffix;  \
+/** \ingroup arraytypedefs */                                    \
+typedef Array<Type, Size, 1>    Array##SizeSuffix##TypeSuffix;
+
+#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size)         \
+/** \ingroup arraytypedefs */                                    \
+typedef Array<Type, Size, Dynamic> Array##Size##X##TypeSuffix;  \
+/** \ingroup arraytypedefs */                                    \
+typedef Array<Type, Dynamic, Size> Array##X##Size##TypeSuffix;
+
+#define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4)
+
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int,                  i)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float,                f)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double,               d)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<float>,  cf)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
+
+#undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES
+#undef EIGEN_MAKE_ARRAY_TYPEDEFS
+
+#undef EIGEN_MAKE_ARRAY_TYPEDEFS_LARGE
+
+#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \
+using Eigen::Matrix##SizeSuffix##TypeSuffix; \
+using Eigen::Vector##SizeSuffix##TypeSuffix; \
+using Eigen::RowVector##SizeSuffix##TypeSuffix;
+
+#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \
+
+#define EIGEN_USING_ARRAY_TYPEDEFS \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd)
+
+} // end namespace Eigen
+
+#endif // EIGEN_ARRAY_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/ArrayBase.h b/resources/3rdParty/eigen/Eigen/src/Core/ArrayBase.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/ArrayBase.h
rename to resources/3rdParty/eigen/Eigen/src/Core/ArrayBase.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/ArrayWrapper.h b/resources/3rdParty/eigen/Eigen/src/Core/ArrayWrapper.h
new file mode 100644
index 000000000..65ffd64ca
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/ArrayWrapper.h
@@ -0,0 +1,254 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ARRAYWRAPPER_H
+#define EIGEN_ARRAYWRAPPER_H
+
+namespace Eigen { 
+
+/** \class ArrayWrapper
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a mathematical vector or matrix as an array object
+  *
+  * This class is the return type of MatrixBase::array(), and most of the time
+  * this is the only way it is used.
+  *
+  * \sa MatrixBase::array(), class MatrixWrapper
+  */
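+// Illustrative sketch (values are hypothetical): MatrixBase::array() returns an
+// ArrayWrapper, making coefficient-wise operations available on a Matrix.
+//
+//   Eigen::Matrix2d m;
+//   m << 1, 2, 3, 4;
+//   Eigen::Matrix2d s = (m.array() + 1.0).matrix();   // add 1 to every coefficient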
+
+namespace internal {
+template<typename ExpressionType>
+struct traits<ArrayWrapper<ExpressionType> >
+  : public traits<typename remove_all<typename ExpressionType::Nested>::type >
+{
+  typedef ArrayXpr XprKind;
+};
+}
+
+template<typename ExpressionType>
+class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
+{
+  public:
+    typedef ArrayBase<ArrayWrapper> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper)
+
+    typedef typename internal::conditional<
+                       internal::is_lvalue<ExpressionType>::value,
+                       Scalar,
+                       const Scalar
+                     >::type ScalarWithConstIfNotLvalue;
+
+    typedef typename internal::nested<ExpressionType>::type NestedExpressionType;
+
+    inline ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {}
+
+    inline Index rows() const { return m_expression.rows(); }
+    inline Index cols() const { return m_expression.cols(); }
+    inline Index outerStride() const { return m_expression.outerStride(); }
+    inline Index innerStride() const { return m_expression.innerStride(); }
+
+    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
+    inline const Scalar* data() const { return m_expression.data(); }
+
+    inline CoeffReturnType coeff(Index row, Index col) const
+    {
+      return m_expression.coeff(row, col);
+    }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      return m_expression.const_cast_derived().coeffRef(row, col);
+    }
+
+    inline const Scalar& coeffRef(Index row, Index col) const
+    {
+      return m_expression.const_cast_derived().coeffRef(row, col);
+    }
+
+    inline CoeffReturnType coeff(Index index) const
+    {
+      return m_expression.coeff(index);
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index row, Index col) const
+    {
+      return m_expression.template packet<LoadMode>(row, col);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index row, Index col, const PacketScalar& x)
+    {
+      m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index index) const
+    {
+      return m_expression.template packet<LoadMode>(index);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& x)
+    {
+      m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
+    }
+
+    template<typename Dest>
+    inline void evalTo(Dest& dst) const { dst = m_expression; }
+
+    const typename internal::remove_all<NestedExpressionType>::type& 
+    nestedExpression() const 
+    {
+      return m_expression;
+    }
+
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index)  */
+    void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); }
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index,Index)*/
+    void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); }
+
+  protected:
+    NestedExpressionType m_expression;
+};
+
+/** \class MatrixWrapper
+  * \ingroup Core_Module
+  *
+  * \brief Expression of an array as a mathematical vector or matrix
+  *
+  * This class is the return type of ArrayBase::matrix(), and most of the time
+  * this is the only way it is used.
+  *
+  * \sa MatrixBase::matrix(), class ArrayWrapper
+  */
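+// Illustrative sketch (values are hypothetical): ArrayBase::matrix() returns a
+// MatrixWrapper, making linear-algebra operations available on an Array.
+//
+//   Eigen::Array22d a;
+//   a << 1, 2, 3, 4;
+//   Eigen::Matrix2d p = a.matrix() * a.matrix();      // matrix product, not coefficient-wise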
+
+namespace internal {
+template<typename ExpressionType>
+struct traits<MatrixWrapper<ExpressionType> >
+ : public traits<typename remove_all<typename ExpressionType::Nested>::type >
+{
+  typedef MatrixXpr XprKind;
+};
+}
+
+template<typename ExpressionType>
+class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
+{
+  public:
+    typedef MatrixBase<MatrixWrapper<ExpressionType> > Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper)
+
+    typedef typename internal::conditional<
+                       internal::is_lvalue<ExpressionType>::value,
+                       Scalar,
+                       const Scalar
+                     >::type ScalarWithConstIfNotLvalue;
+
+    typedef typename internal::nested<ExpressionType>::type NestedExpressionType;
+
+    inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {}
+
+    inline Index rows() const { return m_expression.rows(); }
+    inline Index cols() const { return m_expression.cols(); }
+    inline Index outerStride() const { return m_expression.outerStride(); }
+    inline Index innerStride() const { return m_expression.innerStride(); }
+
+    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
+    inline const Scalar* data() const { return m_expression.data(); }
+
+    inline CoeffReturnType coeff(Index row, Index col) const
+    {
+      return m_expression.coeff(row, col);
+    }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      return m_expression.const_cast_derived().coeffRef(row, col);
+    }
+
+    inline const Scalar& coeffRef(Index row, Index col) const
+    {
+      return m_expression.derived().coeffRef(row, col);
+    }
+
+    inline CoeffReturnType coeff(Index index) const
+    {
+      return m_expression.coeff(index);
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index row, Index col) const
+    {
+      return m_expression.template packet<LoadMode>(row, col);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index row, Index col, const PacketScalar& x)
+    {
+      m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index index) const
+    {
+      return m_expression.template packet<LoadMode>(index);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& x)
+    {
+      m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
+    }
+
+    const typename internal::remove_all<NestedExpressionType>::type& 
+    nestedExpression() const 
+    {
+      return m_expression;
+    }
+
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index)  */
+    void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); }
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index,Index)*/
+    void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); }
+
+  protected:
+    NestedExpressionType m_expression;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_ARRAYWRAPPER_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Assign.h b/resources/3rdParty/eigen/Eigen/src/Core/Assign.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/Assign.h
rename to resources/3rdParty/eigen/Eigen/src/Core/Assign.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Assign_MKL.h b/resources/3rdParty/eigen/Eigen/src/Core/Assign_MKL.h
new file mode 100644
index 000000000..428c6367b
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Assign_MKL.h
@@ -0,0 +1,224 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin()
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_ASSIGN_VML_H
+#define EIGEN_ASSIGN_VML_H
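+
+// Illustrative effect of these bindings (a sketch; assumes MKL is enabled, e.g. via EIGEN_USE_MKL_ALL,
+// and that the expression is large enough to pass EIGEN_MKL_VML_THRESHOLD):
+//   ArrayXd b = ArrayXd::Random(4096);
+//   ArrayXd a = b.sin();   // such an assignment may be routed to MKL's vdSin through vml_call below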
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Op> struct vml_call
+{ enum { IsSupported = 0 }; };
+
+template<typename Dst, typename Src, typename UnaryOp>
+class vml_assign_traits
+{
+  private:
+    enum {
+      DstHasDirectAccess = Dst::Flags & DirectAccessBit,
+      SrcHasDirectAccess = Src::Flags & DirectAccessBit,
+
+      StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)),
+      InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime)
+                : int(Dst::Flags)&RowMajorBit ? int(Dst::ColsAtCompileTime)
+                : int(Dst::RowsAtCompileTime),
+      InnerMaxSize  = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime)
+                    : int(Dst::Flags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime)
+                    : int(Dst::MaxRowsAtCompileTime),
+      MaxSizeAtCompileTime = Dst::SizeAtCompileTime,
+
+      MightEnableVml =  vml_call<UnaryOp>::IsSupported && StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess
+                     && Src::InnerStrideAtCompileTime==1 && Dst::InnerStrideAtCompileTime==1,
+      MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit),
+      VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize,
+      LargeEnough = VmlSize==Dynamic || VmlSize>=EIGEN_MKL_VML_THRESHOLD,
+      MayEnableVml = MightEnableVml && LargeEnough,
+      MayLinearize = MayEnableVml && MightLinearize
+    };
+  public:
+    enum {
+      Traversal = MayLinearize ? LinearVectorizedTraversal
+                : MayEnableVml ? InnerVectorizedTraversal
+                : DefaultTraversal
+    };
+};
+
+template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling,
+         int VmlTraversal = vml_assign_traits<Derived1, Derived2, UnaryOp>::Traversal >
+struct vml_assign_impl
+  : assign_impl<Derived1, Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>
+{
+};
+
+template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling>
+struct vml_assign_impl<Derived1, Derived2, UnaryOp, Traversal, Unrolling, InnerVectorizedTraversal>
+{
+  typedef typename Derived1::Scalar Scalar;
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1& dst, const CwiseUnaryOp<UnaryOp, Derived2>& src)
+  {
+    // in case we want to (or have to) skip VML at runtime we can call:
+    // assign_impl<Derived1,Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>::run(dst,src);
+    const Index innerSize = dst.innerSize();
+    const Index outerSize = dst.outerSize();
+    for(Index outer = 0; outer < outerSize; ++outer) {
+      const Scalar *src_ptr = src.IsRowMajor ?  &(src.nestedExpression().coeffRef(outer,0)) :
+                                                &(src.nestedExpression().coeffRef(0, outer));
+      Scalar *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer));
+      vml_call<UnaryOp>::run(src.functor(), innerSize, src_ptr, dst_ptr );
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling>
+struct vml_assign_impl<Derived1, Derived2, UnaryOp, Traversal, Unrolling, LinearVectorizedTraversal>
+{
+  static inline void run(Derived1& dst, const CwiseUnaryOp<UnaryOp, Derived2>& src)
+  {
+    // in case we want to (or have to) skip VML at runtime we can call:
+    // assign_impl<Derived1,Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>::run(dst,src);
+    vml_call<UnaryOp>::run(src.functor(), dst.size(), src.nestedExpression().data(), dst.data() );
+  }
+};
+
+// Macros
+
+#define EIGEN_MKL_VML_SPECIALIZE_ASSIGN(TRAVERSAL,UNROLLING) \
+  template<typename Derived1, typename Derived2, typename UnaryOp> \
+  struct assign_impl<Derived1, Eigen::CwiseUnaryOp<UnaryOp, Derived2>, TRAVERSAL, UNROLLING, Specialized>  {  \
+    static inline void run(Derived1 &dst, const Eigen::CwiseUnaryOp<UnaryOp, Derived2> &src) { \
+      vml_assign_impl<Derived1,Derived2,UnaryOp,TRAVERSAL,UNROLLING>::run(dst, src); \
+    } \
+  };
+
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,InnerUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,InnerUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(SliceVectorizedTraversal,NoUnrolling)
+
+
+#if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1)
+#define  EIGEN_MKL_VML_MODE VML_HA
+#else
+#define  EIGEN_MKL_VML_MODE VML_LA
+#endif
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE)     \
+  template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > {               \
+    enum { IsSupported = 1 };                                                    \
+    static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& /*func*/,        \
+                            int size, const EIGENTYPE* src, EIGENTYPE* dst) {    \
+      VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst);                           \
+    }                                                                            \
+  };
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE)  \
+  template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > {               \
+    enum { IsSupported = 1 };                                                    \
+    static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& /*func*/,        \
+                            int size, const EIGENTYPE* src, EIGENTYPE* dst) {    \
+      MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE;                                    \
+      VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst, vmlMode);                  \
+    }                                                                            \
+  };
+
+#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE)       \
+  template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > {               \
+    enum { IsSupported = 1 };                                                    \
+    static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& func,        \
+                          int size, const EIGENTYPE* src, EIGENTYPE* dst) {      \
+      EIGENTYPE exponent = func.m_exponent;                                      \
+      MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE;                                    \
+      VMLOP(&size, (const VMLTYPE*)src, (const VMLTYPE*)&exponent,               \
+                        (VMLTYPE*)dst, &vmlMode);                                \
+    }                                                                            \
+  };
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP)                   \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vs##VMLOP, float, float)             \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vd##VMLOP, double, double)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP)                \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vc##VMLOP, scomplex, MKL_Complex8)   \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vz##VMLOP, dcomplex, MKL_Complex16)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP)                        \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP)                         \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP)
+
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP)                \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vms##VMLOP, float, float)         \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmd##VMLOP, double, double)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP)             \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmc##VMLOP, scomplex, MKL_Complex8)  \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmz##VMLOP, dcomplex, MKL_Complex16)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(EIGENOP, VMLOP)                     \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP)                      \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP)
+
+
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sin,  Sin)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(asin, Asin)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(cos,  Cos)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(acos, Acos)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(tan,  Tan)
+//EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs,  Abs)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(exp,  Exp)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(log,  Ln)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sqrt, Sqrt)
+
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr)
+
+// The vm*powx functions are not available in the Windows version of MKL.
+#ifndef _WIN32
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmspowx_, float, float)
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdpowx_, double, double)
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcpowx_, scomplex, MKL_Complex8)
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzpowx_, dcomplex, MKL_Complex16)
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_ASSIGN_VML_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/BandMatrix.h b/resources/3rdParty/eigen/Eigen/src/Core/BandMatrix.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/BandMatrix.h
rename to resources/3rdParty/eigen/Eigen/src/Core/BandMatrix.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Block.h b/resources/3rdParty/eigen/Eigen/src/Core/Block.h
new file mode 100644
index 000000000..5f29cb3d1
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Block.h
@@ -0,0 +1,357 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BLOCK_H
+#define EIGEN_BLOCK_H
+
+namespace Eigen { 
+
+/** \class Block
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a fixed-size or dynamic-size block
+  *
+  * \param XprType the type of the expression in which we are taking a block
+  * \param BlockRows the number of rows of the block we are taking at compile time (optional)
+  * \param BlockCols the number of columns of the block we are taking at compile time (optional)
+  * \param _DirectAccessStatus \internal used for partial specialization
+  *
+  * This class represents an expression of either a fixed-size or dynamic-size block. It is the return
+  * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and
+  * most of the time this is the only way it is used.
+  *
+  * However, if you want to directly manipulate block expressions,
+  * for instance if you want to write a function returning such an expression, you
+  * will need to use this class.
+  *
+  * Here is an example illustrating the dynamic case:
+  * \include class_Block.cpp
+  * Output: \verbinclude class_Block.out
+  *
+  * \note Even though this expression has dynamic size, in the case where \a XprType
+  * has fixed size, this expression inherits a fixed maximal size which means that evaluating
+  * it does not cause a dynamic memory allocation.
+  *
+  * Here is an example illustrating the fixed-size case:
+  * \include class_FixedBlock.cpp
+  * Output: \verbinclude class_FixedBlock.out
+  *
+  * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock
+  */
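+
+// A minimal usage sketch (with a hypothetical 4x4 matrix `m`):
+//   MatrixXf m = MatrixXf::Random(4,4);
+//   MatrixXf dynTopLeft   = m.block(0,0,2,2);     // dynamic-size Block
+//   Matrix2f fixedTopLeft = m.block<2,2>(0,0);    // fixed-size Block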
+
+namespace internal {
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess>
+struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess> > : traits<XprType>
+{
+  typedef typename traits<XprType>::Scalar Scalar;
+  typedef typename traits<XprType>::StorageKind StorageKind;
+  typedef typename traits<XprType>::XprKind XprKind;
+  typedef typename nested<XprType>::type XprTypeNested;
+  typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
+  enum{
+    MatrixRows = traits<XprType>::RowsAtCompileTime,
+    MatrixCols = traits<XprType>::ColsAtCompileTime,
+    RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows,
+    ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols,
+    MaxRowsAtCompileTime = BlockRows==0 ? 0
+                         : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime)
+                         : int(traits<XprType>::MaxRowsAtCompileTime),
+    MaxColsAtCompileTime = BlockCols==0 ? 0
+                         : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
+                         : int(traits<XprType>::MaxColsAtCompileTime),
+    XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
+    IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
+               : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
+               : XprTypeIsRowMajor,
+    HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
+    InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
+    InnerStrideAtCompileTime = HasSameStorageOrderAsXprType
+                             ? int(inner_stride_at_compile_time<XprType>::ret)
+                             : int(outer_stride_at_compile_time<XprType>::ret),
+    OuterStrideAtCompileTime = HasSameStorageOrderAsXprType
+                             ? int(outer_stride_at_compile_time<XprType>::ret)
+                             : int(inner_stride_at_compile_time<XprType>::ret),
+    MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits<Scalar>::size) == 0)
+                       && (InnerStrideAtCompileTime == 1)
+                        ? PacketAccessBit : 0,
+    MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0,
+    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
+    FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
+    FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
+    Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
+                                        DirectAccessBit |
+                                        MaskPacketAccessBit |
+                                        MaskAlignedBit),
+    Flags = Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit
+  };
+};
+}
+
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess> class Block
+  : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<Block>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Block)
+
+    class InnerIterator;
+
+    /** Column or Row constructor
+      */
+    inline Block(XprType& xpr, Index i)
+      : m_xpr(xpr),
+        // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
+        // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
+        // all other cases are invalid.
+        // The case of a 1x1 matrix seems ambiguous, but the result is the same anyway.
+        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
+        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
+        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
+        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
+    {
+      eigen_assert( (i>=0) && (
+          ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
+        ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols())));
+    }
+
+    /** Fixed-size constructor
+      */
+    inline Block(XprType& xpr, Index startRow, Index startCol)
+      : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
+        m_blockRows(BlockRows), m_blockCols(BlockCols)
+    {
+      EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
+      eigen_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows()
+             && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols());
+    }
+
+    /** Dynamic-size constructor
+      */
+    inline Block(XprType& xpr,
+          Index startRow, Index startCol,
+          Index blockRows, Index blockCols)
+      : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
+                          m_blockRows(blockRows), m_blockCols(blockCols)
+    {
+      eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
+          && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
+      eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows()
+          && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols());
+    }
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
+
+    inline Index rows() const { return m_blockRows.value(); }
+    inline Index cols() const { return m_blockCols.value(); }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(XprType)
+      return m_xpr.const_cast_derived()
+               .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+    }
+
+    inline const Scalar& coeffRef(Index row, Index col) const
+    {
+      return m_xpr.derived()
+               .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+    }
+
+    EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const
+    {
+      return m_xpr.coeff(row + m_startRow.value(), col + m_startCol.value());
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(XprType)
+      return m_xpr.const_cast_derived()
+             .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                       m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      return m_xpr.const_cast_derived()
+             .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                       m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+
+    inline const CoeffReturnType coeff(Index index) const
+    {
+      return m_xpr
+             .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                    m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+
+    template<int LoadMode>
+    inline PacketScalar packet(Index row, Index col) const
+    {
+      return m_xpr.template packet<Unaligned>
+              (row + m_startRow.value(), col + m_startCol.value());
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index row, Index col, const PacketScalar& x)
+    {
+      m_xpr.const_cast_derived().template writePacket<Unaligned>
+              (row + m_startRow.value(), col + m_startCol.value(), x);
+    }
+
+    template<int LoadMode>
+    inline PacketScalar packet(Index index) const
+    {
+      return m_xpr.template packet<Unaligned>
+              (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& x)
+    {
+      m_xpr.const_cast_derived().template writePacket<Unaligned>
+         (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+          m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), x);
+    }
+
+    #ifdef EIGEN_PARSED_BY_DOXYGEN
+    /** \sa MapBase::data() */
+    inline const Scalar* data() const;
+    inline Index innerStride() const;
+    inline Index outerStride() const;
+    #endif
+
+    const typename internal::remove_all<typename XprType::Nested>::type& nestedExpression() const 
+    { 
+      return m_xpr; 
+    }
+      
+    Index startRow() const 
+    { 
+      return m_startRow.value(); 
+    }
+      
+    Index startCol() const 
+    { 
+      return m_startCol.value(); 
+    }
+
+  protected:
+
+    const typename XprType::Nested m_xpr;
+    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
+    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
+};
+
+/** \internal */
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+class Block<XprType,BlockRows,BlockCols, InnerPanel,true>
+  : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel, true> >
+{
+  public:
+
+    typedef MapBase<Block> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Block)
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
+
+    /** Column or Row constructor
+      */
+    inline Block(XprType& xpr, Index i)
+      : Base(internal::const_cast_ptr(&xpr.coeffRef(
+              (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0,
+              (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0)),
+             BlockRows==1 ? 1 : xpr.rows(),
+             BlockCols==1 ? 1 : xpr.cols()),
+        m_xpr(xpr)
+    {
+      eigen_assert( (i>=0) && (
+          ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
+        ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols())));
+      init();
+    }
+
+    /** Fixed-size constructor
+      */
+    inline Block(XprType& xpr, Index startRow, Index startCol)
+      : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol))), m_xpr(xpr)
+    {
+      eigen_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows()
+             && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols());
+      init();
+    }
+
+    /** Dynamic-size constructor
+      */
+    inline Block(XprType& xpr,
+          Index startRow, Index startCol,
+          Index blockRows, Index blockCols)
+      : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol)), blockRows, blockCols),
+        m_xpr(xpr)
+    {
+      eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
+             && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
+      eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows()
+             && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols());
+      init();
+    }
+
+    const typename internal::remove_all<typename XprType::Nested>::type& nestedExpression() const 
+    { 
+      return m_xpr; 
+    }
+      
+    /** \sa MapBase::innerStride() */
+    inline Index innerStride() const
+    {
+      return internal::traits<Block>::HasSameStorageOrderAsXprType
+             ? m_xpr.innerStride()
+             : m_xpr.outerStride();
+    }
+
+    /** \sa MapBase::outerStride() */
+    inline Index outerStride() const
+    {
+      return m_outerStride;
+    }
+
+  #ifndef __SUNPRO_CC
+  // FIXME sunstudio is not friendly with the above friend...
+  // META-FIXME there is no 'friend' keyword around here. Is this obsolete?
+  protected:
+  #endif
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal used by allowAligned() */
+    inline Block(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
+      : Base(data, blockRows, blockCols), m_xpr(xpr)
+    {
+      init();
+    }
+    #endif
+
+  protected:
+    void init()
+    {
+      m_outerStride = internal::traits<Block>::HasSameStorageOrderAsXprType
+                    ? m_xpr.outerStride()
+                    : m_xpr.innerStride();
+    }
+
+    typename XprType::Nested m_xpr;
+    Index m_outerStride;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_BLOCK_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/BooleanRedux.h b/resources/3rdParty/eigen/Eigen/src/Core/BooleanRedux.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/BooleanRedux.h
rename to resources/3rdParty/eigen/Eigen/src/Core/BooleanRedux.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Core/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Core/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/CommaInitializer.h b/resources/3rdParty/eigen/Eigen/src/Core/CommaInitializer.h
new file mode 100644
index 000000000..f20c1774c
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/CommaInitializer.h
@@ -0,0 +1,141 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMMAINITIALIZER_H
+#define EIGEN_COMMAINITIALIZER_H
+
+namespace Eigen { 
+
+/** \class CommaInitializer
+  * \ingroup Core_Module
+  *
+  * \brief Helper class used by the comma initializer operator
+  *
+  * This class is internally used to implement the comma initializer feature. It is
+  * the return type of MatrixBase::operator<<, and most of the time this is the only
+  * way it is used.
+  *
+  * \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
+  */
+template<typename XprType>
+struct CommaInitializer
+{
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::Index Index;
+
+  inline CommaInitializer(XprType& xpr, const Scalar& s)
+    : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
+  {
+    m_xpr.coeffRef(0,0) = s;
+  }
+
+  template<typename OtherDerived>
+  inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
+    : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
+  {
+    m_xpr.block(0, 0, other.rows(), other.cols()) = other;
+  }
+
+  /* inserts a scalar value in the target matrix */
+  CommaInitializer& operator,(const Scalar& s)
+  {
+    if (m_col==m_xpr.cols())
+    {
+      m_row+=m_currentBlockRows;
+      m_col = 0;
+      m_currentBlockRows = 1;
+      eigen_assert(m_row<m_xpr.rows()
+        && "Too many rows passed to comma initializer (operator<<)");
+    }
+    eigen_assert(m_col<m_xpr.cols()
+      && "Too many coefficients passed to comma initializer (operator<<)");
+    eigen_assert(m_currentBlockRows==1);
+    m_xpr.coeffRef(m_row, m_col++) = s;
+    return *this;
+  }
+
+  /* inserts a matrix expression in the target matrix */
+  template<typename OtherDerived>
+  CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
+  {
+    if(other.cols()==0 || other.rows()==0)
+      return *this;
+    if (m_col==m_xpr.cols())
+    {
+      m_row+=m_currentBlockRows;
+      m_col = 0;
+      m_currentBlockRows = other.rows();
+      eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
+        && "Too many rows passed to comma initializer (operator<<)");
+    }
+    eigen_assert(m_col<m_xpr.cols()
+      && "Too many coefficients passed to comma initializer (operator<<)");
+    eigen_assert(m_currentBlockRows==other.rows());
+    if (OtherDerived::SizeAtCompileTime != Dynamic)
+      m_xpr.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
+                              OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1>
+                    (m_row, m_col) = other;
+    else
+      m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other;
+    m_col += other.cols();
+    return *this;
+  }
+
+  inline ~CommaInitializer()
+  {
+    eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows()
+         && m_col == m_xpr.cols()
+         && "Too few coefficients passed to comma initializer (operator<<)");
+  }
+
+  /** \returns the built matrix once all its coefficients have been set.
+    * Calling finished is 100% optional. Its purpose is to write expressions
+    * like this:
+    * \code
+    * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
+    * \endcode
+    */
+  inline XprType& finished() { return m_xpr; }
+
+  XprType& m_xpr;   // target expression
+  Index m_row;              // current row id
+  Index m_col;              // current col id
+  Index m_currentBlockRows; // current block height
+};
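+
+// A minimal usage sketch of the comma initializer implemented above:
+//   Matrix3f m;
+//   m << 1, 2, 3,
+//        4, 5, 6,
+//        7, 8, 9;   // each ',' returns the CommaInitializer, which tracks the current fill position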
+
+/** \anchor MatrixBaseCommaInitRef
+  * Convenient operator to set the coefficients of a matrix.
+  *
+  * The coefficients must be provided in a row major order and exactly match
+  * the size of the matrix. Otherwise an assertion is raised.
+  *
+  * Example: \include MatrixBase_set.cpp
+  * Output: \verbinclude MatrixBase_set.out
+  *
+  * \sa CommaInitializer::finished(), class CommaInitializer
+  */
+template<typename Derived>
+inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
+{
+  return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
+}
+
+/** \sa operator<<(const Scalar&) */
+template<typename Derived>
+template<typename OtherDerived>
+inline CommaInitializer<Derived>
+DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
+{
+  return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMMAINITIALIZER_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/CwiseBinaryOp.h b/resources/3rdParty/eigen/Eigen/src/Core/CwiseBinaryOp.h
new file mode 100644
index 000000000..1b93af31b
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/CwiseBinaryOp.h
@@ -0,0 +1,229 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CWISE_BINARY_OP_H
+#define EIGEN_CWISE_BINARY_OP_H
+
+namespace Eigen {
+
+/** \class CwiseBinaryOp
+  * \ingroup Core_Module
+  *
+  * \brief Generic expression where a coefficient-wise binary operator is applied to two expressions
+  *
+  * \param BinaryOp template functor implementing the operator
+  * \param Lhs the type of the left-hand side
+  * \param Rhs the type of the right-hand side
+  *
+  * This class represents an expression where a coefficient-wise binary operator is applied to two expressions.
+  * It is the return type of binary operators, by which we mean only those binary operators where
+  * both the left-hand side and the right-hand side are Eigen expressions.
+  * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp.
+  *
+  * Most of the time, this is the only way that it is used, so you typically don't have to name
+  * CwiseBinaryOp types explicitly.
+  *
+  * \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp
+  */
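+
+// A minimal usage sketch (with hypothetical matrices `a` and `b` of equal size):
+//   MatrixXf a = MatrixXf::Random(2,2), b = MatrixXf::Random(2,2);
+//   MatrixXf sum  = a + b;              // a CwiseBinaryOp built from internal::scalar_sum_op
+//   MatrixXf prod = a.cwiseProduct(b);  // a CwiseBinaryOp built from internal::scalar_product_op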
+
+namespace internal {
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+  // we must not inherit from traits<Lhs> since it has
+  // the potential to cause problems with MSVC
+  typedef typename remove_all<Lhs>::type Ancestor;
+  typedef typename traits<Ancestor>::XprKind XprKind;
+  enum {
+    RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
+    ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
+    MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
+  };
+
+  // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor),
+  // we still want to handle the case when the result type is different.
+  typedef typename result_of<
+                     BinaryOp(
+                       typename Lhs::Scalar,
+                       typename Rhs::Scalar
+                     )
+                   >::type Scalar;
+  typedef typename promote_storage_type<typename traits<Lhs>::StorageKind,
+                                           typename traits<Rhs>::StorageKind>::ret StorageKind;
+  typedef typename promote_index_type<typename traits<Lhs>::Index,
+                                         typename traits<Rhs>::Index>::type Index;
+  typedef typename Lhs::Nested LhsNested;
+  typedef typename Rhs::Nested RhsNested;
+  typedef typename remove_reference<LhsNested>::type _LhsNested;
+  typedef typename remove_reference<RhsNested>::type _RhsNested;
+  enum {
+    LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+    RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+    LhsFlags = _LhsNested::Flags,
+    RhsFlags = _RhsNested::Flags,
+    SameType = is_same<typename _LhsNested::Scalar,typename _RhsNested::Scalar>::value,
+    StorageOrdersAgree = (int(Lhs::Flags)&RowMajorBit)==(int(Rhs::Flags)&RowMajorBit),
+    Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
+        HereditaryBits
+      | (int(LhsFlags) & int(RhsFlags) &
+           ( AlignedBit
+           | (StorageOrdersAgree ? LinearAccessBit : 0)
+           | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
+           )
+        )
+     ),
+    Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
+    CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits<BinaryOp>::Cost
+  };
+};
+} // end namespace internal
+
+// we require Lhs and Rhs to have the same scalar type. Currently there is no example of a binary functor
+// that would take two operands of different types. If there were such an example, then this check should be
+// moved to the BinaryOp functors, on a per-case basis. This would however require a change in the BinaryOp functors, as
+// currently they take only one typename Scalar template parameter.
+// It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.
+// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to
+// add together a float matrix and a double matrix.
+#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
+  EIGEN_STATIC_ASSERT((internal::functor_allows_mixing_real_and_complex<BINOP>::ret \
+                        ? int(internal::is_same<typename NumTraits<LHS>::Real, typename NumTraits<RHS>::Real>::value) \
+                        : int(internal::is_same<LHS, RHS>::value)), \
+    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>
+class CwiseBinaryOpImpl;
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOp : internal::no_assignment_operator,
+  public CwiseBinaryOpImpl<
+          BinaryOp, Lhs, Rhs,
+          typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
+                                           typename internal::traits<Rhs>::StorageKind>::ret>
+{
+  public:
+
+    typedef typename CwiseBinaryOpImpl<
+        BinaryOp, Lhs, Rhs,
+        typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
+                                         typename internal::traits<Rhs>::StorageKind>::ret>::Base Base;
+    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)
+
+    typedef typename internal::nested<Lhs>::type LhsNested;
+    typedef typename internal::nested<Rhs>::type RhsNested;
+    typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
+    typedef typename internal::remove_reference<RhsNested>::type _RhsNested;
+
+    EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp())
+      : m_lhs(lhs), m_rhs(rhs), m_functor(func)
+    {
+      EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar);
+      // require the sizes to match
+      EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)
+      eigen_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols());
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const {
+      // prefer the operand whose row count is fixed at compile time, to enable compile-time optimizations
+      if (internal::traits<typename internal::remove_all<LhsNested>::type>::RowsAtCompileTime==Dynamic)
+        return m_rhs.rows();
+      else
+        return m_lhs.rows();
+    }
+    EIGEN_STRONG_INLINE Index cols() const {
+      // prefer the operand whose column count is fixed at compile time, to enable compile-time optimizations
+      if (internal::traits<typename internal::remove_all<LhsNested>::type>::ColsAtCompileTime==Dynamic)
+        return m_rhs.cols();
+      else
+        return m_lhs.cols();
+    }
+
+    /** \returns the left hand side nested expression */
+    const _LhsNested& lhs() const { return m_lhs; }
+    /** \returns the right hand side nested expression */
+    const _RhsNested& rhs() const { return m_rhs; }
+    /** \returns the functor representing the binary operation */
+    const BinaryOp& functor() const { return m_functor; }
+
+  protected:
+    LhsNested m_lhs;
+    RhsNested m_rhs;
+    const BinaryOp m_functor;
+};
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Dense>
+  : public internal::dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type
+{
+    typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
+  public:
+
+    typedef typename internal::dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
+    {
+      return derived().functor()(derived().lhs().coeff(row, col),
+                                 derived().rhs().coeff(row, col));
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+    {
+      return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(row, col),
+                                          derived().rhs().template packet<LoadMode>(row, col));
+    }
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
+    {
+      return derived().functor()(derived().lhs().coeff(index),
+                                 derived().rhs().coeff(index));
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
+    {
+      return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(index),
+                                          derived().rhs().template packet<LoadMode>(index));
+    }
+};
+
+/** replaces \c *this by \c *this - \a other.
+  *
+  * \returns a reference to \c *this
+  */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
+{
+  SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, Derived, OtherDerived> tmp(derived());
+  tmp = other.derived();
+  return derived();
+}
+
+/** replaces \c *this by \c *this + \a other.
+  *
+  * \returns a reference to \c *this
+  */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)
+{
+  SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, Derived, OtherDerived> tmp(derived());
+  tmp = other.derived();
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_CWISE_BINARY_OP_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/CwiseNullaryOp.h b/resources/3rdParty/eigen/Eigen/src/Core/CwiseNullaryOp.h
new file mode 100644
index 000000000..2635a62b0
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/CwiseNullaryOp.h
@@ -0,0 +1,864 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CWISE_NULLARY_OP_H
+#define EIGEN_CWISE_NULLARY_OP_H
+
+namespace Eigen {
+
+/** \class CwiseNullaryOp
+  * \ingroup Core_Module
+  *
+  * \brief Generic expression of a matrix where all coefficients are defined by a functor
+  *
+  * \param NullaryOp template functor implementing the operator
+  * \param PlainObjectType the underlying plain matrix/array type
+  *
+  * This class represents an expression of a generic nullary operator.
+  * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods,
+  * and most of the time this is the only way it is used.
+  *
+  * However, if you want to write a function returning such an expression, you
+  * will need to use this class.
+  *
+  * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr()
+  */
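+
+// A minimal sketch of expressions whose type is a CwiseNullaryOp:
+//   MatrixXf c = MatrixXf::Constant(2, 2, 3.5f);   // nullary functor: internal::scalar_constant_op
+//   VectorXf o = VectorXf::Ones(5);                // every coefficient set to 1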
+
+namespace internal {
+template<typename NullaryOp, typename PlainObjectType>
+struct traits<CwiseNullaryOp<NullaryOp, PlainObjectType> > : traits<PlainObjectType>
+{
+  enum {
+    Flags = (traits<PlainObjectType>::Flags
+      & (  HereditaryBits
+         | (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
+         | (functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0)))
+      | (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
+    CoeffReadCost = functor_traits<NullaryOp>::Cost
+  };
+};
+}
+
+template<typename NullaryOp, typename PlainObjectType>
+class CwiseNullaryOp : internal::no_assignment_operator,
+  public internal::dense_xpr_base< CwiseNullaryOp<NullaryOp, PlainObjectType> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<CwiseNullaryOp>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)
+
+    CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp())
+      : m_rows(rows), m_cols(cols), m_functor(func)
+    {
+      eigen_assert(rows >= 0
+            && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
+            &&  cols >= 0
+            && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); }
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index rows, Index cols) const
+    {
+      return m_functor(rows, cols);
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+    {
+      return m_functor.packetOp(row, col);
+    }
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
+    {
+      return m_functor(index);
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
+    {
+      return m_functor.packetOp(index);
+    }
+
+    /** \returns the functor representing the nullary operation */
+    const NullaryOp& functor() const { return m_functor; }
+
+  protected:
+    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
+    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
+    const NullaryOp m_functor;
+};
+
+
+/** \returns an expression of a matrix defined by a custom functor \a func
+  *
+  * The parameters \a rows and \a cols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this MatrixBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a rows and \a cols as arguments, so the variant taking only
+  * the functor, NullaryExpr(const CustomNullaryOp&), should be used instead.
+  *
+  * The template parameter \a CustomNullaryOp is the type of the functor.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+template<typename CustomNullaryOp>
+EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
+DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func)
+{
+  return CwiseNullaryOp<CustomNullaryOp, Derived>(rows, cols, func);
+}
+
+/** \returns an expression of a matrix defined by a custom functor \a func
+  *
+  * The parameter \a size is the size of the returned vector.
+  * Must be compatible with this MatrixBase type.
+  *
+  * \only_for_vectors
+  *
+  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
+  * it is redundant to pass \a size as argument, so the variant taking only
+  * the functor, NullaryExpr(const CustomNullaryOp&), should be used instead.
+  *
+  * The template parameter \a CustomNullaryOp is the type of the functor.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+template<typename CustomNullaryOp>
+EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
+DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  if(RowsAtCompileTime == 1) return CwiseNullaryOp<CustomNullaryOp, Derived>(1, size, func);
+  else return CwiseNullaryOp<CustomNullaryOp, Derived>(size, 1, func);
+}
+
+/** \returns an expression of a matrix defined by a custom functor \a func
+  *
+  * This variant is only for fixed-size DenseBase types. For dynamic-size types, you
+  * need to use the variants taking size arguments.
+  *
+  * The template parameter \a CustomNullaryOp is the type of the functor.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+template<typename CustomNullaryOp>
+EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
+DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
+{
+  return CwiseNullaryOp<CustomNullaryOp, Derived>(RowsAtCompileTime, ColsAtCompileTime, func);
+}
+
+/** \returns an expression of a constant matrix of value \a value
+  *
+  * The parameters \a rows and \a cols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this DenseBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a rows and \a cols as arguments, so Constant(const Scalar&)
+  * should be used instead.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value)
+{
+  return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_constant_op<Scalar>(value));
+}
+
+/** \returns an expression of a constant matrix of value \a value
+  *
+  * The parameter \a size is the size of the returned vector.
+  * Must be compatible with this DenseBase type.
+  *
+  * \only_for_vectors
+  *
+  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
+  * it is redundant to pass \a size as argument, so Constant(const Scalar&)
+  * should be used instead.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Constant(Index size, const Scalar& value)
+{
+  return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value));
+}
+
+/** \returns an expression of a constant matrix of value \a value
+  *
+  * This variant is only for fixed-size DenseBase types. For dynamic-size types, you
+  * need to use the variants taking size arguments.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Constant(const Scalar& value)
+{
+  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+  return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op<Scalar>(value));
+}
+
+/**
+  * \brief Sets a linearly spaced vector.
+  *
+  * The function generates 'size' equally spaced values in the closed interval [low,high].
+  * This particular version of LinSpaced() uses sequential access, i.e. vector access is
+  * assumed to be a(0), a(1), ..., a(size-1). This assumption allows for better vectorization
+  * and yields faster code than the random access version.
+  *
+  * When size is set to 1, a vector of length 1 containing 'high' is returned.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include DenseBase_LinSpaced_seq.cpp
+  * Output: \verbinclude DenseBase_LinSpaced_seq.out
+  *
+  * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Index,Scalar,Scalar), CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,false>(low,high,size));
+}
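+
+// A minimal usage sketch of the sequential-access variant:
+//   VectorXf v = VectorXf::LinSpaced(Sequential, 5, 0.f, 1.f);   // 0, 0.25, 0.5, 0.75, 1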
+
+/**
+  * \copydoc DenseBase::LinSpaced(Sequential_t, Index, const Scalar&, const Scalar&)
+  * Special version for fixed size types which does not require the size parameter.
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+  return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,false>(low,high,Derived::SizeAtCompileTime));
+}
+
+/**
+  * \brief Sets a linearly spaced vector.
+  *
+  * The function generates 'size' equally spaced values in the closed interval [low,high].
+  * When size is set to 1, a vector of length 1 containing 'high' is returned.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include DenseBase_LinSpaced.cpp
+  * Output: \verbinclude DenseBase_LinSpaced.out
+  *
+  * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Sequential_t,Index,const Scalar&,const Scalar&), CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,true>(low,high,size));
+}
+
+/**
+  * \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&)
+  * Special version for fixed size types which does not require the size parameter.
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+  return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,true>(low,high,Derived::SizeAtCompileTime));
+}
+
+/** \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
+template<typename Derived>
+bool DenseBase<Derived>::isApproxToConstant
+(const Scalar& value, RealScalar prec) const
+{
+  for(Index j = 0; j < cols(); ++j)
+    for(Index i = 0; i < rows(); ++i)
+      if(!internal::isApprox(this->coeff(i, j), value, prec))
+        return false;
+  return true;
+}
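+
+// A minimal usage sketch (with a hypothetical constant 2x2 matrix):
+//   MatrixXf m = MatrixXf::Constant(2, 2, 3.f);
+//   bool allThrees = m.isApproxToConstant(3.f);   // true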
+
+/** This is just an alias for isApproxToConstant().
+  *
+  * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
+template<typename Derived>
+bool DenseBase<Derived>::isConstant
+(const Scalar& value, RealScalar prec) const
+{
+  return isApproxToConstant(value, prec);
+}
+
+/** Alias for setConstant(): sets all coefficients in this expression to \a value.
+  *
+  * \sa setConstant(), Constant(), class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& value)
+{
+  setConstant(value);
+}
+
+/** Sets all coefficients in this expression to \a value.
+  *
+  * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& value)
+{
+  return derived() = Constant(rows(), cols(), value);
+}
+
+/** Resizes to the given \a size, and sets all coefficients in this expression to the given \a value.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include Matrix_setConstant_int.cpp
+  * Output: \verbinclude Matrix_setConstant_int.out
+  *
+  * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setConstant(Index size, const Scalar& value)
+{
+  resize(size);
+  return setConstant(value);
+}
+
+/** Resizes to the given size, and sets all coefficients in this expression to the given \a value.
+  *
+  * \param rows the new number of rows
+  * \param cols the new number of columns
+  * \param value the value to which all coefficients are set
+  *
+  * Example: \include Matrix_setConstant_int_int.cpp
+  * Output: \verbinclude Matrix_setConstant_int_int.out
+  *
+  * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& value)
+{
+  resize(rows, cols);
+  return setConstant(value);
+}
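+
+// Usage sketch (illustrative only): fill() and the resizing setConstant() overloads.
+//
+//   Eigen::Vector3d v;
+//   v.fill(1.5);                  // v == (1.5, 1.5, 1.5)
+//   Eigen::MatrixXd m;
+//   m.setConstant(2, 3, 0.5);     // resizes m to 2x3 and sets every coefficient to 0.5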
+
+/**
+  * \brief Sets a linearly spaced vector.
+  *
+  * The function generates 'size' equally spaced values in the closed interval [low,high].
+  * When size is set to 1, a vector of length 1 containing 'high' is returned.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include DenseBase_setLinSpaced.cpp
+  * Output: \verbinclude DenseBase_setLinSpaced.out
+  *
+  * \sa CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index size, const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return derived() = Derived::NullaryExpr(size, internal::linspaced_op<Scalar,false>(low,high,size));
+}
+
+/**
+  * \brief Sets a linearly spaced vector.
+  *
+  * The function fills *this with equally spaced values in the closed interval [low,high].
+  * If *this has size 1, its single coefficient is set to 'high'.
+  *
+  * \only_for_vectors
+  *
+  * \sa setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return setLinSpaced(size(), low, high);
+}
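+
+// Usage sketch (illustrative only):
+//
+//   Eigen::VectorXd v(4);
+//   v.setLinSpaced(0.0, 3.0);        // uses the current size: v == (0, 1, 2, 3)
+//   v.setLinSpaced(4, 10.0, 13.0);   // same, with the size given explicitly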
+
+// zero:
+
+/** \returns an expression of a zero matrix.
+  *
+  * The parameters \a rows and \a cols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this MatrixBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_zero_int_int.cpp
+  * Output: \verbinclude MatrixBase_zero_int_int.out
+  *
+  * \sa Zero(), Zero(Index)
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Zero(Index rows, Index cols)
+{
+  return Constant(rows, cols, Scalar(0));
+}
+
+/** \returns an expression of a zero vector.
+  *
+  * The parameter \a size is the size of the returned vector.
+  * Must be compatible with this MatrixBase type.
+  *
+  * \only_for_vectors
+  *
+  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
+  * it is redundant to pass \a size as argument, so Zero() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_zero_int.cpp
+  * Output: \verbinclude MatrixBase_zero_int.out
+  *
+  * \sa Zero(), Zero(Index,Index)
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Zero(Index size)
+{
+  return Constant(size, Scalar(0));
+}
+
+/** \returns an expression of a fixed-size zero matrix or vector.
+  *
+  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
+  * need to use the variants taking size arguments.
+  *
+  * Example: \include MatrixBase_zero.cpp
+  * Output: \verbinclude MatrixBase_zero.out
+  *
+  * \sa Zero(Index), Zero(Index,Index)
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Zero()
+{
+  return Constant(Scalar(0));
+}
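+
+// Usage sketch (illustrative only):
+//
+//   Eigen::MatrixXf a = Eigen::MatrixXf::Zero(3, 4);   // dynamic size: pass rows and cols
+//   Eigen::VectorXf b = Eigen::VectorXf::Zero(5);      // dynamic-size vector: pass the size
+//   Eigen::Matrix3f c = Eigen::Matrix3f::Zero();       // fixed size: no arguments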
+
+/** \returns true if *this is approximately equal to the zero matrix,
+  *          within the precision given by \a prec.
+  *
+  * Example: \include MatrixBase_isZero.cpp
+  * Output: \verbinclude MatrixBase_isZero.out
+  *
+  * \sa class CwiseNullaryOp, Zero()
+  */
+template<typename Derived>
+bool DenseBase<Derived>::isZero(RealScalar prec) const
+{
+  for(Index j = 0; j < cols(); ++j)
+    for(Index i = 0; i < rows(); ++i)
+      if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast<Scalar>(1), prec))
+        return false;
+  return true;
+}
+
+/** Sets all coefficients in this expression to zero.
+  *
+  * Example: \include MatrixBase_setZero.cpp
+  * Output: \verbinclude MatrixBase_setZero.out
+  *
+  * \sa class CwiseNullaryOp, Zero()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()
+{
+  return setConstant(Scalar(0));
+}
+
+/** Resizes to the given \a size, and sets all coefficients in this expression to zero.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include Matrix_setZero_int.cpp
+  * Output: \verbinclude Matrix_setZero_int.out
+  *
+  * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setZero(Index size)
+{
+  resize(size);
+  return setConstant(Scalar(0));
+}
+
+/** Resizes to the given size, and sets all coefficients in this expression to zero.
+  *
+  * \param rows the new number of rows
+  * \param cols the new number of columns
+  *
+  * Example: \include Matrix_setZero_int_int.cpp
+  * Output: \verbinclude Matrix_setZero_int_int.out
+  *
+  * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setZero(Index rows, Index cols)
+{
+  resize(rows, cols);
+  return setConstant(Scalar(0));
+}
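+
+// Usage sketch (illustrative only): the resizing setZero() overloads from PlainObjectBase.
+//
+//   Eigen::VectorXd v;
+//   v.setZero(6);        // resizes v to 6 coefficients, all zero
+//   Eigen::MatrixXd m;
+//   m.setZero(4, 4);     // resizes m to 4x4, all zero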
+
+// ones:
+
+/** \returns an expression of a matrix where all coefficients equal one.
+  *
+  * The parameters \a rows and \a cols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this MatrixBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_ones_int_int.cpp
+  * Output: \verbinclude MatrixBase_ones_int_int.out
+  *
+  * \sa Ones(), Ones(Index), isOnes(), class Ones
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Ones(Index rows, Index cols)
+{
+  return Constant(rows, cols, Scalar(1));
+}
+
+/** \returns an expression of a vector where all coefficients equal one.
+  *
+  * The parameter \a size is the size of the returned vector.
+  * Must be compatible with this MatrixBase type.
+  *
+  * \only_for_vectors
+  *
+  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
+  * it is redundant to pass \a size as argument, so Ones() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_ones_int.cpp
+  * Output: \verbinclude MatrixBase_ones_int.out
+  *
+  * \sa Ones(), Ones(Index,Index), isOnes(), class Ones
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Ones(Index size)
+{
+  return Constant(size, Scalar(1));
+}
+
+/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one.
+  *
+  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
+  * need to use the variants taking size arguments.
+  *
+  * Example: \include MatrixBase_ones.cpp
+  * Output: \verbinclude MatrixBase_ones.out
+  *
+  * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Ones()
+{
+  return Constant(Scalar(1));
+}
+
+/** \returns true if *this is approximately equal to the matrix where all coefficients
+  *          are equal to 1, within the precision given by \a prec.
+  *
+  * Example: \include MatrixBase_isOnes.cpp
+  * Output: \verbinclude MatrixBase_isOnes.out
+  *
+  * \sa class CwiseNullaryOp, Ones()
+  */
+template<typename Derived>
+bool DenseBase<Derived>::isOnes
+(RealScalar prec) const
+{
+  return isApproxToConstant(Scalar(1), prec);
+}
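+
+// Usage sketch (illustrative only): the constant-value predicates.
+//
+//   bool t1 = Eigen::Matrix2d::Ones().isOnes();                         // true
+//   bool t2 = Eigen::Matrix3d::Zero().isZero();                         // true
+//   bool t3 = Eigen::Matrix3d::Constant(3.0).isApproxToConstant(3.0);   // true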
+
+/** Sets all coefficients in this expression to one.
+  *
+  * Example: \include MatrixBase_setOnes.cpp
+  * Output: \verbinclude MatrixBase_setOnes.out
+  *
+  * \sa class CwiseNullaryOp, Ones()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
+{
+  return setConstant(Scalar(1));
+}
+
+/** Resizes to the given \a size, and sets all coefficients in this expression to one.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include Matrix_setOnes_int.cpp
+  * Output: \verbinclude Matrix_setOnes_int.out
+  *
+  * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setOnes(Index size)
+{
+  resize(size);
+  return setConstant(Scalar(1));
+}
+
+/** Resizes to the given size, and sets all coefficients in this expression to one.
+  *
+  * \param rows the new number of rows
+  * \param cols the new number of columns
+  *
+  * Example: \include Matrix_setOnes_int_int.cpp
+  * Output: \verbinclude Matrix_setOnes_int_int.out
+  *
+  * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setOnes(Index rows, Index cols)
+{
+  resize(rows, cols);
+  return setConstant(Scalar(1));
+}
+
+// Identity:
+
+/** \returns an expression of the identity matrix (not necessarily square).
+  *
+  * The parameters \a rows and \a cols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this MatrixBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_identity_int_int.cpp
+  * Output: \verbinclude MatrixBase_identity_int_int.out
+  *
+  * \sa Identity(), setIdentity(), isIdentity()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
+MatrixBase<Derived>::Identity(Index rows, Index cols)
+{
+  return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_identity_op<Scalar>());
+}
+
+/** \returns an expression of the identity matrix (not necessarily square).
+  *
+  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
+  * need to use the variant taking size arguments.
+  *
+  * Example: \include MatrixBase_identity.cpp
+  * Output: \verbinclude MatrixBase_identity.out
+  *
+  * \sa Identity(Index,Index), setIdentity(), isIdentity()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
+MatrixBase<Derived>::Identity()
+{
+  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+  return MatrixBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op<Scalar>());
+}
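+
+// Usage sketch (illustrative only): Identity() need not be square.
+//
+//   Eigen::MatrixXd r = Eigen::MatrixXd::Identity(2, 4);
+//   // r == [1 0 0 0]
+//   //      [0 1 0 0]
+//   Eigen::Matrix3d i = Eigen::Matrix3d::Identity();    // fixed size: no arguments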
+
+/** \returns true if *this is approximately equal to the identity matrix
+  *          (not necessarily square),
+  *          within the precision given by \a prec.
+  *
+  * Example: \include MatrixBase_isIdentity.cpp
+  * Output: \verbinclude MatrixBase_isIdentity.out
+  *
+  * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity()
+  */
+template<typename Derived>
+bool MatrixBase<Derived>::isIdentity
+(RealScalar prec) const
+{
+  for(Index j = 0; j < cols(); ++j)
+  {
+    for(Index i = 0; i < rows(); ++i)
+    {
+      if(i == j)
+      {
+        if(!internal::isApprox(this->coeff(i, j), static_cast<Scalar>(1), prec))
+          return false;
+      }
+      else
+      {
+        if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast<RealScalar>(1), prec))
+          return false;
+      }
+    }
+  }
+  return true;
+}
+
+namespace internal {
+
+template<typename Derived, bool Big = (Derived::SizeAtCompileTime>=16)>
+struct setIdentity_impl
+{
+  static EIGEN_STRONG_INLINE Derived& run(Derived& m)
+  {
+    return m = Derived::Identity(m.rows(), m.cols());
+  }
+};
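+
+// The specialization below kicks in for expressions with at least 16 coefficients known
+// at compile time. The idea, as far as the selection criterion suggests, is that for
+// larger matrices it is cheaper to zero everything at once and then write only the
+// diagonal than to evaluate the identity expression coefficient by coefficient.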
+
+template<typename Derived>
+struct setIdentity_impl<Derived, true>
+{
+  typedef typename Derived::Index Index;
+  static EIGEN_STRONG_INLINE Derived& run(Derived& m)
+  {
+    m.setZero();
+    const Index size = (std::min)(m.rows(), m.cols());
+    for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
+    return m;
+  }
+};
+
+} // end namespace internal
+
+/** Writes the identity expression (not necessarily square) into *this.
+  *
+  * Example: \include MatrixBase_setIdentity.cpp
+  * Output: \verbinclude MatrixBase_setIdentity.out
+  *
+  * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
+{
+  return internal::setIdentity_impl<Derived>::run(derived());
+}
+
+/** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this.
+  *
+  * \param rows the new number of rows
+  * \param cols the new number of columns
+  *
+  * Example: \include Matrix_setIdentity_int_int.cpp
+  * Output: \verbinclude Matrix_setIdentity_int_int.out
+  *
+  * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols)
+{
+  derived().resize(rows, cols);
+  return setIdentity();
+}
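+
+// Usage sketch (illustrative only):
+//
+//   Eigen::MatrixXf m;
+//   m.setIdentity(4, 4);    // resizes m to 4x4 and writes the identity into it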
+
+/** \returns an expression of the i-th unit (basis) vector.
+  *
+  * \only_for_vectors
+  *
+  * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index size, Index i)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return BasisReturnType(SquareMatrixType::Identity(size,size), i);
+}
+
+/** \returns an expression of the i-th unit (basis) vector.
+  *
+  * \only_for_vectors
+  *
+  * This variant is for fixed-size vector only.
+  *
+  * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return BasisReturnType(SquareMatrixType::Identity(),i);
+}
+
+/** \returns an expression of the X axis unit vector (1{,0}^*)
+  *
+  * \only_for_vectors
+  *
+  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX()
+{ return Derived::Unit(0); }
+
+/** \returns an expression of the Y axis unit vector (0,1{,0}^*)
+  *
+  * \only_for_vectors
+  *
+  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY()
+{ return Derived::Unit(1); }
+
+/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*)
+  *
+  * \only_for_vectors
+  *
+  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitW()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ()
+{ return Derived::Unit(2); }
+
+/** \returns an expression of the W axis unit vector (0,0,0,1)
+  *
+  * \only_for_vectors
+  *
+  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW()
+{ return Derived::Unit(3); }
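+
+// Usage sketch (illustrative only):
+//
+//   Eigen::VectorXd e1 = Eigen::VectorXd::Unit(5, 0);   // (1, 0, 0, 0, 0)
+//   Eigen::Vector4d e2 = Eigen::Vector4d::Unit(2);      // (0, 0, 1, 0), fixed size
+//   Eigen::Vector3f x  = Eigen::Vector3f::UnitX();      // (1, 0, 0)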
+
+} // end namespace Eigen
+
+#endif // EIGEN_CWISE_NULLARY_OP_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/CwiseUnaryOp.h b/resources/3rdParty/eigen/Eigen/src/Core/CwiseUnaryOp.h
new file mode 100644
index 000000000..063355ae5
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/CwiseUnaryOp.h
@@ -0,0 +1,126 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CWISE_UNARY_OP_H
+#define EIGEN_CWISE_UNARY_OP_H
+
+namespace Eigen { 
+
+/** \class CwiseUnaryOp
+  * \ingroup Core_Module
+  *
+  * \brief Generic expression where a coefficient-wise unary operator is applied to an expression
+  *
+  * \param UnaryOp template functor implementing the operator
+  * \param XprType the type of the expression to which we are applying the unary operator
+  *
+  * This class represents an expression where a unary operator is applied to an expression.
+  * It is the return type of all operations taking exactly 1 input expression, regardless of the
+  * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix
+  * is considered unary, because only the right-hand side is an expression, and its
+  * return type is a specialization of CwiseUnaryOp.
+  *
+  * Most of the time, this is the only way that it is used, so you typically don't have to name
+  * CwiseUnaryOp types explicitly.
+  *
+  * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp
+  */
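+
+// Usage sketch (illustrative only): CwiseUnaryOp is normally created and consumed
+// implicitly, without ever being named. MyAbs below is a hypothetical user functor.
+//
+//   struct MyAbs { double operator()(double x) const { return x < 0 ? -x : x; } };
+//
+//   Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
+//   Eigen::MatrixXd b = 2.0 * a;                // the right-hand side is a CwiseUnaryOp
+//   Eigen::MatrixXd c = a.unaryExpr(MyAbs());   // so is this one; both are evaluated on assignment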
+
+namespace internal {
+template<typename UnaryOp, typename XprType>
+struct traits<CwiseUnaryOp<UnaryOp, XprType> >
+ : traits<XprType>
+{
+  typedef typename result_of<
+                     UnaryOp(typename XprType::Scalar)
+                   >::type Scalar;
+  typedef typename XprType::Nested XprTypeNested;
+  typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
+  enum {
+    Flags = _XprTypeNested::Flags & (
+      HereditaryBits | LinearAccessBit | AlignedBit
+      | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
+    CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits<UnaryOp>::Cost
+  };
+};
+}
+
+template<typename UnaryOp, typename XprType, typename StorageKind>
+class CwiseUnaryOpImpl;
+
+template<typename UnaryOp, typename XprType>
+class CwiseUnaryOp : internal::no_assignment_operator,
+  public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>
+{
+  public:
+
+    typedef typename CwiseUnaryOpImpl<UnaryOp, XprType,typename internal::traits<XprType>::StorageKind>::Base Base;
+    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)
+
+    inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
+      : m_xpr(xpr), m_functor(func) {}
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); }
+
+    /** \returns the functor representing the unary operation */
+    const UnaryOp& functor() const { return m_functor; }
+
+    /** \returns the nested expression */
+    const typename internal::remove_all<typename XprType::Nested>::type&
+    nestedExpression() const { return m_xpr; }
+
+    /** \returns the nested expression */
+    typename internal::remove_all<typename XprType::Nested>::type&
+    nestedExpression() { return m_xpr.const_cast_derived(); }
+
+  protected:
+    typename XprType::Nested m_xpr;
+    const UnaryOp m_functor;
+};
+
+// This is the generic implementation for dense storage.
+// It can be used for any expression types implementing the dense concept.
+template<typename UnaryOp, typename XprType>
+class CwiseUnaryOpImpl<UnaryOp,XprType,Dense>
+  : public internal::dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type
+{
+  public:
+
+    typedef CwiseUnaryOp<UnaryOp, XprType> Derived;
+    typedef typename internal::dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
+    {
+      return derived().functor()(derived().nestedExpression().coeff(row, col));
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+    {
+      return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(row, col));
+    }
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
+    {
+      return derived().functor()(derived().nestedExpression().coeff(index));
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
+    {
+      return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(index));
+    }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CWISE_UNARY_OP_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/CwiseUnaryView.h b/resources/3rdParty/eigen/Eigen/src/Core/CwiseUnaryView.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/CwiseUnaryView.h
rename to resources/3rdParty/eigen/Eigen/src/Core/CwiseUnaryView.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/DenseBase.h b/resources/3rdParty/eigen/Eigen/src/Core/DenseBase.h
new file mode 100644
index 000000000..1cc0314ef
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/DenseBase.h
@@ -0,0 +1,533 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DENSEBASE_H
+#define EIGEN_DENSEBASE_H
+
+namespace Eigen {
+
+/** \class DenseBase
+  * \ingroup Core_Module
+  *
+  * \brief Base class for all dense matrices, vectors, and arrays
+  *
+  * This class is the base that is inherited by all dense objects (matrix, vector, arrays,
+  * and related expression types). The common Eigen API for dense objects is contained in this class.
+  *
+  * \tparam Derived is the derived type, e.g., a matrix type or an expression.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN.
+  *
+  * \sa \ref TopicClassHierarchy
+  */
+template<typename Derived> class DenseBase
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+  : public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
+                                     typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>
+#else
+  : public DenseCoeffsBase<Derived>
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+{
+  public:
+    using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
+                typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
+
+    class InnerIterator;
+
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+
+    /** \brief The type of indices 
+      * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
+      * \sa \ref TopicPreprocessorDirectives.
+      */
+    typedef typename internal::traits<Derived>::Index Index; 
+
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    typedef DenseCoeffsBase<Derived> Base;
+    using Base::derived;
+    using Base::const_cast_derived;
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::rowIndexByOuterInner;
+    using Base::colIndexByOuterInner;
+    using Base::coeff;
+    using Base::coeffByOuterInner;
+    using Base::packet;
+    using Base::packetByOuterInner;
+    using Base::writePacket;
+    using Base::writePacketByOuterInner;
+    using Base::coeffRef;
+    using Base::coeffRefByOuterInner;
+    using Base::copyCoeff;
+    using Base::copyCoeffByOuterInner;
+    using Base::copyPacket;
+    using Base::copyPacketByOuterInner;
+    using Base::operator();
+    using Base::operator[];
+    using Base::x;
+    using Base::y;
+    using Base::z;
+    using Base::w;
+    using Base::stride;
+    using Base::innerStride;
+    using Base::outerStride;
+    using Base::rowStride;
+    using Base::colStride;
+    typedef typename Base::CoeffReturnType CoeffReturnType;
+
+    enum {
+
+      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+        /**< The number of rows at compile-time. This is just a copy of the value provided
+          * by the \a Derived type. If a value is not known at compile-time,
+          * it is set to the \a Dynamic constant.
+          * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
+
+      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+        /**< The number of columns at compile-time. This is just a copy of the value provided
+          * by the \a Derived type. If a value is not known at compile-time,
+          * it is set to the \a Dynamic constant.
+          * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
+
+
+      SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
+                                                   internal::traits<Derived>::ColsAtCompileTime>::ret),
+        /**< This is equal to the number of coefficients, i.e. the number of
+          * rows times the number of columns, or to \a Dynamic if this is not
+          * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
+
+      MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
+        /**< This value is equal to the maximum possible number of rows that this expression
+          * might have. If this expression might have an arbitrarily high number of rows,
+          * this value is set to \a Dynamic.
+          *
+          * This value is useful to know when evaluating an expression, in order to determine
+          * whether it is possible to avoid doing a dynamic memory allocation.
+          *
+          * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime
+          */
+
+      MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
+        /**< This value is equal to the maximum possible number of columns that this expression
+          * might have. If this expression might have an arbitrarily high number of columns,
+          * this value is set to \a Dynamic.
+          *
+          * This value is useful to know when evaluating an expression, in order to determine
+          * whether it is possible to avoid doing a dynamic memory allocation.
+          *
+          * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime
+          */
+
+      MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,
+                                                      internal::traits<Derived>::MaxColsAtCompileTime>::ret),
+        /**< This value is equal to the maximum possible number of coefficients that this expression
+          * might have. If this expression might have an arbitrarily high number of coefficients,
+          * this value is set to \a Dynamic.
+          *
+          * This value is useful to know when evaluating an expression, in order to determine
+          * whether it is possible to avoid doing a dynamic memory allocation.
+          *
+          * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime
+          */
+
+      IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1
+                           || internal::traits<Derived>::MaxColsAtCompileTime == 1,
+        /**< This is set to true if either the number of rows or the number of
+          * columns is known at compile-time to be equal to 1. Indeed, in that case,
+          * we are dealing with a column-vector (if there is only one column) or with
+          * a row-vector (if there is only one row). */
+
+      Flags = internal::traits<Derived>::Flags,
+        /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
+          * constructed from this one. See the \ref flags "list of flags".
+          */
+
+      IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */
+
+      InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
+                             : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
+
+      CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+        /**< This is a rough measure of how expensive it is to read one coefficient from
+          * this expression.
+          */
+
+      InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret,
+      OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret
+    };
+
+    enum { ThisConstantIsPrivateInPlainObjectBase };
+
+    /** \returns the number of nonzero coefficients which is in practice the number
+      * of stored coefficients. */
+    inline Index nonZeros() const { return size(); }
+    /** \returns true if either the number of rows or the number of columns is equal to 1.
+      * In other words, this function returns
+      * \code rows()==1 || cols()==1 \endcode
+      * \sa rows(), cols(), IsVectorAtCompileTime. */
+
+    /** \returns the outer size.
+      *
+      * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension
+      * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a
+      * column-major matrix, and the number of rows for a row-major matrix. */
+    Index outerSize() const
+    {
+      return IsVectorAtCompileTime ? 1
+           : int(IsRowMajor) ? this->rows() : this->cols();
+    }
+
+    /** \returns the inner size.
+      *
+      * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension
+      * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a 
+      * column-major matrix, and the number of columns for a row-major matrix. */
+    Index innerSize() const
+    {
+      return IsVectorAtCompileTime ? this->size()
+           : int(IsRowMajor) ? this->cols() : this->rows();
+    }
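+
+    // Illustrative example (not part of the interface): for a column-major
+    // Eigen::MatrixXd m(3, 4), outerSize() == 4 (columns) and innerSize() == 3 (rows);
+    // for a row-major matrix of the same shape the two values are swapped, and for a
+    // vector outerSize() == 1 and innerSize() == size().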
+
+    /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
+      * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
+      * nothing else.
+      */
+    void resize(Index size)
+    {
+      EIGEN_ONLY_USED_FOR_DEBUG(size);
+      eigen_assert(size == this->size()
+                && "DenseBase::resize() does not actually allow to resize.");
+    }
+    /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
+      * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
+      * nothing else.
+      */
+    void resize(Index rows, Index cols)
+    {
+      EIGEN_ONLY_USED_FOR_DEBUG(rows);
+      EIGEN_ONLY_USED_FOR_DEBUG(cols);
+      eigen_assert(rows == this->rows() && cols == this->cols()
+                && "DenseBase::resize() does not actually allow to resize.");
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+    /** \internal Represents a matrix with all coefficients equal to one another. */
+    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Derived> ConstantReturnType;
+    /** \internal Represents a vector with linearly spaced coefficients that allows sequential access only. */
+    typedef CwiseNullaryOp<internal::linspaced_op<Scalar,false>,Derived> SequentialLinSpacedReturnType;
+    /** \internal Represents a vector with linearly spaced coefficients that allows random access. */
+    typedef CwiseNullaryOp<internal::linspaced_op<Scalar,true>,Derived> RandomAccessLinSpacedReturnType;
+    /** \internal the return type of MatrixBase::eigenvalues() */
+    typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;
+
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+    /** Copies \a other into *this. \returns a reference to *this. */
+    template<typename OtherDerived>
+    Derived& operator=(const DenseBase<OtherDerived>& other);
+
+    /** Special case of the template operator=, in order to prevent the compiler
+      * from generating a default operator= (issue hit with g++ 4.1)
+      */
+    Derived& operator=(const DenseBase& other);
+
+    template<typename OtherDerived>
+    Derived& operator=(const EigenBase<OtherDerived> &other);
+
+    template<typename OtherDerived>
+    Derived& operator+=(const EigenBase<OtherDerived> &other);
+
+    template<typename OtherDerived>
+    Derived& operator-=(const EigenBase<OtherDerived> &other);
+
+    template<typename OtherDerived>
+    Derived& operator=(const ReturnByValue<OtherDerived>& func);
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** Copies \a other into *this without evaluating other. \returns a reference to *this. */
+    template<typename OtherDerived>
+    Derived& lazyAssign(const DenseBase<OtherDerived>& other);
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+    CommaInitializer<Derived> operator<< (const Scalar& s);
+
+    template<unsigned int Added,unsigned int Removed>
+    const Flagged<Derived, Added, Removed> flagged() const;
+
+    template<typename OtherDerived>
+    CommaInitializer<Derived> operator<< (const DenseBase<OtherDerived>& other);
+
+    Eigen::Transpose<Derived> transpose();
+    typedef const Transpose<const Derived> ConstTransposeReturnType;
+    ConstTransposeReturnType transpose() const;
+    void transposeInPlace();
+#ifndef EIGEN_NO_DEBUG
+  protected:
+    template<typename OtherDerived>
+    void checkTransposeAliasing(const OtherDerived& other) const;
+  public:
+#endif
+
+    typedef VectorBlock<Derived> SegmentReturnType;
+    typedef const VectorBlock<const Derived> ConstSegmentReturnType;
+    template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };
+    template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };
+    
+    // Note: The "DenseBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
+    SegmentReturnType segment(Index start, Index size);
+    typename DenseBase::ConstSegmentReturnType segment(Index start, Index size) const;
+
+    SegmentReturnType head(Index size);
+    typename DenseBase::ConstSegmentReturnType head(Index size) const;
+
+    SegmentReturnType tail(Index size);
+    typename DenseBase::ConstSegmentReturnType tail(Index size) const;
+
+    template<int Size> typename FixedSegmentReturnType<Size>::Type head();
+    template<int Size> typename ConstFixedSegmentReturnType<Size>::Type head() const;
+
+    template<int Size> typename FixedSegmentReturnType<Size>::Type tail();
+    template<int Size> typename ConstFixedSegmentReturnType<Size>::Type tail() const;
+
+    template<int Size> typename FixedSegmentReturnType<Size>::Type segment(Index start);
+    template<int Size> typename ConstFixedSegmentReturnType<Size>::Type segment(Index start) const;
+
+    static const ConstantReturnType
+    Constant(Index rows, Index cols, const Scalar& value);
+    static const ConstantReturnType
+    Constant(Index size, const Scalar& value);
+    static const ConstantReturnType
+    Constant(const Scalar& value);
+
+    static const SequentialLinSpacedReturnType
+    LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high);
+    static const RandomAccessLinSpacedReturnType
+    LinSpaced(Index size, const Scalar& low, const Scalar& high);
+    static const SequentialLinSpacedReturnType
+    LinSpaced(Sequential_t, const Scalar& low, const Scalar& high);
+    static const RandomAccessLinSpacedReturnType
+    LinSpaced(const Scalar& low, const Scalar& high);
+
+    template<typename CustomNullaryOp>
+    static const CwiseNullaryOp<CustomNullaryOp, Derived>
+    NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func);
+    template<typename CustomNullaryOp>
+    static const CwiseNullaryOp<CustomNullaryOp, Derived>
+    NullaryExpr(Index size, const CustomNullaryOp& func);
+    template<typename CustomNullaryOp>
+    static const CwiseNullaryOp<CustomNullaryOp, Derived>
+    NullaryExpr(const CustomNullaryOp& func);
+
+    static const ConstantReturnType Zero(Index rows, Index cols);
+    static const ConstantReturnType Zero(Index size);
+    static const ConstantReturnType Zero();
+    static const ConstantReturnType Ones(Index rows, Index cols);
+    static const ConstantReturnType Ones(Index size);
+    static const ConstantReturnType Ones();
+
+    void fill(const Scalar& value);
+    Derived& setConstant(const Scalar& value);
+    Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high);
+    Derived& setLinSpaced(const Scalar& low, const Scalar& high);
+    Derived& setZero();
+    Derived& setOnes();
+    Derived& setRandom();
+
+    template<typename OtherDerived>
+    bool isApprox(const DenseBase<OtherDerived>& other,
+                  RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isMuchSmallerThan(const RealScalar& other,
+                           RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+    template<typename OtherDerived>
+    bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,
+                           RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+    bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isZero(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isOnes(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+    inline Derived& operator*=(const Scalar& other);
+    inline Derived& operator/=(const Scalar& other);
+
+    typedef typename internal::add_const_on_value_type<typename internal::eval<Derived>::type>::type EvalReturnType;
+    /** \returns the matrix or vector obtained by evaluating this expression.
+      *
+      * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
+      * a const reference, in order to avoid a useless copy.
+      */
+    EIGEN_STRONG_INLINE EvalReturnType eval() const
+    {
+      // Even though MSVC does not honor strong inlining when the return type
+      // is a dynamic matrix, we desperately need strong inlining for fixed
+      // size types on MSVC.
+      return typename internal::eval<Derived>::type(derived());
+    }
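+
+    // Usage sketch (illustrative only): eval() forces evaluation into a temporary, which is
+    // the usual way to work around aliasing when the destination also appears on the right:
+    //
+    //   Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
+    //   m = m.transpose().eval();   // safe; plain m = m.transpose() would alias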
+
+    /** swaps *this with the expression \a other.
+      *
+      */
+    template<typename OtherDerived>
+    void swap(const DenseBase<OtherDerived>& other,
+              int = OtherDerived::ThisConstantIsPrivateInPlainObjectBase)
+    {
+      SwapWrapper<Derived>(derived()).lazyAssign(other.derived());
+    }
+
+    /** swaps *this with the matrix or array \a other.
+      *
+      */
+    template<typename OtherDerived>
+    void swap(PlainObjectBase<OtherDerived>& other)
+    {
+      SwapWrapper<Derived>(derived()).lazyAssign(other.derived());
+    }
+
+
+    inline const NestByValue<Derived> nestByValue() const;
+    inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
+    inline ForceAlignedAccess<Derived> forceAlignedAccess();
+    template<bool Enable> inline const typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf() const;
+    template<bool Enable> inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();
+
+    Scalar sum() const;
+    Scalar mean() const;
+    Scalar trace() const;
+
+    Scalar prod() const;
+
+    typename internal::traits<Derived>::Scalar minCoeff() const;
+    typename internal::traits<Derived>::Scalar maxCoeff() const;
+
+    template<typename IndexType>
+    typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const;
+    template<typename IndexType>
+    typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const;
+    template<typename IndexType>
+    typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const;
+    template<typename IndexType>
+    typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const;
+
+    template<typename BinaryOp>
+    typename internal::result_of<BinaryOp(typename internal::traits<Derived>::Scalar)>::type
+    redux(const BinaryOp& func) const;
+
+    template<typename Visitor>
+    void visit(Visitor& func) const;
+
+    inline const WithFormat<Derived> format(const IOFormat& fmt) const;
+
+    /** \returns the unique coefficient of a 1x1 expression */
+    CoeffReturnType value() const
+    {
+      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+      eigen_assert(this->rows() == 1 && this->cols() == 1);
+      return derived().coeff(0,0);
+    }
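+
+    // Usage sketch (illustrative only):
+    //
+    //   Eigen::VectorXd u = Eigen::VectorXd::Random(4), v = Eigen::VectorXd::Random(4);
+    //   double d = (u.transpose() * v).value();   // the product is a 1x1 expression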
+
+/////////// Array module ///////////
+
+    bool all(void) const;
+    bool any(void) const;
+    Index count() const;
+
+    typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType;
+    typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType;
+    typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType;
+    typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType;
+
+    ConstRowwiseReturnType rowwise() const;
+    RowwiseReturnType rowwise();
+    ConstColwiseReturnType colwise() const;
+    ColwiseReturnType colwise();
+
+    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(Index rows, Index cols);
+    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(Index size);
+    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random();
+
+    template<typename ThenDerived,typename ElseDerived>
+    const Select<Derived,ThenDerived,ElseDerived>
+    select(const DenseBase<ThenDerived>& thenMatrix,
+           const DenseBase<ElseDerived>& elseMatrix) const;
+
+    template<typename ThenDerived>
+    inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
+    select(const DenseBase<ThenDerived>& thenMatrix, typename ThenDerived::Scalar elseScalar) const;
+
+    template<typename ElseDerived>
+    inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
+    select(typename ElseDerived::Scalar thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;
+
+    template<int p> RealScalar lpNorm() const;
+
+    template<int RowFactor, int ColFactor>
+    const Replicate<Derived,RowFactor,ColFactor> replicate() const;
+    const Replicate<Derived,Dynamic,Dynamic> replicate(Index rowFactor, Index colFactor) const;
+
+    typedef Reverse<Derived, BothDirections> ReverseReturnType;
+    typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
+    ReverseReturnType reverse();
+    ConstReverseReturnType reverse() const;
+    void reverseInPlace();
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase
+#   include "../plugins/BlockMethods.h"
+#   ifdef EIGEN_DENSEBASE_PLUGIN
+#     include EIGEN_DENSEBASE_PLUGIN
+#   endif
+#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+#ifdef EIGEN2_SUPPORT
+
+    Block<Derived> corner(CornerType type, Index cRows, Index cCols);
+    const Block<Derived> corner(CornerType type, Index cRows, Index cCols) const;
+    template<int CRows, int CCols>
+    Block<Derived, CRows, CCols> corner(CornerType type);
+    template<int CRows, int CCols>
+    const Block<Derived, CRows, CCols> corner(CornerType type) const;
+
+#endif // EIGEN2_SUPPORT
+
+
+    // disable the use of evalTo for dense objects with a nice compilation error
+    template<typename Dest> inline void evalTo(Dest& ) const
+    {
+      EIGEN_STATIC_ASSERT((internal::is_same<Dest,void>::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);
+    }
+
+  protected:
+    /** Default constructor. Do nothing. */
+    DenseBase()
+    {
+      /* Just checks for self-consistency of the flags.
+       * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down
+       */
+#ifdef EIGEN_INTERNAL_DEBUGGING
+      EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor))
+                        && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))),
+                          INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION)
+#endif
+    }
+
+  private:
+    explicit DenseBase(int);
+    DenseBase(int,int);
+    template<typename OtherDerived> explicit DenseBase(const DenseBase<OtherDerived>&);
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_DENSEBASE_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/DenseCoeffsBase.h b/resources/3rdParty/eigen/Eigen/src/Core/DenseCoeffsBase.h
new file mode 100644
index 000000000..72704c2d7
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/DenseCoeffsBase.h
@@ -0,0 +1,754 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DENSECOEFFSBASE_H
+#define EIGEN_DENSECOEFFSBASE_H
+
+namespace Eigen {
+
+namespace internal {
+template<typename T> struct add_const_on_value_type_if_arithmetic
+{
+  typedef typename conditional<is_arithmetic<T>::value, T, typename add_const_on_value_type<T>::type>::type type;
+};
+}
+
+/** \brief Base class providing read-only coefficient access to matrices and arrays.
+  * \ingroup Core_Module
+  * \tparam Derived Type of the derived class
+  * \tparam #ReadOnlyAccessors Constant indicating read-only access
+  *
+  * This class defines the \c operator() \c const function and friends, which can be used to read specific
+  * entries of a matrix or array.
+  * 
+  * \sa DenseCoeffsBase<Derived, WriteAccessors>, DenseCoeffsBase<Derived, DirectAccessors>,
+  *     \ref TopicClassHierarchy
+  */
+template<typename Derived>
+class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
+{
+  public:
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+
+    // Explanation for this CoeffReturnType typedef.
+    // - This is the return type of the coeff() method.
+    // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references
+    // to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value).
+    // - The is_arithmetic check is required since "const int", "const double", etc. will cause warnings on some systems
+    // while the declaration of "const T", where T is a non-arithmetic type, does not. Always returning "const Scalar&" is
+    // not possible, since the underlying expressions might not offer a valid address the reference could be referring to.
+    typedef typename internal::conditional<bool(internal::traits<Derived>::Flags&LvalueBit),
+                         const Scalar&,
+                         typename internal::conditional<internal::is_arithmetic<Scalar>::value, Scalar, const Scalar>::type
+                     >::type CoeffReturnType;
+
+    typedef typename internal::add_const_on_value_type_if_arithmetic<
+                         typename internal::packet_traits<Scalar>::type
+                     >::type PacketReturnType;
+
+    typedef EigenBase<Derived> Base;
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::derived;
+
+    EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const
+    {
+      return int(Derived::RowsAtCompileTime) == 1 ? 0
+          : int(Derived::ColsAtCompileTime) == 1 ? inner
+          : int(Derived::Flags)&RowMajorBit ? outer
+          : inner;
+    }
+
+    EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const
+    {
+      return int(Derived::ColsAtCompileTime) == 1 ? 0
+          : int(Derived::RowsAtCompileTime) == 1 ? inner
+          : int(Derived::Flags)&RowMajorBit ? inner
+          : outer;
+    }
+
+    /** Short version: don't use this function, use
+      * \link operator()(Index,Index) const \endlink instead.
+      *
+      * Long version: this function is similar to
+      * \link operator()(Index,Index) const \endlink, but without the assertion.
+      * Use this for limiting the performance cost of debugging code when doing
+      * repeated coefficient access. Only use this when it is guaranteed that the
+      * parameters \a row and \a col are in range.
+      *
+      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+      * function equivalent to \link operator()(Index,Index) const \endlink.
+      *
+      * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const
+      */
+    EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      return derived().coeff(row, col);
+    }
+
+    EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
+    {
+      return coeff(rowIndexByOuterInner(outer, inner),
+                   colIndexByOuterInner(outer, inner));
+    }
+
+    /** \returns the coefficient at the given row and column.
+      *
+      * \sa operator()(Index,Index), operator[](Index)
+      */
+    EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const
+    {
+      eigen_assert(row >= 0 && row < rows()
+          && col >= 0 && col < cols());
+      return derived().coeff(row, col);
+    }
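+
+    // Usage sketch (illustrative only): operator() checks the indices with eigen_assert
+    // (active in debug builds), while coeff() skips the check unless
+    // EIGEN_INTERNAL_DEBUGGING is defined.
+    //
+    //   Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
+    //   double a = m(1, 2);        // range-checked in debug builds
+    //   double b = m.coeff(1, 2);  // unchecked; only use with indices known to be valid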
+
+    /** Short version: don't use this function, use
+      * \link operator[](Index) const \endlink instead.
+      *
+      * Long version: this function is similar to
+      * \link operator[](Index) const \endlink, but without the assertion.
+      * Use this for limiting the performance cost of debugging code when doing
+      * repeated coefficient access. Only use this when it is guaranteed that the
+      * parameter \a index is in range.
+      *
+      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+      * function equivalent to \link operator[](Index) const \endlink.
+      *
+      * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const
+      */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    coeff(Index index) const
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      return derived().coeff(index);
+    }
+
+
+    /** \returns the coefficient at given index.
+      *
+      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+      *
+      * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
+      * z() const, w() const
+      */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    operator[](Index index) const
+    {
+      #ifndef EIGEN2_SUPPORT
+      EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
+                          THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
+      #endif
+      eigen_assert(index >= 0 && index < size());
+      return derived().coeff(index);
+    }
+
+    /** \returns the coefficient at given index.
+      *
+      * This is synonymous to operator[](Index) const.
+      *
+      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+      *
+      * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
+      * z() const, w() const
+      */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    operator()(Index index) const
+    {
+      eigen_assert(index >= 0 && index < size());
+      return derived().coeff(index);
+    }
+
+    /** equivalent to operator[](0).  */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    x() const { return (*this)[0]; }
+
+    /** equivalent to operator[](1).  */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    y() const { return (*this)[1]; }
+
+    /** equivalent to operator[](2).  */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    z() const { return (*this)[2]; }
+
+    /** equivalent to operator[](3).  */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    w() const { return (*this)[3]; }
+
+    /** \internal
+      * \returns the packet of coefficients starting at the given row and column. It is your responsibility
+      * to ensure that a packet really starts there. This method is only available on expressions having the
+      * PacketAccessBit.
+      *
+      * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
+      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+      * starting at an address which is a multiple of the packet size.
+      */
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                      && col >= 0 && col < cols());
+      return derived().template packet<LoadMode>(row,col);
+    }
+
+
+    /** \internal */
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const
+    {
+      return packet<LoadMode>(rowIndexByOuterInner(outer, inner),
+                              colIndexByOuterInner(outer, inner));
+    }
+
+    /** \internal
+      * \returns the packet of coefficients starting at the given index. It is your responsibility
+      * to ensure that a packet really starts there. This method is only available on expressions having the
+      * PacketAccessBit and the LinearAccessBit.
+      *
+      * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
+      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+      * starting at an address which is a multiple of the packet size.
+      */
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      return derived().template packet<LoadMode>(index);
+    }
+
+  protected:
+    // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase.
+    // But some methods are only available in the DirectAccess case.
+    // So we add dummy methods here with these names, so that "using... " doesn't fail.
+    // They're not private, so that the child class DenseBase can access them, and they're not public
+    // either, since they're an implementation detail, so they have to be protected.
+    void coeffRef();
+    void coeffRefByOuterInner();
+    void writePacket();
+    void writePacketByOuterInner();
+    void copyCoeff();
+    void copyCoeffByOuterInner();
+    void copyPacket();
+    void copyPacketByOuterInner();
+    void stride();
+    void innerStride();
+    void outerStride();
+    void rowStride();
+    void colStride();
+};
+
+/** \brief Base class providing read/write coefficient access to matrices and arrays.
+  * \ingroup Core_Module
+  * \tparam Derived Type of the derived class
+  * \tparam #WriteAccessors Constant indicating read/write access
+  *
+  * This class defines the non-const \c operator() function and friends, which can be used to write specific
+  * entries of a matrix or array. This class inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which
+  * defines the const variant for reading specific entries.
+  * 
+  * \sa DenseCoeffsBase<Derived, DirectAccessors>, \ref TopicClassHierarchy
+  */
+template<typename Derived>
+class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors>
+{
+  public:
+
+    typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
+
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    using Base::coeff;
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::derived;
+    using Base::rowIndexByOuterInner;
+    using Base::colIndexByOuterInner;
+    using Base::operator[];
+    using Base::operator();
+    using Base::x;
+    using Base::y;
+    using Base::z;
+    using Base::w;
+
+    /** Short version: don't use this function, use
+      * \link operator()(Index,Index) \endlink instead.
+      *
+      * Long version: this function is similar to
+      * \link operator()(Index,Index) \endlink, but without the assertion.
+      * Use this for limiting the performance cost of debugging code when doing
+      * repeated coefficient access. Only use this when it is guaranteed that the
+      * parameters \a row and \a col are in range.
+      *
+      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+      * function equivalent to \link operator()(Index,Index) \endlink.
+      *
+      * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)
+      */
+    EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      return derived().coeffRef(row, col);
+    }
+
+    EIGEN_STRONG_INLINE Scalar&
+    coeffRefByOuterInner(Index outer, Index inner)
+    {
+      return coeffRef(rowIndexByOuterInner(outer, inner),
+                      colIndexByOuterInner(outer, inner));
+    }
+
+    /** \returns a reference to the coefficient at the given row and column.
+      *
+      * \sa operator[](Index)
+      */
+
+    EIGEN_STRONG_INLINE Scalar&
+    operator()(Index row, Index col)
+    {
+      eigen_assert(row >= 0 && row < rows()
+          && col >= 0 && col < cols());
+      return derived().coeffRef(row, col);
+    }
+
+
+    /** Short version: don't use this function, use
+      * \link operator[](Index) \endlink instead.
+      *
+      * Long version: this function is similar to
+      * \link operator[](Index) \endlink, but without the assertion.
+      * Use this for limiting the performance cost of debugging code when doing
+      * repeated coefficient access. Only use this when it is guaranteed that the
+      * parameter \a index is in range.
+      *
+      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+      * function equivalent to \link operator[](Index) \endlink.
+      *
+      * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)
+      */
+
+    EIGEN_STRONG_INLINE Scalar&
+    coeffRef(Index index)
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      return derived().coeffRef(index);
+    }
+
+    /** \returns a reference to the coefficient at given index.
+      *
+      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+      *
+      * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
+      */
+
+    EIGEN_STRONG_INLINE Scalar&
+    operator[](Index index)
+    {
+      #ifndef EIGEN2_SUPPORT
+      EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
+                          THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
+      #endif
+      eigen_assert(index >= 0 && index < size());
+      return derived().coeffRef(index);
+    }
+
+    /** \returns a reference to the coefficient at given index.
+      *
+      * This is synonymous with operator[](Index).
+      *
+      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+      *
+      * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
+      */
+
+    EIGEN_STRONG_INLINE Scalar&
+    operator()(Index index)
+    {
+      eigen_assert(index >= 0 && index < size());
+      return derived().coeffRef(index);
+    }
+
+    /** equivalent to operator[](0).  */
+
+    EIGEN_STRONG_INLINE Scalar&
+    x() { return (*this)[0]; }
+
+    /** equivalent to operator[](1).  */
+
+    EIGEN_STRONG_INLINE Scalar&
+    y() { return (*this)[1]; }
+
+    /** equivalent to operator[](2).  */
+
+    EIGEN_STRONG_INLINE Scalar&
+    z() { return (*this)[2]; }
+
+    /** equivalent to operator[](3).  */
+
+    EIGEN_STRONG_INLINE Scalar&
+    w() { return (*this)[3]; }
+
+    /** \internal
+      * Stores the given packet of coefficients at the given row and column of this expression. It is your responsibility
+      * to ensure that a packet really starts there. This method is only available on expressions having the
+      * PacketAccessBit.
+      *
+      * The \a StoreMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
+      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+      * starting at an address which is a multiple of the packet size.
+      */
+
+    template<int StoreMode>
+    EIGEN_STRONG_INLINE void writePacket
+    (Index row, Index col, const typename internal::packet_traits<Scalar>::type& x)
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      derived().template writePacket<StoreMode>(row,col,x);
+    }
+
+
+    /** \internal */
+    template<int StoreMode>
+    EIGEN_STRONG_INLINE void writePacketByOuterInner
+    (Index outer, Index inner, const typename internal::packet_traits<Scalar>::type& x)
+    {
+      writePacket<StoreMode>(rowIndexByOuterInner(outer, inner),
+                            colIndexByOuterInner(outer, inner),
+                            x);
+    }
+
+    /** \internal
+      * Stores the given packet of coefficients at the given index in this expression. It is your responsibility
+      * to ensure that a packet really starts there. This method is only available on expressions having the
+      * PacketAccessBit and the LinearAccessBit.
+      *
+      * The \a StoreMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
+      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+      * starting at an address which is a multiple of the packet size.
+      */
+    template<int StoreMode>
+    EIGEN_STRONG_INLINE void writePacket
+    (Index index, const typename internal::packet_traits<Scalar>::type& x)
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      derived().template writePacket<StoreMode>(index,x);
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+    /** \internal Copies the coefficient at position (row,col) of other into *this.
+      *
+      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+      * with usual assignments.
+      *
+      * Outside of this internal usage, this method is probably of no use. It is hidden in the public API documentation.
+      */
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      derived().coeffRef(row, col) = other.derived().coeff(row, col);
+    }
+
+    /** \internal Copies the coefficient at the given index of other into *this.
+      *
+      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+      * with usual assignments.
+      *
+      * Outside of this internal usage, this method is probably of no use. It is hidden in the public API documentation.
+      */
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      derived().coeffRef(index) = other.derived().coeff(index);
+    }
+
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void copyCoeffByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
+    {
+      const Index row = rowIndexByOuterInner(outer,inner);
+      const Index col = colIndexByOuterInner(outer,inner);
+      // derived() is important here: copyCoeff() may be reimplemented in Derived!
+      derived().copyCoeff(row, col, other);
+    }
+
+    /** \internal Copies the packet at position (row,col) of other into *this.
+      *
+      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+      * with usual assignments.
+      *
+      * Outside of this internal usage, this method is probably of no use. It is hidden in the public API documentation.
+      */
+
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      derived().template writePacket<StoreMode>(row, col,
+        other.derived().template packet<LoadMode>(row, col));
+    }
+
+    /** \internal Copies the packet at the given index of other into *this.
+      *
+      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+      * with usual assignments.
+      *
+      * Outside of this internal usage, this method is probably of no use. It is hidden in the public API documentation.
+      */
+
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase<OtherDerived>& other)
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      derived().template writePacket<StoreMode>(index,
+        other.derived().template packet<LoadMode>(index));
+    }
+
+    /** \internal */
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
+    {
+      const Index row = rowIndexByOuterInner(outer,inner);
+      const Index col = colIndexByOuterInner(outer,inner);
+      // derived() is important here: copyPacket() may be reimplemented in Derived!
+      derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other);
+    }
+#endif
+
+};
+
+/** \brief Base class providing direct read-only coefficient access to matrices and arrays.
+  * \ingroup Core_Module
+  * \tparam Derived Type of the derived class
+  * \tparam #DirectAccessors Constant indicating direct access
+  *
+  * This class defines functions to work with strides which can be used to access entries directly. This class
+  * inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using
+  * \c operator() .
+  *
+  * \sa \ref TopicClassHierarchy
+  */
+template<typename Derived>
+class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors>
+{
+  public:
+
+    typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::derived;
+
+    /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
+      *
+      * \sa outerStride(), rowStride(), colStride()
+      */
+    inline Index innerStride() const
+    {
+      return derived().innerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
+      *          in a column-major matrix).
+      *
+      * \sa innerStride(), rowStride(), colStride()
+      */
+    inline Index outerStride() const
+    {
+      return derived().outerStride();
+    }
+
+    // FIXME shall we remove it ?
+    inline Index stride() const
+    {
+      return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive rows.
+      *
+      * \sa innerStride(), outerStride(), colStride()
+      */
+    inline Index rowStride() const
+    {
+      return Derived::IsRowMajor ? outerStride() : innerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive columns.
+      *
+      * \sa innerStride(), outerStride(), rowStride()
+      */
+    inline Index colStride() const
+    {
+      return Derived::IsRowMajor ? innerStride() : outerStride();
+    }
+};
+
+/** \brief Base class providing direct read/write coefficient access to matrices and arrays.
+  * \ingroup Core_Module
+  * \tparam Derived Type of the derived class
+  * \tparam #DirectWriteAccessors Constant indicating direct access
+  *
+  * This class defines functions to work with strides which can be used to access entries directly. This class
+  * inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using
+  * \c operator().
+  *
+  * \sa \ref TopicClassHierarchy
+  */
+template<typename Derived>
+class DenseCoeffsBase<Derived, DirectWriteAccessors>
+  : public DenseCoeffsBase<Derived, WriteAccessors>
+{
+  public:
+
+    typedef DenseCoeffsBase<Derived, WriteAccessors> Base;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::derived;
+
+    /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
+      *
+      * \sa outerStride(), rowStride(), colStride()
+      */
+    inline Index innerStride() const
+    {
+      return derived().innerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
+      *          in a column-major matrix).
+      *
+      * \sa innerStride(), rowStride(), colStride()
+      */
+    inline Index outerStride() const
+    {
+      return derived().outerStride();
+    }
+
+    // FIXME shall we remove it ?
+    inline Index stride() const
+    {
+      return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive rows.
+      *
+      * \sa innerStride(), outerStride(), colStride()
+      */
+    inline Index rowStride() const
+    {
+      return Derived::IsRowMajor ? outerStride() : innerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive columns.
+      *
+      * \sa innerStride(), outerStride(), rowStride()
+      */
+    inline Index colStride() const
+    {
+      return Derived::IsRowMajor ? innerStride() : outerStride();
+    }
+};
+
+namespace internal {
+
+template<typename Derived, bool JustReturnZero>
+struct first_aligned_impl
+{
+  static inline typename Derived::Index run(const Derived&)
+  { return 0; }
+};
+
+template<typename Derived>
+struct first_aligned_impl<Derived, false>
+{
+  static inline typename Derived::Index run(const Derived& m)
+  {
+    return internal::first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size());
+  }
+};
+
+/** \internal \returns the index of the first element of the array that is well aligned for vectorization.
+  *
+  * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more
+  * documentation.
+  */
+template<typename Derived>
+static inline typename Derived::Index first_aligned(const Derived& m)
+{
+  return first_aligned_impl
+          <Derived, (Derived::Flags & AlignedBit) || !(Derived::Flags & DirectAccessBit)>
+          ::run(m);
+}
+
+template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
+struct inner_stride_at_compile_time
+{
+  enum { ret = traits<Derived>::InnerStrideAtCompileTime };
+};
+
+template<typename Derived>
+struct inner_stride_at_compile_time<Derived, false>
+{
+  enum { ret = 0 };
+};
+
+template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
+struct outer_stride_at_compile_time
+{
+  enum { ret = traits<Derived>::OuterStrideAtCompileTime };
+};
+
+template<typename Derived>
+struct outer_stride_at_compile_time<Derived, false>
+{
+  enum { ret = 0 };
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_DENSECOEFFSBASE_H
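For orientation, here is a minimal usage sketch of the coefficient and stride accessors declared in DenseCoeffsBase above. It is illustrative only and not part of the files added by this patch; it assumes nothing beyond the public Eigen API.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d m = Eigen::Matrix3d::Zero();
  m(0, 2) = 1.5;                                      // WriteAccessors: operator()(Index,Index) forwards to coeffRef
  std::cout << m(0, 2) << "\n";                       // ReadOnlyAccessors: const operator()(Index,Index) forwards to coeff
  std::cout << m.innerStride() << " "
            << m.outerStride() << "\n";               // DirectAccessors: 1 and 3 for a column-major 3x3 matrix

  Eigen::Vector4d v(1.0, 2.0, 3.0, 4.0);
  v[1] = 5.0;                                         // operator[] is only available for vectors
  std::cout << v.x() << " " << v.w() << "\n";         // x()/y()/z()/w() shorthands for the first four coefficients
  return 0;
}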
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/DenseStorage.h b/resources/3rdParty/eigen/Eigen/src/Core/DenseStorage.h
new file mode 100644
index 000000000..1fc2daf2c
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/DenseStorage.h
@@ -0,0 +1,303 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIXSTORAGE_H
+#define EIGEN_MATRIXSTORAGE_H
+
+#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+  #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_DENSE_STORAGE_CTOR_PLUGIN;
+#else
+  #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+struct constructor_without_unaligned_array_assert {};
+
+/** \internal
+  * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned
+  * to a 16-byte boundary if the total size is a multiple of 16 bytes.
+  */
+template <typename T, int Size, int MatrixOrArrayOptions,
+          int Alignment = (MatrixOrArrayOptions&DontAlign) ? 0
+                        : (((Size*sizeof(T))%16)==0) ? 16
+                        : 0 >
+struct plain_array
+{
+  T array[Size];
+  plain_array() {}
+  plain_array(constructor_without_unaligned_array_assert) {}
+};
+
+#ifdef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+  #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
+#else
+  #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \
+    eigen_assert((reinterpret_cast<size_t>(array) & sizemask) == 0 \
+              && "this assertion is explained here: " \
+              "http://eigen.tuxfamily.org/dox-devel/TopicUnalignedArrayAssert.html" \
+              " **** READ THIS WEB PAGE !!! ****");
+#endif
+
+template <typename T, int Size, int MatrixOrArrayOptions>
+struct plain_array<T, Size, MatrixOrArrayOptions, 16>
+{
+  EIGEN_USER_ALIGN16 T array[Size];
+  plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf) }
+  plain_array(constructor_without_unaligned_array_assert) {}
+};
+
+template <typename T, int MatrixOrArrayOptions, int Alignment>
+struct plain_array<T, 0, MatrixOrArrayOptions, Alignment>
+{
+  EIGEN_USER_ALIGN16 T array[1];
+  plain_array() {}
+  plain_array(constructor_without_unaligned_array_assert) {}
+};
+
+} // end namespace internal
+
+/** \internal
+  *
+  * \class DenseStorage
+  * \ingroup Core_Module
+  *
+  * \brief Stores the data of a matrix
+  *
+  * This class stores the data of fixed-size, dynamic-size or mixed matrices
+  * as compactly as possible.
+  *
+  * \sa Matrix
+  */
+template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage;
+
+// purely fixed-size matrix
+template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage
+{
+    internal::plain_array<T,Size,_Options> m_data;
+  public:
+    inline explicit DenseStorage() {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+      : m_data(internal::constructor_without_unaligned_array_assert()) {}
+    inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
+    static inline DenseIndex rows(void) {return _Rows;}
+    static inline DenseIndex cols(void) {return _Cols;}
+    inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+    inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
+    inline const T *data() const { return m_data.array; }
+    inline T *data() { return m_data.array; }
+};
+
+// null matrix
+template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
+{
+  public:
+    inline explicit DenseStorage() {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
+    inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+    inline void swap(DenseStorage& ) {}
+    static inline DenseIndex rows(void) {return _Rows;}
+    static inline DenseIndex cols(void) {return _Cols;}
+    inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+    inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
+    inline const T *data() const { return 0; }
+    inline T *data() { return 0; }
+};
+
+// more specializations for null matrices; these are necessary to resolve ambiguities
+template<typename T, int _Options> class DenseStorage<T, 0, Dynamic, Dynamic, _Options>
+: public DenseStorage<T, 0, 0, 0, _Options> { };
+
+template<typename T, int _Rows, int _Options> class DenseStorage<T, 0, _Rows, Dynamic, _Options>
+: public DenseStorage<T, 0, 0, 0, _Options> { };
+
+template<typename T, int _Cols, int _Options> class DenseStorage<T, 0, Dynamic, _Cols, _Options>
+: public DenseStorage<T, 0, 0, 0, _Options> { };
+
+// dynamic-size matrix with fixed-size storage
+template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic, Dynamic, _Options>
+{
+    internal::plain_array<T,Size,_Options> m_data;
+    DenseIndex m_rows;
+    DenseIndex m_cols;
+  public:
+    inline explicit DenseStorage() : m_rows(0), m_cols(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+      : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
+    inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex cols) : m_rows(rows), m_cols(cols) {}
+    inline void swap(DenseStorage& other)
+    { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
+    inline DenseIndex rows(void) const {return m_rows;}
+    inline DenseIndex cols(void) const {return m_cols;}
+    inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
+    inline void resize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
+    inline const T *data() const { return m_data.array; }
+    inline T *data() { return m_data.array; }
+};
+
+// dynamic-size matrix with fixed-size storage and fixed width
+template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Size, Dynamic, _Cols, _Options>
+{
+    internal::plain_array<T,Size,_Options> m_data;
+    DenseIndex m_rows;
+  public:
+    inline explicit DenseStorage() : m_rows(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+      : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
+    inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex) : m_rows(rows) {}
+    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+    inline DenseIndex rows(void) const {return m_rows;}
+    inline DenseIndex cols(void) const {return _Cols;}
+    inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
+    inline void resize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
+    inline const T *data() const { return m_data.array; }
+    inline T *data() { return m_data.array; }
+};
+
+// dynamic-size matrix with fixed-size storage and fixed height
+template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Size, _Rows, Dynamic, _Options>
+{
+    internal::plain_array<T,Size,_Options> m_data;
+    DenseIndex m_cols;
+  public:
+    inline explicit DenseStorage() : m_cols(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+      : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
+    inline DenseStorage(DenseIndex, DenseIndex, DenseIndex cols) : m_cols(cols) {}
+    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+    inline DenseIndex rows(void) const {return _Rows;}
+    inline DenseIndex cols(void) const {return m_cols;}
+    inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
+    inline void resize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
+    inline const T *data() const { return m_data.array; }
+    inline T *data() { return m_data.array; }
+};
+
+// purely dynamic matrix.
+template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynamic, _Options>
+{
+    T *m_data;
+    DenseIndex m_rows;
+    DenseIndex m_cols;
+  public:
+    inline explicit DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+       : m_data(0), m_rows(0), m_cols(0) {}
+    inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex cols)
+      : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols) 
+    { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
+    inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
+    inline void swap(DenseStorage& other)
+    { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
+    inline DenseIndex rows(void) const {return m_rows;}
+    inline DenseIndex cols(void) const {return m_cols;}
+    inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex cols)
+    {
+      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
+      m_rows = rows;
+      m_cols = cols;
+    }
+    void resize(DenseIndex size, DenseIndex rows, DenseIndex cols)
+    {
+      if(size != m_rows*m_cols)
+      {
+        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols);
+        if (size)
+          m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
+        else
+          m_data = 0;
+        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+      }
+      m_rows = rows;
+      m_cols = cols;
+    }
+    inline const T *data() const { return m_data; }
+    inline T *data() { return m_data; }
+};
+
+// matrix with dynamic width and fixed height (so that matrix has dynamic size).
+template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Rows, Dynamic, _Options>
+{
+    T *m_data;
+    DenseIndex m_cols;
+  public:
+    inline explicit DenseStorage() : m_data(0), m_cols(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
+    inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(cols)
+    { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
+    inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
+    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+    static inline DenseIndex rows(void) {return _Rows;}
+    inline DenseIndex cols(void) const {return m_cols;}
+    inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols)
+    {
+      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
+      m_cols = cols;
+    }
+    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex cols)
+    {
+      if(size != _Rows*m_cols)
+      {
+        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols);
+        if (size)
+          m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
+        else
+          m_data = 0;
+        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+      }
+      m_cols = cols;
+    }
+    inline const T *data() const { return m_data; }
+    inline T *data() { return m_data; }
+};
+
+// matrix with dynamic height and fixed width (so that matrix has dynamic size).
+template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dynamic, _Cols, _Options>
+{
+    T *m_data;
+    DenseIndex m_rows;
+  public:
+    inline explicit DenseStorage() : m_data(0), m_rows(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
+    inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows)
+    { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
+    inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
+    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+    inline DenseIndex rows(void) const {return m_rows;}
+    static inline DenseIndex cols(void) {return _Cols;}
+    inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex)
+    {
+      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
+      m_rows = rows;
+    }
+    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex rows, DenseIndex)
+    {
+      if(size != m_rows*_Cols)
+      {
+        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows);
+        if (size)
+          m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
+        else
+          m_data = 0;
+        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+      }
+      m_rows = rows;
+    }
+    inline const T *data() const { return m_data; }
+    inline T *data() { return m_data; }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIXSTORAGE_H
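A small sketch of how the DenseStorage specializations above are exercised through the public Matrix types: a fixed-size matrix uses the in-object plain_array storage, while a Dynamic-sized matrix goes through the heap-allocating specialization and supports resize(). Illustrative only, not part of the files added by this patch.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix4f fixed;            // fixed-size: DenseStorage with in-object plain_array<float,16,...>
  Eigen::MatrixXf dynamic(2, 3);    // purely dynamic: DenseStorage holding a heap-allocated buffer
  dynamic.resize(5, 7);             // forwards to DenseStorage::resize(size, rows, cols)
  std::cout << sizeof(fixed) << " " << dynamic.rows() << "x" << dynamic.cols() << "\n";
  return 0;
}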
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Diagonal.h b/resources/3rdParty/eigen/Eigen/src/Core/Diagonal.h
new file mode 100644
index 000000000..16261968a
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Diagonal.h
@@ -0,0 +1,236 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DIAGONAL_H
+#define EIGEN_DIAGONAL_H
+
+namespace Eigen { 
+
+/** \class Diagonal
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix
+  *
+  * \param MatrixType the type of the object in which we are taking a sub/main/super diagonal
+  * \param DiagIndex the index of the sub/super diagonal. The default is 0, which means the main diagonal.
+  *              A positive value means a superdiagonal, a negative value means a subdiagonal.
+  *              You can also use Dynamic so the index can be set at runtime.
+  *
+  * The matrix is not required to be square.
+  *
+  * This class represents an expression of the main diagonal, or any sub/super diagonal,
+  * of a matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the
+  * time this is the only way it is used.
+  *
+  * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index)
+  */
+
+namespace internal {
+template<typename MatrixType, int DiagIndex>
+struct traits<Diagonal<MatrixType,DiagIndex> >
+ : traits<MatrixType>
+{
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+  typedef typename MatrixType::StorageKind StorageKind;
+  enum {
+    RowsAtCompileTime = (int(DiagIndex) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
+    : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
+                            MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
+    ColsAtCompileTime = 1,
+    MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
+                         : DiagIndex == Dynamic ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime,
+                                                                              MatrixType::MaxColsAtCompileTime)
+                         : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
+                                                 MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
+    MaxColsAtCompileTime = 1,
+    MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
+    Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit,
+    CoeffReadCost = _MatrixTypeNested::CoeffReadCost,
+    MatrixTypeOuterStride = outer_stride_at_compile_time<MatrixType>::ret,
+    InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1,
+    OuterStrideAtCompileTime = 0
+  };
+};
+}
+
+template<typename MatrixType, int DiagIndex> class Diagonal
+   : public internal::dense_xpr_base< Diagonal<MatrixType,DiagIndex> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<Diagonal>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
+
+    inline Diagonal(MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {}
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
+
+    inline Index rows() const
+    { return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
+
+    inline Index cols() const { return 1; }
+
+    inline Index innerStride() const
+    {
+      return m_matrix.outerStride() + 1;
+    }
+
+    inline Index outerStride() const
+    {
+      return 0;
+    }
+
+    typedef typename internal::conditional<
+                       internal::is_lvalue<MatrixType>::value,
+                       Scalar,
+                       const Scalar
+                     >::type ScalarWithConstIfNotLvalue;
+
+    inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); }
+    inline const Scalar* data() const { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); }
+
+    inline Scalar& coeffRef(Index row, Index)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+      return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset());
+    }
+
+    inline const Scalar& coeffRef(Index row, Index) const
+    {
+      return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset());
+    }
+
+    inline CoeffReturnType coeff(Index row, Index) const
+    {
+      return m_matrix.coeff(row+rowOffset(), row+colOffset());
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+      return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset());
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset());
+    }
+
+    inline CoeffReturnType coeff(Index index) const
+    {
+      return m_matrix.coeff(index+rowOffset(), index+colOffset());
+    }
+
+    const typename internal::remove_all<typename MatrixType::Nested>::type& 
+    nestedExpression() const 
+    {
+      return m_matrix;
+    }
+
+    int index() const
+    {
+      return m_index.value();
+    }
+
+  protected:
+    typename MatrixType::Nested m_matrix;
+    const internal::variable_if_dynamic<Index, DiagIndex> m_index;
+
+  private:
+    // some compilers may fail to optimize std::max etc in case of compile-time constants...
+    EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
+    EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
+    EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
+    // trigger a compile-time error if someone tries to call packet
+    template<int LoadMode> typename MatrixType::PacketReturnType packet(Index) const;
+    template<int LoadMode> typename MatrixType::PacketReturnType packet(Index,Index) const;
+};
+
+/** \returns an expression of the main diagonal of the matrix \c *this
+  *
+  * \c *this is not required to be square.
+  *
+  * Example: \include MatrixBase_diagonal.cpp
+  * Output: \verbinclude MatrixBase_diagonal.out
+  *
+  * \sa class Diagonal */
+template<typename Derived>
+inline typename MatrixBase<Derived>::DiagonalReturnType
+MatrixBase<Derived>::diagonal()
+{
+  return derived();
+}
+
+/** This is the const version of diagonal(). */
+template<typename Derived>
+inline const typename MatrixBase<Derived>::ConstDiagonalReturnType
+MatrixBase<Derived>::diagonal() const
+{
+  return ConstDiagonalReturnType(derived());
+}
+
+/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
+  *
+  * \c *this is not required to be square.
+  *
+  * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
+  * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
+  *
+  * Example: \include MatrixBase_diagonal_int.cpp
+  * Output: \verbinclude MatrixBase_diagonal_int.out
+  *
+  * \sa MatrixBase::diagonal(), class Diagonal */
+template<typename Derived>
+inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Dynamic>::Type
+MatrixBase<Derived>::diagonal(Index index)
+{
+  return typename DiagonalIndexReturnType<Dynamic>::Type(derived(), index);
+}
+
+/** This is the const version of diagonal(Index). */
+template<typename Derived>
+inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Dynamic>::Type
+MatrixBase<Derived>::diagonal(Index index) const
+{
+  return typename ConstDiagonalIndexReturnType<Dynamic>::Type(derived(), index);
+}
+
+/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
+  *
+  * \c *this is not required to be square.
+  *
+  * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
+  * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
+  *
+  * Example: \include MatrixBase_diagonal_template_int.cpp
+  * Output: \verbinclude MatrixBase_diagonal_template_int.out
+  *
+  * \sa MatrixBase::diagonal(), class Diagonal */
+template<typename Derived>
+template<int Index>
+inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index>::Type
+MatrixBase<Derived>::diagonal()
+{
+  return derived();
+}
+
+/** This is the const version of diagonal<int>(). */
+template<typename Derived>
+template<int Index>
+inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index>::Type
+MatrixBase<Derived>::diagonal() const
+{
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_DIAGONAL_H
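A short sketch of the diagonal() accessors implemented above, showing the main diagonal, a runtime-selected superdiagonal, and a compile-time-selected subdiagonal. Illustrative only, not part of the files added by this patch.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;
  std::cout << m.diagonal().transpose()     << "\n";  // main diagonal: 1 5 9
  std::cout << m.diagonal(1).transpose()    << "\n";  // first superdiagonal (runtime index): 2 6
  std::cout << m.diagonal<-1>().transpose() << "\n";  // first subdiagonal (compile-time index): 4 8
  m.diagonal().setZero();                             // Diagonal is writable when the matrix is an lvalue
  return 0;
}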
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/DiagonalMatrix.h b/resources/3rdParty/eigen/Eigen/src/Core/DiagonalMatrix.h
new file mode 100644
index 000000000..6e8b50fab
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/DiagonalMatrix.h
@@ -0,0 +1,307 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DIAGONALMATRIX_H
+#define EIGEN_DIAGONALMATRIX_H
+
+namespace Eigen { 
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename Derived>
+class DiagonalBase : public EigenBase<Derived>
+{
+  public:
+    typedef typename internal::traits<Derived>::DiagonalVectorType DiagonalVectorType;
+    typedef typename DiagonalVectorType::Scalar Scalar;
+    typedef typename DiagonalVectorType::RealScalar RealScalar;
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+
+    enum {
+      RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+      ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+      MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
+      MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
+      IsVectorAtCompileTime = 0,
+      Flags = 0
+    };
+
+    typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime> DenseMatrixType;
+    typedef DenseMatrixType DenseType;
+    typedef DiagonalMatrix<Scalar,DiagonalVectorType::SizeAtCompileTime,DiagonalVectorType::MaxSizeAtCompileTime> PlainObject;
+
+    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    inline Derived& derived() { return *static_cast<Derived*>(this); }
+
+    DenseMatrixType toDenseMatrix() const { return derived(); }
+    template<typename DenseDerived>
+    void evalTo(MatrixBase<DenseDerived> &other) const;
+    template<typename DenseDerived>
+    void addTo(MatrixBase<DenseDerived> &other) const
+    { other.diagonal() += diagonal(); }
+    template<typename DenseDerived>
+    void subTo(MatrixBase<DenseDerived> &other) const
+    { other.diagonal() -= diagonal(); }
+
+    inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
+    inline DiagonalVectorType& diagonal() { return derived().diagonal(); }
+
+    inline Index rows() const { return diagonal().size(); }
+    inline Index cols() const { return diagonal().size(); }
+
+    template<typename MatrixDerived>
+    const DiagonalProduct<MatrixDerived, Derived, OnTheLeft>
+    operator*(const MatrixBase<MatrixDerived> &matrix) const;
+
+    inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType> >
+    inverse() const
+    {
+      return diagonal().cwiseInverse();
+    }
+    
+    inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> >
+    operator*(const Scalar& scalar) const
+    {
+      return diagonal() * scalar;
+    }
+    friend inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> >
+    operator*(const Scalar& scalar, const DiagonalBase& other)
+    {
+      return other.diagonal() * scalar;
+    }
+    
+    #ifdef EIGEN2_SUPPORT
+    template<typename OtherDerived>
+    bool isApprox(const DiagonalBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+    {
+      return diagonal().isApprox(other.diagonal(), precision);
+    }
+    template<typename OtherDerived>
+    bool isApprox(const MatrixBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+    {
+      return toDenseMatrix().isApprox(other, precision);
+    }
+    #endif
+};
+
+template<typename Derived>
+template<typename DenseDerived>
+void DiagonalBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
+{
+  other.setZero();
+  other.diagonal() = diagonal();
+}
+#endif
+
+/** \class DiagonalMatrix
+  * \ingroup Core_Module
+  *
+  * \brief Represents a diagonal matrix with its storage
+  *
+  * \param _Scalar the type of coefficients
+  * \param SizeAtCompileTime the dimension of the matrix, or Dynamic
+  * \param MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. This parameter is optional and defaults
+  *        to SizeAtCompileTime. Most of the time, you do not need to specify it.
+  *
+  * \sa class DiagonalWrapper
+  */
+
+namespace internal {
+template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>
+struct traits<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >
+ : traits<Matrix<_Scalar,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+  typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType;
+  typedef Dense StorageKind;
+  typedef DenseIndex Index;
+  enum {
+    Flags = LvalueBit
+  };
+};
+}
+template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>
+class DiagonalMatrix
+  : public DiagonalBase<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+  public:
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef typename internal::traits<DiagonalMatrix>::DiagonalVectorType DiagonalVectorType;
+    typedef const DiagonalMatrix& Nested;
+    typedef _Scalar Scalar;
+    typedef typename internal::traits<DiagonalMatrix>::StorageKind StorageKind;
+    typedef typename internal::traits<DiagonalMatrix>::Index Index;
+    #endif
+
+  protected:
+
+    DiagonalVectorType m_diagonal;
+
+  public:
+
+    /** const version of diagonal(). */
+    inline const DiagonalVectorType& diagonal() const { return m_diagonal; }
+    /** \returns a reference to the stored vector of diagonal coefficients. */
+    inline DiagonalVectorType& diagonal() { return m_diagonal; }
+
+    /** Default constructor without initialization */
+    inline DiagonalMatrix() {}
+
+    /** Constructs a diagonal matrix with given dimension  */
+    inline DiagonalMatrix(Index dim) : m_diagonal(dim) {}
+
+    /** 2D constructor. */
+    inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {}
+
+    /** 3D constructor. */
+    inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {}
+
+    /** Copy constructor. */
+    template<typename OtherDerived>
+    inline DiagonalMatrix(const DiagonalBase<OtherDerived>& other) : m_diagonal(other.diagonal()) {}
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** Copy constructor. Prevents a default copy constructor from hiding the other templated constructor. */
+    inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {}
+    #endif
+
+    /** generic constructor from expression of the diagonal coefficients */
+    template<typename OtherDerived>
+    explicit inline DiagonalMatrix(const MatrixBase<OtherDerived>& other) : m_diagonal(other)
+    {}
+
+    /** Copy operator. */
+    template<typename OtherDerived>
+    DiagonalMatrix& operator=(const DiagonalBase<OtherDerived>& other)
+    {
+      m_diagonal = other.diagonal();
+      return *this;
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    DiagonalMatrix& operator=(const DiagonalMatrix& other)
+    {
+      m_diagonal = other.diagonal();
+      return *this;
+    }
+    #endif
+
+    /** Resizes to given size. */
+    inline void resize(Index size) { m_diagonal.resize(size); }
+    /** Sets all coefficients to zero. */
+    inline void setZero() { m_diagonal.setZero(); }
+    /** Resizes and sets all coefficients to zero. */
+    inline void setZero(Index size) { m_diagonal.setZero(size); }
+    /** Sets this matrix to be the identity matrix of the current size. */
+    inline void setIdentity() { m_diagonal.setOnes(); }
+    /** Sets this matrix to be the identity matrix of the given size. */
+    inline void setIdentity(Index size) { m_diagonal.setOnes(size); }
+};
+
+/** \class DiagonalWrapper
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a diagonal matrix
+  *
+  * \param _DiagonalVectorType the type of the vector of diagonal coefficients
+  *
+  * This class is an expression of a diagonal matrix that does not store its own vector of diagonal coefficients,
+  * but instead wraps an existing vector expression. It is the return type of MatrixBase::asDiagonal()
+  * and most of the time this is the only way that it is used.
+  *
+  * \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal()
+  */
+
+namespace internal {
+template<typename _DiagonalVectorType>
+struct traits<DiagonalWrapper<_DiagonalVectorType> >
+{
+  typedef _DiagonalVectorType DiagonalVectorType;
+  typedef typename DiagonalVectorType::Scalar Scalar;
+  typedef typename DiagonalVectorType::Index Index;
+  typedef typename DiagonalVectorType::StorageKind StorageKind;
+  enum {
+    RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    MaxRowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    MaxColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    Flags =  traits<DiagonalVectorType>::Flags & LvalueBit
+  };
+};
+}
+
+template<typename _DiagonalVectorType>
+class DiagonalWrapper
+  : public DiagonalBase<DiagonalWrapper<_DiagonalVectorType> >, internal::no_assignment_operator
+{
+  public:
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef _DiagonalVectorType DiagonalVectorType;
+    typedef DiagonalWrapper Nested;
+    #endif
+
+    /** Constructor from expression of diagonal coefficients to wrap. */
+    inline DiagonalWrapper(DiagonalVectorType& diagonal) : m_diagonal(diagonal) {}
+
+    /** \returns a const reference to the wrapped expression of diagonal coefficients. */
+    const DiagonalVectorType& diagonal() const { return m_diagonal; }
+
+  protected:
+    typename DiagonalVectorType::Nested m_diagonal;
+};
+
+/** \returns a pseudo-expression of a diagonal matrix with *this as the vector of diagonal coefficients
+  *
+  * \only_for_vectors
+  *
+  * Example: \include MatrixBase_asDiagonal.cpp
+  * Output: \verbinclude MatrixBase_asDiagonal.out
+  *
+  * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal()
+  **/
+template<typename Derived>
+inline const DiagonalWrapper<const Derived>
+MatrixBase<Derived>::asDiagonal() const
+{
+  return derived();
+}
+
+/** \returns true if *this is approximately equal to a diagonal matrix,
+  *          within the precision given by \a prec.
+  *
+  * Example: \include MatrixBase_isDiagonal.cpp
+  * Output: \verbinclude MatrixBase_isDiagonal.out
+  *
+  * \sa asDiagonal()
+  */
+template<typename Derived>
+bool MatrixBase<Derived>::isDiagonal(RealScalar prec) const
+{
+  if(cols() != rows()) return false;
+  RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
+  for(Index j = 0; j < cols(); ++j)
+  {
+    RealScalar absOnDiagonal = internal::abs(coeff(j,j));
+    if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
+  }
+  for(Index j = 0; j < cols(); ++j)
+    for(Index i = 0; i < j; ++i)
+    {
+      if(!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false;
+      if(!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false;
+    }
+  return true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_DIAGONALMATRIX_H
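A minimal sketch of DiagonalMatrix and asDiagonal() as declared above. Illustrative only, not part of the files added by this patch.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::DiagonalMatrix<double, 3> d(1.0, 2.0, 3.0);  // stores only the vector of diagonal coefficients
  Eigen::Vector3d v(4.0, 5.0, 6.0);
  Eigen::Matrix3d dense = d.toDenseMatrix();          // expand to a full 3x3 matrix

  // DiagonalWrapper: view an existing vector as a diagonal matrix without copying it
  std::cout << (v.asDiagonal() * Eigen::Matrix3d::Identity()) << "\n";
  std::cout << dense.isDiagonal() << "\n";            // prints 1
  return 0;
}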
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/DiagonalProduct.h b/resources/3rdParty/eigen/Eigen/src/Core/DiagonalProduct.h
new file mode 100644
index 000000000..598c6b3e1
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/DiagonalProduct.h
@@ -0,0 +1,123 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DIAGONALPRODUCT_H
+#define EIGEN_DIAGONALPRODUCT_H
+
+namespace Eigen { 
+
+namespace internal {
+template<typename MatrixType, typename DiagonalType, int ProductOrder>
+struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
+ : traits<MatrixType>
+{
+  typedef typename scalar_product_traits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;
+  enum {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+
+    _StorageOrder = MatrixType::Flags & RowMajorBit ? RowMajor : ColMajor,
+    _PacketOnDiag = !((int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
+                    ||(int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)),
+    _SameTypes = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value,
+    // FIXME currently we need the same types, but in the future the following rule should be used instead:
+    //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::Flags)&PacketAccessBit))),
+    _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && ((!_PacketOnDiag) || (bool(int(DiagonalType::Flags)&PacketAccessBit))),
+
+    Flags = (HereditaryBits & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),
+    CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
+  };
+};
+}
+
+template<typename MatrixType, typename DiagonalType, int ProductOrder>
+class DiagonalProduct : internal::no_assignment_operator,
+                        public MatrixBase<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
+{
+  public:
+
+    typedef MatrixBase<DiagonalProduct> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(DiagonalProduct)
+
+    inline DiagonalProduct(const MatrixType& matrix, const DiagonalType& diagonal)
+      : m_matrix(matrix), m_diagonal(diagonal)
+    {
+      eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols()));
+    }
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+
+    const Scalar coeff(Index row, Index col) const
+    {
+      return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col);
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+    {
+      enum {
+        StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor
+      };
+      const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col;
+
+      return packet_impl<LoadMode>(row,col,indexInDiagonalVector,typename internal::conditional<
+        ((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
+       ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type());
+    }
+
+  protected:
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::true_type) const
+    {
+      return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
+                     internal::pset1<PacketScalar>(m_diagonal.diagonal().coeff(id)));
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::false_type) const
+    {
+      enum {
+        InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
+        DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned
+      };
+      return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
+                     m_diagonal.diagonal().template packet<DiagonalVectorPacketLoadMode>(id));
+    }
+
+    typename MatrixType::Nested m_matrix;
+    typename DiagonalType::Nested m_diagonal;
+};
+
+/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal.
+  */
+template<typename Derived>
+template<typename DiagonalDerived>
+inline const DiagonalProduct<Derived, DiagonalDerived, OnTheRight>
+MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &diagonal) const
+{
+  return DiagonalProduct<Derived, DiagonalDerived, OnTheRight>(derived(), diagonal.derived());
+}
+
+/** \returns the diagonal matrix product of \c *this by the matrix \a matrix.
+  */
+template<typename DiagonalDerived>
+template<typename MatrixDerived>
+inline const DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>
+DiagonalBase<DiagonalDerived>::operator*(const MatrixBase<MatrixDerived> &matrix) const
+{
+  return DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>(matrix.derived(), derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_DIAGONALPRODUCT_H
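A minimal sketch of the two operator* overloads above (illustrative only): multiplying by a diagonal on the right scales the columns, while multiplying on the left scales the rows.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d m = Eigen::Matrix3d::Ones();
  Eigen::Vector3d d(1.0, 2.0, 3.0);

  Eigen::Matrix3d scaledCols = m * d.asDiagonal();  // OnTheRight: column j scaled by d(j)
  Eigen::Matrix3d scaledRows = d.asDiagonal() * m;  // OnTheLeft:  row i scaled by d(i)

  std::cout << scaledCols.row(0) << "\n";  // 1 2 3
  std::cout << scaledRows.row(0) << "\n";  // 1 1 1
  return 0;
}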
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Dot.h b/resources/3rdParty/eigen/Eigen/src/Core/Dot.h
new file mode 100644
index 000000000..ae9274e36
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Dot.h
@@ -0,0 +1,261 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008, 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DOT_H
+#define EIGEN_DOT_H
+
+namespace Eigen { 
+
+namespace internal {
+
+// helper function for dot(). The problem is that if we put that in the body of dot(), then upon calling dot
+// with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE
+// looking at the static assertions. Thus this is a trick to get better compile errors.
+template<typename T, typename U,
+// the NeedToTranspose condition here is taken straight from Assign.h
+         bool NeedToTranspose = T::IsVectorAtCompileTime
+                && U::IsVectorAtCompileTime
+                && ((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1)
+                      |  // FIXME: using | instead of || to silence GCC 4.4.0's spurious "suggest parentheses around &&" warning;
+                         // revert to || as soon as it is no longer needed.
+                    (int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1))
+>
+struct dot_nocheck
+{
+  typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar;
+  static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+  {
+    return a.template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum();
+  }
+};
+
+template<typename T, typename U>
+struct dot_nocheck<T, U, true>
+{
+  typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar;
+  static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+  {
+    return a.transpose().template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum();
+  }
+};
+
+} // end namespace internal
+
+/** \returns the dot product of *this with other.
+  *
+  * \only_for_vectors
+  *
+  * \note If the scalar type is complex, then this function returns the Hermitian
+  * (sesquilinear) dot product, which is conjugate-linear in the first variable and linear in the
+  * second variable.
+  *
+  * \sa squaredNorm(), norm()
+  */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
+MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+  typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func;
+  EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar);
+
+  eigen_assert(size() == other.size());
+
+  return internal::dot_nocheck<Derived,OtherDerived>::run(*this, other);
+}
+
+#ifdef EIGEN2_SUPPORT
+/** \returns the dot product of *this with other, with the Eigen2 convention that the dot product is linear in the first variable
+  * (conjugating the second variable). Of course this only makes a difference in the complex case.
+  *
+  * This method is only available in EIGEN2_SUPPORT mode.
+  *
+  * \only_for_vectors
+  *
+  * \sa dot()
+  */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+MatrixBase<Derived>::eigen2_dot(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+  eigen_assert(size() == other.size());
+
+  return internal::dot_nocheck<OtherDerived,Derived>::run(other,*this);
+}
+#endif
+
+
+//---------- implementation of L2 norm and related functions ----------
+
+/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm.
+  * In both cases, it is the sum of the squares of all the matrix entries.
+  * For vectors, this is also equal to the dot product of \c *this with itself.
+  *
+  * \sa dot(), norm()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
+{
+  return internal::real((*this).cwiseAbs2().sum());
+}
+
+/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
+  * In both cases, it is the square root of the sum of the squares of all the matrix entries.
+  * For vectors, this is also equal to the square root of the dot product of \c *this with itself.
+  *
+  * \sa dot(), squaredNorm()
+  */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
+{
+  return internal::sqrt(squaredNorm());
+}
+
+/** \returns an expression of the quotient of *this by its own norm.
+  *
+  * \only_for_vectors
+  *
+  * \sa norm(), normalize()
+  */
+template<typename Derived>
+inline const typename MatrixBase<Derived>::PlainObject
+MatrixBase<Derived>::normalized() const
+{
+  typedef typename internal::nested<Derived>::type Nested;
+  typedef typename internal::remove_reference<Nested>::type _Nested;
+  _Nested n(derived());
+  return n / n.norm();
+}
+
+/** Normalizes the vector, i.e. divides it by its own norm.
+  *
+  * \only_for_vectors
+  *
+  * \sa norm(), normalized()
+  */
+template<typename Derived>
+inline void MatrixBase<Derived>::normalize()
+{
+  *this /= norm();
+}
+
+//---------- implementation of other norms ----------
+
+namespace internal {
+
+template<typename Derived, int p>
+struct lpNorm_selector
+{
+  typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
+  static inline RealScalar run(const MatrixBase<Derived>& m)
+  {
+    return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p);
+  }
+};
+
+template<typename Derived>
+struct lpNorm_selector<Derived, 1>
+{
+  static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
+  {
+    return m.cwiseAbs().sum();
+  }
+};
+
+template<typename Derived>
+struct lpNorm_selector<Derived, 2>
+{
+  static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
+  {
+    return m.norm();
+  }
+};
+
+template<typename Derived>
+struct lpNorm_selector<Derived, Infinity>
+{
+  static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
+  {
+    return m.cwiseAbs().maxCoeff();
+  }
+};
+
+} // end namespace internal
+
+/** \returns the \f$ \ell^p \f$ norm of *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values
+  *          of the coefficients of *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$
+  *          norm, that is the maximum of the absolute values of the coefficients of *this.
+  *
+  * \sa norm()
+  */
+template<typename Derived>
+template<int p>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::lpNorm() const
+{
+  return internal::lpNorm_selector<Derived, p>::run(*this);
+}
+
+//---------- implementation of isOrthogonal / isUnitary ----------
+
+/** \returns true if *this is approximately orthogonal to \a other,
+  *          within the precision given by \a prec.
+  *
+  * Example: \include MatrixBase_isOrthogonal.cpp
+  * Output: \verbinclude MatrixBase_isOrthogonal.out
+  */
+template<typename Derived>
+template<typename OtherDerived>
+bool MatrixBase<Derived>::isOrthogonal
+(const MatrixBase<OtherDerived>& other, RealScalar prec) const
+{
+  typename internal::nested<Derived,2>::type nested(derived());
+  typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
+  return internal::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
+}
+
+/** \returns true if *this is approximately a unitary matrix,
+  *          within the precision given by \a prec. In the case where the \a Scalar
+  *          type is real, a unitary matrix is an orthogonal matrix, whence the name.
+  *
+  * \note This can be used to check whether a family of vectors forms an orthonormal basis.
+  *       Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an
+  *       orthonormal basis.
+  *
+  * Example: \include MatrixBase_isUnitary.cpp
+  * Output: \verbinclude MatrixBase_isUnitary.out
+  */
+template<typename Derived>
+bool MatrixBase<Derived>::isUnitary(RealScalar prec) const
+{
+  typename Derived::Nested nested(derived());
+  for(Index i = 0; i < cols(); ++i)
+  {
+    if(!internal::isApprox(nested.col(i).squaredNorm(), static_cast<RealScalar>(1), prec))
+      return false;
+    for(Index j = 0; j < i; ++j)
+      if(!internal::isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast<Scalar>(1), prec))
+        return false;
+  }
+  return true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_DOT_H
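A minimal sketch exercising the functions defined above (illustrative only; the particular vectors are arbitrary):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector3d v(1.0, 2.0, 2.0);
  Eigen::Vector3d w(2.0, 0.0, -1.0);

  std::cout << v.dot(w) << "\n";                     // 0: v and w are orthogonal
  std::cout << v.squaredNorm() << "\n";              // 9
  std::cout << v.norm() << "\n";                     // 3
  std::cout << v.lpNorm<Eigen::Infinity>() << "\n";  // 2
  std::cout << v.isOrthogonal(w) << "\n";            // 1

  Eigen::Matrix3d q = Eigen::Matrix3d::Identity();
  std::cout << q.isUnitary() << "\n";                // 1: columns form an orthonormal basis
  return 0;
}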
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/EigenBase.h b/resources/3rdParty/eigen/Eigen/src/Core/EigenBase.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/EigenBase.h
rename to resources/3rdParty/eigen/Eigen/src/Core/EigenBase.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Flagged.h b/resources/3rdParty/eigen/Eigen/src/Core/Flagged.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/Flagged.h
rename to resources/3rdParty/eigen/Eigen/src/Core/Flagged.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/ForceAlignedAccess.h b/resources/3rdParty/eigen/Eigen/src/Core/ForceAlignedAccess.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/ForceAlignedAccess.h
rename to resources/3rdParty/eigen/Eigen/src/Core/ForceAlignedAccess.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Functors.h b/resources/3rdParty/eigen/Eigen/src/Core/Functors.h
new file mode 100644
index 000000000..2f46abfdd
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Functors.h
@@ -0,0 +1,967 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FUNCTORS_H
+#define EIGEN_FUNCTORS_H
+
+namespace Eigen {
+
+namespace internal {
+
+// associative functors:
+
+/** \internal
+  * \brief Template functor to compute the sum of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, MatrixBase::sum()
+  */
+template<typename Scalar> struct scalar_sum_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::padd(a,b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
+  { return internal::predux(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sum_op<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasAdd
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the product of two scalars
+  *
+  * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux()
+  */
+template<typename LhsScalar,typename RhsScalar> struct scalar_product_op {
+  enum {
+    // TODO vectorize mixed product
+    Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasMul && packet_traits<RhsScalar>::HasMul
+  };
+  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op)
+  EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pmul(a,b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
+  { return internal::predux_mul(a); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_product_op<LhsScalar,RhsScalar> > {
+  enum {
+    Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost)/2, // rough estimate!
+    PacketAccess = scalar_product_op<LhsScalar,RhsScalar>::Vectorizable
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the conjugate product of two scalars
+  *
+  * This is a shortcut for conj(x) * y, which is needed for optimization purposes; in Eigen2 support mode, this becomes x * conj(y)
+  */
+template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op {
+
+  enum {
+    Conj = NumTraits<LhsScalar>::IsComplex
+  };
+  
+  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
+  
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op)
+  EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const
+  { return conj_helper<LhsScalar,RhsScalar,Conj,false>().pmul(a,b); }
+  
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return conj_helper<Packet,Packet,Conj,false>().pmul(a,b); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_conj_product_op<LhsScalar,RhsScalar> > {
+  enum {
+    Cost = NumTraits<LhsScalar>::MulCost,
+    PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMul
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the min of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class VectorwiseOp, MatrixBase::minCoeff()
+  */
+template<typename Scalar> struct scalar_min_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return (min)(a, b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pmin(a,b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
+  { return internal::predux_min(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_min_op<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasMin
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the max of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class VectorwiseOp, MatrixBase::maxCoeff()
+  */
+template<typename Scalar> struct scalar_max_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return (max)(a, b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pmax(a,b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
+  { return internal::predux_max(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_max_op<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasMax
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the hypot of two scalars
+  *
+  * \sa MatrixBase::stableNorm(), class Redux
+  */
+template<typename Scalar> struct scalar_hypot_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op)
+//   typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const
+  {
+    using std::max;
+    using std::min;
+    Scalar p = (max)(_x, _y);
+    Scalar q = (min)(_x, _y);
+    Scalar qp = q/p;
+    return p * sqrt(Scalar(1) + qp*qp);
+  }
+};
+template<typename Scalar>
+struct functor_traits<scalar_hypot_op<Scalar> > {
+  enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess=0 };
+};
+
+/** \internal
+  * \brief Template functor to compute the pow of two scalars
+  */
+template<typename Scalar, typename OtherScalar> struct scalar_binary_pow_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_binary_pow_op)
+  inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return internal::pow(a, b); }
+};
+template<typename Scalar, typename OtherScalar>
+struct functor_traits<scalar_binary_pow_op<Scalar,OtherScalar> > {
+  enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
+};
+
+// other binary functors:
+
+/** \internal
+  * \brief Template functor to compute the difference of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::operator-
+  */
+template<typename Scalar> struct scalar_difference_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a - b; }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::psub(a,b); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_difference_op<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasSub
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the quotient of two scalars
+  *
+  * \sa class CwiseBinaryOp, Cwise::operator/()
+  */
+template<typename Scalar> struct scalar_quotient_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a / b; }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pdiv(a,b); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_quotient_op<Scalar> > {
+  enum {
+    Cost = 2 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasDiv
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the and of two booleans
+  *
+  * \sa class CwiseBinaryOp, ArrayBase::operator&&
+  */
+struct scalar_boolean_and_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_and_op)
+  EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a && b; }
+};
+template<> struct functor_traits<scalar_boolean_and_op> {
+  enum {
+    Cost = NumTraits<bool>::AddCost,
+    PacketAccess = false
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the or of two booleans
+  *
+  * \sa class CwiseBinaryOp, ArrayBase::operator||
+  */
+struct scalar_boolean_or_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_or_op)
+  EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a || b; }
+};
+template<> struct functor_traits<scalar_boolean_or_op> {
+  enum {
+    Cost = NumTraits<bool>::AddCost,
+    PacketAccess = false
+  };
+};
+
+// unary functors:
+
+/** \internal
+  * \brief Template functor to compute the opposite of a scalar
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::operator-
+  */
+template<typename Scalar> struct scalar_opposite_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_opposite_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pnegate(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_opposite_op<Scalar> >
+{ enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasNegate };
+};
+
+/** \internal
+  * \brief Template functor to compute the absolute value of a scalar
+  *
+  * \sa class CwiseUnaryOp, Cwise::abs
+  */
+template<typename Scalar> struct scalar_abs_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs(a); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pabs(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_abs_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasAbs
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the squared absolute value of a scalar
+  *
+  * \sa class CwiseUnaryOp, Cwise::abs2
+  */
+template<typename Scalar> struct scalar_abs2_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs2(a); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pmul(a,a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_abs2_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasAbs2 }; };
+
+/** \internal
+  * \brief Template functor to compute the conjugate of a complex value
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::conjugate()
+  */
+template<typename Scalar> struct scalar_conjugate_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return internal::conj(a); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_conjugate_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::IsComplex ? NumTraits<Scalar>::AddCost : 0,
+    PacketAccess = packet_traits<Scalar>::HasConj
+  };
+};
+
+/** \internal
+  * \brief Template functor to cast a scalar to another type
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::cast()
+  */
+template<typename Scalar, typename NewType>
+struct scalar_cast_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+  typedef NewType result_type;
+  EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return cast<Scalar, NewType>(a); }
+};
+template<typename Scalar, typename NewType>
+struct functor_traits<scalar_cast_op<Scalar,NewType> >
+{ enum { Cost = is_same<Scalar, NewType>::value ? 0 : NumTraits<NewType>::AddCost, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the real part of a complex
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::real()
+  */
+template<typename Scalar>
+struct scalar_real_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::real(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_real_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the imaginary part of a complex
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::imag()
+  */
+template<typename Scalar>
+struct scalar_imag_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::imag(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_imag_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the real part of a complex as a reference
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::real()
+  */
+template<typename Scalar>
+struct scalar_real_ref_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::real_ref(*const_cast<Scalar*>(&a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_real_ref_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the imaginary part of a complex as a reference
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::imag()
+  */
+template<typename Scalar>
+struct scalar_imag_ref_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::imag_ref(*const_cast<Scalar*>(&a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_imag_ref_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+  *
+  * \brief Template functor to compute the exponential of a scalar
+  *
+  * \sa class CwiseUnaryOp, Cwise::exp()
+  */
+template<typename Scalar> struct scalar_exp_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op)
+  inline const Scalar operator() (const Scalar& a) const { return internal::exp(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::pexp(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_exp_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasExp }; };
+
+/** \internal
+  *
+  * \brief Template functor to compute the logarithm of a scalar
+  *
+  * \sa class CwiseUnaryOp, Cwise::log()
+  */
+template<typename Scalar> struct scalar_log_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op)
+  inline const Scalar operator() (const Scalar& a) const { return internal::log(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::plog(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_log_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasLog }; };
+
+/** \internal
+  * \brief Template functor to multiply a scalar by a fixed other one
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::operator*, MatrixBase::operator/
+  */
+/* NOTE: why is doing the pset1() in packetOp an optimization?
+ * At first sight it seems better to declare m_other as a Packet and do the pset1() once
+ * in the constructor. However, in practice:
+ *  - GCC does not like m_other as a Packet and generates a load every time it needs it
+ *  - on the other hand GCC is able to move the pset1() outside the loop :)
+ *  - simpler code ;)
+ * (ICC and gcc 4.4 seem to perform well in both cases; the issue is visible with y = a*x + b*y)
+ */
+template<typename Scalar>
+struct scalar_multiple_op {
+  typedef typename packet_traits<Scalar>::type Packet;
+  // FIXME default copy constructors seem to be buggy with std::complex<>
+  EIGEN_STRONG_INLINE scalar_multiple_op(const scalar_multiple_op& other) : m_other(other.m_other) { }
+  EIGEN_STRONG_INLINE scalar_multiple_op(const Scalar& other) : m_other(other) { }
+  EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; }
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pmul(a, pset1<Packet>(m_other)); }
+  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_multiple_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+template<typename Scalar1, typename Scalar2>
+struct scalar_multiple2_op {
+  typedef typename scalar_product_traits<Scalar1,Scalar2>::ReturnType result_type;
+  EIGEN_STRONG_INLINE scalar_multiple2_op(const scalar_multiple2_op& other) : m_other(other.m_other) { }
+  EIGEN_STRONG_INLINE scalar_multiple2_op(const Scalar2& other) : m_other(other) { }
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a * m_other; }
+  typename add_const_on_value_type<typename NumTraits<Scalar2>::Nested>::type m_other;
+};
+template<typename Scalar1,typename Scalar2>
+struct functor_traits<scalar_multiple2_op<Scalar1,Scalar2> >
+{ enum { Cost = NumTraits<Scalar1>::MulCost, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to divide a scalar by a fixed other one
+  *
+  * This functor is used to implement the quotient of a matrix by
+  * a scalar where the scalar type is not necessarily a floating point type.
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::operator/
+  */
+template<typename Scalar>
+struct scalar_quotient1_op {
+  typedef typename packet_traits<Scalar>::type Packet;
+  // FIXME default copy constructors seem to be buggy with std::complex<>
+  EIGEN_STRONG_INLINE scalar_quotient1_op(const scalar_quotient1_op& other) : m_other(other.m_other) { }
+  EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other) : m_other(other) {}
+  EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; }
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pdiv(a, pset1<Packet>(m_other)); }
+  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_quotient1_op<Scalar> >
+{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
+
+// nullary functors
+
+template<typename Scalar>
+struct scalar_constant_op {
+  typedef typename packet_traits<Scalar>::type Packet;
+  EIGEN_STRONG_INLINE scalar_constant_op(const scalar_constant_op& other) : m_other(other.m_other) { }
+  EIGEN_STRONG_INLINE scalar_constant_op(const Scalar& other) : m_other(other) { }
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index, Index = 0) const { return m_other; }
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Packet packetOp(Index, Index = 0) const { return internal::pset1<Packet>(m_other); }
+  const Scalar m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_constant_op<Scalar> >
+// FIXME replace this packet test by a safe one
+{ enum { Cost = 1, PacketAccess = packet_traits<Scalar>::Vectorizable, IsRepeatable = true }; };
+
+template<typename Scalar> struct scalar_identity_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_identity_op)
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const { return row==col ? Scalar(1) : Scalar(0); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_identity_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };
+
+template <typename Scalar, bool RandomAccess> struct linspaced_op_impl;
+
+// linear access for packet ops:
+// 1) initialization
+//   base = [low, ..., low] + ([step, ..., step] * [-size, ..., 0])
+// 2) each step
+//   base += [size*step, ..., size*step]
+template <typename Scalar>
+struct linspaced_op_impl<Scalar,false>
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+
+  linspaced_op_impl(Scalar low, Scalar step) :
+  m_low(low), m_step(step),
+  m_packetStep(pset1<Packet>(packet_traits<Scalar>::size*step)),
+  m_base(padd(pset1<Packet>(low),pmul(pset1<Packet>(step),plset<Scalar>(-packet_traits<Scalar>::size)))) {}
+
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Packet packetOp(Index) const { return m_base = padd(m_base,m_packetStep); }
+
+  const Scalar m_low;
+  const Scalar m_step;
+  const Packet m_packetStep;
+  mutable Packet m_base;
+};
+
+// random access for packet ops:
+// 1) each step
+//   [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) )
+template <typename Scalar>
+struct linspaced_op_impl<Scalar,true>
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+
+  linspaced_op_impl(Scalar low, Scalar step) :
+  m_low(low), m_step(step),
+  m_lowPacket(pset1<Packet>(m_low)), m_stepPacket(pset1<Packet>(m_step)), m_interPacket(plset<Scalar>(0)) {}
+
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
+
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Packet packetOp(Index i) const
+  { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(i),m_interPacket))); }
+
+  const Scalar m_low;
+  const Scalar m_step;
+  const Packet m_lowPacket;
+  const Packet m_stepPacket;
+  const Packet m_interPacket;
+};
+
+// ----- Linspace functor ----------------------------------------------------------------
+
+// Forward declaration (we default to random access, which does not really give
+// us a speed gain when using packet access, but it allows the functor to be used
+// in nested expressions).
+template <typename Scalar, bool RandomAccess = true> struct linspaced_op;
+template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_op<Scalar,RandomAccess> >
+{ enum { Cost = 1, PacketAccess = packet_traits<Scalar>::HasSetLinear, IsRepeatable = true }; };
+template <typename Scalar, bool RandomAccess> struct linspaced_op
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+  linspaced_op(Scalar low, Scalar high, int num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
+
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }
+
+  // We need this function when assigning e.g. a RowVectorXd to a MatrixXd, since
+  // in that case row==0 and col is used for the actual iteration.
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const 
+  {
+    eigen_assert(col==0 || row==0);
+    return impl(col + row);
+  }
+
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Packet packetOp(Index i) const { return impl.packetOp(i); }
+
+  // We need this function when assigning e.g. a RowVectorXd to a MatrixXd, since
+  // in that case row==0 and col is used for the actual iteration.
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Packet packetOp(Index row, Index col) const
+  {
+    eigen_assert(col==0 || row==0);
+    return impl.packetOp(col + row);
+  }
+
+  // This proxy object handles the actual required temporaries, the different
+  // implementations (random vs. sequential access) as well as the
+  // correct piping to size 2/4 packet operations.
+  const linspaced_op_impl<Scalar,RandomAccess> impl;
+};
+
+// All functors allow linear access, except scalar_identity_op. So we define here a quick meta-helper
+// to indicate whether a functor allows linear access, always answering 'yes' except for
+// scalar_identity_op.
+// FIXME move this to functor_traits adding a functor_default
+template<typename Functor> struct functor_has_linear_access { enum { ret = 1 }; };
+template<typename Scalar> struct functor_has_linear_access<scalar_identity_op<Scalar> > { enum { ret = 0 }; };
+
+// in CwiseBinaryOp, we require the Lhs and Rhs to have the same scalar type, except for multiplication
+// where we only require them to have the same _real_ scalar type so one may multiply, say, float by complex<float>.
+// FIXME move this to functor_traits adding a functor_default
+template<typename Functor> struct functor_allows_mixing_real_and_complex { enum { ret = 0 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_allows_mixing_real_and_complex<scalar_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_allows_mixing_real_and_complex<scalar_conj_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+
+
+/** \internal
+  * \brief Template functor to add a scalar to a fixed other one
+  * \sa class CwiseUnaryOp, Array::operator+
+  */
+/* If you wonder why doing the pset1() in packetOp() is an optimization check scalar_multiple_op */
+template<typename Scalar>
+struct scalar_add_op {
+  typedef typename packet_traits<Scalar>::type Packet;
+  // FIXME default copy constructors seem to be buggy with std::complex<>
+  inline scalar_add_op(const scalar_add_op& other) : m_other(other.m_other) { }
+  inline scalar_add_op(const Scalar& other) : m_other(other) { }
+  inline Scalar operator() (const Scalar& a) const { return a + m_other; }
+  inline const Packet packetOp(const Packet& a) const
+  { return internal::padd(a, pset1<Packet>(m_other)); }
+  const Scalar m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_add_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = packet_traits<Scalar>::HasAdd }; };
+
+/** \internal
+  * \brief Template functor to compute the square root of a scalar
+  * \sa class CwiseUnaryOp, Cwise::sqrt()
+  */
+template<typename Scalar> struct scalar_sqrt_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op)
+  inline const Scalar operator() (const Scalar& a) const { return internal::sqrt(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sqrt_op<Scalar> >
+{ enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasSqrt
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the cosine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::cos()
+  */
+template<typename Scalar> struct scalar_cos_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op)
+  inline Scalar operator() (const Scalar& a) const { return internal::cos(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::pcos(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_cos_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasCos
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the sine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::sin()
+  */
+template<typename Scalar> struct scalar_sin_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op)
+  inline const Scalar operator() (const Scalar& a) const { return internal::sin(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::psin(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sin_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasSin
+  };
+};
+
+
+/** \internal
+  * \brief Template functor to compute the tan of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::tan()
+  */
+template<typename Scalar> struct scalar_tan_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op)
+  inline const Scalar operator() (const Scalar& a) const { return internal::tan(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::ptan(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_tan_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasTan
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the arc cosine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::acos()
+  */
+template<typename Scalar> struct scalar_acos_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op)
+  inline const Scalar operator() (const Scalar& a) const { return internal::acos(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::pacos(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_acos_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasACos
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the arc sine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::asin()
+  */
+template<typename Scalar> struct scalar_asin_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op)
+  inline const Scalar operator() (const Scalar& a) const { return internal::asin(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::pasin(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_asin_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasASin
+  };
+};
+
+/** \internal
+  * \brief Template functor to raise a scalar to a power
+  * \sa class CwiseUnaryOp, Cwise::pow
+  */
+template<typename Scalar>
+struct scalar_pow_op {
+  // FIXME default copy constructors seem to be buggy with std::complex<>
+  inline scalar_pow_op(const scalar_pow_op& other) : m_exponent(other.m_exponent) { }
+  inline scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {}
+  inline Scalar operator() (const Scalar& a) const { return internal::pow(a, m_exponent); }
+  const Scalar m_exponent;
+};
+template<typename Scalar>
+struct functor_traits<scalar_pow_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to compute the quotient between a scalar and array entries.
+  * \sa class CwiseUnaryOp, Cwise::inverse()
+  */
+template<typename Scalar>
+struct scalar_inverse_mult_op {
+  scalar_inverse_mult_op(const Scalar& other) : m_other(other) {}
+  inline Scalar operator() (const Scalar& a) const { return m_other / a; }
+  template<typename Packet>
+  inline const Packet packetOp(const Packet& a) const
+  { return internal::pdiv(pset1<Packet>(m_other),a); }
+  Scalar m_other;
+};
+
+/** \internal
+  * \brief Template functor to compute the inverse of a scalar
+  * \sa class CwiseUnaryOp, Cwise::inverse()
+  */
+template<typename Scalar>
+struct scalar_inverse_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_inverse_op)
+  inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; }
+  template<typename Packet>
+  inline const Packet packetOp(const Packet& a) const
+  { return internal::pdiv(pset1<Packet>(Scalar(1)),a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_inverse_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
+
+/** \internal
+  * \brief Template functor to compute the square of a scalar
+  * \sa class CwiseUnaryOp, Cwise::square()
+  */
+template<typename Scalar>
+struct scalar_square_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op)
+  inline Scalar operator() (const Scalar& a) const { return a*a; }
+  template<typename Packet>
+  inline const Packet packetOp(const Packet& a) const
+  { return internal::pmul(a,a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_square_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+/** \internal
+  * \brief Template functor to compute the cube of a scalar
+  * \sa class CwiseUnaryOp, Cwise::cube()
+  */
+template<typename Scalar>
+struct scalar_cube_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op)
+  inline Scalar operator() (const Scalar& a) const { return a*a*a; }
+  template<typename Packet>
+  inline const Packet packetOp(const Packet& a) const
+  { return internal::pmul(a,pmul(a,a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_cube_op<Scalar> >
+{ enum { Cost = 2*NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+// default functor traits for STL functors:
+
+template<typename T>
+struct functor_traits<std::multiplies<T> >
+{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::divides<T> >
+{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::plus<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::minus<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::negate<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_or<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_and<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_not<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::greater<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::less<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::greater_equal<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::less_equal<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::equal_to<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::not_equal_to<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::binder2nd<T> >
+{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::binder1st<T> >
+{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::unary_negate<T> >
+{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::binary_negate<T> >
+{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
+
+#ifdef EIGEN_STDEXT_SUPPORT
+
+template<typename T0,typename T1>
+struct functor_traits<std::project1st<T0,T1> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::project2nd<T0,T1> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::select2nd<std::pair<T0,T1> > >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::select1st<std::pair<T0,T1> > >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::unary_compose<T0,T1> >
+{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost, PacketAccess = false }; };
+
+template<typename T0,typename T1,typename T2>
+struct functor_traits<std::binary_compose<T0,T1,T2> >
+{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost + functor_traits<T2>::Cost, PacketAccess = false }; };
+
+#endif // EIGEN_STDEXT_SUPPORT
+
+// Allow new functors and specializations of functor_traits to be added from outside Eigen.
+// This macro is really needed because functor_traits must be specialized after it is declared but before it is used...
+#ifdef EIGEN_FUNCTORS_PLUGIN
+#include EIGEN_FUNCTORS_PLUGIN
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_FUNCTORS_H
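A minimal sketch (illustrative only) of how these functors surface in user code: STL functors can be plugged into binaryExpr() thanks to the functor_traits specializations above, and Eigen's own expressions are built on the unary functors in this file (sqrt() maps to scalar_sqrt_op, for instance).

#include <Eigen/Dense>
#include <functional>
#include <iostream>

int main()
{
  Eigen::Vector3d a(1.0, 2.0, 3.0);
  Eigen::Vector3d b(4.0, 5.0, 6.0);

  // STL functor: scalar path only, PacketAccess = false per the traits above.
  Eigen::Vector3d sum = a.binaryExpr(b, std::plus<double>());

  // Built-in functor: this expression is a CwiseUnaryOp over scalar_sqrt_op.
  Eigen::Array3d roots = a.array().sqrt();

  std::cout << sum.transpose() << "\n";    // 5 7 9
  std::cout << roots.transpose() << "\n";  // 1 1.41421 1.73205
  return 0;
}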
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Fuzzy.h b/resources/3rdParty/eigen/Eigen/src/Core/Fuzzy.h
new file mode 100644
index 000000000..d74edcfdb
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Fuzzy.h
@@ -0,0 +1,150 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FUZZY_H
+#define EIGEN_FUZZY_H
+
+namespace Eigen { 
+
+namespace internal
+{
+
+template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
+struct isApprox_selector
+{
+  static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec)
+  {
+    using std::min;
+    typename internal::nested<Derived,2>::type nested(x);
+    typename internal::nested<OtherDerived,2>::type otherNested(y);
+    return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
+  }
+};
+
+template<typename Derived, typename OtherDerived>
+struct isApprox_selector<Derived, OtherDerived, true>
+{
+  static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar)
+  {
+    return x.matrix() == y.matrix();
+  }
+};
+
+template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
+struct isMuchSmallerThan_object_selector
+{
+  static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec)
+  {
+    return x.cwiseAbs2().sum() <= abs2(prec) * y.cwiseAbs2().sum();
+  }
+};
+
+template<typename Derived, typename OtherDerived>
+struct isMuchSmallerThan_object_selector<Derived, OtherDerived, true>
+{
+  static bool run(const Derived& x, const OtherDerived&, typename Derived::RealScalar)
+  {
+    return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
+  }
+};
+
+template<typename Derived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
+struct isMuchSmallerThan_scalar_selector
+{
+  static bool run(const Derived& x, const typename Derived::RealScalar& y, typename Derived::RealScalar prec)
+  {
+    return x.cwiseAbs2().sum() <= abs2(prec * y);
+  }
+};
+
+template<typename Derived>
+struct isMuchSmallerThan_scalar_selector<Derived, true>
+{
+  static bool run(const Derived& x, const typename Derived::RealScalar&, typename Derived::RealScalar)
+  {
+    return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
+  }
+};
+
+} // end namespace internal
+
+
+/** \returns \c true if \c *this is approximately equal to \a other, within the precision
+  * determined by \a prec.
+  *
+  * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
+  * are considered to be approximately equal within precision \f$ p \f$ if
+  * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
+  * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm,
+  * i.e. the L2 norm of the matrix coefficients).
+  *
+  * \note Because of the multiplicative nature of this comparison, one can't use this function
+  * to check whether \c *this is approximately equal to the zero matrix or vector.
+  * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix
+  * or vector. If you want to test whether \c *this is zero, use internal::isMuchSmallerThan(const
+  * RealScalar&, RealScalar) instead.
+  *
+  * \sa internal::isMuchSmallerThan(const RealScalar&, RealScalar) const
+  */
+template<typename Derived>
+template<typename OtherDerived>
+bool DenseBase<Derived>::isApprox(
+  const DenseBase<OtherDerived>& other,
+  RealScalar prec
+) const
+{
+  return internal::isApprox_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
+}
+
+/** \returns \c true if the norm of \c *this is much smaller than \a other,
+  * within the precision determined by \a prec.
+  *
+  * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
+  * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if
+  * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f]
+  *
+  * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason,
+  * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm
+  * of a reference matrix of same dimensions.
+  *
+  * \sa isApprox(), isMuchSmallerThan(const DenseBase<OtherDerived>&, RealScalar) const
+  */
+template<typename Derived>
+bool DenseBase<Derived>::isMuchSmallerThan(
+  const typename NumTraits<Scalar>::Real& other,
+  RealScalar prec
+) const
+{
+  return internal::isMuchSmallerThan_scalar_selector<Derived>::run(derived(), other, prec);
+}
+
+/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other,
+  * within the precision determined by \a prec.
+  *
+  * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
+  * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if
+  * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f]
+  * For matrices, the comparison is done using the Hilbert-Schmidt norm.
+  *
+  * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const
+  */
+template<typename Derived>
+template<typename OtherDerived>
+bool DenseBase<Derived>::isMuchSmallerThan(
+  const DenseBase<OtherDerived>& other,
+  RealScalar prec
+) const
+{
+  return internal::isMuchSmallerThan_object_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
+}
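+
+// A short sketch of the two isMuchSmallerThan overloads above (the scalar reference should itself be a norm):
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
+//   Eigen::MatrixXd E = 1e-14 * A;                  // a tiny perturbation of A
+//   bool vsMatrix = E.isMuchSmallerThan(A);         // ||E|| <= prec * ||A||   (Frobenius norms)
+//   bool vsScalar = E.isMuchSmallerThan(A.norm());  // same idea, with an explicit reference norm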
+
+} // end namespace Eigen
+
+#endif // EIGEN_FUZZY_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/GeneralProduct.h b/resources/3rdParty/eigen/Eigen/src/Core/GeneralProduct.h
new file mode 100644
index 000000000..bfc2a67b1
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/GeneralProduct.h
@@ -0,0 +1,613 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERAL_PRODUCT_H
+#define EIGEN_GENERAL_PRODUCT_H
+
+namespace Eigen { 
+
+/** \class GeneralProduct
+  * \ingroup Core_Module
+  *
+  * \brief Expression of the product of two general matrices or vectors
+  *
+  * \param LhsNested the type used to store the left-hand side
+  * \param RhsNested the type used to store the right-hand side
+  * \param ProductMode the type of the product
+  *
+  * This class represents an expression of the product of two general matrices.
+  * By a general matrix we mean a dense matrix with full storage. For instance,
+  * this excludes triangular, selfadjoint, and sparse matrices.
+  * It is the return type of the operator* between general matrices. Its template
+  * arguments are determined automatically by ProductReturnType. Therefore,
+  * GeneralProduct should never be used directly. To determine the result type of a
+  * function which involves a matrix product, use ProductReturnType::Type.
+  *
+  * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
+  */
+template<typename Lhs, typename Rhs, int ProductType = internal::product_type<Lhs,Rhs>::value>
+class GeneralProduct;
+
+enum {
+  Large = 2,
+  Small = 3
+};
+
+namespace internal {
+
+template<int Rows, int Cols, int Depth> struct product_type_selector;
+
+template<int Size, int MaxSize> struct product_size_category
+{
+  enum { is_large = MaxSize == Dynamic ||
+                    Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD,
+         value = is_large  ? Large
+               : Size == 1 ? 1
+                           : Small
+  };
+};
+
+template<typename Lhs, typename Rhs> struct product_type
+{
+  typedef typename remove_all<Lhs>::type _Lhs;
+  typedef typename remove_all<Rhs>::type _Rhs;
+  enum {
+    MaxRows  = _Lhs::MaxRowsAtCompileTime,
+    Rows  = _Lhs::RowsAtCompileTime,
+    MaxCols  = _Rhs::MaxColsAtCompileTime,
+    Cols  = _Rhs::ColsAtCompileTime,
+    MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime,
+                                           _Rhs::MaxRowsAtCompileTime),
+    Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime,
+                                        _Rhs::RowsAtCompileTime),
+    LargeThreshold = EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+  };
+
+  // the splitting into different lines of code here, introducing the _select enums and the typedef below,
+  // is to work around an internal compiler error with gcc 4.1 and 4.2.
+private:
+  enum {
+    rows_select = product_size_category<Rows,MaxRows>::value,
+    cols_select = product_size_category<Cols,MaxCols>::value,
+    depth_select = product_size_category<Depth,MaxDepth>::value
+  };
+  typedef product_type_selector<rows_select, cols_select, depth_select> selector;
+
+public:
+  enum {
+    value = selector::ret
+  };
+#ifdef EIGEN_DEBUG_PRODUCT
+  static void debug()
+  {
+      EIGEN_DEBUG_VAR(Rows);
+      EIGEN_DEBUG_VAR(Cols);
+      EIGEN_DEBUG_VAR(Depth);
+      EIGEN_DEBUG_VAR(rows_select);
+      EIGEN_DEBUG_VAR(cols_select);
+      EIGEN_DEBUG_VAR(depth_select);
+      EIGEN_DEBUG_VAR(value);
+  }
+#endif
+};
+
+
+/* The following allows selecting the kind of product at compile time
+ * based on the three dimensions of the product.
+ * This is a compile-time mapping from {1,Small,Large}^3 -> {product types} */
+// FIXME I'm not sure the current mapping is the ideal one.
+template<int M, int N>  struct product_type_selector<M,N,1>              { enum { ret = OuterProduct }; };
+template<int Depth>     struct product_type_selector<1,    1,    Depth>  { enum { ret = InnerProduct }; };
+template<>              struct product_type_selector<1,    1,    1>      { enum { ret = InnerProduct }; };
+template<>              struct product_type_selector<Small,1,    Small>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<1,    Small,Small>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<Small,Small,Small>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<Small, Small, 1>    { enum { ret = LazyCoeffBasedProductMode }; };
+template<>              struct product_type_selector<Small, Large, 1>    { enum { ret = LazyCoeffBasedProductMode }; };
+template<>              struct product_type_selector<Large, Small, 1>    { enum { ret = LazyCoeffBasedProductMode }; };
+template<>              struct product_type_selector<1,    Large,Small>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<1,    Large,Large>  { enum { ret = GemvProduct }; };
+template<>              struct product_type_selector<1,    Small,Large>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<Large,1,    Small>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<Large,1,    Large>  { enum { ret = GemvProduct }; };
+template<>              struct product_type_selector<Small,1,    Large>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<Small,Small,Large>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Large,Small,Large>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Small,Large,Large>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Large,Large,Large>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Large,Small,Small>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Small,Large,Small>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Large,Large,Small>  { enum { ret = GemmProduct }; };
+
+} // end namespace internal
+
+/** \class ProductReturnType
+  * \ingroup Core_Module
+  *
+  * \brief Helper class to get the correct and optimized return type of operator*
+  *
+  * \param Lhs the type of the left-hand side
+  * \param Rhs the type of the right-hand side
+  * \param ProductMode the type of the product (determined automatically by internal::product_mode)
+  *
+  * This class defines the typename Type representing the optimized product expression
+  * between two matrix expressions. In practice, using ProductReturnType<Lhs,Rhs>::Type
+  * is the recommended way to define the result type of a function returning an expression
+  * which involves a matrix product. The class Product should never be
+  * used directly.
+  *
+  * \sa class Product, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
+  */
+template<typename Lhs, typename Rhs, int ProductType>
+struct ProductReturnType
+{
+  // TODO use the nested type to reduce instantiations ????
+//   typedef typename internal::nested<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
+//   typedef typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
+
+  typedef GeneralProduct<Lhs/*Nested*/, Rhs/*Nested*/, ProductType> Type;
+};
+
+template<typename Lhs, typename Rhs>
+struct ProductReturnType<Lhs,Rhs,CoeffBasedProductMode>
+{
+  typedef typename internal::nested<Lhs, Rhs::ColsAtCompileTime, typename internal::plain_matrix_type<Lhs>::type >::type LhsNested;
+  typedef typename internal::nested<Rhs, Lhs::RowsAtCompileTime, typename internal::plain_matrix_type<Rhs>::type >::type RhsNested;
+  typedef CoeffBasedProduct<LhsNested, RhsNested, EvalBeforeAssigningBit | EvalBeforeNestingBit> Type;
+};
+
+template<typename Lhs, typename Rhs>
+struct ProductReturnType<Lhs,Rhs,LazyCoeffBasedProductMode>
+{
+  typedef typename internal::nested<Lhs, Rhs::ColsAtCompileTime, typename internal::plain_matrix_type<Lhs>::type >::type LhsNested;
+  typedef typename internal::nested<Rhs, Lhs::RowsAtCompileTime, typename internal::plain_matrix_type<Rhs>::type >::type RhsNested;
+  typedef CoeffBasedProduct<LhsNested, RhsNested, NestByRefBit> Type;
+};
+
+// this is a workaround for sun CC
+template<typename Lhs, typename Rhs>
+struct LazyProductReturnType : public ProductReturnType<Lhs,Rhs,LazyCoeffBasedProductMode>
+{};
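+
+// A small sketch of the documented usage: declaring a function that returns a product expression.
+//
+//   template<typename Lhs, typename Rhs>
+//   const typename Eigen::ProductReturnType<Lhs, Rhs>::Type
+//   myProduct(const Eigen::MatrixBase<Lhs>& a, const Eigen::MatrixBase<Rhs>& b)
+//   { return a.derived() * b.derived(); }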
+
+/***********************************************************************
+*  Implementation of Inner Vector Vector Product
+***********************************************************************/
+
+// FIXME : maybe the "inner product" could return a Scalar
+// instead of a 1x1 matrix ??
+// Pro: more natural for the user
+// Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix
+// product ends up being a row-vector times col-vector product... To tackle this use
+// case, we could have a specialization for Block<MatrixType,1,1> with: operator=(Scalar x);
+
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,InnerProduct> >
+ : traits<Matrix<typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1> >
+{};
+
+}
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, InnerProduct>
+  : internal::no_assignment_operator,
+    public Matrix<typename internal::scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1>
+{
+    typedef Matrix<typename internal::scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1> Base;
+  public:
+    GeneralProduct(const Lhs& lhs, const Rhs& rhs)
+    {
+      EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+      Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum();
+    }
+
+    /** Conversion to scalar */
+    operator const typename Base::Scalar() const {
+      return Base::coeff(0,0);
+    }
+};
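+
+// Usage sketch: the scalar conversion above is what makes a row-vector times column-vector
+// expression usable directly as a plain scalar.
+//
+//   Eigen::Vector3d u(1, 2, 3), v(4, 5, 6);
+//   double s = u.transpose() * v;   // a 1x1 InnerProduct, converted to double
+//   double d = u.dot(v);            // equivalent, and usually the clearer spelling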
+
+/***********************************************************************
+*  Implementation of Outer Vector Vector Product
+***********************************************************************/
+
+namespace internal {
+template<int StorageOrder> struct outer_product_selector;
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,OuterProduct> >
+ : traits<ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs> >
+{};
+
+}
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, OuterProduct>
+  : public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
+
+    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {
+      EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+    }
+
+    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    {
+      internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dest, alpha);
+    }
+};
+
+namespace internal {
+
+template<> struct outer_product_selector<ColMajor> {
+  template<typename ProductType, typename Dest>
+  static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
+    typedef typename Dest::Index Index;
+    // FIXME make sure lhs is sequentially stored
+    // FIXME not very good if rhs is real and lhs complex while alpha is real too
+    const Index cols = dest.cols();
+    for (Index j=0; j<cols; ++j)
+      dest.col(j) += (alpha * prod.rhs().coeff(j)) * prod.lhs();
+  }
+};
+
+template<> struct outer_product_selector<RowMajor> {
+  template<typename ProductType, typename Dest>
+  static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
+    typedef typename Dest::Index Index;
+    // FIXME make sure rhs is sequentially stored
+    // FIXME not very good if lhs is real and rhs complex while alpha is real too
+    const Index rows = dest.rows();
+    for (Index i=0; i<rows; ++i)
+      dest.row(i) += (alpha * prod.lhs().coeff(i)) * prod.rhs();
+  }
+};
+
+} // end namespace internal
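+
+// Sketch of an update that goes through the selectors above (dynamic sizes, so the
+// OuterProduct path is taken rather than the coefficient-based one):
+//
+//   Eigen::MatrixXd m = Eigen::MatrixXd::Zero(100, 100);
+//   Eigen::VectorXd u = Eigen::VectorXd::Random(100), v = Eigen::VectorXd::Random(100);
+//   m.noalias() += u * v.transpose();   // rank-1 update; m is column-major, so it proceeds column by column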
+
+/***********************************************************************
+*  Implementation of General Matrix Vector Product
+***********************************************************************/
+
+/*  According to the shape/flags of the matrix we have to distinguish 3 different cases:
+ *   1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine
+ *   2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine
+ *   3 - all other cases are handled using a simple loop along the outer-storage direction.
+ *  Therefore we need a lower level meta selector.
+ *  Furthermore, if the matrix is the rhs, then the product has to be transposed.
+ */
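+
+// For instance (a rough sketch of the dispatch, assuming <Eigen/Dense>):
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(1000, 1000);
+//   Eigen::VectorXd x = Eigen::VectorXd::Random(1000), y(1000);
+//   Eigen::RowVectorXd z(1000);
+//   y.noalias() = A * x;               // case 1: col-major lhs -> fast col-major kernel
+//   y.noalias() = A.transpose() * x;   // case 2: row-major view of the lhs -> fast row-major kernel
+//   z.noalias() = x.transpose() * A;   // vector on the left: handled by transposing the whole product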
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,GemvProduct> >
+ : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemvProduct>, Lhs, Rhs> >
+{};
+
+template<int Side, int StorageOrder, bool BlasCompatible>
+struct gemv_selector;
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, GemvProduct>
+  : public ProductBase<GeneralProduct<Lhs,Rhs,GemvProduct>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
+
+    typedef typename Lhs::Scalar LhsScalar;
+    typedef typename Rhs::Scalar RhsScalar;
+
+    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {
+//       EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::Scalar, typename Rhs::Scalar>::value),
+//         YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+    }
+
+    enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight };
+    typedef typename internal::conditional<int(Side)==OnTheRight,_LhsNested,_RhsNested>::type MatrixType;
+
+    template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+    {
+      eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols());
+      internal::gemv_selector<Side,(int(MatrixType::Flags)&RowMajorBit) ? RowMajor : ColMajor,
+                       bool(internal::blas_traits<MatrixType>::HasUsableDirectAccess)>::run(*this, dst, alpha);
+    }
+};
+
+namespace internal {
+
+// The vector is on the left => transposition
+template<int StorageOrder, bool BlasCompatible>
+struct gemv_selector<OnTheLeft,StorageOrder,BlasCompatible>
+{
+  template<typename ProductType, typename Dest>
+  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+  {
+    Transpose<Dest> destT(dest);
+    enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
+    gemv_selector<OnTheRight,OtherStorageOrder,BlasCompatible>
+      ::run(GeneralProduct<Transpose<const typename ProductType::_RhsNested>,Transpose<const typename ProductType::_LhsNested>, GemvProduct>
+        (prod.rhs().transpose(), prod.lhs().transpose()), destT, alpha);
+  }
+};
+
+template<typename Scalar,int Size,int MaxSize,bool Cond> struct gemv_static_vector_if;
+
+template<typename Scalar,int Size,int MaxSize>
+struct gemv_static_vector_if<Scalar,Size,MaxSize,false>
+{
+  EIGEN_STRONG_INLINE  Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; }
+};
+
+template<typename Scalar,int Size>
+struct gemv_static_vector_if<Scalar,Size,Dynamic,true>
+{
+  EIGEN_STRONG_INLINE Scalar* data() { return 0; }
+};
+
+template<typename Scalar,int Size,int MaxSize>
+struct gemv_static_vector_if<Scalar,Size,MaxSize,true>
+{
+  #if EIGEN_ALIGN_STATICALLY
+  internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize),0> m_data;
+  EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; }
+  #else
+  // Some architectures cannot align on the stack,
+  // => let's manually enforce alignment by allocating more data and returning the address of the first aligned element.
+  enum {
+    ForceAlignment  = internal::packet_traits<Scalar>::Vectorizable,
+    PacketSize      = internal::packet_traits<Scalar>::size
+  };
+  internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize)+(ForceAlignment?PacketSize:0),0> m_data;
+  EIGEN_STRONG_INLINE Scalar* data() {
+    return ForceAlignment
+            ? reinterpret_cast<Scalar*>((reinterpret_cast<size_t>(m_data.array) & ~(size_t(15))) + 16)
+            : m_data.array;
+  }
+  #endif
+};
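+
+// The rounding trick above in isolation (a sketch): given a buffer over-allocated by one packet,
+// the expression picks the next 16-byte boundary, skipping the start even if it is already aligned:
+//
+//   char buffer[sizeof(Scalar) * (Size + PacketSize)];
+//   Scalar* aligned = reinterpret_cast<Scalar*>(
+//       (reinterpret_cast<size_t>(buffer) & ~size_t(15)) + 16);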
+
+template<> struct gemv_selector<OnTheRight,ColMajor,true>
+{
+  template<typename ProductType, typename Dest>
+  static inline void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+  {
+    typedef typename ProductType::Index Index;
+    typedef typename ProductType::LhsScalar   LhsScalar;
+    typedef typename ProductType::RhsScalar   RhsScalar;
+    typedef typename ProductType::Scalar      ResScalar;
+    typedef typename ProductType::RealScalar  RealScalar;
+    typedef typename ProductType::ActualLhsType ActualLhsType;
+    typedef typename ProductType::ActualRhsType ActualRhsType;
+    typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+    typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+    typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;
+
+    ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs());
+    ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+                                  * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+    enum {
+      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
+      // on the other hand, it is good for the cache to pack the vector anyway...
+      EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
+      ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
+      MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
+    };
+
+    gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
+
+    bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0));
+    bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
+    
+    RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
+
+    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
+                                                  evalToDest ? dest.data() : static_dest.data());
+    
+    if(!evalToDest)
+    {
+      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      int size = dest.size();
+      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      #endif
+      if(!alphaIsCompatible)
+      {
+        MappedDest(actualDestPtr, dest.size()).setZero();
+        compatibleAlpha = RhsScalar(1);
+      }
+      else
+        MappedDest(actualDestPtr, dest.size()) = dest;
+    }
+
+    general_matrix_vector_product
+      <Index,LhsScalar,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
+        actualLhs.rows(), actualLhs.cols(),
+        actualLhs.data(), actualLhs.outerStride(),
+        actualRhs.data(), actualRhs.innerStride(),
+        actualDestPtr, 1,
+        compatibleAlpha);
+
+    if (!evalToDest)
+    {
+      if(!alphaIsCompatible)
+        dest += actualAlpha * MappedDest(actualDestPtr, dest.size());
+      else
+        dest = MappedDest(actualDestPtr, dest.size());
+    }
+  }
+};
+
+template<> struct gemv_selector<OnTheRight,RowMajor,true>
+{
+  template<typename ProductType, typename Dest>
+  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+  {
+    typedef typename ProductType::LhsScalar LhsScalar;
+    typedef typename ProductType::RhsScalar RhsScalar;
+    typedef typename ProductType::Scalar    ResScalar;
+    typedef typename ProductType::Index Index;
+    typedef typename ProductType::ActualLhsType ActualLhsType;
+    typedef typename ProductType::ActualRhsType ActualRhsType;
+    typedef typename ProductType::_ActualRhsType _ActualRhsType;
+    typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+    typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+
+    typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+    typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+                                  * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+    enum {
+      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
+      // on the other hand, it is good for the cache to pack the vector anyway...
+      DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1
+    };
+
+    gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;
+
+    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),
+        DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());
+
+    if(!DirectlyUseRhs)
+    {
+      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      int size = actualRhs.size();
+      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      #endif
+      Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
+    }
+
+    general_matrix_vector_product
+      <Index,LhsScalar,RowMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
+        actualLhs.rows(), actualLhs.cols(),
+        actualLhs.data(), actualLhs.outerStride(),
+        actualRhsPtr, 1,
+        dest.data(), dest.innerStride(),
+        actualAlpha);
+  }
+};
+
+template<> struct gemv_selector<OnTheRight,ColMajor,false>
+{
+  template<typename ProductType, typename Dest>
+  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+  {
+    typedef typename Dest::Index Index;
+    // TODO make sure dest is sequentially stored in memory, otherwise use a temporary
+    const Index size = prod.rhs().rows();
+    for(Index k=0; k<size; ++k)
+      dest += (alpha*prod.rhs().coeff(k)) * prod.lhs().col(k);
+  }
+};
+
+template<> struct gemv_selector<OnTheRight,RowMajor,false>
+{
+  template<typename ProductType, typename Dest>
+  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+  {
+    typedef typename Dest::Index Index;
+    // TODO make sure rhs is sequentially stored in memory, otherwise use a temporary
+    const Index rows = prod.rows();
+    for(Index i=0; i<rows; ++i)
+      dest.coeffRef(i) += alpha * (prod.lhs().row(i).cwiseProduct(prod.rhs().transpose())).sum();
+  }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of matrix base methods
+***************************************************************************/
+
+/** \returns the matrix product of \c *this and \a other.
+  *
+  * \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().
+  *
+  * \sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*()
+  */
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename ProductReturnType<Derived, OtherDerived>::Type
+MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+  // A note regarding the function declaration: In MSVC, this function will sometimes
+  // not be inlined since DenseStorage is an unwindable object for dynamic
+  // matrices and product types are holding a member to store the result.
+  // Thus it does not help tagging this function with EIGEN_STRONG_INLINE.
+  enum {
+    ProductIsValid =  Derived::ColsAtCompileTime==Dynamic
+                   || OtherDerived::RowsAtCompileTime==Dynamic
+                   || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
+    AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
+    SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
+  };
+  // note to the lost user:
+  //    * for a dot product use: v1.dot(v2)
+  //    * for a coeff-wise product use: v1.cwiseProduct(v2)
+  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+    INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+    INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+#ifdef EIGEN_DEBUG_PRODUCT
+  internal::product_type<Derived,OtherDerived>::debug();
+#endif
+  return typename ProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
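+
+// A short sketch contrasting the products mentioned above (assuming <Eigen/Dense>):
+//
+//   Eigen::Matrix2d A = Eigen::Matrix2d::Random(), B = Eigen::Matrix2d::Random();
+//   Eigen::Matrix2d C = A * B;               // matrix product (this operator)
+//   Eigen::Matrix2d D = A.cwiseProduct(B);   // coefficient-wise product
+//   Eigen::Vector2d u = Eigen::Vector2d::Random(), v = Eigen::Vector2d::Random();
+//   double s = u.dot(v);                     // dot product; u * v would trigger the static assertion above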
+
+/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation.
+  *
+  * The returned product will behave like any other expression: the coefficients of the product will be
+  * computed one at a time, as requested. This might be useful in some extremely rare cases when only
+  * a small and scattered fraction of the result's coefficients has to be computed.
+  *
+  * \warning This version of the matrix product can be much, much slower. So use it only if you know
+  * what you are doing and have measured a true speed improvement.
+  *
+  * \sa operator*(const MatrixBase&)
+  */
+template<typename Derived>
+template<typename OtherDerived>
+const typename LazyProductReturnType<Derived,OtherDerived>::Type
+MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
+{
+  enum {
+    ProductIsValid =  Derived::ColsAtCompileTime==Dynamic
+                   || OtherDerived::RowsAtCompileTime==Dynamic
+                   || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
+    AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
+    SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
+  };
+  // note to the lost user:
+  //    * for a dot product use: v1.dot(v2)
+  //    * for a coeff-wise product use: v1.cwiseProduct(v2)
+  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+    INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+    INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+
+  return typename LazyProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
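+
+// A sketch of the (rare) intended use: evaluating only a few coefficients of a product.
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 100), B = Eigen::MatrixXd::Random(100, 100);
+//   double c01 = A.lazyProduct(B)(0, 1);   // computes only the dot product of row 0 of A with column 1 of B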
+
+} // end namespace Eigen
+
+#endif // EIGEN_PRODUCT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/GenericPacketMath.h b/resources/3rdParty/eigen/Eigen/src/Core/GenericPacketMath.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/GenericPacketMath.h
rename to resources/3rdParty/eigen/Eigen/src/Core/GenericPacketMath.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/GlobalFunctions.h b/resources/3rdParty/eigen/Eigen/src/Core/GlobalFunctions.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/GlobalFunctions.h
rename to resources/3rdParty/eigen/Eigen/src/Core/GlobalFunctions.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/IO.h b/resources/3rdParty/eigen/Eigen/src/Core/IO.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/IO.h
rename to resources/3rdParty/eigen/Eigen/src/Core/IO.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Map.h b/resources/3rdParty/eigen/Eigen/src/Core/Map.h
new file mode 100644
index 000000000..15a19226e
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Map.h
@@ -0,0 +1,192 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MAP_H
+#define EIGEN_MAP_H
+
+namespace Eigen { 
+
+/** \class Map
+  * \ingroup Core_Module
+  *
+  * \brief A matrix or vector expression mapping an existing array of data.
+  *
+  * \tparam PlainObjectType the equivalent matrix type of the mapped data
+  * \tparam MapOptions specifies whether the pointer is \c #Aligned, or \c #Unaligned.
+  *                The default is \c #Unaligned.
+  * \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout
+  *                   of an ordinary, contiguous array. This can be overridden by specifying strides.
+  *                   The type passed here must be a specialization of the Stride template, see examples below.
+  *
+  * This class represents a matrix or vector expression mapping an existing array of data.
+  * It can be used to let Eigen interface without any overhead with non-Eigen data structures,
+  * such as plain C arrays or structures from other libraries. By default, it assumes that the
+  * data is laid out contiguously in memory. You can however override this by explicitly specifying
+  * inner and outer strides.
+  *
+  * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix:
+  * \include Map_simple.cpp
+  * Output: \verbinclude Map_simple.out
+  *
+  * If you need to map non-contiguous arrays, you can do so by specifying strides:
+  *
+  * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer
+  * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time
+  * fixed value.
+  * \include Map_inner_stride.cpp
+  * Output: \verbinclude Map_inner_stride.out
+  *
+  * Here's an example of mapping an array while specifying an outer stride. Here, since we're mapping
+  * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns.
+  * Here, we're specifying the outer stride as a runtime parameter. Note that here \c OuterStride<> is
+  * a shorthand for \c OuterStride<Dynamic> because the default template parameter of OuterStride
+  * is \c Dynamic.
+  * \include Map_outer_stride.cpp
+  * Output: \verbinclude Map_outer_stride.out
+  *
+  * For more details and for an example of specifying both an inner and an outer stride, see class Stride.
+  *
+  * \b Tip: to change the array of data mapped by a Map object, you can use the C++
+  * placement new syntax:
+  *
+  * Example: \include Map_placement_new.cpp
+  * Output: \verbinclude Map_placement_new.out
+  *
+  * This class is the return type of PlainObjectBase::Map() but can also be used directly.
+  *
+  * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
+  */
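+
+// A compact inline version of the examples referenced above (a sketch assuming <Eigen/Dense>):
+//
+//   double data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
+//   Eigen::Map<Eigen::MatrixXd> M(data, 2, 4);                                        // contiguous 2x4, column-major
+//   Eigen::Map<Eigen::VectorXd, Eigen::Unaligned, Eigen::InnerStride<2> > v(data, 4); // every other element: 1, 3, 5, 7
+//   Eigen::Map<Eigen::MatrixXd, Eigen::Unaligned, Eigen::OuterStride<> >
+//       N(data, 2, 3, Eigen::OuterStride<>(3));                                       // 2x3 view, runtime column stride 3
+//   new (&M) Eigen::Map<Eigen::MatrixXd>(data, 4, 2);                                 // re-target M via placement new (needs <new>)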
+
+namespace internal {
+template<typename PlainObjectType, int MapOptions, typename StrideType>
+struct traits<Map<PlainObjectType, MapOptions, StrideType> >
+  : public traits<PlainObjectType>
+{
+  typedef traits<PlainObjectType> TraitsBase;
+  typedef typename PlainObjectType::Index Index;
+  typedef typename PlainObjectType::Scalar Scalar;
+  enum {
+    InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
+                             ? int(PlainObjectType::InnerStrideAtCompileTime)
+                             : int(StrideType::InnerStrideAtCompileTime),
+    OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
+                             ? int(PlainObjectType::OuterStrideAtCompileTime)
+                             : int(StrideType::OuterStrideAtCompileTime),
+    HasNoInnerStride = InnerStrideAtCompileTime == 1,
+    HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
+    HasNoStride = HasNoInnerStride && HasNoOuterStride,
+    IsAligned = bool(EIGEN_ALIGN) && ((int(MapOptions)&Aligned)==Aligned),
+    IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
+    KeepsPacketAccess = bool(HasNoInnerStride)
+                        && ( bool(IsDynamicSize)
+                           || HasNoOuterStride
+                           || ( OuterStrideAtCompileTime!=Dynamic
+                           && ((static_cast<int>(sizeof(Scalar))*OuterStrideAtCompileTime)%16)==0 ) ),
+    Flags0 = TraitsBase::Flags & (~NestByRefBit),
+    Flags1 = IsAligned ? (int(Flags0) | AlignedBit) : (int(Flags0) & ~AlignedBit),
+    Flags2 = (bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime))
+           ? int(Flags1) : int(Flags1 & ~LinearAccessBit),
+    Flags3 = is_lvalue<PlainObjectType>::value ? int(Flags2) : (int(Flags2) & ~LvalueBit),
+    Flags = KeepsPacketAccess ? int(Flags3) : (int(Flags3) & ~PacketAccessBit)
+  };
+private:
+  enum { Options }; // Expressions don't have Options
+};
+}
+
+template<typename PlainObjectType, int MapOptions, typename StrideType> class Map
+  : public MapBase<Map<PlainObjectType, MapOptions, StrideType> >
+{
+  public:
+
+    typedef MapBase<Map> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Map)
+
+    typedef typename Base::PointerType PointerType;
+#if EIGEN2_SUPPORT_STAGE <= STAGE30_FULL_EIGEN3_API
+    typedef const Scalar* PointerArgType;
+    inline PointerType cast_to_pointer_type(PointerArgType ptr) { return const_cast<PointerType>(ptr); }
+#else
+    typedef PointerType PointerArgType;
+    inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; }
+#endif
+
+    inline Index innerStride() const
+    {
+      return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
+    }
+
+    inline Index outerStride() const
+    {
+      return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
+           : IsVectorAtCompileTime ? this->size()
+           : int(Flags)&RowMajorBit ? this->cols()
+           : this->rows();
+    }
+
+    /** Constructor in the fixed-size case.
+      *
+      * \param data pointer to the array to map
+      * \param stride optional Stride object, passing the strides.
+      */
+    inline Map(PointerArgType data, const StrideType& stride = StrideType())
+      : Base(cast_to_pointer_type(data)), m_stride(stride)
+    {
+      PlainObjectType::Base::_check_template_params();
+    }
+
+    /** Constructor in the dynamic-size vector case.
+      *
+      * \param data pointer to the array to map
+      * \param size the size of the vector expression
+      * \param stride optional Stride object, passing the strides.
+      */
+    inline Map(PointerArgType data, Index size, const StrideType& stride = StrideType())
+      : Base(cast_to_pointer_type(data), size), m_stride(stride)
+    {
+      PlainObjectType::Base::_check_template_params();
+    }
+
+    /** Constructor in the dynamic-size matrix case.
+      *
+      * \param data pointer to the array to map
+      * \param rows the number of rows of the matrix expression
+      * \param cols the number of columns of the matrix expression
+      * \param stride optional Stride object, passing the strides.
+      */
+    inline Map(PointerArgType data, Index rows, Index cols, const StrideType& stride = StrideType())
+      : Base(cast_to_pointer_type(data), rows, cols), m_stride(stride)
+    {
+      PlainObjectType::Base::_check_template_params();
+    }
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
+
+  protected:
+    StrideType m_stride;
+};
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+inline Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>
+  ::Array(const Scalar *data)
+{
+  this->_set_noalias(Eigen::Map<const Array>(data));
+}
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+inline Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>
+  ::Matrix(const Scalar *data)
+{
+  this->_set_noalias(Eigen::Map<const Matrix>(data));
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MAP_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/MapBase.h b/resources/3rdParty/eigen/Eigen/src/Core/MapBase.h
new file mode 100644
index 000000000..a388d61ea
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/MapBase.h
@@ -0,0 +1,242 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MAPBASE_H
+#define EIGEN_MAPBASE_H
+
+#define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \
+      EIGEN_STATIC_ASSERT((int(internal::traits<Derived>::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \
+                          YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT)
+
+namespace Eigen { 
+
+/** \class MapBase
+  * \ingroup Core_Module
+  *
+  * \brief Base class for Map and Block expressions with direct access
+  *
+  * \sa class Map, class Block
+  */
+template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
+  : public internal::dense_xpr_base<Derived>::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<Derived>::type Base;
+    enum {
+      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+      SizeAtCompileTime = Base::SizeAtCompileTime
+    };
+
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename internal::conditional<
+                         bool(internal::is_lvalue<Derived>::value),
+                         Scalar *,
+                         const Scalar *>::type
+                     PointerType;
+
+    using Base::derived;
+//    using Base::RowsAtCompileTime;
+//    using Base::ColsAtCompileTime;
+//    using Base::SizeAtCompileTime;
+    using Base::MaxRowsAtCompileTime;
+    using Base::MaxColsAtCompileTime;
+    using Base::MaxSizeAtCompileTime;
+    using Base::IsVectorAtCompileTime;
+    using Base::Flags;
+    using Base::IsRowMajor;
+
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::coeff;
+    using Base::coeffRef;
+    using Base::lazyAssign;
+    using Base::eval;
+
+    using Base::innerStride;
+    using Base::outerStride;
+    using Base::rowStride;
+    using Base::colStride;
+
+    // bug 217 - compile error on ICC 11.1
+    using Base::operator=;
+
+    typedef typename Base::CoeffReturnType CoeffReturnType;
+
+    inline Index rows() const { return m_rows.value(); }
+    inline Index cols() const { return m_cols.value(); }
+
+    /** Returns a pointer to the first coefficient of the matrix or vector.
+      *
+      * \note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride().
+      *
+      * \sa innerStride(), outerStride()
+      */
+    inline const Scalar* data() const { return m_data; }
+
+    inline const Scalar& coeff(Index row, Index col) const
+    {
+      return m_data[col * colStride() + row * rowStride()];
+    }
+
+    inline const Scalar& coeff(Index index) const
+    {
+      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+      return m_data[index * innerStride()];
+    }
+
+    inline const Scalar& coeffRef(Index row, Index col) const
+    {
+      return this->m_data[col * colStride() + row * rowStride()];
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+      return this->m_data[index * innerStride()];
+    }
+
+    template<int LoadMode>
+    inline PacketScalar packet(Index row, Index col) const
+    {
+      return internal::ploadt<PacketScalar, LoadMode>
+               (m_data + (col * colStride() + row * rowStride()));
+    }
+
+    template<int LoadMode>
+    inline PacketScalar packet(Index index) const
+    {
+      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+      return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
+    }
+
+    inline MapBase(PointerType data) : m_data(data), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
+    {
+      EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+      checkSanity();
+    }
+
+    inline MapBase(PointerType data, Index size)
+            : m_data(data),
+              m_rows(RowsAtCompileTime == Dynamic ? size : Index(RowsAtCompileTime)),
+              m_cols(ColsAtCompileTime == Dynamic ? size : Index(ColsAtCompileTime))
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+      eigen_assert(size >= 0);
+      eigen_assert(data == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
+      checkSanity();
+    }
+
+    inline MapBase(PointerType data, Index rows, Index cols)
+            : m_data(data), m_rows(rows), m_cols(cols)
+    {
+      eigen_assert( (data == 0)
+              || (   rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
+                  && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)));
+      checkSanity();
+    }
+
+  protected:
+
+    void checkSanity() const
+    {
+      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(internal::traits<Derived>::Flags&PacketAccessBit,
+                                        internal::inner_stride_at_compile_time<Derived>::ret==1),
+                          PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
+      eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % 16) == 0)
+                   && "data is not aligned");
+    }
+
+    PointerType m_data;
+    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
+    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
+};
+
+template<typename Derived> class MapBase<Derived, WriteAccessors>
+  : public MapBase<Derived, ReadOnlyAccessors>
+{
+  public:
+
+    typedef MapBase<Derived, ReadOnlyAccessors> Base;
+
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::PacketScalar PacketScalar;
+    typedef typename Base::Index Index;
+    typedef typename Base::PointerType PointerType;
+
+    using Base::derived;
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::coeff;
+    using Base::coeffRef;
+
+    using Base::innerStride;
+    using Base::outerStride;
+    using Base::rowStride;
+    using Base::colStride;
+
+    typedef typename internal::conditional<
+                    internal::is_lvalue<Derived>::value,
+                    Scalar,
+                    const Scalar
+                  >::type ScalarWithConstIfNotLvalue;
+
+    inline const Scalar* data() const { return this->m_data; }
+    inline ScalarWithConstIfNotLvalue* data() { return this->m_data; } // no const-cast here so non-const-correct code will give a compile error
+
+    inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col)
+    {
+      return this->m_data[col * colStride() + row * rowStride()];
+    }
+
+    inline ScalarWithConstIfNotLvalue& coeffRef(Index index)
+    {
+      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+      return this->m_data[index * innerStride()];
+    }
+
+    template<int StoreMode>
+    inline void writePacket(Index row, Index col, const PacketScalar& x)
+    {
+      internal::pstoret<Scalar, PacketScalar, StoreMode>
+               (this->m_data + (col * colStride() + row * rowStride()), x);
+    }
+
+    template<int StoreMode>
+    inline void writePacket(Index index, const PacketScalar& x)
+    {
+      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+      internal::pstoret<Scalar, PacketScalar, StoreMode>
+                (this->m_data + index * innerStride(), x);
+    }
+
+    explicit inline MapBase(PointerType data) : Base(data) {}
+    inline MapBase(PointerType data, Index size) : Base(data, size) {}
+    inline MapBase(PointerType data, Index rows, Index cols) : Base(data, rows, cols) {}
+
+    Derived& operator=(const MapBase& other)
+    {
+      Base::Base::operator=(other);
+      return derived();
+    }
+
+    using Base::Base::operator=;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_MAPBASE_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/MathFunctions.h b/resources/3rdParty/eigen/Eigen/src/Core/MathFunctions.h
new file mode 100644
index 000000000..05e913f2f
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/MathFunctions.h
@@ -0,0 +1,842 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATHFUNCTIONS_H
+#define EIGEN_MATHFUNCTIONS_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal \struct global_math_functions_filtering_base
+  *
+  * What it does:
+  * Defines a typedef 'type' as follows:
+  * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then
+  *   global_math_functions_filtering_base<T>::type is a typedef for it.
+  * - otherwise, global_math_functions_filtering_base<T>::type is a typedef for T.
+  *
+  * How it's used:
+  * To allow defining the global math functions (like sin...) in certain cases, for instance for Array expressions.
+  * When you do sin(array1+array2), the object array1+array2 has a complicated expression type; all you want to know
+  * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase<Derived>.
+  * Hence we must make sure to use sin_impl<ArrayBase<Derived> > and not sin_impl<Derived>, otherwise our partial specialization
+  * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it.
+  *
+  * How it's implemented:
+  * SFINAE in the style of enable_if. Highly susceptible to breaking compilers. With GCC, it sure does work, but if you replace
+  * the typename dummy by an integer template parameter, it doesn't work anymore!
+  */
+
+template<typename T, typename dummy = void>
+struct global_math_functions_filtering_base
+{
+  typedef T type;
+};
+
+template<typename T> struct always_void { typedef void type; };
+
+template<typename T>
+struct global_math_functions_filtering_base
+  <T,
+   typename always_void<typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl>::type
+  >
+{
+  typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type;
+};
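+
+// The detection idiom above in isolation (a sketch with hypothetical types, not part of Eigen):
+//
+//   struct Plain {};
+//   struct Tagged { typedef int Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl; };
+//   // global_math_functions_filtering_base<Plain>::type   is Plain  (primary template)
+//   // global_math_functions_filtering_base<Tagged>::type  is int    (partial specialization kicks in)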
+
+#define EIGEN_MATHFUNC_IMPL(func, scalar) func##_impl<typename global_math_functions_filtering_base<scalar>::type>
+#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename func##_retval<typename global_math_functions_filtering_base<scalar>::type>::type
+
+
+/****************************************************************************
+* Implementation of real                                                 *
+****************************************************************************/
+
+template<typename Scalar>
+struct real_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x)
+  {
+    return x;
+  }
+};
+
+template<typename RealScalar>
+struct real_impl<std::complex<RealScalar> >
+{
+  static inline RealScalar run(const std::complex<RealScalar>& x)
+  {
+    using std::real;
+    return real(x);
+  }
+};
+
+template<typename Scalar>
+struct real_retval
+{
+  typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of imag                                                 *
+****************************************************************************/
+
+template<typename Scalar>
+struct imag_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar&)
+  {
+    return RealScalar(0);
+  }
+};
+
+template<typename RealScalar>
+struct imag_impl<std::complex<RealScalar> >
+{
+  static inline RealScalar run(const std::complex<RealScalar>& x)
+  {
+    using std::imag;
+    return imag(x);
+  }
+};
+
+template<typename Scalar>
+struct imag_retval
+{
+  typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of real_ref                                             *
+****************************************************************************/
+
+template<typename Scalar>
+struct real_ref_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar& run(Scalar& x)
+  {
+    return reinterpret_cast<RealScalar*>(&x)[0];
+  }
+  static inline const RealScalar& run(const Scalar& x)
+  {
+    return reinterpret_cast<const RealScalar*>(&x)[0];
+  }
+};
+
+template<typename Scalar>
+struct real_ref_retval
+{
+  typedef typename NumTraits<Scalar>::Real & type;
+};
+
+template<typename Scalar>
+inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x)
+{
+  return real_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of imag_ref                                             *
+****************************************************************************/
+
+template<typename Scalar, bool IsComplex>
+struct imag_ref_default_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar& run(Scalar& x)
+  {
+    return reinterpret_cast<RealScalar*>(&x)[1];
+  }
+  static inline const RealScalar& run(const Scalar& x)
+  {
+    return reinterpret_cast<RealScalar*>(&x)[1];
+  }
+};
+
+template<typename Scalar>
+struct imag_ref_default_impl<Scalar, false>
+{
+  static inline Scalar run(Scalar&)
+  {
+    return Scalar(0);
+  }
+  static inline const Scalar run(const Scalar&)
+  {
+    return Scalar(0);
+  }
+};
+
+template<typename Scalar>
+struct imag_ref_impl : imag_ref_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {};
+
+template<typename Scalar>
+struct imag_ref_retval
+{
+  typedef typename NumTraits<Scalar>::Real & type;
+};
+
+template<typename Scalar>
+inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x)
+{
+  return imag_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of conj                                                 *
+****************************************************************************/
+
+template<typename Scalar>
+struct conj_impl
+{
+  static inline Scalar run(const Scalar& x)
+  {
+    return x;
+  }
+};
+
+template<typename RealScalar>
+struct conj_impl<std::complex<RealScalar> >
+{
+  static inline std::complex<RealScalar> run(const std::complex<RealScalar>& x)
+  {
+    using std::conj;
+    return conj(x);
+  }
+};
+
+template<typename Scalar>
+struct conj_retval
+{
+  typedef Scalar type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of abs                                                  *
+****************************************************************************/
+
+template<typename Scalar>
+struct abs_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x)
+  {
+    using std::abs;
+    return abs(x);
+  }
+};
+
+template<typename Scalar>
+struct abs_retval
+{
+  typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(abs, Scalar) abs(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(abs, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of abs2                                                 *
+****************************************************************************/
+
+template<typename Scalar>
+struct abs2_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x)
+  {
+    return x*x;
+  }
+};
+
+template<typename RealScalar>
+struct abs2_impl<std::complex<RealScalar> >
+{
+  static inline RealScalar run(const std::complex<RealScalar>& x)
+  {
+    return real(x)*real(x) + imag(x)*imag(x);
+  }
+};
+
+template<typename Scalar>
+struct abs2_retval
+{
+  typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of norm1                                                *
+****************************************************************************/
+
+template<typename Scalar, bool IsComplex>
+struct norm1_default_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x)
+  {
+    return abs(real(x)) + abs(imag(x));
+  }
+};
+
+template<typename Scalar>
+struct norm1_default_impl<Scalar, false>
+{
+  static inline Scalar run(const Scalar& x)
+  {
+    return abs(x);
+  }
+};
+
+template<typename Scalar>
+struct norm1_impl : norm1_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {};
+
+template<typename Scalar>
+struct norm1_retval
+{
+  typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of hypot                                                *
+****************************************************************************/
+
+template<typename Scalar>
+struct hypot_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x, const Scalar& y)
+  {
+    using std::max;
+    using std::min;
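+    // Rescale by the larger magnitude so the squared ratio stays in [0, 1];
+    // this avoids the overflow/underflow a naive sqrt(x*x + y*y) can suffer.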
+    RealScalar _x = abs(x);
+    RealScalar _y = abs(y);
+    RealScalar p = (max)(_x, _y);
+    RealScalar q = (min)(_x, _y);
+    if(p==RealScalar(0)) return RealScalar(0); // if both inputs are zero, return 0 instead of computing 0/0 below
+    RealScalar qp = q/p;
+    return p * sqrt(RealScalar(1) + qp*qp);
+  }
+};
+
+template<typename Scalar>
+struct hypot_retval
+{
+  typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
+}
+
+/****************************************************************************
+* Implementation of cast                                                 *
+****************************************************************************/
+
+template<typename OldType, typename NewType>
+struct cast_impl
+{
+  static inline NewType run(const OldType& x)
+  {
+    return static_cast<NewType>(x);
+  }
+};
+
+// here, for once, we're plainly returning NewType: we don't want cast to do weird things.
+
+template<typename OldType, typename NewType>
+inline NewType cast(const OldType& x)
+{
+  return cast_impl<OldType, NewType>::run(x);
+}
+
+/****************************************************************************
+* Implementation of sqrt                                                 *
+****************************************************************************/
+
+template<typename Scalar, bool IsInteger>
+struct sqrt_default_impl
+{
+  static inline Scalar run(const Scalar& x)
+  {
+    using std::sqrt;
+    return sqrt(x);
+  }
+};
+
+template<typename Scalar>
+struct sqrt_default_impl<Scalar, true>
+{
+  static inline Scalar run(const Scalar&)
+  {
+#ifdef EIGEN2_SUPPORT
+    eigen_assert(!NumTraits<Scalar>::IsInteger);
+#else
+    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
+#endif
+    return Scalar(0);
+  }
+};
+
+template<typename Scalar>
+struct sqrt_impl : sqrt_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar>
+struct sqrt_retval
+{
+  typedef Scalar type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of standard unary real functions (exp, log, sin, cos, ...) *
+****************************************************************************/
+
+// This macro instantiates all the template machinery that is common to the standard unary real functions.
+#define EIGEN_MATHFUNC_STANDARD_REAL_UNARY(NAME) \
+  template<typename Scalar, bool IsInteger> struct NAME##_default_impl {            \
+    static inline Scalar run(const Scalar& x) { using std::NAME; return NAME(x); }  \
+  };                                                                                \
+  template<typename Scalar> struct NAME##_default_impl<Scalar, true> {              \
+    static inline Scalar run(const Scalar&) {                                       \
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)                                       \
+      return Scalar(0);                                                             \
+    }                                                                               \
+  };                                                                                \
+  template<typename Scalar> struct NAME##_impl                                      \
+    : NAME##_default_impl<Scalar, NumTraits<Scalar>::IsInteger>                     \
+  {};                                                                               \
+  template<typename Scalar> struct NAME##_retval { typedef Scalar type; };          \
+  template<typename Scalar>                                                         \
+  inline EIGEN_MATHFUNC_RETVAL(NAME, Scalar) NAME(const Scalar& x) {                \
+    return EIGEN_MATHFUNC_IMPL(NAME, Scalar)::run(x);                               \
+  }
+
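+// For example, EIGEN_MATHFUNC_STANDARD_REAL_UNARY(exp) defines exp_impl and
+// exp_retval plus a free internal::exp() that forwards to std::exp, and that
+// triggers a static assertion when instantiated with an integer scalar type.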
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(exp)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(log)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(sin)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(cos)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(tan)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(asin)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(acos)
+
+/****************************************************************************
+* Implementation of atan2                                                *
+****************************************************************************/
+
+template<typename Scalar, bool IsInteger>
+struct atan2_default_impl
+{
+  typedef Scalar retval;
+  static inline Scalar run(const Scalar& x, const Scalar& y)
+  {
+    using std::atan2;
+    return atan2(x, y);
+  }
+};
+
+template<typename Scalar>
+struct atan2_default_impl<Scalar, true>
+{
+  static inline Scalar run(const Scalar&, const Scalar&)
+  {
+    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
+    return Scalar(0);
+  }
+};
+
+template<typename Scalar>
+struct atan2_impl : atan2_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar>
+struct atan2_retval
+{
+  typedef Scalar type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(atan2, Scalar) atan2(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(atan2, Scalar)::run(x, y);
+}
+
+/****************************************************************************
+* Implementation of pow                                                  *
+****************************************************************************/
+
+template<typename Scalar, bool IsInteger>
+struct pow_default_impl
+{
+  typedef Scalar retval;
+  static inline Scalar run(const Scalar& x, const Scalar& y)
+  {
+    using std::pow;
+    return pow(x, y);
+  }
+};
+
+template<typename Scalar>
+struct pow_default_impl<Scalar, true>
+{
+  static inline Scalar run(Scalar x, Scalar y)
+  {
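+    // Integer exponent: exponentiation by squaring, O(log y) multiplications.
+    // Negative exponents for signed integer types are rejected by the assert below.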
+    Scalar res(1);
+    eigen_assert(!NumTraits<Scalar>::IsSigned || y >= 0);
+    if(y & 1) res *= x;
+    y >>= 1;
+    while(y)
+    {
+      x *= x;
+      if(y&1) res *= x;
+      y >>= 1;
+    }
+    return res;
+  }
+};
+
+template<typename Scalar>
+struct pow_impl : pow_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar>
+struct pow_retval
+{
+  typedef Scalar type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y);
+}
+
+/****************************************************************************
+* Implementation of random                                               *
+****************************************************************************/
+
+template<typename Scalar,
+         bool IsComplex,
+         bool IsInteger>
+struct random_default_impl {};
+
+template<typename Scalar>
+struct random_impl : random_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar>
+struct random_retval
+{
+  typedef Scalar type;
+};
+
+template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y);
+template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random();
+
+template<typename Scalar>
+struct random_default_impl<Scalar, false, false>
+{
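+  // Real (non-integer) case: map std::rand()/RAND_MAX linearly onto [x, y].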
+  static inline Scalar run(const Scalar& x, const Scalar& y)
+  {
+    return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX);
+  }
+  static inline Scalar run()
+  {
+    return run(Scalar(NumTraits<Scalar>::IsSigned ? -1 : 0), Scalar(1));
+  }
+};
+
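+// Compile-time binary search for floor(log2(n)): the selector below decides,
+// for the current [lower, upper] bracket, whether to terminate or to recurse
+// into the half that contains n. It is used to determine how many random bits
+// std::rand() provides in the integer specialization of random() further down.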
+enum {
+  floor_log2_terminate,
+  floor_log2_move_up,
+  floor_log2_move_down,
+  floor_log2_bogus
+};
+
+template<unsigned int n, int lower, int upper> struct floor_log2_selector
+{
+  enum { middle = (lower + upper) / 2,
+         value = (upper <= lower + 1) ? int(floor_log2_terminate)
+               : (n < (1 << middle)) ? int(floor_log2_move_down)
+               : (n==0) ? int(floor_log2_bogus)
+               : int(floor_log2_move_up)
+  };
+};
+
+template<unsigned int n,
+         int lower = 0,
+         int upper = sizeof(unsigned int) * CHAR_BIT - 1,
+         int selector = floor_log2_selector<n, lower, upper>::value>
+struct floor_log2 {};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_move_down>
+{
+  enum { value = floor_log2<n, lower, floor_log2_selector<n, lower, upper>::middle>::value };
+};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_move_up>
+{
+  enum { value = floor_log2<n, floor_log2_selector<n, lower, upper>::middle, upper>::value };
+};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_terminate>
+{
+  enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? lower+1 : lower };
+};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_bogus>
+{
+  // no value, error at compile time
+};
+
+template<typename Scalar>
+struct random_default_impl<Scalar, false, true>
+{
+  typedef typename NumTraits<Scalar>::NonInteger NonInteger;
+
+  static inline Scalar run(const Scalar& x, const Scalar& y)
+  {
+    return x + Scalar((NonInteger(y)-x+1) * std::rand() / (RAND_MAX + NonInteger(1)));
+  }
+
+  static inline Scalar run()
+  {
+#ifdef EIGEN_MAKING_DOCS
+    return run(Scalar(NumTraits<Scalar>::IsSigned ? -10 : 0), Scalar(10));
+#else
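+    // Use floor_log2 to find how many random bits std::rand() provides, drop
+    // any bits that do not fit into Scalar, and recenter signed types so the
+    // result is roughly symmetric around zero.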
+    enum { rand_bits = floor_log2<(unsigned int)(RAND_MAX)+1>::value,
+           scalar_bits = sizeof(Scalar) * CHAR_BIT,
+           shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits))
+    };
+    Scalar x = Scalar(std::rand() >> shift);
+    Scalar offset = NumTraits<Scalar>::IsSigned ? Scalar(1 << (rand_bits-1)) : Scalar(0);
+    return x - offset;
+#endif
+  }
+};
+
+template<typename Scalar>
+struct random_default_impl<Scalar, true, false>
+{
+  static inline Scalar run(const Scalar& x, const Scalar& y)
+  {
+    return Scalar(random(real(x), real(y)),
+                  random(imag(x), imag(y)));
+  }
+  static inline Scalar run()
+  {
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    return Scalar(random<RealScalar>(), random<RealScalar>());
+  }
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
+{
+  return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
+}
+
+/****************************************************************************
+* Implementation of fuzzy comparisons                                       *
+****************************************************************************/
+
+template<typename Scalar,
+         bool IsComplex,
+         bool IsInteger>
+struct scalar_fuzzy_default_impl {};
+
+template<typename Scalar>
+struct scalar_fuzzy_default_impl<Scalar, false, false>
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  template<typename OtherScalar>
+  static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
+  {
+    return abs(x) <= abs(y) * prec;
+  }
+  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
+  {
+    using std::min;
+    return abs(x - y) <= (min)(abs(x), abs(y)) * prec;
+  }
+  static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec)
+  {
+    return x <= y || isApprox(x, y, prec);
+  }
+};
+
+template<typename Scalar>
+struct scalar_fuzzy_default_impl<Scalar, false, true>
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  template<typename OtherScalar>
+  static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&)
+  {
+    return x == Scalar(0);
+  }
+  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&)
+  {
+    return x == y;
+  }
+  static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&)
+  {
+    return x <= y;
+  }
+};
+
+template<typename Scalar>
+struct scalar_fuzzy_default_impl<Scalar, true, false>
+{
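+  // Complex case: compare squared magnitudes via abs2() so no square root is
+  // needed; the precision therefore appears squared (prec*prec) below.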
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  template<typename OtherScalar>
+  static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
+  {
+    return abs2(x) <= abs2(y) * prec * prec;
+  }
+  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
+  {
+    using std::min;
+    return abs2(x - y) <= (min)(abs2(x), abs2(y)) * prec * prec;
+  }
+};
+
+template<typename Scalar>
+struct scalar_fuzzy_impl : scalar_fuzzy_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar, typename OtherScalar>
+inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y,
+                                   typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+  return scalar_fuzzy_impl<Scalar>::template isMuchSmallerThan<OtherScalar>(x, y, precision);
+}
+
+template<typename Scalar>
+inline bool isApprox(const Scalar& x, const Scalar& y,
+                          typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+  return scalar_fuzzy_impl<Scalar>::isApprox(x, y, precision);
+}
+
+template<typename Scalar>
+inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y,
+                                    typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+  return scalar_fuzzy_impl<Scalar>::isApproxOrLessThan(x, y, precision);
+}
+
+/******************************************
+***  The special case of the  bool type ***
+******************************************/
+
+template<> struct random_impl<bool>
+{
+  static inline bool run()
+  {
+    return random<int>(0,1) != 0;
+  }
+};
+
+template<> struct scalar_fuzzy_impl<bool>
+{
+  typedef bool RealScalar;
+  
+  template<typename OtherScalar>
+  static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&)
+  {
+    return !x;
+  }
+  
+  static inline bool isApprox(bool x, bool y, bool)
+  {
+    return x == y;
+  }
+
+  static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&)
+  {
+    return (!x) || y;
+  }
+  
+};
+
+/****************************************************************************
+* Special functions                                                          *
+****************************************************************************/
+
+// std::isfinite is non-standard, so let's define our own version,
+// even though it is not very efficient.
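+// (NaN fails both comparisons and infinities fail one of them, so they are
+// correctly reported as not finite.)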
+template<typename T> bool (isfinite)(const T& x)
+{
+  return x<NumTraits<T>::highest() && x>NumTraits<T>::lowest();
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATHFUNCTIONS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Matrix.h b/resources/3rdParty/eigen/Eigen/src/Core/Matrix.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/Matrix.h
rename to resources/3rdParty/eigen/Eigen/src/Core/Matrix.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/MatrixBase.h b/resources/3rdParty/eigen/Eigen/src/Core/MatrixBase.h
new file mode 100644
index 000000000..36ea2cee8
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/MatrixBase.h
@@ -0,0 +1,511 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIXBASE_H
+#define EIGEN_MATRIXBASE_H
+
+namespace Eigen {
+
+/** \class MatrixBase
+  * \ingroup Core_Module
+  *
+  * \brief Base class for all dense matrices, vectors, and expressions
+  *
+  * This class is the base that is inherited by all matrix, vector, and related expression
+  * types. Most of the Eigen API is contained in this class and its base classes. Other important
+  * classes for the Eigen API are Matrix and VectorwiseOp.
+  *
+  * Note that some methods are defined in other modules such as the \ref LU_Module LU module
+  * for all functions related to matrix inversions.
+  *
+  * \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc.
+  *
+  * When writing a function taking Eigen objects as argument, if you want your function
+  * to take as argument any matrix, vector, or expression, just let it take a
+  * MatrixBase argument. As an example, here is a function printFirstRow which, given
+  * a matrix, vector, or expression \a x, prints the first row of \a x.
+  *
+  * \code
+    template<typename Derived>
+    void printFirstRow(const Eigen::MatrixBase<Derived>& x)
+    {
+      cout << x.row(0) << endl;
+    }
+  * \endcode
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN.
+  *
+  * \sa \ref TopicClassHierarchy
+  */
+template<typename Derived> class MatrixBase
+  : public DenseBase<Derived>
+{
+  public:
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef MatrixBase StorageBaseType;
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    typedef DenseBase<Derived> Base;
+    using Base::RowsAtCompileTime;
+    using Base::ColsAtCompileTime;
+    using Base::SizeAtCompileTime;
+    using Base::MaxRowsAtCompileTime;
+    using Base::MaxColsAtCompileTime;
+    using Base::MaxSizeAtCompileTime;
+    using Base::IsVectorAtCompileTime;
+    using Base::Flags;
+    using Base::CoeffReadCost;
+
+    using Base::derived;
+    using Base::const_cast_derived;
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::coeff;
+    using Base::coeffRef;
+    using Base::lazyAssign;
+    using Base::eval;
+    using Base::operator+=;
+    using Base::operator-=;
+    using Base::operator*=;
+    using Base::operator/=;
+
+    typedef typename Base::CoeffReturnType CoeffReturnType;
+    typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType;
+    typedef typename Base::RowXpr RowXpr;
+    typedef typename Base::ColXpr ColXpr;
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** type of the equivalent square matrix */
+    typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
+                          EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+    /** \returns the size of the main diagonal, which is min(rows(),cols()).
+      * \sa rows(), cols(), SizeAtCompileTime. */
+    inline Index diagonalSize() const { return (std::min)(rows(),cols()); }
+
+    /** \brief The plain matrix type corresponding to this expression.
+      *
+      * This is not necessarily exactly the return type of eval(). In the case of plain matrices,
+      * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed
+      * that the return type of eval() is either PlainObject or const PlainObject&.
+      */
+    typedef Matrix<typename internal::traits<Derived>::Scalar,
+                internal::traits<Derived>::RowsAtCompileTime,
+                internal::traits<Derived>::ColsAtCompileTime,
+                AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),
+                internal::traits<Derived>::MaxRowsAtCompileTime,
+                internal::traits<Derived>::MaxColsAtCompileTime
+          > PlainObject;
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal Represents a matrix with all coefficients equal to one another*/
+    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Derived> ConstantReturnType;
+    /** \internal the return type of MatrixBase::adjoint() */
+    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+                        CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>,
+                        ConstTransposeReturnType
+                     >::type AdjointReturnType;
+    /** \internal Return type of eigenvalues() */
+    typedef Matrix<std::complex<RealScalar>, internal::traits<Derived>::ColsAtCompileTime, 1, ColMajor> EigenvaluesReturnType;
+    /** \internal the return type of identity */
+    typedef CwiseNullaryOp<internal::scalar_identity_op<Scalar>,Derived> IdentityReturnType;
+    /** \internal the return type of unit vectors */
+    typedef Block<const CwiseNullaryOp<internal::scalar_identity_op<Scalar>, SquareMatrixType>,
+                  internal::traits<Derived>::RowsAtCompileTime,
+                  internal::traits<Derived>::ColsAtCompileTime> BasisReturnType;
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase
+#   include "../plugins/CommonCwiseUnaryOps.h"
+#   include "../plugins/CommonCwiseBinaryOps.h"
+#   include "../plugins/MatrixCwiseUnaryOps.h"
+#   include "../plugins/MatrixCwiseBinaryOps.h"
+#   ifdef EIGEN_MATRIXBASE_PLUGIN
+#     include EIGEN_MATRIXBASE_PLUGIN
+#   endif
+#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+    /** Special case of the template operator=, in order to prevent the compiler
+      * from generating a default operator= (issue hit with g++ 4.1)
+      */
+    Derived& operator=(const MatrixBase& other);
+
+    // We cannot inherit here via Base::operator= since it is causing
+    // trouble with MSVC.
+
+    template <typename OtherDerived>
+    Derived& operator=(const DenseBase<OtherDerived>& other);
+
+    template <typename OtherDerived>
+    Derived& operator=(const EigenBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    Derived& operator=(const ReturnByValue<OtherDerived>& other);
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+    template<typename OtherDerived>
+    Derived& operator+=(const MatrixBase<OtherDerived>& other);
+    template<typename OtherDerived>
+    Derived& operator-=(const MatrixBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    const typename ProductReturnType<Derived,OtherDerived>::Type
+    operator*(const MatrixBase<OtherDerived> &other) const;
+
+    template<typename OtherDerived>
+    const typename LazyProductReturnType<Derived,OtherDerived>::Type
+    lazyProduct(const MatrixBase<OtherDerived> &other) const;
+
+    template<typename OtherDerived>
+    Derived& operator*=(const EigenBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    void applyOnTheLeft(const EigenBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    void applyOnTheRight(const EigenBase<OtherDerived>& other);
+
+    template<typename DiagonalDerived>
+    const DiagonalProduct<Derived, DiagonalDerived, OnTheRight>
+    operator*(const DiagonalBase<DiagonalDerived> &diagonal) const;
+
+    template<typename OtherDerived>
+    typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
+    dot(const MatrixBase<OtherDerived>& other) const;
+
+    #ifdef EIGEN2_SUPPORT
+      template<typename OtherDerived>
+      Scalar eigen2_dot(const MatrixBase<OtherDerived>& other) const;
+    #endif
+
+    RealScalar squaredNorm() const;
+    RealScalar norm() const;
+    RealScalar stableNorm() const;
+    RealScalar blueNorm() const;
+    RealScalar hypotNorm() const;
+    const PlainObject normalized() const;
+    void normalize();
+
+    const AdjointReturnType adjoint() const;
+    void adjointInPlace();
+
+    typedef Diagonal<Derived> DiagonalReturnType;
+    DiagonalReturnType diagonal();
+    typedef const Diagonal<const Derived> ConstDiagonalReturnType;
+    const ConstDiagonalReturnType diagonal() const;
+
+    template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };
+    template<int Index> struct ConstDiagonalIndexReturnType { typedef const Diagonal<const Derived,Index> Type; };
+
+    template<int Index> typename DiagonalIndexReturnType<Index>::Type diagonal();
+    template<int Index> typename ConstDiagonalIndexReturnType<Index>::Type diagonal() const;
+
+    // Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
+    // On the other hand they confuse MSVC8...
+    #if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later
+    typename MatrixBase::template DiagonalIndexReturnType<Dynamic>::Type diagonal(Index index);
+    typename MatrixBase::template ConstDiagonalIndexReturnType<Dynamic>::Type diagonal(Index index) const;
+    #else
+    typename DiagonalIndexReturnType<Dynamic>::Type diagonal(Index index);
+    typename ConstDiagonalIndexReturnType<Dynamic>::Type diagonal(Index index) const;
+    #endif
+
+    #ifdef EIGEN2_SUPPORT
+    template<unsigned int Mode> typename internal::eigen2_part_return_type<Derived, Mode>::type part();
+    template<unsigned int Mode> const typename internal::eigen2_part_return_type<Derived, Mode>::type part() const;
+    
+    // huuuge hack. make Eigen2's matrix.part<Diagonal>() work in eigen3. Problem: Diagonal is now a class template instead
+    // of an integer constant. Solution: overload the part() method template wrt template parameters list.
+    template<template<typename T, int N> class U>
+    const DiagonalWrapper<ConstDiagonalReturnType> part() const
+    { return diagonal().asDiagonal(); }
+    #endif // EIGEN2_SUPPORT
+
+    template<unsigned int Mode> struct TriangularViewReturnType { typedef TriangularView<Derived, Mode> Type; };
+    template<unsigned int Mode> struct ConstTriangularViewReturnType { typedef const TriangularView<const Derived, Mode> Type; };
+
+    template<unsigned int Mode> typename TriangularViewReturnType<Mode>::Type triangularView();
+    template<unsigned int Mode> typename ConstTriangularViewReturnType<Mode>::Type triangularView() const;
+
+    template<unsigned int UpLo> struct SelfAdjointViewReturnType { typedef SelfAdjointView<Derived, UpLo> Type; };
+    template<unsigned int UpLo> struct ConstSelfAdjointViewReturnType { typedef const SelfAdjointView<const Derived, UpLo> Type; };
+
+    template<unsigned int UpLo> typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();
+    template<unsigned int UpLo> typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
+
+    const SparseView<Derived> sparseView(const Scalar& m_reference = Scalar(0),
+                                         typename NumTraits<Scalar>::Real m_epsilon = NumTraits<Scalar>::dummy_precision()) const;
+    static const IdentityReturnType Identity();
+    static const IdentityReturnType Identity(Index rows, Index cols);
+    static const BasisReturnType Unit(Index size, Index i);
+    static const BasisReturnType Unit(Index i);
+    static const BasisReturnType UnitX();
+    static const BasisReturnType UnitY();
+    static const BasisReturnType UnitZ();
+    static const BasisReturnType UnitW();
+
+    const DiagonalWrapper<const Derived> asDiagonal() const;
+    const PermutationWrapper<const Derived> asPermutation() const;
+
+    Derived& setIdentity();
+    Derived& setIdentity(Index rows, Index cols);
+
+    bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+    bool isUpperTriangular(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isLowerTriangular(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+    template<typename OtherDerived>
+    bool isOrthogonal(const MatrixBase<OtherDerived>& other,
+                      RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isUnitary(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+    /** \returns true if all coefficients of \c *this and \a other are exactly equal.
+      * \warning When using floating point scalar values you probably should rather use a
+      *          fuzzy comparison such as isApprox()
+      * \sa isApprox(), operator!= */
+    template<typename OtherDerived>
+    inline bool operator==(const MatrixBase<OtherDerived>& other) const
+    { return cwiseEqual(other).all(); }
+
+    /** \returns true if at least one pair of coefficients of \c *this and \a other is not exactly equal.
+      * \warning When using floating point scalar values you probably should rather use a
+      *          fuzzy comparison such as isApprox()
+      * \sa isApprox(), operator== */
+    template<typename OtherDerived>
+    inline bool operator!=(const MatrixBase<OtherDerived>& other) const
+    { return cwiseNotEqual(other).any(); }
+
+    NoAlias<Derived,Eigen::MatrixBase > noalias();
+
+    inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
+    inline ForceAlignedAccess<Derived> forceAlignedAccess();
+    template<bool Enable> inline typename internal::add_const_on_value_type<typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type>::type forceAlignedAccessIf() const;
+    template<bool Enable> inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();
+
+    Scalar trace() const;
+
+/////////// Array module ///////////
+
+    template<int p> RealScalar lpNorm() const;
+
+    MatrixBase<Derived>& matrix() { return *this; }
+    const MatrixBase<Derived>& matrix() const { return *this; }
+
+    /** \returns an \link ArrayBase Array \endlink expression of this matrix
+      * \sa ArrayBase::matrix() */
+    ArrayWrapper<Derived> array() { return derived(); }
+    const ArrayWrapper<const Derived> array() const { return derived(); }
+
+/////////// LU module ///////////
+
+    const FullPivLU<PlainObject> fullPivLu() const;
+    const PartialPivLU<PlainObject> partialPivLu() const;
+
+    #if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
+    const LU<PlainObject> lu() const;
+    #endif
+
+    #ifdef EIGEN2_SUPPORT
+    const LU<PlainObject> eigen2_lu() const;
+    #endif
+
+    #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+    const PartialPivLU<PlainObject> lu() const;
+    #endif
+    
+    #ifdef EIGEN2_SUPPORT
+    template<typename ResultType>
+    void computeInverse(MatrixBase<ResultType> *result) const {
+      *result = this->inverse();
+    }
+    #endif
+
+    const internal::inverse_impl<Derived> inverse() const;
+    template<typename ResultType>
+    void computeInverseAndDetWithCheck(
+      ResultType& inverse,
+      typename ResultType::Scalar& determinant,
+      bool& invertible,
+      const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
+    ) const;
+    template<typename ResultType>
+    void computeInverseWithCheck(
+      ResultType& inverse,
+      bool& invertible,
+      const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
+    ) const;
+    Scalar determinant() const;
+
+/////////// Cholesky module ///////////
+
+    const LLT<PlainObject>  llt() const;
+    const LDLT<PlainObject> ldlt() const;
+
+/////////// QR module ///////////
+
+    const HouseholderQR<PlainObject> householderQr() const;
+    const ColPivHouseholderQR<PlainObject> colPivHouseholderQr() const;
+    const FullPivHouseholderQR<PlainObject> fullPivHouseholderQr() const;
+    
+    #ifdef EIGEN2_SUPPORT
+    const QR<PlainObject> qr() const;
+    #endif
+
+    EigenvaluesReturnType eigenvalues() const;
+    RealScalar operatorNorm() const;
+
+/////////// SVD module ///////////
+
+    JacobiSVD<PlainObject> jacobiSvd(unsigned int computationOptions = 0) const;
+
+    #ifdef EIGEN2_SUPPORT
+    SVD<PlainObject> svd() const;
+    #endif
+
+/////////// Geometry module ///////////
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /// \internal helper struct to form the return type of the cross product
+    template<typename OtherDerived> struct cross_product_return_type {
+      typedef typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType Scalar;
+      typedef Matrix<Scalar,MatrixBase::RowsAtCompileTime,MatrixBase::ColsAtCompileTime> type;
+    };
+    #endif // EIGEN_PARSED_BY_DOXYGEN
+    template<typename OtherDerived>
+    typename cross_product_return_type<OtherDerived>::type
+    cross(const MatrixBase<OtherDerived>& other) const;
+    template<typename OtherDerived>
+    PlainObject cross3(const MatrixBase<OtherDerived>& other) const;
+    PlainObject unitOrthogonal(void) const;
+    Matrix<Scalar,3,1> eulerAngles(Index a0, Index a1, Index a2) const;
+    
+    #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+    ScalarMultipleReturnType operator*(const UniformScaling<Scalar>& s) const;
+    // put this as a separate enum value to work around a possible GCC 4.3 bug (?)
+    enum { HomogeneousReturnTypeDirection = ColsAtCompileTime==1?Vertical:Horizontal };
+    typedef Homogeneous<Derived, HomogeneousReturnTypeDirection> HomogeneousReturnType;
+    HomogeneousReturnType homogeneous() const;
+    #endif
+    
+    enum {
+      SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1
+    };
+    typedef Block<const Derived,
+                  internal::traits<Derived>::ColsAtCompileTime==1 ? SizeMinusOne : 1,
+                  internal::traits<Derived>::ColsAtCompileTime==1 ? 1 : SizeMinusOne> ConstStartMinusOne;
+    typedef CwiseUnaryOp<internal::scalar_quotient1_op<typename internal::traits<Derived>::Scalar>,
+                const ConstStartMinusOne > HNormalizedReturnType;
+
+    const HNormalizedReturnType hnormalized() const;
+
+////////// Householder module ///////////
+
+    void makeHouseholderInPlace(Scalar& tau, RealScalar& beta);
+    template<typename EssentialPart>
+    void makeHouseholder(EssentialPart& essential,
+                         Scalar& tau, RealScalar& beta) const;
+    template<typename EssentialPart>
+    void applyHouseholderOnTheLeft(const EssentialPart& essential,
+                                   const Scalar& tau,
+                                   Scalar* workspace);
+    template<typename EssentialPart>
+    void applyHouseholderOnTheRight(const EssentialPart& essential,
+                                    const Scalar& tau,
+                                    Scalar* workspace);
+
+///////// Jacobi module /////////
+
+    template<typename OtherScalar>
+    void applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j);
+    template<typename OtherScalar>
+    void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);
+
+///////// MatrixFunctions module /////////
+
+    typedef typename internal::stem_function<Scalar>::type StemFunction;
+    const MatrixExponentialReturnValue<Derived> exp() const;
+    const MatrixFunctionReturnValue<Derived> matrixFunction(StemFunction f) const;
+    const MatrixFunctionReturnValue<Derived> cosh() const;
+    const MatrixFunctionReturnValue<Derived> sinh() const;
+    const MatrixFunctionReturnValue<Derived> cos() const;
+    const MatrixFunctionReturnValue<Derived> sin() const;
+    const MatrixSquareRootReturnValue<Derived> sqrt() const;
+    const MatrixLogarithmReturnValue<Derived> log() const;
+
+#ifdef EIGEN2_SUPPORT
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    Derived& operator+=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+                                      EvalBeforeAssigningBit>& other);
+
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    Derived& operator-=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+                                      EvalBeforeAssigningBit>& other);
+
+    /** \deprecated because .lazy() is deprecated
+      * Overloaded for cache friendly product evaluation */
+    template<typename OtherDerived>
+    Derived& lazyAssign(const Flagged<OtherDerived, 0, EvalBeforeAssigningBit>& other)
+    { return lazyAssign(other._expression()); }
+
+    template<unsigned int Added>
+    const Flagged<Derived, Added, 0> marked() const;
+    const Flagged<Derived, 0, EvalBeforeAssigningBit> lazy() const;
+
+    inline const Cwise<Derived> cwise() const;
+    inline Cwise<Derived> cwise();
+
+    VectorBlock<Derived> start(Index size);
+    const VectorBlock<const Derived> start(Index size) const;
+    VectorBlock<Derived> end(Index size);
+    const VectorBlock<const Derived> end(Index size) const;
+    template<int Size> VectorBlock<Derived,Size> start();
+    template<int Size> const VectorBlock<const Derived,Size> start() const;
+    template<int Size> VectorBlock<Derived,Size> end();
+    template<int Size> const VectorBlock<const Derived,Size> end() const;
+
+    Minor<Derived> minor(Index row, Index col);
+    const Minor<Derived> minor(Index row, Index col) const;
+#endif
+
+  protected:
+    MatrixBase() : Base() {}
+
+  private:
+    explicit MatrixBase(int);
+    MatrixBase(int,int);
+    template<typename OtherDerived> explicit MatrixBase(const MatrixBase<OtherDerived>&);
+  protected:
+    // mixing arrays and matrices is not legal
+    template<typename OtherDerived> Derived& operator+=(const ArrayBase<OtherDerived>& )
+    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
+    // mixing arrays and matrices is not legal
+    template<typename OtherDerived> Derived& operator-=(const ArrayBase<OtherDerived>& )
+    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIXBASE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/NestByValue.h b/resources/3rdParty/eigen/Eigen/src/Core/NestByValue.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/NestByValue.h
rename to resources/3rdParty/eigen/Eigen/src/Core/NestByValue.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/NoAlias.h b/resources/3rdParty/eigen/Eigen/src/Core/NoAlias.h
new file mode 100644
index 000000000..ecb3fa285
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/NoAlias.h
@@ -0,0 +1,125 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_NOALIAS_H
+#define EIGEN_NOALIAS_H
+
+namespace Eigen {
+
+/** \class NoAlias
+  * \ingroup Core_Module
+  *
+  * \brief Pseudo expression providing an operator = assuming no aliasing
+  *
+  * \param ExpressionType the type of the object on which to do the lazy assignment
+  *
+  * This class represents an expression with special assignment operators
+  * assuming no aliasing between the target expression and the source expression.
+  * More precisely, it allows bypassing the EvalBeforeAssignBit flag of the source expression.
+  * It is the return type of MatrixBase::noalias()
+  * and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::noalias()
+  */
+template<typename ExpressionType, template <typename> class StorageBase>
+class NoAlias
+{
+    typedef typename ExpressionType::Scalar Scalar;
+  public:
+    NoAlias(ExpressionType& expression) : m_expression(expression) {}
+
+    /** Behaves like MatrixBase::lazyAssign(other)
+      * \sa MatrixBase::lazyAssign() */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other)
+    { return internal::assign_selector<ExpressionType,OtherDerived,false>::run(m_expression,other.derived()); }
+
+    /** \sa MatrixBase::operator+= */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other)
+    {
+      typedef SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, ExpressionType, OtherDerived> SelfAdder;
+      SelfAdder tmp(m_expression);
+      typedef typename internal::nested<OtherDerived>::type OtherDerivedNested;
+      typedef typename internal::remove_all<OtherDerivedNested>::type _OtherDerivedNested;
+      internal::assign_selector<SelfAdder,_OtherDerivedNested,false>::run(tmp,OtherDerivedNested(other.derived()));
+      return m_expression;
+    }
+
+    /** \sa MatrixBase::operator-= */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other)
+    {
+      typedef SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, ExpressionType, OtherDerived> SelfAdder;
+      SelfAdder tmp(m_expression);
+      typedef typename internal::nested<OtherDerived>::type OtherDerivedNested;
+      typedef typename internal::remove_all<OtherDerivedNested>::type _OtherDerivedNested;
+      internal::assign_selector<SelfAdder,_OtherDerivedNested,false>::run(tmp,OtherDerivedNested(other.derived()));
+      return m_expression;
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE ExpressionType& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+    { other.derived().addTo(m_expression); return m_expression; }
+
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE ExpressionType& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+    { other.derived().subTo(m_expression); return m_expression; }
+
+    template<typename Lhs, typename Rhs, int NestingFlags>
+    EIGEN_STRONG_INLINE ExpressionType& operator+=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
+    { return m_expression.derived() += CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
+
+    template<typename Lhs, typename Rhs, int NestingFlags>
+    EIGEN_STRONG_INLINE ExpressionType& operator-=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
+    { return m_expression.derived() -= CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
+#endif
+
+  protected:
+    ExpressionType& m_expression;
+};
+
+/** \returns a pseudo expression of \c *this with an operator= assuming
+  * no aliasing between \c *this and the source expression.
+  *
+  * More precisely, noalias() allows bypassing the EvalBeforeAssignBit flag.
+  * Currently, even though several expressions may alias, only product
+  * expressions have this flag. Therefore, noalias() is only useful when
+  * the source expression contains a matrix product.
+  *
+  * Here are some examples where noalias is useful:
+  * \code
+  * D.noalias()  = A * B;
+  * D.noalias() += A.transpose() * B;
+  * D.noalias() -= 2 * A * B.adjoint();
+  * \endcode
+  *
+  * On the other hand the following example will lead to a \b wrong result:
+  * \code
+  * A.noalias() = A * B;
+  * \endcode
+  * because the result matrix A is also an operand of the matrix product. Therefore,
+  * there is no alternative to evaluating A * B in a temporary, which is the default
+  * behavior when you write:
+  * \code
+  * A = A * B;
+  * \endcode
+  *
+  * \sa class NoAlias
+  */
+template<typename Derived>
+NoAlias<Derived,MatrixBase> MatrixBase<Derived>::noalias()
+{
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_NOALIAS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/NumTraits.h b/resources/3rdParty/eigen/Eigen/src/Core/NumTraits.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/NumTraits.h
rename to resources/3rdParty/eigen/Eigen/src/Core/NumTraits.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/PermutationMatrix.h b/resources/3rdParty/eigen/Eigen/src/Core/PermutationMatrix.h
new file mode 100644
index 000000000..bc29f8142
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/PermutationMatrix.h
@@ -0,0 +1,687 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PERMUTATIONMATRIX_H
+#define EIGEN_PERMUTATIONMATRIX_H
+
+namespace Eigen { 
+
+template<int RowCol,typename IndicesType,typename MatrixType, typename StorageKind> class PermutedImpl;
+
+/** \class PermutationBase
+  * \ingroup Core_Module
+  *
+  * \brief Base class for permutations
+  *
+  * \param Derived the derived class
+  *
+  * This class is the base class for all expressions representing a permutation matrix,
+  * internally stored as a vector of integers.
+  * The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix
+  * \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have:
+  *  \f[ P_\sigma(e_i) = e_{\sigma(i)}. \f]
+  * This convention ensures that for any two permutations \f$ \sigma, \tau \f$, we have:
+  *  \f[ P_{\sigma\circ\tau} = P_\sigma P_\tau. \f]
+  *
+  * Permutation matrices are square and invertible.
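+  *
+  * For instance, if the stored indices are \c [2,0,1], the associated matrix maps
+  * \f$ e_0 \mapsto e_2 \f$, \f$ e_1 \mapsto e_0 \f$ and \f$ e_2 \mapsto e_1 \f$.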
+  *
+  * Notice that in addition to the member functions and operators listed here, there also are non-member
+  * operator* to multiply any kind of permutation object with any kind of matrix expression (MatrixBase)
+  * on either side.
+  *
+  * \sa class PermutationMatrix, class PermutationWrapper
+  */
+
+namespace internal {
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed=false>
+struct permut_matrix_product_retval;
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed=false>
+struct permut_sparsematrix_product_retval;
+enum PermPermProduct_t {PermPermProduct};
+
+} // end namespace internal
+
+template<typename Derived>
+class PermutationBase : public EigenBase<Derived>
+{
+    typedef internal::traits<Derived> Traits;
+    typedef EigenBase<Derived> Base;
+  public:
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef typename Traits::IndicesType IndicesType;
+    enum {
+      Flags = Traits::Flags,
+      CoeffReadCost = Traits::CoeffReadCost,
+      RowsAtCompileTime = Traits::RowsAtCompileTime,
+      ColsAtCompileTime = Traits::ColsAtCompileTime,
+      MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
+    };
+    typedef typename Traits::Scalar Scalar;
+    typedef typename Traits::Index Index;
+    typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime,0,MaxRowsAtCompileTime,MaxColsAtCompileTime>
+            DenseMatrixType;
+    typedef PermutationMatrix<IndicesType::SizeAtCompileTime,IndicesType::MaxSizeAtCompileTime,Index>
+            PlainPermutationType;
+    using Base::derived;
+    #endif
+
+    /** Copies the other permutation into *this */
+    template<typename OtherDerived>
+    Derived& operator=(const PermutationBase<OtherDerived>& other)
+    {
+      indices() = other.indices();
+      return derived();
+    }
+
+    /** Assignment from the Transpositions \a tr */
+    template<typename OtherDerived>
+    Derived& operator=(const TranspositionsBase<OtherDerived>& tr)
+    {
+      setIdentity(tr.size());
+      for(Index k=size()-1; k>=0; --k)
+        applyTranspositionOnTheRight(k,tr.coeff(k));
+      return derived();
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    Derived& operator=(const PermutationBase& other)
+    {
+      indices() = other.indices();
+      return derived();
+    }
+    #endif
+
+    /** \returns the number of rows */
+    inline Index rows() const { return indices().size(); }
+
+    /** \returns the number of columns */
+    inline Index cols() const { return indices().size(); }
+
+    /** \returns the size of a side of the respective square matrix, i.e., the number of indices */
+    inline Index size() const { return indices().size(); }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename DenseDerived>
+    void evalTo(MatrixBase<DenseDerived>& other) const
+    {
+      other.setZero();
+      for (int i=0; i<rows();++i)
+        other.coeffRef(indices().coeff(i),i) = typename DenseDerived::Scalar(1);
+    }
+    #endif
+
+    /** \returns a Matrix object initialized from this permutation matrix. Notice that it
+      * is inefficient to return this Matrix object by value. For efficiency, favor using
+      * the Matrix constructor taking EigenBase objects.
+      */
+    DenseMatrixType toDenseMatrix() const
+    {
+      return derived();
+    }
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return derived().indices(); }
+    /** \returns a reference to the stored array representing the permutation. */
+    IndicesType& indices() { return derived().indices(); }
+
+    /** Resizes to given size.
+      */
+    inline void resize(Index size)
+    {
+      indices().resize(size);
+    }
+
+    /** Sets *this to be the identity permutation matrix */
+    void setIdentity()
+    {
+      for(Index i = 0; i < size(); ++i)
+        indices().coeffRef(i) = i;
+    }
+
+    /** Sets *this to be the identity permutation matrix of given size.
+      */
+    void setIdentity(Index size)
+    {
+      resize(size);
+      setIdentity();
+    }
+
+    /** Multiplies *this by the transposition \f$(ij)\f$ on the left.
+      *
+      * \returns a reference to *this.
+      *
+      * \warning This is much slower than applyTranspositionOnTheRight(int,int):
+      * this has linear complexity and requires a lot of branching.
+      *
+      * \sa applyTranspositionOnTheRight(int,int)
+      */
+    Derived& applyTranspositionOnTheLeft(Index i, Index j)
+    {
+      eigen_assert(i>=0 && j>=0 && i<size() && j<size());
+      for(Index k = 0; k < size(); ++k)
+      {
+        if(indices().coeff(k) == i) indices().coeffRef(k) = j;
+        else if(indices().coeff(k) == j) indices().coeffRef(k) = i;
+      }
+      return derived();
+    }
+
+    /** Multiplies *this by the transposition \f$(ij)\f$ on the right.
+      *
+      * \returns a reference to *this.
+      *
+      * This is a fast operation: it simply swaps two indices.
+      *
+      * \sa applyTranspositionOnTheLeft(int,int)
+      */
+    Derived& applyTranspositionOnTheRight(Index i, Index j)
+    {
+      eigen_assert(i>=0 && j>=0 && i<size() && j<size());
+      std::swap(indices().coeffRef(i), indices().coeffRef(j));
+      return derived();
+    }
+
+    /** \returns the inverse permutation matrix.
+      *
+      * \note \note_try_to_help_rvo
+      */
+    inline Transpose<PermutationBase> inverse() const
+    { return derived(); }
+    /** \returns the transposed permutation matrix.
+      *
+      * \note \note_try_to_help_rvo
+      */
+    inline Transpose<PermutationBase> transpose() const
+    { return derived(); }
+
+    /**** multiplication helpers to hopefully get RVO ****/
+
+  
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+  protected:
+    template<typename OtherDerived>
+    void assignTranspose(const PermutationBase<OtherDerived>& other)
+    {
+      for (int i=0; i<rows();++i) indices().coeffRef(other.indices().coeff(i)) = i;
+    }
+    template<typename Lhs,typename Rhs>
+    void assignProduct(const Lhs& lhs, const Rhs& rhs)
+    {
+      eigen_assert(lhs.cols() == rhs.rows());
+      for (int i=0; i<rows();++i) indices().coeffRef(i) = lhs.indices().coeff(rhs.indices().coeff(i));
+    }
+#endif
+
+  public:
+
+    /** \returns the product permutation matrix.
+      *
+      * \note \note_try_to_help_rvo
+      */
+    template<typename Other>
+    inline PlainPermutationType operator*(const PermutationBase<Other>& other) const
+    { return PlainPermutationType(internal::PermPermProduct, derived(), other.derived()); }
+
+    /** \returns the product of a permutation with another inverse permutation.
+      *
+      * \note \note_try_to_help_rvo
+      */
+    template<typename Other>
+    inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other) const
+    { return PlainPermutationType(internal::PermPermProduct, *this, other.eval()); }
+
+    /** \returns the product of an inverse permutation with another permutation.
+      *
+      * \note \note_try_to_help_rvo
+      */
+    template<typename Other> friend
+    inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other, const PermutationBase& perm)
+    { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }
+
+  protected:
+
+};
+
+/** \class PermutationMatrix
+  * \ingroup Core_Module
+  *
+  * \brief Permutation matrix
+  *
+  * \param SizeAtCompileTime the number of rows/cols, or Dynamic
+  * \param MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
+  * \param IndexType the integer type of the indices
+  *
+  * This class represents a permutation matrix, internally stored as a vector of integers.
+  *
+  * \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix
+  */
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType> >
+ : traits<Matrix<IndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+  typedef IndexType Index;
+  typedef Matrix<IndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType> >
+{
+    typedef PermutationBase<PermutationMatrix> Base;
+    typedef internal::traits<PermutationMatrix> Traits;
+  public:
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef typename Traits::IndicesType IndicesType;
+    #endif
+
+    inline PermutationMatrix()
+    {}
+
+    /** Constructs an uninitialized permutation matrix of given size.
+      */
+    inline PermutationMatrix(int size) : m_indices(size)
+    {}
+
+    /** Copy constructor. */
+    template<typename OtherDerived>
+    inline PermutationMatrix(const PermutationBase<OtherDerived>& other)
+      : m_indices(other.indices()) {}
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** Standard copy constructor. Defined only to prevent a default copy constructor
+      * from hiding the other templated constructor */
+    inline PermutationMatrix(const PermutationMatrix& other) : m_indices(other.indices()) {}
+    #endif
+
+    /** Generic constructor from expression of the indices. The indices
+      * array has the meaning that the permutation sends each integer i to indices[i].
+      *
+      * \warning It is your responsibility to check that the indices array you pass actually
+      * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the
+      * array's size.
+      */
+    template<typename Other>
+    explicit inline PermutationMatrix(const MatrixBase<Other>& indices) : m_indices(indices)
+    {}
+
+    /** Convert the Transpositions \a tr to a permutation matrix */
+    template<typename Other>
+    explicit PermutationMatrix(const TranspositionsBase<Other>& tr)
+      : m_indices(tr.size())
+    {
+      *this = tr;
+    }
+
+    /** Copies the other permutation into *this */
+    template<typename Other>
+    PermutationMatrix& operator=(const PermutationBase<Other>& other)
+    {
+      m_indices = other.indices();
+      return *this;
+    }
+
+    /** Assignment from the Transpositions \a tr */
+    template<typename Other>
+    PermutationMatrix& operator=(const TranspositionsBase<Other>& tr)
+    {
+      return Base::operator=(tr.derived());
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    PermutationMatrix& operator=(const PermutationMatrix& other)
+    {
+      m_indices = other.m_indices;
+      return *this;
+    }
+    #endif
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return m_indices; }
+    /** \returns a reference to the stored array representing the permutation. */
+    IndicesType& indices() { return m_indices; }
+
+
+    /**** multiplication helpers to hopefully get RVO ****/
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename Other>
+    PermutationMatrix(const Transpose<PermutationBase<Other> >& other)
+      : m_indices(other.nestedPermutation().size())
+    {
+      for (int i=0; i<m_indices.size();++i) m_indices.coeffRef(other.nestedPermutation().indices().coeff(i)) = i;
+    }
+    template<typename Lhs,typename Rhs>
+    PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs)
+      : m_indices(lhs.indices().size())
+    {
+      Base::assignProduct(lhs,rhs);
+    }
+#endif
+
+  protected:
+
+    IndicesType m_indices;
+};
+
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
+struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess> >
+ : traits<Matrix<IndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+  typedef IndexType Index;
+  typedef Map<const Matrix<IndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, _PacketAccess> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
+class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess>
+  : public PermutationBase<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess> >
+{
+    typedef PermutationBase<Map> Base;
+    typedef internal::traits<Map> Traits;
+  public:
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef typename Traits::IndicesType IndicesType;
+    typedef typename IndicesType::Scalar Index;
+    #endif
+
+    inline Map(const Index* indices)
+      : m_indices(indices)
+    {}
+
+    inline Map(const Index* indices, Index size)
+      : m_indices(indices,size)
+    {}
+
+    /** Copies the other permutation into *this */
+    template<typename Other>
+    Map& operator=(const PermutationBase<Other>& other)
+    { return Base::operator=(other.derived()); }
+
+    /** Assignment from the Transpositions \a tr */
+    template<typename Other>
+    Map& operator=(const TranspositionsBase<Other>& tr)
+    { return Base::operator=(tr.derived()); }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    Map& operator=(const Map& other)
+    {
+      m_indices = other.m_indices;
+      return *this;
+    }
+    #endif
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return m_indices; }
+    /** \returns a reference to the stored array representing the permutation. */
+    IndicesType& indices() { return m_indices; }
+
+  protected:
+
+    IndicesType m_indices;
+};
+
+/** \class PermutationWrapper
+  * \ingroup Core_Module
+  *
+  * \brief Class to view a vector of integers as a permutation matrix
+  *
+  * \param _IndicesType the type of the vector of integer (can be any compatible expression)
+  *
+  * This class makes it possible to view any vector expression of integers as a permutation matrix.
+  *
+  * \sa class PermutationBase, class PermutationMatrix
+  */
+
+struct PermutationStorage {};
+
+template<typename _IndicesType> class TranspositionsWrapper;
+namespace internal {
+template<typename _IndicesType>
+struct traits<PermutationWrapper<_IndicesType> >
+{
+  typedef PermutationStorage StorageKind;
+  typedef typename _IndicesType::Scalar Scalar;
+  typedef typename _IndicesType::Scalar Index;
+  typedef _IndicesType IndicesType;
+  enum {
+    RowsAtCompileTime = _IndicesType::SizeAtCompileTime,
+    ColsAtCompileTime = _IndicesType::SizeAtCompileTime,
+    MaxRowsAtCompileTime = IndicesType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = IndicesType::MaxColsAtCompileTime,
+    Flags = 0,
+    CoeffReadCost = _IndicesType::CoeffReadCost
+  };
+};
+}
+
+template<typename _IndicesType>
+class PermutationWrapper : public PermutationBase<PermutationWrapper<_IndicesType> >
+{
+    typedef PermutationBase<PermutationWrapper> Base;
+    typedef internal::traits<PermutationWrapper> Traits;
+  public:
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef typename Traits::IndicesType IndicesType;
+    #endif
+
+    inline PermutationWrapper(const IndicesType& indices)
+      : m_indices(indices)
+    {}
+
+    /** const version of indices(). */
+    const typename internal::remove_all<typename IndicesType::Nested>::type&
+    indices() const { return m_indices; }
+
+  protected:
+
+    typename IndicesType::Nested m_indices;
+};
+
+/** \returns the matrix with the permutation applied to the columns.
+  */
+template<typename Derived, typename PermutationDerived>
+inline const internal::permut_matrix_product_retval<PermutationDerived, Derived, OnTheRight>
+operator*(const MatrixBase<Derived>& matrix,
+          const PermutationBase<PermutationDerived> &permutation)
+{
+  return internal::permut_matrix_product_retval
+           <PermutationDerived, Derived, OnTheRight>
+           (permutation.derived(), matrix.derived());
+}
+
+/** \returns the matrix with the permutation applied to the rows.
+  */
+template<typename Derived, typename PermutationDerived>
+inline const internal::permut_matrix_product_retval
+               <PermutationDerived, Derived, OnTheLeft>
+operator*(const PermutationBase<PermutationDerived> &permutation,
+          const MatrixBase<Derived>& matrix)
+{
+  return internal::permut_matrix_product_retval
+           <PermutationDerived, Derived, OnTheLeft>
+           (permutation.derived(), matrix.derived());
+}
+
+namespace internal {
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct traits<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+  typedef typename MatrixType::PlainObject ReturnType;
+};
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct permut_matrix_product_retval
+ : public ReturnByValue<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+    typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+
+    permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
+      : m_permutation(perm), m_matrix(matrix)
+    {}
+
+    inline int rows() const { return m_matrix.rows(); }
+    inline int cols() const { return m_matrix.cols(); }
+
+    template<typename Dest> inline void evalTo(Dest& dst) const
+    {
+      const int n = Side==OnTheLeft ? rows() : cols();
+
+      if(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix))
+      {
+        // apply the permutation in place
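+        // (a permutation decomposes into disjoint cycles; 'mask' records the indices that
+        // have already been visited, so each cycle is traversed and swapped exactly once)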
+        Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());
+        mask.fill(false);
+        int r = 0;
+        while(r < m_permutation.size())
+        {
+          // search for the next seed
+          while(r<m_permutation.size() && mask[r]) r++;
+          if(r>=m_permutation.size())
+            break;
+          // we got one, let's follow it until we are back to the seed
+          int k0 = r++;
+          int kPrev = k0;
+          mask.coeffRef(k0) = true;
+          for(int k=m_permutation.indices().coeff(k0); k!=k0; k=m_permutation.indices().coeff(k))
+          {
+                  Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>(dst, k)
+            .swap(Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>
+                       (dst,((Side==OnTheLeft) ^ Transposed) ? k0 : kPrev));
+
+            mask.coeffRef(k) = true;
+            kPrev = k;
+          }
+        }
+      }
+      else
+      {
+        for(int i = 0; i < n; ++i)
+        {
+          Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>
+               (dst, ((Side==OnTheLeft) ^ Transposed) ? m_permutation.indices().coeff(i) : i)
+            =
+          Block<const MatrixTypeNestedCleaned,Side==OnTheLeft ? 1 : MatrixType::RowsAtCompileTime,Side==OnTheRight ? 1 : MatrixType::ColsAtCompileTime>
+               (m_matrix, ((Side==OnTheRight) ^ Transposed) ? m_permutation.indices().coeff(i) : i);
+        }
+      }
+    }
+
+  protected:
+    const PermutationType& m_permutation;
+    typename MatrixType::Nested m_matrix;
+};
+
+/* Template partial specialization for transposed/inverse permutations */
+
+template<typename Derived>
+struct traits<Transpose<PermutationBase<Derived> > >
+ : traits<Derived>
+{};
+
+} // end namespace internal
+
+template<typename Derived>
+class Transpose<PermutationBase<Derived> >
+  : public EigenBase<Transpose<PermutationBase<Derived> > >
+{
+    typedef Derived PermutationType;
+    typedef typename PermutationType::IndicesType IndicesType;
+    typedef typename PermutationType::PlainPermutationType PlainPermutationType;
+  public:
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef internal::traits<PermutationType> Traits;
+    typedef typename Derived::DenseMatrixType DenseMatrixType;
+    enum {
+      Flags = Traits::Flags,
+      CoeffReadCost = Traits::CoeffReadCost,
+      RowsAtCompileTime = Traits::RowsAtCompileTime,
+      ColsAtCompileTime = Traits::ColsAtCompileTime,
+      MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
+    };
+    typedef typename Traits::Scalar Scalar;
+    #endif
+
+    Transpose(const PermutationType& p) : m_permutation(p) {}
+
+    inline int rows() const { return m_permutation.rows(); }
+    inline int cols() const { return m_permutation.cols(); }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename DenseDerived>
+    void evalTo(MatrixBase<DenseDerived>& other) const
+    {
+      other.setZero();
+      for (int i=0; i<rows();++i)
+        other.coeffRef(i, m_permutation.indices().coeff(i)) = typename DenseDerived::Scalar(1);
+    }
+    #endif
+
+    /** \return the equivalent permutation matrix */
+    PlainPermutationType eval() const { return *this; }
+
+    DenseMatrixType toDenseMatrix() const { return *this; }
+
+    /** \returns the matrix with the inverse permutation applied to the columns.
+      */
+    template<typename OtherDerived> friend
+    inline const internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheRight, true>
+    operator*(const MatrixBase<OtherDerived>& matrix, const Transpose& trPerm)
+    {
+      return internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheRight, true>(trPerm.m_permutation, matrix.derived());
+    }
+
+    /** \returns the matrix with the inverse permutation applied to the rows.
+      */
+    template<typename OtherDerived>
+    inline const internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheLeft, true>
+    operator*(const MatrixBase<OtherDerived>& matrix) const
+    {
+      return internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheLeft, true>(m_permutation, matrix.derived());
+    }
+
+    const PermutationType& nestedPermutation() const { return m_permutation; }
+
+  protected:
+    const PermutationType& m_permutation;
+};
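+
+// Usage sketch (illustrative only; it assumes PermutationBase::inverse(), declared earlier
+// in this header, which returns this Transpose expression), showing that a permutation can
+// be undone without ever forming a dense matrix:
+//   PermutationMatrix<Dynamic, Dynamic> P(3);
+//   P.indices() << 2, 0, 1;
+//   MatrixXd A  = MatrixXd::Random(3, 3);
+//   MatrixXd PA = P * A;              // permute the rows of A
+//   MatrixXd B  = P.inverse() * PA;   // B == A again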
+
+template<typename Derived>
+const PermutationWrapper<const Derived> MatrixBase<Derived>::asPermutation() const
+{
+  return derived();
+}
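+
+// Usage sketch (illustrative only): any integer vector expression can be viewed as a
+// permutation without copying it, through asPermutation() / PermutationWrapper:
+//   VectorXi idx(3);
+//   idx << 2, 0, 1;
+//   MatrixXd A = MatrixXd::Random(3, 3);
+//   MatrixXd B = idx.asPermutation() * A;   // same effect as a PermutationMatrix built from idx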
+
+} // end namespace Eigen
+
+#endif // EIGEN_PERMUTATIONMATRIX_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/PlainObjectBase.h b/resources/3rdParty/eigen/Eigen/src/Core/PlainObjectBase.h
new file mode 100644
index 000000000..71c74309a
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/PlainObjectBase.h
@@ -0,0 +1,767 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DENSESTORAGEBASE_H
+#define EIGEN_DENSESTORAGEBASE_H
+
+#ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
+# define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0);
+#else
+# define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Index>
+EIGEN_ALWAYS_INLINE void check_rows_cols_for_overflow(Index rows, Index cols)
+{
+  // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242
+  // we assume Index is signed
+  Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed
+  bool error = (rows < 0  || cols < 0)  ? true
+             : (rows == 0 || cols == 0) ? false
+                                        : (rows > max_index / cols);
+  if (error)
+    throw_std_bad_alloc();
+}
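+// (for instance, with a 32-bit signed Index, max_index is 2^31 - 1, so requesting a
+//  50000 x 50000 matrix makes rows > max_index / cols hold and throw_std_bad_alloc() is called)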
+
+template <typename Derived, typename OtherDerived = Derived, bool IsVector = bool(Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl;
+
+template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl;
+
+} // end namespace internal
+
+/** \class PlainObjectBase
+  * \brief %Dense storage base class for matrices and arrays.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN.
+  *
+  * \sa \ref TopicClassHierarchy
+  */
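+// Sketch of the plugin mechanism mentioned above (illustrative; the header name below is a
+// placeholder chosen for this example, not a file shipped with Eigen):
+//   // in user code, before including any Eigen header:
+//   #define EIGEN_PLAINOBJECTBASE_PLUGIN "MyPlainObjectBaseAddons.h"
+//   #include <Eigen/Dense>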
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+namespace internal {
+
+// this is a workaround for doxygen not being able to understand the inheritance logic
+// when it is hidden by the dense_xpr_base helper struct.
+template<typename Derived> struct dense_xpr_base_dispatcher_for_doxygen;// : public MatrixBase<Derived> {};
+/** This class is just a workaround for Doxygen and it does not actually exist. */
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct dense_xpr_base_dispatcher_for_doxygen<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+    : public MatrixBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > {};
+/** This class is just a workaround for Doxygen and it does not actually exist. */
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct dense_xpr_base_dispatcher_for_doxygen<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+    : public ArrayBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > {};
+
+} // namespace internal
+
+template<typename Derived>
+class PlainObjectBase : public internal::dense_xpr_base_dispatcher_for_doxygen<Derived>
+#else
+template<typename Derived>
+class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
+#endif
+{
+  public:
+    enum { Options = internal::traits<Derived>::Options };
+    typedef typename internal::dense_xpr_base<Derived>::type Base;
+
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef Derived DenseType;
+
+    using Base::RowsAtCompileTime;
+    using Base::ColsAtCompileTime;
+    using Base::SizeAtCompileTime;
+    using Base::MaxRowsAtCompileTime;
+    using Base::MaxColsAtCompileTime;
+    using Base::MaxSizeAtCompileTime;
+    using Base::IsVectorAtCompileTime;
+    using Base::Flags;
+
+    template<typename PlainObjectType, int MapOptions, typename StrideType> friend class Eigen::Map;
+    friend  class Eigen::Map<Derived, Unaligned>;
+    typedef Eigen::Map<Derived, Unaligned>  MapType;
+    friend  class Eigen::Map<const Derived, Unaligned>;
+    typedef const Eigen::Map<const Derived, Unaligned> ConstMapType;
+    friend  class Eigen::Map<Derived, Aligned>;
+    typedef Eigen::Map<Derived, Aligned> AlignedMapType;
+    friend  class Eigen::Map<const Derived, Aligned>;
+    typedef const Eigen::Map<const Derived, Aligned> ConstAlignedMapType;
+    template<typename StrideType> struct StridedMapType { typedef Eigen::Map<Derived, Unaligned, StrideType> type; };
+    template<typename StrideType> struct StridedConstMapType { typedef Eigen::Map<const Derived, Unaligned, StrideType> type; };
+    template<typename StrideType> struct StridedAlignedMapType { typedef Eigen::Map<Derived, Aligned, StrideType> type; };
+    template<typename StrideType> struct StridedConstAlignedMapType { typedef Eigen::Map<const Derived, Aligned, StrideType> type; };
+
+  protected:
+    DenseStorage<Scalar, Base::MaxSizeAtCompileTime, Base::RowsAtCompileTime, Base::ColsAtCompileTime, Options> m_storage;
+
+  public:
+    enum { NeedsToAlign = SizeAtCompileTime != Dynamic && (internal::traits<Derived>::Flags & AlignedBit) != 0 };
+    EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+
+    Base& base() { return *static_cast<Base*>(this); }
+    const Base& base() const { return *static_cast<const Base*>(this); }
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); }
+
+    EIGEN_STRONG_INLINE const Scalar& coeff(Index row, Index col) const
+    {
+      if(Flags & RowMajorBit)
+        return m_storage.data()[col + row * m_storage.cols()];
+      else // column-major
+        return m_storage.data()[row + col * m_storage.rows()];
+    }
+
+    EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
+    {
+      return m_storage.data()[index];
+    }
+
+    EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
+    {
+      if(Flags & RowMajorBit)
+        return m_storage.data()[col + row * m_storage.cols()];
+      else // column-major
+        return m_storage.data()[row + col * m_storage.rows()];
+    }
+
+    EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
+    {
+      return m_storage.data()[index];
+    }
+
+    EIGEN_STRONG_INLINE const Scalar& coeffRef(Index row, Index col) const
+    {
+      if(Flags & RowMajorBit)
+        return m_storage.data()[col + row * m_storage.cols()];
+      else // column-major
+        return m_storage.data()[row + col * m_storage.rows()];
+    }
+
+    EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const
+    {
+      return m_storage.data()[index];
+    }
+
+    /** \internal */
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+    {
+      return internal::ploadt<PacketScalar, LoadMode>
+               (m_storage.data() + (Flags & RowMajorBit
+                                   ? col + row * m_storage.cols()
+                                   : row + col * m_storage.rows()));
+    }
+
+    /** \internal */
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
+    {
+      return internal::ploadt<PacketScalar, LoadMode>(m_storage.data() + index);
+    }
+
+    /** \internal */
+    template<int StoreMode>
+    EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketScalar& x)
+    {
+      internal::pstoret<Scalar, PacketScalar, StoreMode>
+              (m_storage.data() + (Flags & RowMajorBit
+                                   ? col + row * m_storage.cols()
+                                   : row + col * m_storage.rows()), x);
+    }
+
+    /** \internal */
+    template<int StoreMode>
+    EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& x)
+    {
+      internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x);
+    }
+
+    /** \returns a const pointer to the data array of this matrix */
+    EIGEN_STRONG_INLINE const Scalar *data() const
+    { return m_storage.data(); }
+
+    /** \returns a pointer to the data array of this matrix */
+    EIGEN_STRONG_INLINE Scalar *data()
+    { return m_storage.data(); }
+
+    /** Resizes \c *this to a \a rows x \a cols matrix.
+      *
+      * This method is intended for dynamic-size matrices, although it is legal to call it on any
+      * matrix as long as fixed dimensions are left unchanged. If you only want to change the number
+      * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t).
+      *
+      * If the current number of coefficients of \c *this exactly matches the
+      * product \a rows * \a cols, then no memory allocation is performed and
+      * the current values are left unchanged. In all other cases, including
+      * shrinking, the data is reallocated and all previous values are lost.
+      *
+      * Example: \include Matrix_resize_int_int.cpp
+      * Output: \verbinclude Matrix_resize_int_int.out
+      *
+      * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t)
+      */
+    EIGEN_STRONG_INLINE void resize(Index rows, Index cols)
+    {
+      #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
+        internal::check_rows_cols_for_overflow(rows, cols);
+        Index size = rows*cols;
+        bool size_changed = size != this->size();
+        m_storage.resize(size, rows, cols);
+        if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+      #else
+        internal::check_rows_cols_for_overflow(rows, cols);
+        m_storage.resize(rows*cols, rows, cols);
+      #endif
+    }
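+    // Sketch of the reallocation rule described above (illustrative only):
+    //   MatrixXd m(2,3);   // 6 coefficients
+    //   m.resize(3,2);     // still 6 coefficients: no reallocation, coefficients kept
+    //   m.resize(4,4);     // size changes: reallocation, previous values are lost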
+
+    /** Resizes \c *this to a vector of length \a size
+      *
+      * \only_for_vectors. This method does not work for
+      * partially dynamic matrices when the static dimension is anything other
+      * than 1. For example it will not work with Matrix<double, 2, Dynamic>.
+      *
+      * Example: \include Matrix_resize_int.cpp
+      * Output: \verbinclude Matrix_resize_int.out
+      *
+      * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t)
+      */
+    inline void resize(Index size)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase)
+      eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
+      #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
+        bool size_changed = size != this->size();
+      #endif
+      if(RowsAtCompileTime == 1)
+        m_storage.resize(size, 1, size);
+      else
+        m_storage.resize(size, size, 1);
+      #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
+        if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+      #endif
+    }
+
+    /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the special value \c NoChange
+      * as in the example below.
+      *
+      * Example: \include Matrix_resize_NoChange_int.cpp
+      * Output: \verbinclude Matrix_resize_NoChange_int.out
+      *
+      * \sa resize(Index,Index)
+      */
+    inline void resize(NoChange_t, Index cols)
+    {
+      resize(rows(), cols);
+    }
+
+    /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange
+      * as in the example below.
+      *
+      * Example: \include Matrix_resize_int_NoChange.cpp
+      * Output: \verbinclude Matrix_resize_int_NoChange.out
+      *
+      * \sa resize(Index,Index)
+      */
+    inline void resize(Index rows, NoChange_t)
+    {
+      resize(rows, cols());
+    }
+
+    /** Resizes \c *this to have the same dimensions as \a other.
+      * Takes care of doing all the checking that's needed.
+      *
+      * Note that copying a row-vector into a vector (and conversely) is allowed.
+      * The resizing, if any, is then done in the appropriate way so that row-vectors
+      * remain row-vectors and vectors remain vectors.
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)
+    {
+      const OtherDerived& other = _other.derived();
+      internal::check_rows_cols_for_overflow(other.rows(), other.cols());
+      const Index othersize = other.rows()*other.cols();
+      if(RowsAtCompileTime == 1)
+      {
+        eigen_assert(other.rows() == 1 || other.cols() == 1);
+        resize(1, othersize);
+      }
+      else if(ColsAtCompileTime == 1)
+      {
+        eigen_assert(other.rows() == 1 || other.cols() == 1);
+        resize(othersize, 1);
+      }
+      else resize(other.rows(), other.cols());
+    }
+
+    /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+      *
+      * The method is intended for matrices of dynamic size. If you only want to change the number
+      * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
+      * conservativeResize(Index, NoChange_t).
+      *
+      * Matrices are resized relative to the top-left element. In case values need to be
+      * appended to the matrix, they will be left uninitialized.
+      */
+    EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols)
+    {
+      internal::conservative_resize_like_impl<Derived>::run(*this, rows, cols);
+    }
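+    // Sketch (illustrative only): growing a matrix while keeping its existing coefficients:
+    //   MatrixXd m = MatrixXd::Ones(2,2);
+    //   m.conservativeResize(3,3);   // the top-left 2x2 block is still all ones,
+    //                                // the appended row and column are uninitialized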
+
+    /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+      *
+      * As opposed to conservativeResize(Index rows, Index cols), this version leaves
+      * the number of columns unchanged.
+      *
+      * In case the matrix is growing, new rows will be uninitialized.
+      */
+    EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t)
+    {
+      // Note: see the comment in conservativeResize(Index,Index)
+      conservativeResize(rows, cols());
+    }
+
+    /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+      *
+      * As opposed to conservativeResize(Index rows, Index cols), this version leaves
+      * the number of rows unchanged.
+      *
+      * In case the matrix is growing, new columns will be uninitialized.
+      */
+    EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols)
+    {
+      // Note: see the comment in conservativeResize(Index,Index)
+      conservativeResize(rows(), cols);
+    }
+
+    /** Resizes the vector to \a size while retaining old values.
+      *
+      * \only_for_vectors. This method does not work for
+      * partially dynamic matrices when the static dimension is anything other
+      * than 1. For example it will not work with Matrix<double, 2, Dynamic>.
+      *
+      * When values are appended, they will be uninitialized.
+      */
+    EIGEN_STRONG_INLINE void conservativeResize(Index size)
+    {
+      internal::conservative_resize_like_impl<Derived>::run(*this, size);
+    }
+
+    /** Resizes the matrix to the dimensions of \c other, while leaving old values untouched.
+      *
+      * The method is intended for matrices of dynamic size. If you only want to change the number
+      * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
+      * conservativeResize(Index, NoChange_t).
+      *
+      * Matrices are resized relative to the top-left element. In case values need to be
+      * appended to the matrix, they will be copied from \c other.
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase<OtherDerived>& other)
+    {
+      internal::conservative_resize_like_impl<Derived,OtherDerived>::run(*this, other);
+    }
+
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other)
+    {
+      return _set(other);
+    }
+
+    /** \sa MatrixBase::lazyAssign() */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase<OtherDerived>& other)
+    {
+      _resize_to_match(other);
+      return Base::lazyAssign(other.derived());
+    }
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue<OtherDerived>& func)
+    {
+      resize(func.rows(), func.cols());
+      return Base::operator=(func);
+    }
+
+    EIGEN_STRONG_INLINE explicit PlainObjectBase() : m_storage()
+    {
+//       _check_template_params();
+//       EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    // FIXME is it still needed ?
+    /** \internal */
+    PlainObjectBase(internal::constructor_without_unaligned_array_assert)
+      : m_storage(internal::constructor_without_unaligned_array_assert())
+    {
+//       _check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+    }
+#endif
+
+    EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols)
+      : m_storage(size, rows, cols)
+    {
+//       _check_template_params();
+//       EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+    }
+
+    /** \copydoc MatrixBase::operator=(const EigenBase<OtherDerived>&)
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived> &other)
+    {
+      _resize_to_match(other);
+      Base::operator=(other.derived());
+      return this->derived();
+    }
+
+    /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase<OtherDerived> &other)
+      : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
+    {
+      _check_template_params();
+      internal::check_rows_cols_for_overflow(other.derived().rows(), other.derived().cols());
+      Base::operator=(other.derived());
+    }
+
+    /** \name Map
+      * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects,
+      * while the MapAligned() functions return aligned Map objects and thus should be called only with 16-byte-aligned
+      * \a data pointers.
+      *
+      * \see class Map
+      */
+    //@{
+    static inline ConstMapType Map(const Scalar* data)
+    { return ConstMapType(data); }
+    static inline MapType Map(Scalar* data)
+    { return MapType(data); }
+    static inline ConstMapType Map(const Scalar* data, Index size)
+    { return ConstMapType(data, size); }
+    static inline MapType Map(Scalar* data, Index size)
+    { return MapType(data, size); }
+    static inline ConstMapType Map(const Scalar* data, Index rows, Index cols)
+    { return ConstMapType(data, rows, cols); }
+    static inline MapType Map(Scalar* data, Index rows, Index cols)
+    { return MapType(data, rows, cols); }
+
+    static inline ConstAlignedMapType MapAligned(const Scalar* data)
+    { return ConstAlignedMapType(data); }
+    static inline AlignedMapType MapAligned(Scalar* data)
+    { return AlignedMapType(data); }
+    static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size)
+    { return ConstAlignedMapType(data, size); }
+    static inline AlignedMapType MapAligned(Scalar* data, Index size)
+    { return AlignedMapType(data, size); }
+    static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols)
+    { return ConstAlignedMapType(data, rows, cols); }
+    static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols)
+    { return AlignedMapType(data, rows, cols); }
+
+    template<int Outer, int Inner>
+    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+
+    template<int Outer, int Inner>
+    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+    //@}
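+    // Usage sketch for the Map() helpers above (illustrative only):
+    //   double buf[6] = {1, 2, 3, 4, 5, 6};
+    //   Map<MatrixXd> view = MatrixXd::Map(buf, 2, 3);   // reinterprets buf, no copy
+    //   view(0, 0) = 42;                                 // writes straight into buf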
+
+    using Base::setConstant;
+    Derived& setConstant(Index size, const Scalar& value);
+    Derived& setConstant(Index rows, Index cols, const Scalar& value);
+
+    using Base::setZero;
+    Derived& setZero(Index size);
+    Derived& setZero(Index rows, Index cols);
+
+    using Base::setOnes;
+    Derived& setOnes(Index size);
+    Derived& setOnes(Index rows, Index cols);
+
+    using Base::setRandom;
+    Derived& setRandom(Index size);
+    Derived& setRandom(Index rows, Index cols);
+
+    #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN
+    #include EIGEN_PLAINOBJECTBASE_PLUGIN
+    #endif
+
+  protected:
+    /** \internal Resizes *this in preparation for assigning \a other to it.
+      * Takes care of doing all the checking that's needed.
+      *
+      * Note that copying a row-vector into a vector (and conversely) is allowed.
+      * The resizing, if any, is then done in the appropriate way so that row-vectors
+      * remain row-vectors and vectors remain vectors.
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase<OtherDerived>& other)
+    {
+      #ifdef EIGEN_NO_AUTOMATIC_RESIZING
+      eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size())
+                 : (rows() == other.rows() && cols() == other.cols())))
+        && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
+      #else
+      resizeLike(other);
+      #endif
+    }
+
+    /**
+      * \brief Copies the value of the expression \a other into \c *this with automatic resizing.
+      *
+      * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
+      * it will be initialized.
+      *
+      * Note that copying a row-vector into a vector (and conversely) is allowed.
+      * The resizing, if any, is then done in the appropriate way so that row-vectors
+      * remain row-vectors and vectors remain vectors.
+      *
+      * \sa operator=(const MatrixBase<OtherDerived>&), _set_noalias()
+      *
+      * \internal
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Derived& _set(const DenseBase<OtherDerived>& other)
+    {
+      _set_selector(other.derived(), typename internal::conditional<static_cast<bool>(int(OtherDerived::Flags) & EvalBeforeAssigningBit), internal::true_type, internal::false_type>::type());
+      return this->derived();
+    }
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::true_type&) { _set_noalias(other.eval()); }
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::false_type&) { _set_noalias(other); }
+
+    /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which
+      * is the case when creating a new matrix) so one can enforce lazy evaluation.
+      *
+      * \sa operator=(const MatrixBase<OtherDerived>&), _set()
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase<OtherDerived>& other)
+    {
+      // I don't think we need this resize call since lazyAssign will resize anyway
+      // and lazyAssign will be called by the assign selector.
+      //_resize_to_match(other);
+      // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because
+      // it wouldn't allow copying a row-vector into a column-vector.
+      return internal::assign_selector<Derived,OtherDerived,false>::run(this->derived(), other.derived());
+    }
+
+    template<typename T0, typename T1>
+    EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
+    {
+      EIGEN_STATIC_ASSERT(bool(NumTraits<T0>::IsInteger) &&
+                          bool(NumTraits<T1>::IsInteger),
+                          FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
+      eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
+             && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
+      internal::check_rows_cols_for_overflow(rows, cols);      
+      m_storage.resize(rows*cols,rows,cols);
+      EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+    }
+    template<typename T0, typename T1>
+    EIGEN_STRONG_INLINE void _init2(const Scalar& x, const Scalar& y, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
+      m_storage.data()[0] = x;
+      m_storage.data()[1] = y;
+    }
+
+    template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
+    friend struct internal::matrix_swap_impl;
+
+    /** \internal generic implementation of swap for dense storage since for dynamic-sized matrices of the same type it is enough to swap the
+      * data pointers.
+      */
+    template<typename OtherDerived>
+    void _swap(DenseBase<OtherDerived> const & other)
+    {
+      enum { SwapPointers = internal::is_same<Derived, OtherDerived>::value && Base::SizeAtCompileTime==Dynamic };
+      internal::matrix_swap_impl<Derived, OtherDerived, bool(SwapPointers)>::run(this->derived(), other.const_cast_derived());
+    }
+
+  public:
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    static EIGEN_STRONG_INLINE void _check_template_params()
+    {
+      EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (Options&RowMajor)==RowMajor)
+                        && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (Options&RowMajor)==0)
+                        && ((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0))
+                        && ((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0))
+                        && ((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0))
+                        && ((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0))
+                        && (MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime==Dynamic)
+                        && (MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime==Dynamic)
+                        && (Options & (DontAlign|RowMajor)) == Options),
+        INVALID_MATRIX_TEMPLATE_PARAMETERS)
+    }
+#endif
+
+private:
+    enum { ThisConstantIsPrivateInPlainObjectBase };
+};
+
+template <typename Derived, typename OtherDerived, bool IsVector>
+struct internal::conservative_resize_like_impl
+{
+  typedef typename Derived::Index Index;
+  static void run(DenseBase<Derived>& _this, Index rows, Index cols)
+  {
+    if (_this.rows() == rows && _this.cols() == cols) return;
+    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
+
+    if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows
+         (!Derived::IsRowMajor && _this.rows() == rows) )  // column-major and we change only the number of columns
+    {
+      internal::check_rows_cols_for_overflow(rows, cols);
+      _this.derived().m_storage.conservativeResize(rows*cols,rows,cols);
+    }
+    else
+    {
+      // The storage order does not allow us to use reallocation.
+      typename Derived::PlainObject tmp(rows,cols);
+      const Index common_rows = (std::min)(rows, _this.rows());
+      const Index common_cols = (std::min)(cols, _this.cols());
+      tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
+      _this.derived().swap(tmp);
+    }
+  }
+
+  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
+  {
+    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
+
+    // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index),
+    // neither RowsAtCompileTime nor ColsAtCompileTime needs to be Dynamic. If only one of the
+    // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or
+    // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like
+    // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good.
+    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
+    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)
+
+    if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows
+         (!Derived::IsRowMajor && _this.rows() == other.rows()) )  // column-major and we change only the number of columns
+    {
+      const Index new_rows = other.rows() - _this.rows();
+      const Index new_cols = other.cols() - _this.cols();
+      _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols());
+      if (new_rows>0)
+        _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows);
+      else if (new_cols>0)
+        _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols);
+    }
+    else
+    {
+      // The storage order does not allow us to use reallocation.
+      typename Derived::PlainObject tmp(other);
+      const Index common_rows = (std::min)(tmp.rows(), _this.rows());
+      const Index common_cols = (std::min)(tmp.cols(), _this.cols());
+      tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
+      _this.derived().swap(tmp);
+    }
+  }
+};
+
+namespace internal {
+
+template <typename Derived, typename OtherDerived>
+struct conservative_resize_like_impl<Derived,OtherDerived,true>
+{
+  typedef typename Derived::Index Index;
+  static void run(DenseBase<Derived>& _this, Index size)
+  {
+    const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;
+    const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1;
+    _this.derived().m_storage.conservativeResize(size,new_rows,new_cols);
+  }
+
+  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
+  {
+    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
+
+    const Index num_new_elements = other.size() - _this.size();
+
+    const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows();
+    const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1;
+    _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);
+
+    if (num_new_elements > 0)
+      _this.tail(num_new_elements) = other.tail(num_new_elements);
+  }
+};
+
+template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
+struct matrix_swap_impl
+{
+  static inline void run(MatrixTypeA& a, MatrixTypeB& b)
+  {
+    a.base().swap(b);
+  }
+};
+
+template<typename MatrixTypeA, typename MatrixTypeB>
+struct matrix_swap_impl<MatrixTypeA, MatrixTypeB, true>
+{
+  static inline void run(MatrixTypeA& a, MatrixTypeB& b)
+  {
+    static_cast<typename MatrixTypeA::Base&>(a).m_storage.swap(static_cast<typename MatrixTypeB::Base&>(b).m_storage);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_DENSESTORAGEBASE_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Product.h b/resources/3rdParty/eigen/Eigen/src/Core/Product.h
new file mode 100644
index 000000000..30aa8943b
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Product.h
@@ -0,0 +1,98 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PRODUCT_H
+#define EIGEN_PRODUCT_H
+
+template<typename Lhs, typename Rhs> class Product;
+template<typename Lhs, typename Rhs, typename StorageKind> class ProductImpl;
+
+/** \class Product
+  * \ingroup Core_Module
+  *
+  * \brief Expression of the product of two arbitrary matrices or vectors
+  *
+  * \param Lhs the type of the left-hand side expression
+  * \param Rhs the type of the right-hand side expression
+  *
+  * This class represents an expression of the product of two arbitrary matrices.
+  *
+  */
+
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<Product<Lhs, Rhs> >
+{
+  typedef MatrixXpr XprKind;
+  typedef typename remove_all<Lhs>::type LhsCleaned;
+  typedef typename remove_all<Rhs>::type RhsCleaned;
+  typedef typename scalar_product_traits<typename traits<LhsCleaned>::Scalar, typename traits<RhsCleaned>::Scalar>::ReturnType Scalar;
+  typedef typename promote_storage_type<typename traits<LhsCleaned>::StorageKind,
+                                        typename traits<RhsCleaned>::StorageKind>::ret StorageKind;
+  typedef typename promote_index_type<typename traits<LhsCleaned>::Index,
+                                      typename traits<RhsCleaned>::Index>::type Index;
+  enum {
+    RowsAtCompileTime = LhsCleaned::RowsAtCompileTime,
+    ColsAtCompileTime = RhsCleaned::ColsAtCompileTime,
+    MaxRowsAtCompileTime = LhsCleaned::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = RhsCleaned::MaxColsAtCompileTime,
+    Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0), // TODO should be no storage order
+    CoeffReadCost = 0 // TODO CoeffReadCost should not be part of the expression traits
+  };
+};
+} // end namespace internal
+
+
+template<typename Lhs, typename Rhs>
+class Product : public ProductImpl<Lhs,Rhs,typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
+                                                                            typename internal::traits<Rhs>::StorageKind>::ret>
+{
+  public:
+    
+    typedef typename ProductImpl<
+        Lhs, Rhs,
+        typename internal::promote_storage_type<typename Lhs::StorageKind,
+                                                typename Rhs::StorageKind>::ret>::Base Base;
+    EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
+
+    typedef typename Lhs::Nested LhsNested;
+    typedef typename Rhs::Nested RhsNested;
+    typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
+    typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;
+
+    Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs)
+    {
+      eigen_assert(lhs.cols() == rhs.rows()
+        && "invalid matrix product"
+        && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
+    }
+
+    inline Index rows() const { return m_lhs.rows(); }
+    inline Index cols() const { return m_rhs.cols(); }
+
+    const LhsNestedCleaned& lhs() const { return m_lhs; }
+    const RhsNestedCleaned& rhs() const { return m_rhs; }
+
+  protected:
+
+    const LhsNested m_lhs;
+    const RhsNested m_rhs;
+};
+
+template<typename Lhs, typename Rhs>
+class ProductImpl<Lhs,Rhs,Dense> : public internal::dense_xpr_base<Product<Lhs,Rhs> >::type
+{
+    typedef Product<Lhs, Rhs> Derived;
+  public:
+
+    typedef typename internal::dense_xpr_base<Product<Lhs, Rhs> >::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+};
+
+#endif // EIGEN_PRODUCT_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/ProductBase.h b/resources/3rdParty/eigen/Eigen/src/Core/ProductBase.h
new file mode 100644
index 000000000..ec12e5c9f
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/ProductBase.h
@@ -0,0 +1,278 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PRODUCTBASE_H
+#define EIGEN_PRODUCTBASE_H
+
+namespace Eigen { 
+
+/** \class ProductBase
+  * \ingroup Core_Module
+  *
+  */
+
+namespace internal {
+template<typename Derived, typename _Lhs, typename _Rhs>
+struct traits<ProductBase<Derived,_Lhs,_Rhs> >
+{
+  typedef MatrixXpr XprKind;
+  typedef typename remove_all<_Lhs>::type Lhs;
+  typedef typename remove_all<_Rhs>::type Rhs;
+  typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+  typedef typename promote_storage_type<typename traits<Lhs>::StorageKind,
+                                           typename traits<Rhs>::StorageKind>::ret StorageKind;
+  typedef typename promote_index_type<typename traits<Lhs>::Index,
+                                         typename traits<Rhs>::Index>::type Index;
+  enum {
+    RowsAtCompileTime = traits<Lhs>::RowsAtCompileTime,
+    ColsAtCompileTime = traits<Rhs>::ColsAtCompileTime,
+    MaxRowsAtCompileTime = traits<Lhs>::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = traits<Rhs>::MaxColsAtCompileTime,
+    Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0)
+          | EvalBeforeNestingBit | EvalBeforeAssigningBit | NestByRefBit,
+                  // Note that EvalBeforeNestingBit and NestByRefBit
+                  // are not used in practice because nested is overloaded for products
+    CoeffReadCost = 0 // FIXME why is it needed ?
+  };
+};
+}
+
+#define EIGEN_PRODUCT_PUBLIC_INTERFACE(Derived) \
+  typedef ProductBase<Derived, Lhs, Rhs > Base; \
+  EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
+  typedef typename Base::LhsNested LhsNested; \
+  typedef typename Base::_LhsNested _LhsNested; \
+  typedef typename Base::LhsBlasTraits LhsBlasTraits; \
+  typedef typename Base::ActualLhsType ActualLhsType; \
+  typedef typename Base::_ActualLhsType _ActualLhsType; \
+  typedef typename Base::RhsNested RhsNested; \
+  typedef typename Base::_RhsNested _RhsNested; \
+  typedef typename Base::RhsBlasTraits RhsBlasTraits; \
+  typedef typename Base::ActualRhsType ActualRhsType; \
+  typedef typename Base::_ActualRhsType _ActualRhsType; \
+  using Base::m_lhs; \
+  using Base::m_rhs;
+
+template<typename Derived, typename Lhs, typename Rhs>
+class ProductBase : public MatrixBase<Derived>
+{
+  public:
+    typedef MatrixBase<Derived> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(ProductBase)
+    
+    typedef typename Lhs::Nested LhsNested;
+    typedef typename internal::remove_all<LhsNested>::type _LhsNested;
+    typedef internal::blas_traits<_LhsNested> LhsBlasTraits;
+    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
+    typedef typename internal::remove_all<ActualLhsType>::type _ActualLhsType;
+    typedef typename internal::traits<Lhs>::Scalar LhsScalar;
+
+    typedef typename Rhs::Nested RhsNested;
+    typedef typename internal::remove_all<RhsNested>::type _RhsNested;
+    typedef internal::blas_traits<_RhsNested> RhsBlasTraits;
+    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
+    typedef typename internal::remove_all<ActualRhsType>::type _ActualRhsType;
+    typedef typename internal::traits<Rhs>::Scalar RhsScalar;
+
+    // Diagonal of a product: no need to evaluate the arguments because they are going to be evaluated only once
+    typedef CoeffBasedProduct<LhsNested, RhsNested, 0> FullyLazyCoeffBaseProductType;
+
+  public:
+
+    typedef typename Base::PlainObject PlainObject;
+
+    ProductBase(const Lhs& lhs, const Rhs& rhs)
+      : m_lhs(lhs), m_rhs(rhs)
+    {
+      eigen_assert(lhs.cols() == rhs.rows()
+        && "invalid matrix product"
+        && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
+    }
+
+    inline Index rows() const { return m_lhs.rows(); }
+    inline Index cols() const { return m_rhs.cols(); }
+
+    template<typename Dest>
+    inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); }
+
+    template<typename Dest>
+    inline void addTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(1)); }
+
+    template<typename Dest>
+    inline void subTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(-1)); }
+
+    template<typename Dest>
+    inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { derived().scaleAndAddTo(dst,alpha); }
+
+    const _LhsNested& lhs() const { return m_lhs; }
+    const _RhsNested& rhs() const { return m_rhs; }
+
+    // Implicit conversion to the nested type (triggers the evaluation of the product)
+    operator const PlainObject& () const
+    {
+      m_result.resize(m_lhs.rows(), m_rhs.cols());
+      derived().evalTo(m_result);
+      return m_result;
+    }
+
+    const Diagonal<const FullyLazyCoeffBaseProductType,0> diagonal() const
+    { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
+
+    template<int Index>
+    const Diagonal<FullyLazyCoeffBaseProductType,Index> diagonal() const
+    { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
+
+    const Diagonal<FullyLazyCoeffBaseProductType,Dynamic> diagonal(Index index) const
+    { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); }
+
+    // restrict coeff accessors to 1x1 expressions. No need to care about mutators here since this isn't an lvalue expression
+    typename Base::CoeffReturnType coeff(Index row, Index col) const
+    {
+#ifdef EIGEN2_SUPPORT
+      return lhs().row(row).cwiseProduct(rhs().col(col).transpose()).sum();
+#else
+      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+      eigen_assert(this->rows() == 1 && this->cols() == 1);
+      Matrix<Scalar,1,1> result = *this;
+      return result.coeff(row,col);
+#endif
+    }
+
+    typename Base::CoeffReturnType coeff(Index i) const
+    {
+      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+      eigen_assert(this->rows() == 1 && this->cols() == 1);
+      Matrix<Scalar,1,1> result = *this;
+      return result.coeff(i);
+    }
+
+    const Scalar& coeffRef(Index row, Index col) const
+    {
+      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+      eigen_assert(this->rows() == 1 && this->cols() == 1);
+      return derived().coeffRef(row,col);
+    }
+
+    const Scalar& coeffRef(Index i) const
+    {
+      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+      eigen_assert(this->rows() == 1 && this->cols() == 1);
+      return derived().coeffRef(i);
+    }
+
+  protected:
+
+    LhsNested m_lhs;
+    RhsNested m_rhs;
+
+    mutable PlainObject m_result;
+};
+
+// here we need to overload the nested rule for products
+// such that the nested type is a const reference to a plain matrix
+namespace internal {
+template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
+struct nested<GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
+{
+  typedef PlainObject const& type;
+};
+}
+
+template<typename NestedProduct>
+class ScaledProduct;
+
+// Note that these operator* functions are not defined as member
+// functions of ProductBase, because otherwise we would have to
+// define all the overloads defined in MatrixBase. Furthermore, using
+// "using Base::operator*" would not work with MSVC.
+//
+// Also note that here we accept any compatible scalar types
+template<typename Derived,typename Lhs,typename Rhs>
+const ScaledProduct<Derived>
+operator*(const ProductBase<Derived,Lhs,Rhs>& prod, typename Derived::Scalar x)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
+
+template<typename Derived,typename Lhs,typename Rhs>
+typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
+                      const ScaledProduct<Derived> >::type
+operator*(const ProductBase<Derived,Lhs,Rhs>& prod, typename Derived::RealScalar x)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
+
+
+template<typename Derived,typename Lhs,typename Rhs>
+const ScaledProduct<Derived>
+operator*(typename Derived::Scalar x,const ProductBase<Derived,Lhs,Rhs>& prod)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
+
+template<typename Derived,typename Lhs,typename Rhs>
+typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
+                      const ScaledProduct<Derived> >::type
+operator*(typename Derived::RealScalar x,const ProductBase<Derived,Lhs,Rhs>& prod)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
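+
+// A minimal usage sketch for these overloads (illustrative comment only; assumes
+// #include <Eigen/Dense> and dynamic-size double matrices):
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
+//   Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 4);
+//   Eigen::MatrixXd C = 2.0 * (A * B);   // builds a ScaledProduct; the factor 2.0 is
+//                                        // forwarded to scaleAndAddTo() instead of
+//                                        // scaling a temporary result afterwards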
+
+namespace internal {
+template<typename NestedProduct>
+struct traits<ScaledProduct<NestedProduct> >
+ : traits<ProductBase<ScaledProduct<NestedProduct>,
+                         typename NestedProduct::_LhsNested,
+                         typename NestedProduct::_RhsNested> >
+{
+  typedef typename traits<NestedProduct>::StorageKind StorageKind;
+};
+}
+
+template<typename NestedProduct>
+class ScaledProduct
+  : public ProductBase<ScaledProduct<NestedProduct>,
+                       typename NestedProduct::_LhsNested,
+                       typename NestedProduct::_RhsNested>
+{
+  public:
+    typedef ProductBase<ScaledProduct<NestedProduct>,
+                       typename NestedProduct::_LhsNested,
+                       typename NestedProduct::_RhsNested> Base;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::PlainObject PlainObject;
+//     EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct)
+
+    ScaledProduct(const NestedProduct& prod, Scalar x)
+    : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {}
+
+    template<typename Dest>
+    inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst, Scalar(1)); }
+
+    template<typename Dest>
+    inline void addTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(1)); }
+
+    template<typename Dest>
+    inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); }
+
+    template<typename Dest>
+    inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { m_prod.derived().scaleAndAddTo(dst,alpha * m_alpha); }
+
+    const Scalar& alpha() const { return m_alpha; }
+    
+  protected:
+    const NestedProduct& m_prod;
+    Scalar m_alpha;
+};
+
+/** \internal
+  * Overloaded to perform an efficient C = (A*B).lazy() */
+template<typename Derived>
+template<typename ProductDerived, typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+{
+  other.derived().evalTo(derived());
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_PRODUCTBASE_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Random.h b/resources/3rdParty/eigen/Eigen/src/Core/Random.h
new file mode 100644
index 000000000..a9f7f4346
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Random.h
@@ -0,0 +1,152 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_RANDOM_H
+#define EIGEN_RANDOM_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Scalar> struct scalar_random_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_random_op)
+  template<typename Index>
+  inline const Scalar operator() (Index, Index = 0) const { return random<Scalar>(); }
+};
+
+template<typename Scalar>
+struct functor_traits<scalar_random_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false, IsRepeatable = false }; };
+
+} // end namespace internal
+
+/** \returns a random matrix expression
+  *
+  * The parameters \a rows and \a cols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this MatrixBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_random_int_int.cpp
+  * Output: \verbinclude MatrixBase_random_int_int.out
+  *
+  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+  * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
+  * behavior with expressions involving random matrices.
+  *
+  * \sa MatrixBase::setRandom(), MatrixBase::Random(Index), MatrixBase::Random()
+  */
+template<typename Derived>
+inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
+DenseBase<Derived>::Random(Index rows, Index cols)
+{
+  return NullaryExpr(rows, cols, internal::scalar_random_op<Scalar>());
+}
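+
+// Minimal usage sketch (illustrative comment only; assumes #include <Eigen/Dense>):
+//   Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 4);  // 3x4 matrix, coefficients
+//                                                       // uniformly drawn from [-1, 1]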
+
+/** \returns a random vector expression
+  *
+  * The parameter \a size is the size of the returned vector.
+  * Must be compatible with this MatrixBase type.
+  *
+  * \only_for_vectors
+  *
+  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
+  * it is redundant to pass \a size as argument, so Random() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_random_int.cpp
+  * Output: \verbinclude MatrixBase_random_int.out
+  *
+  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+  * a temporary vector whenever it is nested in a larger expression. This prevents unexpected
+  * behavior with expressions involving random matrices.
+  *
+  * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random()
+  */
+template<typename Derived>
+inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
+DenseBase<Derived>::Random(Index size)
+{
+  return NullaryExpr(size, internal::scalar_random_op<Scalar>());
+}
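+
+// Minimal usage sketch (illustrative comment only; assumes #include <Eigen/Dense>):
+//   Eigen::VectorXd v = Eigen::VectorXd::Random(5);  // dynamic-size vector of 5 random coefficients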
+
+/** \returns a fixed-size random matrix or vector expression
+  *
+  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
+  * need to use the variants taking size arguments.
+  *
+  * Example: \include MatrixBase_random.cpp
+  * Output: \verbinclude MatrixBase_random.out
+  *
+  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+  * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
+  * behavior with expressions involving random matrices.
+  *
+  * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random(Index)
+  */
+template<typename Derived>
+inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
+DenseBase<Derived>::Random()
+{
+  return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_random_op<Scalar>());
+}
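+
+// Minimal usage sketch (illustrative comment only; assumes #include <Eigen/Dense>):
+//   Eigen::Matrix3f r = Eigen::Matrix3f::Random();  // fixed-size 3x3, no size arguments needed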
+
+/** Sets all coefficients in this expression to random values.
+  *
+  * Example: \include MatrixBase_setRandom.cpp
+  * Output: \verbinclude MatrixBase_setRandom.out
+  *
+  * \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
+  */
+template<typename Derived>
+inline Derived& DenseBase<Derived>::setRandom()
+{
+  return *this = Random(rows(), cols());
+}
+
+/** Resizes to the given \a size, and sets all coefficients in this expression to random values.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include Matrix_setRandom_int.cpp
+  * Output: \verbinclude Matrix_setRandom_int.out
+  *
+  * \sa MatrixBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, MatrixBase::Random()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setRandom(Index size)
+{
+  resize(size);
+  return setRandom();
+}
+
+/** Resizes to the given size, and sets all coefficients in this expression to random values.
+  *
+  * \param rows the new number of rows
+  * \param cols the new number of columns
+  *
+  * Example: \include Matrix_setRandom_int_int.cpp
+  * Output: \verbinclude Matrix_setRandom_int_int.out
+  *
+  * \sa MatrixBase::setRandom(), setRandom(Index), class CwiseNullaryOp, MatrixBase::Random()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setRandom(Index rows, Index cols)
+{
+  resize(rows, cols);
+  return setRandom();
+}
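+
+// Minimal usage sketch covering the setRandom() overloads above (illustrative comment
+// only; assumes #include <Eigen/Dense>):
+//   Eigen::Matrix2d a;
+//   a.setRandom();             // fills the fixed-size 2x2 matrix in place
+//   Eigen::VectorXd v;
+//   v.setRandom(5);            // resizes to 5 coefficients, then fills
+//   Eigen::MatrixXd m;
+//   m.setRandom(2, 3);         // resizes to 2x3, then fills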
+
+} // end namespace Eigen
+
+#endif // EIGEN_RANDOM_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Redux.h b/resources/3rdParty/eigen/Eigen/src/Core/Redux.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/Redux.h
rename to resources/3rdParty/eigen/Eigen/src/Core/Redux.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Replicate.h b/resources/3rdParty/eigen/Eigen/src/Core/Replicate.h
new file mode 100644
index 000000000..b61fdc29e
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Replicate.h
@@ -0,0 +1,177 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REPLICATE_H
+#define EIGEN_REPLICATE_H
+
+namespace Eigen { 
+
+/**
+  * \class Replicate
+  * \ingroup Core_Module
+  *
+  * \brief Expression of the multiple replication of a matrix or vector
+  *
+  * \param MatrixType the type of the object we are replicating
+  *
+  * This class represents an expression of the multiple replication of a matrix or vector.
+  * It is the return type of DenseBase::replicate() and most of the time
+  * this is the only way it is used.
+  *
+  * \sa DenseBase::replicate()
+  */
+
+namespace internal {
+template<typename MatrixType,int RowFactor,int ColFactor>
+struct traits<Replicate<MatrixType,RowFactor,ColFactor> >
+ : traits<MatrixType>
+{
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename traits<MatrixType>::StorageKind StorageKind;
+  typedef typename traits<MatrixType>::XprKind XprKind;
+  enum {
+    Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
+  };
+  typedef typename nested<MatrixType,Factor>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+  enum {
+    RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic
+                      ? Dynamic
+                      : RowFactor * MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = ColFactor==Dynamic || int(MatrixType::ColsAtCompileTime)==Dynamic
+                      ? Dynamic
+                      : ColFactor * MatrixType::ColsAtCompileTime,
+   //FIXME we don't propagate the max sizes !!!
+    MaxRowsAtCompileTime = RowsAtCompileTime,
+    MaxColsAtCompileTime = ColsAtCompileTime,
+    IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1
+               : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0
+               : (MatrixType::Flags & RowMajorBit) ? 1 : 0,
+    Flags = (_MatrixTypeNested::Flags & HereditaryBits & ~RowMajorBit) | (IsRowMajor ? RowMajorBit : 0),
+    CoeffReadCost = _MatrixTypeNested::CoeffReadCost
+  };
+};
+}
+
+template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
+  : public internal::dense_xpr_base< Replicate<MatrixType,RowFactor,ColFactor> >::type
+{
+    typedef typename internal::traits<Replicate>::MatrixTypeNested MatrixTypeNested;
+    typedef typename internal::traits<Replicate>::_MatrixTypeNested _MatrixTypeNested;
+  public:
+
+    typedef typename internal::dense_xpr_base<Replicate>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)
+
+    template<typename OriginalMatrixType>
+    inline explicit Replicate(const OriginalMatrixType& matrix)
+      : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor)
+    {
+      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
+                          THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
+      eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic);
+    }
+
+    template<typename OriginalMatrixType>
+    inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor)
+      : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor)
+    {
+      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
+                          THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
+    }
+
+    inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
+    inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
+
+    inline Scalar coeff(Index row, Index col) const
+    {
+      // try to avoid using modulo; this is a pure optimization strategy
+      const Index actual_row  = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
+                            : RowFactor==1 ? row
+                            : row%m_matrix.rows();
+      const Index actual_col  = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
+                            : ColFactor==1 ? col
+                            : col%m_matrix.cols();
+
+      return m_matrix.coeff(actual_row, actual_col);
+    }
+    template<int LoadMode>
+    inline PacketScalar packet(Index row, Index col) const
+    {
+      const Index actual_row  = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
+                            : RowFactor==1 ? row
+                            : row%m_matrix.rows();
+      const Index actual_col  = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
+                            : ColFactor==1 ? col
+                            : col%m_matrix.cols();
+
+      return m_matrix.template packet<LoadMode>(actual_row, actual_col);
+    }
+
+    const _MatrixTypeNested& nestedExpression() const
+    { 
+      return m_matrix; 
+    }
+
+  protected:
+    MatrixTypeNested m_matrix;
+    const internal::variable_if_dynamic<Index, RowFactor> m_rowFactor;
+    const internal::variable_if_dynamic<Index, ColFactor> m_colFactor;
+};
+
+/**
+  * \return an expression of the replication of \c *this
+  *
+  * Example: \include MatrixBase_replicate.cpp
+  * Output: \verbinclude MatrixBase_replicate.out
+  *
+  * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate
+  */
+template<typename Derived>
+template<int RowFactor, int ColFactor>
+inline const Replicate<Derived,RowFactor,ColFactor>
+DenseBase<Derived>::replicate() const
+{
+  return Replicate<Derived,RowFactor,ColFactor>(derived());
+}
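+
+// Minimal usage sketch (illustrative comment only; assumes #include <Eigen/Dense>):
+//   Eigen::Vector3d v(1.0, 2.0, 3.0);
+//   Eigen::MatrixXd m = v.replicate<1, 4>();  // 3x4 matrix whose four columns are all copies of v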
+
+/**
+  * \return an expression of the replication of \c *this
+  *
+  * Example: \include MatrixBase_replicate_int_int.cpp
+  * Output: \verbinclude MatrixBase_replicate_int_int.out
+  *
+  * \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
+  */
+template<typename Derived>
+inline const Replicate<Derived,Dynamic,Dynamic>
+DenseBase<Derived>::replicate(Index rowFactor,Index colFactor) const
+{
+  return Replicate<Derived,Dynamic,Dynamic>(derived(),rowFactor,colFactor);
+}
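+
+// Minimal usage sketch (illustrative comment only; assumes #include <Eigen/Dense>):
+//   Eigen::MatrixXd block = Eigen::MatrixXd::Random(2, 2);
+//   Eigen::MatrixXd tiled = block.replicate(3, 2);  // 6x4 matrix built from 3x2 copies of 'block'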
+
+/**
+  * \return an expression of the replication of each column (or row) of \c *this
+  *
+  * Example: \include DirectionWise_replicate_int.cpp
+  * Output: \verbinclude DirectionWise_replicate_int.out
+  *
+  * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate
+  */
+template<typename ExpressionType, int Direction>
+const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
+VectorwiseOp<ExpressionType,Direction>::replicate(Index factor) const
+{
+  return typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
+          (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);
+}
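+
+// Minimal usage sketch (illustrative comment only; assumes #include <Eigen/Dense>):
+//   Eigen::Vector3i v(7, 9, 12);
+//   Eigen::MatrixXi m = v.rowwise().replicate(5);  // 3x5 matrix, every column equal to v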
+
+} // end namespace Eigen
+
+#endif // EIGEN_REPLICATE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/ReturnByValue.h b/resources/3rdParty/eigen/Eigen/src/Core/ReturnByValue.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/ReturnByValue.h
rename to resources/3rdParty/eigen/Eigen/src/Core/ReturnByValue.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Reverse.h b/resources/3rdParty/eigen/Eigen/src/Core/Reverse.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/Reverse.h
rename to resources/3rdParty/eigen/Eigen/src/Core/Reverse.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Select.h b/resources/3rdParty/eigen/Eigen/src/Core/Select.h
new file mode 100644
index 000000000..2bf6e91d0
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Select.h
@@ -0,0 +1,162 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SELECT_H
+#define EIGEN_SELECT_H
+
+namespace Eigen { 
+
+/** \class Select
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a coefficient-wise version of the C++ ternary operator ?:
+  *
+  * \param ConditionMatrixType the type of the \em condition expression which must be a boolean matrix
+  * \param ThenMatrixType the type of the \em then expression
+  * \param ElseMatrixType the type of the \em else expression
+  *
+  * This class represents an expression of a coefficient-wise version of the C++ ternary operator ?:.
+  * It is the return type of DenseBase::select() and most of the time this is the only way it is used.
+  *
+  * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const
+  */
+
+namespace internal {
+template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
+struct traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
+ : traits<ThenMatrixType>
+{
+  typedef typename traits<ThenMatrixType>::Scalar Scalar;
+  typedef Dense StorageKind;
+  typedef typename traits<ThenMatrixType>::XprKind XprKind;
+  typedef typename ConditionMatrixType::Nested ConditionMatrixNested;
+  typedef typename ThenMatrixType::Nested ThenMatrixNested;
+  typedef typename ElseMatrixType::Nested ElseMatrixNested;
+  enum {
+    RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime,
+    Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits,
+    CoeffReadCost = traits<typename remove_all<ConditionMatrixNested>::type>::CoeffReadCost
+                  + EIGEN_SIZE_MAX(traits<typename remove_all<ThenMatrixNested>::type>::CoeffReadCost,
+                                   traits<typename remove_all<ElseMatrixNested>::type>::CoeffReadCost)
+  };
+};
+}
+
+template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
+class Select : internal::no_assignment_operator,
+  public internal::dense_xpr_base< Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<Select>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Select)
+
+    Select(const ConditionMatrixType& conditionMatrix,
+           const ThenMatrixType& thenMatrix,
+           const ElseMatrixType& elseMatrix)
+      : m_condition(conditionMatrix), m_then(thenMatrix), m_else(elseMatrix)
+    {
+      eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());
+      eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
+    }
+
+    Index rows() const { return m_condition.rows(); }
+    Index cols() const { return m_condition.cols(); }
+
+    const Scalar coeff(Index i, Index j) const
+    {
+      if (m_condition.coeff(i,j))
+        return m_then.coeff(i,j);
+      else
+        return m_else.coeff(i,j);
+    }
+
+    const Scalar coeff(Index i) const
+    {
+      if (m_condition.coeff(i))
+        return m_then.coeff(i);
+      else
+        return m_else.coeff(i);
+    }
+
+    const ConditionMatrixType& conditionMatrix() const
+    {
+      return m_condition;
+    }
+
+    const ThenMatrixType& thenMatrix() const
+    {
+      return m_then;
+    }
+
+    const ElseMatrixType& elseMatrix() const
+    {
+      return m_else;
+    }
+
+  protected:
+    typename ConditionMatrixType::Nested m_condition;
+    typename ThenMatrixType::Nested m_then;
+    typename ElseMatrixType::Nested m_else;
+};
+
+
+/** \returns a matrix where each coefficient (i,j) is equal to \a thenMatrix(i,j)
+  * if \c *this(i,j), and \a elseMatrix(i,j) otherwise.
+  *
+  * Example: \include MatrixBase_select.cpp
+  * Output: \verbinclude MatrixBase_select.out
+  *
+  * \sa class Select
+  */
+template<typename Derived>
+template<typename ThenDerived,typename ElseDerived>
+inline const Select<Derived,ThenDerived,ElseDerived>
+DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
+                            const DenseBase<ElseDerived>& elseMatrix) const
+{
+  return Select<Derived,ThenDerived,ElseDerived>(derived(), thenMatrix.derived(), elseMatrix.derived());
+}
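+
+// Minimal usage sketch (illustrative comment only; assumes #include <Eigen/Dense>):
+//   Eigen::MatrixXi m = Eigen::MatrixXi::Random(3, 3);
+//   Eigen::MatrixXi n = (m.array() >= 0).select(m, -m);  // coefficient-wise: m(i,j) if m(i,j) >= 0, else -m(i,j)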
+
+/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
+  * the \em else expression being a scalar value.
+  *
+  * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
+  */
+template<typename Derived>
+template<typename ThenDerived>
+inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
+DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
+                            typename ThenDerived::Scalar elseScalar) const
+{
+  return Select<Derived,ThenDerived,typename ThenDerived::ConstantReturnType>(
+    derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar));
+}
+
+/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
+  * the \em then expression being a scalar value.
+  *
+  * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
+  */
+template<typename Derived>
+template<typename ElseDerived>
+inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
+DenseBase<Derived>::select(typename ElseDerived::Scalar thenScalar,
+                            const DenseBase<ElseDerived>& elseMatrix) const
+{
+  return Select<Derived,typename ElseDerived::ConstantReturnType,ElseDerived>(
+    derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived());
+}
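+
+// Minimal usage sketch of the scalar variants above (illustrative comment only; assumes
+// #include <Eigen/Dense>):
+//   Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
+//   Eigen::MatrixXd clamped = (m.array() > 0.0).select(m, 0.0);  // keep positive entries, zero out the rest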
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELECT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/SelfAdjointView.h b/resources/3rdParty/eigen/Eigen/src/Core/SelfAdjointView.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/SelfAdjointView.h
rename to resources/3rdParty/eigen/Eigen/src/Core/SelfAdjointView.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h b/resources/3rdParty/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h
rename to resources/3rdParty/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/SolveTriangular.h b/resources/3rdParty/eigen/Eigen/src/Core/SolveTriangular.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/SolveTriangular.h
rename to resources/3rdParty/eigen/Eigen/src/Core/SolveTriangular.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/StableNorm.h b/resources/3rdParty/eigen/Eigen/src/Core/StableNorm.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/StableNorm.h
rename to resources/3rdParty/eigen/Eigen/src/Core/StableNorm.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Stride.h b/resources/3rdParty/eigen/Eigen/src/Core/Stride.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/Stride.h
rename to resources/3rdParty/eigen/Eigen/src/Core/Stride.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Swap.h b/resources/3rdParty/eigen/Eigen/src/Core/Swap.h
new file mode 100644
index 000000000..fd73cf3ad
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Swap.h
@@ -0,0 +1,126 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SWAP_H
+#define EIGEN_SWAP_H
+
+namespace Eigen { 
+
+/** \class SwapWrapper
+  * \ingroup Core_Module
+  *
+  * \internal
+  *
+  * \brief Internal helper class for swapping two expressions
+  */
+namespace internal {
+template<typename ExpressionType>
+struct traits<SwapWrapper<ExpressionType> > : traits<ExpressionType> {};
+}
+
+template<typename ExpressionType> class SwapWrapper
+  : public internal::dense_xpr_base<SwapWrapper<ExpressionType> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<SwapWrapper>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(SwapWrapper)
+    typedef typename internal::packet_traits<Scalar>::type Packet;
+
+    inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {}
+
+    inline Index rows() const { return m_expression.rows(); }
+    inline Index cols() const { return m_expression.cols(); }
+    inline Index outerStride() const { return m_expression.outerStride(); }
+    inline Index innerStride() const { return m_expression.innerStride(); }
+    
+    typedef typename internal::conditional<
+                       internal::is_lvalue<ExpressionType>::value,
+                       Scalar,
+                       const Scalar
+                     >::type ScalarWithConstIfNotLvalue;
+                     
+    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
+    inline const Scalar* data() const { return m_expression.data(); }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      return m_expression.const_cast_derived().coeffRef(row, col);
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    inline Scalar& coeffRef(Index row, Index col) const
+    {
+      return m_expression.coeffRef(row, col);
+    }
+
+    inline Scalar& coeffRef(Index index) const
+    {
+      return m_expression.coeffRef(index);
+    }
+
+    template<typename OtherDerived>
+    void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(row >= 0 && row < rows()
+                         && col >= 0 && col < cols());
+      Scalar tmp = m_expression.coeff(row, col);
+      m_expression.coeffRef(row, col) = _other.coeff(row, col);
+      _other.coeffRef(row, col) = tmp;
+    }
+
+    template<typename OtherDerived>
+    void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(index >= 0 && index < m_expression.size());
+      Scalar tmp = m_expression.coeff(index);
+      m_expression.coeffRef(index) = _other.coeff(index);
+      _other.coeffRef(index) = tmp;
+    }
+
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      Packet tmp = m_expression.template packet<StoreMode>(row, col);
+      m_expression.template writePacket<StoreMode>(row, col,
+        _other.template packet<LoadMode>(row, col)
+      );
+      _other.template writePacket<LoadMode>(row, col, tmp);
+    }
+
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    void copyPacket(Index index, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(index >= 0 && index < m_expression.size());
+      Packet tmp = m_expression.template packet<StoreMode>(index);
+      m_expression.template writePacket<StoreMode>(index,
+        _other.template packet<LoadMode>(index)
+      );
+      _other.template writePacket<LoadMode>(index, tmp);
+    }
+
+    ExpressionType& expression() const { return m_expression; }
+
+  protected:
+    ExpressionType& m_expression;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SWAP_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Transpose.h b/resources/3rdParty/eigen/Eigen/src/Core/Transpose.h
new file mode 100644
index 000000000..045a1cce6
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Transpose.h
@@ -0,0 +1,414 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRANSPOSE_H
+#define EIGEN_TRANSPOSE_H
+
+namespace Eigen { 
+
+/** \class Transpose
+  * \ingroup Core_Module
+  *
+  * \brief Expression of the transpose of a matrix
+  *
+  * \param MatrixType the type of the object of which we are taking the transpose
+  *
+  * This class represents an expression of the transpose of a matrix.
+  * It is the return type of MatrixBase::transpose() and MatrixBase::adjoint()
+  * and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::transpose(), MatrixBase::adjoint()
+  */
+
+namespace internal {
+template<typename MatrixType>
+struct traits<Transpose<MatrixType> > : traits<MatrixType>
+{
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedPlain;
+  typedef typename traits<MatrixType>::StorageKind StorageKind;
+  typedef typename traits<MatrixType>::XprKind XprKind;
+  enum {
+    RowsAtCompileTime = MatrixType::ColsAtCompileTime,
+    ColsAtCompileTime = MatrixType::RowsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
+    Flags0 = MatrixTypeNestedPlain::Flags & ~(LvalueBit | NestByRefBit),
+    Flags1 = Flags0 | FlagsLvalueBit,
+    Flags = Flags1 ^ RowMajorBit,
+    CoeffReadCost = MatrixTypeNestedPlain::CoeffReadCost,
+    InnerStrideAtCompileTime = inner_stride_at_compile_time<MatrixType>::ret,
+    OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret
+  };
+};
+}
+
+template<typename MatrixType, typename StorageKind> class TransposeImpl;
+
+template<typename MatrixType> class Transpose
+  : public TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>
+{
+  public:
+
+    typedef typename TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;
+    EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)
+
+    inline Transpose(MatrixType& matrix) : m_matrix(matrix) {}
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
+
+    inline Index rows() const { return m_matrix.cols(); }
+    inline Index cols() const { return m_matrix.rows(); }
+
+    /** \returns the nested expression */
+    const typename internal::remove_all<typename MatrixType::Nested>::type&
+    nestedExpression() const { return m_matrix; }
+
+    /** \returns the nested expression */
+    typename internal::remove_all<typename MatrixType::Nested>::type&
+    nestedExpression() { return m_matrix.const_cast_derived(); }
+
+  protected:
+    typename MatrixType::Nested m_matrix;
+};
+
+namespace internal {
+
+template<typename MatrixType, bool HasDirectAccess = has_direct_access<MatrixType>::ret>
+struct TransposeImpl_base
+{
+  typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;
+};
+
+template<typename MatrixType>
+struct TransposeImpl_base<MatrixType, false>
+{
+  typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;
+};
+
+} // end namespace internal
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
+  : public internal::TransposeImpl_base<MatrixType>::type
+{
+  public:
+
+    typedef typename internal::TransposeImpl_base<MatrixType>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+
+    inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
+    inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
+
+    typedef typename internal::conditional<
+                       internal::is_lvalue<MatrixType>::value,
+                       Scalar,
+                       const Scalar
+                     >::type ScalarWithConstIfNotLvalue;
+
+    inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
+    inline const Scalar* data() const { return derived().nestedExpression().data(); }
+
+    inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+      return derived().nestedExpression().const_cast_derived().coeffRef(col, row);
+    }
+
+    inline ScalarWithConstIfNotLvalue& coeffRef(Index index)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+      return derived().nestedExpression().const_cast_derived().coeffRef(index);
+    }
+
+    inline const Scalar& coeffRef(Index row, Index col) const
+    {
+      return derived().nestedExpression().coeffRef(col, row);
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      return derived().nestedExpression().coeffRef(index);
+    }
+
+    inline CoeffReturnType coeff(Index row, Index col) const
+    {
+      return derived().nestedExpression().coeff(col, row);
+    }
+
+    inline CoeffReturnType coeff(Index index) const
+    {
+      return derived().nestedExpression().coeff(index);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index row, Index col) const
+    {
+      return derived().nestedExpression().template packet<LoadMode>(col, row);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index row, Index col, const PacketScalar& x)
+    {
+      derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(col, row, x);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index index) const
+    {
+      return derived().nestedExpression().template packet<LoadMode>(index);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& x)
+    {
+      derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(index, x);
+    }
+};
+
+/** \returns an expression of the transpose of *this.
+  *
+  * Example: \include MatrixBase_transpose.cpp
+  * Output: \verbinclude MatrixBase_transpose.out
+  *
+  * \warning If you want to replace a matrix by its own transpose, do \b NOT do this:
+  * \code
+  * m = m.transpose(); // bug!!! caused by aliasing effect
+  * \endcode
+  * Instead, use the transposeInPlace() method:
+  * \code
+  * m.transposeInPlace();
+  * \endcode
+  * which gives Eigen good opportunities for optimization, or alternatively you can also do:
+  * \code
+  * m = m.transpose().eval();
+  * \endcode
+  *
+  * \sa transposeInPlace(), adjoint() */
+template<typename Derived>
+inline Transpose<Derived>
+DenseBase<Derived>::transpose()
+{
+  return derived();
+}
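+
+// Minimal usage sketch (illustrative comment only; assumes #include <Eigen/Dense>):
+//   Eigen::MatrixXd a = Eigen::MatrixXd::Random(2, 3);
+//   Eigen::MatrixXd b = a.transpose();   // fine: 'b' is a distinct destination
+//   // a = a.transpose();                // would alias; use a.transposeInPlace() instead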
+
+/** This is the const version of transpose().
+  *
+  * Make sure you read the warning for transpose() !
+  *
+  * \sa transposeInPlace(), adjoint() */
+template<typename Derived>
+inline const typename DenseBase<Derived>::ConstTransposeReturnType
+DenseBase<Derived>::transpose() const
+{
+  return ConstTransposeReturnType(derived());
+}
+
+/** \returns an expression of the adjoint (i.e. conjugate transpose) of *this.
+  *
+  * Example: \include MatrixBase_adjoint.cpp
+  * Output: \verbinclude MatrixBase_adjoint.out
+  *
+  * \warning If you want to replace a matrix by its own adjoint, do \b NOT do this:
+  * \code
+  * m = m.adjoint(); // bug!!! caused by aliasing effect
+  * \endcode
+  * Instead, use the adjointInPlace() method:
+  * \code
+  * m.adjointInPlace();
+  * \endcode
+  * which gives Eigen good opportunities for optimization, or alternatively you can also do:
+  * \code
+  * m = m.adjoint().eval();
+  * \endcode
+  *
+  * \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */
+template<typename Derived>
+inline const typename MatrixBase<Derived>::AdjointReturnType
+MatrixBase<Derived>::adjoint() const
+{
+  return this->transpose(); // in the complex case, the .conjugate() is implicit here
+                            // due to the implicit conversion to the return type
+}
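+
+// Minimal usage sketch (illustrative comment only; assumes #include <Eigen/Dense> and <complex>):
+//   Eigen::MatrixXcd m = Eigen::MatrixXcd::Random(2, 2);
+//   Eigen::MatrixXcd mh = m.adjoint();   // conjugate transpose; equals m.transpose() for real matrices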
+
+/***************************************************************************
+* "in place" transpose implementation
+***************************************************************************/
+
+namespace internal {
+
+template<typename MatrixType,
+  bool IsSquare = (MatrixType::RowsAtCompileTime == MatrixType::ColsAtCompileTime) && MatrixType::RowsAtCompileTime!=Dynamic>
+struct inplace_transpose_selector;
+
+template<typename MatrixType>
+struct inplace_transpose_selector<MatrixType,true> { // square matrix
+  static void run(MatrixType& m) {
+    m.template triangularView<StrictlyUpper>().swap(m.transpose());
+  }
+};
+
+template<typename MatrixType>
+struct inplace_transpose_selector<MatrixType,false> { // non square matrix
+  static void run(MatrixType& m) {
+    if (m.rows()==m.cols())
+      m.template triangularView<StrictlyUpper>().swap(m.transpose());
+    else
+      m = m.transpose().eval();
+  }
+};
+
+} // end namespace internal
+
+/** This is the "in place" version of transpose(): it replaces \c *this by its own transpose.
+  * Thus, doing
+  * \code
+  * m.transposeInPlace();
+  * \endcode
+  * has the same effect on m as doing
+  * \code
+  * m = m.transpose().eval();
+  * \endcode
+  * and is faster and also safer because in the latter line of code, forgetting the eval() results
+  * in a bug caused by aliasing.
+  *
+  * Notice however that this method is only useful if you want to replace a matrix by its own transpose.
+  * If you just need the transpose of a matrix, use transpose().
+  *
+  * \note if the matrix is not square, then \c *this must be a resizable matrix.
+  *
+  * \sa transpose(), adjoint(), adjointInPlace() */
+template<typename Derived>
+inline void DenseBase<Derived>::transposeInPlace()
+{
+  internal::inplace_transpose_selector<Derived>::run(derived());
+}
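+
+// Minimal usage sketch (illustrative comment only; assumes #include <Eigen/Dense>):
+//   Eigen::MatrixXd m = Eigen::MatrixXd::Random(2, 3);
+//   m.transposeInPlace();   // m is now 3x2; allowed because MatrixXd is resizable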
+
+/***************************************************************************
+* "in place" adjoint implementation
+***************************************************************************/
+
+/** This is the "in place" version of adjoint(): it replaces \c *this by its own transpose.
+  * Thus, doing
+  * \code
+  * m.adjointInPlace();
+  * \endcode
+  * has the same effect on m as doing
+  * \code
+  * m = m.adjoint().eval();
+  * \endcode
+  * and is faster and also safer because in the latter line of code, forgetting the eval() results
+  * in a bug caused by aliasing.
+  *
+  * Notice however that this method is only useful if you want to replace a matrix by its own adjoint.
+  * If you just need the adjoint of a matrix, use adjoint().
+  *
+  * \note if the matrix is not square, then \c *this must be a resizable matrix.
+  *
+  * \sa transpose(), adjoint(), transposeInPlace() */
+template<typename Derived>
+inline void MatrixBase<Derived>::adjointInPlace()
+{
+  derived() = adjoint().eval();
+}
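+
+// Minimal usage sketch (illustrative comment only; assumes #include <Eigen/Dense>):
+//   Eigen::MatrixXcd m = Eigen::MatrixXcd::Random(3, 3);
+//   m.adjointInPlace();   // replaces m by its own adjoint, avoiding the aliasing pitfall of m = m.adjoint()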
+
+#ifndef EIGEN_NO_DEBUG
+
+// The following is to detect aliasing problems in most common cases.
+
+namespace internal {
+
+template<typename BinOp,typename NestedXpr,typename Rhs>
+struct blas_traits<SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> >
+ : blas_traits<NestedXpr>
+{
+  typedef SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> XprType;
+  static inline const XprType extract(const XprType& x) { return x; }
+};
+
+template<bool DestIsTransposed, typename OtherDerived>
+struct check_transpose_aliasing_compile_time_selector
+{
+  enum { ret = bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed };
+};
+
+template<bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>
+struct check_transpose_aliasing_compile_time_selector<DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >
+{
+  enum { ret =    bool(blas_traits<DerivedA>::IsTransposed) != DestIsTransposed
+               || bool(blas_traits<DerivedB>::IsTransposed) != DestIsTransposed
+  };
+};
+
+template<typename Scalar, bool DestIsTransposed, typename OtherDerived>
+struct check_transpose_aliasing_run_time_selector
+{
+  static bool run(const Scalar* dest, const OtherDerived& src)
+  {
+    return (bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src));
+  }
+};
+
+template<typename Scalar, bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>
+struct check_transpose_aliasing_run_time_selector<Scalar,DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >
+{
+  static bool run(const Scalar* dest, const CwiseBinaryOp<BinOp,DerivedA,DerivedB>& src)
+  {
+    return ((blas_traits<DerivedA>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src.lhs())))
+        || ((blas_traits<DerivedB>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src.rhs())));
+  }
+};
+
+// The following selector, checkTransposeAliasing_impl, dispatches on MightHaveTransposeAliasing
+// because ICC emits a warning when the condition controlling the assert is known at compile time.
+// This is actually a good warning: in expressions that don't involve any transposing, the condition
+// is known at compile time to be false, and by exploiting that we can avoid generating the code of
+// the assert again and again for all the expressions that don't need it.
+
+template<typename Derived, typename OtherDerived,
+         bool MightHaveTransposeAliasing
+                 = check_transpose_aliasing_compile_time_selector
+                     <blas_traits<Derived>::IsTransposed,OtherDerived>::ret
+        >
+struct checkTransposeAliasing_impl
+{
+    static void run(const Derived& dst, const OtherDerived& other)
+    {
+        eigen_assert((!check_transpose_aliasing_run_time_selector
+                      <typename Derived::Scalar,blas_traits<Derived>::IsTransposed,OtherDerived>
+                      ::run(extract_data(dst), other))
+          && "aliasing detected during tranposition, use transposeInPlace() "
+             "or evaluate the rhs into a temporary using .eval()");
+
+    }
+};
+
+template<typename Derived, typename OtherDerived>
+struct checkTransposeAliasing_impl<Derived, OtherDerived, false>
+{
+    static void run(const Derived&, const OtherDerived&)
+    {
+    }
+};
+
+} // end namespace internal
+
+template<typename Derived>
+template<typename OtherDerived>
+void DenseBase<Derived>::checkTransposeAliasing(const OtherDerived& other) const
+{
+    internal::checkTransposeAliasing_impl<Derived, OtherDerived>::run(derived(), other);
+}
+#endif
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRANSPOSE_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Transpositions.h b/resources/3rdParty/eigen/Eigen/src/Core/Transpositions.h
new file mode 100644
index 000000000..2cd268a5f
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Transpositions.h
@@ -0,0 +1,436 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRANSPOSITIONS_H
+#define EIGEN_TRANSPOSITIONS_H
+
+namespace Eigen { 
+
+/** \class Transpositions
+  * \ingroup Core_Module
+  *
+  * \brief Represents a sequence of transpositions (row/column interchange)
+  *
+  * \param SizeAtCompileTime the number of transpositions, or Dynamic
+  * \param MaxSizeAtCompileTime the maximum number of transpositions, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
+  *
+  * This class represents a permutation transformation as a sequence of \em n transpositions
+  * \f$[T_{n-1} \ldots T_{i} \ldots T_{0}]\f$. It is internally stored as a vector of integers \c indices.
+  * Each transposition \f$ T_{i} \f$ applied on the left of a matrix (\f$ T_{i} M\f$) interchanges
+  * the rows \c i and \c indices[i] of the matrix \c M.
+  * A transposition applied on the right (e.g., \f$ M T_{i}\f$) yields a column interchange.
+  *
+  * Compared to the class PermutationMatrix, such a sequence of transpositions is what is
+  * computed during a decomposition with pivoting, and it is faster when applying the permutation in-place.
+  * 
+  * To apply a sequence of transpositions to a matrix, simply use the operator * as in the following example:
+  * \code
+  * Transpositions tr;
+  * MatrixXf mat;
+  * mat = tr * mat;
+  * \endcode
+  * In this example, we detect that the matrix appears on both sides, and so the transpositions
+  * are applied in-place without any temporary or extra copy.
+  *
+  * \sa class PermutationMatrix
+  */
+
+namespace internal {
+template<typename TranspositionType, typename MatrixType, int Side, bool Transposed=false> struct transposition_matrix_product_retval;
+}
+
+template<typename Derived>
+class TranspositionsBase
+{
+    typedef internal::traits<Derived> Traits;
+    
+  public:
+
+    typedef typename Traits::IndicesType IndicesType;
+    typedef typename IndicesType::Scalar Index;
+
+    Derived& derived() { return *static_cast<Derived*>(this); }
+    const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+    /** Copies the \a other transpositions into \c *this */
+    template<typename OtherDerived>
+    Derived& operator=(const TranspositionsBase<OtherDerived>& other)
+    {
+      indices() = other.indices();
+      return derived();
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    Derived& operator=(const TranspositionsBase& other)
+    {
+      indices() = other.indices();
+      return derived();
+    }
+    #endif
+
+    /** \returns the number of transpositions */
+    inline Index size() const { return indices().size(); }
+
+    /** Direct access to the underlying index vector */
+    inline const Index& coeff(Index i) const { return indices().coeff(i); }
+    /** Direct access to the underlying index vector */
+    inline Index& coeffRef(Index i) { return indices().coeffRef(i); }
+    /** Direct access to the underlying index vector */
+    inline const Index& operator()(Index i) const { return indices()(i); }
+    /** Direct access to the underlying index vector */
+    inline Index& operator()(Index i) { return indices()(i); }
+    /** Direct access to the underlying index vector */
+    inline const Index& operator[](Index i) const { return indices()(i); }
+    /** Direct access to the underlying index vector */
+    inline Index& operator[](Index i) { return indices()(i); }
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return derived().indices(); }
+    /** \returns a reference to the stored array representing the transpositions. */
+    IndicesType& indices() { return derived().indices(); }
+
+    /** Resizes to given size. */
+    inline void resize(int size)
+    {
+      indices().resize(size);
+    }
+
+    /** Sets \c *this to represent the identity transformation */
+    void setIdentity()
+    {
+      for(int i = 0; i < indices().size(); ++i)
+        coeffRef(i) = i;
+    }
+
+    // FIXME: do we want such methods?
+    // might be useful when the target matrix expression is complex, e.g.:
+    // object.matrix().block(..,..,..,..) = trans * object.matrix().block(..,..,..,..);
+    /*
+    template<typename MatrixType>
+    void applyForwardToRows(MatrixType& mat) const
+    {
+      for(Index k=0 ; k<size() ; ++k)
+        if(m_indices(k)!=k)
+          mat.row(k).swap(mat.row(m_indices(k)));
+    }
+
+    template<typename MatrixType>
+    void applyBackwardToRows(MatrixType& mat) const
+    {
+      for(Index k=size()-1 ; k>=0 ; --k)
+        if(m_indices(k)!=k)
+          mat.row(k).swap(mat.row(m_indices(k)));
+    }
+    */
+
+    /** \returns the inverse transformation */
+    inline Transpose<TranspositionsBase> inverse() const
+    { return Transpose<TranspositionsBase>(derived()); }
+
+    /** \returns the transpose transformation */
+    inline Transpose<TranspositionsBase> transpose() const
+    { return Transpose<TranspositionsBase>(derived()); }
+
+  protected:
+};
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+struct traits<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
+{
+  typedef IndexType Index;
+  typedef Matrix<Index, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
+{
+    typedef internal::traits<Transpositions> Traits;
+  public:
+
+    typedef TranspositionsBase<Transpositions> Base;
+    typedef typename Traits::IndicesType IndicesType;
+    typedef typename IndicesType::Scalar Index;
+
+    inline Transpositions() {}
+
+    /** Copy constructor. */
+    template<typename OtherDerived>
+    inline Transpositions(const TranspositionsBase<OtherDerived>& other)
+      : m_indices(other.indices()) {}
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** Standard copy constructor. Defined only to prevent a default copy constructor
+      * from hiding the other templated constructor */
+    inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {}
+    #endif
+
+    /** Generic constructor from expression of the transposition indices. */
+    template<typename Other>
+    explicit inline Transpositions(const MatrixBase<Other>& indices) : m_indices(indices)
+    {}
+
+    /** Copies the \a other transpositions into \c *this */
+    template<typename OtherDerived>
+    Transpositions& operator=(const TranspositionsBase<OtherDerived>& other)
+    {
+      return Base::operator=(other);
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    Transpositions& operator=(const Transpositions& other)
+    {
+      m_indices = other.m_indices;
+      return *this;
+    }
+    #endif
+
+    /** Constructs an uninitialized sequence of transpositions of the given size.
+      */
+    inline Transpositions(Index size) : m_indices(size)
+    {}
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return m_indices; }
+    /** \returns a reference to the stored array representing the transpositions. */
+    IndicesType& indices() { return m_indices; }
+
+  protected:
+
+    IndicesType m_indices;
+};
+
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
+struct traits<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,_PacketAccess> >
+{
+  typedef IndexType Index;
+  typedef Map<const Matrix<Index,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1>, _PacketAccess> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int PacketAccess>
+class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess>
+ : public TranspositionsBase<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess> >
+{
+    typedef internal::traits<Map> Traits;
+  public:
+
+    typedef TranspositionsBase<Map> Base;
+    typedef typename Traits::IndicesType IndicesType;
+    typedef typename IndicesType::Scalar Index;
+
+    inline Map(const Index* indices)
+      : m_indices(indices)
+    {}
+
+    inline Map(const Index* indices, Index size)
+      : m_indices(indices,size)
+    {}
+
+    /** Copies the \a other transpositions into \c *this */
+    template<typename OtherDerived>
+    Map& operator=(const TranspositionsBase<OtherDerived>& other)
+    {
+      return Base::operator=(other);
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    Map& operator=(const Map& other)
+    {
+      m_indices = other.m_indices;
+      return *this;
+    }
+    #endif
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return m_indices; }
+    
+    /** \returns a reference to the stored array representing the transpositions. */
+    IndicesType& indices() { return m_indices; }
+
+  protected:
+
+    IndicesType m_indices;
+};
+
+namespace internal {
+template<typename _IndicesType>
+struct traits<TranspositionsWrapper<_IndicesType> >
+{
+  typedef typename _IndicesType::Scalar Index;
+  typedef _IndicesType IndicesType;
+};
+}
+
+template<typename _IndicesType>
+class TranspositionsWrapper
+ : public TranspositionsBase<TranspositionsWrapper<_IndicesType> >
+{
+    typedef internal::traits<TranspositionsWrapper> Traits;
+  public:
+
+    typedef TranspositionsBase<TranspositionsWrapper> Base;
+    typedef typename Traits::IndicesType IndicesType;
+    typedef typename IndicesType::Scalar Index;
+
+    inline TranspositionsWrapper(IndicesType& indices)
+      : m_indices(indices)
+    {}
+
+    /** Copies the \a other transpositions into \c *this */
+    template<typename OtherDerived>
+    TranspositionsWrapper& operator=(const TranspositionsBase<OtherDerived>& other)
+    {
+      return Base::operator=(other);
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    TranspositionsWrapper& operator=(const TranspositionsWrapper& other)
+    {
+      m_indices = other.m_indices;
+      return *this;
+    }
+    #endif
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return m_indices; }
+
+    /** \returns a reference to the stored array representing the transpositions. */
+    IndicesType& indices() { return m_indices; }
+
+  protected:
+
+    const typename IndicesType::Nested m_indices;
+};
+
+/** \returns the \a matrix with the \a transpositions applied to the columns.
+  */
+template<typename Derived, typename TranspositionsDerived>
+inline const internal::transposition_matrix_product_retval<TranspositionsDerived, Derived, OnTheRight>
+operator*(const MatrixBase<Derived>& matrix,
+          const TranspositionsBase<TranspositionsDerived> &transpositions)
+{
+  return internal::transposition_matrix_product_retval
+           <TranspositionsDerived, Derived, OnTheRight>
+           (transpositions.derived(), matrix.derived());
+}
+
+/** \returns the \a matrix with the \a transpositions applied to the rows.
+  */
+template<typename Derived, typename TranspositionDerived>
+inline const internal::transposition_matrix_product_retval
+               <TranspositionDerived, Derived, OnTheLeft>
+operator*(const TranspositionsBase<TranspositionDerived> &transpositions,
+          const MatrixBase<Derived>& matrix)
+{
+  return internal::transposition_matrix_product_retval
+           <TranspositionDerived, Derived, OnTheLeft>
+           (transpositions.derived(), matrix.derived());
+}
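+
+// A minimal usage sketch of the two products above (illustrative comments only,
+// not part of the upstream sources; `t` and `A` are hypothetical user objects):
+//
+//   Eigen::Transpositions<Eigen::Dynamic> t(3);
+//   t.indices() << 1, 2, 2;              // swap(0,1), then swap(1,2), then swap(2,2) (no-op)
+//   Eigen::Matrix3d A = Eigen::Matrix3d::Random();
+//   Eigen::Matrix3d B = t * A;           // transpositions applied to the rows
+//   Eigen::Matrix3d C = A * t;           // transpositions applied to the columns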
+
+namespace internal {
+
+template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
+struct traits<transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
+{
+  typedef typename MatrixType::PlainObject ReturnType;
+};
+
+template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
+struct transposition_matrix_product_retval
+ : public ReturnByValue<transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
+{
+    typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+    typedef typename TranspositionType::Index Index;
+
+    transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix)
+      : m_transpositions(tr), m_matrix(matrix)
+    {}
+
+    inline int rows() const { return m_matrix.rows(); }
+    inline int cols() const { return m_matrix.cols(); }
+
+    template<typename Dest> inline void evalTo(Dest& dst) const
+    {
+      const int size = m_transpositions.size();
+      Index j = 0;
+
+      if(!(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix)))
+        dst = m_matrix;
+
+      for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k)
+        if((j=m_transpositions.coeff(k))!=k)
+        {
+          if(Side==OnTheLeft)
+            dst.row(k).swap(dst.row(j));
+          else if(Side==OnTheRight)
+            dst.col(k).swap(dst.col(j));
+        }
+    }
+
+  protected:
+    const TranspositionType& m_transpositions;
+    typename MatrixType::Nested m_matrix;
+};
+
+} // end namespace internal
+
+/* Template partial specialization for transposed/inverse transpositions */
+
+template<typename TranspositionsDerived>
+class Transpose<TranspositionsBase<TranspositionsDerived> >
+{
+    typedef TranspositionsDerived TranspositionType;
+    typedef typename TranspositionType::IndicesType IndicesType;
+  public:
+
+    Transpose(const TranspositionType& t) : m_transpositions(t) {}
+
+    inline int size() const { return m_transpositions.size(); }
+
+    /** \returns the \a matrix with the inverse transpositions applied to the columns.
+      */
+    template<typename Derived> friend
+    inline const internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>
+    operator*(const MatrixBase<Derived>& matrix, const Transpose& trt)
+    {
+      return internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>(trt.m_transpositions, matrix.derived());
+    }
+
+    /** \returns the \a matrix with the inverse transpositions applied to the rows.
+      */
+    template<typename Derived>
+    inline const internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>
+    operator*(const MatrixBase<Derived>& matrix) const
+    {
+      return internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>(m_transpositions, matrix.derived());
+    }
+
+  protected:
+    const TranspositionType& m_transpositions;
+};
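+
+// Sketch of undoing a transposition product through the inverse view above
+// (illustrative comments only; assumes the inverse() accessor of
+// TranspositionsBase defined earlier in this file):
+//
+//   Eigen::Transpositions<Eigen::Dynamic> t(3);
+//   t.indices() << 2, 2, 2;
+//   Eigen::Matrix3d A = Eigen::Matrix3d::Random();
+//   Eigen::Matrix3d B = t * A;
+//   Eigen::Matrix3d A2 = t.inverse() * B;   // A2 == A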
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRANSPOSITIONS_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/TriangularMatrix.h b/resources/3rdParty/eigen/Eigen/src/Core/TriangularMatrix.h
new file mode 100644
index 000000000..301b0ef24
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/TriangularMatrix.h
@@ -0,0 +1,828 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRIANGULARMATRIX_H
+#define EIGEN_TRIANGULARMATRIX_H
+
+namespace Eigen { 
+
+namespace internal {
+  
+template<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval;
+  
+}
+
+/** \internal
+  *
+  * \class TriangularBase
+  * \ingroup Core_Module
+  *
+  * \brief Base class for triangular part in a matrix
+  */
+template<typename Derived> class TriangularBase : public EigenBase<Derived>
+{
+  public:
+
+    enum {
+      Mode = internal::traits<Derived>::Mode,
+      CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+      MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime
+    };
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::DenseMatrixType DenseMatrixType;
+    typedef DenseMatrixType DenseType;
+
+    inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); }
+
+    inline Index rows() const { return derived().rows(); }
+    inline Index cols() const { return derived().cols(); }
+    inline Index outerStride() const { return derived().outerStride(); }
+    inline Index innerStride() const { return derived().innerStride(); }
+
+    inline Scalar coeff(Index row, Index col) const  { return derived().coeff(row,col); }
+    inline Scalar& coeffRef(Index row, Index col) { return derived().coeffRef(row,col); }
+
+    /** \see MatrixBase::copyCoeff(row,col)
+      */
+    template<typename Other>
+    EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, Other& other)
+    {
+      derived().coeffRef(row, col) = other.coeff(row, col);
+    }
+
+    inline Scalar operator()(Index row, Index col) const
+    {
+      check_coordinates(row, col);
+      return coeff(row,col);
+    }
+    inline Scalar& operator()(Index row, Index col)
+    {
+      check_coordinates(row, col);
+      return coeffRef(row,col);
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    inline Derived& derived() { return *static_cast<Derived*>(this); }
+    #endif // not EIGEN_PARSED_BY_DOXYGEN
+
+    template<typename DenseDerived>
+    void evalTo(MatrixBase<DenseDerived> &other) const;
+    template<typename DenseDerived>
+    void evalToLazy(MatrixBase<DenseDerived> &other) const;
+
+    DenseMatrixType toDenseMatrix() const
+    {
+      DenseMatrixType res(rows(), cols());
+      evalToLazy(res);
+      return res;
+    }
+
+  protected:
+
+    void check_coordinates(Index row, Index col) const
+    {
+      EIGEN_ONLY_USED_FOR_DEBUG(row);
+      EIGEN_ONLY_USED_FOR_DEBUG(col);
+      eigen_assert(col>=0 && col<cols() && row>=0 && row<rows());
+      const int mode = int(Mode) & ~SelfAdjoint;
+      EIGEN_ONLY_USED_FOR_DEBUG(mode);
+      eigen_assert((mode==Upper && col>=row)
+                || (mode==Lower && col<=row)
+                || ((mode==StrictlyUpper || mode==UnitUpper) && col>row)
+                || ((mode==StrictlyLower || mode==UnitLower) && col<row));
+    }
+
+    #ifdef EIGEN_INTERNAL_DEBUGGING
+    void check_coordinates_internal(Index row, Index col) const
+    {
+      check_coordinates(row, col);
+    }
+    #else
+    void check_coordinates_internal(Index , Index ) const {}
+    #endif
+
+};
+
+/** \class TriangularView
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a triangular part of a matrix
+  *
+  * \param MatrixType the type of the object in which we are taking the triangular part
+  * \param Mode the kind of triangular matrix expression to construct. Can be #Upper,
+  *             #Lower, #UnitUpper, #UnitLower, #StrictlyUpper, or #StrictlyLower.
+  *             This is in fact a bit field; it must have either #Upper or #Lower, 
+  *             and additionally it may have #UnitDiag or #ZeroDiag or neither.
+  *
+  * This class represents a triangular part of a matrix, not necessarily square. Strictly speaking, for rectangular
+  * matrices one should speak of "trapezoid" parts. This class is the return type
+  * of MatrixBase::triangularView() and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::triangularView()
+  */
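+
+// A minimal usage sketch (illustrative comments only, not part of the upstream
+// sources; `m`, `b` and `x` are hypothetical user objects):
+//
+//   Eigen::Matrix3d m = Eigen::Matrix3d::Random();
+//   Eigen::Vector3d b = Eigen::Vector3d::Ones();
+//   // read-only upper-triangular view, used here to solve a triangular system
+//   Eigen::Vector3d x = m.triangularView<Eigen::Upper>().solve(b);
+//   // writable strictly-lower view, used here to zero the strictly lower part of m
+//   m.triangularView<Eigen::StrictlyLower>().setZero();
+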
+namespace internal {
+template<typename MatrixType, unsigned int _Mode>
+struct traits<TriangularView<MatrixType, _Mode> > : traits<MatrixType>
+{
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
+  typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
+  typedef MatrixType ExpressionType;
+  typedef typename MatrixType::PlainObject DenseMatrixType;
+  enum {
+    Mode = _Mode,
+    Flags = (MatrixTypeNestedCleaned::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) | Mode,
+    CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost
+  };
+};
+}
+
+template<int Mode, bool LhsIsTriangular,
+         typename Lhs, bool LhsIsVector,
+         typename Rhs, bool RhsIsVector>
+struct TriangularProduct;
+
+template<typename _MatrixType, unsigned int _Mode> class TriangularView
+  : public TriangularBase<TriangularView<_MatrixType, _Mode> >
+{
+  public:
+
+    typedef TriangularBase<TriangularView> Base;
+    typedef typename internal::traits<TriangularView>::Scalar Scalar;
+
+    typedef _MatrixType MatrixType;
+    typedef typename internal::traits<TriangularView>::DenseMatrixType DenseMatrixType;
+    typedef DenseMatrixType PlainObject;
+
+  protected:
+    typedef typename internal::traits<TriangularView>::MatrixTypeNested MatrixTypeNested;
+    typedef typename internal::traits<TriangularView>::MatrixTypeNestedNonRef MatrixTypeNestedNonRef;
+    typedef typename internal::traits<TriangularView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned;
+
+    typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
+    
+  public:
+    using Base::evalToLazy;
+  
+
+    typedef typename internal::traits<TriangularView>::StorageKind StorageKind;
+    typedef typename internal::traits<TriangularView>::Index Index;
+
+    enum {
+      Mode = _Mode,
+      TransposeMode = (Mode & Upper ? Lower : 0)
+                    | (Mode & Lower ? Upper : 0)
+                    | (Mode & (UnitDiag))
+                    | (Mode & (ZeroDiag))
+    };
+
+    inline TriangularView(const MatrixType& matrix) : m_matrix(matrix)
+    {}
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+    inline Index outerStride() const { return m_matrix.outerStride(); }
+    inline Index innerStride() const { return m_matrix.innerStride(); }
+
+    /** \sa MatrixBase::operator+=() */
+    template<typename Other> TriangularView&  operator+=(const DenseBase<Other>& other) { return *this = m_matrix + other.derived(); }
+    /** \sa MatrixBase::operator-=() */
+    template<typename Other> TriangularView&  operator-=(const DenseBase<Other>& other) { return *this = m_matrix - other.derived(); }
+    /** \sa MatrixBase::operator*=() */
+    TriangularView&  operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = m_matrix * other; }
+    /** \sa MatrixBase::operator/=() */
+    TriangularView&  operator/=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = m_matrix / other; }
+
+    /** \sa MatrixBase::fill() */
+    void fill(const Scalar& value) { setConstant(value); }
+    /** \sa MatrixBase::setConstant() */
+    TriangularView& setConstant(const Scalar& value)
+    { return *this = MatrixType::Constant(rows(), cols(), value); }
+    /** \sa MatrixBase::setZero() */
+    TriangularView& setZero() { return setConstant(Scalar(0)); }
+    /** \sa MatrixBase::setOnes() */
+    TriangularView& setOnes() { return setConstant(Scalar(1)); }
+
+    /** \sa MatrixBase::coeff()
+      * \warning the coordinates must fit into the referenced triangular part
+      */
+    inline Scalar coeff(Index row, Index col) const
+    {
+      Base::check_coordinates_internal(row, col);
+      return m_matrix.coeff(row, col);
+    }
+
+    /** \sa MatrixBase::coeffRef()
+      * \warning the coordinates must fit into the referenced triangular part
+      */
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      Base::check_coordinates_internal(row, col);
+      return m_matrix.const_cast_derived().coeffRef(row, col);
+    }
+
+    const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
+    MatrixTypeNestedCleaned& nestedExpression() { return *const_cast<MatrixTypeNestedCleaned*>(&m_matrix); }
+
+    /** Assigns a triangular matrix to a triangular part of a dense matrix */
+    template<typename OtherDerived>
+    TriangularView& operator=(const TriangularBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    TriangularView& operator=(const MatrixBase<OtherDerived>& other);
+
+    TriangularView& operator=(const TriangularView& other)
+    { return *this = other.nestedExpression(); }
+
+    template<typename OtherDerived>
+    void lazyAssign(const TriangularBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    void lazyAssign(const MatrixBase<OtherDerived>& other);
+
+    /** \sa MatrixBase::conjugate() */
+    inline TriangularView<MatrixConjugateReturnType,Mode> conjugate()
+    { return m_matrix.conjugate(); }
+    /** \sa MatrixBase::conjugate() const */
+    inline const TriangularView<MatrixConjugateReturnType,Mode> conjugate() const
+    { return m_matrix.conjugate(); }
+
+    /** \sa MatrixBase::adjoint() const */
+    inline const TriangularView<const typename MatrixType::AdjointReturnType,TransposeMode> adjoint() const
+    { return m_matrix.adjoint(); }
+
+    /** \sa MatrixBase::transpose() */
+    inline TriangularView<Transpose<MatrixType>,TransposeMode> transpose()
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+      return m_matrix.const_cast_derived().transpose();
+    }
+    /** \sa MatrixBase::transpose() const */
+    inline const TriangularView<Transpose<MatrixType>,TransposeMode> transpose() const
+    {
+      return m_matrix.transpose();
+    }
+
+    /** Efficient triangular matrix times vector/matrix product */
+    template<typename OtherDerived>
+    TriangularProduct<Mode,true,MatrixType,false,OtherDerived, OtherDerived::IsVectorAtCompileTime>
+    operator*(const MatrixBase<OtherDerived>& rhs) const
+    {
+      return TriangularProduct
+              <Mode,true,MatrixType,false,OtherDerived,OtherDerived::IsVectorAtCompileTime>
+              (m_matrix, rhs.derived());
+    }
+
+    /** Efficient vector/matrix times triangular matrix product */
+    template<typename OtherDerived> friend
+    TriangularProduct<Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
+    operator*(const MatrixBase<OtherDerived>& lhs, const TriangularView& rhs)
+    {
+      return TriangularProduct
+              <Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
+              (lhs.derived(),rhs.m_matrix);
+    }
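+
+    // Illustrative product with a triangular view (a comment-only sketch, not
+    // upstream code; `m` and `v` are hypothetical user objects):
+    //
+    //   Eigen::Matrix3d m = Eigen::Matrix3d::Random();
+    //   Eigen::Vector3d v = Eigen::Vector3d::Ones();
+    //   Eigen::Vector3d w = m.triangularView<Eigen::Lower>() * v;  // only the lower part of m is read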
+
+    #ifdef EIGEN2_SUPPORT
+    template<typename OtherDerived>
+    struct eigen2_product_return_type
+    {
+      typedef typename TriangularView<MatrixType,Mode>::DenseMatrixType DenseMatrixType;
+      typedef typename OtherDerived::PlainObject::DenseType OtherPlainObject;
+      typedef typename ProductReturnType<DenseMatrixType, OtherPlainObject>::Type ProdRetType;
+      typedef typename ProdRetType::PlainObject type;
+    };
+    template<typename OtherDerived>
+    const typename eigen2_product_return_type<OtherDerived>::type
+    operator*(const EigenBase<OtherDerived>& rhs) const
+    {
+      typename OtherDerived::PlainObject::DenseType rhsPlainObject;
+      rhs.evalTo(rhsPlainObject);
+      return this->toDenseMatrix() * rhsPlainObject;
+    }
+    template<typename OtherMatrixType>
+    bool isApprox(const TriangularView<OtherMatrixType, Mode>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+    {
+      return this->toDenseMatrix().isApprox(other.toDenseMatrix(), precision);
+    }
+    template<typename OtherDerived>
+    bool isApprox(const MatrixBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+    {
+      return this->toDenseMatrix().isApprox(other, precision);
+    }
+    #endif // EIGEN2_SUPPORT
+
+    template<int Side, typename Other>
+    inline const internal::triangular_solve_retval<Side,TriangularView, Other>
+    solve(const MatrixBase<Other>& other) const;
+
+    template<int Side, typename OtherDerived>
+    void solveInPlace(const MatrixBase<OtherDerived>& other) const;
+
+    template<typename Other>
+    inline const internal::triangular_solve_retval<OnTheLeft,TriangularView, Other> 
+    solve(const MatrixBase<Other>& other) const
+    { return solve<OnTheLeft>(other); }
+
+    template<typename OtherDerived>
+    void solveInPlace(const MatrixBase<OtherDerived>& other) const
+    { return solveInPlace<OnTheLeft>(other); }
+
+    const SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView() const
+    {
+      EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR);
+      return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
+    }
+    SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView()
+    {
+      EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR);
+      return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
+    }
+
+    template<typename OtherDerived>
+    void swap(TriangularBase<OtherDerived> const & other)
+    {
+      TriangularView<SwapWrapper<MatrixType>,Mode>(const_cast<MatrixType&>(m_matrix)).lazyAssign(other.derived());
+    }
+
+    template<typename OtherDerived>
+    void swap(MatrixBase<OtherDerived> const & other)
+    {
+      SwapWrapper<MatrixType> swaper(const_cast<MatrixType&>(m_matrix));
+      TriangularView<SwapWrapper<MatrixType>,Mode>(swaper).lazyAssign(other.derived());
+    }
+
+    Scalar determinant() const
+    {
+      if (Mode & UnitDiag)
+        return 1;
+      else if (Mode & ZeroDiag)
+        return 0;
+      else
+        return m_matrix.diagonal().prod();
+    }
+    
+    // TODO simplify the following:
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE TriangularView& operator=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+    {
+      setZero();
+      return assignProduct(other,1);
+    }
+    
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE TriangularView& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+    {
+      return assignProduct(other,1);
+    }
+    
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE TriangularView& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+    {
+      return assignProduct(other,-1);
+    }
+    
+    
+    template<typename ProductDerived>
+    EIGEN_STRONG_INLINE TriangularView& operator=(const ScaledProduct<ProductDerived>& other)
+    {
+      setZero();
+      return assignProduct(other,other.alpha());
+    }
+    
+    template<typename ProductDerived>
+    EIGEN_STRONG_INLINE TriangularView& operator+=(const ScaledProduct<ProductDerived>& other)
+    {
+      return assignProduct(other,other.alpha());
+    }
+    
+    template<typename ProductDerived>
+    EIGEN_STRONG_INLINE TriangularView& operator-=(const ScaledProduct<ProductDerived>& other)
+    {
+      return assignProduct(other,-other.alpha());
+    }
+    
+  protected:
+    
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE TriangularView& assignProduct(const ProductBase<ProductDerived, Lhs,Rhs>& prod, const Scalar& alpha);
+
+    MatrixTypeNested m_matrix;
+};
+
+/***************************************************************************
+* Implementation of triangular evaluation/assignment
+***************************************************************************/
+
+namespace internal {
+
+template<typename Derived1, typename Derived2, unsigned int Mode, int UnrollCount, bool ClearOpposite>
+struct triangular_assignment_selector
+{
+  enum {
+    col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
+    row = (UnrollCount-1) % Derived1::RowsAtCompileTime
+  };
+  
+  typedef typename Derived1::Scalar Scalar;
+
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    triangular_assignment_selector<Derived1, Derived2, Mode, UnrollCount-1, ClearOpposite>::run(dst, src);
+
+    eigen_assert( Mode == Upper || Mode == Lower
+            || Mode == StrictlyUpper || Mode == StrictlyLower
+            || Mode == UnitUpper || Mode == UnitLower);
+    if((Mode == Upper && row <= col)
+    || (Mode == Lower && row >= col)
+    || (Mode == StrictlyUpper && row < col)
+    || (Mode == StrictlyLower && row > col)
+    || (Mode == UnitUpper && row < col)
+    || (Mode == UnitLower && row > col))
+      dst.copyCoeff(row, col, src);
+    else if(ClearOpposite)
+    {
+      if (Mode&UnitDiag && row==col)
+        dst.coeffRef(row, col) = Scalar(1);
+      else
+        dst.coeffRef(row, col) = Scalar(0);
+    }
+  }
+};
+
+// prevent buggy user code from causing an infinite recursion
+template<typename Derived1, typename Derived2, unsigned int Mode, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, Mode, 0, ClearOpposite>
+{
+  static inline void run(Derived1 &, const Derived2 &) {}
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  typedef typename Derived1::Scalar Scalar;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      Index maxi = (std::min)(j, dst.rows()-1);
+      for(Index i = 0; i <= maxi; ++i)
+        dst.copyCoeff(i, j, src);
+      if (ClearOpposite)
+        for(Index i = maxi+1; i < dst.rows(); ++i)
+          dst.coeffRef(i, j) = Scalar(0);
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      for(Index i = j; i < dst.rows(); ++i)
+        dst.copyCoeff(i, j, src);
+      Index maxi = (std::min)(j, dst.rows());
+      if (ClearOpposite)
+        for(Index i = 0; i < maxi; ++i)
+          dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  typedef typename Derived1::Scalar Scalar;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      Index maxi = (std::min)(j, dst.rows());
+      for(Index i = 0; i < maxi; ++i)
+        dst.copyCoeff(i, j, src);
+      if (ClearOpposite)
+        for(Index i = maxi; i < dst.rows(); ++i)
+          dst.coeffRef(i, j) = Scalar(0);
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      for(Index i = j+1; i < dst.rows(); ++i)
+        dst.copyCoeff(i, j, src);
+      Index maxi = (std::min)(j, dst.rows()-1);
+      if (ClearOpposite)
+        for(Index i = 0; i <= maxi; ++i)
+          dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      Index maxi = (std::min)(j, dst.rows());
+      for(Index i = 0; i < maxi; ++i)
+        dst.copyCoeff(i, j, src);
+      if (ClearOpposite)
+      {
+        for(Index i = maxi+1; i < dst.rows(); ++i)
+          dst.coeffRef(i, j) = 0;
+      }
+    }
+    dst.diagonal().setOnes();
+  }
+};
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, UnitLower, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      Index maxi = (std::min)(j, dst.rows());
+      for(Index i = maxi+1; i < dst.rows(); ++i)
+        dst.copyCoeff(i, j, src);
+      if (ClearOpposite)
+      {
+        for(Index i = 0; i < maxi; ++i)
+          dst.coeffRef(i, j) = 0;
+      }
+    }
+    dst.diagonal().setOnes();
+  }
+};
+
+} // end namespace internal
+
+// FIXME should we keep that possibility
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+inline TriangularView<MatrixType, Mode>&
+TriangularView<MatrixType, Mode>::operator=(const MatrixBase<OtherDerived>& other)
+{
+  if(OtherDerived::Flags & EvalBeforeAssigningBit)
+  {
+    typename internal::plain_matrix_type<OtherDerived>::type other_evaluated(other.rows(), other.cols());
+    other_evaluated.template triangularView<Mode>().lazyAssign(other.derived());
+    lazyAssign(other_evaluated);
+  }
+  else
+    lazyAssign(other.derived());
+  return *this;
+}
+
+// FIXME should we keep that possibility
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+void TriangularView<MatrixType, Mode>::lazyAssign(const MatrixBase<OtherDerived>& other)
+{
+  enum {
+    unroll = MatrixType::SizeAtCompileTime != Dynamic
+          && internal::traits<OtherDerived>::CoeffReadCost != Dynamic
+          && MatrixType::SizeAtCompileTime*internal::traits<OtherDerived>::CoeffReadCost/2 <= EIGEN_UNROLLING_LIMIT
+  };
+  eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
+
+  internal::triangular_assignment_selector
+    <MatrixType, OtherDerived, int(Mode),
+    unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic,
+    false // do not change the opposite triangular part
+    >::run(m_matrix.const_cast_derived(), other.derived());
+}
+
+
+
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+inline TriangularView<MatrixType, Mode>&
+TriangularView<MatrixType, Mode>::operator=(const TriangularBase<OtherDerived>& other)
+{
+  eigen_assert(Mode == int(OtherDerived::Mode));
+  if(internal::traits<OtherDerived>::Flags & EvalBeforeAssigningBit)
+  {
+    typename OtherDerived::DenseMatrixType other_evaluated(other.rows(), other.cols());
+    other_evaluated.template triangularView<Mode>().lazyAssign(other.derived().nestedExpression());
+    lazyAssign(other_evaluated);
+  }
+  else
+    lazyAssign(other.derived().nestedExpression());
+  return *this;
+}
+
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+void TriangularView<MatrixType, Mode>::lazyAssign(const TriangularBase<OtherDerived>& other)
+{
+  enum {
+    unroll = MatrixType::SizeAtCompileTime != Dynamic
+                   && internal::traits<OtherDerived>::CoeffReadCost != Dynamic
+                   && MatrixType::SizeAtCompileTime * internal::traits<OtherDerived>::CoeffReadCost / 2
+                        <= EIGEN_UNROLLING_LIMIT
+  };
+  eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
+
+  internal::triangular_assignment_selector
+    <MatrixType, OtherDerived, int(Mode),
+    unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic,
+    false // preserve the opposite triangular part
+    >::run(m_matrix.const_cast_derived(), other.derived().nestedExpression());
+}
+
+/***************************************************************************
+* Implementation of TriangularBase methods
+***************************************************************************/
+
+/** Assigns a triangular or selfadjoint matrix to a dense matrix.
+  * If the matrix is triangular, the opposite part is set to zero. */
+template<typename Derived>
+template<typename DenseDerived>
+void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
+{
+  if(internal::traits<Derived>::Flags & EvalBeforeAssigningBit)
+  {
+    typename internal::plain_matrix_type<Derived>::type other_evaluated(rows(), cols());
+    evalToLazy(other_evaluated);
+    other.derived().swap(other_evaluated);
+  }
+  else
+    evalToLazy(other.derived());
+}
+
+/** Assigns a triangular or selfadjoint matrix to a dense matrix.
+  * If the matrix is triangular, the opposite part is set to zero. */
+template<typename Derived>
+template<typename DenseDerived>
+void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
+{
+  enum {
+    unroll = DenseDerived::SizeAtCompileTime != Dynamic
+                   && internal::traits<Derived>::CoeffReadCost != Dynamic
+                   && DenseDerived::SizeAtCompileTime * internal::traits<Derived>::CoeffReadCost / 2
+                        <= EIGEN_UNROLLING_LIMIT
+  };
+  other.derived().resize(this->rows(), this->cols());
+
+  internal::triangular_assignment_selector
+    <DenseDerived, typename internal::traits<Derived>::MatrixTypeNestedCleaned, Derived::Mode,
+    unroll ? int(DenseDerived::SizeAtCompileTime) : Dynamic,
+    true // clear the opposite triangular part
+    >::run(other.derived(), derived().nestedExpression());
+}
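+
+// Sketch of materializing a triangular view through the evalToLazy() path above
+// (illustrative comments only, not part of the upstream sources):
+//
+//   Eigen::Matrix3d m = Eigen::Matrix3d::Random();
+//   Eigen::Matrix3d upper = m.triangularView<Eigen::Upper>().toDenseMatrix();
+//   // `upper` keeps the upper triangular part of m; the opposite part is set to zero.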
+
+/***************************************************************************
+* Implementation of TriangularView methods
+***************************************************************************/
+
+/***************************************************************************
+* Implementation of MatrixBase methods
+***************************************************************************/
+
+#ifdef EIGEN2_SUPPORT
+
+// implementation of part<>(), including the SelfAdjoint case.
+
+namespace internal {
+template<typename MatrixType, unsigned int Mode>
+struct eigen2_part_return_type
+{
+  typedef TriangularView<MatrixType, Mode> type;
+};
+
+template<typename MatrixType>
+struct eigen2_part_return_type<MatrixType, SelfAdjoint>
+{
+  typedef SelfAdjointView<MatrixType, Upper> type;
+};
+}
+
+/** \deprecated use MatrixBase::triangularView() */
+template<typename Derived>
+template<unsigned int Mode>
+const typename internal::eigen2_part_return_type<Derived, Mode>::type MatrixBase<Derived>::part() const
+{
+  return derived();
+}
+
+/** \deprecated use MatrixBase::triangularView() */
+template<typename Derived>
+template<unsigned int Mode>
+typename internal::eigen2_part_return_type<Derived, Mode>::type MatrixBase<Derived>::part()
+{
+  return derived();
+}
+#endif
+
+/**
+  * \returns an expression of a triangular view extracted from the current matrix
+  *
+  * The parameter \a Mode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper,
+  * \c #Lower, \c #StrictlyLower, \c #UnitLower.
+  *
+  * Example: \include MatrixBase_extract.cpp
+  * Output: \verbinclude MatrixBase_extract.out
+  *
+  * \sa class TriangularView
+  */
+template<typename Derived>
+template<unsigned int Mode>
+typename MatrixBase<Derived>::template TriangularViewReturnType<Mode>::Type
+MatrixBase<Derived>::triangularView()
+{
+  return derived();
+}
+
+/** This is the const version of MatrixBase::triangularView() */
+template<typename Derived>
+template<unsigned int Mode>
+typename MatrixBase<Derived>::template ConstTriangularViewReturnType<Mode>::Type
+MatrixBase<Derived>::triangularView() const
+{
+  return derived();
+}
+
+/** \returns true if *this is approximately equal to an upper triangular matrix,
+  *          within the precision given by \a prec.
+  *
+  * \sa isLowerTriangular()
+  */
+template<typename Derived>
+bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
+{
+  RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
+  for(Index j = 0; j < cols(); ++j)
+  {
+    Index maxi = (std::min)(j, rows()-1);
+    for(Index i = 0; i <= maxi; ++i)
+    {
+      RealScalar absValue = internal::abs(coeff(i,j));
+      if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue;
+    }
+  }
+  RealScalar threshold = maxAbsOnUpperPart * prec;
+  for(Index j = 0; j < cols(); ++j)
+    for(Index i = j+1; i < rows(); ++i)
+      if(internal::abs(coeff(i, j)) > threshold) return false;
+  return true;
+}
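+
+// Illustrative check (a comment-only sketch, not part of the upstream sources;
+// `u` is a hypothetical user object):
+//
+//   Eigen::Matrix3d u = Eigen::Matrix3d::Random();
+//   u.triangularView<Eigen::StrictlyLower>().setZero();
+//   bool isUpper = u.isUpperTriangular();   // expected to hold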
+
+/** \returns true if *this is approximately equal to a lower triangular matrix,
+  *          within the precision given by \a prec.
+  *
+  * \sa isUpperTriangular()
+  */
+template<typename Derived>
+bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
+{
+  RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1);
+  for(Index j = 0; j < cols(); ++j)
+    for(Index i = j; i < rows(); ++i)
+    {
+      RealScalar absValue = internal::abs(coeff(i,j));
+      if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue;
+    }
+  RealScalar threshold = maxAbsOnLowerPart * prec;
+  for(Index j = 1; j < cols(); ++j)
+  {
+    Index maxi = (std::min)(j, rows()-1);
+    for(Index i = 0; i < maxi; ++i)
+      if(internal::abs(coeff(i, j)) > threshold) return false;
+  }
+  return true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULARMATRIX_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/VectorBlock.h b/resources/3rdParty/eigen/Eigen/src/Core/VectorBlock.h
new file mode 100644
index 000000000..6f4effca0
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/VectorBlock.h
@@ -0,0 +1,284 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_VECTORBLOCK_H
+#define EIGEN_VECTORBLOCK_H
+
+namespace Eigen { 
+
+/** \class VectorBlock
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a fixed-size or dynamic-size sub-vector
+  *
+  * \param VectorType the type of the object in which we are taking a sub-vector
+  * \param Size size of the sub-vector we are taking at compile time (optional)
+  *
+  * This class represents an expression of either a fixed-size or dynamic-size sub-vector.
+  * It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and
+  * most of the time this is the only way it is used.
+  *
+  * However, if you want to directly manipulate sub-vector expressions,
+  * for instance if you want to write a function returning such an expression, you
+  * will need to use this class.
+  *
+  * Here is an example illustrating the dynamic case:
+  * \include class_VectorBlock.cpp
+  * Output: \verbinclude class_VectorBlock.out
+  *
+  * \note Even though this expression has dynamic size, in the case where \a VectorType
+  * has fixed size, this expression inherits a fixed maximal size which means that evaluating
+  * it does not cause a dynamic memory allocation.
+  *
+  * Here is an example illustrating the fixed-size case:
+  * \include class_FixedVectorBlock.cpp
+  * Output: \verbinclude class_FixedVectorBlock.out
+  *
+  * \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index)
+  */
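+
+// Minimal sketch of the segment/head/tail API backed by this class
+// (illustrative comments only, not part of the upstream sources; `v` is a
+// hypothetical user object):
+//
+//   Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(6, 0.0, 5.0);
+//   v.segment(1, 3).setZero();           // dynamic-size sub-vector, coefficients 1..3
+//   Eigen::Vector2d h = v.head<2>();     // fixed-size first two coefficients
+//   double last = v.tail(1)(0);          // dynamic-size last coefficient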
+
+namespace internal {
+template<typename VectorType, int Size>
+struct traits<VectorBlock<VectorType, Size> >
+  : public traits<Block<VectorType,
+                     traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
+                     traits<VectorType>::Flags & RowMajorBit ? Size : 1> >
+{
+};
+}
+
+template<typename VectorType, int Size> class VectorBlock
+  : public Block<VectorType,
+                     internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
+                     internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1>
+{
+    typedef Block<VectorType,
+                     internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
+                     internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1> Base;
+    enum {
+      IsColVector = !(internal::traits<VectorType>::Flags & RowMajorBit)
+    };
+  public:
+    EIGEN_DENSE_PUBLIC_INTERFACE(VectorBlock)
+
+    using Base::operator=;
+
+    /** Dynamic-size constructor
+      */
+    inline VectorBlock(VectorType& vector, Index start, Index size)
+      : Base(vector,
+             IsColVector ? start : 0, IsColVector ? 0 : start,
+             IsColVector ? size  : 1, IsColVector ? 1 : size)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
+    }
+
+    /** Fixed-size constructor
+      */
+    inline VectorBlock(VectorType& vector, Index start)
+      : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
+    }
+};
+
+
+/** \returns a dynamic-size expression of a segment (i.e. a vector block) in *this.
+  *
+  * \only_for_vectors
+  *
+  * \param start the first coefficient in the segment
+  * \param size the number of coefficients in the segment
+  *
+  * Example: \include MatrixBase_segment_int_int.cpp
+  * Output: \verbinclude MatrixBase_segment_int_int.out
+  *
+  * \note Even though the returned expression has dynamic size, in the case
+  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+  * which means that evaluating it does not cause a dynamic memory allocation.
+  *
+  * \sa class Block, segment(Index)
+  */
+template<typename Derived>
+inline typename DenseBase<Derived>::SegmentReturnType
+DenseBase<Derived>::segment(Index start, Index size)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return SegmentReturnType(derived(), start, size);
+}
+
+/** This is the const version of segment(Index,Index).*/
+template<typename Derived>
+inline typename DenseBase<Derived>::ConstSegmentReturnType
+DenseBase<Derived>::segment(Index start, Index size) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return ConstSegmentReturnType(derived(), start, size);
+}
+
+/** \returns a dynamic-size expression of the first coefficients of *this.
+  *
+  * \only_for_vectors
+  *
+  * \param size the number of coefficients in the block
+  *
+  * Example: \include MatrixBase_start_int.cpp
+  * Output: \verbinclude MatrixBase_start_int.out
+  *
+  * \note Even though the returned expression has dynamic size, in the case
+  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+  * which means that evaluating it does not cause a dynamic memory allocation.
+  *
+  * \sa class Block, block(Index,Index)
+  */
+template<typename Derived>
+inline typename DenseBase<Derived>::SegmentReturnType
+DenseBase<Derived>::head(Index size)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return SegmentReturnType(derived(), 0, size);
+}
+
+/** This is the const version of head(Index).*/
+template<typename Derived>
+inline typename DenseBase<Derived>::ConstSegmentReturnType
+DenseBase<Derived>::head(Index size) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return ConstSegmentReturnType(derived(), 0, size);
+}
+
+/** \returns a dynamic-size expression of the last coefficients of *this.
+  *
+  * \only_for_vectors
+  *
+  * \param size the number of coefficients in the block
+  *
+  * Example: \include MatrixBase_end_int.cpp
+  * Output: \verbinclude MatrixBase_end_int.out
+  *
+  * \note Even though the returned expression has dynamic size, in the case
+  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+  * which means that evaluating it does not cause a dynamic memory allocation.
+  *
+  * \sa class Block, block(Index,Index)
+  */
+template<typename Derived>
+inline typename DenseBase<Derived>::SegmentReturnType
+DenseBase<Derived>::tail(Index size)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return SegmentReturnType(derived(), this->size() - size, size);
+}
+
+/** This is the const version of tail(Index).*/
+template<typename Derived>
+inline typename DenseBase<Derived>::ConstSegmentReturnType
+DenseBase<Derived>::tail(Index size) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return ConstSegmentReturnType(derived(), this->size() - size, size);
+}
+
+/** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this
+  *
+  * \only_for_vectors
+  *
+  * The template parameter \a Size is the number of coefficients in the block
+  *
+  * \param start the index of the first element of the sub-vector
+  *
+  * Example: \include MatrixBase_template_int_segment.cpp
+  * Output: \verbinclude MatrixBase_template_int_segment.out
+  *
+  * \sa class Block
+  */
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::segment(Index start)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename FixedSegmentReturnType<Size>::Type(derived(), start);
+}
+
+/** This is the const version of segment<int>(Index).*/
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::segment(Index start) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename ConstFixedSegmentReturnType<Size>::Type(derived(), start);
+}
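+
+// Fixed-size variant sketch (illustrative comments only, not part of the
+// upstream sources; `v` is a hypothetical user object):
+//
+//   Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(8, 0.0, 7.0);
+//   Eigen::Vector3d s = v.segment<3>(2);   // coefficients 2, 3 and 4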
+
+/** \returns a fixed-size expression of the first coefficients of *this.
+  *
+  * \only_for_vectors
+  *
+  * The template parameter \a Size is the number of coefficients in the block
+  *
+  * Example: \include MatrixBase_template_int_start.cpp
+  * Output: \verbinclude MatrixBase_template_int_start.out
+  *
+  * \sa class Block
+  */
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::head()
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename FixedSegmentReturnType<Size>::Type(derived(), 0);
+}
+
+/** This is the const version of head<int>().*/
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::head() const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename ConstFixedSegmentReturnType<Size>::Type(derived(), 0);
+}
+
+/** \returns a fixed-size expression of the last coefficients of *this.
+  *
+  * \only_for_vectors
+  *
+  * The template parameter \a Size is the number of coefficients in the block
+  *
+  * Example: \include MatrixBase_template_int_end.cpp
+  * Output: \verbinclude MatrixBase_template_int_end.out
+  *
+  * \sa class Block
+  */
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::tail()
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename FixedSegmentReturnType<Size>::Type(derived(), size() - Size);
+}
+
+/** This is the const version of tail<int>().*/
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::tail() const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename ConstFixedSegmentReturnType<Size>::Type(derived(), size() - Size);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_VECTORBLOCK_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/VectorwiseOp.h b/resources/3rdParty/eigen/Eigen/src/Core/VectorwiseOp.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/VectorwiseOp.h
rename to resources/3rdParty/eigen/Eigen/src/Core/VectorwiseOp.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/Visitor.h b/resources/3rdParty/eigen/Eigen/src/Core/Visitor.h
new file mode 100644
index 000000000..916bfd096
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/Visitor.h
@@ -0,0 +1,237 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_VISITOR_H
+#define EIGEN_VISITOR_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Visitor, typename Derived, int UnrollCount>
+struct visitor_impl
+{
+  enum {
+    col = (UnrollCount-1) / Derived::RowsAtCompileTime,
+    row = (UnrollCount-1) % Derived::RowsAtCompileTime
+  };
+
+  static inline void run(const Derived &mat, Visitor& visitor)
+  {
+    visitor_impl<Visitor, Derived, UnrollCount-1>::run(mat, visitor);
+    visitor(mat.coeff(row, col), row, col);
+  }
+};
+
+template<typename Visitor, typename Derived>
+struct visitor_impl<Visitor, Derived, 1>
+{
+  static inline void run(const Derived &mat, Visitor& visitor)
+  {
+    return visitor.init(mat.coeff(0, 0), 0, 0);
+  }
+};
+
+template<typename Visitor, typename Derived>
+struct visitor_impl<Visitor, Derived, Dynamic>
+{
+  typedef typename Derived::Index Index;
+  static inline void run(const Derived& mat, Visitor& visitor)
+  {
+    visitor.init(mat.coeff(0,0), 0, 0);
+    for(Index i = 1; i < mat.rows(); ++i)
+      visitor(mat.coeff(i, 0), i, 0);
+    for(Index j = 1; j < mat.cols(); ++j)
+      for(Index i = 0; i < mat.rows(); ++i)
+        visitor(mat.coeff(i, j), i, j);
+  }
+};
+
+} // end namespace internal
+
+/** Applies the visitor \a visitor to the whole coefficients of the matrix or vector.
+  *
+  * The template parameter \a Visitor is the type of the visitor and provides the following interface:
+  * \code
+  * struct MyVisitor {
+  *   // called for the first coefficient
+  *   void init(const Scalar& value, Index i, Index j);
+  *   // called for all other coefficients
+  *   void operator() (const Scalar& value, Index i, Index j);
+  * };
+  * \endcode
+  *
+  * \note compared to one or two \em for \em loops, visitors offer automatic
+  * unrolling for small fixed-size matrices.
+  *
+  * \sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux()
+  */
+template<typename Derived>
+template<typename Visitor>
+void DenseBase<Derived>::visit(Visitor& visitor) const
+{
+  enum { unroll = SizeAtCompileTime != Dynamic
+                   && CoeffReadCost != Dynamic
+                   && (SizeAtCompileTime == 1 || internal::functor_traits<Visitor>::Cost != Dynamic)
+                   && SizeAtCompileTime * CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits<Visitor>::Cost
+                      <= EIGEN_UNROLLING_LIMIT };
+  return internal::visitor_impl<Visitor, Derived,
+      unroll ? int(SizeAtCompileTime) : Dynamic
+    >::run(derived(), visitor);
+}
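+
+// A hedged usage sketch of DenseBase::visit() with a hand-written visitor
+// (illustrative comments only; `SumVisitor` is not an Eigen type):
+//
+//   struct SumVisitor {
+//     double sum;
+//     void init(const double& value, Eigen::DenseIndex, Eigen::DenseIndex) { sum = value; }
+//     void operator()(const double& value, Eigen::DenseIndex, Eigen::DenseIndex) { sum += value; }
+//   };
+//
+//   Eigen::Matrix2d m;
+//   m << 1, 2, 3, 4;
+//   SumVisitor visitor;
+//   m.visit(visitor);          // visitor.sum == 10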
+
+namespace internal {
+
+/** \internal
+  * \brief Base class to implement min and max visitors
+  */
+template <typename Derived>
+struct coeff_visitor
+{
+  typedef typename Derived::Index Index;
+  typedef typename Derived::Scalar Scalar;
+  Index row, col;
+  Scalar res;
+  inline void init(const Scalar& value, Index i, Index j)
+  {
+    res = value;
+    row = i;
+    col = j;
+  }
+};
+
+/** \internal
+  * \brief Visitor computing the min coefficient with its value and coordinates
+  *
+  * \sa DenseBase::minCoeff(Index*, Index*)
+  */
+template <typename Derived>
+struct min_coeff_visitor : coeff_visitor<Derived>
+{
+  typedef typename Derived::Index Index;
+  typedef typename Derived::Scalar Scalar;
+  void operator() (const Scalar& value, Index i, Index j)
+  {
+    if(value < this->res)
+    {
+      this->res = value;
+      this->row = i;
+      this->col = j;
+    }
+  }
+};
+
+template<typename Scalar>
+struct functor_traits<min_coeff_visitor<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost
+  };
+};
+
+/** \internal
+  * \brief Visitor computing the max coefficient with its value and coordinates
+  *
+  * \sa DenseBase::maxCoeff(Index*, Index*)
+  */
+template <typename Derived>
+struct max_coeff_visitor : coeff_visitor<Derived>
+{
+  typedef typename Derived::Index Index;
+  typedef typename Derived::Scalar Scalar;
+  void operator() (const Scalar& value, Index i, Index j)
+  {
+    if(value > this->res)
+    {
+      this->res = value;
+      this->row = i;
+      this->col = j;
+    }
+  }
+};
+
+template<typename Scalar>
+struct functor_traits<max_coeff_visitor<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost
+  };
+};
+
+} // end namespace internal
+
+/** \returns the minimum of all coefficients of *this
+  * and puts in *row and *col its location.
+  *
+  * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff()
+  */
+template<typename Derived>
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::minCoeff(IndexType* row, IndexType* col) const
+{
+  internal::min_coeff_visitor<Derived> minVisitor;
+  this->visit(minVisitor);
+  *row = minVisitor.row;
+  if (col) *col = minVisitor.col;
+  return minVisitor.res;
+}
+
+/** \returns the minimum of all coefficients of *this
+  * and puts in *index its location.
+  *
+  * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::minCoeff()
+  */
+template<typename Derived>
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::minCoeff(IndexType* index) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  internal::min_coeff_visitor<Derived> minVisitor;
+  this->visit(minVisitor);
+  *index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row;
+  return minVisitor.res;
+}
+
+/** \returns the maximum of all coefficients of *this
+  * and puts in *row and *col its location.
+  *
+  * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff()
+  */
+template<typename Derived>
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::maxCoeff(IndexType* row, IndexType* col) const
+{
+  internal::max_coeff_visitor<Derived> maxVisitor;
+  this->visit(maxVisitor);
+  *row = maxVisitor.row;
+  if (col) *col = maxVisitor.col;
+  return maxVisitor.res;
+}
+
+/** \returns the maximum of all coefficients of *this
+  * and puts in *index its location.
+  *
+  * \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff()
+  */
+template<typename Derived>
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::maxCoeff(IndexType* index) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  internal::max_coeff_visitor<Derived> maxVisitor;
+  this->visit(maxVisitor);
+  *index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row;
+  return maxVisitor.res;
+}
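+
+// Illustrative use of the coefficient-location visitors above (a comment-only
+// sketch, not part of the upstream sources):
+//
+//   Eigen::Matrix2d m;
+//   m << 3, -1,
+//        7,  4;
+//   Eigen::DenseIndex r, c;
+//   double mx = m.maxCoeff(&r, &c);   // mx == 7, r == 1, c == 0
+//
+//   Eigen::Vector3d v(2.0, -5.0, 1.0);
+//   Eigen::DenseIndex i;
+//   double mn = v.minCoeff(&i);       // mn == -5, i == 1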
+
+} // end namespace Eigen
+
+#endif // EIGEN_VISITOR_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/AltiVec/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Core/arch/AltiVec/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/arch/AltiVec/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Core/arch/AltiVec/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/AltiVec/Complex.h b/resources/3rdParty/eigen/Eigen/src/Core/arch/AltiVec/Complex.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/arch/AltiVec/Complex.h
rename to resources/3rdParty/eigen/Eigen/src/Core/arch/AltiVec/Complex.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h b/resources/3rdParty/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h
rename to resources/3rdParty/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Core/arch/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/arch/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Core/arch/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/Default/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Core/arch/Default/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/arch/Default/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Core/arch/Default/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/Default/Settings.h b/resources/3rdParty/eigen/Eigen/src/Core/arch/Default/Settings.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/arch/Default/Settings.h
rename to resources/3rdParty/eigen/Eigen/src/Core/arch/Default/Settings.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/NEON/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Core/arch/NEON/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/arch/NEON/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Core/arch/NEON/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/NEON/Complex.h b/resources/3rdParty/eigen/Eigen/src/Core/arch/NEON/Complex.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/arch/NEON/Complex.h
rename to resources/3rdParty/eigen/Eigen/src/Core/arch/NEON/Complex.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/arch/NEON/PacketMath.h b/resources/3rdParty/eigen/Eigen/src/Core/arch/NEON/PacketMath.h
new file mode 100644
index 000000000..a20250f7c
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -0,0 +1,424 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Konstantinos Margaritis <markos@codex.gr>
+// Heavily based on Gael's SSE version.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_NEON_H
+#define EIGEN_PACKET_MATH_NEON_H
+
+namespace Eigen {
+
+namespace internal {
+
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+// FIXME NEON has 16 quad registers, but since the current register allocator
+// is so bad, it is much better to reduce it to 8
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8
+#endif
+
+typedef float32x4_t Packet4f;
+typedef int32x4_t   Packet4i;
+typedef uint32x4_t  Packet4ui;
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
+  const Packet4f p4f_##NAME = pset1<Packet4f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
+  const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int>(X))
+
+#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
+  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
+
+#if defined(__llvm__) && !defined(__clang__)
+  // Special treatment for Apple's llvm-gcc: its NEON packet types are unions
+  #define EIGEN_INIT_NEON_PACKET2(X, Y)       {{X, Y}}
+  #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {{X, Y, Z, W}}
+#else
+  // Default initializer for packets
+  #define EIGEN_INIT_NEON_PACKET2(X, Y)       {X, Y}
+  #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {X, Y, Z, W}
+#endif
+    
+#ifndef __pld
+#define __pld(x) asm volatile ( "   pld [%[addr]]\n" :: [addr] "r" (x) : "cc" );
+#endif
+
+template<> struct packet_traits<float>  : default_packet_traits
+{
+  typedef Packet4f type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 4,
+   
+    HasDiv  = 1,
+    // FIXME check the Has*
+    HasSin  = 0,
+    HasCos  = 0,
+    HasLog  = 0,
+    HasExp  = 0,
+    HasSqrt = 0
+  };
+};
+template<> struct packet_traits<int>    : default_packet_traits
+{
+  typedef Packet4i type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=4
+    // FIXME check the Has*
+  };
+};
+
+#if EIGEN_GNUC_AT_MOST(4,4) && !defined(__llvm__)
+// workaround for a gcc 4.2, 4.3 and 4.4 compilation issue
+EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
+EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
+EIGEN_STRONG_INLINE void        vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
+EIGEN_STRONG_INLINE void        vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
+#endif
+
+template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
+template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return vdupq_n_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from)   { return vdupq_n_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a)
+{
+  Packet4f countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
+  return vaddq_f32(pset1<Packet4f>(a), countdown);
+}
+template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a)
+{
+  Packet4i countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
+  return vaddq_s32(pset1<Packet4i>(a), countdown);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  Packet4f inv, restep, div;
+
+  // NEON does not offer a divide instruction, so we have to use a reciprocal approximation.
+  // However, unlike other SIMD engines (AltiVec/SSE), NEON offers both a reciprocal
+  // estimate AND a reciprocal step, which saves a few instructions:
+  // vrecpeq_f32() returns an estimate of 1/b, which we then refine with
+  // Newton-Raphson via vrecpsq_f32().
+  inv = vrecpeq_f32(b);
+
+  // This returns a differential, by which we will have to multiply inv to get a better
+  // approximation of 1/b.
+  restep = vrecpsq_f32(b, inv);
+  inv = vmulq_f32(restep, inv);
+
+  // Finally, multiply a by 1/b and get the wanted result of the division.
+  div = vmulq_f32(a, inv);
+
+  return div;
+}
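+
+// [Editor's note] The scalar sketch below is an editorial illustration only and is NOT part of
+// Eigen 3.1.2 (the helper name is hypothetical). It mirrors the refinement used above:
+// vrecpeq_f32 yields an estimate r of 1/b, vrecpsq_f32(b, r) computes (2 - b*r), and the
+// multiply performs one Newton-Raphson step r <- r * (2 - b*r) before forming a * (1/b).
+inline float pdiv_scalar_sketch(float a, float b, float r /* initial estimate of 1/b */)
+{
+  r = r * (2.0f - b * r);  // one Newton-Raphson refinement step (vrecpsq_f32 + vmulq_f32)
+  return a * r;            // a * (refined 1/b)
+}
+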
+template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
+{ eigen_assert(false && "packet integer division is not supported by NEON");
+  return pset1<Packet4i>(0);
+}
+
+// for some weird reason, it has to be overloaded for packets of integers
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vmlaq_f32(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }
+
+// Logical operations are not supported for float, so we have to reinterpret-cast via NEON intrinsics
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*   from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)   { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
+{
+  float32x2_t lo, hi;
+  lo = vdup_n_f32(*from);
+  hi = vdup_n_f32(*(from+1));
+  return vcombine_f32(lo, hi);
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
+{
+  int32x2_t lo, hi;
+  lo = vdup_n_s32(*from);
+  hi = vdup_n_s32(*(from+1));
+  return vcombine_s32(lo, hi);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*      to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { __pld(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*     addr) { __pld(addr); }
+
+// FIXME only store the first 2 elements?
+template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int   EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
+  float32x2_t a_lo, a_hi;
+  Packet4f a_r64;
+
+  a_r64 = vrev64q_f32(a);
+  a_lo = vget_low_f32(a_r64);
+  a_hi = vget_high_f32(a_r64);
+  return vcombine_f32(a_hi, a_lo);
+}
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
+  int32x2_t a_lo, a_hi;
+  Packet4i a_r64;
+
+  a_r64 = vrev64q_s32(a);
+  a_lo = vget_low_s32(a_r64);
+  a_hi = vget_high_s32(a_r64);
+  return vcombine_s32(a_hi, a_lo);
+}
+template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
+template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, sum;
+  float s[2];
+
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  sum = vpadd_f32(a_lo, a_hi);
+  sum = vpadd_f32(sum, sum);
+  vst1_f32(s, sum);
+
+  return s[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+  float32x4x2_t vtrn1, vtrn2, res1, res2;
+  Packet4f sum1, sum2, sum;
+
+  // NEON zip interleaves the supplied vectors.
+  // We perform two rounds of interleaving to obtain the transposed vectors.
+  vtrn1 = vzipq_f32(vecs[0], vecs[2]);
+  vtrn2 = vzipq_f32(vecs[1], vecs[3]);
+  res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
+  res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
+
+  // Do the addition of the resulting vectors
+  sum1 = vaddq_f32(res1.val[0], res1.val[1]);
+  sum2 = vaddq_f32(res2.val[0], res2.val[1]);
+  sum = vaddq_f32(sum1, sum2);
+
+  return sum;
+}
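+
+// [Editor's note] Editorial sketch, not part of Eigen 3.1.2 (hypothetical helper name). In scalar
+// terms, the two rounds of vzipq_f32 above transpose the four input vectors, after which three
+// vertical adds produce a packet whose lane j holds the horizontal sum of vecs[j]:
+inline void preduxp_scalar_sketch(const float vecs[4][4], float out[4])
+{
+  float t[4][4];
+  for (int i = 0; i < 4; ++i)
+    for (int j = 0; j < 4; ++j)
+      t[j][i] = vecs[i][j];                           // transpose (the job of the two zip rounds)
+  for (int j = 0; j < 4; ++j)
+    out[j] = t[0][j] + t[1][j] + t[2][j] + t[3][j];   // vertical adds = per-vector horizontal sums
+}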
+
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, sum;
+  int32_t s[2];
+
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  sum = vpadd_s32(a_lo, a_hi);
+  sum = vpadd_s32(sum, sum);
+  vst1_s32(s, sum);
+
+  return s[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+  int32x4x2_t vtrn1, vtrn2, res1, res2;
+  Packet4i sum1, sum2, sum;
+
+  // NEON zip interleaves the supplied vectors.
+  // We perform two rounds of interleaving to obtain the transposed vectors.
+  vtrn1 = vzipq_s32(vecs[0], vecs[2]);
+  vtrn2 = vzipq_s32(vecs[1], vecs[3]);
+  res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
+  res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);
+
+  // Do the addition of the resulting vectors
+  sum1 = vaddq_s32(res1.val[0], res1.val[1]);
+  sum2 = vaddq_s32(res2.val[0], res2.val[1]);
+  sum = vaddq_s32(sum1, sum2);
+
+  return sum;
+}
+
+// Other reduction functions:
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, prod;
+  float s[2];
+
+  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
+  prod = vmul_f32(a_lo, a_hi);
+  // Multiply prod with its swapped value |a2*a4|a1*a3|
+  prod = vmul_f32(prod, vrev64_f32(prod));
+  vst1_f32(s, prod);
+
+  return s[0];
+}
+template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, prod;
+  int32_t s[2];
+
+  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
+  prod = vmul_s32(a_lo, a_hi);
+  // Multiply prod with its swapped value |a2*a4|a1*a3|
+  prod = vmul_s32(prod, vrev64_s32(prod));
+  vst1_s32(s, prod);
+
+  return s[0];
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, min;
+  float s[2];
+
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  min = vpmin_f32(a_lo, a_hi);
+  min = vpmin_f32(min, min);
+  vst1_f32(s, min);
+
+  return s[0];
+}
+template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, min;
+  int32_t s[2];
+
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  min = vpmin_s32(a_lo, a_hi);
+  min = vpmin_s32(min, min);
+  vst1_s32(s, min);
+
+  return s[0];
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, max;
+  float s[2];
+
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  max = vpmax_f32(a_lo, a_hi);
+  max = vpmax_f32(max, max);
+  vst1_f32(s, max);
+
+  return s[0];
+}
+template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, max;
+  int32_t s[2];
+
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  max = vpmax_s32(a_lo, a_hi);
+  max = vpmax_s32(max, max);
+  vst1_s32(s, max);
+
+  return s[0];
+}
+
+// This PALIGN_NEON business works around a bug in LLVM Clang 3.0 that causes spurious compilation errors;
+// see Eigen bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074
+#define PALIGN_NEON(Offset,Type,Command) \
+template<>\
+struct palign_impl<Offset,Type>\
+{\
+    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
+    {\
+        if (Offset!=0)\
+            first = Command(first, second, Offset);\
+    }\
+};\
+
+PALIGN_NEON(0,Packet4f,vextq_f32)
+PALIGN_NEON(1,Packet4f,vextq_f32)
+PALIGN_NEON(2,Packet4f,vextq_f32)
+PALIGN_NEON(3,Packet4f,vextq_f32)
+PALIGN_NEON(0,Packet4i,vextq_s32)
+PALIGN_NEON(1,Packet4i,vextq_s32)
+PALIGN_NEON(2,Packet4i,vextq_s32)
+PALIGN_NEON(3,Packet4i,vextq_s32)
+    
+#undef PALIGN_NEON
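+
+// [Editor's note] Editorial sketch, not part of Eigen 3.1.2 (hypothetical helper name). palign_impl
+// above delegates to vextq_f32 / vextq_s32; in scalar terms, lane i of the result is read from the
+// concatenation [first | second] starting at position 'offset':
+inline void palign_scalar_sketch(const float first[4], const float second[4], int offset, float out[4])
+{
+  for (int i = 0; i < 4; ++i)
+    out[i] = (i + offset < 4) ? first[i + offset] : second[i + offset - 4];
+}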
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PACKET_MATH_NEON_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/Complex.h b/resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/Complex.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/Complex.h
rename to resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/Complex.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h b/resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h
new file mode 100644
index 000000000..3f41a4e26
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -0,0 +1,384 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007 Julien Pommier
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* The sin, cos, exp, and log functions of this file come from
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
+ */
+
+#ifndef EIGEN_MATH_FUNCTIONS_SSE_H
+#define EIGEN_MATH_FUNCTIONS_SSE_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f plog<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+
+  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
+
+  /* the smallest non-denormalized float number */
+  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos,  0x00800000);
+
+  /* natural logarithm computed for 4 simultaneous floats;
+     returns NaN for x <= 0
+  */
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
+
+
+  Packet4i emm0;
+
+  Packet4f invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps());
+
+  x = pmax(x, p4f_min_norm_pos);  /* cut off denormalized stuff */
+  emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
+
+  /* keep only the fractional part */
+  x = _mm_and_ps(x, p4f_inv_mant_mask);
+  x = _mm_or_ps(x, p4f_half);
+
+  emm0 = _mm_sub_epi32(emm0, p4i_0x7f);
+  Packet4f e = padd(_mm_cvtepi32_ps(emm0), p4f_1);
+
+  /* part2:
+     if( x < SQRTHF ) {
+       e -= 1;
+       x = x + x - 1.0;
+     } else { x = x - 1.0; }
+  */
+  Packet4f mask = _mm_cmplt_ps(x, p4f_cephes_SQRTHF);
+  Packet4f tmp = _mm_and_ps(x, mask);
+  x = psub(x, p4f_1);
+  e = psub(e, _mm_and_ps(p4f_1, mask));
+  x = padd(x, tmp);
+
+  Packet4f x2 = pmul(x,x);
+  Packet4f x3 = pmul(x2,x);
+
+  Packet4f y, y1, y2;
+  y  = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
+  y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
+  y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
+  y  = pmadd(y , x, p4f_cephes_log_p2);
+  y1 = pmadd(y1, x, p4f_cephes_log_p5);
+  y2 = pmadd(y2, x, p4f_cephes_log_p8);
+  y = pmadd(y, x3, y1);
+  y = pmadd(y, x3, y2);
+  y = pmul(y, x3);
+
+  y1 = pmul(e, p4f_cephes_log_q1);
+  tmp = pmul(x2, p4f_half);
+  y = padd(y, y1);
+  x = psub(x, tmp);
+  y2 = pmul(e, p4f_cephes_log_q2);
+  x = padd(x, y);
+  x = padd(x, y2);
+  return _mm_or_ps(x, invalid_mask); // negative arg will be NAN
+}
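+
+// [Editor's note] Editorial sketch, not part of Eigen 3.1.2 (hypothetical helper, assumes <cmath> is
+// available). plog above follows the classic cephes scheme: split x = m * 2^e with m in [0.5, 1),
+// fold the "m < sqrt(1/2)" case into the exponent so that m lies in [sqrt(1/2), sqrt(2)), evaluate a
+// degree-8 polynomial in (m - 1), and reassemble the result as log(m) + e * ln(2).
+inline float plog_scalar_sketch(float x)
+{
+  int e;
+  float m = std::frexp(x, &e);                          // x = m * 2^e, with m in [0.5, 1)
+  if (m < 0.707106781186547524f) { m += m; e -= 1; }    // keep m in [sqrt(1/2), sqrt(2))
+  return std::log(m) + float(e) * 0.693147180559945f;   // std::log stands in for the polynomial
+}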
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pexp<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+
+
+  _EIGEN_DECLARE_CONST_Packet4f(exp_hi,  88.3762626647950f);
+  _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
+
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
+
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
+
+  Packet4f tmp = _mm_setzero_ps(), fx;
+  Packet4i emm0;
+
+  // clamp x
+  x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
+
+  /* express exp(x) as exp(g + n*log(2)) */
+  fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
+
+  /* how to perform a floorf with SSE: just below */
+  emm0 = _mm_cvttps_epi32(fx);
+  tmp  = _mm_cvtepi32_ps(emm0);
+  /* if greater, subtract 1 */
+  Packet4f mask = _mm_cmpgt_ps(tmp, fx);
+  mask = _mm_and_ps(mask, p4f_1);
+  fx = psub(tmp, mask);
+
+  tmp = pmul(fx, p4f_cephes_exp_C1);
+  Packet4f z = pmul(fx, p4f_cephes_exp_C2);
+  x = psub(x, tmp);
+  x = psub(x, z);
+
+  z = pmul(x,x);
+
+  Packet4f y = p4f_cephes_exp_p0;
+  y = pmadd(y, x, p4f_cephes_exp_p1);
+  y = pmadd(y, x, p4f_cephes_exp_p2);
+  y = pmadd(y, x, p4f_cephes_exp_p3);
+  y = pmadd(y, x, p4f_cephes_exp_p4);
+  y = pmadd(y, x, p4f_cephes_exp_p5);
+  y = pmadd(y, z, x);
+  y = padd(y, p4f_1);
+
+  // build 2^n
+  emm0 = _mm_cvttps_epi32(fx);
+  emm0 = _mm_add_epi32(emm0, p4i_0x7f);
+  emm0 = _mm_slli_epi32(emm0, 23);
+  return pmul(y, _mm_castsi128_ps(emm0));
+}
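+
+// [Editor's note] Editorial sketch, not part of Eigen 3.1.2 (hypothetical helper, assumes <cmath>).
+// pexp above uses the same decomposition: n = round(x * log2(e)), g = x - n*ln(2) computed with
+// ln(2) split into two constants for precision, a degree-5 polynomial for exp(g), and a final scale
+// by 2^n, which the SIMD code builds by sliding (n + 127) into the float exponent field.
+inline float pexp_scalar_sketch(float x)
+{
+  float n = std::floor(x * 1.44269504088896341f + 0.5f);  // round(x * log2(e))
+  float g = x - n * 0.693359375f + n * 2.12194440e-4f;    // x - n*ln(2), ln(2) split as C1 + C2
+  return std::ldexp(std::exp(g), int(n));                 // std::exp stands in for the polynomial
+}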
+
+/* Evaluation of 4 sines at once, using SSE2 intrinsics.
+
+   The code is an exact rewriting of the cephes sinf function.
+   Precision is excellent as long as x < 8192 (the special handling cephes
+   applies to larger arguments is not reproduced here -- results for
+   arguments over 8192 are not garbage, they merely lose some precision).
+
+   Note that sinf((float)M_PI) = 8.74e-8, which is a surprising but
+   correct result.
+*/
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psin<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+
+  _EIGEN_DECLARE_CONST_Packet4i(1, 1);
+  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
+  _EIGEN_DECLARE_CONST_Packet4i(2, 2);
+  _EIGEN_DECLARE_CONST_Packet4i(4, 4);
+
+  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
+
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
+
+  Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
+
+  Packet4i emm0, emm2;
+  sign_bit = x;
+  /* take the absolute value */
+  x = pabs(x);
+
+  /* take the modulo */
+
+  /* extract the sign bit (upper one) */
+  sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask);
+
+  /* scale by 4/Pi */
+  y = pmul(x, p4f_cephes_FOPI);
+
+  /* store the integer part of y in emm2 */
+  emm2 = _mm_cvttps_epi32(y);
+  /* j=(j+1) & (~1) (see the cephes sources) */
+  emm2 = _mm_add_epi32(emm2, p4i_1);
+  emm2 = _mm_and_si128(emm2, p4i_not1);
+  y = _mm_cvtepi32_ps(emm2);
+  /* get the swap sign flag */
+  emm0 = _mm_and_si128(emm2, p4i_4);
+  emm0 = _mm_slli_epi32(emm0, 29);
+  /* get the polynomial selection mask:
+     there is one polynomial for 0 <= x <= Pi/4
+     and another one for Pi/4 < x <= Pi/2.
+
+     Both branches will be computed.
+  */
+  emm2 = _mm_and_si128(emm2, p4i_2);
+  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+  Packet4f swap_sign_bit = _mm_castsi128_ps(emm0);
+  Packet4f poly_mask = _mm_castsi128_ps(emm2);
+  sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
+
+  /* The magic pass: "Extended precision modular arithmetic"
+     x = ((x - y * DP1) - y * DP2) - y * DP3; */
+  xmm1 = pmul(y, p4f_minus_cephes_DP1);
+  xmm2 = pmul(y, p4f_minus_cephes_DP2);
+  xmm3 = pmul(y, p4f_minus_cephes_DP3);
+  x = padd(x, xmm1);
+  x = padd(x, xmm2);
+  x = padd(x, xmm3);
+
+  /* Evaluate the first polynomial  (0 <= x <= Pi/4) */
+  y = p4f_coscof_p0;
+  Packet4f z = _mm_mul_ps(x,x);
+
+  y = pmadd(y, z, p4f_coscof_p1);
+  y = pmadd(y, z, p4f_coscof_p2);
+  y = pmul(y, z);
+  y = pmul(y, z);
+  Packet4f tmp = pmul(z, p4f_half);
+  y = psub(y, tmp);
+  y = padd(y, p4f_1);
+
+  /* Evaluate the second polynomial  (Pi/4 <= x <= Pi/2) */
+
+  Packet4f y2 = p4f_sincof_p0;
+  y2 = pmadd(y2, z, p4f_sincof_p1);
+  y2 = pmadd(y2, z, p4f_sincof_p2);
+  y2 = pmul(y2, z);
+  y2 = pmul(y2, x);
+  y2 = padd(y2, x);
+
+  /* select the correct result from the two polynomials */
+  y2 = _mm_and_ps(poly_mask, y2);
+  y = _mm_andnot_ps(poly_mask, y);
+  y = _mm_or_ps(y,y2);
+  /* update the sign */
+  return _mm_xor_ps(y, sign_bit);
+}
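+
+// [Editor's note] Editorial sketch, not part of Eigen 3.1.2 (hypothetical helper, assumes <cmath>).
+// It spells out the cephes-style range reduction performed above: x * 4/pi is truncated and rounded
+// up to an even octant index j, the remainder is taken against pi/4 split into the three constants
+// DP1 + DP2 + DP3, and the octant selects between the sine/cosine polynomials and the final sign.
+inline float psin_scalar_sketch(float x)
+{
+  float s = (x < 0.0f) ? -1.0f : 1.0f;
+  x = std::fabs(x);
+  int j = ((int)(x * 1.27323954473516f) + 1) & ~1;         // j = (j+1) & (~1), as above
+  float y = (float)j;
+  x = ((x - y * 0.78515625f) - y * 2.4187564849853515625e-4f) - y * 3.77489497744594108e-8f;
+  if (j & 4) s = -s;                                       // swap-sign flag
+  return s * ((j & 2) ? std::cos(x) : std::sin(x));        // std calls stand in for the polynomials
+}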
+
+/* almost the same as psin */
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pcos<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+
+  _EIGEN_DECLARE_CONST_Packet4i(1, 1);
+  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
+  _EIGEN_DECLARE_CONST_Packet4i(2, 2);
+  _EIGEN_DECLARE_CONST_Packet4i(4, 4);
+
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
+
+  Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
+  Packet4i emm0, emm2;
+
+  x = pabs(x);
+
+  /* scale by 4/Pi */
+  y = pmul(x, p4f_cephes_FOPI);
+
+  /* get the integer part of y */
+  emm2 = _mm_cvttps_epi32(y);
+  /* j=(j+1) & (~1) (see the cephes sources) */
+  emm2 = _mm_add_epi32(emm2, p4i_1);
+  emm2 = _mm_and_si128(emm2, p4i_not1);
+  y = _mm_cvtepi32_ps(emm2);
+
+  emm2 = _mm_sub_epi32(emm2, p4i_2);
+
+  /* get the swap sign flag */
+  emm0 = _mm_andnot_si128(emm2, p4i_4);
+  emm0 = _mm_slli_epi32(emm0, 29);
+  /* get the polynomial selection mask */
+  emm2 = _mm_and_si128(emm2, p4i_2);
+  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+  Packet4f sign_bit = _mm_castsi128_ps(emm0);
+  Packet4f poly_mask = _mm_castsi128_ps(emm2);
+
+  /* The magic pass: "Extended precision modular arithmetic"
+     x = ((x - y * DP1) - y * DP2) - y * DP3; */
+  xmm1 = pmul(y, p4f_minus_cephes_DP1);
+  xmm2 = pmul(y, p4f_minus_cephes_DP2);
+  xmm3 = pmul(y, p4f_minus_cephes_DP3);
+  x = padd(x, xmm1);
+  x = padd(x, xmm2);
+  x = padd(x, xmm3);
+
+  /* Evaluate the first polynomial  (0 <= x <= Pi/4) */
+  y = p4f_coscof_p0;
+  Packet4f z = pmul(x,x);
+
+  y = pmadd(y,z,p4f_coscof_p1);
+  y = pmadd(y,z,p4f_coscof_p2);
+  y = pmul(y, z);
+  y = pmul(y, z);
+  Packet4f tmp = _mm_mul_ps(z, p4f_half);
+  y = psub(y, tmp);
+  y = padd(y, p4f_1);
+
+  /* Evaluate the second polynomial  (Pi/4 <= x <= Pi/2) */
+  Packet4f y2 = p4f_sincof_p0;
+  y2 = pmadd(y2, z, p4f_sincof_p1);
+  y2 = pmadd(y2, z, p4f_sincof_p2);
+  y2 = pmul(y2, z);
+  y2 = pmadd(y2, x, x);
+
+  /* select the correct result from the two polynomials */
+  y2 = _mm_and_ps(poly_mask, y2);
+  y  = _mm_andnot_ps(poly_mask, y);
+  y  = _mm_or_ps(y,y2);
+
+  /* update the sign */
+  return _mm_xor_ps(y, sign_bit);
+}
+
+// This is based on Quake3's fast inverse square root.
+// For details see here: http://www.beyond3d.com/content/articles/8/
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psqrt<Packet4f>(const Packet4f& _x)
+{
+  Packet4f half = pmul(_x, pset1<Packet4f>(.5f));
+
+  /* select only the inverse sqrt of non-zero inputs */
+  Packet4f non_zero_mask = _mm_cmpgt_ps(_x, pset1<Packet4f>(std::numeric_limits<float>::epsilon()));
+  Packet4f x = _mm_and_ps(non_zero_mask, _mm_rsqrt_ps(_x));
+
+  x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x))));
+  return pmul(_x,x);
+}
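+
+// [Editor's note] Editorial sketch, not part of Eigen 3.1.2 (hypothetical helper name). psqrt above
+// computes sqrt(x) as x * rsqrt(x): _mm_rsqrt_ps gives a rough estimate y of 1/sqrt(x), one
+// Newton-Raphson step y <- y * (1.5 - 0.5*x*y*y) refines it, and multiplying by x yields sqrt(x).
+inline float psqrt_scalar_sketch(float x, float y /* initial estimate of 1/sqrt(x) */)
+{
+  y = y * (1.5f - 0.5f * x * y * y);  // one Newton-Raphson refinement of the reciprocal sqrt
+  return x * y;                       // x * (1/sqrt(x)) = sqrt(x)
+}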
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATH_FUNCTIONS_SSE_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/PacketMath.h b/resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/PacketMath.h
new file mode 100644
index 000000000..10d918219
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -0,0 +1,632 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_SSE_H
+#define EIGEN_PACKET_MATH_SSE_H
+
+namespace Eigen {
+
+namespace internal {
+
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
+#endif
+
+typedef __m128  Packet4f;
+typedef __m128i Packet4i;
+typedef __m128d Packet2d;
+
+template<> struct is_arithmetic<__m128>  { enum { value = true }; };
+template<> struct is_arithmetic<__m128i> { enum { value = true }; };
+template<> struct is_arithmetic<__m128d> { enum { value = true }; };
+
+#define vec4f_swizzle1(v,p,q,r,s) \
+  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))
+
+#define vec4i_swizzle1(v,p,q,r,s) \
+  (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))
+
+#define vec2d_swizzle1(v,p,q) \
+  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))
+  
+#define vec4f_swizzle2(a,b,p,q,r,s) \
+  (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))
+
+#define vec4i_swizzle2(a,b,p,q,r,s) \
+  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
+  const Packet4f p4f_##NAME = pset1<Packet4f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
+  const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))
+
+#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
+  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
+
+
+template<> struct packet_traits<float>  : default_packet_traits
+{
+  typedef Packet4f type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=4,
+
+    HasDiv    = 1,
+    HasSin  = EIGEN_FAST_MATH,
+    HasCos  = EIGEN_FAST_MATH,
+    HasLog  = 1,
+    HasExp  = 1,
+    HasSqrt = 1
+  };
+};
+template<> struct packet_traits<double> : default_packet_traits
+{
+  typedef Packet2d type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=2,
+
+    HasDiv    = 1
+  };
+};
+template<> struct packet_traits<int>    : default_packet_traits
+{
+  typedef Packet4i type;
+  enum {
+    // FIXME check the Has*
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=4
+  };
+};
+
+template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
+template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2}; };
+template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };
+
+#if defined(_MSC_VER) && (_MSC_VER==1500)
+// Workaround MSVC 9 internal compiler error.
+// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32-bit+SSE mode
+// TODO: let's check whether a better fix exists, e.g. adding a pset0() function (it crashed on pset1(0)).
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
+template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
+#else
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set1_ps(from); }
+template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
+template<> EIGEN_STRONG_INLINE Packet2d plset<double>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
+template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
+{
+  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
+  return _mm_xor_ps(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
+{
+  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
+  return _mm_xor_pd(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
+{
+  return psub(_mm_setr_epi32(0,0,0,0), a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_mullo_epi32(a,b);
+#else
+  // this version is slightly faster than 4 scalar products
+  return vec4i_swizzle1(
+            vec4i_swizzle2(
+              _mm_mul_epu32(a,b),
+              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
+                            vec4i_swizzle1(b,1,0,3,2)),
+              0,2,0,2),
+            0,2,1,3);
+#endif
+}
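+
+// [Editor's note] Editorial sketch, not part of Eigen 3.1.2 (hypothetical helper name). The SSE2
+// fallback above works because _mm_mul_epu32 only multiplies lanes 0 and 2 into 64-bit results; the
+// swizzles feed it lanes 1 and 3 as well, and the low 32 bits of the four products are then gathered
+// back into their original order. Keeping the low 32 bits of an unsigned product matches a signed
+// 32x32->32 multiply, which is all that happens lane by lane:
+inline void pmul_epi32_scalar_sketch(const int a[4], const int b[4], int out[4])
+{
+  for (int i = 0; i < 4; ++i)
+    out[i] = (int)((unsigned int)a[i] * (unsigned int)b[i]);  // unsigned wrap-around = low 32 bits
+}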
+
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
+{ eigen_assert(false && "packet integer division is not supported by SSE");
+  return pset1<Packet4i>(0);
+}
+
+// for some weird reason, it has to be overloaded for packets of integers
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+  // after some bench, this version *is* faster than a scalar implementation
+  Packet4i mask = _mm_cmplt_epi32(a,b);
+  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+  // after some bench, this version *is* faster than a scalar implementation
+  Packet4i mask = _mm_cmpgt_epi32(a,b);
+  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
+template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const Packet4i*>(from)); }
+
+#if defined(_MSC_VER)
+  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*  from) {
+    EIGEN_DEBUG_UNALIGNED_LOAD
+    #if (_MSC_VER==1600)
+    // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
+    // (i.e., they do not generate an unaligned load!!).
+    // TODO On most architectures this version should also be faster than a single _mm_loadu_ps,
+    // so we could also enable it for MSVC08, but first we have to make sure the latter does not generate bad code.
+    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
+    res = _mm_loadh_pi(res, (const __m64*)(from+2));
+    return res;
+    #else
+    return _mm_loadu_ps(from);
+    #endif
+  }
+  template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
+  template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from)); }
+#else
+// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
+// require pointer casting to incompatible pointer types and leads to invalid code
+// because of the strict aliasing rule. The "dummy" stuff is required to enforce
+// a correct instruction dependency.
+// TODO: do the same for MSVC (ICC is compatible)
+// NOTE: with the code below, MSVC's compiler crashes!
+
+#if defined(__GNUC__) && defined(__i386__)
+  // bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
+  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
+#elif defined(__clang__)
+  // bug 201: Segfaults in __mm_loadh_pd with clang 2.8
+  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
+#else
+  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
+{
+  EIGEN_DEBUG_UNALIGNED_LOAD
+#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
+  return _mm_loadu_ps(from);
+#else
+  __m128d res;
+  res =  _mm_load_sd((const double*)(from)) ;
+  res =  _mm_loadh_pd(res, (const double*)(from+2)) ;
+  return _mm_castpd_ps(res);
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
+{
+  EIGEN_DEBUG_UNALIGNED_LOAD
+#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
+  return _mm_loadu_pd(from);
+#else
+  __m128d res;
+  res = _mm_load_sd(from) ;
+  res = _mm_loadh_pd(res,from+1);
+  return res;
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
+{
+  EIGEN_DEBUG_UNALIGNED_LOAD
+#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
+  return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from));
+#else
+  __m128d res;
+  res =  _mm_load_sd((const double*)(from)) ;
+  res =  _mm_loadh_pd(res, (const double*)(from+2)) ;
+  return _mm_castpd_si128(res);
+#endif
+}
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
+{
+  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
+}
+template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double*  from)
+{ return pset1<Packet2d>(from[0]); }
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
+{
+  Packet4i tmp;
+  tmp = _mm_loadl_epi64(reinterpret_cast<const Packet4i*>(from));
+  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<Packet4i*>(to), from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE
+  _mm_storel_pd((to), from);
+  _mm_storeh_pd((to+1), from);
+}
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castps_pd(from)); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*      to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castsi128_pd(from)); }
+
+// some compilers might be tempted to perform multiple moves instead of using a vector path.
+template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
+{
+  Packet4f pa = _mm_set_ss(a);
+  pstore(to, vec4f_swizzle1(pa,0,0,0,0));
+}
+// some compilers might be tempted to perform multiple moves instead of using a vector path.
+template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
+{
+  Packet2d pa = _mm_set_sd(a);
+  pstore(to, vec2d_swizzle1(pa,0,0));
+}
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+
+#if defined(_MSC_VER) && defined(_WIN64) && !defined(__INTEL_COMPILER)
+// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
+// Direct access to the struct members fixed bug #62.
+template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
+template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
+#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
+template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
+template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
+#else
+template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
+template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+{ return _mm_shuffle_ps(a,a,0x1B); }
+template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
+{ return _mm_shuffle_pd(a,a,0x1); }
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
+{ return _mm_shuffle_epi32(a,0x1B); }
+
+
+template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
+{
+  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
+  return _mm_and_ps(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
+{
+  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
+  return _mm_and_pd(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
+{
+  #ifdef EIGEN_VECTORIZE_SSSE3
+  return _mm_abs_epi32(a);
+  #else
+  Packet4i aux = _mm_srai_epi32(a,31);
+  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
+  #endif
+}
+
+EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
+{
+  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
+  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
+  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
+  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
+}
+
+#ifdef EIGEN_VECTORIZE_SSE3
+// TODO implement SSE2 versions as well as integer versions
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
+}
+template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
+{
+  return _mm_hadd_pd(vecs[0], vecs[1]);
+}
+// SSSE3 version:
+// EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs)
+// {
+//   return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
+// }
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp0 = _mm_hadd_ps(a,a);
+  return pfirst(_mm_hadd_ps(tmp0, tmp0));
+}
+
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return pfirst(_mm_hadd_pd(a, a)); }
+
+// SSSE3 version:
+// EIGEN_STRONG_INLINE float predux(const Packet4i& a)
+// {
+//   Packet4i tmp0 = _mm_hadd_epi32(a,a);
+//   return pfirst(_mm_hadd_epi32(tmp0, tmp0));
+// }
+#else
+// SSE2 versions
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
+{
+  return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+  Packet4f tmp0, tmp1, tmp2;
+  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
+  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
+  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
+  tmp0 = _mm_add_ps(tmp0, tmp1);
+  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
+  tmp1 = _mm_add_ps(tmp1, tmp2);
+  tmp2 = _mm_movehl_ps(tmp1, tmp0);
+  tmp0 = _mm_movelh_ps(tmp0, tmp1);
+  return _mm_add_ps(tmp0, tmp2);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
+{
+  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
+}
+#endif  // SSE3
+
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
+  return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+  Packet4i tmp0, tmp1, tmp2;
+  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
+  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
+  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
+  tmp0 = _mm_add_epi32(tmp0, tmp1);
+  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
+  tmp1 = _mm_add_epi32(tmp1, tmp2);
+  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
+  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
+  return _mm_add_epi32(tmp0, tmp2);
+}
+
+// Other reduction functions:
+
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
+{
+  return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+{
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., reusing pmul is very slow!)
+  // TODO try to call _mm_mul_epu32 directly
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
+{
+  return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+{
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., it does not like using std::min after the pstore!!)
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  register int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
+  register int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
+  return aux0<aux2 ? aux0 : aux2;
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
+{
+  return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+{
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., it does not like using std::max after the pstore!!)
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  register int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
+  register int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
+  return aux0>aux2 ? aux0 : aux2;
+}
+
+#if (defined __GNUC__)
+// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f&  a, const Packet4f&  b, const Packet4f&  c)
+// {
+//   Packet4f res = b;
+//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
+//   return res;
+// }
+// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i&  a, const Packet4i&  b, const int i)
+// {
+//   Packet4i res = a;
+//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
+//   return res;
+// }
+#endif
+
+#ifdef EIGEN_VECTORIZE_SSSE3
+// SSSE3 versions
+template<int Offset>
+struct palign_impl<Offset,Packet4f>
+{
+  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
+  {
+    if (Offset!=0)
+      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
+  }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet4i>
+{
+  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
+  {
+    if (Offset!=0)
+      first = _mm_alignr_epi8(second,first, Offset*4);
+  }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet2d>
+{
+  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
+  {
+    if (Offset==1)
+      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
+  }
+};
+#else
+// SSE2 versions
+template<int Offset>
+struct palign_impl<Offset,Packet4f>
+{
+  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
+  {
+    if (Offset==1)
+    {
+      first = _mm_move_ss(first,second);
+      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
+    }
+    else if (Offset==2)
+    {
+      first = _mm_movehl_ps(first,first);
+      first = _mm_movelh_ps(first,second);
+    }
+    else if (Offset==3)
+    {
+      first = _mm_move_ss(first,second);
+      first = _mm_shuffle_ps(first,second,0x93);
+    }
+  }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet4i>
+{
+  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
+  {
+    if (Offset==1)
+    {
+      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
+      first = _mm_shuffle_epi32(first,0x39);
+    }
+    else if (Offset==2)
+    {
+      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
+      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
+    }
+    else if (Offset==3)
+    {
+      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
+      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
+    }
+  }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet2d>
+{
+  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
+  {
+    if (Offset==1)
+    {
+      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
+      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
+    }
+  }
+};
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PACKET_MATH_SSE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Core/products/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Core/products/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/CoeffBasedProduct.h b/resources/3rdParty/eigen/Eigen/src/Core/products/CoeffBasedProduct.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/CoeffBasedProduct.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/CoeffBasedProduct.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/resources/3rdParty/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h
new file mode 100644
index 000000000..5eb03c98c
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -0,0 +1,1319 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
+#define EIGEN_GENERAL_BLOCK_PANEL_H
+
+namespace Eigen { 
+  
+namespace internal {
+
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
+class gebp_traits;
+
+
+/** \internal \returns b if a<=0, and returns a otherwise. */
+inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
+{
+  return a<=0 ? b : a;
+}
+
+/** \internal */
+inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdiff_t* l2=0)
+{
+  static std::ptrdiff_t m_l1CacheSize = 0;
+  static std::ptrdiff_t m_l2CacheSize = 0;
+  if(m_l2CacheSize==0)
+  {
+    m_l1CacheSize = manage_caching_sizes_helper(queryL1CacheSize(),8 * 1024);
+    m_l2CacheSize = manage_caching_sizes_helper(queryTopLevelCacheSize(),1*1024*1024);
+  }
+  
+  if(action==SetAction)
+  {
+    // set the CPU cache sizes (in bytes); the block sizes are derived from them
+    eigen_internal_assert(l1!=0 && l2!=0);
+    m_l1CacheSize = *l1;
+    m_l2CacheSize = *l2;
+  }
+  else if(action==GetAction)
+  {
+    eigen_internal_assert(l1!=0 && l2!=0);
+    *l1 = m_l1CacheSize;
+    *l2 = m_l2CacheSize;
+  }
+  else
+  {
+    eigen_internal_assert(false);
+  }
+}
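+// Note: if the runtime cache queries return 0 (size unknown), the conservative
+// fallbacks above are used: 8 KB for L1 and 1 MB for the top-level cache.
+// The user-visible l1CacheSize()/l2CacheSize()/setCpuCacheSizes() functions at
+// the end of this file are thin wrappers around this GetAction/SetAction interface.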
+
+/** \brief Computes the blocking parameters for a m x k times k x n matrix product
+  *
+  * \param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension.
+  * \param[in,out] m Input: the number of rows of the left hand side. Output: the blocking size along the same dimension.
+  * \param[in,out] n Input: the number of columns of the right hand side. Output: the blocking size along the same dimension.
+  *
+  * Given a m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
+  * this function computes the blocking size parameters along the respective dimensions
+  * for matrix products and related algorithms. The blocking sizes depend on various
+  * parameters:
+  * - the L1 and L2 cache sizes,
+  * - the register level blocking sizes defined by gebp_traits,
+  * - the number of scalars that fit into a packet (when vectorization is enabled).
+  *
+  * \sa setCpuCacheSizes */
+template<typename LhsScalar, typename RhsScalar, int KcFactor>
+void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrdiff_t& n)
+{
+  EIGEN_UNUSED_VARIABLE(n);
+  // Explanations:
+  // Let's recall that the product algorithms form kc x nc horizontal panels B' on the rhs and
+  // mc x kc blocks A' on the lhs. A' has to fit into L2 cache. Moreover, B' is processed
+  // per kc x nr vertical small panels where nr is the blocking size along the n dimension
+  // at the register level. For vectorization purposes, these small vertical panels are unpacked,
+  // i.e., each coefficient is replicated to fill a packet. This small vertical panel has to
+  // stay in L1 cache.
+  std::ptrdiff_t l1, l2;
+
+  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+  enum {
+    kdiv = KcFactor * 2 * Traits::nr
+         * Traits::RhsProgress * sizeof(RhsScalar),
+    mr = gebp_traits<LhsScalar,RhsScalar>::mr,
+    mr_mask = (0xffffffff/mr)*mr
+  };
+
+  manage_caching_sizes(GetAction, &l1, &l2);
+  k = std::min<std::ptrdiff_t>(k, l1/kdiv);
+  std::ptrdiff_t _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0;
+  if(_m<m) m = _m & mr_mask;
+}
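+// Rough worked example (illustrative only; the exact traits values depend on the
+// target architecture): for double with SSE2 on x86-64, nr==4, RhsProgress==2 and
+// sizeof(RhsScalar)==8, so with KcFactor==1 we get kdiv = 1*2*4*2*8 = 128.
+// Assuming a 32 KB L1 and a 1 MB L2, k is clamped to 32768/128 = 256 and
+// _m = 1048576/(4*8*256) = 128, i.e. at most roughly 128 x 256 doubles of the
+// lhs are kept in one mc x kc block.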
+
+template<typename LhsScalar, typename RhsScalar>
+inline void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrdiff_t& n)
+{
+  computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n);
+}
+
+#ifdef EIGEN_HAS_FUSE_CJMADD
+  #define MADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
+#else
+
+  // FIXME (a bit overkill maybe ?)
+
+  template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
+    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
+    {
+      c = cj.pmadd(a,b,c);
+    }
+  };
+
+  template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
+    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
+    {
+      t = b; t = cj.pmul(a,t); c = padd(c,t);
+    }
+  };
+
+  template<typename CJ, typename A, typename B, typename C, typename T>
+  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
+  {
+    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
+  }
+
+  #define MADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
+//   #define MADD(CJ,A,B,C,T)  T = B; T = CJ.pmul(A,T); C = padd(C,T);
+#endif
+
+/* Vectorization logic
+ *  real*real: unpack rhs to constant packets, ...
+ * 
+ *  cd*cd : unpack rhs to (b_r,b_r), (b_i,b_i), mul to get (a_r b_r,a_i b_r) (a_r b_i,a_i b_i),
+ *          storing each res packet into two packets (2x2),
+ *          at the end combine them: swap the second and addsub them 
+ *  cf*cf : same but with 2x4 blocks
+ *  cplx*real : unpack rhs to constant packets, ...
+ *  real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
+ */
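+/* Illustration of the cd*cd scheme above (see the complex*complex specialization
+ * below): each rhs coefficient b = b_r + i*b_i is unpacked into two real packets
+ * (b_r,...,b_r) and (b_i,...,b_i); multiplying the lhs packet (a0_r,a0_i,a1_r,a1_i,...)
+ * by them yields the two accumulators c.first = (a0_r*b_r, a0_i*b_r, ...) and
+ * c.second = (a0_r*b_i, a0_i*b_i, ...), which acc() recombines with
+ * pcplxflip/padd/psub according to the conjugation flags.
+ */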
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
+class gebp_traits
+{
+public:
+  typedef _LhsScalar LhsScalar;
+  typedef _RhsScalar RhsScalar;
+  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+  enum {
+    ConjLhs = _ConjLhs,
+    ConjRhs = _ConjRhs,
+    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
+    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+    
+    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+
+    // register block size along the N direction (must be either 2 or 4)
+    nr = NumberOfRegisters/4,
+
+    // register block size along the M direction (currently, this one cannot be modified)
+    mr = 2 * LhsPacketSize,
+    
+    WorkSpaceFactor = nr * RhsPacketSize,
+
+    LhsProgress = LhsPacketSize,
+    RhsProgress = RhsPacketSize
+  };
+
+  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
+  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
+  typedef typename packet_traits<ResScalar>::type  _ResPacket;
+
+  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+  typedef ResPacket AccPacket;
+  
+  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
+  {
+    p = pset1<ResPacket>(ResScalar(0));
+  }
+
+  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
+  {
+    for(DenseIndex k=0; k<n; k++)
+      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
+  }
+
+  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+  {
+    dest = pload<RhsPacket>(b);
+  }
+
+  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+  {
+    dest = pload<LhsPacket>(a);
+  }
+
+  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, AccPacket& tmp) const
+  {
+    tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);
+  }
+
+  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+  {
+    r = pmadd(c,alpha,r);
+  }
+
+protected:
+//   conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
+//   conj_helper<LhsPacket,RhsPacket,ConjLhs,ConjRhs> pcj;
+};
+
+template<typename RealScalar, bool _ConjLhs>
+class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
+{
+public:
+  typedef std::complex<RealScalar> LhsScalar;
+  typedef RealScalar RhsScalar;
+  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+  enum {
+    ConjLhs = _ConjLhs,
+    ConjRhs = false,
+    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
+    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+    
+    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+    nr = NumberOfRegisters/4,
+    mr = 2 * LhsPacketSize,
+    WorkSpaceFactor = nr*RhsPacketSize,
+
+    LhsProgress = LhsPacketSize,
+    RhsProgress = RhsPacketSize
+  };
+
+  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
+  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
+  typedef typename packet_traits<ResScalar>::type  _ResPacket;
+
+  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+  typedef ResPacket AccPacket;
+
+  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
+  {
+    p = pset1<ResPacket>(ResScalar(0));
+  }
+
+  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
+  {
+    for(DenseIndex k=0; k<n; k++)
+      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
+  }
+
+  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+  {
+    dest = pload<RhsPacket>(b);
+  }
+
+  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+  {
+    dest = pload<LhsPacket>(a);
+  }
+
+  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
+  {
+    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
+  }
+
+  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
+  {
+    tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
+  }
+
+  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
+  {
+    c += a * b;
+  }
+
+  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+  {
+    r = cj.pmadd(c,alpha,r);
+  }
+
+protected:
+  conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
+};
+
+template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
+class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
+{
+public:
+  typedef std::complex<RealScalar>  Scalar;
+  typedef std::complex<RealScalar>  LhsScalar;
+  typedef std::complex<RealScalar>  RhsScalar;
+  typedef std::complex<RealScalar>  ResScalar;
+  
+  enum {
+    ConjLhs = _ConjLhs,
+    ConjRhs = _ConjRhs,
+    Vectorizable = packet_traits<RealScalar>::Vectorizable
+                && packet_traits<Scalar>::Vectorizable,
+    RealPacketSize  = Vectorizable ? packet_traits<RealScalar>::size : 1,
+    ResPacketSize   = Vectorizable ? packet_traits<ResScalar>::size : 1,
+    
+    nr = 2,
+    mr = 2 * ResPacketSize,
+    WorkSpaceFactor = Vectorizable ? 2*nr*RealPacketSize : nr,
+
+    LhsProgress = ResPacketSize,
+    RhsProgress = Vectorizable ? 2*ResPacketSize : 1
+  };
+  
+  typedef typename packet_traits<RealScalar>::type RealPacket;
+  typedef typename packet_traits<Scalar>::type     ScalarPacket;
+  struct DoublePacket
+  {
+    RealPacket first;
+    RealPacket second;
+  };
+
+  typedef typename conditional<Vectorizable,RealPacket,  Scalar>::type LhsPacket;
+  typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type RhsPacket;
+  typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
+  typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type AccPacket;
+  
+  EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }
+
+  EIGEN_STRONG_INLINE void initAcc(DoublePacket& p)
+  {
+    p.first   = pset1<RealPacket>(RealScalar(0));
+    p.second  = pset1<RealPacket>(RealScalar(0));
+  }
+
+  /* Unpack the rhs coefficients such that each complex coefficient is spread into
+   * two packets containing respectively the real and the imaginary part,
+   * duplicated as many times as needed: (x+iy) => [x, ..., x] [y, ..., y]
+   */
+  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const Scalar* rhs, Scalar* b)
+  {
+    for(DenseIndex k=0; k<n; k++)
+    {
+      if(Vectorizable)
+      {
+        pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+0],             real(rhs[k]));
+        pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+ResPacketSize], imag(rhs[k]));
+      }
+      else
+        b[k] = rhs[k];
+    }
+  }
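+  // For instance, for std::complex<double> with 2-wide real packets, the two
+  // coefficients rhs == { x+iy, u+iv } are unpacked (viewed as reals) into
+  // b == { x,x, y,y, u,u, v,v }.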
+
+  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const { dest = *b; }
+
+  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacket& dest) const
+  {
+    dest.first  = pload<RealPacket>((const RealScalar*)b);
+    dest.second = pload<RealPacket>((const RealScalar*)(b+ResPacketSize));
+  }
+
+  // nothing special here
+  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+  {
+    dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
+  }
+
+  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacket& c, RhsPacket& /*tmp*/) const
+  {
+    c.first   = padd(pmul(a,b.first), c.first);
+    c.second  = padd(pmul(a,b.second),c.second);
+  }
+
+  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
+  {
+    c = cj.pmadd(a,b,c);
+  }
+  
+  EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }
+  
+  EIGEN_STRONG_INLINE void acc(const DoublePacket& c, const ResPacket& alpha, ResPacket& r) const
+  {
+    // assemble c
+    ResPacket tmp;
+    if((!ConjLhs)&&(!ConjRhs))
+    {
+      tmp = pcplxflip(pconj(ResPacket(c.second)));
+      tmp = padd(ResPacket(c.first),tmp);
+    }
+    else if((!ConjLhs)&&(ConjRhs))
+    {
+      tmp = pconj(pcplxflip(ResPacket(c.second)));
+      tmp = padd(ResPacket(c.first),tmp);
+    }
+    else if((ConjLhs)&&(!ConjRhs))
+    {
+      tmp = pcplxflip(ResPacket(c.second));
+      tmp = padd(pconj(ResPacket(c.first)),tmp);
+    }
+    else if((ConjLhs)&&(ConjRhs))
+    {
+      tmp = pcplxflip(ResPacket(c.second));
+      tmp = psub(pconj(ResPacket(c.first)),tmp);
+    }
+    
+    r = pmadd(tmp,alpha,r);
+  }
+
+protected:
+  conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
+};
+
+template<typename RealScalar, bool _ConjRhs>
+class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
+{
+public:
+  typedef std::complex<RealScalar>  Scalar;
+  typedef RealScalar  LhsScalar;
+  typedef Scalar      RhsScalar;
+  typedef Scalar      ResScalar;
+
+  enum {
+    ConjLhs = false,
+    ConjRhs = _ConjRhs,
+    Vectorizable = packet_traits<RealScalar>::Vectorizable
+                && packet_traits<Scalar>::Vectorizable,
+    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+    
+    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+    nr = 4,
+    mr = 2*ResPacketSize,
+    WorkSpaceFactor = nr*RhsPacketSize,
+
+    LhsProgress = ResPacketSize,
+    RhsProgress = ResPacketSize
+  };
+
+  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
+  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
+  typedef typename packet_traits<ResScalar>::type  _ResPacket;
+
+  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+  typedef ResPacket AccPacket;
+
+  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
+  {
+    p = pset1<ResPacket>(ResScalar(0));
+  }
+
+  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
+  {
+    for(DenseIndex k=0; k<n; k++)
+      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
+  }
+
+  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+  {
+    dest = pload<RhsPacket>(b);
+  }
+
+  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+  {
+    dest = ploaddup<LhsPacket>(a);
+  }
+
+  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
+  {
+    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
+  }
+
+  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
+  {
+    tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
+  }
+
+  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
+  {
+    c += a * b;
+  }
+
+  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+  {
+    r = cj.pmadd(alpha,c,r);
+  }
+
+protected:
+  conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
+};
+
+/* optimized GEneral packed Block * packed Panel product kernel
+ *
+ * Mixing type logic: C += A * B
+ *  |  A  |  B  | comments
+ *  |real |cplx | no vectorization yet, would require packing A with duplication
+ *  |cplx |real | easy vectorization
+ */
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct gebp_kernel
+{
+  typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
+  typedef typename Traits::ResScalar ResScalar;
+  typedef typename Traits::LhsPacket LhsPacket;
+  typedef typename Traits::RhsPacket RhsPacket;
+  typedef typename Traits::ResPacket ResPacket;
+  typedef typename Traits::AccPacket AccPacket;
+
+  enum {
+    Vectorizable  = Traits::Vectorizable,
+    LhsProgress   = Traits::LhsProgress,
+    RhsProgress   = Traits::RhsProgress,
+    ResPacketSize = Traits::ResPacketSize
+  };
+
+  EIGEN_DONT_INLINE EIGEN_FLATTEN_ATTRIB
+  void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha,
+                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB = 0)
+  {
+    Traits traits;
+    
+    if(strideA==-1) strideA = depth;
+    if(strideB==-1) strideB = depth;
+    conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
+//     conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
+    Index packet_cols = (cols/nr) * nr;
+    const Index peeled_mc = (rows/mr)*mr;
+    // FIXME:
+    const Index peeled_mc2 = peeled_mc + (rows-peeled_mc >= LhsProgress ? LhsProgress : 0);
+    const Index peeled_kc = (depth/4)*4;
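+    // e.g. (assuming double/SSE2, so mr==4 and LhsProgress==2): rows==10 and
+    // depth==10 give peeled_mc==8, peeled_mc2==10 and peeled_kc==8; the last
+    // two rows are then handled by the LhsProgress-wide path below and the
+    // last two depth iterations by the per-k remainder loop.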
+
+    if(unpackedB==0)
+      unpackedB = const_cast<RhsScalar*>(blockB - strideB * nr * RhsProgress);
+
+    // loops on each micro vertical panel of rhs (depth x nr)
+    for(Index j2=0; j2<packet_cols; j2+=nr)
+    {
+      traits.unpackRhs(depth*nr,&blockB[j2*strideB+offsetB*nr],unpackedB); 
+
+      // loops on each largest micro horizontal panel of lhs (mr x depth)
+      // => we select a mr x nr micro block of res which is entirely
+      //    stored into mr/packet_size x nr registers.
+      for(Index i=0; i<peeled_mc; i+=mr)
+      {
+        const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
+        prefetch(&blA[0]);
+
+        // gets res block as register
+        AccPacket C0, C1, C2, C3, C4, C5, C6, C7;
+                  traits.initAcc(C0);
+                  traits.initAcc(C1);
+        if(nr==4) traits.initAcc(C2);
+        if(nr==4) traits.initAcc(C3);
+                  traits.initAcc(C4);
+                  traits.initAcc(C5);
+        if(nr==4) traits.initAcc(C6);
+        if(nr==4) traits.initAcc(C7);
+
+        ResScalar* r0 = &res[(j2+0)*resStride + i];
+        ResScalar* r1 = r0 + resStride;
+        ResScalar* r2 = r1 + resStride;
+        ResScalar* r3 = r2 + resStride;
+
+        prefetch(r0+16);
+        prefetch(r1+16);
+        prefetch(r2+16);
+        prefetch(r3+16);
+
+        // performs "inner" product
+        // TODO let's check whether the following peeled loop could not be
+        //      optimized via optimal prefetching from one loop to the other
+        const RhsScalar* blB = unpackedB;
+        for(Index k=0; k<peeled_kc; k+=4)
+        {
+          if(nr==2)
+          {
+            LhsPacket A0, A1;
+            RhsPacket B_0;
+            RhsPacket T0;
+            
+EIGEN_ASM_COMMENT("mybegin2");
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadLhs(&blA[1*LhsProgress], A1);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B_0);
+            traits.madd(A0,B_0,C1,T0);
+            traits.madd(A1,B_0,C5,B_0);
+
+            traits.loadLhs(&blA[2*LhsProgress], A0);
+            traits.loadLhs(&blA[3*LhsProgress], A1);
+            traits.loadRhs(&blB[2*RhsProgress], B_0);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[3*RhsProgress], B_0);
+            traits.madd(A0,B_0,C1,T0);
+            traits.madd(A1,B_0,C5,B_0);
+
+            traits.loadLhs(&blA[4*LhsProgress], A0);
+            traits.loadLhs(&blA[5*LhsProgress], A1);
+            traits.loadRhs(&blB[4*RhsProgress], B_0);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[5*RhsProgress], B_0);
+            traits.madd(A0,B_0,C1,T0);
+            traits.madd(A1,B_0,C5,B_0);
+
+            traits.loadLhs(&blA[6*LhsProgress], A0);
+            traits.loadLhs(&blA[7*LhsProgress], A1);
+            traits.loadRhs(&blB[6*RhsProgress], B_0);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[7*RhsProgress], B_0);
+            traits.madd(A0,B_0,C1,T0);
+            traits.madd(A1,B_0,C5,B_0);
+EIGEN_ASM_COMMENT("myend");
+          }
+          else
+          {
+EIGEN_ASM_COMMENT("mybegin4");
+            LhsPacket A0, A1;
+            RhsPacket B_0, B1, B2, B3;
+            RhsPacket T0;
+            
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadLhs(&blA[1*LhsProgress], A1);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+
+            traits.madd(A0,B_0,C0,T0);
+            traits.loadRhs(&blB[2*RhsProgress], B2);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[3*RhsProgress], B3);
+            traits.loadRhs(&blB[4*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,T0);
+            traits.madd(A1,B1,C5,B1);
+            traits.loadRhs(&blB[5*RhsProgress], B1);
+            traits.madd(A0,B2,C2,T0);
+            traits.madd(A1,B2,C6,B2);
+            traits.loadRhs(&blB[6*RhsProgress], B2);
+            traits.madd(A0,B3,C3,T0);
+            traits.loadLhs(&blA[2*LhsProgress], A0);
+            traits.madd(A1,B3,C7,B3);
+            traits.loadLhs(&blA[3*LhsProgress], A1);
+            traits.loadRhs(&blB[7*RhsProgress], B3);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[8*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,T0);
+            traits.madd(A1,B1,C5,B1);
+            traits.loadRhs(&blB[9*RhsProgress], B1);
+            traits.madd(A0,B2,C2,T0);
+            traits.madd(A1,B2,C6,B2);
+            traits.loadRhs(&blB[10*RhsProgress], B2);
+            traits.madd(A0,B3,C3,T0);
+            traits.loadLhs(&blA[4*LhsProgress], A0);
+            traits.madd(A1,B3,C7,B3);
+            traits.loadLhs(&blA[5*LhsProgress], A1);
+            traits.loadRhs(&blB[11*RhsProgress], B3);
+
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[12*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,T0);
+            traits.madd(A1,B1,C5,B1);
+            traits.loadRhs(&blB[13*RhsProgress], B1);
+            traits.madd(A0,B2,C2,T0);
+            traits.madd(A1,B2,C6,B2);
+            traits.loadRhs(&blB[14*RhsProgress], B2);
+            traits.madd(A0,B3,C3,T0);
+            traits.loadLhs(&blA[6*LhsProgress], A0);
+            traits.madd(A1,B3,C7,B3);
+            traits.loadLhs(&blA[7*LhsProgress], A1);
+            traits.loadRhs(&blB[15*RhsProgress], B3);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.madd(A0,B1,C1,T0);
+            traits.madd(A1,B1,C5,B1);
+            traits.madd(A0,B2,C2,T0);
+            traits.madd(A1,B2,C6,B2);
+            traits.madd(A0,B3,C3,T0);
+            traits.madd(A1,B3,C7,B3);
+          }
+
+          blB += 4*nr*RhsProgress;
+          blA += 4*mr;
+        }
+        // process remaining peeled loop
+        for(Index k=peeled_kc; k<depth; k++)
+        {
+          if(nr==2)
+          {
+            LhsPacket A0, A1;
+            RhsPacket B_0;
+            RhsPacket T0;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadLhs(&blA[1*LhsProgress], A1);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B_0);
+            traits.madd(A0,B_0,C1,T0);
+            traits.madd(A1,B_0,C5,B_0);
+          }
+          else
+          {
+            LhsPacket A0, A1;
+            RhsPacket B_0, B1, B2, B3;
+            RhsPacket T0;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadLhs(&blA[1*LhsProgress], A1);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+
+            traits.madd(A0,B_0,C0,T0);
+            traits.loadRhs(&blB[2*RhsProgress], B2);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[3*RhsProgress], B3);
+            traits.madd(A0,B1,C1,T0);
+            traits.madd(A1,B1,C5,B1);
+            traits.madd(A0,B2,C2,T0);
+            traits.madd(A1,B2,C6,B2);
+            traits.madd(A0,B3,C3,T0);
+            traits.madd(A1,B3,C7,B3);
+          }
+
+          blB += nr*RhsProgress;
+          blA += mr;
+        }
+
+        if(nr==4)
+        {
+          ResPacket R0, R1, R2, R3, R4, R5, R6;
+          ResPacket alphav = pset1<ResPacket>(alpha);
+
+          R0 = ploadu<ResPacket>(r0);
+          R1 = ploadu<ResPacket>(r1);
+          R2 = ploadu<ResPacket>(r2);
+          R3 = ploadu<ResPacket>(r3);
+          R4 = ploadu<ResPacket>(r0 + ResPacketSize);
+          R5 = ploadu<ResPacket>(r1 + ResPacketSize);
+          R6 = ploadu<ResPacket>(r2 + ResPacketSize);
+          traits.acc(C0, alphav, R0);
+          pstoreu(r0, R0);
+          R0 = ploadu<ResPacket>(r3 + ResPacketSize);
+
+          traits.acc(C1, alphav, R1);
+          traits.acc(C2, alphav, R2);
+          traits.acc(C3, alphav, R3);
+          traits.acc(C4, alphav, R4);
+          traits.acc(C5, alphav, R5);
+          traits.acc(C6, alphav, R6);
+          traits.acc(C7, alphav, R0);
+          
+          pstoreu(r1, R1);
+          pstoreu(r2, R2);
+          pstoreu(r3, R3);
+          pstoreu(r0 + ResPacketSize, R4);
+          pstoreu(r1 + ResPacketSize, R5);
+          pstoreu(r2 + ResPacketSize, R6);
+          pstoreu(r3 + ResPacketSize, R0);
+        }
+        else
+        {
+          ResPacket R0, R1, R4;
+          ResPacket alphav = pset1<ResPacket>(alpha);
+
+          R0 = ploadu<ResPacket>(r0);
+          R1 = ploadu<ResPacket>(r1);
+          R4 = ploadu<ResPacket>(r0 + ResPacketSize);
+          traits.acc(C0, alphav, R0);
+          pstoreu(r0, R0);
+          R0 = ploadu<ResPacket>(r1 + ResPacketSize);
+          traits.acc(C1, alphav, R1);
+          traits.acc(C4, alphav, R4);
+          traits.acc(C5, alphav, R0);
+          pstoreu(r1, R1);
+          pstoreu(r0 + ResPacketSize, R4);
+          pstoreu(r1 + ResPacketSize, R0);
+        }
+        
+      }
+      
+      if(rows-peeled_mc>=LhsProgress)
+      {
+        Index i = peeled_mc;
+        const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];
+        prefetch(&blA[0]);
+
+        // gets res block as register
+        AccPacket C0, C1, C2, C3;
+                  traits.initAcc(C0);
+                  traits.initAcc(C1);
+        if(nr==4) traits.initAcc(C2);
+        if(nr==4) traits.initAcc(C3);
+
+        // performs "inner" product
+        const RhsScalar* blB = unpackedB;
+        for(Index k=0; k<peeled_kc; k+=4)
+        {
+          if(nr==2)
+          {
+            LhsPacket A0;
+            RhsPacket B_0, B1;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[2*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadLhs(&blA[1*LhsProgress], A0);
+            traits.loadRhs(&blB[3*RhsProgress], B1);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[4*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadLhs(&blA[2*LhsProgress], A0);
+            traits.loadRhs(&blB[5*RhsProgress], B1);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[6*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadLhs(&blA[3*LhsProgress], A0);
+            traits.loadRhs(&blB[7*RhsProgress], B1);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.madd(A0,B1,C1,B1);
+          }
+          else
+          {
+            LhsPacket A0;
+            RhsPacket B_0, B1, B2, B3;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[2*RhsProgress], B2);
+            traits.loadRhs(&blB[3*RhsProgress], B3);
+            traits.loadRhs(&blB[4*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadRhs(&blB[5*RhsProgress], B1);
+            traits.madd(A0,B2,C2,B2);
+            traits.loadRhs(&blB[6*RhsProgress], B2);
+            traits.madd(A0,B3,C3,B3);
+            traits.loadLhs(&blA[1*LhsProgress], A0);
+            traits.loadRhs(&blB[7*RhsProgress], B3);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[8*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadRhs(&blB[9*RhsProgress], B1);
+            traits.madd(A0,B2,C2,B2);
+            traits.loadRhs(&blB[10*RhsProgress], B2);
+            traits.madd(A0,B3,C3,B3);
+            traits.loadLhs(&blA[2*LhsProgress], A0);
+            traits.loadRhs(&blB[11*RhsProgress], B3);
+
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[12*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadRhs(&blB[13*RhsProgress], B1);
+            traits.madd(A0,B2,C2,B2);
+            traits.loadRhs(&blB[14*RhsProgress], B2);
+            traits.madd(A0,B3,C3,B3);
+
+            traits.loadLhs(&blA[3*LhsProgress], A0);
+            traits.loadRhs(&blB[15*RhsProgress], B3);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.madd(A0,B2,C2,B2);
+            traits.madd(A0,B3,C3,B3);
+          }
+
+          blB += nr*4*RhsProgress;
+          blA += 4*LhsProgress;
+        }
+        // process remaining peeled loop
+        for(Index k=peeled_kc; k<depth; k++)
+        {
+          if(nr==2)
+          {
+            LhsPacket A0;
+            RhsPacket B_0, B1;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.madd(A0,B1,C1,B1);
+          }
+          else
+          {
+            LhsPacket A0;
+            RhsPacket B_0, B1, B2, B3;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+            traits.loadRhs(&blB[2*RhsProgress], B2);
+            traits.loadRhs(&blB[3*RhsProgress], B3);
+
+            traits.madd(A0,B_0,C0,B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.madd(A0,B2,C2,B2);
+            traits.madd(A0,B3,C3,B3);
+          }
+
+          blB += nr*RhsProgress;
+          blA += LhsProgress;
+        }
+
+        ResPacket R0, R1, R2, R3;
+        ResPacket alphav = pset1<ResPacket>(alpha);
+
+        ResScalar* r0 = &res[(j2+0)*resStride + i];
+        ResScalar* r1 = r0 + resStride;
+        ResScalar* r2 = r1 + resStride;
+        ResScalar* r3 = r2 + resStride;
+
+                  R0 = ploadu<ResPacket>(r0);
+                  R1 = ploadu<ResPacket>(r1);
+        if(nr==4) R2 = ploadu<ResPacket>(r2);
+        if(nr==4) R3 = ploadu<ResPacket>(r3);
+
+                  traits.acc(C0, alphav, R0);
+                  traits.acc(C1, alphav, R1);
+        if(nr==4) traits.acc(C2, alphav, R2);
+        if(nr==4) traits.acc(C3, alphav, R3);
+
+                  pstoreu(r0, R0);
+                  pstoreu(r1, R1);
+        if(nr==4) pstoreu(r2, R2);
+        if(nr==4) pstoreu(r3, R3);
+      }
+      for(Index i=peeled_mc2; i<rows; i++)
+      {
+        const LhsScalar* blA = &blockA[i*strideA+offsetA];
+        prefetch(&blA[0]);
+
+        // gets a 1 x nr res block as registers
+        ResScalar C0(0), C1(0), C2(0), C3(0);
+        // TODO directly use blockB ???
+        const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
+        for(Index k=0; k<depth; k++)
+        {
+          if(nr==2)
+          {
+            LhsScalar A0;
+            RhsScalar B_0, B1;
+
+            A0 = blA[k];
+            B_0 = blB[0];
+            B1 = blB[1];
+            MADD(cj,A0,B_0,C0,B_0);
+            MADD(cj,A0,B1,C1,B1);
+          }
+          else
+          {
+            LhsScalar A0;
+            RhsScalar B_0, B1, B2, B3;
+
+            A0 = blA[k];
+            B_0 = blB[0];
+            B1 = blB[1];
+            B2 = blB[2];
+            B3 = blB[3];
+
+            MADD(cj,A0,B_0,C0,B_0);
+            MADD(cj,A0,B1,C1,B1);
+            MADD(cj,A0,B2,C2,B2);
+            MADD(cj,A0,B3,C3,B3);
+          }
+
+          blB += nr;
+        }
+                  res[(j2+0)*resStride + i] += alpha*C0;
+                  res[(j2+1)*resStride + i] += alpha*C1;
+        if(nr==4) res[(j2+2)*resStride + i] += alpha*C2;
+        if(nr==4) res[(j2+3)*resStride + i] += alpha*C3;
+      }
+    }
+    // process remaining rhs/res columns one at a time
+    // => do the same but with nr==1
+    for(Index j2=packet_cols; j2<cols; j2++)
+    {
+      // unpack B
+      traits.unpackRhs(depth, &blockB[j2*strideB+offsetB], unpackedB);
+
+      for(Index i=0; i<peeled_mc; i+=mr)
+      {
+        const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
+        prefetch(&blA[0]);
+
+        // TODO move the res loads to the stores
+
+        // get res block as registers
+        AccPacket C0, C4;
+        traits.initAcc(C0);
+        traits.initAcc(C4);
+
+        const RhsScalar* blB = unpackedB;
+        for(Index k=0; k<depth; k++)
+        {
+          LhsPacket A0, A1;
+          RhsPacket B_0;
+          RhsPacket T0;
+
+          traits.loadLhs(&blA[0*LhsProgress], A0);
+          traits.loadLhs(&blA[1*LhsProgress], A1);
+          traits.loadRhs(&blB[0*RhsProgress], B_0);
+          traits.madd(A0,B_0,C0,T0);
+          traits.madd(A1,B_0,C4,B_0);
+
+          blB += RhsProgress;
+          blA += 2*LhsProgress;
+        }
+        ResPacket R0, R4;
+        ResPacket alphav = pset1<ResPacket>(alpha);
+
+        ResScalar* r0 = &res[(j2+0)*resStride + i];
+
+        R0 = ploadu<ResPacket>(r0);
+        R4 = ploadu<ResPacket>(r0+ResPacketSize);
+
+        traits.acc(C0, alphav, R0);
+        traits.acc(C4, alphav, R4);
+
+        pstoreu(r0,               R0);
+        pstoreu(r0+ResPacketSize, R4);
+      }
+      if(rows-peeled_mc>=LhsProgress)
+      {
+        Index i = peeled_mc;
+        const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];
+        prefetch(&blA[0]);
+
+        AccPacket C0;
+        traits.initAcc(C0);
+
+        const RhsScalar* blB = unpackedB;
+        for(Index k=0; k<depth; k++)
+        {
+          LhsPacket A0;
+          RhsPacket B_0;
+          traits.loadLhs(blA, A0);
+          traits.loadRhs(blB, B_0);
+          traits.madd(A0, B_0, C0, B_0);
+          blB += RhsProgress;
+          blA += LhsProgress;
+        }
+
+        ResPacket alphav = pset1<ResPacket>(alpha);
+        ResPacket R0 = ploadu<ResPacket>(&res[(j2+0)*resStride + i]);
+        traits.acc(C0, alphav, R0);
+        pstoreu(&res[(j2+0)*resStride + i], R0);
+      }
+      for(Index i=peeled_mc2; i<rows; i++)
+      {
+        const LhsScalar* blA = &blockA[i*strideA+offsetA];
+        prefetch(&blA[0]);
+
+        // gets a 1 x 1 res block as registers
+        ResScalar C0(0);
+        // FIXME directly use blockB ??
+        const RhsScalar* blB = &blockB[j2*strideB+offsetB];
+        for(Index k=0; k<depth; k++)
+        {
+          LhsScalar A0 = blA[k];
+          RhsScalar B_0 = blB[k];
+          MADD(cj, A0, B_0, C0, B_0);
+        }
+        res[(j2+0)*resStride + i] += alpha*C0;
+      }
+    }
+  }
+};
+
+#undef CJMADD
+
+// pack a block of the lhs
+// The traversal is as follows (mr==4):
+//   0  4  8 12 ...
+//   1  5  9 13 ...
+//   2  6 10 14 ...
+//   3  7 11 15 ...
+//
+//  16 20 24 28 ...
+//  17 21 25 29 ...
+//  18 22 26 30 ...
+//  19 23 27 31 ...
+//
+//  32 33 34 35 ...
+//  36 37 38 39 ...
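+//
+// i.e. the numbers above are positions in blockA: each group of Pack1 rows
+// (Pack1==mr==4 in the sketch) is stored column by column, so column k of the
+// first group occupies blockA[Pack1*k ... Pack1*k+Pack1-1], and the next group
+// of rows follows after the whole depth has been traversed.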
+template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs
+{
+  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows,
+                  Index stride=0, Index offset=0)
+  {
+    typedef typename packet_traits<Scalar>::type Packet;
+    enum { PacketSize = packet_traits<Scalar>::size };
+
+    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
+    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+    eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) );
+    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+    const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
+    Index count = 0;
+    Index peeled_mc = (rows/Pack1)*Pack1;
+    for(Index i=0; i<peeled_mc; i+=Pack1)
+    {
+      if(PanelMode) count += Pack1 * offset;
+
+      if(StorageOrder==ColMajor)
+      {
+        for(Index k=0; k<depth; k++)
+        {
+          Packet A, B, C, D;
+          if(Pack1>=1*PacketSize) A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
+          if(Pack1>=2*PacketSize) B = ploadu<Packet>(&lhs(i+1*PacketSize, k));
+          if(Pack1>=3*PacketSize) C = ploadu<Packet>(&lhs(i+2*PacketSize, k));
+          if(Pack1>=4*PacketSize) D = ploadu<Packet>(&lhs(i+3*PacketSize, k));
+          if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; }
+          if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; }
+          if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; }
+          if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; }
+        }
+      }
+      else
+      {
+        for(Index k=0; k<depth; k++)
+        {
+          // TODO add a vectorized transpose here
+          Index w=0;
+          for(; w<Pack1-3; w+=4)
+          {
+            Scalar a(cj(lhs(i+w+0, k))),
+                   b(cj(lhs(i+w+1, k))),
+                   c(cj(lhs(i+w+2, k))),
+                   d(cj(lhs(i+w+3, k)));
+            blockA[count++] = a;
+            blockA[count++] = b;
+            blockA[count++] = c;
+            blockA[count++] = d;
+          }
+          if(Pack1%4)
+            for(;w<Pack1;++w)
+              blockA[count++] = cj(lhs(i+w, k));
+        }
+      }
+      if(PanelMode) count += Pack1 * (stride-offset-depth);
+    }
+    if(rows-peeled_mc>=Pack2)
+    {
+      if(PanelMode) count += Pack2*offset;
+      for(Index k=0; k<depth; k++)
+        for(Index w=0; w<Pack2; w++)
+          blockA[count++] = cj(lhs(peeled_mc+w, k));
+      if(PanelMode) count += Pack2 * (stride-offset-depth);
+      peeled_mc += Pack2;
+    }
+    for(Index i=peeled_mc; i<rows; i++)
+    {
+      if(PanelMode) count += offset;
+      for(Index k=0; k<depth; k++)
+        blockA[count++] = cj(lhs(i, k));
+      if(PanelMode) count += (stride-offset-depth);
+    }
+  }
+};
+
+// copy a complete panel of the rhs
+// this version is optimized for column major matrices
+// The traversal order is as follows (nr==4):
+//  0  1  2  3   12 13 14 15   24 27
+//  4  5  6  7   16 17 18 19   25 28
+//  8  9 10 11   20 21 22 23   26 29
+//  .  .  .  .    .  .  .  .    .  .
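+//
+// i.e. the numbers are positions in blockB: for each k, the nr (==4) current
+// columns contribute one coefficient each, stored contiguously; once the first
+// nr columns are exhausted the next group of nr columns follows, and leftover
+// columns (positions 24..26 and 27..29 above) are packed one column at a time.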
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode>
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+  enum { PacketSize = packet_traits<Scalar>::size };
+  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols,
+                  Index stride=0, Index offset=0)
+  {
+    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
+    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+    Index packet_cols = (cols/nr) * nr;
+    Index count = 0;
+    for(Index j2=0; j2<packet_cols; j2+=nr)
+    {
+      // skip what we have before
+      if(PanelMode) count += nr * offset;
+      const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+      const Scalar* b1 = &rhs[(j2+1)*rhsStride];
+      const Scalar* b2 = &rhs[(j2+2)*rhsStride];
+      const Scalar* b3 = &rhs[(j2+3)*rhsStride];
+      for(Index k=0; k<depth; k++)
+      {
+                  blockB[count+0] = cj(b0[k]);
+                  blockB[count+1] = cj(b1[k]);
+        if(nr==4) blockB[count+2] = cj(b2[k]);
+        if(nr==4) blockB[count+3] = cj(b3[k]);
+        count += nr;
+      }
+      // skip what we have after
+      if(PanelMode) count += nr * (stride-offset-depth);
+    }
+
+    // copy the remaining columns one at a time (nr==1)
+    for(Index j2=packet_cols; j2<cols; ++j2)
+    {
+      if(PanelMode) count += offset;
+      const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+      for(Index k=0; k<depth; k++)
+      {
+        blockB[count] = cj(b0[k]);
+        count += 1;
+      }
+      if(PanelMode) count += (stride-offset-depth);
+    }
+  }
+};
+
+// this version is optimized for row major matrices
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode>
+{
+  enum { PacketSize = packet_traits<Scalar>::size };
+  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols,
+                  Index stride=0, Index offset=0)
+  {
+    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
+    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+    Index packet_cols = (cols/nr) * nr;
+    Index count = 0;
+    for(Index j2=0; j2<packet_cols; j2+=nr)
+    {
+      // skip what we have before
+      if(PanelMode) count += nr * offset;
+      for(Index k=0; k<depth; k++)
+      {
+        const Scalar* b0 = &rhs[k*rhsStride + j2];
+                  blockB[count+0] = cj(b0[0]);
+                  blockB[count+1] = cj(b0[1]);
+        if(nr==4) blockB[count+2] = cj(b0[2]);
+        if(nr==4) blockB[count+3] = cj(b0[3]);
+        count += nr;
+      }
+      // skip what we have after
+      if(PanelMode) count += nr * (stride-offset-depth);
+    }
+    // copy the remaining columns one at a time (nr==1)
+    for(Index j2=packet_cols; j2<cols; ++j2)
+    {
+      if(PanelMode) count += offset;
+      const Scalar* b0 = &rhs[j2];
+      for(Index k=0; k<depth; k++)
+      {
+        blockB[count] = cj(b0[k*rhsStride]);
+        count += 1;
+      }
+      if(PanelMode) count += stride-offset-depth;
+    }
+  }
+};
+
+} // end namespace internal
+
+/** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
+  * \sa setCpuCacheSizes */
+inline std::ptrdiff_t l1CacheSize()
+{
+  std::ptrdiff_t l1, l2;
+  internal::manage_caching_sizes(GetAction, &l1, &l2);
+  return l1;
+}
+
+/** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
+  * \sa setCpuCacheSizes */
+inline std::ptrdiff_t l2CacheSize()
+{
+  std::ptrdiff_t l1, l2;
+  internal::manage_caching_sizes(GetAction, &l1, &l2);
+  return l2;
+}
+
+/** Set the cpu L1 and L2 cache sizes (in bytes).
+  * These values are used to adjust the size of the blocks
+  * for the algorithms working per block.
+  *
+  * \sa computeProductBlockingSizes */
+inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2)
+{
+  internal::manage_caching_sizes(SetAction, &l1, &l2);
+}
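+// A minimal usage sketch (the values are only an example): override the
+// detected cache sizes before triggering large products, then read them back:
+//   Eigen::setCpuCacheSizes(32*1024, 256*1024);
+//   std::ptrdiff_t l1 = Eigen::l1CacheSize();  // 32768
+//   std::ptrdiff_t l2 = Eigen::l2CacheSize();  // 262144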
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_BLOCK_PANEL_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h b/resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h b/resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h b/resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixVector.h b/resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixVector.h
new file mode 100644
index 000000000..71eb7661d
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -0,0 +1,552 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERAL_MATRIX_VECTOR_H
+#define EIGEN_GENERAL_MATRIX_VECTOR_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/* Optimized col-major matrix * vector product:
+ * This algorithm processes 4 columns at once, which both reduces
+ * the number of loads/stores of the result by a factor of 4 and reduces
+ * the instruction dependencies. Moreover, we know that all bands have the
+ * same alignment pattern.
+ *
+ * Mixing type logic: C += alpha * A * B
+ *  |  A  |  B  |alpha| comments
+ *  |real |cplx |cplx | no vectorization
+ *  |real |cplx |real | alpha is converted to a cplx when calling the run function, no vectorization
+ *  |cplx |real |cplx | invalid, the caller has to do tmp = A * B; C += alpha*tmp
+ *  |cplx |real |real | optimal case, vectorization possible via real-cplx mul
+ */
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
+struct general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>
+{
+typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+enum {
+  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
+              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
+  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+  ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
+};
+
+typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
+typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
+typedef typename packet_traits<ResScalar>::type  _ResPacket;
+
+typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+EIGEN_DONT_INLINE static void run(
+  Index rows, Index cols,
+  const LhsScalar* lhs, Index lhsStride,
+  const RhsScalar* rhs, Index rhsIncr,
+  ResScalar* res, Index
+  #ifdef EIGEN_INTERNAL_DEBUGGING
+    resIncr
+  #endif
+  , RhsScalar alpha)
+{
+  eigen_internal_assert(resIncr==1);
+  #ifdef _EIGEN_ACCUMULATE_PACKETS
+  #error _EIGEN_ACCUMULATE_PACKETS has already been defined
+  #endif
+  #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) \
+    pstore(&res[j], \
+      padd(pload<ResPacket>(&res[j]), \
+        padd( \
+          padd(pcj.pmul(EIGEN_CAT(ploa , A0)<LhsPacket>(&lhs0[j]),    ptmp0), \
+                  pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs1[j]),   ptmp1)), \
+          padd(pcj.pmul(EIGEN_CAT(ploa , A2)<LhsPacket>(&lhs2[j]),    ptmp2), \
+                  pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs3[j]),   ptmp3)) )))
+
+  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
+  conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
+  if(ConjugateRhs)
+    alpha = conj(alpha);
+
+  enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
+  const Index columnsAtOnce = 4;
+  const Index peels = 2;
+  const Index LhsPacketAlignedMask = LhsPacketSize-1;
+  const Index ResPacketAlignedMask = ResPacketSize-1;
+  const Index size = rows;
+  
+  // How many coeffs of the result do we have to skip to be aligned.
+  // Here we assume data are at least aligned on the base scalar type.
+  Index alignedStart = internal::first_aligned(res,size);
+  Index alignedSize = ResPacketSize>1 ? alignedStart + ((size-alignedStart) & ~ResPacketAlignedMask) : 0;
+  const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
+
+  const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
+  Index alignmentPattern = alignmentStep==0 ? AllAligned
+                       : alignmentStep==(LhsPacketSize/2) ? EvenAligned
+                       : FirstAligned;
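+  // e.g. for floats (LhsPacketSize==4): lhsStride%4==0 selects AllAligned,
+  // lhsStride%4==2 selects EvenAligned, and any other stride FirstAligned
+  // (NoneAligned may still be chosen below if no column can be aligned).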
+
+  // we cannot assume the first element is aligned because of sub-matrices
+  const Index lhsAlignmentOffset = internal::first_aligned(lhs,size);
+
+  // find how many columns we have to skip to be aligned with the result (if possible)
+  Index skipColumns = 0;
+  // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
+  if( (size_t(lhs)%sizeof(LhsScalar)) || (size_t(res)%sizeof(ResScalar)) )
+  {
+    alignedSize = 0;
+    alignedStart = 0;
+  }
+  else if (LhsPacketSize>1)
+  {
+    eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || size<LhsPacketSize);
+
+    while (skipColumns<LhsPacketSize &&
+          alignedStart != ((lhsAlignmentOffset + alignmentStep*skipColumns)%LhsPacketSize))
+      ++skipColumns;
+    if (skipColumns==LhsPacketSize)
+    {
+      // nothing can be aligned, no need to skip any column
+      alignmentPattern = NoneAligned;
+      skipColumns = 0;
+    }
+    else
+    {
+      skipColumns = (std::min)(skipColumns,cols);
+      // note that the skipped columns are processed later.
+    }
+
+    eigen_internal_assert(  (alignmentPattern==NoneAligned)
+                      || (skipColumns + columnsAtOnce >= cols)
+                      || LhsPacketSize > size
+                      || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(LhsPacket))==0);
+  }
+  else if(Vectorizable)
+  {
+    alignedStart = 0;
+    alignedSize = size;
+    alignmentPattern = AllAligned;
+  }
+
+  Index offset1 = (FirstAligned && alignmentStep==1?3:1);
+  Index offset3 = (FirstAligned && alignmentStep==1?1:3);
+
+  Index columnBound = ((cols-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
+  for (Index i=skipColumns; i<columnBound; i+=columnsAtOnce)
+  {
+    RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[i*rhsIncr]),
+              ptmp1 = pset1<RhsPacket>(alpha*rhs[(i+offset1)*rhsIncr]),
+              ptmp2 = pset1<RhsPacket>(alpha*rhs[(i+2)*rhsIncr]),
+              ptmp3 = pset1<RhsPacket>(alpha*rhs[(i+offset3)*rhsIncr]);
+
+    // this helps a lot generating better binary code
+    const LhsScalar *lhs0 = lhs + i*lhsStride,     *lhs1 = lhs + (i+offset1)*lhsStride,
+                    *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
+
+    if (Vectorizable)
+    {
+      /* explicit vectorization */
+      // process initial unaligned coeffs
+      for (Index j=0; j<alignedStart; ++j)
+      {
+        res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
+        res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
+        res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
+        res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
+      }
+
+      if (alignedSize>alignedStart)
+      {
+        switch(alignmentPattern)
+        {
+          case AllAligned:
+            for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,d,d);
+            break;
+          case EvenAligned:
+            for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,du,d);
+            break;
+          case FirstAligned:
+          {
+            Index j = alignedStart;
+            if(peels>1)
+            {
+              LhsPacket A00, A01, A02, A03, A10, A11, A12, A13;
+              ResPacket T0, T1;
+
+              A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
+              A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
+              A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
+
+              for (; j<peeledSize; j+=peels*ResPacketSize)
+              {
+                A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);
+                A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]);  palign<2>(A02,A12);
+                A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]);  palign<3>(A03,A13);
+
+                A00 = pload<LhsPacket>(&lhs0[j]);
+                A10 = pload<LhsPacket>(&lhs0[j+LhsPacketSize]);
+                T0  = pcj.pmadd(A00, ptmp0, pload<ResPacket>(&res[j]));
+                T1  = pcj.pmadd(A10, ptmp0, pload<ResPacket>(&res[j+ResPacketSize]));
+
+                T0  = pcj.pmadd(A01, ptmp1, T0);
+                A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]);  palign<1>(A11,A01);
+                T0  = pcj.pmadd(A02, ptmp2, T0);
+                A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]);  palign<2>(A12,A02);
+                T0  = pcj.pmadd(A03, ptmp3, T0);
+                pstore(&res[j],T0);
+                A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]);  palign<3>(A13,A03);
+                T1  = pcj.pmadd(A11, ptmp1, T1);
+                T1  = pcj.pmadd(A12, ptmp2, T1);
+                T1  = pcj.pmadd(A13, ptmp3, T1);
+                pstore(&res[j+ResPacketSize],T1);
+              }
+            }
+            for (; j<alignedSize; j+=ResPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,du,du);
+            break;
+          }
+          default:
+            for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(du,du,du);
+            break;
+        }
+      }
+    } // end explicit vectorization
+
+    /* process remaining coeffs (or all if there is no explicit vectorization) */
+    for (Index j=alignedSize; j<size; ++j)
+    {
+      res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
+      res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
+      res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
+      res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
+    }
+  }
+
+  // process remaining first and last columns (at most columnsAtOnce-1)
+  Index end = cols;
+  Index start = columnBound;
+  do
+  {
+    for (Index k=start; k<end; ++k)
+    {
+      RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[k*rhsIncr]);
+      const LhsScalar* lhs0 = lhs + k*lhsStride;
+
+      if (Vectorizable)
+      {
+        /* explicit vectorization */
+        // process first unaligned result's coeffs
+        for (Index j=0; j<alignedStart; ++j)
+          res[j] += cj.pmul(lhs0[j], pfirst(ptmp0));
+        // process aligned result's coeffs
+        if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
+          for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
+            pstore(&res[i], pcj.pmadd(pload<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
+        else
+          for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
+            pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
+      }
+
+      // process remaining scalars (or all if no explicit vectorization)
+      for (Index i=alignedSize; i<size; ++i)
+        res[i] += cj.pmul(lhs0[i], pfirst(ptmp0));
+    }
+    if (skipColumns)
+    {
+      start = 0;
+      end = skipColumns;
+      skipColumns = 0;
+    }
+    else
+      break;
+  } while(Vectorizable);
+  #undef _EIGEN_ACCUMULATE_PACKETS
+}
+};
+
+/* Optimized row-major matrix * vector product:
+ * This algorithm processes 4 rows at once, which both reduces the number
+ * of loads/stores of the result by a factor of 4 and reduces the
+ * instruction dependencies. Moreover, we know that all bands have the
+ * same alignment pattern.
+ *
+ * Mixing type logic:
+ *  - alpha is always a complex (or converted to a complex)
+ *  - no vectorization
+ */
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
+struct general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>
+{
+typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+enum {
+  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
+              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
+  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+  ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
+};
+
+typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
+typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
+typedef typename packet_traits<ResScalar>::type  _ResPacket;
+
+typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+  
+EIGEN_DONT_INLINE static void run(
+  Index rows, Index cols,
+  const LhsScalar* lhs, Index lhsStride,
+  const RhsScalar* rhs, Index rhsIncr,
+  ResScalar* res, Index resIncr,
+  ResScalar alpha)
+{
+  EIGEN_UNUSED_VARIABLE(rhsIncr);
+  eigen_internal_assert(rhsIncr==1);
+  #ifdef _EIGEN_ACCUMULATE_PACKETS
+  #error _EIGEN_ACCUMULATE_PACKETS has already been defined
+  #endif
+
+  #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) {\
+    RhsPacket b = pload<RhsPacket>(&rhs[j]); \
+    ptmp0 = pcj.pmadd(EIGEN_CAT(ploa,A0) <LhsPacket>(&lhs0[j]), b, ptmp0); \
+    ptmp1 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs1[j]), b, ptmp1); \
+    ptmp2 = pcj.pmadd(EIGEN_CAT(ploa,A2) <LhsPacket>(&lhs2[j]), b, ptmp2); \
+    ptmp3 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs3[j]), b, ptmp3); }
+
+  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
+  conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
+
+  enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 };
+  const Index rowsAtOnce = 4;
+  const Index peels = 2;
+  const Index RhsPacketAlignedMask = RhsPacketSize-1;
+  const Index LhsPacketAlignedMask = LhsPacketSize-1;
+  const Index depth = cols;
+
+  // How many coeffs of the rhs do we have to skip to be aligned.
+  // Here we assume data are at least aligned on the base scalar type;
+  // if that's not the case then vectorization is discarded, see below.
+  Index alignedStart = internal::first_aligned(rhs, depth);
+  Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0;
+  const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
+
+  const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
+  Index alignmentPattern = alignmentStep==0 ? AllAligned
+                         : alignmentStep==(LhsPacketSize/2) ? EvenAligned
+                         : FirstAligned;
+
+  // we cannot assume the first element is aligned because of sub-matrices
+  const Index lhsAlignmentOffset = internal::first_aligned(lhs,depth);
+
+  // find how many rows we have to skip to be aligned with the rhs (if possible)
+  Index skipRows = 0;
+  // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
+  if( (sizeof(LhsScalar)!=sizeof(RhsScalar)) || (size_t(lhs)%sizeof(LhsScalar)) || (size_t(rhs)%sizeof(RhsScalar)) )
+  {
+    alignedSize = 0;
+    alignedStart = 0;
+  }
+  else if (LhsPacketSize>1)
+  {
+    eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0  || depth<LhsPacketSize);
+
+    while (skipRows<LhsPacketSize &&
+           alignedStart != ((lhsAlignmentOffset + alignmentStep*skipRows)%LhsPacketSize))
+      ++skipRows;
+    if (skipRows==LhsPacketSize)
+    {
+      // nothing can be aligned, no need to skip any row
+      alignmentPattern = NoneAligned;
+      skipRows = 0;
+    }
+    else
+    {
+      skipRows = (std::min)(skipRows,Index(rows));
+      // note that the skipped rows are processed later.
+    }
+    eigen_internal_assert(  alignmentPattern==NoneAligned
+                      || LhsPacketSize==1
+                      || (skipRows + rowsAtOnce >= rows)
+                      || LhsPacketSize > depth
+                      || (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(LhsPacket))==0);
+  }
+  else if(Vectorizable)
+  {
+    alignedStart = 0;
+    alignedSize = depth;
+    alignmentPattern = AllAligned;
+  }
+
+  Index offset1 = (FirstAligned && alignmentStep==1?3:1);
+  Index offset3 = (FirstAligned && alignmentStep==1?1:3);
+
+  Index rowBound = ((rows-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
+  for (Index i=skipRows; i<rowBound; i+=rowsAtOnce)
+  {
+    EIGEN_ALIGN16 ResScalar tmp0 = ResScalar(0);
+    ResScalar tmp1 = ResScalar(0), tmp2 = ResScalar(0), tmp3 = ResScalar(0);
+
+    // this helps the compiler generate good binary code
+    const LhsScalar *lhs0 = lhs + i*lhsStride,     *lhs1 = lhs + (i+offset1)*lhsStride,
+                    *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
+
+    if (Vectorizable)
+    {
+      /* explicit vectorization */
+      ResPacket ptmp0 = pset1<ResPacket>(ResScalar(0)), ptmp1 = pset1<ResPacket>(ResScalar(0)),
+                ptmp2 = pset1<ResPacket>(ResScalar(0)), ptmp3 = pset1<ResPacket>(ResScalar(0));
+
+      // process initial unaligned coeffs
+      // FIXME this loop gets vectorized by the compiler!
+      for (Index j=0; j<alignedStart; ++j)
+      {
+        RhsScalar b = rhs[j];
+        tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
+        tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
+      }
+
+      if (alignedSize>alignedStart)
+      {
+        switch(alignmentPattern)
+        {
+          case AllAligned:
+            for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,d,d);
+            break;
+          case EvenAligned:
+            for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,du,d);
+            break;
+          case FirstAligned:
+          {
+            Index j = alignedStart;
+            if (peels>1)
+            {
+              /* Here we process 4 rows with two peeled iterations to hide
+               * the overhead of unaligned loads. Moreover, unaligned loads are handled
+               * using special shift/move operations between the two aligned packets
+               * overlapping the desired unaligned packet. This is *much* more efficient
+               * than basic unaligned loads.
+               */
+              LhsPacket A01, A02, A03, A11, A12, A13;
+              A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
+              A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
+              A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
+
+              for (; j<peeledSize; j+=peels*RhsPacketSize)
+              {
+                RhsPacket b = pload<RhsPacket>(&rhs[j]);
+                A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);
+                A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]);  palign<2>(A02,A12);
+                A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]);  palign<3>(A03,A13);
+
+                ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), b, ptmp0);
+                ptmp1 = pcj.pmadd(A01, b, ptmp1);
+                A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]);  palign<1>(A11,A01);
+                ptmp2 = pcj.pmadd(A02, b, ptmp2);
+                A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]);  palign<2>(A12,A02);
+                ptmp3 = pcj.pmadd(A03, b, ptmp3);
+                A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]);  palign<3>(A13,A03);
+
+                b = pload<RhsPacket>(&rhs[j+RhsPacketSize]);
+                ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j+LhsPacketSize]), b, ptmp0);
+                ptmp1 = pcj.pmadd(A11, b, ptmp1);
+                ptmp2 = pcj.pmadd(A12, b, ptmp2);
+                ptmp3 = pcj.pmadd(A13, b, ptmp3);
+              }
+            }
+            for (; j<alignedSize; j+=RhsPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,du,du);
+            break;
+          }
+          default:
+            for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(du,du,du);
+            break;
+        }
+        tmp0 += predux(ptmp0);
+        tmp1 += predux(ptmp1);
+        tmp2 += predux(ptmp2);
+        tmp3 += predux(ptmp3);
+      }
+    } // end explicit vectorization
+
+    // process remaining coeffs (or all if no explicit vectorization)
+    // FIXME this loop gets vectorized by the compiler!
+    for (Index j=alignedSize; j<depth; ++j)
+    {
+      RhsScalar b = rhs[j];
+      tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
+      tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
+    }
+    res[i*resIncr]            += alpha*tmp0;
+    res[(i+offset1)*resIncr]  += alpha*tmp1;
+    res[(i+2)*resIncr]        += alpha*tmp2;
+    res[(i+offset3)*resIncr]  += alpha*tmp3;
+  }
+
+  // process remaining first and last rows (at most rowsAtOnce-1)
+  Index end = rows;
+  Index start = rowBound;
+  do
+  {
+    for (Index i=start; i<end; ++i)
+    {
+      EIGEN_ALIGN16 ResScalar tmp0 = ResScalar(0);
+      ResPacket ptmp0 = pset1<ResPacket>(tmp0);
+      const LhsScalar* lhs0 = lhs + i*lhsStride;
+      // process first unaligned result's coeffs
+      // FIXME this loop gets vectorized by the compiler!
+      for (Index j=0; j<alignedStart; ++j)
+        tmp0 += cj.pmul(lhs0[j], rhs[j]);
+
+      if (alignedSize>alignedStart)
+      {
+        // process aligned rhs coeffs
+        if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
+          for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
+            ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
+        else
+          for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
+            ptmp0 = pcj.pmadd(ploadu<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
+        tmp0 += predux(ptmp0);
+      }
+
+      // process remaining scalars
+      // FIXME this loop gets vectorized by the compiler!
+      for (Index j=alignedSize; j<depth; ++j)
+        tmp0 += cj.pmul(lhs0[j], rhs[j]);
+      res[i*resIncr] += alpha*tmp0;
+    }
+    if (skipRows)
+    {
+      start = 0;
+      end = skipRows;
+      skipRows = 0;
+    }
+    else
+      break;
+  } while(Vectorizable);
+
+  #undef _EIGEN_ACCUMULATE_PACKETS
+}
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_MATRIX_VECTOR_H
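As a rough usage sketch of the row-major kernel above, the run() call maps onto the ordinary expression y.noalias() += alpha * A * x. This is only an illustration of the signature shown in the hunk; the explicit Specialized version argument and the DenseIndex index type are assumptions about the primary template declared in BlasUtil.h, which is not part of this hunk.

#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  Matrix<float,Dynamic,Dynamic,RowMajor> A = Matrix<float,Dynamic,Dynamic,RowMajor>::Random(5,4);
  VectorXf x = VectorXf::Random(4);
  VectorXf y = VectorXf::Zero(5);
  float alpha = 1.0f;
  // rows, cols, lhs, lhsStride (distance between rows), rhs, rhsIncr (must be 1 here), res, resIncr, alpha
  internal::general_matrix_vector_product<DenseIndex,float,RowMajor,false,float,false,Specialized>
    ::run(A.rows(), A.cols(), A.data(), A.outerStride(), x.data(), 1, y.data(), 1, alpha);
  // equivalent high-level code: y.noalias() += alpha * A * x;
  return 0;
}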
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixVector_MKL.h b/resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/Parallelizer.h b/resources/3rdParty/eigen/Eigen/src/Core/products/Parallelizer.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/Parallelizer.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/Parallelizer.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h b/resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h b/resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h b/resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointProduct.h b/resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointProduct.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointProduct.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointProduct.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h b/resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h b/resources/3rdParty/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h b/resources/3rdParty/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/TriangularMatrixVector.h b/resources/3rdParty/eigen/Eigen/src/Core/products/TriangularMatrixVector.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/TriangularMatrixVector.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/TriangularMatrixVector.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/TriangularMatrixVector_MKL.h b/resources/3rdParty/eigen/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h b/resources/3rdParty/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h b/resources/3rdParty/eigen/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/TriangularSolverVector.h b/resources/3rdParty/eigen/Eigen/src/Core/products/TriangularSolverVector.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/products/TriangularSolverVector.h
rename to resources/3rdParty/eigen/Eigen/src/Core/products/TriangularSolverVector.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/BlasUtil.h b/resources/3rdParty/eigen/Eigen/src/Core/util/BlasUtil.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/util/BlasUtil.h
rename to resources/3rdParty/eigen/Eigen/src/Core/util/BlasUtil.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Core/util/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/util/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Core/util/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/util/Constants.h b/resources/3rdParty/eigen/Eigen/src/Core/util/Constants.h
new file mode 100644
index 000000000..3fd45e84f
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/util/Constants.h
@@ -0,0 +1,431 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CONSTANTS_H
+#define EIGEN_CONSTANTS_H
+
+namespace Eigen {
+
+/** This value means that a quantity is not known at compile-time, and that instead the value is
+  * stored in some runtime variable.
+  *
+  * Changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix.
+  */
+const int Dynamic = -1;
+
+/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm<int>().
+  * The value Infinity there means the L-infinity norm.
+  */
+const int Infinity = -1;
+
+/** \defgroup flags Flags
+  * \ingroup Core_Module
+  *
+  * These are the possible bits which can be OR'ed to constitute the flags of a matrix or
+  * expression.
+  *
+  * It is important to note that these flags are a purely compile-time notion. They are a compile-time property of
+  * an expression type, implemented as enum's. They are not stored in memory at runtime, and they do not incur any
+  * runtime overhead.
+  *
+  * \sa MatrixBase::Flags
+  */
+
+/** \ingroup flags
+  *
+  * for a matrix, this means that the storage order is row-major.
+  * If this bit is not set, the storage order is column-major.
+  * For an expression, this determines the storage order of
+  * the matrix created by evaluation of that expression. 
+  * \sa \ref TopicStorageOrders */
+const unsigned int RowMajorBit = 0x1;
+
+/** \ingroup flags
+  *
+  * means the expression should be evaluated by the calling expression */
+const unsigned int EvalBeforeNestingBit = 0x2;
+
+/** \ingroup flags
+  *
+  * means the expression should be evaluated before any assignment */
+const unsigned int EvalBeforeAssigningBit = 0x4;
+
+/** \ingroup flags
+  *
+  * Short version: means the expression might be vectorized
+  *
+  * Long version: means that the coefficients can be handled by packets
+  * and start at a memory location whose alignment meets the requirements
+  * of the present CPU architecture for optimized packet access. In the fixed-size
+  * case, there is the additional condition that it be possible to access all the
+  * coefficients by packets (this implies the requirement that the size be a multiple of 16 bytes,
+  * and that any nontrivial strides don't break the alignment). In the dynamic-size case,
+  * there is no such condition on the total size and strides, so it might not be possible to access
+  * all coeffs by packets.
+  *
+  * \note This bit can be set regardless of whether vectorization is actually enabled.
+  *       To check for actual vectorizability, see \a ActualPacketAccessBit.
+  */
+const unsigned int PacketAccessBit = 0x8;
+
+#ifdef EIGEN_VECTORIZE
+/** \ingroup flags
+  *
+  * If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant
+  * is set to the value \a PacketAccessBit.
+  *
+  * If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant
+  * is set to the value 0.
+  */
+const unsigned int ActualPacketAccessBit = PacketAccessBit;
+#else
+const unsigned int ActualPacketAccessBit = 0x0;
+#endif
+
+/** \ingroup flags
+  *
+  * Short version: means the expression can be seen as 1D vector.
+  *
+  * Long version: means that one can access the coefficients
+  * of this expression by coeff(int), and coeffRef(int) in the case of an lvalue expression. These
+  * index-based access methods are guaranteed
+  * to not have to do any runtime computation of a (row, col)-pair from the index, so that it
+  * is guaranteed that whenever it is available, index-based access is at least as fast as
+  * (row,col)-based access. Expressions for which that isn't possible don't have the LinearAccessBit.
+  *
+  * If both PacketAccessBit and LinearAccessBit are set, then the
+  * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of an
+  * lvalue expression.
+  *
+  * Typically, all vector expressions have the LinearAccessBit, but there is one exception:
+  * Product expressions don't have it, because it would be troublesome for vectorization, even when the
+  * Product is a vector expression. Thus, vector Product expressions allow index-based coefficient access but
+  * not index-based packet access, so they don't have the LinearAccessBit.
+  */
+const unsigned int LinearAccessBit = 0x10;
+
+/** \ingroup flags
+  *
+  * Means the expression has a coeffRef() method, i.e. is writable as its individual coefficients are directly addressable.
+  * This rules out read-only expressions.
+  *
+  * Note that DirectAccessBit and LvalueBit are mutually orthogonal, as there are examples of expressions having one but not
+  * the other:
+  *   \li writable expressions that don't have a very simple memory layout as a strided array, have LvalueBit but not DirectAccessBit
+  *   \li Map-to-const expressions, for example Map<const Matrix>, have DirectAccessBit but not LvalueBit
+  *
+  * Expressions having LvalueBit also have their coeff() method returning a const reference instead of returning a new value.
+  */
+const unsigned int LvalueBit = 0x20;
+
+/** \ingroup flags
+  *
+  * Means that the underlying array of coefficients can be directly accessed as a plain strided array. The memory layout
+  * of the array of coefficients must be exactly the natural one suggested by rows(), cols(),
+  * outerStride(), innerStride(), and the RowMajorBit. This rules out expressions such as Diagonal, whose coefficients,
+  * though referencable, do not have such a regular memory layout.
+  *
+  * See the comment on LvalueBit for an explanation of how LvalueBit and DirectAccessBit are mutually orthogonal.
+  */
+const unsigned int DirectAccessBit = 0x40;
+
+/** \ingroup flags
+  *
+  * means the first coefficient packet is guaranteed to be aligned */
+const unsigned int AlignedBit = 0x80;
+
+const unsigned int NestByRefBit = 0x100;
+
+// list of flags that are inherited by default
+const unsigned int HereditaryBits = RowMajorBit
+                                  | EvalBeforeNestingBit
+                                  | EvalBeforeAssigningBit;
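For illustration, these compile-time bits are typically queried from client code through the Flags enum of a concrete type; the values in the comments are what one would expect for a default, column-major Matrix3f.

#include <Eigen/Core>
#include <iostream>

int main()
{
  using namespace Eigen;
  std::cout << bool(Matrix3f::Flags & RowMajorBit)     << "\n"   // 0: column-major storage by default
            << bool(Matrix3f::Flags & LvalueBit)       << "\n"   // 1: coefficients are writable
            << bool(Matrix3f::Flags & DirectAccessBit) << "\n";  // 1: plain strided array of coefficients
  return 0;
}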
+
+/** \defgroup enums Enumerations
+  * \ingroup Core_Module
+  *
+  * Various enumerations used in %Eigen. Many of these are used as template parameters.
+  */
+
+/** \ingroup enums
+  * Enum containing possible values for the \p Mode parameter of 
+  * MatrixBase::selfadjointView() and MatrixBase::triangularView(). */
+enum {
+  /** View matrix as a lower triangular matrix. */
+  Lower=0x1,                      
+  /** View matrix as an upper triangular matrix. */
+  Upper=0x2,                      
+  /** %Matrix has ones on the diagonal; to be used in combination with #Lower or #Upper. */
+  UnitDiag=0x4, 
+  /** %Matrix has zeros on the diagonal; to be used in combination with #Lower or #Upper. */
+  ZeroDiag=0x8,
+  /** View matrix as a lower triangular matrix with ones on the diagonal. */
+  UnitLower=UnitDiag|Lower, 
+  /** View matrix as an upper triangular matrix with ones on the diagonal. */
+  UnitUpper=UnitDiag|Upper,
+  /** View matrix as a lower triangular matrix with zeros on the diagonal. */
+  StrictlyLower=ZeroDiag|Lower, 
+  /** View matrix as an upper triangular matrix with zeros on the diagonal. */
+  StrictlyUpper=ZeroDiag|Upper,
+  /** Used in BandMatrix and SelfAdjointView to indicate that the matrix is self-adjoint. */
+  SelfAdjoint=0x10,
+  /** Used to support symmetric, non-selfadjoint, complex matrices. */
+  Symmetric=0x20
+};
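A brief usage sketch of these values as the Mode parameter of triangularView() and selfadjointView(); the matrix contents below are arbitrary illustrations.

#include <Eigen/Dense>

int main()
{
  using namespace Eigen;
  MatrixXd A = MatrixXd::Random(4,4);
  VectorXd b = VectorXd::Random(4);
  VectorXd x = A.triangularView<Upper>().solve(b);   // back-substitution using only the upper triangle
  VectorXd y = A.selfadjointView<Lower>() * b;       // symmetric product reading only the lower triangle
  A.triangularView<StrictlyLower>().setZero();       // zero out the coefficients below the diagonal
  return 0;
}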
+
+/** \ingroup enums
+  * Enum for indicating whether an object is aligned or not. */
+enum { 
+  /** Object is not correctly aligned for vectorization. */
+  Unaligned=0, 
+  /** Object is aligned for vectorization. */
+  Aligned=1 
+};
+
+/** \ingroup enums
+ * Enum used by DenseBase::corner() in Eigen2 compatibility mode. */
+// FIXME after the corner() API change, this was not needed anymore, except by AlignedBox
+// TODO: find out what to do with that. Adapt the AlignedBox API ?
+enum CornerType { TopLeft, TopRight, BottomLeft, BottomRight };
+
+/** \ingroup enums
+  * Enum containing possible values for the \p Direction parameter of
+  * Reverse, PartialReduxExpr and VectorwiseOp. */
+enum DirectionType { 
+  /** For Reverse, all columns are reversed; 
+    * for PartialReduxExpr and VectorwiseOp, act on columns. */
+  Vertical, 
+  /** For Reverse, all rows are reversed; 
+    * for PartialReduxExpr and VectorwiseOp, act on rows. */
+  Horizontal, 
+  /** For Reverse, both rows and columns are reversed; 
+    * not used for PartialReduxExpr and VectorwiseOp. */
+  BothDirections 
+};
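For illustration, the Vertical/Horizontal distinction is what colwise() and rowwise() (which return a VectorwiseOp) and the reverse() variants expose in user code; the small matrix below is an arbitrary example.

#include <Eigen/Core>
#include <iostream>

int main()
{
  using namespace Eigen;
  MatrixXf m(2,3);
  m << 1, 2, 3,
       4, 5, 6;
  std::cout << m.colwise().sum()     << "\n";  // Vertical reduction: 5 7 9
  std::cout << m.rowwise().sum()     << "\n";  // Horizontal reduction: 6 and 15
  std::cout << m.colwise().reverse() << "\n";  // each column reversed (Vertical direction)
  return 0;
}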
+
+/** \internal \ingroup enums
+  * Enum to specify how to traverse the entries of a matrix. */
+enum {
+  /** \internal Default traversal, no vectorization, no index-based access */
+  DefaultTraversal,
+  /** \internal No vectorization, use index-based access to have only one for loop instead of 2 nested loops */
+  LinearTraversal,
+  /** \internal Equivalent to a slice vectorization for fixed-size matrices having good alignment
+    * and good size */
+  InnerVectorizedTraversal,
+  /** \internal Vectorization path using a single loop plus scalar loops for the
+    * unaligned boundaries */
+  LinearVectorizedTraversal,
+  /** \internal Generic vectorization path using one vectorized loop per row/column with some
+    * scalar loops to handle the unaligned boundaries */
+  SliceVectorizedTraversal,
+  /** \internal Special case to properly handle incompatible scalar types or other defective cases*/
+  InvalidTraversal
+};
+
+/** \internal \ingroup enums
+  * Enum to specify whether to unroll loops when traversing over the entries of a matrix. */
+enum {
+  /** \internal Do not unroll loops. */
+  NoUnrolling,
+  /** \internal Unroll only the inner loop, but not the outer loop. */
+  InnerUnrolling,
+  /** \internal Unroll both the inner and the outer loop. If there is only one loop, 
+    * because linear traversal is used, then unroll that loop. */
+  CompleteUnrolling
+};
+
+/** \internal \ingroup enums
+  * Enum to specify whether to use the default (built-in) implementation or the specialization. */
+enum {
+  Specialized,
+  BuiltIn
+};
+
+/** \ingroup enums
+  * Enum containing possible values for the \p _Options template parameter of
+  * Matrix, Array and BandMatrix. */
+enum {
+  /** Storage order is column major (see \ref TopicStorageOrders). */
+  ColMajor = 0,
+  /** Storage order is row major (see \ref TopicStorageOrders). */
+  RowMajor = 0x1,  // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that
+  /** \internal Align the matrix itself if it is vectorizable fixed-size */
+  AutoAlign = 0,
+  /** \internal Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be requested to be aligned) */ // FIXME --- clarify the situation
+  DontAlign = 0x2
+};
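These values appear in user code as the _Options template parameter of Matrix and Array; a small sketch (the typedef names are arbitrary):

#include <Eigen/Core>
using namespace Eigen;

typedef Matrix<double, 3, 4, ColMajor>             ColMat;    // the default storage order
typedef Matrix<double, 3, 4, RowMajor>             RowMat;    // coefficients laid out row by row
typedef Matrix<float,  4, 4, DontAlign | ColMajor> PackedMat; // no 16-byte alignment requirement

int main()
{
  RowMat r = RowMat::Zero();
  ColMat c = r;   // different storage orders mix freely in expressions and assignments
  (void)c;
  return 0;
}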
+
+/** \ingroup enums
+  * Enum for specifying whether to apply or solve on the left or right. */
+enum {
+  /** Apply transformation on the left. */
+  OnTheLeft = 1,  
+  /** Apply transformation on the right. */
+  OnTheRight = 2  
+};
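A sketch of the typical use of these values as the Side template argument of triangular solves, assuming the TriangularView::solve<Side>() overload of Eigen 3.x; the matrices are arbitrary examples.

#include <Eigen/Dense>

int main()
{
  using namespace Eigen;
  MatrixXd A = MatrixXd::Random(3,3);
  MatrixXd B = MatrixXd::Random(3,3);
  MatrixXd X1 = A.triangularView<Lower>().solve<OnTheLeft>(B);   // solves A * X1 = B
  MatrixXd X2 = A.triangularView<Lower>().solve<OnTheRight>(B);  // solves X2 * A = B
  return 0;
}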
+
+/* the following used to be written as:
+ *
+ *   struct NoChange_t {};
+ *   namespace {
+ *     EIGEN_UNUSED NoChange_t NoChange;
+ *   }
+ *
+ * on the grounds that it feels dangerous to disambiguate overloaded functions on enum/integer types.
+ * However, this leads to "variable declared but never referenced" warnings on Intel Composer XE,
+ * and we do not know how to get rid of them (bug 450).
+ */
+
+enum NoChange_t   { NoChange };
+enum Sequential_t { Sequential };
+enum Default_t    { Default };
+
+/** \internal \ingroup enums
+  * Used in AmbiVector. */
+enum {
+  IsDense         = 0,
+  IsSparse
+};
+
+/** \ingroup enums
+  * Used as template parameter in DenseCoeffBase and MapBase to indicate 
+  * which accessors should be provided. */
+enum AccessorLevels {
+  /** Read-only access via a member function. */
+  ReadOnlyAccessors, 
+  /** Read/write access via member functions. */
+  WriteAccessors, 
+  /** Direct read-only access to the coefficients. */
+  DirectAccessors, 
+  /** Direct read/write access to the coefficients. */
+  DirectWriteAccessors
+};
+
+/** \ingroup enums
+  * Enum with options to give to various decompositions. */
+enum DecompositionOptions {
+  /** \internal Not used (meant for LDLT?). */
+  Pivoting            = 0x01, 
+  /** \internal Not used (meant for LDLT?). */
+  NoPivoting          = 0x02, 
+  /** Used in JacobiSVD to indicate that the square matrix U is to be computed. */
+  ComputeFullU        = 0x04,
+  /** Used in JacobiSVD to indicate that the thin matrix U is to be computed. */
+  ComputeThinU        = 0x08,
+  /** Used in JacobiSVD to indicate that the square matrix V is to be computed. */
+  ComputeFullV        = 0x10,
+  /** Used in JacobiSVD to indicate that the thin matrix V is to be computed. */
+  ComputeThinV        = 0x20,
+  /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
+    * that only the eigenvalues are to be computed and not the eigenvectors. */
+  EigenvaluesOnly     = 0x40,
+  /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
+    * that both the eigenvalues and the eigenvectors are to be computed. */
+  ComputeEigenvectors = 0x80,
+  /** \internal */
+  EigVecMask = EigenvaluesOnly | ComputeEigenvectors,
+  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+    * solve the generalized eigenproblem \f$ Ax = \lambda B x \f$. */
+  Ax_lBx              = 0x100,
+  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+    * solve the generalized eigenproblem \f$ ABx = \lambda x \f$. */
+  ABx_lx              = 0x200,
+  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+    * solve the generalized eigenproblem \f$ BAx = \lambda x \f$. */
+  BAx_lx              = 0x400,
+  /** \internal */
+  GenEigMask = Ax_lBx | ABx_lx | BAx_lx
+};
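A usage sketch of these options with JacobiSVD and SelfAdjointEigenSolver; the matrices below are arbitrary examples.

#include <Eigen/Dense>

int main()
{
  using namespace Eigen;
  MatrixXd A = MatrixXd::Random(6,4);
  JacobiSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);   // thin factors are enough for least squares
  VectorXd x = svd.solve(VectorXd::Random(6));

  MatrixXd M = MatrixXd::Random(4,4);
  MatrixXd S = M + M.transpose();                            // a self-adjoint matrix
  SelfAdjointEigenSolver<MatrixXd> eig(S, EigenvaluesOnly);  // eigenvectors are skipped
  VectorXd evals = eig.eigenvalues();
  (void)x; (void)evals;
  return 0;
}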
+
+/** \ingroup enums
+  * Possible values for the \p QRPreconditioner template parameter of JacobiSVD. */
+enum QRPreconditioners {
+  /** Do not specify what is to be done if the SVD of a non-square matrix is asked for. */
+  NoQRPreconditioner,
+  /** Use a QR decomposition without pivoting as the first step. */
+  HouseholderQRPreconditioner,
+  /** Use a QR decomposition with column pivoting as the first step. */
+  ColPivHouseholderQRPreconditioner,
+  /** Use a QR decomposition with full pivoting as the first step. */
+  FullPivHouseholderQRPreconditioner
+};
+
+#ifdef Success
+#error The preprocessor symbol 'Success' is defined, possibly by the X11 header file X.h
+#endif
+
+/** \ingroup enums
+  * Enum for reporting the status of a computation. */
+enum ComputationInfo {
+  /** Computation was successful. */
+  Success = 0,        
+  /** The provided data did not satisfy the prerequisites. */
+  NumericalIssue = 1, 
+  /** Iterative procedure did not converge. */
+  NoConvergence = 2,
+  /** The inputs are invalid, or the algorithm has been improperly called.
+    * When assertions are enabled, such errors trigger an assert. */
+  InvalidInput = 3
+};
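For illustration, the usual way client code consumes this enum is through a decomposition's info() member:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;
  MatrixXd M = MatrixXd::Random(4,4);
  MatrixXd A = M * M.transpose();   // positive semi-definite by construction
  LLT<MatrixXd> llt(A);
  if (llt.info() != Success)
    std::cerr << "Cholesky factorization failed (matrix not positive definite?)\n";
  return 0;
}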
+
+/** \ingroup enums
+  * Enum used to specify how a particular transformation is stored in a matrix.
+  * \sa Transform, Hyperplane::transform(). */
+enum TransformTraits {
+  /** Transformation is an isometry. */
+  Isometry      = 0x1,
+  /** Transformation is an affine transformation stored as a (Dim+1)^2 matrix whose last row is 
+    * assumed to be [0 ... 0 1]. */
+  Affine        = 0x2,
+  /** Transformation is an affine transformation stored as a (Dim) x (Dim+1) matrix. */
+  AffineCompact = 0x10 | Affine,
+  /** Transformation is a general projective transformation stored as a (Dim+1)^2 matrix. */
+  Projective    = 0x20
+};
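These traits are used as the Mode parameter of Transform in the Geometry module; a small sketch, where the rotation angle and translation are arbitrary values chosen for illustration.

#include <Eigen/Geometry>

int main()
{
  using namespace Eigen;
  // Affine 3D transform (a 4x4 matrix whose last row is [0 0 0 1]):
  Transform<float,3,Affine> t = Translation3f(1,2,3) * AngleAxisf(0.5f, Vector3f::UnitZ());
  Vector3f p = t * Vector3f(1,0,0);   // rotate, then translate the point
  (void)p;
  return 0;
}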
+
+/** \internal \ingroup enums
+  * Enum used to choose between implementation depending on the computer architecture. */
+namespace Architecture
+{
+  enum Type {
+    Generic = 0x0,
+    SSE = 0x1,
+    AltiVec = 0x2,
+#if defined EIGEN_VECTORIZE_SSE
+    Target = SSE
+#elif defined EIGEN_VECTORIZE_ALTIVEC
+    Target = AltiVec
+#else
+    Target = Generic
+#endif
+  };
+}
+
+/** \internal \ingroup enums
+  * Enum used as template parameter in GeneralProduct. */
+enum { CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };
+
+/** \internal \ingroup enums
+  * Enum used in experimental parallel implementation. */
+enum Action {GetAction, SetAction};
+
+/** The type used to identify a dense storage. */
+struct Dense {};
+
+/** The type used to identify a matrix expression */
+struct MatrixXpr {};
+
+/** The type used to identify an array expression */
+struct ArrayXpr {};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CONSTANTS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/DisableStupidWarnings.h b/resources/3rdParty/eigen/Eigen/src/Core/util/DisableStupidWarnings.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/util/DisableStupidWarnings.h
rename to resources/3rdParty/eigen/Eigen/src/Core/util/DisableStupidWarnings.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/util/ForwardDeclarations.h b/resources/3rdParty/eigen/Eigen/src/Core/util/ForwardDeclarations.h
new file mode 100644
index 000000000..bcdfe3914
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/util/ForwardDeclarations.h
@@ -0,0 +1,298 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FORWARDDECLARATIONS_H
+#define EIGEN_FORWARDDECLARATIONS_H
+
+namespace Eigen {
+namespace internal {
+
+template<typename T> struct traits;
+
+// here we say once and for all that traits<const T> == traits<T>
+// When constness must affect traits, it has to be constness on template parameters on which T itself depends.
+// For example, traits<Map<const T> > != traits<Map<T> >, but
+//              traits<const Map<T> > == traits<Map<T> >
+template<typename T> struct traits<const T> : traits<T> {};
+
+template<typename Derived> struct has_direct_access
+{
+  enum { ret = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0 };
+};
+
+template<typename Derived> struct accessors_level
+{
+  enum { has_direct_access = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0,
+         has_write_access = (traits<Derived>::Flags & LvalueBit) ? 1 : 0,
+         value = has_direct_access ? (has_write_access ? DirectWriteAccessors : DirectAccessors)
+                                   : (has_write_access ? WriteAccessors       : ReadOnlyAccessors)
+  };
+};
+
+} // end namespace internal
+
+template<typename T> struct NumTraits;
+
+template<typename Derived> struct EigenBase;
+template<typename Derived> class DenseBase;
+template<typename Derived> class PlainObjectBase;
+
+
+template<typename Derived,
+         int Level = internal::accessors_level<Derived>::value >
+class DenseCoeffsBase;
+
+template<typename _Scalar, int _Rows, int _Cols,
+         int _Options = AutoAlign |
+#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4
+    // workaround a bug in at least gcc 3.4.6
+    // the innermost ?: ternary operator is misparsed. We write it slightly
+    // differently and this makes gcc 3.4.6 happy, but it's ugly.
+    // The error would only show up when EIGEN_DEFAULT_TO_ROW_MAJOR is defined
+    // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
+                          ( (_Rows==1 && _Cols!=1) ? RowMajor
+                          : !(_Cols==1 && _Rows!=1) ?  EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
+                          : ColMajor ),
+#else
+                          ( (_Rows==1 && _Cols!=1) ? RowMajor
+                          : (_Cols==1 && _Rows!=1) ? ColMajor
+                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+#endif
+         int _MaxRows = _Rows,
+         int _MaxCols = _Cols
+> class Matrix;
+
+template<typename Derived> class MatrixBase;
+template<typename Derived> class ArrayBase;
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged;
+template<typename ExpressionType, template <typename> class StorageBase > class NoAlias;
+template<typename ExpressionType> class NestByValue;
+template<typename ExpressionType> class ForceAlignedAccess;
+template<typename ExpressionType> class SwapWrapper;
+
+template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false,
+         bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class Block;
+
+template<typename MatrixType, int Size=Dynamic> class VectorBlock;
+template<typename MatrixType> class Transpose;
+template<typename MatrixType> class Conjugate;
+template<typename NullaryOp, typename MatrixType>         class CwiseNullaryOp;
+template<typename UnaryOp,   typename MatrixType>         class CwiseUnaryOp;
+template<typename ViewOp,    typename MatrixType>         class CwiseUnaryView;
+template<typename BinaryOp,  typename Lhs, typename Rhs>  class CwiseBinaryOp;
+template<typename BinOp,     typename Lhs, typename Rhs>  class SelfCwiseBinaryOp;
+template<typename Derived,   typename Lhs, typename Rhs>  class ProductBase;
+template<typename Lhs, typename Rhs, int Mode>            class GeneralProduct;
+template<typename Lhs, typename Rhs, int NestingFlags>    class CoeffBasedProduct;
+
+template<typename Derived> class DiagonalBase;
+template<typename _DiagonalVectorType> class DiagonalWrapper;
+template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime=SizeAtCompileTime> class DiagonalMatrix;
+template<typename MatrixType, typename DiagonalType, int ProductOrder> class DiagonalProduct;
+template<typename MatrixType, int Index = 0> class Diagonal;
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class PermutationMatrix;
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class Transpositions;
+template<typename Derived> class PermutationBase;
+template<typename Derived> class TranspositionsBase;
+template<typename _IndicesType> class PermutationWrapper;
+template<typename _IndicesType> class TranspositionsWrapper;
+
+template<typename Derived,
+         int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
+> class MapBase;
+template<int InnerStrideAtCompileTime, int OuterStrideAtCompileTime> class Stride;
+template<typename MatrixType, int MapOptions=Unaligned, typename StrideType = Stride<0,0> > class Map;
+
+template<typename Derived> class TriangularBase;
+template<typename MatrixType, unsigned int Mode> class TriangularView;
+template<typename MatrixType, unsigned int Mode> class SelfAdjointView;
+template<typename MatrixType> class SparseView;
+template<typename ExpressionType> class WithFormat;
+template<typename MatrixType> struct CommaInitializer;
+template<typename Derived> class ReturnByValue;
+template<typename ExpressionType> class ArrayWrapper;
+template<typename ExpressionType> class MatrixWrapper;
+
+namespace internal {
+template<typename DecompositionType, typename Rhs> struct solve_retval_base;
+template<typename DecompositionType, typename Rhs> struct solve_retval;
+template<typename DecompositionType> struct kernel_retval_base;
+template<typename DecompositionType> struct kernel_retval;
+template<typename DecompositionType> struct image_retval_base;
+template<typename DecompositionType> struct image_retval;
+} // end namespace internal
+
+namespace internal {
+template<typename _Scalar, int Rows=Dynamic, int Cols=Dynamic, int Supers=Dynamic, int Subs=Dynamic, int Options=0> class BandMatrix;
+}
+
+namespace internal {
+template<typename Lhs, typename Rhs> struct product_type;
+}
+
+template<typename Lhs, typename Rhs,
+         int ProductType = internal::product_type<Lhs,Rhs>::value>
+struct ProductReturnType;
+
+// this is a workaround for sun CC
+template<typename Lhs, typename Rhs> struct LazyProductReturnType;
+
+namespace internal {
+
+// Provides scalar/packet-wise product and product with accumulation
+// with optional conjugation of the arguments.
+template<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRhs=false> struct conj_helper;
+
+template<typename Scalar> struct scalar_sum_op;
+template<typename Scalar> struct scalar_difference_op;
+template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op;
+template<typename Scalar> struct scalar_quotient_op;
+template<typename Scalar> struct scalar_opposite_op;
+template<typename Scalar> struct scalar_conjugate_op;
+template<typename Scalar> struct scalar_real_op;
+template<typename Scalar> struct scalar_imag_op;
+template<typename Scalar> struct scalar_abs_op;
+template<typename Scalar> struct scalar_abs2_op;
+template<typename Scalar> struct scalar_sqrt_op;
+template<typename Scalar> struct scalar_exp_op;
+template<typename Scalar> struct scalar_log_op;
+template<typename Scalar> struct scalar_cos_op;
+template<typename Scalar> struct scalar_sin_op;
+template<typename Scalar> struct scalar_acos_op;
+template<typename Scalar> struct scalar_asin_op;
+template<typename Scalar> struct scalar_tan_op;
+template<typename Scalar> struct scalar_pow_op;
+template<typename Scalar> struct scalar_inverse_op;
+template<typename Scalar> struct scalar_square_op;
+template<typename Scalar> struct scalar_cube_op;
+template<typename Scalar, typename NewType> struct scalar_cast_op;
+template<typename Scalar> struct scalar_multiple_op;
+template<typename Scalar> struct scalar_quotient1_op;
+template<typename Scalar> struct scalar_min_op;
+template<typename Scalar> struct scalar_max_op;
+template<typename Scalar> struct scalar_random_op;
+template<typename Scalar> struct scalar_add_op;
+template<typename Scalar> struct scalar_constant_op;
+template<typename Scalar> struct scalar_identity_op;
+
+template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_product_op;
+template<typename LhsScalar,typename RhsScalar> struct scalar_multiple2_op;
+
+} // end namespace internal
+
+struct IOFormat;
+
+// Array module
+template<typename _Scalar, int _Rows, int _Cols,
+         int _Options = AutoAlign |
+#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4
+    // workaround a bug in at least gcc 3.4.6
+    // the innermost ?: ternary operator is misparsed. We write it slightly
+    // differently and this makes gcc 3.4.6 happy, but it's ugly.
+    // The error would only show up when EIGEN_DEFAULT_TO_ROW_MAJOR is defined
+    // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
+                          ( (_Rows==1 && _Cols!=1) ? RowMajor
+                          : !(_Cols==1 && _Rows!=1) ?  EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
+                          : ColMajor ),
+#else
+                          ( (_Rows==1 && _Cols!=1) ? RowMajor
+                          : (_Cols==1 && _Rows!=1) ? ColMajor
+                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+#endif
+         int _MaxRows = _Rows, int _MaxCols = _Cols> class Array;
+template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> class Select;
+template<typename MatrixType, typename BinaryOp, int Direction> class PartialReduxExpr;
+template<typename ExpressionType, int Direction> class VectorwiseOp;
+template<typename MatrixType,int RowFactor,int ColFactor> class Replicate;
+template<typename MatrixType, int Direction = BothDirections> class Reverse;
+
+template<typename MatrixType> class FullPivLU;
+template<typename MatrixType> class PartialPivLU;
+namespace internal {
+template<typename MatrixType> struct inverse_impl;
+}
+template<typename MatrixType> class HouseholderQR;
+template<typename MatrixType> class ColPivHouseholderQR;
+template<typename MatrixType> class FullPivHouseholderQR;
+template<typename MatrixType, int QRPreconditioner = ColPivHouseholderQRPreconditioner> class JacobiSVD;
+template<typename MatrixType, int UpLo = Lower> class LLT;
+template<typename MatrixType, int UpLo = Lower> class LDLT;
+template<typename VectorsType, typename CoeffsType, int Side=OnTheLeft> class HouseholderSequence;
+template<typename Scalar>     class JacobiRotation;
+
+// Geometry module:
+template<typename Derived, int _Dim> class RotationBase;
+template<typename Lhs, typename Rhs> class Cross;
+template<typename Derived> class QuaternionBase;
+template<typename Scalar> class Rotation2D;
+template<typename Scalar> class AngleAxis;
+template<typename Scalar,int Dim> class Translation;
+
+#ifdef EIGEN2_SUPPORT
+template<typename Derived, int _Dim> class eigen2_RotationBase;
+template<typename Lhs, typename Rhs> class eigen2_Cross;
+template<typename Scalar> class eigen2_Quaternion;
+template<typename Scalar> class eigen2_Rotation2D;
+template<typename Scalar> class eigen2_AngleAxis;
+template<typename Scalar,int Dim> class eigen2_Transform;
+template <typename _Scalar, int _AmbientDim> class eigen2_ParametrizedLine;
+template <typename _Scalar, int _AmbientDim> class eigen2_Hyperplane;
+template<typename Scalar,int Dim> class eigen2_Translation;
+template<typename Scalar,int Dim> class eigen2_Scaling;
+#endif
+
+#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
+template<typename Scalar> class Quaternion;
+template<typename Scalar,int Dim> class Transform;
+template <typename _Scalar, int _AmbientDim> class ParametrizedLine;
+template <typename _Scalar, int _AmbientDim> class Hyperplane;
+template<typename Scalar,int Dim> class Scaling;
+#endif
+
+#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+template<typename Scalar, int Options = AutoAlign> class Quaternion;
+template<typename Scalar,int Dim,int Mode,int _Options=AutoAlign> class Transform;
+template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class ParametrizedLine;
+template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class Hyperplane;
+template<typename Scalar> class UniformScaling;
+template<typename MatrixType,int Direction> class Homogeneous;
+#endif
+
+// MatrixFunctions module
+template<typename Derived> struct MatrixExponentialReturnValue;
+template<typename Derived> class MatrixFunctionReturnValue;
+template<typename Derived> class MatrixSquareRootReturnValue;
+template<typename Derived> class MatrixLogarithmReturnValue;
+
+namespace internal {
+template <typename Scalar>
+struct stem_function
+{
+  typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+  typedef ComplexScalar type(ComplexScalar, int);
+};
+}
+
+
+#ifdef EIGEN2_SUPPORT
+template<typename ExpressionType> class Cwise;
+template<typename MatrixType> class Minor;
+template<typename MatrixType> class LU;
+template<typename MatrixType> class QR;
+template<typename MatrixType> class SVD;
+namespace internal {
+template<typename MatrixType, unsigned int Mode> struct eigen2_part_return_type;
+}
+#endif
+
+} // end namespace Eigen
+
+#endif // EIGEN_FORWARDDECLARATIONS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/MKL_support.h b/resources/3rdParty/eigen/Eigen/src/Core/util/MKL_support.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/util/MKL_support.h
rename to resources/3rdParty/eigen/Eigen/src/Core/util/MKL_support.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/util/Macros.h b/resources/3rdParty/eigen/Eigen/src/Core/util/Macros.h
new file mode 100644
index 000000000..31436fe79
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/util/Macros.h
@@ -0,0 +1,410 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MACROS_H
+#define EIGEN_MACROS_H
+
+#define EIGEN_WORLD_VERSION 3
+#define EIGEN_MAJOR_VERSION 1
+#define EIGEN_MINOR_VERSION 2
+
+#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
+                                      (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
+                                                                 EIGEN_MINOR_VERSION>=z))))
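Client code typically uses this macro to guard version-dependent constructs, for example:

#include <Eigen/Core>

#if EIGEN_VERSION_AT_LEAST(3,1,0)
  // code relying on the 3.1 series
#else
  #error "this project requires Eigen 3.1 or newer"
#endif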
+#ifdef __GNUC__
+  #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)
+#else
+  #define EIGEN_GNUC_AT_LEAST(x,y) 0
+#endif
+ 
+#ifdef __GNUC__
+  #define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)
+#else
+  #define EIGEN_GNUC_AT_MOST(x,y) 0
+#endif
+
+#if EIGEN_GNUC_AT_MOST(4,3) && !defined(__clang__)
+  // see bug 89
+  #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
+#else
+  #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
+#endif
+
+#if defined(__GNUC__) && (__GNUC__ <= 3)
+#define EIGEN_GCC3_OR_OLDER 1
+#else
+#define EIGEN_GCC3_OR_OLDER 0
+#endif
+
+// 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
+// 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
+// enable alignment, but it can be a cause of problems on some platforms, so we just disable it on
+// certain common platforms (compiler+architecture combinations) to avoid these problems.
+// Only static alignment is really problematic (it relies on nonstandard compiler extensions that don't
+// work everywhere, for example they don't work on GCC/ARM), so we try to keep heap alignment even
+// when we have to disable static alignment.
+#if defined(__GNUC__) && !(defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || defined(__ppc__) || defined(__ia64__))
+#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
+#else
+#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
+#endif
+
+// static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
+#if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
+ && !EIGEN_GCC3_OR_OLDER \
+ && !defined(__SUNPRO_CC) \
+ && !defined(__QNXNTO__)
+  #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
+#else
+  #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
+#endif
+
+#ifdef EIGEN_DONT_ALIGN
+  #ifndef EIGEN_DONT_ALIGN_STATICALLY
+    #define EIGEN_DONT_ALIGN_STATICALLY
+  #endif
+  #define EIGEN_ALIGN 0
+#else
+  #define EIGEN_ALIGN 1
+#endif
+
+// EIGEN_ALIGN_STATICALLY is the true test of whether we want to align arrays on the stack or not. It takes into account both the user's choice to explicitly disable
+// alignment (EIGEN_DONT_ALIGN_STATICALLY) and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT). Henceforth, only EIGEN_ALIGN_STATICALLY should be used.
+#if EIGEN_ARCH_WANTS_STACK_ALIGNMENT && !defined(EIGEN_DONT_ALIGN_STATICALLY)
+  #define EIGEN_ALIGN_STATICALLY 1
+#else
+  #define EIGEN_ALIGN_STATICALLY 0
+  #ifndef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+    #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+  #endif
+#endif
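+// Usage sketch (illustrative): defining EIGEN_DONT_ALIGN_STATICALLY before the
+// first Eigen include disables stack alignment for the whole translation unit:
+//   #define EIGEN_DONT_ALIGN_STATICALLY
+//   #include <Eigen/Core>   // EIGEN_ALIGN_STATICALLY is then 0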
+
+#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION RowMajor
+#else
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ColMajor
+#endif
+
+#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
+#endif
+
+/** Allows disabling some optimizations which might affect the accuracy of the result.
+  * Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them.
+  * They currently include:
+  *   - single precision Cwise::sin() and Cwise::cos() when SSE vectorization is enabled.
+  */
+#ifndef EIGEN_FAST_MATH
+#define EIGEN_FAST_MATH 1
+#endif
+
+#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl;
+
+// concatenate two tokens
+#define EIGEN_CAT2(a,b) a ## b
+#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b)
+
+// convert a token to a string
+#define EIGEN_MAKESTRING2(a) #a
+#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
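+// For example (illustrative): EIGEN_CAT(foo,bar) expands to the single token
+// foobar, and EIGEN_MAKESTRING(EIGEN_WORLD_VERSION) expands to "3"; the extra
+// level of indirection is what forces the argument to be macro-expanded first.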
+
+#if EIGEN_GNUC_AT_LEAST(4,1) && !defined(__clang__) && !defined(__INTEL_COMPILER)
+#define EIGEN_FLATTEN_ATTRIB __attribute__((flatten))
+#else
+#define EIGEN_FLATTEN_ATTRIB
+#endif
+
+// EIGEN_STRONG_INLINE is a stronger version of the inline keyword, using __forceinline on MSVC,
+// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline
+// but GCC is still doing fine with just inline.
+#if (defined _MSC_VER) || (defined __INTEL_COMPILER)
+#define EIGEN_STRONG_INLINE __forceinline
+#else
+#define EIGEN_STRONG_INLINE inline
+#endif
+
+// EIGEN_ALWAYS_INLINE is the strongest: it makes the function inline and adds every possible
+// attribute to maximize inlining. This should only be used when really necessary: in particular,
+// it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times.
+// FIXME with the always_inline attribute,
+// gcc 3.4.x reports the following compilation error:
+//   Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
+//    : function body not available
+#if EIGEN_GNUC_AT_LEAST(4,0)
+#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
+#else
+#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE
+#endif
+
+#if (defined __GNUC__)
+#define EIGEN_DONT_INLINE __attribute__((noinline))
+#elif (defined _MSC_VER)
+#define EIGEN_DONT_INLINE __declspec(noinline)
+#else
+#define EIGEN_DONT_INLINE
+#endif
+
+// this macro allows getting rid of linking errors about multiply defined functions.
+//  - static is not very good because it prevents definitions from different object files from being merged.
+//           So static causes the resulting linked executable to be bloated with multiple copies of the same function.
+//  - inline is not perfect either, as it unwantedly hints the compiler toward inlining the function.
+#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS inline
+
+#ifdef NDEBUG
+# ifndef EIGEN_NO_DEBUG
+#  define EIGEN_NO_DEBUG
+# endif
+#endif
+
+// eigen_plain_assert is where we implement the workaround for the assert() bug in GCC <= 4.3, see bug 89
+#ifdef EIGEN_NO_DEBUG
+  #define eigen_plain_assert(x)
+#else
+  #if EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO
+    namespace Eigen {
+    namespace internal {
+    inline bool copy_bool(bool b) { return b; }
+    }
+    }
+    #define eigen_plain_assert(x) assert(x)
+  #else
+    // work around bug 89
+    #include <cstdlib>   // for abort
+    #include <iostream>  // for std::cerr
+
+    namespace Eigen {
+    namespace internal {
+    // trivial function copying a bool. Must be EIGEN_DONT_INLINE, so we implement it after including Eigen headers.
+    // see bug 89.
+    namespace {
+    EIGEN_DONT_INLINE bool copy_bool(bool b) { return b; }
+    }
+    inline void assert_fail(const char *condition, const char *function, const char *file, int line)
+    {
+      std::cerr << "assertion failed: " << condition << " in function " << function << " at " << file << ":" << line << std::endl;
+      abort();
+    }
+    }
+    }
+    #define eigen_plain_assert(x) \
+      do { \
+        if(!Eigen::internal::copy_bool(x)) \
+          Eigen::internal::assert_fail(EIGEN_MAKESTRING(x), __PRETTY_FUNCTION__, __FILE__, __LINE__); \
+      } while(false)
+  #endif
+#endif
+
+// eigen_assert can be overridden
+#ifndef eigen_assert
+#define eigen_assert(x) eigen_plain_assert(x)
+#endif
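+// Usage sketch (illustrative): a project can route Eigen's assertions to its
+// own handler by defining eigen_assert before including any Eigen header
+// (my_assert_handler below is a hypothetical user function):
+//   #define eigen_assert(x) my_assert_handler(x)
+//   #include <Eigen/Dense>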
+
+#ifdef EIGEN_INTERNAL_DEBUGGING
+#define eigen_internal_assert(x) eigen_assert(x)
+#else
+#define eigen_internal_assert(x)
+#endif
+
+#ifdef EIGEN_NO_DEBUG
+#define EIGEN_ONLY_USED_FOR_DEBUG(x) (void)x
+#else
+#define EIGEN_ONLY_USED_FOR_DEBUG(x)
+#endif
+
+#ifndef EIGEN_NO_DEPRECATED_WARNING
+  #if (defined __GNUC__)
+    #define EIGEN_DEPRECATED __attribute__((deprecated))
+  #elif (defined _MSC_VER)
+    #define EIGEN_DEPRECATED __declspec(deprecated)
+  #else
+    #define EIGEN_DEPRECATED
+  #endif
+#else
+  #define EIGEN_DEPRECATED
+#endif
+
+#if (defined __GNUC__)
+#define EIGEN_UNUSED __attribute__((unused))
+#else
+#define EIGEN_UNUSED
+#endif
+
+// Suppresses 'unused variable' warnings.
+#define EIGEN_UNUSED_VARIABLE(var) (void)var;
+
+#if !defined(EIGEN_ASM_COMMENT) && (defined __GNUC__)
+#define EIGEN_ASM_COMMENT(X)  asm("#" X)
+#else
+#define EIGEN_ASM_COMMENT(X)
+#endif
+
+/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
+ * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
+ * so that vectorization doesn't affect binary compatibility.
+ *
+ * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
+ * vectorized and non-vectorized code.
+ */
+#if (defined __GNUC__) || (defined __PGI) || (defined __IBMCPP__) || (defined __ARMCC_VERSION)
+  #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+#elif (defined _MSC_VER)
+  #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
+#elif (defined __SUNPRO_CC)
+  // FIXME not sure about this one:
+  #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+#else
+  #error Please tell me what the equivalent of __attribute__((aligned(n))) is for your compiler
+#endif
+
+#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
+
+#if EIGEN_ALIGN_STATICALLY
+#define EIGEN_USER_ALIGN_TO_BOUNDARY(n) EIGEN_ALIGN_TO_BOUNDARY(n)
+#define EIGEN_USER_ALIGN16 EIGEN_ALIGN16
+#else
+#define EIGEN_USER_ALIGN_TO_BOUNDARY(n)
+#define EIGEN_USER_ALIGN16
+#endif
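+// For example (illustrative), EIGEN_ALIGN16 float data[4]; declares a
+// 16-byte-aligned buffer on every compiler handled above, while
+// EIGEN_USER_ALIGN16 does so only when static alignment is enabled.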
+
+#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD
+  #define EIGEN_RESTRICT
+#endif
+#ifndef EIGEN_RESTRICT
+  #define EIGEN_RESTRICT __restrict
+#endif
+
+#ifndef EIGEN_STACK_ALLOCATION_LIMIT
+#define EIGEN_STACK_ALLOCATION_LIMIT 20000
+#endif
+
+#ifndef EIGEN_DEFAULT_IO_FORMAT
+#ifdef EIGEN_MAKING_DOCS
+// format used in Eigen's documentation
+// we need to define it here, as escaping characters in CMake's add_definitions() argument seems very problematic.
+#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(3, 0, " ", "\n", "", "")
+#else
+#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat()
+#endif
+#endif
+
+// just an empty macro !
+#define EIGEN_EMPTY
+
+#if defined(_MSC_VER) && (!defined(__INTEL_COMPILER))
+#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+  using Base::operator =;
+#else
+#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+  using Base::operator =; \
+  EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \
+  { \
+    Base::operator=(other); \
+    return *this; \
+  }
+#endif
+
+#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
+  EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
+
+/**
+* Just a side note: commenting within defines works only by documenting
+* behind the object (via '!<'). Comments cannot be multi-line, hence
+* the extra long lines. What confuses doxygen here is
+* that we use '\' and basically have a bunch of typedefs with their
+* documentation on a single line.
+**/
+
+#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
+  typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would correspond to RealScalar. */ \
+  typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
+  typedef typename Eigen::internal::nested<Derived>::type Nested; \
+  typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+  typedef typename Eigen::internal::traits<Derived>::Index Index; \
+  enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+        ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+        Flags = Eigen::internal::traits<Derived>::Flags, \
+        CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
+        SizeAtCompileTime = Base::SizeAtCompileTime, \
+        MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
+        IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
+
+
+#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
+  typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would correspond to RealScalar. */ \
+  typedef typename Base::PacketScalar PacketScalar; \
+  typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
+  typedef typename Eigen::internal::nested<Derived>::type Nested; \
+  typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+  typedef typename Eigen::internal::traits<Derived>::Index Index; \
+  enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+        ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+        MaxRowsAtCompileTime = Eigen::internal::traits<Derived>::MaxRowsAtCompileTime, \
+        MaxColsAtCompileTime = Eigen::internal::traits<Derived>::MaxColsAtCompileTime, \
+        Flags = Eigen::internal::traits<Derived>::Flags, \
+        CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
+        SizeAtCompileTime = Base::SizeAtCompileTime, \
+        MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
+        IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+  using Base::derived; \
+  using Base::const_cast_derived;
+
+
+#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
+#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
+
+// EIGEN_SIZE_MIN_PREFER_DYNAMIC gives the min between compile-time sizes. 0 has absolute priority, followed by 1,
+// followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over
+// finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3.
+#define EIGEN_SIZE_MIN_PREFER_DYNAMIC(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
+                           : ((int)a == 1 || (int)b == 1) ? 1 \
+                           : ((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
+                           : ((int)a <= (int)b) ? (int)a : (int)b)
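+// Worked examples (illustrative): EIGEN_SIZE_MIN_PREFER_DYNAMIC(3,Dynamic) is
+// Dynamic, EIGEN_SIZE_MIN_PREFER_DYNAMIC(0,Dynamic) is 0, and
+// EIGEN_SIZE_MIN_PREFER_DYNAMIC(2,3) is 2.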
+
+// EIGEN_SIZE_MIN_PREFER_FIXED is a variant of EIGEN_SIZE_MIN_PREFER_DYNAMIC comparing MaxSizes. The difference is that finite values
+// now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is
+// (between 0 and 3), it is not more than 3.
+#define EIGEN_SIZE_MIN_PREFER_FIXED(a,b)  (((int)a == 0 || (int)b == 0) ? 0 \
+                           : ((int)a == 1 || (int)b == 1) ? 1 \
+                           : ((int)a == Dynamic && (int)b == Dynamic) ? Dynamic \
+                           : ((int)a == Dynamic) ? (int)b \
+                           : ((int)b == Dynamic) ? (int)a \
+                           : ((int)a <= (int)b) ? (int)a : (int)b)
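+// Worked examples (illustrative): EIGEN_SIZE_MIN_PREFER_FIXED(3,Dynamic) is 3,
+// while EIGEN_SIZE_MIN_PREFER_FIXED(Dynamic,Dynamic) stays Dynamic.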
+
+// see EIGEN_SIZE_MIN_PREFER_DYNAMIC. No need for a separate variant for MaxSizes here.
+#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
+                           : ((int)a >= (int)b) ? (int)a : (int)b)
+
+#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))
+
+#define EIGEN_IMPLIES(a,b) (!(a) || (b))
+
+#define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR) \
+  template<typename OtherDerived> \
+  EIGEN_STRONG_INLINE const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> \
+  (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+  { \
+    return CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); \
+  }
+
+// the expression type of a cwise product
+#define EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS) \
+    CwiseBinaryOp< \
+      internal::scalar_product_op< \
+          typename internal::traits<LHS>::Scalar, \
+          typename internal::traits<RHS>::Scalar \
+      >, \
+      const LHS, \
+      const RHS \
+    >
+
+#endif // EIGEN_MACROS_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/util/Memory.h b/resources/3rdParty/eigen/Eigen/src/Core/util/Memory.h
new file mode 100644
index 000000000..69148703e
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/util/Memory.h
@@ -0,0 +1,952 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Kenneth Riddile <kfriddile@yahoo.com>
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+// Copyright (C) 2010 Thomas Capricelli <orzel@freehackers.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+/*****************************************************************************
+*** Platform checks for aligned malloc functions                           ***
+*****************************************************************************/
+
+#ifndef EIGEN_MEMORY_H
+#define EIGEN_MEMORY_H
+
+// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:
+//   http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html
+// This is true at least since glibc 2.8.
+// This leaves the question of how to detect 64-bit. According to this document,
+//   http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf
+// page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed
+// quite safe, at least within the context of glibc, to equate 64-bit with LP64.
+#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \
+ && defined(__LP64__)
+  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
+#else
+  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
+#endif
+
+// FreeBSD 6 seems to have 16-byte aligned malloc
+//   See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup
+// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures
+//   See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup
+#if defined(__FreeBSD__) && !defined(__arm__) && !defined(__mips__)
+  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1
+#else
+  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0
+#endif
+
+#if defined(__APPLE__) \
+ || defined(_WIN64) \
+ || EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED \
+ || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED
+  #define EIGEN_MALLOC_ALREADY_ALIGNED 1
+#else
+  #define EIGEN_MALLOC_ALREADY_ALIGNED 0
+#endif
+
+#if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) \
+ && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
+  #define EIGEN_HAS_POSIX_MEMALIGN 1
+#else
+  #define EIGEN_HAS_POSIX_MEMALIGN 0
+#endif
+
+#ifdef EIGEN_VECTORIZE_SSE
+  #define EIGEN_HAS_MM_MALLOC 1
+#else
+  #define EIGEN_HAS_MM_MALLOC 0
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+inline void throw_std_bad_alloc()
+{
+  #ifdef EIGEN_EXCEPTIONS
+    throw std::bad_alloc();
+  #else
+    std::size_t huge = -1;
+    new int[huge];
+  #endif
+}
+
+/*****************************************************************************
+*** Implementation of handmade aligned functions                           ***
+*****************************************************************************/
+
+/* ----- Hand made implementations of aligned malloc/free and realloc ----- */
+
+/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned.
+  * Fast, but wastes 16 additional bytes of memory. Does not throw any exception.
+  */
+inline void* handmade_aligned_malloc(size_t size)
+{
+  void *original = std::malloc(size+16);
+  if (original == 0) return 0;
+  void *aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(original) & ~(size_t(15))) + 16);
+  *(reinterpret_cast<void**>(aligned) - 1) = original;
+  return aligned;
+}
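+// Worked example (illustrative): if std::malloc returns 0x1003, then
+// (0x1003 & ~15) + 16 == 0x1010 is returned to the caller, and the original
+// pointer 0x1003 is stashed in the slot just below 0x1010 so that
+// handmade_aligned_free can recover it.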
+
+/** \internal Frees memory allocated with handmade_aligned_malloc */
+inline void handmade_aligned_free(void *ptr)
+{
+  if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
+}
+
+/** \internal
+  * \brief Reallocates aligned memory.
+  * Since our handmade version is ultimately based on std::malloc,
+  * we can use std::realloc to implement efficient reallocation.
+  */
+inline void* handmade_aligned_realloc(void* ptr, size_t size, size_t = 0)
+{
+  if (ptr == 0) return handmade_aligned_malloc(size);
+  void *original = *(reinterpret_cast<void**>(ptr) - 1);
+  original = std::realloc(original,size+16);
+  if (original == 0) return 0;
+  void *aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(original) & ~(size_t(15))) + 16);
+  *(reinterpret_cast<void**>(aligned) - 1) = original;
+  return aligned;
+}
+
+/*****************************************************************************
+*** Implementation of generic aligned realloc (when no realloc can be used)***
+*****************************************************************************/
+
+void* aligned_malloc(size_t size);
+void  aligned_free(void *ptr);
+
+/** \internal
+  * \brief Reallocates aligned memory.
+  * Allows reallocation with aligned ptr types. This implementation will
+  * always create a new memory chunk and copy the old data.
+  */
+inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size)
+{
+  if (ptr==0)
+    return aligned_malloc(size);
+
+  if (size==0)
+  {
+    aligned_free(ptr);
+    return 0;
+  }
+
+  void* newptr = aligned_malloc(size);
+  if (newptr == 0)
+  {
+    #ifdef EIGEN_HAS_ERRNO
+    errno = ENOMEM; // according to the standard
+    #endif
+    return 0;
+  }
+
+  if (ptr != 0)
+  {
+    std::memcpy(newptr, ptr, (std::min)(size,old_size));
+    aligned_free(ptr);
+  }
+
+  return newptr;
+}
+
+/*****************************************************************************
+*** Implementation of portable aligned versions of malloc/free/realloc     ***
+*****************************************************************************/
+
+#ifdef EIGEN_NO_MALLOC
+inline void check_that_malloc_is_allowed()
+{
+  eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
+}
+#elif defined EIGEN_RUNTIME_NO_MALLOC
+inline bool is_malloc_allowed_impl(bool update, bool new_value = false)
+{
+  static bool value = true;
+  if (update == 1)
+    value = new_value;
+  return value;
+}
+inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }
+inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }
+inline void check_that_malloc_is_allowed()
+{
+  eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)");
+}
+#else 
+inline void check_that_malloc_is_allowed()
+{}
+#endif
+
+/** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16 bytes alignment.
+  * On allocation error, the returned pointer is null, and std::bad_alloc is thrown.
+  */
+inline void* aligned_malloc(size_t size)
+{
+  check_that_malloc_is_allowed();
+
+  void *result;
+  #if !EIGEN_ALIGN
+    result = std::malloc(size);
+  #elif EIGEN_MALLOC_ALREADY_ALIGNED
+    result = std::malloc(size);
+  #elif EIGEN_HAS_POSIX_MEMALIGN
+    if(posix_memalign(&result, 16, size)) result = 0;
+  #elif EIGEN_HAS_MM_MALLOC
+    result = _mm_malloc(size, 16);
+  #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
+    result = _aligned_malloc(size, 16);
+  #else
+    result = handmade_aligned_malloc(size);
+  #endif
+
+  if(!result && size)
+    throw_std_bad_alloc();
+
+  return result;
+}
+
+/** \internal Frees memory allocated with aligned_malloc. */
+inline void aligned_free(void *ptr)
+{
+  #if !EIGEN_ALIGN
+    std::free(ptr);
+  #elif EIGEN_MALLOC_ALREADY_ALIGNED
+    std::free(ptr);
+  #elif EIGEN_HAS_POSIX_MEMALIGN
+    std::free(ptr);
+  #elif EIGEN_HAS_MM_MALLOC
+    _mm_free(ptr);
+  #elif defined(_MSC_VER)
+    _aligned_free(ptr);
+  #else
+    handmade_aligned_free(ptr);
+  #endif
+}
+
+/**
+* \internal
+* \brief Reallocates an aligned block of memory.
+* \throws std::bad_alloc on allocation failure
+**/
+inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
+{
+  EIGEN_UNUSED_VARIABLE(old_size);
+
+  void *result;
+#if !EIGEN_ALIGN
+  result = std::realloc(ptr,new_size);
+#elif EIGEN_MALLOC_ALREADY_ALIGNED
+  result = std::realloc(ptr,new_size);
+#elif EIGEN_HAS_POSIX_MEMALIGN
+  result = generic_aligned_realloc(ptr,new_size,old_size);
+#elif EIGEN_HAS_MM_MALLOC
+  // The defined(_mm_free) is just here to verify that this MSVC version
+  // implements _mm_malloc/_mm_free based on the corresponding _aligned_
+  // functions. This may not always be the case and we just try to be safe.
+  #if defined(_MSC_VER) && defined(_mm_free)
+    result = _aligned_realloc(ptr,new_size,16);
+  #else
+    result = generic_aligned_realloc(ptr,new_size,old_size);
+  #endif
+#elif defined(_MSC_VER)
+  result = _aligned_realloc(ptr,new_size,16);
+#else
+  result = handmade_aligned_realloc(ptr,new_size,old_size);
+#endif
+
+  if (!result && new_size)
+    throw_std_bad_alloc();
+
+  return result;
+}
+
+/*****************************************************************************
+*** Implementation of conditionally aligned functions                      ***
+*****************************************************************************/
+
+/** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned.
+  * On allocation error, the returned pointer is null, and a std::bad_alloc is thrown.
+  */
+template<bool Align> inline void* conditional_aligned_malloc(size_t size)
+{
+  return aligned_malloc(size);
+}
+
+template<> inline void* conditional_aligned_malloc<false>(size_t size)
+{
+  check_that_malloc_is_allowed();
+
+  void *result = std::malloc(size);
+  if(!result && size)
+    throw_std_bad_alloc();
+  return result;
+}
+
+/** \internal Frees memory allocated with conditional_aligned_malloc */
+template<bool Align> inline void conditional_aligned_free(void *ptr)
+{
+  aligned_free(ptr);
+}
+
+template<> inline void conditional_aligned_free<false>(void *ptr)
+{
+  std::free(ptr);
+}
+
+template<bool Align> inline void* conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size)
+{
+  return aligned_realloc(ptr, new_size, old_size);
+}
+
+template<> inline void* conditional_aligned_realloc<false>(void* ptr, size_t new_size, size_t)
+{
+  return std::realloc(ptr, new_size);
+}
+
+/*****************************************************************************
+*** Construction/destruction of array elements                             ***
+*****************************************************************************/
+
+/** \internal Constructs the elements of an array.
+  * The \a size parameter specifies how many objects to call the constructor of T on.
+  */
+template<typename T> inline T* construct_elements_of_array(T *ptr, size_t size)
+{
+  for (size_t i=0; i < size; ++i) ::new (ptr + i) T;
+  return ptr;
+}
+
+/** \internal Destructs the elements of an array.
+  * The \a size parameter specifies how many objects to call the destructor of T on.
+  */
+template<typename T> inline void destruct_elements_of_array(T *ptr, size_t size)
+{
+  // always destruct an array starting from the end.
+  if(ptr)
+    while(size) ptr[--size].~T();
+}
+
+/*****************************************************************************
+*** Implementation of aligned new/delete-like functions                    ***
+*****************************************************************************/
+
+template<typename T>
+EIGEN_ALWAYS_INLINE void check_size_for_overflow(size_t size)
+{
+  if(size > size_t(-1) / sizeof(T))
+    throw_std_bad_alloc();
+}
+
+/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment.
+  * On allocation error, the returned pointer is undefined, but a std::bad_alloc is thrown.
+  * The default constructor of T is called.
+  */
+template<typename T> inline T* aligned_new(size_t size)
+{
+  check_size_for_overflow<T>(size);
+  T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
+  return construct_elements_of_array(result, size);
+}
+
+template<typename T, bool Align> inline T* conditional_aligned_new(size_t size)
+{
+  check_size_for_overflow<T>(size);
+  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
+  return construct_elements_of_array(result, size);
+}
+
+/** \internal Deletes objects constructed with aligned_new
+  * The \a size parameter specifies how many objects to call the destructor of T on.
+  */
+template<typename T> inline void aligned_delete(T *ptr, size_t size)
+{
+  destruct_elements_of_array<T>(ptr, size);
+  aligned_free(ptr);
+}
+
+/** \internal Deletes objects constructed with conditional_aligned_new
+  * The \a size parameter specifies how many objects to call the destructor of T on.
+  */
+template<typename T, bool Align> inline void conditional_aligned_delete(T *ptr, size_t size)
+{
+  destruct_elements_of_array<T>(ptr, size);
+  conditional_aligned_free<Align>(ptr);
+}
+
+template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pts, size_t new_size, size_t old_size)
+{
+  check_size_for_overflow<T>(new_size);
+  check_size_for_overflow<T>(old_size);
+  if(new_size < old_size)
+    destruct_elements_of_array(pts+new_size, old_size-new_size);
+  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
+  if(new_size > old_size)
+    construct_elements_of_array(result+old_size, new_size-old_size);
+  return result;
+}
+
+
+template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t size)
+{
+  check_size_for_overflow<T>(size);
+  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
+  if(NumTraits<T>::RequireInitialization)
+    construct_elements_of_array(result, size);
+  return result;
+}
+
+template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, size_t new_size, size_t old_size)
+{
+  check_size_for_overflow<T>(new_size);
+  check_size_for_overflow<T>(old_size);
+  if(NumTraits<T>::RequireInitialization && (new_size < old_size))
+    destruct_elements_of_array(pts+new_size, old_size-new_size);
+  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
+  if(NumTraits<T>::RequireInitialization && (new_size > old_size))
+    construct_elements_of_array(result+old_size, new_size-old_size);
+  return result;
+}
+
+template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *ptr, size_t size)
+{
+  if(NumTraits<T>::RequireInitialization)
+    destruct_elements_of_array<T>(ptr, size);
+  conditional_aligned_free<Align>(ptr);
+}
+
+/****************************************************************************/
+
+/** \internal Returns the index of the first element of the array that is well aligned for vectorization.
+  *
+  * \param array the address of the start of the array
+  * \param size the size of the array
+  *
+  * \note If no element of the array is well aligned, the size of the array is returned. Typically,
+  * for example with SSE, "well aligned" means 16-byte-aligned. If vectorization is disabled or if the
+  * packet size for the given scalar type is 1, then everything is considered well-aligned.
+  *
+  * \note If the scalar type is vectorizable, we rely on the following assumptions: sizeof(Scalar) is a
+  * power of 2, the packet size in bytes is also a power of 2, and is a multiple of sizeof(Scalar). On the
+  * other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
+  * example with Scalar=double on certain 32-bit platforms, see bug #79.
+  *
+  * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
+  */
+template<typename Scalar, typename Index>
+static inline Index first_aligned(const Scalar* array, Index size)
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+  enum { PacketSize = packet_traits<Scalar>::size,
+         PacketAlignedMask = PacketSize-1
+  };
+
+  if(PacketSize==1)
+  {
+    // Either there is no vectorization, or a packet consists of exactly 1 scalar so that all elements
+    // of the array have the same alignment.
+    return 0;
+  }
+  else if(size_t(array) & (sizeof(Scalar)-1))
+  {
+    // There is vectorization for this scalar type, but the array is not aligned to the size of a single scalar.
+    // Consequently, no element of the array is well aligned.
+    return size;
+  }
+  else
+  {
+    return std::min<Index>( (PacketSize - (Index((size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
+                           & PacketAlignedMask, size);
+  }
+}
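+// Worked example (illustrative): with Scalar=float and a 4-float packet, an
+// array starting at an address that is 8-byte but not 16-byte aligned yields
+// first_aligned == 2, since skipping two floats (8 bytes) reaches the next
+// 16-byte boundary.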
+
+
+// std::copy is much slower than memcpy, so let's introduce a smart_copy which
+// uses memcpy on trivial types, i.e., on types that do not require an initialization ctor.
+template<typename T, bool UseMemcpy> struct smart_copy_helper;
+
+template<typename T> void smart_copy(const T* start, const T* end, T* target)
+{
+  smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
+}
+
+template<typename T> struct smart_copy_helper<T,true> {
+  static inline void run(const T* start, const T* end, T* target)
+  { memcpy(target, start, std::ptrdiff_t(end)-std::ptrdiff_t(start)); }
+};
+
+template<typename T> struct smart_copy_helper<T,false> {
+  static inline void run(const T* start, const T* end, T* target)
+  { std::copy(start, end, target); }
+};
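+// Usage sketch (illustrative): smart_copy(src, src + n, dst) degenerates to a
+// single memcpy for e.g. float or double, and falls back to std::copy for
+// types with nontrivial constructors.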
+
+
+/*****************************************************************************
+*** Implementation of runtime stack allocation (falling back to malloc)    ***
+*****************************************************************************/
+
+// you can override Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
+// to the appropriate stack allocation function
+#ifndef EIGEN_ALLOCA
+  #if (defined __linux__)
+    #define EIGEN_ALLOCA alloca
+  #elif defined(_MSC_VER)
+    #define EIGEN_ALLOCA _alloca
+  #endif
+#endif
+
+// This helper class constructs the allocated memory, and takes care of destructing and freeing the handled data
+// at destruction time. In practice this helper class is mainly useful to avoid memory leaks in case of exceptions.
+template<typename T> class aligned_stack_memory_handler
+{
+  public:
+    /* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size.
+     * Note that \a ptr can be 0 regardless of the other parameters.
+     * This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits<T>::RequireInitialization).
+     * In this case, the buffer elements will also be destructed when this handler is destructed.
+     * Finally, if \a dealloc is true, then the pointer \a ptr is freed.
+     **/
+    aligned_stack_memory_handler(T* ptr, size_t size, bool dealloc)
+      : m_ptr(ptr), m_size(size), m_deallocate(dealloc)
+    {
+      if(NumTraits<T>::RequireInitialization && m_ptr)
+        Eigen::internal::construct_elements_of_array(m_ptr, size);
+    }
+    ~aligned_stack_memory_handler()
+    {
+      if(NumTraits<T>::RequireInitialization && m_ptr)
+        Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);
+      if(m_deallocate)
+        Eigen::internal::aligned_free(m_ptr);
+    }
+  protected:
+    T* m_ptr;
+    size_t m_size;
+    bool m_deallocate;
+};
+
+} // end namespace internal
+
+/** \internal
+  * Declares, allocates and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack
+  * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
+  * (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap.
+  * The allocated buffer is automatically deleted when exiting the scope of this declaration.
+  * If BUFFER is non null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.
+  * Here is an example:
+  * \code
+  * {
+  *   ei_declare_aligned_stack_constructed_variable(float,data,size,0);
+  *   // use data[0] to data[size-1]
+  * }
+  * \endcode
+  * The underlying stack allocation function can be controlled with the EIGEN_ALLOCA preprocessor token.
+  */
+#ifdef EIGEN_ALLOCA
+
+  #ifdef __arm__
+    #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+16)) & ~(size_t(15))) + 16)
+  #else
+    #define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
+  #endif
+
+  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
+    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
+    TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \
+               : reinterpret_cast<TYPE*>( \
+                      (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \
+                    : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) );  \
+    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)
+
+#else
+
+  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
+    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
+    TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE));    \
+    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)
+    
+#endif
+
+
+/*****************************************************************************
+*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF]                ***
+*****************************************************************************/
+
+#if EIGEN_ALIGN
+  #ifdef EIGEN_EXCEPTIONS
+    #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+      void* operator new(size_t size, const std::nothrow_t&) throw() { \
+        try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
+        catch (...) { return 0; } \
+        return 0; \
+      }
+  #else
+    #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+      void* operator new(size_t size, const std::nothrow_t&) throw() { \
+        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+      }
+  #endif
+
+  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
+      void *operator new(size_t size) { \
+        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+      } \
+      void *operator new[](size_t size) { \
+        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+      } \
+      void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+      void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+      /* in-place new and delete. since (at least afaik) there is no actual   */ \
+      /* memory allocated we can safely let the default implementation handle */ \
+      /* this particular case. */ \
+      static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \
+      void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \
+      /* nothrow-new (returns zero instead of std::bad_alloc) */ \
+      EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+      void operator delete(void *ptr, const std::nothrow_t&) throw() { \
+        Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \
+      } \
+      typedef void eigen_aligned_operator_new_marker_type;
+#else
+  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+#endif
+
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0)))
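+// Usage sketch (illustrative): a class with a fixed-size vectorizable Eigen
+// member should expose the aligned operator new (MyClass is hypothetical):
+//   struct MyClass {
+//     Eigen::Matrix4f transform;
+//     EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+//   };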
+
+/****************************************************************************/
+
+/** \class aligned_allocator
+* \ingroup Core_Module
+*
+* \brief STL-compatible allocator to use with 16-byte-aligned types
+*
+* Example:
+* \code
+* // Matrix4f requires 16 bytes alignment:
+* std::map< int, Matrix4f, std::less<int>, 
+*           aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
+* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator:
+* std::map< int, Vector3f > my_map_vec3;
+* \endcode
+*
+* \sa \ref TopicStlContainers.
+*/
+template<class T>
+class aligned_allocator
+{
+public:
+    typedef size_t    size_type;
+    typedef std::ptrdiff_t difference_type;
+    typedef T*        pointer;
+    typedef const T*  const_pointer;
+    typedef T&        reference;
+    typedef const T&  const_reference;
+    typedef T         value_type;
+
+    template<class U>
+    struct rebind
+    {
+        typedef aligned_allocator<U> other;
+    };
+
+    pointer address( reference value ) const
+    {
+        return &value;
+    }
+
+    const_pointer address( const_reference value ) const
+    {
+        return &value;
+    }
+
+    aligned_allocator()
+    {
+    }
+
+    aligned_allocator( const aligned_allocator& )
+    {
+    }
+
+    template<class U>
+    aligned_allocator( const aligned_allocator<U>& )
+    {
+    }
+
+    ~aligned_allocator()
+    {
+    }
+
+    size_type max_size() const
+    {
+        return (std::numeric_limits<size_type>::max)();
+    }
+
+    pointer allocate( size_type num, const void* hint = 0 )
+    {
+        EIGEN_UNUSED_VARIABLE(hint);
+        internal::check_size_for_overflow<T>(num);
+        return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) );
+    }
+
+    void construct( pointer p, const T& value )
+    {
+        ::new( p ) T( value );
+    }
+
+    // Support for c++11
+#if (__cplusplus >= 201103L)
+    template<typename... Args>
+    void  construct(pointer p, Args&&... args)
+    {
+      ::new(p) T(std::forward<Args>(args)...);
+    }
+#endif
+
+    void destroy( pointer p )
+    {
+        p->~T();
+    }
+
+    void deallocate( pointer p, size_type /*num*/ )
+    {
+        internal::aligned_free( p );
+    }
+
+    bool operator!=(const aligned_allocator<T>& ) const
+    { return false; }
+
+    bool operator==(const aligned_allocator<T>& ) const
+    { return true; }
+};
+
+//---------- Cache sizes ----------
+
+#if !defined(EIGEN_NO_CPUID)
+#  if defined(__GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
+#    if defined(__PIC__) && defined(__i386__)
+       // Case for x86 with PIC
+#      define EIGEN_CPUID(abcd,func,id) \
+         __asm__ __volatile__ ("xchgl %%ebx, %%esi;cpuid; xchgl %%ebx,%%esi": "=a" (abcd[0]), "=S" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
+#    else
+       // Case for x86_64 or x86 w/o PIC
+#      define EIGEN_CPUID(abcd,func,id) \
+         __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id) );
+#    endif
+#  elif defined(_MSC_VER)
+#    if (_MSC_VER > 1500) && ( defined(_M_IX86) || defined(_M_X64) )
+#      define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
+#    endif
+#  endif
+#endif
+
+namespace internal {
+
+#ifdef EIGEN_CPUID
+
+inline bool cpuid_is_vendor(int abcd[4], const char* vendor)
+{
+  return abcd[1]==(reinterpret_cast<const int*>(vendor))[0] && abcd[3]==(reinterpret_cast<const int*>(vendor))[1] && abcd[2]==(reinterpret_cast<const int*>(vendor))[2];
+}
+
+inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
+{
+  int abcd[4];
+  l1 = l2 = l3 = 0;
+  int cache_id = 0;
+  int cache_type = 0;
+  do {
+    abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+    EIGEN_CPUID(abcd,0x4,cache_id);
+    cache_type  = (abcd[0] & 0x0F) >> 0;
+    if(cache_type==1||cache_type==3) // data or unified cache
+    {
+      int cache_level = (abcd[0] & 0xE0) >> 5;  // A[7:5]
+      int ways        = (abcd[1] & 0xFFC00000) >> 22; // B[31:22]
+      int partitions  = (abcd[1] & 0x003FF000) >> 12; // B[21:12]
+      int line_size   = (abcd[1] & 0x00000FFF) >>  0; // B[11:0]
+      int sets        = (abcd[2]);                    // C[31:0]
+
+      int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1);
+
+      switch(cache_level)
+      {
+        case 1: l1 = cache_size; break;
+        case 2: l2 = cache_size; break;
+        case 3: l3 = cache_size; break;
+        default: break;
+      }
+    }
+    cache_id++;
+  } while(cache_type>0 && cache_id<16);
+}
+
+inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3)
+{
+  int abcd[4];
+  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+  l1 = l2 = l3 = 0;
+  EIGEN_CPUID(abcd,0x00000002,0);
+  unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2;
+  bool check_for_p2_core2 = false;
+  for(int i=0; i<14; ++i)
+  {
+    switch(bytes[i])
+    {
+      case 0x0A: l1 = 8; break;   // 0Ah   data L1 cache, 8 KB, 2 ways, 32 byte lines
+      case 0x0C: l1 = 16; break;  // 0Ch   data L1 cache, 16 KB, 4 ways, 32 byte lines
+      case 0x0E: l1 = 24; break;  // 0Eh   data L1 cache, 24 KB, 6 ways, 64 byte lines
+      case 0x10: l1 = 16; break;  // 10h   data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
+      case 0x15: l1 = 16; break;  // 15h   code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
+      case 0x2C: l1 = 32; break;  // 2Ch   data L1 cache, 32 KB, 8 ways, 64 byte lines
+      case 0x30: l1 = 32; break;  // 30h   code L1 cache, 32 KB, 8 ways, 64 byte lines
+      case 0x60: l1 = 16; break;  // 60h   data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored
+      case 0x66: l1 = 8; break;   // 66h   data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored
+      case 0x67: l1 = 16; break;  // 67h   data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored
+      case 0x68: l1 = 32; break;  // 68h   data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored
+      case 0x1A: l2 = 96; break;   // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64)
+      case 0x22: l3 = 512; break;   // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored
+      case 0x23: l3 = 1024; break;   // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x25: l3 = 2048; break;   // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x29: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x39: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored
+      case 0x3A: l2 = 192; break;   // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored
+      case 0x3B: l2 = 128; break;   // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored
+      case 0x3C: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored
+      case 0x3D: l2 = 384; break;   // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored
+      case 0x3E: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored
+      case 0x40: l2 = 0; break;   // no integrated L2 cache (P6 core) or L3 cache (P4 core)
+      case 0x41: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 32 byte lines
+      case 0x42: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 32 byte lines
+      case 0x43: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 32 byte lines
+      case 0x44: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines
+      case 0x45: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines
+      case 0x46: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines
+      case 0x47: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines
+      case 0x48: l2 = 3072; break;   // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines
+      case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break;// code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2
+      case 0x4A: l3 = 6144; break;   // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines
+      case 0x4B: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines
+      case 0x4C: l3 = 12288; break;   // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines
+      case 0x4D: l3 = 16384; break;   // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines
+      case 0x4E: l2 = 6144; break;   // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines
+      case 0x78: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines
+      case 0x79: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x7A: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x7B: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x7C: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x7D: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines
+      case 0x7E: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64)
+      case 0x7F: l2 = 512; break;   // code and data L2 cache, 512 KB, 2 ways, 64 byte lines
+      case 0x80: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines
+      case 0x81: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 32 byte lines
+      case 0x82: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 32 byte lines
+      case 0x83: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 32 byte lines
+      case 0x84: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines
+      case 0x85: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines
+      case 0x86: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines
+      case 0x87: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines
+      case 0x88: l3 = 2048; break;   // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64)
+      case 0x89: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64)
+      case 0x8A: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64)
+      case 0x8D: l3 = 3072; break;   // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64)
+
+      default: break;
+    }
+  }
+  if(check_for_p2_core2 && l2 == l3)
+    l3 = 0;
+  l1 *= 1024;
+  l2 *= 1024;
+  l3 *= 1024;
+}
+
+inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)
+{
+  if(max_std_funcs>=4)
+    queryCacheSizes_intel_direct(l1,l2,l3);
+  else
+    queryCacheSizes_intel_codes(l1,l2,l3);
+}
+
+inline void queryCacheSizes_amd(int& l1, int& l2, int& l3)
+{
+  int abcd[4];
+  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+  EIGEN_CPUID(abcd,0x80000005,0);
+  l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
+  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+  EIGEN_CPUID(abcd,0x80000006,0);
+  l2 = (abcd[2] >> 16) * 1024; // C[31:16] = L2 cache size in KB
+  l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31:18] = L3 cache size in 512 KB units
+}
+#endif
+
+/** \internal
+ * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */
+inline void queryCacheSizes(int& l1, int& l2, int& l3)
+{
+  #ifdef EIGEN_CPUID
+  int abcd[4];
+
+  // identify the CPU vendor
+  EIGEN_CPUID(abcd,0x0,0);
+  int max_std_funcs = abcd[1];
+  if(cpuid_is_vendor(abcd,"GenuineIntel"))
+    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
+  else if(cpuid_is_vendor(abcd,"AuthenticAMD") || cpuid_is_vendor(abcd,"AMDisbetter!"))
+    queryCacheSizes_amd(l1,l2,l3);
+  else
+    // by default let's use Intel's API
+    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
+
+  // here is the list of other vendors:
+//   ||cpuid_is_vendor(abcd,"VIA VIA VIA ")
+//   ||cpuid_is_vendor(abcd,"CyrixInstead")
+//   ||cpuid_is_vendor(abcd,"CentaurHauls")
+//   ||cpuid_is_vendor(abcd,"GenuineTMx86")
+//   ||cpuid_is_vendor(abcd,"TransmetaCPU")
+//   ||cpuid_is_vendor(abcd,"RiseRiseRise")
+//   ||cpuid_is_vendor(abcd,"Geode by NSC")
+//   ||cpuid_is_vendor(abcd,"SiS SiS SiS ")
+//   ||cpuid_is_vendor(abcd,"UMC UMC UMC ")
+//   ||cpuid_is_vendor(abcd,"NexGenDriven")
+  #else
+  l1 = l2 = l3 = -1;
+  #endif
+}
+
+/** \internal
+ * \returns the size in Bytes of the L1 data cache */
+inline int queryL1CacheSize()
+{
+  int l1(-1), l2, l3;
+  queryCacheSizes(l1,l2,l3);
+  return l1;
+}
+
+/** \internal
+ * \returns the size in Bytes of the L2 or L3 cache if the latter is present */
+inline int queryTopLevelCacheSize()
+{
+  int l1, l2(-1), l3(-1);
+  queryCacheSizes(l1,l2,l3);
+  return (std::max)(l2,l3);
+}
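+// Usage sketch (illustrative): callers such as Eigen's blocking heuristics can
+// simply do
+//   int l1 = Eigen::internal::queryL1CacheSize();  // -1 when CPUID is unavailable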
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MEMORY_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/Meta.h b/resources/3rdParty/eigen/Eigen/src/Core/util/Meta.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/util/Meta.h
rename to resources/3rdParty/eigen/Eigen/src/Core/util/Meta.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/NonMPL2.h b/resources/3rdParty/eigen/Eigen/src/Core/util/NonMPL2.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/util/NonMPL2.h
rename to resources/3rdParty/eigen/Eigen/src/Core/util/NonMPL2.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h b/resources/3rdParty/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h
rename to resources/3rdParty/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/util/StaticAssert.h b/resources/3rdParty/eigen/Eigen/src/Core/util/StaticAssert.h
new file mode 100644
index 000000000..b46a75b37
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/util/StaticAssert.h
@@ -0,0 +1,205 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STATIC_ASSERT_H
+#define EIGEN_STATIC_ASSERT_H
+
+/* Some notes on Eigen's static assertion mechanism:
+ *
+ *  - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean
+ *    expression, and MSG an enum listed in struct internal::static_assertion<true>
+ *
+ *  - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time)
+ *    in that case, the static assertion is converted to the following runtime assert:
+ *      eigen_assert(CONDITION && "MSG")
+ *
+ *  - currently EIGEN_STATIC_ASSERT can only be used in function scope
+ *
+ */
+
+#ifndef EIGEN_NO_STATIC_ASSERT
+
+  #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(_MSC_VER) && (_MSC_VER >= 1600))
+
+    // if native static_assert is enabled, let's use it
+    #define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG);
+
+  #else // not CXX0X
+
+    namespace Eigen {
+
+    namespace internal {
+
+    template<bool condition>
+    struct static_assertion {};
+
+    template<>
+    struct static_assertion<true>
+    {
+      enum {
+        YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX,
+        YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES,
+        YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES,
+        THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE,
+        THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE,
+        THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE,
+        YOU_MADE_A_PROGRAMMING_MISTAKE,
+        EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT,
+        EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE,
+        YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR,
+        YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR,
+        UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC,
+        THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES,
+        FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED,
+        NUMERIC_TYPE_MUST_BE_REAL,
+        COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED,
+        WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED,
+        THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE,
+        INVALID_MATRIX_PRODUCT,
+        INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS,
+        INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION,
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY,
+        THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES,
+        THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES,
+        INVALID_MATRIX_TEMPLATE_PARAMETERS,
+        INVALID_MATRIXBASE_TEMPLATE_PARAMETERS,
+        BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER,
+        THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX,
+        THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE,
+        THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES,
+        YOU_ALREADY_SPECIFIED_THIS_STRIDE,
+        INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION,
+        THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD,
+        PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1,
+        THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS,
+        YOU_CANNOT_MIX_ARRAYS_AND_MATRICES,
+        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION,
+        THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY,
+        YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT,
+        THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS,
+        THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL,
+        THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES,
+        YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED,
+        YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,
+        THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,
+        THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH
+      };
+    };
+
+    } // end namespace internal
+
+    } // end namespace Eigen
+
+    // Specialized implementation for MSVC to avoid "conditional
+    // expression is constant" warnings.  This implementation doesn't
+    // appear to work under GCC, hence the multiple implementations.
+    #ifdef _MSC_VER
+
+      #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
+        {Eigen::internal::static_assertion<bool(CONDITION)>::MSG;}
+
+    #else
+
+      #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
+        if (Eigen::internal::static_assertion<bool(CONDITION)>::MSG) {}
+
+    #endif
+
+  #endif // not CXX0X
+
+#else // EIGEN_NO_STATIC_ASSERT
+
+  #define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);
+
+#endif // EIGEN_NO_STATIC_ASSERT
+
+
+// static assertion failing if the type \a TYPE is not a vector type
+#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \
+  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime, \
+                      YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX)
+
+// static assertion failing if the type \a TYPE is not fixed-size
+#define EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) \
+  EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime!=Eigen::Dynamic, \
+                      YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR)
+
+// static assertion failing if the type \a TYPE is not dynamic-size
+#define EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) \
+  EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime==Eigen::Dynamic, \
+                      YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR)
+
+// static assertion failing if the type \a TYPE is not a vector type of the given size
+#define EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE) \
+  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime && TYPE::SizeAtCompileTime==SIZE, \
+                      THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE)
+
+// static assertion failing if the type \a TYPE is not a matrix type of the given size
+#define EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS) \
+  EIGEN_STATIC_ASSERT(TYPE::RowsAtCompileTime==ROWS && TYPE::ColsAtCompileTime==COLS, \
+                      THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE)
+
+// static assertion failing if the two vector expression types are not compatible (same fixed-size or dynamic size)
+#define EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) \
+  EIGEN_STATIC_ASSERT( \
+      (int(TYPE0::SizeAtCompileTime)==Eigen::Dynamic \
+    || int(TYPE1::SizeAtCompileTime)==Eigen::Dynamic \
+    || int(TYPE0::SizeAtCompileTime)==int(TYPE1::SizeAtCompileTime)),\
+    YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES)
+
+#define EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
+     ( \
+        (int(TYPE0::SizeAtCompileTime)==0 && int(TYPE1::SizeAtCompileTime)==0) \
+    || (\
+          (int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \
+      &&  (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime))\
+       ) \
+     )
+
+#ifdef EIGEN2_SUPPORT
+  #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
+    eigen_assert(!NumTraits<Scalar>::IsInteger);
+#else
+  #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
+    EIGEN_STATIC_ASSERT(!NumTraits<TYPE>::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
+#endif
+
+
+// static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes
+#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
+  EIGEN_STATIC_ASSERT( \
+     EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1),\
+    YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)
+
+#define EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) \
+      EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Dynamic) && \
+                          (TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Dynamic), \
+                          THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS)
+
+#define EIGEN_STATIC_ASSERT_LVALUE(Derived) \
+      EIGEN_STATIC_ASSERT(internal::is_lvalue<Derived>::value, \
+                          THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY)
+
+#define EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) \
+      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Derived>::XprKind, ArrayXpr>::value), \
+                          THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES)
+
+#define EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2) \
+      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Derived1>::XprKind, \
+                                             typename internal::traits<Derived2>::XprKind \
+                                            >::value), \
+                          YOU_CANNOT_MIX_ARRAYS_AND_MATRICES)
+
+
+#endif // EIGEN_STATIC_ASSERT_H
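The convenience macros above are what most Eigen code actually uses. A small sketch of how one of them, EIGEN_STATIC_ASSERT_VECTOR_ONLY, can guard a user-side template; firstCoeff is only an illustrative name, and Eigen's headers are assumed to be on the include path.

#include <Eigen/Dense>

template<typename Derived>
typename Derived::Scalar firstCoeff(const Eigen::MatrixBase<Derived>& v)
{
  // rejects anything that is not a compile-time vector
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return v.coeff(0);
}

int main()
{
  Eigen::Vector3f v(1.f, 2.f, 3.f);
  float x = firstCoeff(v);                          // fine: Vector3f is a vector
  // firstCoeff(Eigen::Matrix3f::Identity());       // YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX
  (void)x;
  return 0;
}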
diff --git a/resources/3rdParty/eigen/Eigen/src/Core/util/XprHelper.h b/resources/3rdParty/eigen/Eigen/src/Core/util/XprHelper.h
new file mode 100644
index 000000000..e6f8aaef8
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Core/util/XprHelper.h
@@ -0,0 +1,447 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_XPRHELPER_H
+#define EIGEN_XPRHELPER_H
+
+// just a workaround because GCC seems to not really like empty structs
+// FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
+// so currently we simply disable this optimization for gcc 4.3
+#if (defined __GNUG__) && !((__GNUC__==4) && (__GNUC_MINOR__==3))
+  #define EIGEN_EMPTY_STRUCT_CTOR(X) \
+    EIGEN_STRONG_INLINE X() {} \
+    EIGEN_STRONG_INLINE X(const X& ) {}
+#else
+  #define EIGEN_EMPTY_STRUCT_CTOR(X)
+#endif
+
+namespace Eigen {
+
+typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex;
+
+namespace internal {
+
+//classes inheriting no_assignment_operator don't generate a default operator=.
+class no_assignment_operator
+{
+  private:
+    no_assignment_operator& operator=(const no_assignment_operator&);
+};
+
+/** \internal return the index type with the largest number of bits */
+template<typename I1, typename I2>
+struct promote_index_type
+{
+  typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
+};
+
+/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
+  * can be accessed using value() and setValue().
+  * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
+  */
+template<typename T, int Value> class variable_if_dynamic
+{
+  public:
+    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic)
+    explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); assert(v == T(Value)); }
+    static T value() { return T(Value); }
+    void setValue(T) {}
+};
+
+template<typename T> class variable_if_dynamic<T, Dynamic>
+{
+    T m_value;
+    variable_if_dynamic() { assert(false); }
+  public:
+    explicit variable_if_dynamic(T value) : m_value(value) {}
+    T value() const { return m_value; }
+    void setValue(T value) { m_value = value; }
+};
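To make the two cases above concrete, here is a small sketch of how the fixed and the Dynamic specializations behave; it calls the internal class directly, which ordinary user code would not do, and assumes Eigen's headers are on the include path.

#include <Eigen/Core>
#include <cassert>

int main()
{
  using Eigen::internal::variable_if_dynamic;

  // Fixed case: nothing is stored, value() simply returns the template parameter.
  variable_if_dynamic<int, 3> fixed(3);   // the constructor merely asserts that 3 == 3
  assert(fixed.value() == 3);

  // Dynamic case: a real int member is stored and can be changed.
  variable_if_dynamic<int, Eigen::Dynamic> dyn(5);
  dyn.setValue(7);
  assert(dyn.value() == 7);
  return 0;
}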
+
+template<typename T> struct functor_traits
+{
+  enum
+  {
+    Cost = 10,
+    PacketAccess = false
+  };
+};
+
+template<typename T> struct packet_traits;
+
+template<typename T> struct unpacket_traits
+{
+  typedef T type;
+  enum {size=1};
+};
+
+template<typename _Scalar, int _Rows, int _Cols,
+         int _Options = AutoAlign |
+                          ( (_Rows==1 && _Cols!=1) ? RowMajor
+                          : (_Cols==1 && _Rows!=1) ? ColMajor
+                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+         int _MaxRows = _Rows,
+         int _MaxCols = _Cols
+> class make_proper_matrix_type
+{
+    enum {
+      IsColVector = _Cols==1 && _Rows!=1,
+      IsRowVector = _Rows==1 && _Cols!=1,
+      Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
+              : IsRowVector ? (_Options | RowMajor) & ~ColMajor
+              : _Options
+    };
+  public:
+    typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+class compute_matrix_flags
+{
+    enum {
+      row_major_bit = Options&RowMajor ? RowMajorBit : 0,
+      is_dynamic_size_storage = MaxRows==Dynamic || MaxCols==Dynamic,
+
+      aligned_bit =
+      (
+            ((Options&DontAlign)==0)
+        && (
+#if EIGEN_ALIGN_STATICALLY
+             ((!is_dynamic_size_storage) && (((MaxCols*MaxRows*int(sizeof(Scalar))) % 16) == 0))
+#else
+             0
+#endif
+
+          ||
+
+#if EIGEN_ALIGN
+             is_dynamic_size_storage
+#else
+             0
+#endif
+
+          )
+      ) ? AlignedBit : 0,
+      packet_access_bit = packet_traits<Scalar>::Vectorizable && aligned_bit ? PacketAccessBit : 0
+    };
+
+  public:
+    enum { ret = LinearAccessBit | LvalueBit | DirectAccessBit | NestByRefBit | packet_access_bit | row_major_bit | aligned_bit };
+};
+
+template<int _Rows, int _Cols> struct size_at_compile_time
+{
+  enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
+};
+
+/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
+ * whereas eval is a const reference in the case of a matrix
+ */
+
+template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
+template<typename T, typename BaseClassType> struct plain_matrix_type_dense;
+template<typename T> struct plain_matrix_type<T,Dense>
+{
+  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind>::type type;
+};
+
+template<typename T> struct plain_matrix_type_dense<T,MatrixXpr>
+{
+  typedef Matrix<typename traits<T>::Scalar,
+                traits<T>::RowsAtCompileTime,
+                traits<T>::ColsAtCompileTime,
+                AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+                traits<T>::MaxRowsAtCompileTime,
+                traits<T>::MaxColsAtCompileTime
+          > type;
+};
+
+template<typename T> struct plain_matrix_type_dense<T,ArrayXpr>
+{
+  typedef Array<typename traits<T>::Scalar,
+                traits<T>::RowsAtCompileTime,
+                traits<T>::ColsAtCompileTime,
+                AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+                traits<T>::MaxRowsAtCompileTime,
+                traits<T>::MaxColsAtCompileTime
+          > type;
+};
+
+/* eval : the return type of eval(). For matrices, this is just a const reference
+ * in order to avoid a useless copy
+ */
+
+template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;
+
+template<typename T> struct eval<T,Dense>
+{
+  typedef typename plain_matrix_type<T>::type type;
+//   typedef typename T::PlainObject type;
+//   typedef T::Matrix<typename traits<T>::Scalar,
+//                 traits<T>::RowsAtCompileTime,
+//                 traits<T>::ColsAtCompileTime,
+//                 AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+//                 traits<T>::MaxRowsAtCompileTime,
+//                 traits<T>::MaxColsAtCompileTime
+//           > type;
+};
+
+// for matrices, no need to evaluate, just use a const reference to avoid a useless copy
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
+{
+  typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
+};
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
+{
+  typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
+};
+
+
+
+/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
+ */
+template<typename T> struct plain_matrix_type_column_major
+{
+  enum { Rows = traits<T>::RowsAtCompileTime,
+         Cols = traits<T>::ColsAtCompileTime,
+         MaxRows = traits<T>::MaxRowsAtCompileTime,
+         MaxCols = traits<T>::MaxColsAtCompileTime
+  };
+  typedef Matrix<typename traits<T>::Scalar,
+                Rows,
+                Cols,
+                (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
+                MaxRows,
+                MaxCols
+          > type;
+};
+
+/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
+ */
+template<typename T> struct plain_matrix_type_row_major
+{
+  enum { Rows = traits<T>::RowsAtCompileTime,
+         Cols = traits<T>::ColsAtCompileTime,
+         MaxRows = traits<T>::MaxRowsAtCompileTime,
+         MaxCols = traits<T>::MaxColsAtCompileTime
+  };
+  typedef Matrix<typename traits<T>::Scalar,
+                Rows,
+                Cols,
+                (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
+                MaxRows,
+                MaxCols
+          > type;
+};
+
+// we should be able to get rid of this one too
+template<typename T> struct must_nest_by_value { enum { ret = false }; };
+
+/** \internal The reference selector for template expressions. The idea is that we don't
+  * need to use references for expressions since they are lightweight proxy
+  * objects which should generate no copying overhead. */
+template <typename T>
+struct ref_selector
+{
+  typedef typename conditional<
+    bool(traits<T>::Flags & NestByRefBit),
+    T const&,
+    const T
+  >::type type;
+};
+
+/** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */
+template<typename T1, typename T2>
+struct transfer_constness
+{
+  typedef typename conditional<
+    bool(internal::is_const<T1>::value),
+    typename internal::add_const_on_value_type<T2>::type,
+    T2
+  >::type type;
+};
+
+/** \internal Determines how a given expression should be nested into another one.
+  * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
+  * nested into the bigger product expression. The choice is between nesting the expression b+c as-is, or
+  * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is
+  * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
+  * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
+  *
+  * \param T the type of the expression being nested
+  * \param n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
+  *
+  * Note that if no evaluation occurs, then the constness of T is preserved.
+  *
+  * Example. Suppose that a, b, and c are of type Matrix3d. The user forms the expression a*(b+c).
+  * b+c is an expression "sum of matrices", which we will denote by S. In order to determine how to nest it,
+  * the Product expression uses: nested<S, 3>::type, which turns out to be Matrix3d because the internal logic of
+  * nested determined that in this case it was better to evaluate the expression b+c into a temporary. On the other hand,
+  * since a is of type Matrix3d, the Product expression nests it as nested<Matrix3d, 3>::type, which turns out to be
+  * const Matrix3d&, because the internal logic of nested determined that since a was already a matrix, there was no point
+  * in copying it into another matrix.
+  */
+template<typename T, int n=1, typename PlainObject = typename eval<T>::type> struct nested
+{
+  enum {
+    // for the purpose of this test, to keep it reasonably simple, we arbitrarily choose a finite stand-in value for Dynamic.
+    // the choice of 10000 makes it larger than any practical fixed value and even most dynamic values.
+    // in extreme cases where these assumptions would be wrong, we would still at worst suffer performance issues
+    // (poor choice of temporaries).
+    // it's important that this value can still be squared without integer overflowing.
+    DynamicAsInteger = 10000,
+    ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
+    ScalarReadCostAsInteger = ScalarReadCost == Dynamic ? int(DynamicAsInteger) : int(ScalarReadCost),
+    CoeffReadCost = traits<T>::CoeffReadCost,
+    CoeffReadCostAsInteger = CoeffReadCost == Dynamic ? int(DynamicAsInteger) : int(CoeffReadCost),
+    NAsInteger = n == Dynamic ? int(DynamicAsInteger) : n,
+    CostEvalAsInteger   = (NAsInteger+1) * ScalarReadCostAsInteger + CoeffReadCostAsInteger,
+    CostNoEvalAsInteger = NAsInteger * CoeffReadCostAsInteger
+  };
+
+  typedef typename conditional<
+      ( (int(traits<T>::Flags) & EvalBeforeNestingBit) ||
+        int(CostEvalAsInteger) < int(CostNoEvalAsInteger)
+      ),
+      PlainObject,
+      typename ref_selector<T>::type
+  >::type type;
+};
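The worked example from the documentation above, written out as user code; the nesting decisions described in the comments follow the cost heuristic sketched in the enum and are internal details that may differ between Eigen versions.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d a = Eigen::Matrix3d::Random(),
                  b = Eigen::Matrix3d::Random(),
                  c = Eigen::Matrix3d::Random();

  // Behind the scenes the product queries internal::nested<> on each operand:
  //  - the sum b+c is evaluated into a temporary Matrix3d, since the product
  //    would otherwise re-read every coefficient of the sum several times;
  //  - a is nested as const Matrix3d&, i.e. without any copy.
  Eigen::Matrix3d d = a * (b + c);
  std::cout << d << std::endl;
  return 0;
}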
+
+template<typename T>
+T* const_cast_ptr(const T* ptr)
+{
+  return const_cast<T*>(ptr);
+}
+
+template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
+struct dense_xpr_base
+{
+  /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
+};
+
+template<typename Derived>
+struct dense_xpr_base<Derived, MatrixXpr>
+{
+  typedef MatrixBase<Derived> type;
+};
+
+template<typename Derived>
+struct dense_xpr_base<Derived, ArrayXpr>
+{
+  typedef ArrayBase<Derived> type;
+};
+
+/** \internal Helper base class to add a scalar multiple operator
+  * overloads for complex types */
+template<typename Derived,typename Scalar,typename OtherScalar,
+         bool EnableIt = !is_same<Scalar,OtherScalar>::value >
+struct special_scalar_op_base : public DenseCoeffsBase<Derived>
+{
+  // dummy operator* so that the
+  // "using special_scalar_op_base::operator*" compiles
+  void operator*() const;
+};
+
+template<typename Derived,typename Scalar,typename OtherScalar>
+struct special_scalar_op_base<Derived,Scalar,OtherScalar,true>  : public DenseCoeffsBase<Derived>
+{
+  const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+  operator*(const OtherScalar& scalar) const
+  {
+    return CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+      (*static_cast<const Derived*>(this), scalar_multiple2_op<Scalar,OtherScalar>(scalar));
+  }
+
+  inline friend const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+  operator*(const OtherScalar& scalar, const Derived& matrix)
+  { return static_cast<const special_scalar_op_base&>(matrix).operator*(scalar); }
+};
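What this base class buys in practice is mixed real/complex scaling; a minimal sketch, assuming Eigen's headers are on the include path.

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXcf m = Eigen::MatrixXcf::Random(2, 2);

  // Scalar is std::complex<float>, OtherScalar is float: the extra operator*
  // overloads added above scale each complex coefficient directly by the real
  // factor (via scalar_multiple2_op) instead of promoting 2.0f to a complex scalar.
  Eigen::MatrixXcf a = 2.0f * m;
  Eigen::MatrixXcf b = m * 2.0f;
  return 0;
}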
+
+template<typename XprType, typename CastType> struct cast_return_type
+{
+  typedef typename XprType::Scalar CurrentScalarType;
+  typedef typename remove_all<CastType>::type _CastType;
+  typedef typename _CastType::Scalar NewScalarType;
+  typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
+                              const XprType&,CastType>::type type;
+};
+
+template <typename A, typename B> struct promote_storage_type;
+
+template <typename A> struct promote_storage_type<A,A>
+{
+  typedef A ret;
+};
+
+/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
+  * \param Scalar optional parameter allowing to pass a different scalar type than the one of the MatrixType.
+  */
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_row_type
+{
+  typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
+                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
+  typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
+                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;
+
+  typedef typename conditional<
+    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+    MatrixRowType,
+    ArrayRowType 
+  >::type type;
+};
+
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_col_type
+{
+  typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
+                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
+  typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
+                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;
+
+  typedef typename conditional<
+    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+    MatrixColType,
+    ArrayColType 
+  >::type type;
+};
+
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_diag_type
+{
+  enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
+         max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
+  };
+  typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
+  typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;
+
+  typedef typename conditional<
+    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+    MatrixDiagType,
+    ArrayDiagType 
+  >::type type;
+};
+
+template<typename ExpressionType>
+struct is_lvalue
+{
+  enum { value = !bool(is_const<ExpressionType>::value) &&
+                 bool(traits<ExpressionType>::Flags & LvalueBit) };
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_XPRHELPER_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Block.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Block.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/Block.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/Block.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Cwise.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Cwise.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/Cwise.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/Cwise.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/CwiseOperators.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/CwiseOperators.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/CwiseOperators.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/CwiseOperators.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
new file mode 100644
index 000000000..5c928e8fc
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
@@ -0,0 +1,159 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  * \nonstableyet
+  *
+  * \class AlignedBox
+  *
+  * \brief An axis aligned box
+  *
+  * \param _Scalar the type of the scalar coefficients
+  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  *
+  * This class represents an axis aligned box as a pair of the minimal and maximal corners.
+  */
+template <typename _Scalar, int _AmbientDim>
+class AlignedBox
+{
+public:
+EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
+  enum { AmbientDimAtCompileTime = _AmbientDim };
+  typedef _Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+
+  /** Default constructor initializing a null box. */
+  inline explicit AlignedBox()
+  { if (AmbientDimAtCompileTime!=Dynamic) setNull(); }
+
+  /** Constructs a null box with \a _dim the dimension of the ambient space. */
+  inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim)
+  { setNull(); }
+
+  /** Constructs a box with extremities \a _min and \a _max. */
+  inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_min(_min), m_max(_max) {}
+
+  /** Constructs a box containing a single point \a p. */
+  inline explicit AlignedBox(const VectorType& p) : m_min(p), m_max(p) {}
+
+  ~AlignedBox() {}
+
+  /** \returns the dimension of the ambient space in which the box lives */
+  inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; }
+
+  /** \returns true if the box is null, i.e., empty. */
+  inline bool isNull() const { return (m_min.cwise() > m_max).any(); }
+
+  /** Makes \c *this a null/empty box. */
+  inline void setNull()
+  {
+    m_min.setConstant( (std::numeric_limits<Scalar>::max)());
+    m_max.setConstant(-(std::numeric_limits<Scalar>::max)());
+  }
+
+  /** \returns the minimal corner */
+  inline const VectorType& (min)() const { return m_min; }
+  /** \returns a non const reference to the minimal corner */
+  inline VectorType& (min)() { return m_min; }
+  /** \returns the maximal corner */
+  inline const VectorType& (max)() const { return m_max; }
+  /** \returns a non const reference to the maximal corner */
+  inline VectorType& (max)() { return m_max; }
+
+  /** \returns true if the point \a p is inside the box \c *this. */
+  inline bool contains(const VectorType& p) const
+  { return (m_min.cwise()<=p).all() && (p.cwise()<=m_max).all(); }
+
+  /** \returns true if the box \a b is entirely inside the box \c *this. */
+  inline bool contains(const AlignedBox& b) const
+  { return (m_min.cwise()<=(b.min)()).all() && ((b.max)().cwise()<=m_max).all(); }
+
+  /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
+  inline AlignedBox& extend(const VectorType& p)
+  { m_min = (m_min.cwise().min)(p); m_max = (m_max.cwise().max)(p); return *this; }
+
+  /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
+  inline AlignedBox& extend(const AlignedBox& b)
+  { m_min = (m_min.cwise().min)(b.m_min); m_max = (m_max.cwise().max)(b.m_max); return *this; }
+
+  /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
+  inline AlignedBox& clamp(const AlignedBox& b)
+  { m_min = (m_min.cwise().max)(b.m_min); m_max = (m_max.cwise().min)(b.m_max); return *this; }
+
+  /** Translate \c *this by the vector \a t and returns a reference to \c *this. */
+  inline AlignedBox& translate(const VectorType& t)
+  { m_min += t; m_max += t; return *this; }
+
+  /** \returns the squared distance between the point \a p and the box \c *this,
+    * and zero if \a p is inside the box.
+    * \sa exteriorDistance()
+    */
+  inline Scalar squaredExteriorDistance(const VectorType& p) const;
+
+  /** \returns the distance between the point \a p and the box \c *this,
+    * and zero if \a p is inside the box.
+    * \sa squaredExteriorDistance()
+    */
+  inline Scalar exteriorDistance(const VectorType& p) const
+  { return ei_sqrt(squaredExteriorDistance(p)); }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<AlignedBox,
+           AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
+  {
+    return typename internal::cast_return_type<AlignedBox,
+                    AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
+  {
+    m_min = (other.min)().template cast<Scalar>();
+    m_max = (other.max)().template cast<Scalar>();
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const AlignedBox& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }
+
+protected:
+
+  VectorType m_min, m_max;
+};
+
+template<typename Scalar,int AmbiantDim>
+inline Scalar AlignedBox<Scalar,AmbiantDim>::squaredExteriorDistance(const VectorType& p) const
+{
+  Scalar dist2(0);
+  Scalar aux;
+  for (int k=0; k<dim(); ++k)
+  {
+    if ((aux = (p[k]-m_min[k]))<Scalar(0))
+      dist2 += aux*aux;
+    else if ( (aux = (m_max[k]-p[k]))<Scalar(0))
+      dist2 += aux*aux;
+  }
+  return dist2;
+}
+
+} // end namespace Eigen
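A minimal usage sketch of the AlignedBox interface above (the same calls also exist on the regular Eigen 3 AlignedBox), assuming Eigen's headers are on the include path.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  AlignedBox<float,3> box(Vector3f(0,0,0), Vector3f(1,1,1));   // min and max corners
  box.extend(Vector3f(2.f, 0.5f, 0.5f));                       // grow the box to contain this point

  std::cout << box.contains(Vector3f(1.5f, 0.5f, 0.5f)) << "\n";         // 1: inside after extend()
  std::cout << box.exteriorDistance(Vector3f(3.f, 0.5f, 0.5f)) << "\n";  // 1: one unit outside along x
  return 0;
}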
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/All.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/All.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/All.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/All.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/AngleAxis.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
new file mode 100644
index 000000000..20f1fceeb
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
@@ -0,0 +1,214 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class AngleAxis
+  *
+  * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  *
+  * The following two typedefs are provided for convenience:
+  * \li \c AngleAxisf for \c float
+  * \li \c AngleAxisd for \c double
+  *
+  * \addexample AngleAxisForEuler \label How to define a rotation from Euler-angles
+  *
+  * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily
+  * mimic Euler-angles. Here is an example:
+  * \include AngleAxis_mimic_euler.cpp
+  * Output: \verbinclude AngleAxis_mimic_euler.out
+  *
+  * \note This class is not meant to be used to store a rotation transformation,
+  * but rather to make it easier to create other rotation objects
+  * (Quaternion, rotation Matrix) and transformation objects.
+  *
+  * \sa class Quaternion, class Transform, MatrixBase::UnitX()
+  */
+
+template<typename _Scalar> struct ei_traits<AngleAxis<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+
+template<typename _Scalar>
+class AngleAxis : public RotationBase<AngleAxis<_Scalar>,3>
+{
+  typedef RotationBase<AngleAxis<_Scalar>,3> Base;
+
+public:
+
+  using Base::operator*;
+
+  enum { Dim = 3 };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef Matrix<Scalar,3,3> Matrix3;
+  typedef Matrix<Scalar,3,1> Vector3;
+  typedef Quaternion<Scalar> QuaternionType;
+
+protected:
+
+  Vector3 m_axis;
+  Scalar m_angle;
+
+public:
+
+  /** Default constructor without initialization. */
+  AngleAxis() {}
+  /** Constructs and initializes the angle-axis rotation from an \a angle in radians
+    * and an \a axis which must be normalized. */
+  template<typename Derived>
+  inline AngleAxis(Scalar angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
+  /** Constructs and initializes the angle-axis rotation from a quaternion \a q. */
+  inline AngleAxis(const QuaternionType& q) { *this = q; }
+  /** Constructs and initializes the angle-axis rotation from a 3x3 rotation matrix. */
+  template<typename Derived>
+  inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }
+
+  Scalar angle() const { return m_angle; }
+  Scalar& angle() { return m_angle; }
+
+  const Vector3& axis() const { return m_axis; }
+  Vector3& axis() { return m_axis; }
+
+  /** Concatenates two rotations */
+  inline QuaternionType operator* (const AngleAxis& other) const
+  { return QuaternionType(*this) * QuaternionType(other); }
+
+  /** Concatenates two rotations */
+  inline QuaternionType operator* (const QuaternionType& other) const
+  { return QuaternionType(*this) * other; }
+
+  /** Concatenates two rotations */
+  friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b)
+  { return a * QuaternionType(b); }
+
+  /** Concatenates two rotations */
+  inline Matrix3 operator* (const Matrix3& other) const
+  { return toRotationMatrix() * other; }
+
+  /** Concatenates two rotations */
+  inline friend Matrix3 operator* (const Matrix3& a, const AngleAxis& b)
+  { return a * b.toRotationMatrix(); }
+
+  /** Applies rotation to vector */
+  inline Vector3 operator* (const Vector3& other) const
+  { return toRotationMatrix() * other; }
+
+  /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */
+  AngleAxis inverse() const
+  { return AngleAxis(-m_angle, m_axis); }
+
+  AngleAxis& operator=(const QuaternionType& q);
+  template<typename Derived>
+  AngleAxis& operator=(const MatrixBase<Derived>& m);
+
+  template<typename Derived>
+  AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m);
+  Matrix3 toRotationMatrix(void) const;
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit AngleAxis(const AngleAxis<OtherScalarType>& other)
+  {
+    m_axis = other.axis().template cast<Scalar>();
+    m_angle = Scalar(other.angle());
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const AngleAxis& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_axis.isApprox(other.m_axis, prec) && ei_isApprox(m_angle,other.m_angle, prec); }
+};
+
+/** \ingroup Geometry_Module
+  * single precision angle-axis type */
+typedef AngleAxis<float> AngleAxisf;
+/** \ingroup Geometry_Module
+  * double precision angle-axis type */
+typedef AngleAxis<double> AngleAxisd;
+
+/** Set \c *this from a quaternion.
+  * The axis is normalized.
+  */
+template<typename Scalar>
+AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionType& q)
+{
+  Scalar n2 = q.vec().squaredNorm();
+  if (n2 < precision<Scalar>()*precision<Scalar>())
+  {
+    m_angle = 0;
+    m_axis << 1, 0, 0;
+  }
+  else
+  {
+    m_angle = 2*std::acos(q.w());
+    m_axis = q.vec() / ei_sqrt(n2);
+  }
+  return *this;
+}
+
+/** Set \c *this from a 3x3 rotation matrix \a mat.
+  */
+template<typename Scalar>
+template<typename Derived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)
+{
+  // Since a direct conversion would not be really faster,
+  // let's use the robust Quaternion implementation:
+  return *this = QuaternionType(mat);
+}
+
+/** Constructs and \returns an equivalent 3x3 rotation matrix.
+  */
+template<typename Scalar>
+typename AngleAxis<Scalar>::Matrix3
+AngleAxis<Scalar>::toRotationMatrix(void) const
+{
+  Matrix3 res;
+  Vector3 sin_axis  = ei_sin(m_angle) * m_axis;
+  Scalar c = ei_cos(m_angle);
+  Vector3 cos1_axis = (Scalar(1)-c) * m_axis;
+
+  Scalar tmp;
+  tmp = cos1_axis.x() * m_axis.y();
+  res.coeffRef(0,1) = tmp - sin_axis.z();
+  res.coeffRef(1,0) = tmp + sin_axis.z();
+
+  tmp = cos1_axis.x() * m_axis.z();
+  res.coeffRef(0,2) = tmp + sin_axis.y();
+  res.coeffRef(2,0) = tmp - sin_axis.y();
+
+  tmp = cos1_axis.y() * m_axis.z();
+  res.coeffRef(1,2) = tmp - sin_axis.x();
+  res.coeffRef(2,1) = tmp + sin_axis.x();
+
+  res.diagonal() = (cos1_axis.cwise() * m_axis).cwise() + c;
+
+  return res;
+}
+
+} // end namespace Eigen
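A short sketch of the Euler-angle style composition mentioned in the class documentation, assuming Eigen's headers are on the include path; the angles are arbitrary.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  const float pi = 3.14159265f;

  // Compose three elementary rotations around the unit axes, then store the
  // result as a plain rotation matrix.
  Matrix3f m;
  m = AngleAxisf(0.25f*pi, Vector3f::UnitX())
    * AngleAxisf(0.50f*pi, Vector3f::UnitY())
    * AngleAxisf(0.33f*pi, Vector3f::UnitZ());

  // Apply a single angle-axis rotation to a vector: a quarter turn around z
  // maps the x axis onto the y axis.
  Vector3f v = AngleAxisf(0.5f*pi, Vector3f::UnitZ()) * Vector3f::UnitX();

  std::cout << m << "\n" << v.transpose() << "\n";
  return 0;
}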
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
new file mode 100644
index 000000000..19cc1bfd8
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
@@ -0,0 +1,254 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Hyperplane
+  *
+  * \brief A hyperplane
+  *
+  * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n.
+  * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  *             Notice that the dimension of the hyperplane is _AmbientDim-1.
+  *
+  * This class represents a hyperplane as the zero set of the implicit equation
+  * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part)
+  * and \f$ d \f$ is the distance (offset) to the origin.
+  */
+template <typename _Scalar, int _AmbientDim>
+class Hyperplane
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
+  enum { AmbientDimAtCompileTime = _AmbientDim };
+  typedef _Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+  typedef Matrix<Scalar,int(AmbientDimAtCompileTime)==Dynamic
+                        ? Dynamic
+                        : int(AmbientDimAtCompileTime)+1,1> Coefficients;
+  typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
+
+  /** Default constructor without initialization */
+  inline explicit Hyperplane() {}
+
+  /** Constructs a dynamic-size hyperplane with \a _dim the dimension
+    * of the ambient space */
+  inline explicit Hyperplane(int _dim) : m_coeffs(_dim+1) {}
+
+  /** Constructs a plane from its normal \a n and a point \a e on the plane.
+    * \warning the vector normal is assumed to be normalized.
+    */
+  inline Hyperplane(const VectorType& n, const VectorType& e)
+    : m_coeffs(n.size()+1)
+  {
+    normal() = n;
+    offset() = -e.eigen2_dot(n);
+  }
+
+  /** Constructs a plane from its normal \a n and distance to the origin \a d
+    * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$.
+    * \warning the vector normal is assumed to be normalized.
+    */
+  inline Hyperplane(const VectorType& n, Scalar d)
+    : m_coeffs(n.size()+1)
+  {
+    normal() = n;
+    offset() = d;
+  }
+
+  /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space
+    * is greater than 2, the hyperplane is not uniquely determined, so an arbitrary choice is made.
+    */
+  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1)
+  {
+    Hyperplane result(p0.size());
+    result.normal() = (p1 - p0).unitOrthogonal();
+    result.offset() = -result.normal().eigen2_dot(p0);
+    return result;
+  }
+
+  /** Constructs a hyperplane passing through the three points. The dimension of the ambient space
+    * is required to be exactly 3.
+    */
+  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2)
+  {
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
+    Hyperplane result(p0.size());
+    result.normal() = (p2 - p0).cross(p1 - p0).normalized();
+    result.offset() = -result.normal().eigen2_dot(p0);
+    return result;
+  }
+
+  /** Constructs a hyperplane passing through the parametrized line \a parametrized.
+    * If the dimension of the ambient space is greater than 2, the hyperplane is not
+    * uniquely determined, so an arbitrary choice is made.
+    */
+  // FIXME to be consistent with the rest this could be implemented as a static Through function ??
+  explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
+  {
+    normal() = parametrized.direction().unitOrthogonal();
+    offset() = -normal().eigen2_dot(parametrized.origin());
+  }
+
+  ~Hyperplane() {}
+
+  /** \returns the dimension of the ambient space in which the plane lives */
+  inline int dim() const { return int(AmbientDimAtCompileTime)==Dynamic ? m_coeffs.size()-1 : int(AmbientDimAtCompileTime); }
+
+  /** normalizes \c *this */
+  void normalize(void)
+  {
+    m_coeffs /= normal().norm();
+  }
+
+  /** \returns the signed distance between the plane \c *this and a point \a p.
+    * \sa absDistance()
+    */
+  inline Scalar signedDistance(const VectorType& p) const { return p.eigen2_dot(normal()) + offset(); }
+
+  /** \returns the absolute distance between the plane \c *this and a point \a p.
+    * \sa signedDistance()
+    */
+  inline Scalar absDistance(const VectorType& p) const { return ei_abs(signedDistance(p)); }
+
+  /** \returns the projection of a point \a p onto the plane \c *this.
+    */
+  inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); }
+
+  /** \returns a constant reference to the unit normal vector of the plane, which corresponds
+    * to the linear part of the implicit equation.
+    */
+  inline const NormalReturnType normal() const { return NormalReturnType(*const_cast<Coefficients*>(&m_coeffs),0,0,dim(),1); }
+
+  /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds
+    * to the linear part of the implicit equation.
+    */
+  inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); }
+
+  /** \returns the distance to the origin, which is also the "constant term" of the implicit equation
+    * \warning the vector normal is assumed to be normalized.
+    */
+  inline const Scalar& offset() const { return m_coeffs.coeff(dim()); }
+
+  /** \returns a non-constant reference to the distance to the origin, which is also the constant part
+    * of the implicit equation */
+  inline Scalar& offset() { return m_coeffs(dim()); }
+
+  /** \returns a constant reference to the coefficients c_i of the plane equation:
+    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+    */
+  inline const Coefficients& coeffs() const { return m_coeffs; }
+
+  /** \returns a non-constant reference to the coefficients c_i of the plane equation:
+    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+    */
+  inline Coefficients& coeffs() { return m_coeffs; }
+
+  /** \returns the intersection of *this with \a other.
+    *
+    * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines.
+    *
+    * \note If \a other is approximately parallel to *this, this method will return any point on *this.
+    */
+  VectorType intersection(const Hyperplane& other)
+  {
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
+    Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
+    // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests
+    // whether the two lines are approximately parallel.
+    if(ei_isMuchSmallerThan(det, Scalar(1)))
+    {   // special case where the two lines are approximately parallel. Pick any point on the first line.
+        if(ei_abs(coeffs().coeff(1))>ei_abs(coeffs().coeff(0)))
+            return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));
+        else
+            return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));
+    }
+    else
+    {   // general case
+        Scalar invdet = Scalar(1) / det;
+        return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)),
+                          invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2)));
+    }
+  }
+
+  /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this.
+    *
+    * \param mat the Dim x Dim transformation matrix
+    * \param traits specifies whether the matrix \a mat represents an Isometry
+    *               or a more generic Affine transformation. The default is Affine.
+    */
+  template<typename XprType>
+  inline Hyperplane& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine)
+  {
+    if (traits==Affine)
+      normal() = mat.inverse().transpose() * normal();
+    else if (traits==Isometry)
+      normal() = mat * normal();
+    else
+    {
+      ei_assert("invalid traits value in Hyperplane::transform()");
+    }
+    return *this;
+  }
+
+  /** Applies the transformation \a t to \c *this and returns a reference to \c *this.
+    *
+    * \param t the transformation of dimension Dim
+    * \param traits specifies whether the transformation \a t represents an Isometry
+    *               or a more generic Affine transformation. The default is Affine.
+    *               Other kind of transformations are not supported.
+    */
+  inline Hyperplane& transform(const Transform<Scalar,AmbientDimAtCompileTime>& t,
+                                TransformTraits traits = Affine)
+  {
+    transform(t.linear(), traits);
+    offset() -= t.translation().eigen2_dot(normal());
+    return *this;
+  }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Hyperplane,
+           Hyperplane<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
+  {
+    return typename internal::cast_return_type<Hyperplane,
+                    Hyperplane<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Hyperplane(const Hyperplane<OtherScalarType,AmbientDimAtCompileTime>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Hyperplane& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+protected:
+
+  Coefficients m_coeffs;
+};
+
+} // end namespace Eigen
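A minimal sketch of the Hyperplane interface documented above, assuming Eigen's headers are on the include path; the values in the comments follow directly from the n . x + d = 0 representation.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  typedef Hyperplane<float,3> Plane;

  // The plane z = 1, constructed through three of its points.
  Plane p = Plane::Through(Vector3f(0,0,1), Vector3f(1,0,1), Vector3f(0,1,1));

  Vector3f q(0.5f, 0.5f, 3.f);
  std::cout << p.absDistance(q) << "\n";             // 2
  std::cout << p.signedDistance(q) << "\n";          // -2 (sign depends on the normal chosen by Through)
  std::cout << p.projection(q).transpose() << "\n";  // 0.5 0.5 1
  return 0;
}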
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
new file mode 100644
index 000000000..6e4a168a8
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
@@ -0,0 +1,141 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class ParametrizedLine
+  *
+  * \brief A parametrized line
+  *
+  * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit
+  * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to
+  * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  */
+template <typename _Scalar, int _AmbientDim>
+class ParametrizedLine
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
+  enum { AmbientDimAtCompileTime = _AmbientDim };
+  typedef _Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+
+  /** Default constructor without initialization */
+  inline explicit ParametrizedLine() {}
+
+  /** Constructs a dynamic-size line with \a _dim the dimension
+    * of the ambient space */
+  inline explicit ParametrizedLine(int _dim) : m_origin(_dim), m_direction(_dim) {}
+
+  /** Initializes a parametrized line of direction \a direction and origin \a origin.
+    * \warning the vector direction is assumed to be normalized.
+    */
+  ParametrizedLine(const VectorType& origin, const VectorType& direction)
+    : m_origin(origin), m_direction(direction) {}
+
+  explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane);
+
+  /** Constructs a parametrized line going from \a p0 to \a p1. */
+  static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1)
+  { return ParametrizedLine(p0, (p1-p0).normalized()); }
+
+  ~ParametrizedLine() {}
+
+  /** \returns the dimension of the ambient space in which the line lives */
+  inline int dim() const { return m_direction.size(); }
+
+  const VectorType& origin() const { return m_origin; }
+  VectorType& origin() { return m_origin; }
+
+  const VectorType& direction() const { return m_direction; }
+  VectorType& direction() { return m_direction; }
+
+  /** \returns the squared distance of a point \a p to its projection onto the line \c *this.
+    * \sa distance()
+    */
+  RealScalar squaredDistance(const VectorType& p) const
+  {
+    VectorType diff = p-origin();
+    return (diff - diff.eigen2_dot(direction())* direction()).squaredNorm();
+  }
+  /** \returns the distance of a point \a p to its projection onto the line \c *this.
+    * \sa squaredDistance()
+    */
+  RealScalar distance(const VectorType& p) const { return ei_sqrt(squaredDistance(p)); }
+
+  /** \returns the projection of a point \a p onto the line \c *this. */
+  VectorType projection(const VectorType& p) const
+  { return origin() + (p-origin()).eigen2_dot(direction()) * direction(); }
+
+  Scalar intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane);
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<ParametrizedLine,
+           ParametrizedLine<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
+  {
+    return typename internal::cast_return_type<ParametrizedLine,
+                    ParametrizedLine<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit ParametrizedLine(const ParametrizedLine<OtherScalarType,AmbientDimAtCompileTime>& other)
+  {
+    m_origin = other.origin().template cast<Scalar>();
+    m_direction = other.direction().template cast<Scalar>();
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const ParametrizedLine& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }
+
+protected:
+
+  VectorType m_origin, m_direction;
+};
+
+/** Constructs a parametrized line from a 2D hyperplane
+  *
+  * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line
+  */
+template <typename _Scalar, int _AmbientDim>
+inline ParametrizedLine<_Scalar, _AmbientDim>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
+  direction() = hyperplane.normal().unitOrthogonal();
+  origin() = -hyperplane.normal()*hyperplane.offset();
+}
+
+/** \returns the parameter value of the intersection between \c *this and the given hyperplane
+  */
+template <typename _Scalar, int _AmbientDim>
+inline _Scalar ParametrizedLine<_Scalar, _AmbientDim>::intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane)
+{
+  return -(hyperplane.offset()+origin().eigen2_dot(hyperplane.normal()))
+          /(direction().eigen2_dot(hyperplane.normal()));
+}
+
+} // end namespace Eigen
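A minimal sketch tying ParametrizedLine and Hyperplane together, assuming Eigen's headers are on the include path; it builds the line l(t) = o + t d through two points and intersects it with the plane z = 1.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  ParametrizedLine<float,3> line =
      ParametrizedLine<float,3>::Through(Vector3f(0,0,0), Vector3f(0,0,2));
  Hyperplane<float,3> plane(Vector3f(0,0,1), -1.f);   // normal (0,0,1), offset -1  =>  z = 1

  float t = line.intersection(plane);                 // parameter value: 1
  std::cout << (line.origin() + t*line.direction()).transpose() << "\n";  // 0 0 1
  std::cout << line.distance(Vector3f(1,0,0)) << "\n";                    // distance to the line: 1
  return 0;
}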
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h
new file mode 100644
index 000000000..ec87da054
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h
@@ -0,0 +1,495 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+template<typename Other,
+         int OtherRows=Other::RowsAtCompileTime,
+         int OtherCols=Other::ColsAtCompileTime>
+struct ei_quaternion_assign_impl;
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Quaternion
+  *
+  * \brief The quaternion class used to represent 3D orientations and rotations
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  *
+  * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
+  * orientations and rotations of objects in three dimensions. Compared to other representations
+  * like Euler angles or 3x3 matrices, quaternions offer the following advantages:
+  * \li \b compact storage (4 scalars)
+  * \li \b efficient to compose (28 flops),
+  * \li \b stable spherical interpolation
+  *
+  * The following two typedefs are provided for convenience:
+  * \li \c Quaternionf for \c float
+  * \li \c Quaterniond for \c double
+  *
+  * \sa  class AngleAxis, class Transform
+  */
+
+template<typename _Scalar> struct ei_traits<Quaternion<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+
+template<typename _Scalar>
+class Quaternion : public RotationBase<Quaternion<_Scalar>,3>
+{
+  typedef RotationBase<Quaternion<_Scalar>,3> Base;
+
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,4)
+
+  using Base::operator*;
+
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+
+  /** the type of the Coefficients 4-vector */
+  typedef Matrix<Scalar, 4, 1> Coefficients;
+  /** the type of a 3D vector */
+  typedef Matrix<Scalar,3,1> Vector3;
+  /** the equivalent rotation matrix type */
+  typedef Matrix<Scalar,3,3> Matrix3;
+  /** the equivalent angle-axis type */
+  typedef AngleAxis<Scalar> AngleAxisType;
+
+  /** \returns the \c x coefficient */
+  inline Scalar x() const { return m_coeffs.coeff(0); }
+  /** \returns the \c y coefficient */
+  inline Scalar y() const { return m_coeffs.coeff(1); }
+  /** \returns the \c z coefficient */
+  inline Scalar z() const { return m_coeffs.coeff(2); }
+  /** \returns the \c w coefficient */
+  inline Scalar w() const { return m_coeffs.coeff(3); }
+
+  /** \returns a reference to the \c x coefficient */
+  inline Scalar& x() { return m_coeffs.coeffRef(0); }
+  /** \returns a reference to the \c y coefficient */
+  inline Scalar& y() { return m_coeffs.coeffRef(1); }
+  /** \returns a reference to the \c z coefficient */
+  inline Scalar& z() { return m_coeffs.coeffRef(2); }
+  /** \returns a reference to the \c w coefficient */
+  inline Scalar& w() { return m_coeffs.coeffRef(3); }
+
+  /** \returns a read-only vector expression of the imaginary part (x,y,z) */
+  inline const Block<const Coefficients,3,1> vec() const { return m_coeffs.template start<3>(); }
+
+  /** \returns a vector expression of the imaginary part (x,y,z) */
+  inline Block<Coefficients,3,1> vec() { return m_coeffs.template start<3>(); }
+
+  /** \returns a read-only vector expression of the coefficients (x,y,z,w) */
+  inline const Coefficients& coeffs() const { return m_coeffs; }
+
+  /** \returns a vector expression of the coefficients (x,y,z,w) */
+  inline Coefficients& coeffs() { return m_coeffs; }
+
+  /** Default constructor leaving the quaternion uninitialized. */
+  inline Quaternion() {}
+
+  /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from
+    * its four coefficients \a w, \a x, \a y and \a z.
+    *
+    * \warning Note the order of the arguments: the real \a w coefficient first,
+    * while internally the coefficients are stored in the following order:
+    * [\c x, \c y, \c z, \c w]
+    */
+  inline Quaternion(Scalar w, Scalar x, Scalar y, Scalar z)
+  { m_coeffs << x, y, z, w; }
+
+  /** Copy constructor */
+  inline Quaternion(const Quaternion& other) { m_coeffs = other.m_coeffs; }
+
+  /** Constructs and initializes a quaternion from the angle-axis \a aa */
+  explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; }
+
+  /** Constructs and initializes a quaternion from either:
+    *  - a rotation matrix expression,
+    *  - a 4D vector expression representing quaternion coefficients.
+    * \sa operator=(MatrixBase<Derived>)
+    */
+  template<typename Derived>
+  explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; }
+
+  Quaternion& operator=(const Quaternion& other);
+  Quaternion& operator=(const AngleAxisType& aa);
+  template<typename Derived>
+  Quaternion& operator=(const MatrixBase<Derived>& m);
+
+  /** \returns a quaternion representing an identity rotation
+    * \sa MatrixBase::Identity()
+    */
+  static inline Quaternion Identity() { return Quaternion(1, 0, 0, 0); }
+
+  /** \sa Quaternion::Identity(), MatrixBase::setIdentity()
+    */
+  inline Quaternion& setIdentity() { m_coeffs << 0, 0, 0, 1; return *this; }
+
+  /** \returns the squared norm of the quaternion's coefficients
+    * \sa Quaternion::norm(), MatrixBase::squaredNorm()
+    */
+  inline Scalar squaredNorm() const { return m_coeffs.squaredNorm(); }
+
+  /** \returns the norm of the quaternion's coefficients
+    * \sa Quaternion::squaredNorm(), MatrixBase::norm()
+    */
+  inline Scalar norm() const { return m_coeffs.norm(); }
+
+  /** Normalizes the quaternion \c *this
+    * \sa normalized(), MatrixBase::normalize() */
+  inline void normalize() { m_coeffs.normalize(); }
+  /** \returns a normalized version of \c *this
+    * \sa normalize(), MatrixBase::normalized() */
+  inline Quaternion normalized() const { return Quaternion(m_coeffs.normalized()); }
+
+  /** \returns the dot product of \c *this and \a other
+    * Geometrically speaking, the dot product of two unit quaternions
+    * corresponds to the cosine of half the angle between the two rotations.
+    * \sa angularDistance()
+    */
+  inline Scalar eigen2_dot(const Quaternion& other) const { return m_coeffs.eigen2_dot(other.m_coeffs); }
+
+  inline Scalar angularDistance(const Quaternion& other) const;
+
+  Matrix3 toRotationMatrix(void) const;
+
+  template<typename Derived1, typename Derived2>
+  Quaternion& setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
+
+  inline Quaternion operator* (const Quaternion& q) const;
+  inline Quaternion& operator*= (const Quaternion& q);
+
+  Quaternion inverse(void) const;
+  Quaternion conjugate(void) const;
+
+  Quaternion slerp(Scalar t, const Quaternion& other) const;
+
+  template<typename Derived>
+  Vector3 operator* (const MatrixBase<Derived>& vec) const;
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Quaternion,Quaternion<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<Quaternion,Quaternion<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Quaternion(const Quaternion<OtherScalarType>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Quaternion& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+protected:
+  Coefficients m_coeffs;
+};
+
+/** \ingroup Geometry_Module
+  * single precision quaternion type */
+typedef Quaternion<float> Quaternionf;
+/** \ingroup Geometry_Module
+  * double precision quaternion type */
+typedef Quaternion<double> Quaterniond;
+
+// Generic Quaternion * Quaternion product
+template<typename Scalar> inline Quaternion<Scalar>
+ei_quaternion_product(const Quaternion<Scalar>& a, const Quaternion<Scalar>& b)
+{
+  return Quaternion<Scalar>
+  (
+    a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),
+    a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),
+    a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),
+    a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x()
+  );
+}
+
+/** \returns the concatenation of two rotations as a quaternion-quaternion product */
+template <typename Scalar>
+inline Quaternion<Scalar> Quaternion<Scalar>::operator* (const Quaternion& other) const
+{
+  return ei_quaternion_product(*this,other);
+}
+
+/** \sa operator*(Quaternion) */
+template <typename Scalar>
+inline Quaternion<Scalar>& Quaternion<Scalar>::operator*= (const Quaternion& other)
+{
+  return (*this = *this * other);
+}
+
+/** Rotation of a vector by a quaternion.
+  * \remarks If the quaternion is used to rotate several points (>1)
+  * then it is much more efficient to first convert it to a 3x3 Matrix.
+  * Comparison of the operation cost for n transformations:
+  *   - Quaternion:    30n
+  *   - Via a Matrix3: 24 + 15n
+  */
+template <typename Scalar>
+template<typename Derived>
+inline typename Quaternion<Scalar>::Vector3
+Quaternion<Scalar>::operator* (const MatrixBase<Derived>& v) const
+{
+    // Note that this algorithm comes from the optimization by hand
+    // of the conversion to a Matrix followed by a Matrix/Vector product.
+    // It appears to be much faster than the common algorithm found
+    // in the literature (30 versus 39 flops). It also requires two
+    // Vector3 as temporaries.
+    Vector3 uv;
+    uv = 2 * this->vec().cross(v);
+    return v + this->w() * uv + this->vec().cross(uv);
+}
+
+template<typename Scalar>
+inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const Quaternion& other)
+{
+  m_coeffs = other.m_coeffs;
+  return *this;
+}
+
+/** Set \c *this from an angle-axis \a aa and returns a reference to \c *this
+  */
+template<typename Scalar>
+inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const AngleAxisType& aa)
+{
+  Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings
+  this->w() = ei_cos(ha);
+  this->vec() = ei_sin(ha) * aa.axis();
+  return *this;
+}
+
+/** Set \c *this from the expression \a xpr:
+  *   - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion
+  *   - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be a rotation matrix
+  *     and \a xpr is converted to a quaternion
+  */
+template<typename Scalar>
+template<typename Derived>
+inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const MatrixBase<Derived>& xpr)
+{
+  ei_quaternion_assign_impl<Derived>::run(*this, xpr.derived());
+  return *this;
+}
+
+/** Convert the quaternion to a 3x3 rotation matrix */
+template<typename Scalar>
+inline typename Quaternion<Scalar>::Matrix3
+Quaternion<Scalar>::toRotationMatrix(void) const
+{
+  // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!)
+  // if not inlined then the cost of the return by value is huge ~ +35%,
+  // however, not inlining this function is an order of magnitude slower, so
+  // it has to be inlined, and so the return by value is not an issue
+  Matrix3 res;
+
+  const Scalar tx  = Scalar(2)*this->x();
+  const Scalar ty  = Scalar(2)*this->y();
+  const Scalar tz  = Scalar(2)*this->z();
+  const Scalar twx = tx*this->w();
+  const Scalar twy = ty*this->w();
+  const Scalar twz = tz*this->w();
+  const Scalar txx = tx*this->x();
+  const Scalar txy = ty*this->x();
+  const Scalar txz = tz*this->x();
+  const Scalar tyy = ty*this->y();
+  const Scalar tyz = tz*this->y();
+  const Scalar tzz = tz*this->z();
+
+  res.coeffRef(0,0) = Scalar(1)-(tyy+tzz);
+  res.coeffRef(0,1) = txy-twz;
+  res.coeffRef(0,2) = txz+twy;
+  res.coeffRef(1,0) = txy+twz;
+  res.coeffRef(1,1) = Scalar(1)-(txx+tzz);
+  res.coeffRef(1,2) = tyz-twx;
+  res.coeffRef(2,0) = txz-twy;
+  res.coeffRef(2,1) = tyz+twx;
+  res.coeffRef(2,2) = Scalar(1)-(txx+tyy);
+
+  return res;
+}
+
+/** Sets *this to be a quaternion representing a rotation sending the vector \a a to the vector \a b.
+  *
+  * \returns a reference to *this.
+  *
+  * Note that the two input vectors do \b not have to be normalized.
+  */
+template<typename Scalar>
+template<typename Derived1, typename Derived2>
+inline Quaternion<Scalar>& Quaternion<Scalar>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
+{
+  Vector3 v0 = a.normalized();
+  Vector3 v1 = b.normalized();
+  Scalar c = v0.eigen2_dot(v1);
+
+  // if dot == 1, vectors are the same
+  if (ei_isApprox(c,Scalar(1)))
+  {
+    // set to identity
+    this->w() = 1; this->vec().setZero();
+    return *this;
+  }
+  // if dot == -1, vectors are opposites
+  if (ei_isApprox(c,Scalar(-1)))
+  {
+    this->vec() = v0.unitOrthogonal();
+    this->w() = 0;
+    return *this;
+  }
+
+  Vector3 axis = v0.cross(v1);
+  Scalar s = ei_sqrt((Scalar(1)+c)*Scalar(2));
+  Scalar invs = Scalar(1)/s;
+  this->vec() = axis * invs;
+  this->w() = s * Scalar(0.5);
+
+  return *this;
+}
+
+/** \returns the multiplicative inverse of \c *this
+  * Note that in most cases, i.e., if you simply want the opposite rotation,
+  * and/or the quaternion is normalized, then it is enough to use the conjugate.
+  *
+  * \sa Quaternion::conjugate()
+  */
+template <typename Scalar>
+inline Quaternion<Scalar> Quaternion<Scalar>::inverse() const
+{
+  // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite()  ??
+  Scalar n2 = this->squaredNorm();
+  if (n2 > 0)
+    return Quaternion(conjugate().coeffs() / n2);
+  else
+  {
+    // return an invalid result to flag the error
+    return Quaternion(Coefficients::Zero());
+  }
+}
+
+/** \returns the conjugate of \c *this, which is equal to the multiplicative inverse
+  * if the quaternion is normalized.
+  * The conjugate of a quaternion represents the opposite rotation.
+  *
+  * \sa Quaternion::inverse()
+  */
+template <typename Scalar>
+inline Quaternion<Scalar> Quaternion<Scalar>::conjugate() const
+{
+  return Quaternion(this->w(),-this->x(),-this->y(),-this->z());
+}
+
+/** \returns the angle (in radian) between two rotations
+  * \sa eigen2_dot()
+  */
+template <typename Scalar>
+inline Scalar Quaternion<Scalar>::angularDistance(const Quaternion& other) const
+{
+  double d = ei_abs(this->eigen2_dot(other));
+  if (d>=1.0)
+    return 0;
+  return Scalar(2) * std::acos(d);
+}
+
+/** \returns the spherical linear interpolation between the two quaternions
+  * \c *this and \a other at the parameter \a t
+  */
+template <typename Scalar>
+Quaternion<Scalar> Quaternion<Scalar>::slerp(Scalar t, const Quaternion& other) const
+{
+  static const Scalar one = Scalar(1) - machine_epsilon<Scalar>();
+  Scalar d = this->eigen2_dot(other);
+  Scalar absD = ei_abs(d);
+
+  Scalar scale0;
+  Scalar scale1;
+
+  if (absD>=one)
+  {
+    scale0 = Scalar(1) - t;
+    scale1 = t;
+  }
+  else
+  {
+    // theta is the angle between the 2 quaternions
+    Scalar theta = std::acos(absD);
+    Scalar sinTheta = ei_sin(theta);
+
+    scale0 = ei_sin( ( Scalar(1) - t ) * theta) / sinTheta;
+    scale1 = ei_sin( ( t * theta) ) / sinTheta;
+    if (d<0)
+      scale1 = -scale1;
+  }
+
+  return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
+}
+
+// set from a rotation matrix
+template<typename Other>
+struct ei_quaternion_assign_impl<Other,3,3>
+{
+  typedef typename Other::Scalar Scalar;
+  static inline void run(Quaternion<Scalar>& q, const Other& mat)
+  {
+    // This algorithm comes from  "Quaternion Calculus and Fast Animation",
+    // Ken Shoemake, 1987 SIGGRAPH course notes
+    Scalar t = mat.trace();
+    if (t > 0)
+    {
+      t = ei_sqrt(t + Scalar(1.0));
+      q.w() = Scalar(0.5)*t;
+      t = Scalar(0.5)/t;
+      q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t;
+      q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t;
+      q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t;
+    }
+    else
+    {
+      int i = 0;
+      if (mat.coeff(1,1) > mat.coeff(0,0))
+        i = 1;
+      if (mat.coeff(2,2) > mat.coeff(i,i))
+        i = 2;
+      int j = (i+1)%3;
+      int k = (j+1)%3;
+
+      t = ei_sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
+      q.coeffs().coeffRef(i) = Scalar(0.5) * t;
+      t = Scalar(0.5)/t;
+      q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t;
+      q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t;
+      q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t;
+    }
+  }
+};
+
+// set from a vector of coefficients assumed to be a quaternion
+template<typename Other>
+struct ei_quaternion_assign_impl<Other,4,1>
+{
+  typedef typename Other::Scalar Scalar;
+  static inline void run(Quaternion<Scalar>& q, const Other& vec)
+  {
+    q.coeffs() = vec;
+  }
+};
+
+} // end namespace Eigen
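Usage sketch (not part of the upstream file): a minimal example of the quaternion API documented above, written against the standard Eigen Quaternion, whose member names match the Eigen2-compatibility class apart from eigen2_dot.

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      using namespace Eigen;

      // Quaternion from an angle-axis rotation: 0.5 rad about the z axis.
      Quaterniond q(AngleAxisd(0.5, Vector3d::UnitZ()));

      // Rotating a single vector directly costs about 30 flops per point...
      Vector3d v = q * Vector3d::UnitX();

      // ...so for many points, convert once to a 3x3 matrix as the remark above suggests.
      Matrix3d R = q.toRotationMatrix();
      Vector3d w = R * Vector3d::UnitY();

      // Shortest-arc rotation mapping one direction onto another (inputs need not be normalized).
      Quaterniond s;
      s.setFromTwoVectors(Vector3d::UnitX(), Vector3d::UnitY());

      // Spherical interpolation halfway between the identity and q.
      Quaterniond h = Quaterniond::Identity().slerp(0.5, q);

      std::cout << v.transpose() << "\n" << w.transpose() << "\n"
                << s.coeffs().transpose() << "\n" << h.coeffs().transpose() << "\n";
      return 0;
    }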
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
new file mode 100644
index 000000000..3e02b7a4f
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
@@ -0,0 +1,145 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Rotation2D
+  *
+  * \brief Represents a rotation/orientation in a 2 dimensional space.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  *
+  * This class is equivalent to a single scalar representing a counter-clockwise rotation
+  * as a single angle in radians. It provides some additional features such as the automatic
+  * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar
+  * interface to Quaternion in order to facilitate the writing of generic algorithms
+  * dealing with rotations.
+  *
+  * \sa class Quaternion, class Transform
+  */
+template<typename _Scalar> struct ei_traits<Rotation2D<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+
+template<typename _Scalar>
+class Rotation2D : public RotationBase<Rotation2D<_Scalar>,2>
+{
+  typedef RotationBase<Rotation2D<_Scalar>,2> Base;
+
+public:
+
+  using Base::operator*;
+
+  enum { Dim = 2 };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef Matrix<Scalar,2,1> Vector2;
+  typedef Matrix<Scalar,2,2> Matrix2;
+
+protected:
+
+  Scalar m_angle;
+
+public:
+
+  /** Constructs a 2D counter-clockwise rotation from the angle \a a in radians. */
+  inline Rotation2D(Scalar a) : m_angle(a) {}
+
+  /** \returns the rotation angle */
+  inline Scalar angle() const { return m_angle; }
+
+  /** \returns a read-write reference to the rotation angle */
+  inline Scalar& angle() { return m_angle; }
+
+  /** \returns the inverse rotation */
+  inline Rotation2D inverse() const { return -m_angle; }
+
+  /** Concatenates two rotations */
+  inline Rotation2D operator*(const Rotation2D& other) const
+  { return m_angle + other.m_angle; }
+
+  /** Concatenates two rotations */
+  inline Rotation2D& operator*=(const Rotation2D& other)
+  { m_angle += other.m_angle; return *this; }
+
+  /** Applies the rotation to a 2D vector */
+  Vector2 operator* (const Vector2& vec) const
+  { return toRotationMatrix() * vec; }
+
+  template<typename Derived>
+  Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
+  Matrix2 toRotationMatrix(void) const;
+
+  /** \returns the spherical interpolation between \c *this and \a other using
+    * parameter \a t. It is in fact equivalent to a linear interpolation.
+    */
+  inline Rotation2D slerp(Scalar t, const Rotation2D& other) const
+  { return m_angle * (1-t) + other.angle() * t; }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Rotation2D(const Rotation2D<OtherScalarType>& other)
+  {
+    m_angle = Scalar(other.angle());
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Rotation2D& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return ei_isApprox(m_angle,other.m_angle, prec); }
+};
+
+/** \ingroup Geometry_Module
+  * single precision 2D rotation type */
+typedef Rotation2D<float> Rotation2Df;
+/** \ingroup Geometry_Module
+  * double precision 2D rotation type */
+typedef Rotation2D<double> Rotation2Dd;
+
+/** Set \c *this from a 2x2 rotation matrix \a mat.
+  * In other words, this function extracts the rotation angle
+  * from the rotation matrix.
+  */
+template<typename Scalar>
+template<typename Derived>
+Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
+{
+  EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_angle = ei_atan2(mat.coeff(1,0), mat.coeff(0,0));
+  return *this;
+}
+
+/** Constructs and \returns an equivalent 2x2 rotation matrix.
+  */
+template<typename Scalar>
+typename Rotation2D<Scalar>::Matrix2
+Rotation2D<Scalar>::toRotationMatrix(void) const
+{
+  Scalar sinA = ei_sin(m_angle);
+  Scalar cosA = ei_cos(m_angle);
+  return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
+}
+
+} // end namespace Eigen
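Usage sketch (not part of the upstream file): a short, hypothetical example of the Rotation2D interface above, using the standard Eigen Rotation2Dd typedef, which exposes the same members.

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      using namespace Eigen;

      Rotation2Dd r(0.25);                        // counter-clockwise rotation of 0.25 rad
      Vector2d v = r * Vector2d::UnitX();         // rotate a 2D vector
      Matrix2d M = r.toRotationMatrix();          // equivalent 2x2 rotation matrix

      Rotation2Dd back(0.0);
      back.fromRotationMatrix(M);                 // recover the angle from the matrix

      Rotation2Dd mid = r.slerp(0.5, Rotation2Dd(1.0));  // interpolates the angles linearly

      std::cout << v.transpose() << "  " << back.angle() << "  " << mid.angle() << "\n";
      return 0;
    }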
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h
new file mode 100644
index 000000000..78ad73b60
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h
@@ -0,0 +1,123 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+// this file aims to contain the various representations of rotation/orientation
+// in 2D and 3D space except Matrix and Quaternion.
+
+/** \class RotationBase
+  *
+  * \brief Common base class for compact rotation representations
+  *
+  * \param Derived is the derived type, i.e., a rotation type
+  * \param _Dim the dimension of the space
+  */
+template<typename Derived, int _Dim>
+class RotationBase
+{
+  public:
+    enum { Dim = _Dim };
+    /** the scalar type of the coefficients */
+    typedef typename ei_traits<Derived>::Scalar Scalar;
+    
+    /** corresponding linear transformation matrix type */
+    typedef Matrix<Scalar,Dim,Dim> RotationMatrixType;
+
+    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    inline Derived& derived() { return *static_cast<Derived*>(this); }
+
+    /** \returns an equivalent rotation matrix */
+    inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); }
+
+    /** \returns the inverse rotation */
+    inline Derived inverse() const { return derived().inverse(); }
+
+    /** \returns the concatenation of the rotation \c *this with a translation \a t */
+    inline Transform<Scalar,Dim> operator*(const Translation<Scalar,Dim>& t) const
+    { return toRotationMatrix() * t; }
+
+    /** \returns the concatenation of the rotation \c *this with a scaling \a s */
+    inline RotationMatrixType operator*(const Scaling<Scalar,Dim>& s) const
+    { return toRotationMatrix() * s; }
+
+    /** \returns the concatenation of the rotation \c *this with an affine transformation \a t */
+    inline Transform<Scalar,Dim> operator*(const Transform<Scalar,Dim>& t) const
+    { return toRotationMatrix() * t; }
+};
+
+/** \geometry_module
+  *
+  * Constructs a Dim x Dim rotation matrix from the rotation \a r
+  */
+template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
+template<typename OtherDerived>
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
+::Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
+{
+  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
+  *this = r.toRotationMatrix();
+}
+
+/** \geometry_module
+  *
+  * Set a Dim x Dim rotation matrix from the rotation \a r
+  */
+template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
+template<typename OtherDerived>
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>&
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
+::operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
+{
+  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
+  return *this = r.toRotationMatrix();
+}
+
+/** \internal
+  *
+  * Helper function to convert an arbitrary rotation object to a rotation matrix.
+  *
+  * \param Scalar the numeric type of the matrix coefficients
+  * \param Dim the dimension of the current space
+  *
+  * It returns a Dim x Dim fixed size matrix.
+  *
+  * Default specializations are provided for:
+  *   - any scalar type (2D),
+  *   - any matrix expression,
+  *   - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D)
+  *
+  * Currently ei_toRotationMatrix is only used by Transform.
+  *
+  * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis
+  */
+template<typename Scalar, int Dim>
+static inline Matrix<Scalar,2,2> ei_toRotationMatrix(const Scalar& s)
+{
+  EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return Rotation2D<Scalar>(s).toRotationMatrix();
+}
+
+template<typename Scalar, int Dim, typename OtherDerived>
+static inline Matrix<Scalar,Dim,Dim> ei_toRotationMatrix(const RotationBase<OtherDerived,Dim>& r)
+{
+  return r.toRotationMatrix();
+}
+
+template<typename Scalar, int Dim, typename OtherDerived>
+static inline const MatrixBase<OtherDerived>& ei_toRotationMatrix(const MatrixBase<OtherDerived>& mat)
+{
+  EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim,
+    YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return mat;
+}
+
+} // end namespace Eigen
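Usage sketch (not part of the upstream file): the Matrix constructor and assignment overloads above let any RotationBase-derived object be converted to a fixed-size rotation matrix. A minimal example against the standard Eigen types:

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      using namespace Eigen;

      Matrix3d R3;
      R3 = AngleAxisd(0.5, Vector3d::UnitZ());   // Matrix::operator=(RotationBase) in 3D

      Matrix2d R2;
      R2 = Rotation2Dd(0.25);                    // same mechanism in 2D

      std::cout << R3 << "\n\n" << R2 << "\n";
      return 0;
    }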
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h
new file mode 100644
index 000000000..a07c1c7c7
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h
@@ -0,0 +1,167 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Scaling
+  *
+  * \brief Represents a possibly non-uniform scaling transformation
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  * \param _Dim the dimension of the space, can be a compile time value or Dynamic
+  *
+  * \note This class is not meant to store a scaling transformation,
+  * but rather to simplify the construction and update of Transform objects.
+  *
+  * \sa class Translation, class Transform
+  */
+template<typename _Scalar, int _Dim>
+class Scaling
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)
+  /** dimension of the space */
+  enum { Dim = _Dim };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  /** corresponding vector type */
+  typedef Matrix<Scalar,Dim,1> VectorType;
+  /** corresponding linear transformation matrix type */
+  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
+  /** corresponding translation type */
+  typedef Translation<Scalar,Dim> TranslationType;
+  /** corresponding affine transformation type */
+  typedef Transform<Scalar,Dim> TransformType;
+
+protected:
+
+  VectorType m_coeffs;
+
+public:
+
+  /** Default constructor without initialization. */
+  Scaling() {}
+  /** Constructs and initializes a uniform scaling transformation */
+  explicit inline Scaling(const Scalar& s) { m_coeffs.setConstant(s); }
+  /** 2D only */
+  inline Scaling(const Scalar& sx, const Scalar& sy)
+  {
+    ei_assert(Dim==2);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+  }
+  /** 3D only */
+  inline Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz)
+  {
+    ei_assert(Dim==3);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+    m_coeffs.z() = sz;
+  }
+  /** Constructs and initializes the scaling transformation from a vector of scaling coefficients */
+  explicit inline Scaling(const VectorType& coeffs) : m_coeffs(coeffs) {}
+
+  const VectorType& coeffs() const { return m_coeffs; }
+  VectorType& coeffs() { return m_coeffs; }
+
+  /** Concatenates two scalings */
+  inline Scaling operator* (const Scaling& other) const
+  { return Scaling(coeffs().cwise() * other.coeffs()); }
+
+  /** Concatenates a scaling and a translation */
+  inline TransformType operator* (const TranslationType& t) const;
+
+  /** Concatenates a scaling and an affine transformation */
+  inline TransformType operator* (const TransformType& t) const;
+
+  /** Concatenates a scaling and a linear transformation matrix */
+  // TODO returns an expression
+  inline LinearMatrixType operator* (const LinearMatrixType& other) const
+  { return coeffs().asDiagonal() * other; }
+
+  /** Concatenates a linear transformation matrix and a scaling */
+  // TODO returns an expression
+  friend inline LinearMatrixType operator* (const LinearMatrixType& other, const Scaling& s)
+  { return other * s.coeffs().asDiagonal(); }
+
+  template<typename Derived>
+  inline LinearMatrixType operator*(const RotationBase<Derived,Dim>& r) const
+  { return *this * r.toRotationMatrix(); }
+
+  /** Applies scaling to vector */
+  inline VectorType operator* (const VectorType& other) const
+  { return coeffs().asDiagonal() * other; }
+
+  /** \returns the inverse scaling */
+  inline Scaling inverse() const
+  { return Scaling(coeffs().cwise().inverse()); }
+
+  inline Scaling& operator=(const Scaling& other)
+  {
+    m_coeffs = other.m_coeffs;
+    return *this;
+  }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Scaling,Scaling<NewScalarType,Dim> >::type cast() const
+  { return typename internal::cast_return_type<Scaling,Scaling<NewScalarType,Dim> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Scaling(const Scaling<OtherScalarType,Dim>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Scaling& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+};
+
+/** \addtogroup Geometry_Module */
+//@{
+typedef Scaling<float, 2> Scaling2f;
+typedef Scaling<double,2> Scaling2d;
+typedef Scaling<float, 3> Scaling3f;
+typedef Scaling<double,3> Scaling3d;
+//@}
+
+template<typename Scalar, int Dim>
+inline typename Scaling<Scalar,Dim>::TransformType
+Scaling<Scalar,Dim>::operator* (const TranslationType& t) const
+{
+  TransformType res;
+  res.matrix().setZero();
+  res.linear().diagonal() = coeffs();
+  res.translation() = m_coeffs.cwise() * t.vector();
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+template<typename Scalar, int Dim>
+inline typename Scaling<Scalar,Dim>::TransformType
+Scaling<Scalar,Dim>::operator* (const TransformType& t) const
+{
+  TransformType res = t;
+  res.prescale(m_coeffs);
+  return res;
+}
+
+} // end namespace Eigen
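Usage sketch (not part of the upstream file): the class above exists mainly to build Transform objects. The sketch below uses the current Eigen API, where Eigen::Scaling(...) produces a UniformScaling or diagonal matrix playing the same role; names and composition behavior are assumptions about that API rather than about the compatibility class in this patch.

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      using namespace Eigen;

      // Compose a translation with a uniform scaling into an affine transform.
      Affine3d t = Translation3d(1.0, 0.0, 0.0) * Scaling(2.0);
      Vector3d p = t * Vector3d(1.0, 1.0, 1.0);   // scale first, then translate: (3, 2, 2)

      // A non-uniform scaling applied directly to a vector.
      Vector3d q = Scaling(1.0, 2.0, 3.0) * Vector3d::Ones();  // (1, 2, 3)

      std::cout << p.transpose() << "\n" << q.transpose() << "\n";
      return 0;
    }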
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Transform.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Transform.h
new file mode 100644
index 000000000..dceb80203
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Transform.h
@@ -0,0 +1,786 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+// Note that we have to pass Dim and HDim because it is not allowed to use a template
+// parameter to define a template specialization. To be more precise, in the following
+// specializations, it is not allowed to use Dim+1 instead of HDim.
+template< typename Other,
+          int Dim,
+          int HDim,
+          int OtherRows=Other::RowsAtCompileTime,
+          int OtherCols=Other::ColsAtCompileTime>
+struct ei_transform_product_impl;
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Transform
+  *
+  * \brief Represents a homogeneous transformation in an N-dimensional space
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  * \param _Dim the dimension of the space
+  *
+  * The homography is internally represented and stored as a (Dim+1)^2 matrix which
+  * is available through the matrix() method.
+  *
+  * Conversion methods from/to Qt's QMatrix and QTransform are available if the
+  * preprocessor token EIGEN_QT_SUPPORT is defined.
+  *
+  * \sa class Matrix, class Quaternion
+  */
+template<typename _Scalar, int _Dim>
+class Transform
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))
+  enum {
+    Dim = _Dim,     ///< space dimension in which the transformation holds
+    HDim = _Dim+1   ///< size of a respective homogeneous vector
+  };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  /** type of the matrix used to represent the transformation */
+  typedef Matrix<Scalar,HDim,HDim> MatrixType;
+  /** type of the matrix used to represent the linear part of the transformation */
+  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
+  /** type of read/write reference to the linear part of the transformation */
+  typedef Block<MatrixType,Dim,Dim> LinearPart;
+  /** type of read/write reference to the linear part of the transformation */
+  typedef const Block<const MatrixType,Dim,Dim> ConstLinearPart;
+  /** type of a vector */
+  typedef Matrix<Scalar,Dim,1> VectorType;
+  /** type of a read/write reference to the translation part of the rotation */
+  typedef Block<MatrixType,Dim,1> TranslationPart;
+  /** type of a read/write reference to the translation part of the rotation */
+  typedef const Block<const MatrixType,Dim,1> ConstTranslationPart;
+  /** corresponding translation type */
+  typedef Translation<Scalar,Dim> TranslationType;
+  /** corresponding scaling transformation type */
+  typedef Scaling<Scalar,Dim> ScalingType;
+
+protected:
+
+  MatrixType m_matrix;
+
+public:
+
+  /** Default constructor without initialization of the coefficients. */
+  inline Transform() { }
+
+  inline Transform(const Transform& other)
+  {
+    m_matrix = other.m_matrix;
+  }
+
+  inline explicit Transform(const TranslationType& t) { *this = t; }
+  inline explicit Transform(const ScalingType& s) { *this = s; }
+  template<typename Derived>
+  inline explicit Transform(const RotationBase<Derived, Dim>& r) { *this = r; }
+
+  inline Transform& operator=(const Transform& other)
+  { m_matrix = other.m_matrix; return *this; }
+
+  template<typename OtherDerived, bool BigMatrix> // MSVC 2005 will commit suicide if BigMatrix has a default value
+  struct construct_from_matrix
+  {
+    static inline void run(Transform *transform, const MatrixBase<OtherDerived>& other)
+    {
+      transform->matrix() = other;
+    }
+  };
+
+  template<typename OtherDerived> struct construct_from_matrix<OtherDerived, true>
+  {
+    static inline void run(Transform *transform, const MatrixBase<OtherDerived>& other)
+    {
+      transform->linear() = other;
+      transform->translation().setZero();
+      transform->matrix()(Dim,Dim) = Scalar(1);
+      transform->matrix().template block<1,Dim>(Dim,0).setZero();
+    }
+  };
+
+  /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */
+  template<typename OtherDerived>
+  inline explicit Transform(const MatrixBase<OtherDerived>& other)
+  {
+    construct_from_matrix<OtherDerived, int(OtherDerived::RowsAtCompileTime) == Dim>::run(this, other);
+  }
+
+  /** Set \c *this from a (Dim+1)^2 matrix. */
+  template<typename OtherDerived>
+  inline Transform& operator=(const MatrixBase<OtherDerived>& other)
+  { m_matrix = other; return *this; }
+
+  #ifdef EIGEN_QT_SUPPORT
+  inline Transform(const QMatrix& other);
+  inline Transform& operator=(const QMatrix& other);
+  inline QMatrix toQMatrix(void) const;
+  inline Transform(const QTransform& other);
+  inline Transform& operator=(const QTransform& other);
+  inline QTransform toQTransform(void) const;
+  #endif
+
+  /** shortcut for m_matrix(row,col);
+    * \sa MatrixBase::operator()(int,int) const */
+  inline Scalar operator() (int row, int col) const { return m_matrix(row,col); }
+  /** shortcut for m_matrix(row,col);
+    * \sa MatrixBase::operator()(int,int) */
+  inline Scalar& operator() (int row, int col) { return m_matrix(row,col); }
+
+  /** \returns a read-only expression of the transformation matrix */
+  inline const MatrixType& matrix() const { return m_matrix; }
+  /** \returns a writable expression of the transformation matrix */
+  inline MatrixType& matrix() { return m_matrix; }
+
+  /** \returns a read-only expression of the linear part of the transformation */
+  inline ConstLinearPart linear() const { return m_matrix.template block<Dim,Dim>(0,0); }
+  /** \returns a writable expression of the linear part of the transformation */
+  inline LinearPart linear() { return m_matrix.template block<Dim,Dim>(0,0); }
+
+  /** \returns a read-only expression of the translation vector of the transformation */
+  inline ConstTranslationPart translation() const { return m_matrix.template block<Dim,1>(0,Dim); }
+  /** \returns a writable expression of the translation vector of the transformation */
+  inline TranslationPart translation() { return m_matrix.template block<Dim,1>(0,Dim); }
+
+  /** \returns an expression of the product between the transform \c *this and a matrix expression \a other
+  *
+  * The right hand side \a other might be either:
+  * \li a vector of size Dim,
+  * \li an homogeneous vector of size Dim+1,
+  * \li a transformation matrix of size Dim+1 x Dim+1.
+  */
+  // note: this function is defined here because some compilers cannot find the respective declaration
+  template<typename OtherDerived>
+  inline const typename ei_transform_product_impl<OtherDerived,_Dim,_Dim+1>::ResultType
+  operator * (const MatrixBase<OtherDerived> &other) const
+  { return ei_transform_product_impl<OtherDerived,Dim,HDim>::run(*this,other.derived()); }
+
+  /** \returns the product expression of a transformation matrix \a a times a transform \a b
+    * The transformation matrix \a a must have size (Dim+1) x (Dim+1). */
+  template<typename OtherDerived>
+  friend inline const typename ProductReturnType<OtherDerived,MatrixType>::Type
+  operator * (const MatrixBase<OtherDerived> &a, const Transform &b)
+  { return a.derived() * b.matrix(); }
+
+  /** Concatenates two transformations */
+  inline const Transform
+  operator * (const Transform& other) const
+  { return Transform(m_matrix * other.matrix()); }
+
+  /** \sa MatrixBase::setIdentity() */
+  void setIdentity() { m_matrix.setIdentity(); }
+  static const typename MatrixType::IdentityReturnType Identity()
+  {
+    return MatrixType::Identity();
+  }
+
+  template<typename OtherDerived>
+  inline Transform& scale(const MatrixBase<OtherDerived> &other);
+
+  template<typename OtherDerived>
+  inline Transform& prescale(const MatrixBase<OtherDerived> &other);
+
+  inline Transform& scale(Scalar s);
+  inline Transform& prescale(Scalar s);
+
+  template<typename OtherDerived>
+  inline Transform& translate(const MatrixBase<OtherDerived> &other);
+
+  template<typename OtherDerived>
+  inline Transform& pretranslate(const MatrixBase<OtherDerived> &other);
+
+  template<typename RotationType>
+  inline Transform& rotate(const RotationType& rotation);
+
+  template<typename RotationType>
+  inline Transform& prerotate(const RotationType& rotation);
+
+  Transform& shear(Scalar sx, Scalar sy);
+  Transform& preshear(Scalar sx, Scalar sy);
+
+  inline Transform& operator=(const TranslationType& t);
+  inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }
+  inline Transform operator*(const TranslationType& t) const;
+
+  inline Transform& operator=(const ScalingType& t);
+  inline Transform& operator*=(const ScalingType& s) { return scale(s.coeffs()); }
+  inline Transform operator*(const ScalingType& s) const;
+  friend inline Transform operator*(const LinearMatrixType& mat, const Transform& t)
+  {
+    Transform res = t;
+    res.matrix().row(Dim) = t.matrix().row(Dim);
+    res.matrix().template block<Dim,HDim>(0,0) = (mat * t.matrix().template block<Dim,HDim>(0,0)).lazy();
+    return res;
+  }
+
+  template<typename Derived>
+  inline Transform& operator=(const RotationBase<Derived,Dim>& r);
+  template<typename Derived>
+  inline Transform& operator*=(const RotationBase<Derived,Dim>& r) { return rotate(r.toRotationMatrix()); }
+  template<typename Derived>
+  inline Transform operator*(const RotationBase<Derived,Dim>& r) const;
+
+  LinearMatrixType rotation() const;
+  template<typename RotationMatrixType, typename ScalingMatrixType>
+  void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const;
+  template<typename ScalingMatrixType, typename RotationMatrixType>
+  void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const;
+
+  template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
+  Transform& fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
+    const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale);
+
+  inline const MatrixType inverse(TransformTraits traits = Affine) const;
+
+  /** \returns a const pointer to the column major internal matrix */
+  const Scalar* data() const { return m_matrix.data(); }
+  /** \returns a non-const pointer to the column major internal matrix */
+  Scalar* data() { return m_matrix.data(); }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim> >::type cast() const
+  { return typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Transform(const Transform<OtherScalarType,Dim>& other)
+  { m_matrix = other.matrix().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Transform& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_matrix.isApprox(other.m_matrix, prec); }
+
+  #ifdef EIGEN_TRANSFORM_PLUGIN
+  #include EIGEN_TRANSFORM_PLUGIN
+  #endif
+
+protected:
+
+};
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2> Transform2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3> Transform3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2> Transform2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3> Transform3d;
+
+/**************************
+*** Optional QT support ***
+**************************/
+
+#ifdef EIGEN_QT_SUPPORT
+/** Initialises \c *this from a QMatrix assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>::Transform(const QMatrix& other)
+{
+  *this = other;
+}
+
+/** Set \c *this from a QMatrix assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const QMatrix& other)
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_matrix << other.m11(), other.m21(), other.dx(),
+              other.m12(), other.m22(), other.dy(),
+              0, 0, 1;
+   return *this;
+}
+
+/** \returns a QMatrix from \c *this assuming the dimension is 2.
+  *
+  * \warning this conversion might lose data if \c *this is not affine
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+QMatrix Transform<Scalar,Dim>::toQMatrix(void) const
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
+                 m_matrix.coeff(0,1), m_matrix.coeff(1,1),
+                 m_matrix.coeff(0,2), m_matrix.coeff(1,2));
+}
+
+/** Initialises \c *this from a QTransform assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>::Transform(const QTransform& other)
+{
+  *this = other;
+}
+
+/** Set \c *this from a QTransform assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const QTransform& other)
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_matrix << other.m11(), other.m21(), other.dx(),
+              other.m12(), other.m22(), other.dy(),
+              other.m13(), other.m23(), other.m33();
+   return *this;
+}
+
+/** \returns a QTransform from \c *this assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+QTransform Transform<Scalar,Dim>::toQTransform(void) const
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0),
+                    m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1),
+                    m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2));
+}
+#endif
+
+/*********************
+*** Procedural API ***
+*********************/
+
+/** Applies on the right the non-uniform scale transformation represented
+  * by the vector \a other to \c *this and returns a reference to \c *this.
+  * \sa prescale()
+  */
+template<typename Scalar, int Dim>
+template<typename OtherDerived>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::scale(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  linear() = (linear() * other.asDiagonal()).lazy();
+  return *this;
+}
+
+/** Applies on the right a uniform scale of a factor \a s to \c *this
+  * and returns a reference to \c *this.
+  * \sa prescale(Scalar)
+  */
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::scale(Scalar s)
+{
+  linear() *= s;
+  return *this;
+}
+
+/** Applies on the left the non-uniform scale transformation represented
+  * by the vector \a other to \c *this and returns a reference to \c *this.
+  * \sa scale()
+  */
+template<typename Scalar, int Dim>
+template<typename OtherDerived>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::prescale(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  m_matrix.template block<Dim,HDim>(0,0) = (other.asDiagonal() * m_matrix.template block<Dim,HDim>(0,0)).lazy();
+  return *this;
+}
+
+/** Applies on the left a uniform scale of a factor \a s to \c *this
+  * and returns a reference to \c *this.
+  * \sa scale(Scalar)
+  */
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::prescale(Scalar s)
+{
+  m_matrix.template corner<Dim,HDim>(TopLeft) *= s;
+  return *this;
+}
+
+/** Applies on the right the translation matrix represented by the vector \a other
+  * to \c *this and returns a reference to \c *this.
+  * \sa pretranslate()
+  */
+template<typename Scalar, int Dim>
+template<typename OtherDerived>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::translate(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  translation() += linear() * other;
+  return *this;
+}
+
+/** Applies on the left the translation matrix represented by the vector \a other
+  * to \c *this and returns a reference to \c *this.
+  * \sa translate()
+  */
+template<typename Scalar, int Dim>
+template<typename OtherDerived>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::pretranslate(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  translation() += other;
+  return *this;
+}
+
+/** Applies on the right the rotation represented by the rotation \a rotation
+  * to \c *this and returns a reference to \c *this.
+  *
+  * The template parameter \a RotationType is the type of the rotation which
+  * must be known by ei_toRotationMatrix<>.
+  *
+  * Natively supported types include:
+  *   - any scalar (2D),
+  *   - a Dim x Dim matrix expression,
+  *   - a Quaternion (3D),
+  *   - an AngleAxis (3D)
+  *
+  * This mechanism is easily extensible to support user types such as Euler angles,
+  * or a pair of Quaternions for 4D rotations.
+  *
+  * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType)
+  */
+template<typename Scalar, int Dim>
+template<typename RotationType>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::rotate(const RotationType& rotation)
+{
+  linear() *= ei_toRotationMatrix<Scalar,Dim>(rotation);
+  return *this;
+}
+
+/** Applies on the left the rotation represented by the rotation \a rotation
+  * to \c *this and returns a reference to \c *this.
+  *
+  * See rotate() for further details.
+  *
+  * \sa rotate()
+  */
+template<typename Scalar, int Dim>
+template<typename RotationType>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::prerotate(const RotationType& rotation)
+{
+  m_matrix.template block<Dim,HDim>(0,0) = ei_toRotationMatrix<Scalar,Dim>(rotation)
+                                         * m_matrix.template block<Dim,HDim>(0,0);
+  return *this;
+}
+
+/** Applies on the right the shear transformation defined by the shear
+  * factors \a sx and \a sy to \c *this and returns a reference to \c *this.
+  * \warning 2D only.
+  * \sa preshear()
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::shear(Scalar sx, Scalar sy)
+{
+  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  VectorType tmp = linear().col(0)*sy + linear().col(1);
+  linear() << linear().col(0) + linear().col(1)*sx, tmp;
+  return *this;
+}
+
+/** Applies on the left the shear transformation defined by the shear
+  * factors \a sx and \a sy to \c *this and returns a reference to \c *this.
+  * \warning 2D only.
+  * \sa shear()
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::preshear(Scalar sx, Scalar sy)
+{
+  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_matrix.template block<Dim,HDim>(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block<Dim,HDim>(0,0);
+  return *this;
+}
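Usage sketch (not part of the upstream file): the procedural API documented above chains scale/translate/rotate calls on the right and their pre* variants on the left. The sketch uses the current Affine3d alias, since the Eigen2-compatibility Transform3d above requires EIGEN2_SUPPORT; the member calls are the same.

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      using namespace Eigen;

      // Chain operations on the right: an input point is scaled first,
      // then rotated, then translated.
      Affine3d t = Affine3d::Identity();
      t.translate(Vector3d(1.0, 2.0, 3.0));
      t.rotate(AngleAxisd(0.5, Vector3d::UnitZ()));
      t.scale(2.0);

      // The pre* variants multiply on the left instead, i.e. they are applied last.
      t.pretranslate(Vector3d(0.0, 0.0, 1.0));

      Vector3d p = t * Vector3d(1.0, 0.0, 0.0);
      std::cout << p.transpose() << "\n\n" << t.matrix() << "\n";
      return 0;
    }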
+
+/******************************************************
+*** Scaling, Translation and Rotation compatibility ***
+******************************************************/
+
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const TranslationType& t)
+{
+  linear().setIdentity();
+  translation() = t.vector();
+  m_matrix.template block<1,Dim>(Dim,0).setZero();
+  m_matrix(Dim,Dim) = Scalar(1);
+  return *this;
+}
+
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim> Transform<Scalar,Dim>::operator*(const TranslationType& t) const
+{
+  Transform res = *this;
+  res.translate(t.vector());
+  return res;
+}
+
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const ScalingType& s)
+{
+  m_matrix.setZero();
+  linear().diagonal() = s.coeffs();
+  m_matrix.coeffRef(Dim,Dim) = Scalar(1);
+  return *this;
+}
+
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim> Transform<Scalar,Dim>::operator*(const ScalingType& s) const
+{
+  Transform res = *this;
+  res.scale(s.coeffs());
+  return res;
+}
+
+template<typename Scalar, int Dim>
+template<typename Derived>
+inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const RotationBase<Derived,Dim>& r)
+{
+  linear() = ei_toRotationMatrix<Scalar,Dim>(r);
+  translation().setZero();
+  m_matrix.template block<1,Dim>(Dim,0).setZero();
+  m_matrix.coeffRef(Dim,Dim) = Scalar(1);
+  return *this;
+}
+
+template<typename Scalar, int Dim>
+template<typename Derived>
+inline Transform<Scalar,Dim> Transform<Scalar,Dim>::operator*(const RotationBase<Derived,Dim>& r) const
+{
+  Transform res = *this;
+  res.rotate(r.derived());
+  return res;
+}
+
+/************************
+*** Special functions ***
+************************/
+
+/** \returns the rotation part of the transformation
+  * \nonstableyet
+  *
+  * \svd_module
+  *
+  * \sa computeRotationScaling(), computeScalingRotation(), class SVD
+  */
+template<typename Scalar, int Dim>
+typename Transform<Scalar,Dim>::LinearMatrixType
+Transform<Scalar,Dim>::rotation() const
+{
+  LinearMatrixType result;
+  computeRotationScaling(&result, (LinearMatrixType*)0);
+  return result;
+}
+
+
+/** Decomposes the linear part of the transformation as a product rotation x scaling;
+  * the scaling is not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * \nonstableyet
+  *
+  * \svd_module
+  *
+  * \sa computeScalingRotation(), rotation(), class SVD
+  */
+template<typename Scalar, int Dim>
+template<typename RotationMatrixType, typename ScalingMatrixType>
+void Transform<Scalar,Dim>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const
+{
+  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU|ComputeFullV);
+  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+  Matrix<Scalar, Dim, 1> sv(svd.singularValues());
+  sv.coeffRef(0) *= x;
+  if(scaling)
+  {
+    scaling->noalias() = svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint();
+  }
+  if(rotation)
+  {
+    LinearMatrixType m(svd.matrixU());
+    m.col(0) /= x;
+    rotation->noalias() = m * svd.matrixV().adjoint();
+  }
+}
+
+/** decomposes the linear part of the transformation as a product scaling x rotation;
+  * the scaling part is not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * \nonstableyet
+  *
+  * \svd_module
+  *
+  * \sa computeRotationScaling(), rotation(), class SVD
+  */
+template<typename Scalar, int Dim>
+template<typename ScalingMatrixType, typename RotationMatrixType>
+void Transform<Scalar,Dim>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const
+{
+  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU|ComputeFullV);
+  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+  Matrix<Scalar, Dim, 1> sv(svd.singularValues());
+  sv.coeffRef(0) *= x;
+  if(scaling)
+  {
+    scaling->noalias() = svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint();
+  }
+  if(rotation)
+  {
+    LinearMatrixType m(svd.matrixU());
+    m.col(0) /= x;
+    rotation->noalias() = m * svd.matrixV().adjoint();
+  }
+}
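+
+/* A minimal sketch of the two decompositions above; the transform, the call to
+   setIdentity() and the matrix names are only for illustration:
+
+     Transform<float,3> t;
+     t.setIdentity();                        // assume t is then filled with some affine transformation
+     Matrix3f rot, scl;
+     t.computeRotationScaling(&rot, &scl);   // t.linear() == rot * scl
+     t.computeScalingRotation(&scl, &rot);   // t.linear() == scl * rot
+*/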
+
+/** Convenient method to set \c *this from a position, orientation and scale
+  * of a 3D object.
+  */
+template<typename Scalar, int Dim>
+template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
+  const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale)
+{
+  linear() = ei_toRotationMatrix<Scalar,Dim>(orientation);
+  linear() *= scale.asDiagonal();
+  translation() = position;
+  m_matrix.template block<1,Dim>(Dim,0).setZero();
+  m_matrix(Dim,Dim) = Scalar(1);
+  return *this;
+}
+
+/** \nonstableyet
+  *
+  * \returns the inverse transformation matrix according to some given knowledge
+  * on \c *this.
+  *
+  * \param traits allows optimizing the inversion process when the transformation
+  * is known not to be a general one. The possible values are:
+  *  - Projective if the transformation is not necessarily affine, i.e., if the
+  *    last row is not guaranteed to be [0 ... 0 1]
+  *  - Affine is the default; the last row is assumed to be [0 ... 0 1]
+  *  - Isometry if the transformation is only a concatenation of translations
+  *    and rotations.
+  *
+  * \warning unless \a traits is set to Isometry, this function
+  * requires the generic inverse method of MatrixBase defined in the LU module. If
+  * you forget to include this module, then you will get hard-to-debug linking errors.
+  *
+  * \sa MatrixBase::inverse()
+  */
+template<typename Scalar, int Dim>
+inline const typename Transform<Scalar,Dim>::MatrixType
+Transform<Scalar,Dim>::inverse(TransformTraits traits) const
+{
+  if (traits == Projective)
+  {
+    return m_matrix.inverse();
+  }
+  else
+  {
+    MatrixType res;
+    if (traits == Affine)
+    {
+      res.template corner<Dim,Dim>(TopLeft) = linear().inverse();
+    }
+    else if (traits == Isometry)
+    {
+      res.template corner<Dim,Dim>(TopLeft) = linear().transpose();
+    }
+    else
+    {
+      ei_assert(false && "invalid traits value in Transform::inverse()");
+    }
+    // translation and remaining parts
+    res.template corner<Dim,1>(TopRight) = - res.template corner<Dim,Dim>(TopLeft) * translation();
+    res.template corner<1,Dim>(BottomLeft).setZero();
+    res.coeffRef(Dim,Dim) = Scalar(1);
+    return res;
+  }
+}
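+
+/* A minimal sketch of the traits-dependent inversion above; the transform and the call
+   to setIdentity() are only for illustration. Passing Isometry avoids the LU-based
+   matrix inversion entirely:
+
+     Transform<double,3> t;
+     t.setIdentity();          // assume t is then built from rotations and translations only
+     Transform<double,3>::MatrixType inv = t.inverse(Isometry);
+*/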
+
+/*****************************************************
+*** Specializations of operator* with a MatrixBase ***
+*****************************************************/
+
+template<typename Other, int Dim, int HDim>
+struct ei_transform_product_impl<Other,Dim,HDim, HDim,HDim>
+{
+  typedef Transform<typename Other::Scalar,Dim> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef typename ProductReturnType<MatrixType,Other>::Type ResultType;
+  static ResultType run(const TransformType& tr, const Other& other)
+  { return tr.matrix() * other; }
+};
+
+template<typename Other, int Dim, int HDim>
+struct ei_transform_product_impl<Other,Dim,HDim, Dim,Dim>
+{
+  typedef Transform<typename Other::Scalar,Dim> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef TransformType ResultType;
+  static ResultType run(const TransformType& tr, const Other& other)
+  {
+    TransformType res;
+    res.translation() = tr.translation();
+    res.matrix().row(Dim) = tr.matrix().row(Dim);
+    res.linear() = (tr.linear() * other).lazy();
+    return res;
+  }
+};
+
+template<typename Other, int Dim, int HDim>
+struct ei_transform_product_impl<Other,Dim,HDim, HDim,1>
+{
+  typedef Transform<typename Other::Scalar,Dim> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef typename ProductReturnType<MatrixType,Other>::Type ResultType;
+  static ResultType run(const TransformType& tr, const Other& other)
+  { return tr.matrix() * other; }
+};
+
+template<typename Other, int Dim, int HDim>
+struct ei_transform_product_impl<Other,Dim,HDim, Dim,1>
+{
+  typedef typename Other::Scalar Scalar;
+  typedef Transform<Scalar,Dim> TransformType;
+  typedef Matrix<Scalar,Dim,1> ResultType;
+  static ResultType run(const TransformType& tr, const Other& other)
+  { return ((tr.linear() * other) + tr.translation())
+          * (Scalar(1) / ( (tr.matrix().template block<1,Dim>(Dim,0) * other).coeff(0) + tr.matrix().coeff(Dim,Dim))); }
+};
+
+} // end namespace Eigen
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Translation.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Translation.h
new file mode 100644
index 000000000..0fb9a9f9a
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Geometry/Translation.h
@@ -0,0 +1,184 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Translation
+  *
+  * \brief Represents a translation transformation
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  * \param _Dim the dimension of the space, can be a compile time value or Dynamic
+  *
+  * \note This class is not meant to store a translation transformation itself,
+  * but rather to ease the construction and update of Transform objects.
+  *
+  * \sa class Scaling, class Transform
+  */
+template<typename _Scalar, int _Dim>
+class Translation
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)
+  /** dimension of the space */
+  enum { Dim = _Dim };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  /** corresponding vector type */
+  typedef Matrix<Scalar,Dim,1> VectorType;
+  /** corresponding linear transformation matrix type */
+  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
+  /** corresponding scaling transformation type */
+  typedef Scaling<Scalar,Dim> ScalingType;
+  /** corresponding affine transformation type */
+  typedef Transform<Scalar,Dim> TransformType;
+
+protected:
+
+  VectorType m_coeffs;
+
+public:
+
+  /** Default constructor without initialization. */
+  Translation() {}
+  /** Constructs and initializes a 2D translation from its coefficients \a sx and \a sy */
+  inline Translation(const Scalar& sx, const Scalar& sy)
+  {
+    ei_assert(Dim==2);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+  }
+  /** Constructs and initializes a 3D translation from its coefficients \a sx, \a sy and \a sz */
+  inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz)
+  {
+    ei_assert(Dim==3);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+    m_coeffs.z() = sz;
+  }
+  /** Constructs and initializes the translation transformation from a vector of translation coefficients */
+  explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {}
+
+  const VectorType& vector() const { return m_coeffs; }
+  VectorType& vector() { return m_coeffs; }
+
+  /** Concatenates two translations */
+  inline Translation operator* (const Translation& other) const
+  { return Translation(m_coeffs + other.m_coeffs); }
+
+  /** Concatenates a translation and a scaling */
+  inline TransformType operator* (const ScalingType& other) const;
+
+  /** Concatenates a translation and a linear transformation */
+  inline TransformType operator* (const LinearMatrixType& linear) const;
+
+  template<typename Derived>
+  inline TransformType operator*(const RotationBase<Derived,Dim>& r) const
+  { return *this * r.toRotationMatrix(); }
+
+  /** Concatenates a linear transformation and a translation */
+  // it's a nightmare to define a templated friend function outside of its declaration
+  friend inline TransformType operator* (const LinearMatrixType& linear, const Translation& t)
+  {
+    TransformType res;
+    res.matrix().setZero();
+    res.linear() = linear;
+    res.translation() = linear * t.m_coeffs;
+    res.matrix().row(Dim).setZero();
+    res(Dim,Dim) = Scalar(1);
+    return res;
+  }
+
+  /** Concatenates a translation and an affine transformation */
+  inline TransformType operator* (const TransformType& t) const;
+
+  /** Applies translation to vector */
+  inline VectorType operator* (const VectorType& other) const
+  { return m_coeffs + other; }
+
+  /** \returns the inverse translation (opposite) */
+  Translation inverse() const { return Translation(-m_coeffs); }
+
+  Translation& operator=(const Translation& other)
+  {
+    m_coeffs = other.m_coeffs;
+    return *this;
+  }
+
+  /** \returns \c *this with scalar type cast to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const
+  { return typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Translation(const Translation<OtherScalarType,Dim>& other)
+  { m_coeffs = other.vector().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Translation& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+};
+
+/** \addtogroup Geometry_Module */
+//@{
+typedef Translation<float, 2> Translation2f;
+typedef Translation<double,2> Translation2d;
+typedef Translation<float, 3> Translation3f;
+typedef Translation<double,3> Translation3d;
+//@}
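+
+/* A minimal sketch of composing translations; the values are only for illustration and
+   use only the operators declared in this file:
+
+     Translation3f shift(1.f, 2.f, 3.f);
+     Translation3f total = shift * Translation3f(0.f, 0.f, 1.f); // concatenation of two translations
+     Vector3f origin = Vector3f::Zero();
+     Vector3f p = shift * origin;                                // p == (1, 2, 3)
+*/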
+
+
+template<typename Scalar, int Dim>
+inline typename Translation<Scalar,Dim>::TransformType
+Translation<Scalar,Dim>::operator* (const ScalingType& other) const
+{
+  TransformType res;
+  res.matrix().setZero();
+  res.linear().diagonal() = other.coeffs();
+  res.translation() = m_coeffs;
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+template<typename Scalar, int Dim>
+inline typename Translation<Scalar,Dim>::TransformType
+Translation<Scalar,Dim>::operator* (const LinearMatrixType& linear) const
+{
+  TransformType res;
+  res.matrix().setZero();
+  res.linear() = linear;
+  res.translation() = m_coeffs;
+  res.matrix().row(Dim).setZero();
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+template<typename Scalar, int Dim>
+inline typename Translation<Scalar,Dim>::TransformType
+Translation<Scalar,Dim>::operator* (const TransformType& t) const
+{
+  TransformType res = t;
+  res.pretranslate(m_coeffs);
+  return res;
+}
+
+} // end namespace Eigen
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/LU.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/LU.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/LU.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/LU.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Lazy.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Lazy.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/Lazy.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/Lazy.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/LeastSquares.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/LeastSquares.h
new file mode 100644
index 000000000..7aff428dc
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/LeastSquares.h
@@ -0,0 +1,170 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN2_LEASTSQUARES_H
+#define EIGEN2_LEASTSQUARES_H
+
+namespace Eigen { 
+
+/** \ingroup LeastSquares_Module
+  *
+  * \leastsquares_module
+  *
+  * For a set of points, this function tries to express
+  * one of the coords as a linear (affine) function of the other coords.
+  *
+  * This is best explained by an example. This function works in full
+  * generality, for points in a space of arbitrary dimension, and also over
+  * the complex numbers, but for this example we will work in dimension 3
+  * over the real numbers (doubles).
+  *
+  * So let us work with the following set of 5 points given by their
+  * \f$(x,y,z)\f$ coordinates:
+  * @code
+    Vector3d points[5];
+    points[0] = Vector3d( 3.02, 6.89, -4.32 );
+    points[1] = Vector3d( 2.01, 5.39, -3.79 );
+    points[2] = Vector3d( 2.41, 6.01, -4.01 );
+    points[3] = Vector3d( 2.09, 5.55, -3.86 );
+    points[4] = Vector3d( 2.58, 6.32, -4.10 );
+  * @endcode
+  * Suppose that we want to express the second coordinate (\f$y\f$) as a linear
+  * expression in \f$x\f$ and \f$z\f$, that is,
+  * \f[ y=ax+bz+c \f]
+  * for some constants \f$a,b,c\f$. Thus, we want to find the best possible
+  * constants \f$a,b,c\f$ so that the plane of equation \f$y=ax+bz+c\f$ best
+  * fits the five points above. To do that, call this function as follows:
+  * @code
+    Vector3d* ptrs[5] = { &points[0], &points[1], &points[2], &points[3], &points[4] };
+    Vector3d coeffs; // will store the coefficients a, b, c
+    linearRegression(
+      5,
+      ptrs, // array of pointers to the points
+      &coeffs,
+      1 // the coord to express as a function of
+        // the other ones. 0 means x, 1 means y, 2 means z.
+    );
+  * @endcode
+  * Now the vector \a coeffs is approximately
+  * \f$( 0.495 ,  -1.927 ,  -2.906 )\f$.
+  * Thus, we get \f$a=0.495, b = -1.927, c = -2.906\f$. Let us check, for
+  * instance, how close points[0] is to the plane of equation \f$y=ax+bz+c\f$.
+  * Looking at the coords of points[0], we see that:
+  * \f[ax+bz+c = 0.495 * 3.02 + (-1.927) * (-4.32) + (-2.906) = 6.91.\f]
+  * On the other hand, we have \f$y=6.89\f$. The values \f$6.91\f$ and \f$6.89\f$
+  * are close, so points[0] lies very close to the plane of equation \f$y=ax+bz+c\f$.
+  *
+  * Let's now describe precisely the parameters:
+  * @param numPoints the number of points
+  * @param points the array of pointers to the points on which to perform the linear regression
+  * @param result pointer to the vector in which to store the result.
+                  This vector must be of the same type and size as the
+                  data points. The meaning of its coords is as follows.
+                  For brevity, let \f$n=Size\f$,
+                  \f$r_i=result[i]\f$,
+                  and \f$f=funcOfOthers\f$. Denote by
+                  \f$x_0,\ldots,x_{n-1}\f$
+                  the n coordinates in the n-dimensional space.
+                  Then the resulting equation is:
+                  \f[ x_f = r_0 x_0 + \cdots + r_{f-1}x_{f-1}
+                   + r_{f+1}x_{f+1} + \cdots + r_{n-1}x_{n-1} + r_n. \f]
+  * @param funcOfOthers Determines which coord to express as a function of the
+                        others. Coords are numbered starting from 0, so that a
+                        value of 0 means \f$x\f$, 1 means \f$y\f$,
+                        2 means \f$z\f$, ...
+  *
+  * \sa fitHyperplane()
+  */
+template<typename VectorType>
+void linearRegression(int numPoints,
+                      VectorType **points,
+                      VectorType *result,
+                      int funcOfOthers )
+{
+  typedef typename VectorType::Scalar Scalar;
+  typedef Hyperplane<Scalar, VectorType::SizeAtCompileTime> HyperplaneType;
+  const int size = points[0]->size();
+  result->resize(size);
+  HyperplaneType h(size);
+  fitHyperplane(numPoints, points, &h);
+  for(int i = 0; i < funcOfOthers; i++)
+    result->coeffRef(i) = - h.coeffs()[i] / h.coeffs()[funcOfOthers];
+  for(int i = funcOfOthers; i < size; i++)
+    result->coeffRef(i) = - h.coeffs()[i+1] / h.coeffs()[funcOfOthers];
+}
+
+/** \ingroup LeastSquares_Module
+  *
+  * \leastsquares_module
+  *
+  * This function is quite similar to linearRegression(), so we refer to the
+  * documentation of this function and only list here the differences.
+  *
+  * The main difference from linearRegression() is that this function doesn't
+  * take a \a funcOfOthers argument. Instead, it finds a general equation
+  * of the form
+  * \f[ r_0 x_0 + \cdots + r_{n-1}x_{n-1} + r_n = 0, \f]
+  * where \f$n=Size\f$, \f$r_i=retCoefficients[i]\f$, and we denote by
+  * \f$x_0,\ldots,x_{n-1}\f$ the n coordinates in the n-dimensional space.
+  *
+  * Thus, the vector \a retCoefficients has size \f$n+1\f$, which is another
+  * difference from linearRegression().
+  *
+  * In practice, this function performs a hyperplane fit in a total least squares sense
+  * via the following steps:
+  *  1 - center the data around the mean
+  *  2 - compute the covariance matrix
+  *  3 - pick the eigenvector corresponding to the smallest eigenvalue of the covariance matrix
+  * The ratio between the smallest eigenvalue and the second smallest one gives a hint about
+  * the relevance of the solution. This value is optionally returned in \a soundness.
+  *
+  * \sa linearRegression()
+  */
+template<typename VectorType, typename HyperplaneType>
+void fitHyperplane(int numPoints,
+                   VectorType **points,
+                   HyperplaneType *result,
+                   typename NumTraits<typename VectorType::Scalar>::Real* soundness = 0)
+{
+  typedef typename VectorType::Scalar Scalar;
+  typedef Matrix<Scalar,VectorType::SizeAtCompileTime,VectorType::SizeAtCompileTime> CovMatrixType;
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType)
+  ei_assert(numPoints >= 1);
+  int size = points[0]->size();
+  ei_assert(size+1 == result->coeffs().size());
+
+  // compute the mean of the data
+  VectorType mean = VectorType::Zero(size);
+  for(int i = 0; i < numPoints; ++i)
+    mean += *(points[i]);
+  mean /= numPoints;
+
+  // compute the covariance matrix
+  CovMatrixType covMat = CovMatrixType::Zero(size, size);
+  VectorType remean = VectorType::Zero(size);
+  for(int i = 0; i < numPoints; ++i)
+  {
+    VectorType diff = (*(points[i]) - mean).conjugate();
+    covMat += diff * diff.adjoint();
+  }
+
+  // now we just have to pick the eigenvector with the smallest eigenvalue
+  SelfAdjointEigenSolver<CovMatrixType> eig(covMat);
+  result->normal() = eig.eigenvectors().col(0);
+  if (soundness)
+    *soundness = eig.eigenvalues().coeff(0)/eig.eigenvalues().coeff(1);
+
+  // let's compute the constant coefficient such that the
+  // plane passes through the mean point:
+  result->offset() = - (result->normal().cwise()* mean).sum();
+}
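+
+/* A minimal sketch of a hyperplane fit; the point set below is only for illustration:
+
+     Vector3d a(0,0,0), b(1,0,0), c(0,1,0), d(1,1,0.01);
+     Vector3d* pts[4] = { &a, &b, &c, &d };
+     Hyperplane<double,3> plane(3);
+     double soundness;
+     fitHyperplane(4, pts, &plane, &soundness); // plane.normal() is roughly (0,0,+/-1)
+*/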
+
+} // end namespace Eigen
+
+#endif // EIGEN2_LEASTSQUARES_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Macros.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Macros.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/Macros.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/Macros.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/MathFunctions.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/MathFunctions.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/MathFunctions.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/MathFunctions.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Memory.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Memory.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/Memory.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/Memory.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Meta.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Meta.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/Meta.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/Meta.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Minor.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/Minor.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/Minor.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/Minor.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/QR.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/QR.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/QR.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/QR.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigen2Support/SVD.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/SVD.h
new file mode 100644
index 000000000..3d2eeb445
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/SVD.h
@@ -0,0 +1,638 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN2_SVD_H
+#define EIGEN2_SVD_H
+
+namespace Eigen {
+
+/** \ingroup SVD_Module
+  * \nonstableyet
+  *
+  * \class SVD
+  *
+  * \brief Standard SVD decomposition of a matrix and associated features
+  *
+  * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+  *
+  * This class performs a standard SVD decomposition of a real matrix A of size \c M x \c N
+  * with \c M \>= \c N.
+  *
+  *
+  * \sa MatrixBase::SVD()
+  */
+template<typename MatrixType> class SVD
+{
+  private:
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+
+    enum {
+      PacketSize = internal::packet_traits<Scalar>::size,
+      AlignmentMask = int(PacketSize)-1,
+      MinSize = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime)
+    };
+
+    typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVector;
+    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> RowVector;
+
+    typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MinSize> MatrixUType;
+    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> MatrixVType;
+    typedef Matrix<Scalar, MinSize, 1> SingularValuesType;
+
+  public:
+
+    SVD() {} // a user who relied on the compiler-generated default constructor reported problems with MSVC in 2.0.7
+    
+    SVD(const MatrixType& matrix)
+      : m_matU(matrix.rows(), (std::min)(matrix.rows(), matrix.cols())),
+        m_matV(matrix.cols(),matrix.cols()),
+        m_sigma((std::min)(matrix.rows(),matrix.cols()))
+    {
+      compute(matrix);
+    }
+
+    template<typename OtherDerived, typename ResultType>
+    bool solve(const MatrixBase<OtherDerived> &b, ResultType* result) const;
+
+    const MatrixUType& matrixU() const { return m_matU; }
+    const SingularValuesType& singularValues() const { return m_sigma; }
+    const MatrixVType& matrixV() const { return m_matV; }
+
+    void compute(const MatrixType& matrix);
+    SVD& sort();
+
+    template<typename UnitaryType, typename PositiveType>
+    void computeUnitaryPositive(UnitaryType *unitary, PositiveType *positive) const;
+    template<typename PositiveType, typename UnitaryType>
+    void computePositiveUnitary(PositiveType *positive, UnitaryType *unitary) const;
+    template<typename RotationType, typename ScalingType>
+    void computeRotationScaling(RotationType *unitary, ScalingType *positive) const;
+    template<typename ScalingType, typename RotationType>
+    void computeScalingRotation(ScalingType *positive, RotationType *unitary) const;
+
+  protected:
+    /** \internal */
+    MatrixUType m_matU;
+    /** \internal */
+    MatrixVType m_matV;
+    /** \internal */
+    SingularValuesType m_sigma;
+};
+
+/** Computes / recomputes the SVD decomposition A = U S V^* of \a matrix
+  *
+  * \note this code has been adapted from JAMA (public domain)
+  */
+template<typename MatrixType>
+void SVD<MatrixType>::compute(const MatrixType& matrix)
+{
+  const int m = matrix.rows();
+  const int n = matrix.cols();
+  const int nu = (std::min)(m,n);
+  ei_assert(m>=n && "In Eigen 2.0, SVD only works for MxN matrices with M>=N. Sorry!");
+  ei_assert(m>1 && "In Eigen 2.0, SVD doesn't work on 1x1 matrices");
+
+  m_matU.resize(m, nu);
+  m_matU.setZero();
+  m_sigma.resize((std::min)(m,n));
+  m_matV.resize(n,n);
+
+  RowVector e(n);
+  ColVector work(m);
+  MatrixType matA(matrix);
+  const bool wantu = true;
+  const bool wantv = true;
+  int i=0, j=0, k=0;
+
+  // Reduce A to bidiagonal form, storing the diagonal elements
+  // in s and the super-diagonal elements in e.
+  int nct = (std::min)(m-1,n);
+  int nrt = (std::max)(0,(std::min)(n-2,m));
+  for (k = 0; k < (std::max)(nct,nrt); ++k)
+  {
+    if (k < nct)
+    {
+      // Compute the transformation for the k-th column and
+      // place the k-th diagonal in m_sigma[k].
+      m_sigma[k] = matA.col(k).end(m-k).norm();
+      if (m_sigma[k] != 0.0) // FIXME
+      {
+        if (matA(k,k) < 0.0)
+          m_sigma[k] = -m_sigma[k];
+        matA.col(k).end(m-k) /= m_sigma[k];
+        matA(k,k) += 1.0;
+      }
+      m_sigma[k] = -m_sigma[k];
+    }
+
+    for (j = k+1; j < n; ++j)
+    {
+      if ((k < nct) && (m_sigma[k] != 0.0))
+      {
+        // Apply the transformation.
+        Scalar t = matA.col(k).end(m-k).eigen2_dot(matA.col(j).end(m-k)); // FIXME dot product or cwise prod + .sum() ??
+        t = -t/matA(k,k);
+        matA.col(j).end(m-k) += t * matA.col(k).end(m-k);
+      }
+
+      // Place the k-th row of A into e for the
+      // subsequent calculation of the row transformation.
+      e[j] = matA(k,j);
+    }
+
+    // Place the transformation in U for subsequent back multiplication.
+    if (wantu & (k < nct))
+      m_matU.col(k).end(m-k) = matA.col(k).end(m-k);
+
+    if (k < nrt)
+    {
+      // Compute the k-th row transformation and place the
+      // k-th super-diagonal in e[k].
+      e[k] = e.end(n-k-1).norm();
+      if (e[k] != 0.0)
+      {
+          if (e[k+1] < 0.0)
+            e[k] = -e[k];
+          e.end(n-k-1) /= e[k];
+          e[k+1] += 1.0;
+      }
+      e[k] = -e[k];
+      if ((k+1 < m) & (e[k] != 0.0))
+      {
+        // Apply the transformation.
+        work.end(m-k-1) = matA.corner(BottomRight,m-k-1,n-k-1) * e.end(n-k-1);
+        for (j = k+1; j < n; ++j)
+          matA.col(j).end(m-k-1) += (-e[j]/e[k+1]) * work.end(m-k-1);
+      }
+
+      // Place the transformation in V for subsequent back multiplication.
+      if (wantv)
+        m_matV.col(k).end(n-k-1) = e.end(n-k-1);
+    }
+  }
+
+
+  // Set up the final bidiagonal matrix of order p.
+  int p = (std::min)(n,m+1);
+  if (nct < n)
+    m_sigma[nct] = matA(nct,nct);
+  if (m < p)
+    m_sigma[p-1] = 0.0;
+  if (nrt+1 < p)
+    e[nrt] = matA(nrt,p-1);
+  e[p-1] = 0.0;
+
+  // If required, generate U.
+  if (wantu)
+  {
+    for (j = nct; j < nu; ++j)
+    {
+      m_matU.col(j).setZero();
+      m_matU(j,j) = 1.0;
+    }
+    for (k = nct-1; k >= 0; k--)
+    {
+      if (m_sigma[k] != 0.0)
+      {
+        for (j = k+1; j < nu; ++j)
+        {
+          Scalar t = m_matU.col(k).end(m-k).eigen2_dot(m_matU.col(j).end(m-k)); // FIXME is it really a dot product we want ?
+          t = -t/m_matU(k,k);
+          m_matU.col(j).end(m-k) += t * m_matU.col(k).end(m-k);
+        }
+        m_matU.col(k).end(m-k) = - m_matU.col(k).end(m-k);
+        m_matU(k,k) = Scalar(1) + m_matU(k,k);
+        if (k-1>0)
+          m_matU.col(k).start(k-1).setZero();
+      }
+      else
+      {
+        m_matU.col(k).setZero();
+        m_matU(k,k) = 1.0;
+      }
+    }
+  }
+
+  // If required, generate V.
+  if (wantv)
+  {
+    for (k = n-1; k >= 0; k--)
+    {
+      if ((k < nrt) & (e[k] != 0.0))
+      {
+        for (j = k+1; j < nu; ++j)
+        {
+          Scalar t = m_matV.col(k).end(n-k-1).eigen2_dot(m_matV.col(j).end(n-k-1)); // FIXME is it really a dot product we want ?
+          t = -t/m_matV(k+1,k);
+          m_matV.col(j).end(n-k-1) += t * m_matV.col(k).end(n-k-1);
+        }
+      }
+      m_matV.col(k).setZero();
+      m_matV(k,k) = 1.0;
+    }
+  }
+
+  // Main iteration loop for the singular values.
+  int pp = p-1;
+  int iter = 0;
+  Scalar eps = ei_pow(Scalar(2),ei_is_same_type<Scalar,float>::ret ? Scalar(-23) : Scalar(-52));
+  while (p > 0)
+  {
+    int k=0;
+    int kase=0;
+
+    // Here is where a test for too many iterations would go.
+
+    // This section of the program inspects for
+    // negligible elements in the s and e arrays.  On
+    // completion the variables kase and k are set as follows.
+
+    // kase = 1     if s(p) and e[k-1] are negligible and k<p
+    // kase = 2     if s(k) is negligible and k<p
+    // kase = 3     if e[k-1] is negligible, k<p, and
+    //              s(k), ..., s(p) are not negligible (qr step).
+    // kase = 4     if e(p-1) is negligible (convergence).
+
+    for (k = p-2; k >= -1; --k)
+    {
+      if (k == -1)
+          break;
+      if (ei_abs(e[k]) <= eps*(ei_abs(m_sigma[k]) + ei_abs(m_sigma[k+1])))
+      {
+          e[k] = 0.0;
+          break;
+      }
+    }
+    if (k == p-2)
+    {
+      kase = 4;
+    }
+    else
+    {
+      int ks;
+      for (ks = p-1; ks >= k; --ks)
+      {
+        if (ks == k)
+          break;
+        Scalar t = (ks != p ? ei_abs(e[ks]) : Scalar(0)) + (ks != k+1 ? ei_abs(e[ks-1]) : Scalar(0));
+        if (ei_abs(m_sigma[ks]) <= eps*t)
+        {
+          m_sigma[ks] = 0.0;
+          break;
+        }
+      }
+      if (ks == k)
+      {
+        kase = 3;
+      }
+      else if (ks == p-1)
+      {
+        kase = 1;
+      }
+      else
+      {
+        kase = 2;
+        k = ks;
+      }
+    }
+    ++k;
+
+    // Perform the task indicated by kase.
+    switch (kase)
+    {
+
+      // Deflate negligible s(p).
+      case 1:
+      {
+        Scalar f(e[p-2]);
+        e[p-2] = 0.0;
+        for (j = p-2; j >= k; --j)
+        {
+          Scalar t(internal::hypot(m_sigma[j],f));
+          Scalar cs(m_sigma[j]/t);
+          Scalar sn(f/t);
+          m_sigma[j] = t;
+          if (j != k)
+          {
+            f = -sn*e[j-1];
+            e[j-1] = cs*e[j-1];
+          }
+          if (wantv)
+          {
+            for (i = 0; i < n; ++i)
+            {
+              t = cs*m_matV(i,j) + sn*m_matV(i,p-1);
+              m_matV(i,p-1) = -sn*m_matV(i,j) + cs*m_matV(i,p-1);
+              m_matV(i,j) = t;
+            }
+          }
+        }
+      }
+      break;
+
+      // Split at negligible s(k).
+      case 2:
+      {
+        Scalar f(e[k-1]);
+        e[k-1] = 0.0;
+        for (j = k; j < p; ++j)
+        {
+          Scalar t(internal::hypot(m_sigma[j],f));
+          Scalar cs( m_sigma[j]/t);
+          Scalar sn(f/t);
+          m_sigma[j] = t;
+          f = -sn*e[j];
+          e[j] = cs*e[j];
+          if (wantu)
+          {
+            for (i = 0; i < m; ++i)
+            {
+              t = cs*m_matU(i,j) + sn*m_matU(i,k-1);
+              m_matU(i,k-1) = -sn*m_matU(i,j) + cs*m_matU(i,k-1);
+              m_matU(i,j) = t;
+            }
+          }
+        }
+      }
+      break;
+
+      // Perform one qr step.
+      case 3:
+      {
+        // Calculate the shift.
+        Scalar scale = (std::max)((std::max)((std::max)((std::max)(
+                        ei_abs(m_sigma[p-1]),ei_abs(m_sigma[p-2])),ei_abs(e[p-2])),
+                        ei_abs(m_sigma[k])),ei_abs(e[k]));
+        Scalar sp = m_sigma[p-1]/scale;
+        Scalar spm1 = m_sigma[p-2]/scale;
+        Scalar epm1 = e[p-2]/scale;
+        Scalar sk = m_sigma[k]/scale;
+        Scalar ek = e[k]/scale;
+        Scalar b = ((spm1 + sp)*(spm1 - sp) + epm1*epm1)/Scalar(2);
+        Scalar c = (sp*epm1)*(sp*epm1);
+        Scalar shift(0);
+        if ((b != 0.0) || (c != 0.0))
+        {
+          shift = ei_sqrt(b*b + c);
+          if (b < 0.0)
+            shift = -shift;
+          shift = c/(b + shift);
+        }
+        Scalar f = (sk + sp)*(sk - sp) + shift;
+        Scalar g = sk*ek;
+
+        // Chase zeros.
+
+        for (j = k; j < p-1; ++j)
+        {
+          Scalar t = internal::hypot(f,g);
+          Scalar cs = f/t;
+          Scalar sn = g/t;
+          if (j != k)
+            e[j-1] = t;
+          f = cs*m_sigma[j] + sn*e[j];
+          e[j] = cs*e[j] - sn*m_sigma[j];
+          g = sn*m_sigma[j+1];
+          m_sigma[j+1] = cs*m_sigma[j+1];
+          if (wantv)
+          {
+            for (i = 0; i < n; ++i)
+            {
+              t = cs*m_matV(i,j) + sn*m_matV(i,j+1);
+              m_matV(i,j+1) = -sn*m_matV(i,j) + cs*m_matV(i,j+1);
+              m_matV(i,j) = t;
+            }
+          }
+          t = internal::hypot(f,g);
+          cs = f/t;
+          sn = g/t;
+          m_sigma[j] = t;
+          f = cs*e[j] + sn*m_sigma[j+1];
+          m_sigma[j+1] = -sn*e[j] + cs*m_sigma[j+1];
+          g = sn*e[j+1];
+          e[j+1] = cs*e[j+1];
+          if (wantu && (j < m-1))
+          {
+            for (i = 0; i < m; ++i)
+            {
+              t = cs*m_matU(i,j) + sn*m_matU(i,j+1);
+              m_matU(i,j+1) = -sn*m_matU(i,j) + cs*m_matU(i,j+1);
+              m_matU(i,j) = t;
+            }
+          }
+        }
+        e[p-2] = f;
+        iter = iter + 1;
+      }
+      break;
+
+      // Convergence.
+      case 4:
+      {
+        // Make the singular values positive.
+        if (m_sigma[k] <= 0.0)
+        {
+          m_sigma[k] = m_sigma[k] < Scalar(0) ? -m_sigma[k] : Scalar(0);
+          if (wantv)
+            m_matV.col(k).start(pp+1) = -m_matV.col(k).start(pp+1);
+        }
+
+        // Order the singular values.
+        while (k < pp)
+        {
+          if (m_sigma[k] >= m_sigma[k+1])
+            break;
+          Scalar t = m_sigma[k];
+          m_sigma[k] = m_sigma[k+1];
+          m_sigma[k+1] = t;
+          if (wantv && (k < n-1))
+            m_matV.col(k).swap(m_matV.col(k+1));
+          if (wantu && (k < m-1))
+            m_matU.col(k).swap(m_matU.col(k+1));
+          ++k;
+        }
+        iter = 0;
+        p--;
+      }
+      break;
+    } // end big switch
+  } // end iterations
+}
+
+template<typename MatrixType>
+SVD<MatrixType>& SVD<MatrixType>::sort()
+{
+  int mu = m_matU.rows();
+  int mv = m_matV.rows();
+  int n  = m_matU.cols();
+
+  for (int i=0; i<n; ++i)
+  {
+    int  k = i;
+    Scalar p = m_sigma.coeff(i);
+
+    for (int j=i+1; j<n; ++j)
+    {
+      if (m_sigma.coeff(j) > p)
+      {
+        k = j;
+        p = m_sigma.coeff(j);
+      }
+    }
+    if (k != i)
+    {
+      m_sigma.coeffRef(k) = m_sigma.coeff(i);  // i.e.
+      m_sigma.coeffRef(i) = p;                 // swaps the i-th and the k-th elements
+
+      int j = mu;
+      for(int s=0; j!=0; ++s, --j)
+        std::swap(m_matU.coeffRef(s,i), m_matU.coeffRef(s,k));
+
+      j = mv;
+      for (int s=0; j!=0; ++s, --j)
+        std::swap(m_matV.coeffRef(s,i), m_matV.coeffRef(s,k));
+    }
+  }
+  return *this;
+}
+
+/** \returns the solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+  * The parts of the solution corresponding to zero singular values are ignored.
+  *
+  * \sa MatrixBase::svd(), LU::solve(), LLT::solve()
+  */
+template<typename MatrixType>
+template<typename OtherDerived, typename ResultType>
+bool SVD<MatrixType>::solve(const MatrixBase<OtherDerived> &b, ResultType* result) const
+{
+  const int rows = m_matU.rows();
+  ei_assert(b.rows() == rows);
+
+  Scalar maxVal = m_sigma.cwise().abs().maxCoeff();
+  for (int j=0; j<b.cols(); ++j)
+  {
+    Matrix<Scalar,MatrixUType::RowsAtCompileTime,1> aux = m_matU.transpose() * b.col(j);
+
+    for (int i = 0; i <m_matU.cols(); ++i)
+    {
+      Scalar si = m_sigma.coeff(i);
+      if (ei_isMuchSmallerThan(ei_abs(si),maxVal))
+        aux.coeffRef(i) = 0;
+      else
+        aux.coeffRef(i) /= si;
+    }
+
+    result->col(j) = m_matV * aux;
+  }
+  return true;
+}
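+
+/* A minimal sketch of solving A x = b through this decomposition; the matrices are
+   only for illustration (A must have at least as many rows as columns):
+
+     MatrixXd A = MatrixXd::Random(6,4);
+     VectorXd b = VectorXd::Random(6);
+     VectorXd x(4);
+     SVD<MatrixXd> svd(A);
+     svd.solve(b, &x); // near-zero singular values are ignored
+*/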
+
+/** Computes the polar decomposition of the matrix, as a product unitary x positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * Only for square matrices.
+  *
+  * \sa computePositiveUnitary(), computeRotationScaling()
+  */
+template<typename MatrixType>
+template<typename UnitaryType, typename PositiveType>
+void SVD<MatrixType>::computeUnitaryPositive(UnitaryType *unitary,
+                                             PositiveType *positive) const
+{
+  ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices");
+  if(unitary) *unitary = m_matU * m_matV.adjoint();
+  if(positive) *positive = m_matV * m_sigma.asDiagonal() * m_matV.adjoint();
+}
+
+/** Computes the polar decomposition of the matrix, as a product positive x unitary.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * Only for square matrices.
+  *
+  * \sa computeUnitaryPositive(), computeRotationScaling()
+  */
+template<typename MatrixType>
+template<typename UnitaryType, typename PositiveType>
+void SVD<MatrixType>::computePositiveUnitary(UnitaryType *positive,
+                                             PositiveType *unitary) const
+{
+  ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices");
+  if(unitary) *unitary = m_matU * m_matV.adjoint();
+  if(positive) *positive = m_matU * m_sigma.asDiagonal() * m_matU.adjoint();
+}
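+
+/* A minimal sketch of the two polar decompositions above for a square matrix;
+   the names are only for illustration:
+
+     Matrix3d A = Matrix3d::Random();
+     Matrix3d Q, P;
+     SVD<Matrix3d> svd(A);
+     svd.computeUnitaryPositive(&Q, &P); // A == Q * P, with Q unitary and P positive semi-definite
+     svd.computePositiveUnitary(&P, &Q); // A == P * Q
+*/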
+
+/** decomposes the matrix as a product rotation x scaling, the scaling being
+  * not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * This method requires the Geometry module.
+  *
+  * \sa computeScalingRotation(), computeUnitaryPositive()
+  */
+template<typename MatrixType>
+template<typename RotationType, typename ScalingType>
+void SVD<MatrixType>::computeRotationScaling(RotationType *rotation, ScalingType *scaling) const
+{
+  ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices");
+  Scalar x = (m_matU * m_matV.adjoint()).determinant(); // so x has absolute value 1
+  Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> sv(m_sigma);
+  sv.coeffRef(0) *= x;
+  if(scaling) scaling->lazyAssign(m_matV * sv.asDiagonal() * m_matV.adjoint());
+  if(rotation)
+  {
+    MatrixType m(m_matU);
+    m.col(0) /= x;
+    rotation->lazyAssign(m * m_matV.adjoint());
+  }
+}
+
+/** decomposes the matrix as a product scaling x rotation, the scaling being
+  * not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * This method requires the Geometry module.
+  *
+  * \sa computeRotationScaling(), computeUnitaryPositive()
+  */
+template<typename MatrixType>
+template<typename ScalingType, typename RotationType>
+void SVD<MatrixType>::computeScalingRotation(ScalingType *scaling, RotationType *rotation) const
+{
+  ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices");
+  Scalar x = (m_matU * m_matV.adjoint()).determinant(); // so x has absolute value 1
+  Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> sv(m_sigma);
+  sv.coeffRef(0) *= x;
+  if(scaling) scaling->lazyAssign(m_matU * sv.asDiagonal() * m_matU.adjoint());
+  if(rotation)
+  {
+    MatrixType m(m_matU);
+    m.col(0) /= x;
+    rotation->lazyAssign(m * m_matV.adjoint());
+  }
+}
+
+
+/** \svd_module
+  * \returns the SVD decomposition of \c *this
+  */
+template<typename Derived>
+inline SVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::svd() const
+{
+  return SVD<PlainObject>(derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN2_SVD_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/TriangularSolver.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/TriangularSolver.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/TriangularSolver.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/TriangularSolver.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/VectorBlock.h b/resources/3rdParty/eigen/Eigen/src/Eigen2Support/VectorBlock.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigen2Support/VectorBlock.h
rename to resources/3rdParty/eigen/Eigen/src/Eigen2Support/VectorBlock.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigenvalues/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Eigenvalues/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h
new file mode 100644
index 000000000..c4b8a308c
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -0,0 +1,319 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Claire Maurice
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_EIGEN_SOLVER_H
+#define EIGEN_COMPLEX_EIGEN_SOLVER_H
+
+#include "./ComplexSchur.h"
+
+namespace Eigen { 
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class ComplexEigenSolver
+  *
+  * \brief Computes eigenvalues and eigenvectors of general complex matrices
+  *
+  * \tparam _MatrixType the type of the matrix of which we are
+  * computing the eigendecomposition; this is expected to be an
+  * instantiation of the Matrix class template.
+  *
+  * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
+  * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v
+  * \f$.  If \f$ D \f$ is a diagonal matrix with the eigenvalues on
+  * the diagonal, and \f$ V \f$ is a matrix with the eigenvectors as
+  * its columns, then \f$ A V = V D \f$. The matrix \f$ V \f$ is
+  * almost always invertible, in which case we have \f$ A = V D V^{-1}
+  * \f$. This is called the eigendecomposition.
+  *
+  * The main function in this class is compute(), which computes the
+  * eigenvalues and eigenvectors of a given matrix. The
+  * documentation for that function contains an example showing the
+  * main features of the class.
+  *
+  * \sa class EigenSolver, class SelfAdjointEigenSolver
+  */
+template<typename _MatrixType> class ComplexEigenSolver
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type #MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    /** \brief Complex scalar type for #MatrixType.
+      *
+      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for vector of eigenvalues as returned by eigenvalues().
+      *
+      * This is a column vector with entries of type #ComplexScalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options&(~RowMajor), MaxColsAtCompileTime, 1> EigenvalueType;
+
+    /** \brief Type for matrix of eigenvectors as returned by eigenvectors().
+      *
+      * This is a square matrix with entries of type #ComplexScalar.
+      * The size is the same as the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorType;
+
+    /** \brief Default constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via compute().
+      */
+    ComplexEigenSolver()
+            : m_eivec(),
+              m_eivalues(),
+              m_schur(),
+              m_isInitialized(false),
+              m_eigenvectorsOk(false),
+              m_matX()
+    {}
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa ComplexEigenSolver()
+      */
+    ComplexEigenSolver(Index size)
+            : m_eivec(size, size),
+              m_eivalues(size),
+              m_schur(size),
+              m_isInitialized(false),
+              m_eigenvectorsOk(false),
+              m_matX(size, size)
+    {}
+
+    /** \brief Constructor; computes eigendecomposition of given matrix.
+      *
+      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed.
+      *
+      * This constructor calls compute() to compute the eigendecomposition.
+      */
+      ComplexEigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
+            : m_eivec(matrix.rows(),matrix.cols()),
+              m_eivalues(matrix.cols()),
+              m_schur(matrix.rows()),
+              m_isInitialized(false),
+              m_eigenvectorsOk(false),
+              m_matX(matrix.rows(),matrix.cols())
+    {
+      compute(matrix, computeEigenvectors);
+    }
+
+    /** \brief Returns the eigenvectors of given matrix.
+      *
+      * \returns  A const reference to the matrix whose columns are the eigenvectors.
+      *
+      * \pre Either the constructor
+      * ComplexEigenSolver(const MatrixType& matrix, bool) or the member
+      * function compute(const MatrixType& matrix, bool) has been called before
+      * to compute the eigendecomposition of a matrix, and
+      * \p computeEigenvectors was set to true (the default).
+      *
+      * This function returns a matrix whose columns are the eigenvectors. Column
+      * \f$ k \f$ is an eigenvector corresponding to eigenvalue number \f$ k
+      * \f$ as returned by eigenvalues().  The eigenvectors are normalized to
+      * have (Euclidean) norm equal to one. The matrix returned by this
+      * function is the matrix \f$ V \f$ in the eigendecomposition \f$ A = V D
+      * V^{-1} \f$, if it exists.
+      *
+      * Example: \include ComplexEigenSolver_eigenvectors.cpp
+      * Output: \verbinclude ComplexEigenSolver_eigenvectors.out
+      */
+    const EigenvectorType& eigenvectors() const
+    {
+      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+      return m_eivec;
+    }
+
+    /** \brief Returns the eigenvalues of given matrix.
+      *
+      * \returns A const reference to the column vector containing the eigenvalues.
+      *
+      * \pre Either the constructor
+      * ComplexEigenSolver(const MatrixType& matrix, bool) or the member
+      * function compute(const MatrixType& matrix, bool) has been called before
+      * to compute the eigendecomposition of a matrix.
+      *
+      * This function returns a column vector containing the
+      * eigenvalues. Eigenvalues are repeated according to their
+      * algebraic multiplicity, so there are as many eigenvalues as
+      * rows in the matrix. The eigenvalues are not sorted in any particular
+      * order.
+      *
+      * Example: \include ComplexEigenSolver_eigenvalues.cpp
+      * Output: \verbinclude ComplexEigenSolver_eigenvalues.out
+      */
+    const EigenvalueType& eigenvalues() const
+    {
+      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+      return m_eivalues;
+    }
+
+    /** \brief Computes eigendecomposition of given matrix.
+      *
+      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed.
+      * \returns    Reference to \c *this
+      *
+      * This function computes the eigenvalues of the complex matrix \p matrix.
+      * The eigenvalues() function can be used to retrieve them.  If
+      * \p computeEigenvectors is true, then the eigenvectors are also computed
+      * and can be retrieved by calling eigenvectors().
+      *
+      * The matrix is first reduced to Schur form using the
+      * ComplexSchur class. The Schur decomposition is then used to
+      * compute the eigenvalues and eigenvectors.
+      *
+      * The cost of the computation is dominated by the cost of the
+      * Schur decomposition, which is \f$ O(n^3) \f$ where \f$ n \f$
+      * is the size of the matrix.
+      *
+      * Example: \include ComplexEigenSolver_compute.cpp
+      * Output: \verbinclude ComplexEigenSolver_compute.out
+      */
+    ComplexEigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if the computation was successful, \c NoConvergence otherwise.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+      return m_schur.info();
+    }
+
+  protected:
+    EigenvectorType m_eivec;
+    EigenvalueType m_eivalues;
+    ComplexSchur<MatrixType> m_schur;
+    bool m_isInitialized;
+    bool m_eigenvectorsOk;
+    EigenvectorType m_matX;
+
+  private:
+    void doComputeEigenvectors(RealScalar matrixnorm);
+    void sortEigenvalues(bool computeEigenvectors);
+};
+
+
+template<typename MatrixType>
+ComplexEigenSolver<MatrixType>& ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
+{
+  // this code is inspired from Jampack
+  assert(matrix.cols() == matrix.rows());
+
+  // Do a complex Schur decomposition, A = U T U^*
+  // The eigenvalues are on the diagonal of T.
+  m_schur.compute(matrix, computeEigenvectors);
+
+  if(m_schur.info() == Success)
+  {
+    m_eivalues = m_schur.matrixT().diagonal();
+    if(computeEigenvectors)
+      doComputeEigenvectors(matrix.norm());
+    sortEigenvalues(computeEigenvectors);
+  }
+
+  m_isInitialized = true;
+  m_eigenvectorsOk = computeEigenvectors;
+  return *this;
+}
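+
+/* A minimal sketch of the eigendecomposition computed above; the matrix is only
+   for illustration:
+
+     MatrixXcd A = MatrixXcd::Random(4,4);
+     ComplexEigenSolver<MatrixXcd> es(A);
+     if (es.info() == Success)
+     {
+       MatrixXcd V = es.eigenvectors();
+       MatrixXcd D = es.eigenvalues().asDiagonal();
+       // then A * V is (approximately) equal to V * D
+     }
+*/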
+
+
+template<typename MatrixType>
+void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(RealScalar matrixnorm)
+{
+  const Index n = m_eivalues.size();
+
+  // Compute X such that T = X D X^(-1), where D is the diagonal of T.
+  // The matrix X is unit triangular.
+  m_matX = EigenvectorType::Zero(n, n);
+  for(Index k=n-1 ; k>=0 ; k--)
+  {
+    m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0);
+    // Compute X(i,k) using the (i,k) entry of the equation X T = D X
+    for(Index i=k-1 ; i>=0 ; i--)
+    {
+      m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k);
+      if(k-i-1>0)
+        m_matX.coeffRef(i,k) -= (m_schur.matrixT().row(i).segment(i+1,k-i-1) * m_matX.col(k).segment(i+1,k-i-1)).value();
+      ComplexScalar z = m_schur.matrixT().coeff(i,i) - m_schur.matrixT().coeff(k,k);
+      if(z==ComplexScalar(0))
+      {
+        // If the i-th and k-th eigenvalue are equal, then z equals 0.
+        // Use a small value instead, to prevent division by zero.
+        internal::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;
+      }
+      m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z;
+    }
+  }
+
+  // Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)
+  m_eivec.noalias() = m_schur.matrixU() * m_matX;
+  // .. and normalize the eigenvectors
+  for(Index k=0 ; k<n ; k++)
+  {
+    m_eivec.col(k).normalize();
+  }
+}
+
+
+template<typename MatrixType>
+void ComplexEigenSolver<MatrixType>::sortEigenvalues(bool computeEigenvectors)
+{
+  const Index n =  m_eivalues.size();
+  for (Index i=0; i<n; i++)
+  {
+    Index k;
+    m_eivalues.cwiseAbs().tail(n-i).minCoeff(&k);
+    if (k != 0)
+    {
+      k += i;
+      std::swap(m_eivalues[k],m_eivalues[i]);
+      if(computeEigenvectors)
+        m_eivec.col(i).swap(m_eivec.col(k));
+    }
+  }
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_EIGEN_SOLVER_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigenvalues/ComplexSchur.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/ComplexSchur.h
new file mode 100644
index 000000000..55aeedb90
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -0,0 +1,398 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Claire Maurice
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_SCHUR_H
+#define EIGEN_COMPLEX_SCHUR_H
+
+#include "./HessenbergDecomposition.h"
+
+namespace Eigen { 
+
+namespace internal {
+template<typename MatrixType, bool IsComplex> struct complex_schur_reduce_to_hessenberg;
+}
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class ComplexSchur
+  *
+  * \brief Performs a complex Schur decomposition of a real or complex square matrix
+  *
+  * \tparam _MatrixType the type of the matrix of which we are
+  * computing the Schur decomposition; this is expected to be an
+  * instantiation of the Matrix class template.
+  *
+  * Given a real or complex square matrix A, this class computes the
+  * Schur decomposition: \f$ A = U T U^*\f$ where U is a unitary
+  * complex matrix, and T is a complex upper triangular matrix.  The
+  * diagonal of the matrix T corresponds to the eigenvalues of the
+  * matrix A.
+  *
+  * Call the function compute() to compute the Schur decomposition of
+  * a given matrix. Alternatively, you can use the 
+  * ComplexSchur(const MatrixType&, bool) constructor which computes
+  * the Schur decomposition at construction time. Once the
+  * decomposition is computed, you can use the matrixU() and matrixT()
+  * functions to retrieve the matrices U and T in the decomposition.
+  *
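+  * For illustration only (a minimal sketch, not taken from the official
+  * Eigen documentation), usage with a dynamically sized complex matrix
+  * might look like:
+  * \code
+  * MatrixXcd A = MatrixXcd::Random(4,4);
+  * ComplexSchur<MatrixXcd> schur(A);      // computes both T and U
+  * MatrixXcd T = schur.matrixT();         // upper triangular factor
+  * MatrixXcd U = schur.matrixU();         // unitary factor
+  * // A is recovered (up to rounding) as U * T * U.adjoint()
+  * \endcode
+  *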
+  * \note This code is inspired by Jampack
+  *
+  * \sa class RealSchur, class EigenSolver, class ComplexEigenSolver
+  */
+template<typename _MatrixType> class ComplexSchur
+{
+  public:
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type \p _MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    /** \brief Complex scalar type for \p _MatrixType. 
+      *
+      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for the matrices in the Schur decomposition.
+      *
+      * This is a square matrix with entries of type #ComplexScalar. 
+      * The size is the same as the size of \p _MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrixType;
+
+    /** \brief Default constructor.
+      *
+      * \param [in] size  Positive integer, size of the matrix whose Schur decomposition will be computed.
+      *
+      * The default constructor is useful in cases in which the user
+      * intends to perform decompositions via compute().  The \p size
+      * parameter is only used as a hint. It is not an error to give a
+      * wrong \p size, but it may impair performance.
+      *
+      * \sa compute() for an example.
+      */
+    ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
+      : m_matT(size,size),
+        m_matU(size,size),
+        m_hess(size),
+        m_isInitialized(false),
+        m_matUisUptodate(false)
+    {}
+
+    /** \brief Constructor; computes Schur decomposition of given matrix. 
+      * 
+      * \param[in]  matrix    Square matrix whose Schur decomposition is to be computed.
+      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
+      *
+      * This constructor calls compute() to compute the Schur decomposition.
+      *
+      * \sa matrixT() and matrixU() for examples.
+      */
+    ComplexSchur(const MatrixType& matrix, bool computeU = true)
+            : m_matT(matrix.rows(),matrix.cols()),
+              m_matU(matrix.rows(),matrix.cols()),
+              m_hess(matrix.rows()),
+              m_isInitialized(false),
+              m_matUisUptodate(false)
+    {
+      compute(matrix, computeU);
+    }
+
+    /** \brief Returns the unitary matrix in the Schur decomposition. 
+      *
+      * \returns A const reference to the matrix U.
+      *
+      * It is assumed that either the constructor
+      * ComplexSchur(const MatrixType& matrix, bool computeU) or the
+      * member function compute(const MatrixType& matrix, bool computeU)
+      * has been called before to compute the Schur decomposition of a
+      * matrix, and that \p computeU was set to true (the default
+      * value).
+      *
+      * Example: \include ComplexSchur_matrixU.cpp
+      * Output: \verbinclude ComplexSchur_matrixU.out
+      */
+    const ComplexMatrixType& matrixU() const
+    {
+      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
+      eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the ComplexSchur decomposition.");
+      return m_matU;
+    }
+
+    /** \brief Returns the triangular matrix in the Schur decomposition. 
+      *
+      * \returns A const reference to the matrix T.
+      *
+      * It is assumed that either the constructor
+      * ComplexSchur(const MatrixType& matrix, bool computeU) or the
+      * member function compute(const MatrixType& matrix, bool computeU)
+      * has been called before to compute the Schur decomposition of a
+      * matrix.
+      *
+      * Note that this function returns a plain square matrix. If you want to reference
+      * only the upper triangular part, use:
+      * \code schur.matrixT().triangularView<Upper>() \endcode 
+      *
+      * Example: \include ComplexSchur_matrixT.cpp
+      * Output: \verbinclude ComplexSchur_matrixT.out
+      */
+    const ComplexMatrixType& matrixT() const
+    {
+      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
+      return m_matT;
+    }
+
+    /** \brief Computes Schur decomposition of given matrix. 
+      * 
+      * \param[in]  matrix  Square matrix whose Schur decomposition is to be computed.
+      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
+      * \returns    Reference to \c *this
+      *
+      * The Schur decomposition is computed by first reducing the
+      * matrix to Hessenberg form using the class
+      * HessenbergDecomposition. The Hessenberg matrix is then reduced
+      * to triangular form by performing QR iterations with a single
+      * shift. The cost of computing the Schur decomposition depends
+      * on the number of iterations; as a rough guide, it may be taken
+      * to be \f$25n^3\f$ complex flops, or \f$10n^3\f$ complex flops
+      * if \a computeU is false.
+      *
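+      * For instance (purely illustrative, with \c A some complex matrix and
+      * \c schur a ComplexSchur instance), computing only T would be:
+      * \code
+      * schur.compute(A, false);   // T only; U is not accumulated
+      * \endcode
+      *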
+      * Example: \include ComplexSchur_compute.cpp
+      * Output: \verbinclude ComplexSchur_compute.out
+      */
+    ComplexSchur& compute(const MatrixType& matrix, bool computeU = true);
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
+      return m_info;
+    }
+
+    /** \brief Maximum number of iterations.
+      *
+      * Maximum number of iterations allowed for an eigenvalue to converge. 
+      */
+    static const int m_maxIterations = 30;
+
+  protected:
+    ComplexMatrixType m_matT, m_matU;
+    HessenbergDecomposition<MatrixType> m_hess;
+    ComputationInfo m_info;
+    bool m_isInitialized;
+    bool m_matUisUptodate;
+
+  private:  
+    bool subdiagonalEntryIsNeglegible(Index i);
+    ComplexScalar computeShift(Index iu, Index iter);
+    void reduceToTriangularForm(bool computeU);
+    friend struct internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>;
+};
+
+/** If m_matT(i+1,i) is negligible in floating point arithmetic
+  * compared to m_matT(i,i) and m_matT(i+1,i+1), then set it to zero and
+  * return true, else return false. */
+template<typename MatrixType>
+inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
+{
+  RealScalar d = internal::norm1(m_matT.coeff(i,i)) + internal::norm1(m_matT.coeff(i+1,i+1));
+  RealScalar sd = internal::norm1(m_matT.coeff(i+1,i));
+  if (internal::isMuchSmallerThan(sd, d, NumTraits<RealScalar>::epsilon()))
+  {
+    m_matT.coeffRef(i+1,i) = ComplexScalar(0);
+    return true;
+  }
+  return false;
+}
+
+
+/** Compute the shift in the current QR iteration. */
+template<typename MatrixType>
+typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter)
+{
+  if (iter == 10 || iter == 20) 
+  {
+    // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f
+    return internal::abs(internal::real(m_matT.coeff(iu,iu-1))) + internal::abs(internal::real(m_matT.coeff(iu-1,iu-2)));
+  }
+
+  // compute the shift as one of the eigenvalues of t, the 2x2
+  // diagonal block on the bottom of the active submatrix
+  Matrix<ComplexScalar,2,2> t = m_matT.template block<2,2>(iu-1,iu-1);
+  RealScalar normt = t.cwiseAbs().sum();
+  t /= normt;     // the normalization by normt is to avoid under/overflow
+
+  ComplexScalar b = t.coeff(0,1) * t.coeff(1,0);
+  ComplexScalar c = t.coeff(0,0) - t.coeff(1,1);
+  ComplexScalar disc = sqrt(c*c + RealScalar(4)*b);
+  ComplexScalar det = t.coeff(0,0) * t.coeff(1,1) - b;
+  ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1);
+  ComplexScalar eival1 = (trace + disc) / RealScalar(2);
+  ComplexScalar eival2 = (trace - disc) / RealScalar(2);
+
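+  // The product of the two eigenvalues equals det, so recomputing the smaller
+  // one as det / (larger one) reduces cancellation error.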
+  if(internal::norm1(eival1) > internal::norm1(eival2))
+    eival2 = det / eival1;
+  else
+    eival1 = det / eival2;
+
+  // choose the eigenvalue closest to the bottom entry of the diagonal
+  if(internal::norm1(eival1-t.coeff(1,1)) < internal::norm1(eival2-t.coeff(1,1)))
+    return normt * eival1;
+  else
+    return normt * eival2;
+}
+
+
+template<typename MatrixType>
+ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
+{
+  m_matUisUptodate = false;
+  eigen_assert(matrix.cols() == matrix.rows());
+
+  if(matrix.cols() == 1)
+  {
+    m_matT = matrix.template cast<ComplexScalar>();
+    if(computeU)  m_matU = ComplexMatrixType::Identity(1,1);
+    m_info = Success;
+    m_isInitialized = true;
+    m_matUisUptodate = computeU;
+    return *this;
+  }
+
+  internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>::run(*this, matrix, computeU);
+  reduceToTriangularForm(computeU);
+  return *this;
+}
+
+namespace internal {
+
+/* Reduce given matrix to Hessenberg form */
+template<typename MatrixType, bool IsComplex>
+struct complex_schur_reduce_to_hessenberg
+{
+  // this is the implementation for the case IsComplex = true
+  static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
+  {
+    _this.m_hess.compute(matrix);
+    _this.m_matT = _this.m_hess.matrixH();
+    if(computeU)  _this.m_matU = _this.m_hess.matrixQ();
+  }
+};
+
+template<typename MatrixType>
+struct complex_schur_reduce_to_hessenberg<MatrixType, false>
+{
+  static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
+  {
+    typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;
+    typedef typename ComplexSchur<MatrixType>::ComplexMatrixType ComplexMatrixType;
+
+    // Note: m_hess is over RealScalar; m_matT and m_matU are over ComplexScalar
+    _this.m_hess.compute(matrix);
+    _this.m_matT = _this.m_hess.matrixH().template cast<ComplexScalar>();
+    if(computeU)  
+    {
+      // This may cause an allocation which seems to be avoidable
+      MatrixType Q = _this.m_hess.matrixQ(); 
+      _this.m_matU = Q.template cast<ComplexScalar>();
+    }
+  }
+};
+
+} // end namespace internal
+
+// Reduce the Hessenberg matrix m_matT to triangular form by QR iteration.
+template<typename MatrixType>
+void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
+{  
+  // The matrix m_matT is divided in three parts. 
+  // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. 
+  // Rows il,...,iu are the part we are working on (the active submatrix).
+  // Rows iu+1,...,end are already brought in triangular form.
+  Index iu = m_matT.cols() - 1;
+  Index il;
+  Index iter = 0; // number of iterations we are working on the (iu,iu) element
+  Index totalIter = 0; // number of iterations for whole matrix
+
+  while(true)
+  {
+    // find iu, the bottom row of the active submatrix
+    while(iu > 0)
+    {
+      if(!subdiagonalEntryIsNeglegible(iu-1)) break;
+      iter = 0;
+      --iu;
+    }
+
+    // if iu is zero then we are done; the whole matrix is triangularized
+    if(iu==0) break;
+
+    // if we spent too many iterations, we give up
+    iter++;
+    totalIter++;
+    if(totalIter > m_maxIterations * m_matT.cols()) break;
+
+    // find il, the top row of the active submatrix
+    il = iu-1;
+    while(il > 0 && !subdiagonalEntryIsNeglegible(il-1))
+    {
+      --il;
+    }
+
+    /* perform the QR step using Givens rotations. The first rotation
+       creates a bulge; the (il+2,il) element becomes nonzero. This
+       bulge is chased down to the bottom of the active submatrix. */
+
+    ComplexScalar shift = computeShift(iu, iter);
+    JacobiRotation<ComplexScalar> rot;
+    rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il));
+    m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint());
+    m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
+    if(computeU) m_matU.applyOnTheRight(il, il+1, rot);
+
+    for(Index i=il+1 ; i<iu ; i++)
+    {
+      rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));
+      m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
+      m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint());
+      m_matT.topRows((std::min)(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
+      if(computeU) m_matU.applyOnTheRight(i, i+1, rot);
+    }
+  }
+
+  if(totalIter <= m_maxIterations * m_matT.cols())
+    m_info = Success;
+  else
+    m_info = NoConvergence;
+
+  m_isInitialized = true;
+  m_matUisUptodate = computeU;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_SCHUR_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigenvalues/EigenSolver.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/EigenSolver.h
new file mode 100644
index 000000000..c16ff2b74
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/EigenSolver.h
@@ -0,0 +1,579 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EIGENSOLVER_H
+#define EIGEN_EIGENSOLVER_H
+
+#include "./RealSchur.h"
+
+namespace Eigen { 
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class EigenSolver
+  *
+  * \brief Computes eigenvalues and eigenvectors of general matrices
+  *
+  * \tparam _MatrixType the type of the matrix of which we are computing the
+  * eigendecomposition; this is expected to be an instantiation of the Matrix
+  * class template. Currently, only real matrices are supported.
+  *
+  * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
+  * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v \f$.  If
+  * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
+  * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
+  * V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
+  * have \f$ A = V D V^{-1} \f$. This is called the eigendecomposition.
+  *
+  * The eigenvalues and eigenvectors of a matrix may be complex, even when the
+  * matrix is real. However, we can choose real matrices \f$ V \f$ and \f$ D
+  * \f$ satisfying \f$ A V = V D \f$, just like the eigendecomposition, if the
+  * matrix \f$ D \f$ is not required to be diagonal, but if it is allowed to
+  * have blocks of the form
+  * \f[ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f]
+  * (where \f$ u \f$ and \f$ v \f$ are real numbers) on the diagonal.  These
+  * blocks correspond to complex eigenvalue pairs \f$ u \pm iv \f$. We call
+  * this variant of the eigendecomposition the pseudo-eigendecomposition.
+  *
+  * Call the function compute() to compute the eigenvalues and eigenvectors of
+  * a given matrix. Alternatively, you can use the 
+  * EigenSolver(const MatrixType&, bool) constructor which computes the
+  * eigenvalues and eigenvectors at construction time. Once the eigenvalue and
+  * eigenvectors are computed, they can be retrieved with the eigenvalues() and
+  * eigenvectors() functions. The pseudoEigenvalueMatrix() and
+  * pseudoEigenvectors() methods allow the construction of the
+  * pseudo-eigendecomposition.
+  *
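+  * For illustration only, a minimal sketch assuming a real, dynamically
+  * sized matrix:
+  * \code
+  * MatrixXd A = MatrixXd::Random(3,3);
+  * EigenSolver<MatrixXd> es(A);                 // eigenvalues and eigenvectors
+  * VectorXcd lambda = es.eigenvalues();         // possibly complex
+  * MatrixXcd V = es.eigenvectors();             // columns are unit-norm eigenvectors
+  * // A * V.col(0) should approximately equal lambda(0) * V.col(0)
+  * \endcode
+  *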
+  * The documentation for EigenSolver(const MatrixType&, bool) contains an
+  * example of the typical use of this class.
+  *
+  * \note The implementation is adapted from
+  * <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
+  * Their code is based on EISPACK.
+  *
+  * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
+  */
+template<typename _MatrixType> class EigenSolver
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type #MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    /** \brief Complex scalar type for #MatrixType. 
+      *
+      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for vector of eigenvalues as returned by eigenvalues(). 
+      *
+      * This is a column vector with entries of type #ComplexScalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
+
+    /** \brief Type for matrix of eigenvectors as returned by eigenvectors(). 
+      *
+      * This is a square matrix with entries of type #ComplexScalar. 
+      * The size is the same as the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
+
+    /** \brief Default constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via EigenSolver::compute(const MatrixType&, bool).
+      *
+      * \sa compute() for an example.
+      */
+    EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_realSchur(), m_matT(), m_tmp() {}
+
+    /** \brief Default constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa EigenSolver()
+      */
+    EigenSolver(Index size)
+      : m_eivec(size, size),
+        m_eivalues(size),
+        m_isInitialized(false),
+        m_eigenvectorsOk(false),
+        m_realSchur(size),
+        m_matT(size, size), 
+        m_tmp(size)
+    {}
+
+    /** \brief Constructor; computes eigendecomposition of given matrix. 
+      * 
+      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed. 
+      *
+      * This constructor calls compute() to compute the eigenvalues
+      * and eigenvectors.
+      *
+      * Example: \include EigenSolver_EigenSolver_MatrixType.cpp
+      * Output: \verbinclude EigenSolver_EigenSolver_MatrixType.out
+      *
+      * \sa compute()
+      */
+    EigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
+      : m_eivec(matrix.rows(), matrix.cols()),
+        m_eivalues(matrix.cols()),
+        m_isInitialized(false),
+        m_eigenvectorsOk(false),
+        m_realSchur(matrix.cols()),
+        m_matT(matrix.rows(), matrix.cols()), 
+        m_tmp(matrix.cols())
+    {
+      compute(matrix, computeEigenvectors);
+    }
+
+    /** \brief Returns the eigenvectors of given matrix. 
+      *
+      * \returns  %Matrix whose columns are the (possibly complex) eigenvectors.
+      *
+      * \pre Either the constructor 
+      * EigenSolver(const MatrixType&,bool) or the member function
+      * compute(const MatrixType&, bool) has been called before, and
+      * \p computeEigenvectors was set to true (the default).
+      *
+      * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
+      * to eigenvalue number \f$ k \f$ as returned by eigenvalues().  The
+      * eigenvectors are normalized to have (Euclidean) norm equal to one. The
+      * matrix returned by this function is the matrix \f$ V \f$ in the
+      * eigendecomposition \f$ A = V D V^{-1} \f$, if it exists.
+      *
+      * Example: \include EigenSolver_eigenvectors.cpp
+      * Output: \verbinclude EigenSolver_eigenvectors.out
+      *
+      * \sa eigenvalues(), pseudoEigenvectors()
+      */
+    EigenvectorsType eigenvectors() const;
+
+    /** \brief Returns the pseudo-eigenvectors of given matrix. 
+      *
+      * \returns  Const reference to matrix whose columns are the pseudo-eigenvectors.
+      *
+      * \pre Either the constructor 
+      * EigenSolver(const MatrixType&,bool) or the member function
+      * compute(const MatrixType&, bool) has been called before, and
+      * \p computeEigenvectors was set to true (the default).
+      *
+      * The real matrix \f$ V \f$ returned by this function and the
+      * block-diagonal matrix \f$ D \f$ returned by pseudoEigenvalueMatrix()
+      * satisfy \f$ AV = VD \f$.
+      *
+      * Example: \include EigenSolver_pseudoEigenvectors.cpp
+      * Output: \verbinclude EigenSolver_pseudoEigenvectors.out
+      *
+      * \sa pseudoEigenvalueMatrix(), eigenvectors()
+      */
+    const MatrixType& pseudoEigenvectors() const
+    {
+      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+      return m_eivec;
+    }
+
+    /** \brief Returns the block-diagonal matrix in the pseudo-eigendecomposition.
+      *
+      * \returns  A block-diagonal matrix.
+      *
+      * \pre Either the constructor 
+      * EigenSolver(const MatrixType&,bool) or the member function
+      * compute(const MatrixType&, bool) has been called before.
+      *
+      * The matrix \f$ D \f$ returned by this function is real and
+      * block-diagonal. The blocks on the diagonal are either 1-by-1 or 2-by-2
+      * blocks of the form
+      * \f$ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$.
+      * These blocks are not sorted in any particular order.
+      * The matrix \f$ D \f$ and the matrix \f$ V \f$ returned by
+      * pseudoEigenvectors() satisfy \f$ AV = VD \f$.
+      *
+      * \sa pseudoEigenvectors() for an example, eigenvalues()
+      */
+    MatrixType pseudoEigenvalueMatrix() const;
+
+    /** \brief Returns the eigenvalues of given matrix. 
+      *
+      * \returns A const reference to the column vector containing the eigenvalues.
+      *
+      * \pre Either the constructor 
+      * EigenSolver(const MatrixType&,bool) or the member function
+      * compute(const MatrixType&, bool) has been called before.
+      *
+      * The eigenvalues are repeated according to their algebraic multiplicity,
+      * so there are as many eigenvalues as rows in the matrix. The eigenvalues 
+      * are not sorted in any particular order.
+      *
+      * Example: \include EigenSolver_eigenvalues.cpp
+      * Output: \verbinclude EigenSolver_eigenvalues.out
+      *
+      * \sa eigenvectors(), pseudoEigenvalueMatrix(),
+      *     MatrixBase::eigenvalues()
+      */
+    const EigenvalueType& eigenvalues() const
+    {
+      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+      return m_eivalues;
+    }
+
+    /** \brief Computes eigendecomposition of given matrix. 
+      * 
+      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed. 
+      * \returns    Reference to \c *this
+      *
+      * This function computes the eigenvalues of the real matrix \p matrix.
+      * The eigenvalues() function can be used to retrieve them.  If 
+      * \p computeEigenvectors is true, then the eigenvectors are also computed
+      * and can be retrieved by calling eigenvectors().
+      *
+      * The matrix is first reduced to real Schur form using the RealSchur
+      * class. The Schur decomposition is then used to compute the eigenvalues
+      * and eigenvectors.
+      *
+      * The cost of the computation is dominated by the cost of the
+      * Schur decomposition, which is very approximately \f$ 25n^3 \f$
+      * (where \f$ n \f$ is the size of the matrix) if \p computeEigenvectors 
+      * is true, and \f$ 10n^3 \f$ if \p computeEigenvectors is false.
+      *
+      * This method reuses the allocated data in the EigenSolver object.
+      *
+      * Example: \include EigenSolver_compute.cpp
+      * Output: \verbinclude EigenSolver_compute.out
+      */
+    EigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
+
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+      return m_realSchur.info();
+    }
+
+  private:
+    void doComputeEigenvectors();
+
+  protected:
+    MatrixType m_eivec;
+    EigenvalueType m_eivalues;
+    bool m_isInitialized;
+    bool m_eigenvectorsOk;
+    RealSchur<MatrixType> m_realSchur;
+    MatrixType m_matT;
+
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+    ColumnVectorType m_tmp;
+};
+
+template<typename MatrixType>
+MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
+{
+  eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+  Index n = m_eivalues.rows();
+  MatrixType matD = MatrixType::Zero(n,n);
+  for (Index i=0; i<n; ++i)
+  {
+    if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i))))
+      matD.coeffRef(i,i) = internal::real(m_eivalues.coeff(i));
+    else
+    {
+      matD.template block<2,2>(i,i) <<  internal::real(m_eivalues.coeff(i)), internal::imag(m_eivalues.coeff(i)),
+                                       -internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i));
+      ++i;
+    }
+  }
+  return matD;
+}
+
+template<typename MatrixType>
+typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const
+{
+  eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+  eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+  Index n = m_eivec.cols();
+  EigenvectorsType matV(n,n);
+  for (Index j=0; j<n; ++j)
+  {
+    if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(j)), internal::real(m_eivalues.coeff(j))) || j+1==n)
+    {
+      // we have a real eigenvalue
+      matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
+      matV.col(j).normalize();
+    }
+    else
+    {
+      // we have a pair of complex eigenvalues
+      for (Index i=0; i<n; ++i)
+      {
+        matV.coeffRef(i,j)   = ComplexScalar(m_eivec.coeff(i,j),  m_eivec.coeff(i,j+1));
+        matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
+      }
+      matV.col(j).normalize();
+      matV.col(j+1).normalize();
+      ++j;
+    }
+  }
+  return matV;
+}
+
+template<typename MatrixType>
+EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
+{
+  assert(matrix.cols() == matrix.rows());
+
+  // Reduce to real Schur form.
+  m_realSchur.compute(matrix, computeEigenvectors);
+  if (m_realSchur.info() == Success)
+  {
+    m_matT = m_realSchur.matrixT();
+    if (computeEigenvectors)
+      m_eivec = m_realSchur.matrixU();
+  
+    // Compute eigenvalues from matT
+    m_eivalues.resize(matrix.cols());
+    Index i = 0;
+    while (i < matrix.cols()) 
+    {
+      if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0)) 
+      {
+        m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
+        ++i;
+      }
+      else
+      {
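+        // 2x2 diagonal block with a complex conjugate pair of eigenvalues:
+        // they are (m_matT(i,i) + m_matT(i+1,i+1))/2 +/- i*z, with z as below.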
+        Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
+        Scalar z = internal::sqrt(internal::abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
+        m_eivalues.coeffRef(i)   = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
+        m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
+        i += 2;
+      }
+    }
+    
+    // Compute eigenvectors.
+    if (computeEigenvectors)
+      doComputeEigenvectors();
+  }
+
+  m_isInitialized = true;
+  m_eigenvectorsOk = computeEigenvectors;
+
+  return *this;
+}
+
+// Complex scalar division.
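+// The division is carried out with the usual scaling trick (divide through by
+// the larger of |yr| and |yi|) to avoid overflow/underflow in the
+// intermediate products.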
+template<typename Scalar>
+std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
+{
+  Scalar r,d;
+  if (internal::abs(yr) > internal::abs(yi))
+  {
+      r = yi/yr;
+      d = yr + r*yi;
+      return std::complex<Scalar>((xr + r*xi)/d, (xi - r*xr)/d);
+  }
+  else
+  {
+      r = yr/yi;
+      d = yi + r*yr;
+      return std::complex<Scalar>((r*xr + xi)/d, (r*xi - xr)/d);
+  }
+}
+
+
+template<typename MatrixType>
+void EigenSolver<MatrixType>::doComputeEigenvectors()
+{
+  const Index size = m_eivec.cols();
+  const Scalar eps = NumTraits<Scalar>::epsilon();
+
+  // inefficient! this is already computed in RealSchur
+  Scalar norm(0);
+  for (Index j = 0; j < size; ++j)
+  {
+    norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
+  }
+  
+  // Backsubstitute to find vectors of upper triangular form
+  if (norm == 0.0)
+  {
+    return;
+  }
+
+  for (Index n = size-1; n >= 0; n--)
+  {
+    Scalar p = m_eivalues.coeff(n).real();
+    Scalar q = m_eivalues.coeff(n).imag();
+
+    // Scalar vector
+    if (q == Scalar(0))
+    {
+      Scalar lastr(0), lastw(0);
+      Index l = n;
+
+      m_matT.coeffRef(n,n) = 1.0;
+      for (Index i = n-1; i >= 0; i--)
+      {
+        Scalar w = m_matT.coeff(i,i) - p;
+        Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
+
+        if (m_eivalues.coeff(i).imag() < 0.0)
+        {
+          lastw = w;
+          lastr = r;
+        }
+        else
+        {
+          l = i;
+          if (m_eivalues.coeff(i).imag() == 0.0)
+          {
+            if (w != 0.0)
+              m_matT.coeffRef(i,n) = -r / w;
+            else
+              m_matT.coeffRef(i,n) = -r / (eps * norm);
+          }
+          else // Solve real equations
+          {
+            Scalar x = m_matT.coeff(i,i+1);
+            Scalar y = m_matT.coeff(i+1,i);
+            Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
+            Scalar t = (x * lastr - lastw * r) / denom;
+            m_matT.coeffRef(i,n) = t;
+            if (internal::abs(x) > internal::abs(lastw))
+              m_matT.coeffRef(i+1,n) = (-r - w * t) / x;
+            else
+              m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw;
+          }
+
+          // Overflow control
+          Scalar t = internal::abs(m_matT.coeff(i,n));
+          if ((eps * t) * t > Scalar(1))
+            m_matT.col(n).tail(size-i) /= t;
+        }
+      }
+    }
+    else if (q < Scalar(0) && n > 0) // Complex vector
+    {
+      Scalar lastra(0), lastsa(0), lastw(0);
+      Index l = n-1;
+
+      // Last vector component imaginary so matrix is triangular
+      if (internal::abs(m_matT.coeff(n,n-1)) > internal::abs(m_matT.coeff(n-1,n)))
+      {
+        m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1);
+        m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1);
+      }
+      else
+      {
+        std::complex<Scalar> cc = cdiv<Scalar>(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q);
+        m_matT.coeffRef(n-1,n-1) = internal::real(cc);
+        m_matT.coeffRef(n-1,n) = internal::imag(cc);
+      }
+      m_matT.coeffRef(n,n-1) = 0.0;
+      m_matT.coeffRef(n,n) = 1.0;
+      for (Index i = n-2; i >= 0; i--)
+      {
+        Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1));
+        Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
+        Scalar w = m_matT.coeff(i,i) - p;
+
+        if (m_eivalues.coeff(i).imag() < 0.0)
+        {
+          lastw = w;
+          lastra = ra;
+          lastsa = sa;
+        }
+        else
+        {
+          l = i;
+          if (m_eivalues.coeff(i).imag() == RealScalar(0))
+          {
+            std::complex<Scalar> cc = cdiv(-ra,-sa,w,q);
+            m_matT.coeffRef(i,n-1) = internal::real(cc);
+            m_matT.coeffRef(i,n) = internal::imag(cc);
+          }
+          else
+          {
+            // Solve complex equations
+            Scalar x = m_matT.coeff(i,i+1);
+            Scalar y = m_matT.coeff(i+1,i);
+            Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
+            Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
+            if ((vr == 0.0) && (vi == 0.0))
+              vr = eps * norm * (internal::abs(w) + internal::abs(q) + internal::abs(x) + internal::abs(y) + internal::abs(lastw));
+
+            std::complex<Scalar> cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi);
+            m_matT.coeffRef(i,n-1) = internal::real(cc);
+            m_matT.coeffRef(i,n) = internal::imag(cc);
+            if (internal::abs(x) > (internal::abs(lastw) + internal::abs(q)))
+            {
+              m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x;
+              m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x;
+            }
+            else
+            {
+              cc = cdiv(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n),lastw,q);
+              m_matT.coeffRef(i+1,n-1) = internal::real(cc);
+              m_matT.coeffRef(i+1,n) = internal::imag(cc);
+            }
+          }
+
+          // Overflow control
+          using std::max;
+          Scalar t = (max)(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n)));
+          if ((eps * t) * t > Scalar(1))
+            m_matT.block(i, n-1, size-i, 2) /= t;
+
+        }
+      }
+      
+      // We handled a pair of complex conjugate eigenvalues, so we need to skip them both
+      n--;
+    }
+    else
+    {
+      eigen_assert(0 && "Internal bug in EigenSolver"); // this should not happen
+    }
+  }
+
+  // Back transformation to get eigenvectors of original matrix
+  for (Index j = size-1; j >= 0; j--)
+  {
+    m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1);
+    m_eivec.col(j) = m_tmp;
+  }
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_EIGENSOLVER_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
rename to resources/3rdParty/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h
rename to resources/3rdParty/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
rename to resources/3rdParty/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Eigenvalues/RealSchur.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/RealSchur.h
new file mode 100644
index 000000000..d1949b83c
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/RealSchur.h
@@ -0,0 +1,466 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REAL_SCHUR_H
+#define EIGEN_REAL_SCHUR_H
+
+#include "./HessenbergDecomposition.h"
+
+namespace Eigen { 
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class RealSchur
+  *
+  * \brief Performs a real Schur decomposition of a square matrix
+  *
+  * \tparam _MatrixType the type of the matrix of which we are computing the
+  * real Schur decomposition; this is expected to be an instantiation of the
+  * Matrix class template.
+  *
+  * Given a real square matrix A, this class computes the real Schur
+  * decomposition: \f$ A = U T U^T \f$ where U is a real orthogonal matrix and
+  * T is a real quasi-triangular matrix. An orthogonal matrix is a matrix whose
+  * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
+  * matrix is a block-triangular matrix whose diagonal consists of 1-by-1
+  * blocks and 2-by-2 blocks with complex eigenvalues. The eigenvalues of the
+  * blocks on the diagonal of T are the same as the eigenvalues of the matrix
+  * A, and thus the real Schur decomposition is used in EigenSolver to compute
+  * the eigendecomposition of a matrix.
+  *
+  * Call the function compute() to compute the real Schur decomposition of a
+  * given matrix. Alternatively, you can use the RealSchur(const MatrixType&, bool)
+  * constructor which computes the real Schur decomposition at construction
+  * time. Once the decomposition is computed, you can use the matrixU() and
+  * matrixT() functions to retrieve the matrices U and T in the decomposition.
+  *
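+  * For illustration only, a minimal sketch assuming a dynamically sized
+  * real matrix:
+  * \code
+  * MatrixXd A = MatrixXd::Random(5,5);
+  * RealSchur<MatrixXd> schur(A);        // computes T and U
+  * MatrixXd T = schur.matrixT();        // quasi-triangular factor
+  * MatrixXd U = schur.matrixU();        // orthogonal factor
+  * // A is recovered (up to rounding) as U * T * U.transpose()
+  * \endcode
+  *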
+  * The documentation of RealSchur(const MatrixType&, bool) contains an example
+  * of the typical use of this class.
+  *
+  * \note The implementation is adapted from
+  * <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
+  * Their code is based on EISPACK.
+  *
+  * \sa class ComplexSchur, class EigenSolver, class ComplexEigenSolver
+  */
+template<typename _MatrixType> class RealSchur
+{
+  public:
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+    typedef typename MatrixType::Index Index;
+
+    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+
+    /** \brief Default constructor.
+      *
+      * \param [in] size  Positive integer, size of the matrix whose Schur decomposition will be computed.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via compute().  The \p size parameter is only
+      * used as a hint. It is not an error to give a wrong \p size, but it may
+      * impair performance.
+      *
+      * \sa compute() for an example.
+      */
+    RealSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
+            : m_matT(size, size),
+              m_matU(size, size),
+              m_workspaceVector(size),
+              m_hess(size),
+              m_isInitialized(false),
+              m_matUisUptodate(false)
+    { }
+
+    /** \brief Constructor; computes real Schur decomposition of given matrix. 
+      * 
+      * \param[in]  matrix    Square matrix whose Schur decomposition is to be computed.
+      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
+      *
+      * This constructor calls compute() to compute the Schur decomposition.
+      *
+      * Example: \include RealSchur_RealSchur_MatrixType.cpp
+      * Output: \verbinclude RealSchur_RealSchur_MatrixType.out
+      */
+    RealSchur(const MatrixType& matrix, bool computeU = true)
+            : m_matT(matrix.rows(),matrix.cols()),
+              m_matU(matrix.rows(),matrix.cols()),
+              m_workspaceVector(matrix.rows()),
+              m_hess(matrix.rows()),
+              m_isInitialized(false),
+              m_matUisUptodate(false)
+    {
+      compute(matrix, computeU);
+    }
+
+    /** \brief Returns the orthogonal matrix in the Schur decomposition. 
+      *
+      * \returns A const reference to the matrix U.
+      *
+      * \pre Either the constructor RealSchur(const MatrixType&, bool) or the
+      * member function compute(const MatrixType&, bool) has been called before
+      * to compute the Schur decomposition of a matrix, and \p computeU was set
+      * to true (the default value).
+      *
+      * \sa RealSchur(const MatrixType&, bool) for an example
+      */
+    const MatrixType& matrixU() const
+    {
+      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+      eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the RealSchur decomposition.");
+      return m_matU;
+    }
+
+    /** \brief Returns the quasi-triangular matrix in the Schur decomposition. 
+      *
+      * \returns A const reference to the matrix T.
+      *
+      * \pre Either the constructor RealSchur(const MatrixType&, bool) or the
+      * member function compute(const MatrixType&, bool) has been called before
+      * to compute the Schur decomposition of a matrix.
+      *
+      * \sa RealSchur(const MatrixType&, bool) for an example
+      */
+    const MatrixType& matrixT() const
+    {
+      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+      return m_matT;
+    }
+  
+    /** \brief Computes Schur decomposition of given matrix. 
+      * 
+      * \param[in]  matrix    Square matrix whose Schur decomposition is to be computed.
+      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
+      * \returns    Reference to \c *this
+      *
+      * The Schur decomposition is computed by first reducing the matrix to
+      * Hessenberg form using the class HessenbergDecomposition. The Hessenberg
+      * matrix is then reduced to triangular form by performing Francis QR
+      * iterations with implicit double shift. The cost of computing the Schur
+      * decomposition depends on the number of iterations; as a rough guide, it
+      * may be taken to be \f$25n^3\f$ flops if \a computeU is true and
+      * \f$10n^3\f$ flops if \a computeU is false.
+      *
+      * Example: \include RealSchur_compute.cpp
+      * Output: \verbinclude RealSchur_compute.out
+      */
+    RealSchur& compute(const MatrixType& matrix, bool computeU = true);
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+      return m_info;
+    }
+
+    /** \brief Maximum number of iterations.
+      *
+      * Maximum number of iterations allowed for an eigenvalue to converge. 
+      */
+    static const int m_maxIterations = 40;
+
+  private:
+    
+    MatrixType m_matT;
+    MatrixType m_matU;
+    ColumnVectorType m_workspaceVector;
+    HessenbergDecomposition<MatrixType> m_hess;
+    ComputationInfo m_info;
+    bool m_isInitialized;
+    bool m_matUisUptodate;
+
+    typedef Matrix<Scalar,3,1> Vector3s;
+
+    Scalar computeNormOfT();
+    Index findSmallSubdiagEntry(Index iu, Scalar norm);
+    void splitOffTwoRows(Index iu, bool computeU, Scalar exshift);
+    void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
+    void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
+    void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);
+};
+
+
+template<typename MatrixType>
+RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
+{
+  assert(matrix.cols() == matrix.rows());
+
+  // Step 1. Reduce to Hessenberg form
+  m_hess.compute(matrix);
+  m_matT = m_hess.matrixH();
+  if (computeU)
+    m_matU = m_hess.matrixQ();
+
+  // Step 2. Reduce to real Schur form  
+  m_workspaceVector.resize(m_matT.cols());
+  Scalar* workspace = &m_workspaceVector.coeffRef(0);
+
+  // The matrix m_matT is divided in three parts. 
+  // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. 
+  // Rows il,...,iu are the part we are working on (the active window).
+  // Rows iu+1,...,end are already brought in triangular form.
+  Index iu = m_matT.cols() - 1;
+  Index iter = 0;      // iteration count for current eigenvalue
+  Index totalIter = 0; // iteration count for whole matrix
+  Scalar exshift(0);   // sum of exceptional shifts
+  Scalar norm = computeNormOfT();
+
+  if(norm!=0)
+  {
+    while (iu >= 0)
+    {
+      Index il = findSmallSubdiagEntry(iu, norm);
+
+      // Check for convergence
+      if (il == iu) // One root found
+      {
+        m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift;
+        if (iu > 0)
+          m_matT.coeffRef(iu, iu-1) = Scalar(0);
+        iu--;
+        iter = 0;
+      }
+      else if (il == iu-1) // Two roots found
+      {
+        splitOffTwoRows(iu, computeU, exshift);
+        iu -= 2;
+        iter = 0;
+      }
+      else // No convergence yet
+      {
+        // The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG )
+        Vector3s firstHouseholderVector(0,0,0), shiftInfo;
+        computeShift(iu, iter, exshift, shiftInfo);
+        iter = iter + 1;
+        totalIter = totalIter + 1;
+        if (totalIter > m_maxIterations * matrix.cols()) break;
+        Index im;
+        initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
+        performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace);
+      }
+    }
+  }
+  if(totalIter <= m_maxIterations * matrix.cols()) 
+    m_info = Success;
+  else
+    m_info = NoConvergence;
+
+  m_isInitialized = true;
+  m_matUisUptodate = computeU;
+  return *this;
+}
+
+/** \internal Computes and returns vector L1 norm of T */
+template<typename MatrixType>
+inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
+{
+  const Index size = m_matT.cols();
+  // FIXME to be efficient the following would require a triangular reduction code
+  // Scalar norm = m_matT.upper().cwiseAbs().sum() 
+  //               + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
+  Scalar norm(0);
+  for (Index j = 0; j < size; ++j)
+    norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
+  return norm;
+}
+
+/** \internal Look for single small sub-diagonal element and returns its index */
+template<typename MatrixType>
+inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, Scalar norm)
+{
+  Index res = iu;
+  while (res > 0)
+  {
+    Scalar s = internal::abs(m_matT.coeff(res-1,res-1)) + internal::abs(m_matT.coeff(res,res));
+    if (s == 0.0)
+      s = norm;
+    if (internal::abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
+      break;
+    res--;
+  }
+  return res;
+}
+
+/** \internal Update T given that rows iu-1 and iu decouple from the rest. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scalar exshift)
+{
+  const Index size = m_matT.cols();
+
+  // The eigenvalues of the 2x2 matrix [a b; c d] are 
+  // trace/2 +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc
+  Scalar p = Scalar(0.5) * (m_matT.coeff(iu-1,iu-1) - m_matT.coeff(iu,iu));
+  Scalar q = p * p + m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);   // q = tr^2 / 4 - det = discr/4
+  m_matT.coeffRef(iu,iu) += exshift;
+  m_matT.coeffRef(iu-1,iu-1) += exshift;
+
+  if (q >= Scalar(0)) // Two real eigenvalues
+  {
+    Scalar z = internal::sqrt(internal::abs(q));
+    JacobiRotation<Scalar> rot;
+    if (p >= Scalar(0))
+      rot.makeGivens(p + z, m_matT.coeff(iu, iu-1));
+    else
+      rot.makeGivens(p - z, m_matT.coeff(iu, iu-1));
+
+    m_matT.rightCols(size-iu+1).applyOnTheLeft(iu-1, iu, rot.adjoint());
+    m_matT.topRows(iu+1).applyOnTheRight(iu-1, iu, rot);
+    m_matT.coeffRef(iu, iu-1) = Scalar(0); 
+    if (computeU)
+      m_matU.applyOnTheRight(iu-1, iu, rot);
+  }
+
+  if (iu > 1) 
+    m_matT.coeffRef(iu-1, iu-2) = Scalar(0);
+}
+
+/** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo)
+{
+  shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu);
+  shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1);
+  shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);
+
+  // Wilkinson's original ad hoc shift
+  if (iter == 10)
+  {
+    exshift += shiftInfo.coeff(0);
+    for (Index i = 0; i <= iu; ++i)
+      m_matT.coeffRef(i,i) -= shiftInfo.coeff(0);
+    Scalar s = internal::abs(m_matT.coeff(iu,iu-1)) + internal::abs(m_matT.coeff(iu-1,iu-2));
+    shiftInfo.coeffRef(0) = Scalar(0.75) * s;
+    shiftInfo.coeffRef(1) = Scalar(0.75) * s;
+    shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s;
+  }
+
+  // MATLAB's new ad hoc shift
+  if (iter == 30)
+  {
+    Scalar s = (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
+    s = s * s + shiftInfo.coeff(2);
+    if (s > Scalar(0))
+    {
+      s = internal::sqrt(s);
+      if (shiftInfo.coeff(1) < shiftInfo.coeff(0))
+        s = -s;
+      s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
+      s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s;
+      exshift += s;
+      for (Index i = 0; i <= iu; ++i)
+        m_matT.coeffRef(i,i) -= s;
+      shiftInfo.setConstant(Scalar(0.964));
+    }
+  }
+}
+
+/** \internal Compute index im at which Francis QR step starts and the first Householder vector. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector)
+{
+  Vector3s& v = firstHouseholderVector; // alias to save typing
+
+  for (im = iu-2; im >= il; --im)
+  {
+    const Scalar Tmm = m_matT.coeff(im,im);
+    const Scalar r = shiftInfo.coeff(0) - Tmm;
+    const Scalar s = shiftInfo.coeff(1) - Tmm;
+    v.coeffRef(0) = (r * s - shiftInfo.coeff(2)) / m_matT.coeff(im+1,im) + m_matT.coeff(im,im+1);
+    v.coeffRef(1) = m_matT.coeff(im+1,im+1) - Tmm - r - s;
+    v.coeffRef(2) = m_matT.coeff(im+2,im+1);
+    if (im == il) {
+      break;
+    }
+    const Scalar lhs = m_matT.coeff(im,im-1) * (internal::abs(v.coeff(1)) + internal::abs(v.coeff(2)));
+    const Scalar rhs = v.coeff(0) * (internal::abs(m_matT.coeff(im-1,im-1)) + internal::abs(Tmm) + internal::abs(m_matT.coeff(im+1,im+1)));
+    if (internal::abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
+    {
+      break;
+    }
+  }
+}
+
+/** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)
+{
+  assert(im >= il);
+  assert(im <= iu-2);
+
+  const Index size = m_matT.cols();
+
+  for (Index k = im; k <= iu-2; ++k)
+  {
+    bool firstIteration = (k == im);
+
+    Vector3s v;
+    if (firstIteration)
+      v = firstHouseholderVector;
+    else
+      v = m_matT.template block<3,1>(k,k-1);
+
+    Scalar tau, beta;
+    Matrix<Scalar, 2, 1> ess;
+    v.makeHouseholder(ess, tau, beta);
+    
+    if (beta != Scalar(0)) // if v is not zero
+    {
+      if (firstIteration && k > il)
+        m_matT.coeffRef(k,k-1) = -m_matT.coeff(k,k-1);
+      else if (!firstIteration)
+        m_matT.coeffRef(k,k-1) = beta;
+
+      // These Householder transformations form the O(n^3) part of the algorithm
+      m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace);
+      m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
+      if (computeU)
+        m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);
+    }
+  }
+
+  Matrix<Scalar, 2, 1> v = m_matT.template block<2,1>(iu-1, iu-2);
+  Scalar tau, beta;
+  Matrix<Scalar, 1, 1> ess;
+  v.makeHouseholder(ess, tau, beta);
+
+  if (beta != Scalar(0)) // if v is not zero
+  {
+    m_matT.coeffRef(iu-1, iu-2) = beta;
+    m_matT.block(iu-1, iu-1, 2, size-iu+1).applyHouseholderOnTheLeft(ess, tau, workspace);
+    m_matT.block(0, iu-1, iu+1, 2).applyHouseholderOnTheRight(ess, tau, workspace);
+    if (computeU)
+      m_matU.block(0, iu-1, size, 2).applyHouseholderOnTheRight(ess, tau, workspace);
+  }
+
+  // clean up pollution due to round-off errors
+  for (Index i = im+2; i <= iu; ++i)
+  {
+    m_matT.coeffRef(i,i-2) = Scalar(0);
+    if (i > im+2)
+      m_matT.coeffRef(i,i-3) = Scalar(0);
+  }
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_REAL_SCHUR_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
rename to resources/3rdParty/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h b/resources/3rdParty/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h
rename to resources/3rdParty/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Geometry/AlignedBox.h b/resources/3rdParty/eigen/Eigen/src/Geometry/AlignedBox.h
new file mode 100644
index 000000000..c0f97300c
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Geometry/AlignedBox.h
@@ -0,0 +1,375 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ALIGNEDBOX_H
+#define EIGEN_ALIGNEDBOX_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  *
+  * \class AlignedBox
+  *
+  * \brief An axis aligned box
+  *
+  * \param _Scalar the type of the scalar coefficients
+  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  *
+  * This class represents an axis aligned box as a pair of the minimal and maximal corners.
+  */
+template <typename _Scalar, int _AmbientDim>
+class AlignedBox
+{
+public:
+EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
+  enum { AmbientDimAtCompileTime = _AmbientDim };
+  typedef _Scalar                                   Scalar;
+  typedef NumTraits<Scalar>                         ScalarTraits;
+  typedef DenseIndex                                Index;
+  typedef typename ScalarTraits::Real               RealScalar;
+  typedef typename ScalarTraits::NonInteger      NonInteger;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1>  VectorType;
+
+  /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
+  enum CornerType
+  {
+    /** 1D names */
+    Min=0, Max=1,
+
+    /** Added names for 2D */
+    BottomLeft=0, BottomRight=1,
+    TopLeft=2, TopRight=3,
+
+    /** Added names for 3D */
+    BottomLeftFloor=0, BottomRightFloor=1,
+    TopLeftFloor=2, TopRightFloor=3,
+    BottomLeftCeil=4, BottomRightCeil=5,
+    TopLeftCeil=6, TopRightCeil=7
+  };
+
+
+  /** Default constructor initializing an empty box. */
+  inline explicit AlignedBox()
+  { if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); }
+
+  /** Constructs an empty box with \a _dim the dimension of the ambient space. */
+  inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
+  { setEmpty(); }
+
+  /** Constructs a box with extremities \a _min and \a _max. */
+  template<typename OtherVectorType1, typename OtherVectorType2>
+  inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
+
+  /** Constructs a box containing a single point \a p. */
+  template<typename Derived>
+  inline explicit AlignedBox(const MatrixBase<Derived>& a_p)
+  {
+    const typename internal::nested<Derived,2>::type p(a_p.derived());
+    m_min = p;
+    m_max = p;
+  }
+
+  ~AlignedBox() {}
+
+  /** \returns the dimension of the ambient space in which the box lives */
+  inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
+
+  /** \deprecated use isEmpty */
+  inline bool isNull() const { return isEmpty(); }
+
+  /** \deprecated use setEmpty */
+  inline void setNull() { setEmpty(); }
+
+  /** \returns true if the box is empty. */
+  inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
+
+  /** Makes \c *this an empty box. */
+  inline void setEmpty()
+  {
+    m_min.setConstant( ScalarTraits::highest() );
+    m_max.setConstant( ScalarTraits::lowest() );
+  }
+
+  /** \returns the minimal corner */
+  inline const VectorType& (min)() const { return m_min; }
+  /** \returns a non const reference to the minimal corner */
+  inline VectorType& (min)() { return m_min; }
+  /** \returns the maximal corner */
+  inline const VectorType& (max)() const { return m_max; }
+  /** \returns a non const reference to the maximal corner */
+  inline VectorType& (max)() { return m_max; }
+
+  /** \returns the center of the box */
+  inline const CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>,
+                            const CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const VectorType, const VectorType> >
+  center() const
+  { return (m_min+m_max)/2; }
+
+  /** \returns the lengths of the sides of the bounding box.
+    * Note that this function does not return the same result
+    * for integer and floating-point scalar types.
+    */
+  inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> sizes() const
+  { return m_max - m_min; }
+
+  /** \returns the volume of the bounding box */
+  inline Scalar volume() const
+  { return sizes().prod(); }
+
+  /** \returns an expression of the bounding box diagonal vector;
+    * if the length of the diagonal is needed, diagonal().norm()
+    * will provide it.
+    */
+  inline CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> diagonal() const
+  { return sizes(); }
+
+  /** \returns the vertex of the bounding box at the corner defined by
+    * the corner id \a corner. It works only for a 1D, 2D or 3D bounding box.
+    * For 1D bounding boxes, corners are named by 2 enum constants:
+    * BottomLeft and BottomRight.
+    * For 2D bounding boxes, corners are named by 4 enum constants:
+    * BottomLeft, BottomRight, TopLeft, TopRight.
+    * For 3D bounding boxes, the following names are added:
+    * BottomLeftCeil, BottomRightCeil, TopLeftCeil, TopRightCeil.
+    */
+  inline VectorType corner(CornerType corner) const
+  {
+    EIGEN_STATIC_ASSERT(_AmbientDim <= 3, THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE);
+
+    VectorType res;
+
+    Index mult = 1;
+    for(Index d=0; d<dim(); ++d)
+    {
+      if( mult & corner ) res[d] = m_max[d];
+      else                res[d] = m_min[d];
+      mult *= 2;
+    }
+    return res;
+  }
+
+  /** \returns a random point inside the bounding box sampled with
+   * a uniform distribution */
+  inline VectorType sample() const
+  {
+    VectorType r;
+    for(Index d=0; d<dim(); ++d)
+    {
+      if(!ScalarTraits::IsInteger)
+      {
+        r[d] = m_min[d] + (m_max[d]-m_min[d])
+             * internal::random<Scalar>(Scalar(0), Scalar(1));
+      }
+      else
+        r[d] = internal::random(m_min[d], m_max[d]);
+    }
+    return r;
+  }
+
+  /** \returns true if the point \a p is inside the box \c *this. */
+  template<typename Derived>
+  inline bool contains(const MatrixBase<Derived>& a_p) const
+  {
+    typename internal::nested<Derived,2>::type p(a_p.derived());
+    return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all();
+  }
+
+  /** \returns true if the box \a b is entirely inside the box \c *this. */
+  inline bool contains(const AlignedBox& b) const
+  { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
+
+  /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
+  template<typename Derived>
+  inline AlignedBox& extend(const MatrixBase<Derived>& a_p)
+  {
+    typename internal::nested<Derived,2>::type p(a_p.derived());
+    m_min = m_min.cwiseMin(p);
+    m_max = m_max.cwiseMax(p);
+    return *this;
+  }
+
+  /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
+  inline AlignedBox& extend(const AlignedBox& b)
+  {
+    m_min = m_min.cwiseMin(b.m_min);
+    m_max = m_max.cwiseMax(b.m_max);
+    return *this;
+  }
+
+  /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
+  inline AlignedBox& clamp(const AlignedBox& b)
+  {
+    m_min = m_min.cwiseMax(b.m_min);
+    m_max = m_max.cwiseMin(b.m_max);
+    return *this;
+  }
+
+  /** Returns an AlignedBox that is the intersection of \a b and \c *this */
+  inline AlignedBox intersection(const AlignedBox& b) const
+  {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
+
+  /** Returns an AlignedBox that is the union of \a b and \c *this */
+  inline AlignedBox merged(const AlignedBox& b) const
+  { return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
+
+  /** Translate \c *this by the vector \a t and returns a reference to \c *this. */
+  template<typename Derived>
+  inline AlignedBox& translate(const MatrixBase<Derived>& a_t)
+  {
+    const typename internal::nested<Derived,2>::type t(a_t.derived());
+    m_min += t;
+    m_max += t;
+    return *this;
+  }
+
+  /** \returns the squared distance between the point \a p and the box \c *this,
+    * and zero if \a p is inside the box.
+    * \sa exteriorDistance()
+    */
+  template<typename Derived>
+  inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& a_p) const;
+
+  /** \returns the squared distance between the boxes \a b and \c *this,
+    * and zero if the boxes intersect.
+    * \sa exteriorDistance()
+    */
+  inline Scalar squaredExteriorDistance(const AlignedBox& b) const;
+
+  /** \returns the distance between the point \a p and the box \c *this,
+    * and zero if \a p is inside the box.
+    * \sa squaredExteriorDistance()
+    */
+  template<typename Derived>
+  inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
+  { return internal::sqrt(NonInteger(squaredExteriorDistance(p))); }
+
+  /** \returns the distance between the boxes \a b and \c *this,
+    * and zero if the boxes intersect.
+    * \sa squaredExteriorDistance()
+    */
+  inline NonInteger exteriorDistance(const AlignedBox& b) const
+  { return internal::sqrt(NonInteger(squaredExteriorDistance(b))); }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<AlignedBox,
+           AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
+  {
+    return typename internal::cast_return_type<AlignedBox,
+                    AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
+  {
+    m_min = (other.min)().template cast<Scalar>();
+    m_max = (other.max)().template cast<Scalar>();
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const AlignedBox& other, RealScalar prec = ScalarTraits::dummy_precision()) const
+  { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }
+
+protected:
+
+  VectorType m_min, m_max;
+};
+
+
+
+template<typename Scalar,int AmbientDim>
+template<typename Derived>
+inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const MatrixBase<Derived>& a_p) const
+{
+  const typename internal::nested<Derived,2*AmbientDim>::type p(a_p.derived());
+  Scalar dist2(0);
+  Scalar aux;
+  for (Index k=0; k<dim(); ++k)
+  {
+    if( m_min[k] > p[k] )
+    {
+      aux = m_min[k] - p[k];
+      dist2 += aux*aux;
+    }
+    else if( p[k] > m_max[k] )
+    {
+      aux = p[k] - m_max[k];
+      dist2 += aux*aux;
+    }
+  }
+  return dist2;
+}
+
+template<typename Scalar,int AmbientDim>
+inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const AlignedBox& b) const
+{
+  Scalar dist2(0);
+  Scalar aux;
+  for (Index k=0; k<dim(); ++k)
+  {
+    if( m_min[k] > b.m_max[k] )
+    {
+      aux = m_min[k] - b.m_max[k];
+      dist2 += aux*aux;
+    }
+    else if( b.m_min[k] > m_max[k] )
+    {
+      aux = b.m_min[k] - m_max[k];
+      dist2 += aux*aux;
+    }
+  }
+  return dist2;
+}
+
+/** \defgroup alignedboxtypedefs Global aligned box typedefs
+  *
+  * \ingroup Geometry_Module
+  *
+  * Eigen defines several typedef shortcuts for most common aligned box types.
+  *
+  * The general patterns are the following:
+  *
+  * \c AlignedBoxSizeType where \c Size can be \c 1, \c 2, \c 3, \c 4 for fixed size boxes or \c X for dynamic size,
+  * and where \c Type can be \c i for integer, \c f for float, \c d for double.
+  *
+  * For example, \c AlignedBox3d is a fixed-size 3-dimensional aligned box of doubles, and \c AlignedBoxXf is a dynamic-size aligned box of floats.
+  *
+  * \sa class AlignedBox
+  */
+
+#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)    \
+/** \ingroup alignedboxtypedefs */                                 \
+typedef AlignedBox<Type, Size>   AlignedBox##SizeSuffix##TypeSuffix;
+
+#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 1, 1) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X)
+
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int,                  i)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float,                f)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double,               d)
+
+#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES
+#undef EIGEN_MAKE_TYPEDEFS
+
+} // end namespace Eigen
+
+#endif // EIGEN_ALIGNEDBOX_H
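A minimal sketch of the AlignedBox interface defined above; the points and the 3D/double instantiation are arbitrary choices:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  // Grow an initially empty box until it contains two points.
  AlignedBox3d box;                 // fixed-size boxes start empty
  box.extend(Vector3d(0, 0, 0));
  box.extend(Vector3d(1, 2, 3));

  std::cout << "sizes:  " << box.sizes().transpose()  << "\n"
            << "center: " << box.center().transpose() << "\n"
            << "volume: " << box.volume()             << "\n"
            << "contains (0.5,1,1): " << box.contains(Vector3d(0.5, 1, 1)) << "\n"
            << "exterior distance to (2,2,3): "
            << box.exteriorDistance(Vector3d(2, 2, 3)) << std::endl;
  return 0;
}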
diff --git a/resources/3rdParty/eigen/Eigen/src/Geometry/AngleAxis.h b/resources/3rdParty/eigen/Eigen/src/Geometry/AngleAxis.h
new file mode 100644
index 000000000..67197ac78
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Geometry/AngleAxis.h
@@ -0,0 +1,230 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ANGLEAXIS_H
+#define EIGEN_ANGLEAXIS_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class AngleAxis
+  *
+  * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  *
+  * \warning When setting up an AngleAxis object, the axis vector \b must \b be \b normalized.
+  *
+  * The following two typedefs are provided for convenience:
+  * \li \c AngleAxisf for \c float
+  * \li \c AngleAxisd for \c double
+  *
+  * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily
+  * mimic Euler-angles. Here is an example:
+  * \include AngleAxis_mimic_euler.cpp
+  * Output: \verbinclude AngleAxis_mimic_euler.out
+  *
+  * \note This class is not meant to be used to store a rotation transformation,
+  * but rather to simplify the creation of other rotation objects (Quaternion, rotation Matrix)
+  * and transformation objects.
+  *
+  * \sa class Quaternion, class Transform, MatrixBase::UnitX()
+  */
+
+namespace internal {
+template<typename _Scalar> struct traits<AngleAxis<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+}
+
+template<typename _Scalar>
+class AngleAxis : public RotationBase<AngleAxis<_Scalar>,3>
+{
+  typedef RotationBase<AngleAxis<_Scalar>,3> Base;
+
+public:
+
+  using Base::operator*;
+
+  enum { Dim = 3 };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef Matrix<Scalar,3,3> Matrix3;
+  typedef Matrix<Scalar,3,1> Vector3;
+  typedef Quaternion<Scalar> QuaternionType;
+
+protected:
+
+  Vector3 m_axis;
+  Scalar m_angle;
+
+public:
+
+  /** Default constructor without initialization. */
+  AngleAxis() {}
+  /** Constructs and initializes the angle-axis rotation from an \a angle in radians
+    * and an \a axis which \b must \b be \b normalized.
+    *
+    * \warning If the \a axis vector is not normalized, then the angle-axis object
+    *          represents an invalid rotation. */
+  template<typename Derived>
+  inline AngleAxis(Scalar angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
+  /** Constructs and initializes the angle-axis rotation from a quaternion \a q. */
+  template<typename QuatDerived> inline explicit AngleAxis(const QuaternionBase<QuatDerived>& q) { *this = q; }
+  /** Constructs and initializes the angle-axis rotation from a 3x3 rotation matrix. */
+  template<typename Derived>
+  inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }
+
+  Scalar angle() const { return m_angle; }
+  Scalar& angle() { return m_angle; }
+
+  const Vector3& axis() const { return m_axis; }
+  Vector3& axis() { return m_axis; }
+
+  /** Concatenates two rotations */
+  inline QuaternionType operator* (const AngleAxis& other) const
+  { return QuaternionType(*this) * QuaternionType(other); }
+
+  /** Concatenates two rotations */
+  inline QuaternionType operator* (const QuaternionType& other) const
+  { return QuaternionType(*this) * other; }
+
+  /** Concatenates two rotations */
+  friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b)
+  { return a * QuaternionType(b); }
+
+  /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */
+  AngleAxis inverse() const
+  { return AngleAxis(-m_angle, m_axis); }
+
+  template<class QuatDerived>
+  AngleAxis& operator=(const QuaternionBase<QuatDerived>& q);
+  template<typename Derived>
+  AngleAxis& operator=(const MatrixBase<Derived>& m);
+
+  template<typename Derived>
+  AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m);
+  Matrix3 toRotationMatrix(void) const;
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit AngleAxis(const AngleAxis<OtherScalarType>& other)
+  {
+    m_axis = other.axis().template cast<Scalar>();
+    m_angle = Scalar(other.angle());
+  }
+
+  static inline const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const AngleAxis& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); }
+};
+
+/** \ingroup Geometry_Module
+  * single precision angle-axis type */
+typedef AngleAxis<float> AngleAxisf;
+/** \ingroup Geometry_Module
+  * double precision angle-axis type */
+typedef AngleAxis<double> AngleAxisd;
+
+/** Sets \c *this from a \b unit quaternion.
+  * The resulting axis is normalized.
+  *
+  * \warning As with any other method dealing with quaternions, if the input quaternion
+  *          is not normalized then the result is undefined.
+  */
+template<typename Scalar>
+template<typename QuatDerived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived>& q)
+{
+  using std::acos;
+  using std::min;
+  using std::max;
+  Scalar n2 = q.vec().squaredNorm();
+  if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision())
+  {
+    m_angle = 0;
+    m_axis << 1, 0, 0;
+  }
+  else
+  {
+    m_angle = Scalar(2)*acos((min)((max)(Scalar(-1),q.w()),Scalar(1)));
+    m_axis = q.vec() / internal::sqrt(n2);
+  }
+  return *this;
+}
+
+/** Set \c *this from a 3x3 rotation matrix \a mat.
+  */
+template<typename Scalar>
+template<typename Derived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)
+{
+  // Since a direct conversion would not be really faster,
+  // let's use the robust Quaternion implementation:
+  return *this = QuaternionType(mat);
+}
+
+/**
+  * \brief Sets \c *this from a 3x3 rotation matrix.
+  */
+template<typename Scalar>
+template<typename Derived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
+{
+  return *this = QuaternionType(mat);
+}
+
+/** Constructs and \returns an equivalent 3x3 rotation matrix.
+  */
+template<typename Scalar>
+typename AngleAxis<Scalar>::Matrix3
+AngleAxis<Scalar>::toRotationMatrix(void) const
+{
+  Matrix3 res;
+  Vector3 sin_axis  = internal::sin(m_angle) * m_axis;
+  Scalar c = internal::cos(m_angle);
+  Vector3 cos1_axis = (Scalar(1)-c) * m_axis;
+
+  Scalar tmp;
+  tmp = cos1_axis.x() * m_axis.y();
+  res.coeffRef(0,1) = tmp - sin_axis.z();
+  res.coeffRef(1,0) = tmp + sin_axis.z();
+
+  tmp = cos1_axis.x() * m_axis.z();
+  res.coeffRef(0,2) = tmp + sin_axis.y();
+  res.coeffRef(2,0) = tmp - sin_axis.y();
+
+  tmp = cos1_axis.y() * m_axis.z();
+  res.coeffRef(1,2) = tmp - sin_axis.x();
+  res.coeffRef(2,1) = tmp + sin_axis.x();
+
+  res.diagonal() = (cos1_axis.cwiseProduct(m_axis)).array() + c;
+
+  return res;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_ANGLEAXIS_H
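A minimal sketch of the Euler-angle idiom referred to in the AngleAxis documentation above; the angles are arbitrary and pi is defined locally:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  const float pi = 3.14159265358979f;

  // Compose three elementary rotations into a rotation matrix.
  Matrix3f R;
  R = AngleAxisf(0.25f * pi, Vector3f::UnitX())
    * AngleAxisf(0.50f * pi, Vector3f::UnitY())
    * AngleAxisf(0.33f * pi, Vector3f::UnitZ());

  // Round-trip through a quaternion and back to angle-axis form.
  Quaternionf q(R);
  AngleAxisf aa(q);
  std::cout << "angle = " << aa.angle()
            << ", axis = " << aa.axis().transpose() << std::endl;
  return 0;
}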
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Geometry/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Geometry/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Geometry/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/EulerAngles.h b/resources/3rdParty/eigen/Eigen/src/Geometry/EulerAngles.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Geometry/EulerAngles.h
rename to resources/3rdParty/eigen/Eigen/src/Geometry/EulerAngles.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/Homogeneous.h b/resources/3rdParty/eigen/Eigen/src/Geometry/Homogeneous.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Geometry/Homogeneous.h
rename to resources/3rdParty/eigen/Eigen/src/Geometry/Homogeneous.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Geometry/Hyperplane.h b/resources/3rdParty/eigen/Eigen/src/Geometry/Hyperplane.h
new file mode 100644
index 000000000..1b7c7c78c
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Geometry/Hyperplane.h
@@ -0,0 +1,269 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_HYPERPLANE_H
+#define EIGEN_HYPERPLANE_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Hyperplane
+  *
+  * \brief A hyperplane
+  *
+  * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n.
+  * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  *             Notice that the dimension of the hyperplane is _AmbientDim-1.
+  *
+  * This class represents a hyperplane as the zero set of the implicit equation
+  * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part)
+  * and \f$ d \f$ is the distance (offset) to the origin.
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+class Hyperplane
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
+  enum {
+    AmbientDimAtCompileTime = _AmbientDim,
+    Options = _Options
+  };
+  typedef _Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef DenseIndex Index;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+  typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic
+                        ? Dynamic
+                        : Index(AmbientDimAtCompileTime)+1,1,Options> Coefficients;
+  typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
+  typedef const Block<const Coefficients,AmbientDimAtCompileTime,1> ConstNormalReturnType;
+
+  /** Default constructor without initialization */
+  inline explicit Hyperplane() {}
+  
+  template<int OtherOptions>
+  Hyperplane(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
+   : m_coeffs(other.coeffs())
+  {}
+
+  /** Constructs a dynamic-size hyperplane with \a _dim the dimension
+    * of the ambient space */
+  inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {}
+
+  /** Constructs a plane from its normal \a n and a point \a e on the plane.
+    * \warning the normal vector \a n is assumed to be normalized.
+    */
+  inline Hyperplane(const VectorType& n, const VectorType& e)
+    : m_coeffs(n.size()+1)
+  {
+    normal() = n;
+    offset() = -n.dot(e);
+  }
+
+  /** Constructs a plane from its normal \a n and distance to the origin \a d
+    * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$.
+    * \warning the normal vector \a n is assumed to be normalized.
+    */
+  inline Hyperplane(const VectorType& n, Scalar d)
+    : m_coeffs(n.size()+1)
+  {
+    normal() = n;
+    offset() = d;
+  }
+
+  /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space
+    * is greater than 2, the hyperplane is not uniquely determined, so an arbitrary choice is made.
+    */
+  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1)
+  {
+    Hyperplane result(p0.size());
+    result.normal() = (p1 - p0).unitOrthogonal();
+    result.offset() = -p0.dot(result.normal());
+    return result;
+  }
+
+  /** Constructs a hyperplane passing through the three points. The dimension of the ambient space
+    * is required to be exactly 3.
+    */
+  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2)
+  {
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
+    Hyperplane result(p0.size());
+    result.normal() = (p2 - p0).cross(p1 - p0).normalized();
+    result.offset() = -p0.dot(result.normal());
+    return result;
+  }
+
+  /** Constructs a hyperplane passing through the parametrized line \a parametrized.
+    * If the dimension of the ambient space is greater than 2, the hyperplane is not uniquely determined,
+    * so an arbitrary choice is made.
+    */
+  // FIXME to be consistent with the rest, this could be implemented as a static Through function ??
+  explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
+  {
+    normal() = parametrized.direction().unitOrthogonal();
+    offset() = -parametrized.origin().dot(normal());
+  }
+
+  ~Hyperplane() {}
+
+  /** \returns the dimension of the ambient space in which the plane lives */
+  inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); }
+
+  /** normalizes \c *this */
+  void normalize(void)
+  {
+    m_coeffs /= normal().norm();
+  }
+
+  /** \returns the signed distance between the plane \c *this and a point \a p.
+    * \sa absDistance()
+    */
+  inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); }
+
+  /** \returns the absolute distance between the plane \c *this and a point \a p.
+    * \sa signedDistance()
+    */
+  inline Scalar absDistance(const VectorType& p) const { return internal::abs(signedDistance(p)); }
+
+  /** \returns the projection of a point \a p onto the plane \c *this.
+    */
+  inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); }
+
+  /** \returns a constant reference to the unit normal vector of the plane, which corresponds
+    * to the linear part of the implicit equation.
+    */
+  inline ConstNormalReturnType normal() const { return ConstNormalReturnType(m_coeffs,0,0,dim(),1); }
+
+  /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds
+    * to the linear part of the implicit equation.
+    */
+  inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); }
+
+  /** \returns the distance to the origin, which is also the "constant term" of the implicit equation
+    * \warning the normal vector is assumed to be normalized.
+    */
+  inline const Scalar& offset() const { return m_coeffs.coeff(dim()); }
+
+  /** \returns a non-constant reference to the distance to the origin, which is also the constant part
+    * of the implicit equation */
+  inline Scalar& offset() { return m_coeffs(dim()); }
+
+  /** \returns a constant reference to the coefficients c_i of the plane equation:
+    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+    */
+  inline const Coefficients& coeffs() const { return m_coeffs; }
+
+  /** \returns a non-constant reference to the coefficients c_i of the plane equation:
+    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+    */
+  inline Coefficients& coeffs() { return m_coeffs; }
+
+  /** \returns the intersection of *this with \a other.
+    *
+    * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines.
+    *
+    * \note If \a other is approximately parallel to *this, this method will return any point on *this.
+    */
+  VectorType intersection(const Hyperplane& other) const
+  {
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
+    Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
+    // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests
+    // whether the two lines are approximately parallel.
+    if(internal::isMuchSmallerThan(det, Scalar(1)))
+    {   // special case where the two lines are approximately parallel. Pick any point on the first line.
+        if(internal::abs(coeffs().coeff(1))>internal::abs(coeffs().coeff(0)))
+            return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));
+        else
+            return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));
+    }
+    else
+    {   // general case
+        Scalar invdet = Scalar(1) / det;
+        return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)),
+                          invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2)));
+    }
+  }
+
+  /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this.
+    *
+    * \param mat the Dim x Dim transformation matrix
+    * \param traits specifies whether the matrix \a mat represents an #Isometry
+    *               or a more generic #Affine transformation. The default is #Affine.
+    */
+  template<typename XprType>
+  inline Hyperplane& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine)
+  {
+    if (traits==Affine)
+      normal() = mat.inverse().transpose() * normal();
+    else if (traits==Isometry)
+      normal() = mat * normal();
+    else
+    {
+      eigen_assert(0 && "invalid traits value in Hyperplane::transform()");
+    }
+    return *this;
+  }
+
+  /** Applies the transformation \a t to \c *this and returns a reference to \c *this.
+    *
+    * \param t the transformation of dimension Dim
+    * \param traits specifies whether the transformation \a t represents an #Isometry
+    *               or a more generic #Affine transformation. The default is #Affine.
+    *               Other kinds of transformations are not supported.
+    */
+  template<int TrOptions>
+  inline Hyperplane& transform(const Transform<Scalar,AmbientDimAtCompileTime,Affine,TrOptions>& t,
+                                TransformTraits traits = Affine)
+  {
+    transform(t.linear(), traits);
+    offset() -= normal().dot(t.translation());
+    return *this;
+  }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Hyperplane,
+           Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const
+  {
+    return typename internal::cast_return_type<Hyperplane,
+                    Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType,int OtherOptions>
+  inline explicit Hyperplane(const Hyperplane<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  template<int OtherOptions>
+  bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+protected:
+
+  Coefficients m_coeffs;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_HYPERPLANE_H
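A minimal sketch of the Hyperplane interface defined above, assuming three non-collinear points and an arbitrary query point:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  // Plane through three points (here: the z = 0 plane).
  Hyperplane<double, 3> plane = Hyperplane<double, 3>::Through(
      Vector3d(0, 0, 0), Vector3d(1, 0, 0), Vector3d(0, 1, 0));

  Vector3d p(0.3, 0.4, 2.0);
  std::cout << "signed distance: " << plane.signedDistance(p) << "\n"
            << "projection:      " << plane.projection(p).transpose() << std::endl;
  return 0;
}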
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/OrthoMethods.h b/resources/3rdParty/eigen/Eigen/src/Geometry/OrthoMethods.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Geometry/OrthoMethods.h
rename to resources/3rdParty/eigen/Eigen/src/Geometry/OrthoMethods.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Geometry/ParametrizedLine.h b/resources/3rdParty/eigen/Eigen/src/Geometry/ParametrizedLine.h
new file mode 100644
index 000000000..719a90441
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Geometry/ParametrizedLine.h
@@ -0,0 +1,195 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PARAMETRIZEDLINE_H
+#define EIGEN_PARAMETRIZEDLINE_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class ParametrizedLine
+  *
+  * \brief A parametrized line
+  *
+  * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit
+  * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to
+  * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+class ParametrizedLine
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
+  enum {
+    AmbientDimAtCompileTime = _AmbientDim,
+    Options = _Options
+  };
+  typedef _Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef DenseIndex Index;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType;
+
+  /** Default constructor without initialization */
+  inline explicit ParametrizedLine() {}
+  
+  template<int OtherOptions>
+  ParametrizedLine(const ParametrizedLine<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
+   : m_origin(other.origin()), m_direction(other.direction())
+  {}
+
+  /** Constructs a dynamic-size line with \a _dim the dimension
+    * of the ambient space */
+  inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {}
+
+  /** Initializes a parametrized line of direction \a direction and origin \a origin.
+    * \warning the direction vector \a direction is assumed to be normalized.
+    */
+  ParametrizedLine(const VectorType& origin, const VectorType& direction)
+    : m_origin(origin), m_direction(direction) {}
+
+  template <int OtherOptions>
+  explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane);
+
+  /** Constructs a parametrized line going from \a p0 to \a p1. */
+  static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1)
+  { return ParametrizedLine(p0, (p1-p0).normalized()); }
+
+  ~ParametrizedLine() {}
+
+  /** \returns the dimension of the ambient space in which the line lives */
+  inline Index dim() const { return m_direction.size(); }
+
+  const VectorType& origin() const { return m_origin; }
+  VectorType& origin() { return m_origin; }
+
+  const VectorType& direction() const { return m_direction; }
+  VectorType& direction() { return m_direction; }
+
+  /** \returns the squared distance of a point \a p to its projection onto the line \c *this.
+    * \sa distance()
+    */
+  RealScalar squaredDistance(const VectorType& p) const
+  {
+    VectorType diff = p - origin();
+    return (diff - direction().dot(diff) * direction()).squaredNorm();
+  }
+  /** \returns the distance of a point \a p to its projection onto the line \c *this.
+    * \sa squaredDistance()
+    */
+  RealScalar distance(const VectorType& p) const { return internal::sqrt(squaredDistance(p)); }
+
+  /** \returns the projection of a point \a p onto the line \c *this. */
+  VectorType projection(const VectorType& p) const
+  { return origin() + direction().dot(p-origin()) * direction(); }
+
+  VectorType pointAt( Scalar t ) const;
+  
+  template <int OtherOptions>
+  Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
+ 
+  template <int OtherOptions>
+  Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
+  
+  template <int OtherOptions>
+  VectorType intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<ParametrizedLine,
+           ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const
+  {
+    return typename internal::cast_return_type<ParametrizedLine,
+                    ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType,int OtherOptions>
+  inline explicit ParametrizedLine(const ParametrizedLine<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)
+  {
+    m_origin = other.origin().template cast<Scalar>();
+    m_direction = other.direction().template cast<Scalar>();
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const ParametrizedLine& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }
+
+protected:
+
+  VectorType m_origin, m_direction;
+};
+
+/** Constructs a parametrized line from a 2D hyperplane
+  *
+  * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+template <int OtherOptions>
+inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim,OtherOptions>& hyperplane)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
+  direction() = hyperplane.normal().unitOrthogonal();
+  origin() = -hyperplane.normal()*hyperplane.offset();
+}
+
+/** \returns the point at \a t along this line
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType
+ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt( _Scalar t ) const
+{
+  return origin() + (direction()*t); 
+}
+
+/** \returns the parameter value of the intersection between \c *this and the given \a hyperplane
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+template <int OtherOptions>
+inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
+{
+  return -(hyperplane.offset()+hyperplane.normal().dot(origin()))
+          / hyperplane.normal().dot(direction());
+}
+
+
+/** \deprecated use intersectionParameter()
+  * \returns the parameter value of the intersection between \c *this and the given \a hyperplane
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+template <int OtherOptions>
+inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
+{
+  return intersectionParameter(hyperplane);
+}
+
+/** \returns the point of the intersection between \c *this and the given hyperplane
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+template <int OtherOptions>
+inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType
+ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
+{
+  return pointAt(intersectionParameter(hyperplane));
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARAMETRIZEDLINE_H
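A minimal sketch combining the ParametrizedLine declared above with a Hyperplane; the line through the origin and the plane z = 1 are arbitrary choices:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  // Line through two points; the direction is normalized by Through().
  ParametrizedLine<double, 3> line = ParametrizedLine<double, 3>::Through(
      Vector3d(0, 0, 0), Vector3d(1, 1, 1));

  // The plane z = 1, written as n.x + d = 0 with n = (0,0,1), d = -1.
  Hyperplane<double, 3> plane(Vector3d::UnitZ(), -1.0);

  double t = line.intersectionParameter(plane);
  std::cout << "t = " << t << "\n"
            << "intersection point = "
            << line.pointAt(t).transpose() << std::endl;
  return 0;
}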
diff --git a/resources/3rdParty/eigen/Eigen/src/Geometry/Quaternion.h b/resources/3rdParty/eigen/Eigen/src/Geometry/Quaternion.h
new file mode 100644
index 000000000..8792e2da2
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Geometry/Quaternion.h
@@ -0,0 +1,778 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Mathieu Gautier <mathieu.gautier@cea.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_QUATERNION_H
+#define EIGEN_QUATERNION_H
+namespace Eigen { 
+
+
+/***************************************************************************
+* Definition of QuaternionBase<Derived>
+* The implementation is at the end of the file
+***************************************************************************/
+
+namespace internal {
+template<typename Other,
+         int OtherRows=Other::RowsAtCompileTime,
+         int OtherCols=Other::ColsAtCompileTime>
+struct quaternionbase_assign_impl;
+}
+
+/** \geometry_module \ingroup Geometry_Module
+  * \class QuaternionBase
+  * \brief Base class for quaternion expressions
+  * \tparam Derived derived type (CRTP)
+  * \sa class Quaternion
+  */
+template<class Derived>
+class QuaternionBase : public RotationBase<Derived, 3>
+{
+  typedef RotationBase<Derived, 3> Base;
+public:
+  using Base::operator*;
+  using Base::derived;
+
+  typedef typename internal::traits<Derived>::Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef typename internal::traits<Derived>::Coefficients Coefficients;
+  enum {
+    Flags = Eigen::internal::traits<Derived>::Flags
+  };
+
+ // typedef typename Matrix<Scalar,4,1> Coefficients;
+  /** the type of a 3D vector */
+  typedef Matrix<Scalar,3,1> Vector3;
+  /** the equivalent rotation matrix type */
+  typedef Matrix<Scalar,3,3> Matrix3;
+  /** the equivalent angle-axis type */
+  typedef AngleAxis<Scalar> AngleAxisType;
+
+
+
+  /** \returns the \c x coefficient */
+  inline Scalar x() const { return this->derived().coeffs().coeff(0); }
+  /** \returns the \c y coefficient */
+  inline Scalar y() const { return this->derived().coeffs().coeff(1); }
+  /** \returns the \c z coefficient */
+  inline Scalar z() const { return this->derived().coeffs().coeff(2); }
+  /** \returns the \c w coefficient */
+  inline Scalar w() const { return this->derived().coeffs().coeff(3); }
+
+  /** \returns a reference to the \c x coefficient */
+  inline Scalar& x() { return this->derived().coeffs().coeffRef(0); }
+  /** \returns a reference to the \c y coefficient */
+  inline Scalar& y() { return this->derived().coeffs().coeffRef(1); }
+  /** \returns a reference to the \c z coefficient */
+  inline Scalar& z() { return this->derived().coeffs().coeffRef(2); }
+  /** \returns a reference to the \c w coefficient */
+  inline Scalar& w() { return this->derived().coeffs().coeffRef(3); }
+
+  /** \returns a read-only vector expression of the imaginary part (x,y,z) */
+  inline const VectorBlock<const Coefficients,3> vec() const { return coeffs().template head<3>(); }
+
+  /** \returns a vector expression of the imaginary part (x,y,z) */
+  inline VectorBlock<Coefficients,3> vec() { return coeffs().template head<3>(); }
+
+  /** \returns a read-only vector expression of the coefficients (x,y,z,w) */
+  inline const typename internal::traits<Derived>::Coefficients& coeffs() const { return derived().coeffs(); }
+
+  /** \returns a vector expression of the coefficients (x,y,z,w) */
+  inline typename internal::traits<Derived>::Coefficients& coeffs() { return derived().coeffs(); }
+
+  EIGEN_STRONG_INLINE QuaternionBase<Derived>& operator=(const QuaternionBase<Derived>& other);
+  template<class OtherDerived> EIGEN_STRONG_INLINE Derived& operator=(const QuaternionBase<OtherDerived>& other);
+
+// disabled this copy operator as it is giving very strange compilation errors when compiling
+// test_stdvector with GCC 4.4.2. This looks like a GCC bug though, so feel free to re-enable it if it's
+// useful; however notice that we already have the templated operator= above and e.g. in MatrixBase
+// we didn't have to add, in addition to templated operator=, such a non-templated copy operator.
+//  Derived& operator=(const QuaternionBase& other)
+//  { return operator=<Derived>(other); }
+
+  Derived& operator=(const AngleAxisType& aa);
+  template<class OtherDerived> Derived& operator=(const MatrixBase<OtherDerived>& m);
+
+  /** \returns a quaternion representing an identity rotation
+    * \sa MatrixBase::Identity()
+    */
+  static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(1, 0, 0, 0); }
+
+  /** \sa QuaternionBase::Identity(), MatrixBase::setIdentity()
+    */
+  inline QuaternionBase& setIdentity() { coeffs() << 0, 0, 0, 1; return *this; }
+
+  /** \returns the squared norm of the quaternion's coefficients
+    * \sa QuaternionBase::norm(), MatrixBase::squaredNorm()
+    */
+  inline Scalar squaredNorm() const { return coeffs().squaredNorm(); }
+
+  /** \returns the norm of the quaternion's coefficients
+    * \sa QuaternionBase::squaredNorm(), MatrixBase::norm()
+    */
+  inline Scalar norm() const { return coeffs().norm(); }
+
+  /** Normalizes the quaternion \c *this
+    * \sa normalized(), MatrixBase::normalize() */
+  inline void normalize() { coeffs().normalize(); }
+  /** \returns a normalized copy of \c *this
+    * \sa normalize(), MatrixBase::normalized() */
+  inline Quaternion<Scalar> normalized() const { return Quaternion<Scalar>(coeffs().normalized()); }
+
+  /** \returns the dot product of \c *this and \a other
+    * Geometrically speaking, the dot product of two unit quaternions
+    * corresponds to the cosine of half the angle between the two rotations.
+    * \sa angularDistance()
+    */
+  template<class OtherDerived> inline Scalar dot(const QuaternionBase<OtherDerived>& other) const { return coeffs().dot(other.coeffs()); }
+
+  template<class OtherDerived> Scalar angularDistance(const QuaternionBase<OtherDerived>& other) const;
+
+  /** \returns an equivalent 3x3 rotation matrix */
+  Matrix3 toRotationMatrix() const;
+
+  /** \returns the quaternion which transforms \a a into \a b through a rotation */
+  template<typename Derived1, typename Derived2>
+  Derived& setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
+
+  template<class OtherDerived> EIGEN_STRONG_INLINE Quaternion<Scalar> operator* (const QuaternionBase<OtherDerived>& q) const;
+  template<class OtherDerived> EIGEN_STRONG_INLINE Derived& operator*= (const QuaternionBase<OtherDerived>& q);
+
+  /** \returns the quaternion describing the inverse rotation */
+  Quaternion<Scalar> inverse() const;
+
+  /** \returns the conjugated quaternion */
+  Quaternion<Scalar> conjugate() const;
+
+  /** \returns the spherical linear interpolation (slerp) between \c *this and \a other
+    * at the parameter \a t in [0;1].
+    * See http://en.wikipedia.org/wiki/Slerp
+    */
+  template<class OtherDerived> Quaternion<Scalar> slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const;
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  template<class OtherDerived>
+  bool isApprox(const QuaternionBase<OtherDerived>& other, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+  { return coeffs().isApprox(other.coeffs(), prec); }
+
+  /** \returns the vector \a v rotated by \c *this */
+  EIGEN_STRONG_INLINE Vector3 _transformVector(Vector3 v) const;
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type cast() const
+  {
+    return typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type(derived());
+  }
+
+#ifdef EIGEN_QUATERNIONBASE_PLUGIN
+# include EIGEN_QUATERNIONBASE_PLUGIN
+#endif
+};
+
+/***************************************************************************
+* Definition/implementation of Quaternion<Scalar>
+***************************************************************************/
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Quaternion
+  *
+  * \brief The quaternion class used to represent 3D orientations and rotations
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  *
+  * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
+  * orientations and rotations of objects in three dimensions. Compared to other representations
+  * like Euler angles or 3x3 matrices, quaternions offer the following advantages:
+  * \li \b compact storage (4 scalars)
+  * \li \b efficient to compose (28 flops),
+  * \li \b stable spherical interpolation
+  *
+  * The following two typedefs are provided for convenience:
+  * \li \c Quaternionf for \c float
+  * \li \c Quaterniond for \c double
+  *
+  * \sa  class AngleAxis, class Transform
+  */
+
+namespace internal {
+template<typename _Scalar,int _Options>
+struct traits<Quaternion<_Scalar,_Options> >
+{
+  typedef Quaternion<_Scalar,_Options> PlainObject;
+  typedef _Scalar Scalar;
+  typedef Matrix<_Scalar,4,1,_Options> Coefficients;
+  enum{
+    IsAligned = internal::traits<Coefficients>::Flags & AlignedBit,
+    Flags = IsAligned ? (AlignedBit | LvalueBit) : LvalueBit
+  };
+};
+}
+
+template<typename _Scalar, int _Options>
+class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >
+{
+  typedef QuaternionBase<Quaternion<_Scalar,_Options> > Base;
+  enum { IsAligned = internal::traits<Quaternion>::IsAligned };
+
+public:
+  typedef _Scalar Scalar;
+
+  EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion)
+  using Base::operator*=;
+
+  typedef typename internal::traits<Quaternion>::Coefficients Coefficients;
+  typedef typename Base::AngleAxisType AngleAxisType;
+
+  /** Default constructor leaving the quaternion uninitialized. */
+  inline Quaternion() {}
+
+  /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from
+    * its four coefficients \a w, \a x, \a y and \a z.
+    *
+    * \warning Note the order of the arguments: the real \a w coefficient first,
+    * while internally the coefficients are stored in the following order:
+    * [\c x, \c y, \c z, \c w]
+    */
+  inline Quaternion(Scalar w, Scalar x, Scalar y, Scalar z) : m_coeffs(x, y, z, w){}
+
+  /** Constructs and initializes a quaternion from the array \a data */
+  inline Quaternion(const Scalar* data) : m_coeffs(data) {}
+
+  /** Copy constructor */
+  template<class Derived> EIGEN_STRONG_INLINE Quaternion(const QuaternionBase<Derived>& other) { this->Base::operator=(other); }
+
+  /** Constructs and initializes a quaternion from the angle-axis \a aa */
+  explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; }
+
+  /** Constructs and initializes a quaternion from either:
+    *  - a rotation matrix expression,
+    *  - a 4D vector expression representing quaternion coefficients.
+    */
+  template<typename Derived>
+  explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; }
+
+  /** Explicit copy constructor with scalar conversion */
+  template<typename OtherScalar, int OtherOptions>
+  explicit inline Quaternion(const Quaternion<OtherScalar, OtherOptions>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+  template<typename Derived1, typename Derived2>
+  static Quaternion FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
+
+  inline Coefficients& coeffs() { return m_coeffs;}
+  inline const Coefficients& coeffs() const { return m_coeffs;}
+
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(IsAligned)
+
+protected:
+  Coefficients m_coeffs;
+  
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    static EIGEN_STRONG_INLINE void _check_template_params()
+    {
+      EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options,
+        INVALID_MATRIX_TEMPLATE_PARAMETERS)
+    }
+#endif
+};
+
+/** \ingroup Geometry_Module
+  * single precision quaternion type */
+typedef Quaternion<float> Quaternionf;
+/** \ingroup Geometry_Module
+  * double precision quaternion type */
+typedef Quaternion<double> Quaterniond;
+
+/***************************************************************************
+* Specialization of Map<Quaternion<Scalar>>
+***************************************************************************/
+
+namespace internal {
+  template<typename _Scalar, int _Options>
+  struct traits<Map<Quaternion<_Scalar>, _Options> >:
+  traits<Quaternion<_Scalar, _Options> >
+  {
+    typedef _Scalar Scalar;
+    typedef Map<Matrix<_Scalar,4,1>, _Options> Coefficients;
+
+    typedef traits<Quaternion<_Scalar, _Options> > TraitsBase;
+    enum {
+      IsAligned = TraitsBase::IsAligned,
+
+      Flags = TraitsBase::Flags
+    };
+  };
+}
+
+namespace internal {
+  template<typename _Scalar, int _Options>
+  struct traits<Map<const Quaternion<_Scalar>, _Options> >:
+  traits<Quaternion<_Scalar> >
+  {
+    typedef _Scalar Scalar;
+    typedef Map<const Matrix<_Scalar,4,1>, _Options> Coefficients;
+
+    typedef traits<Quaternion<_Scalar, _Options> > TraitsBase;
+    enum {
+      IsAligned = TraitsBase::IsAligned,
+      Flags = TraitsBase::Flags & ~LvalueBit
+    };
+  };
+}
+
+/** \brief Quaternion expression mapping a constant memory buffer
+  *
+  * \param _Scalar the type of the Quaternion coefficients
+  * \param _Options see class Map
+  *
+  * This is a specialization of class Map for Quaternion. This class allows one to view
+  * a 4 scalar memory buffer as an Eigen Quaternion object.
+  *
+  * \sa class Map, class Quaternion, class QuaternionBase
+  */
+template<typename _Scalar, int _Options>
+class Map<const Quaternion<_Scalar>, _Options >
+  : public QuaternionBase<Map<const Quaternion<_Scalar>, _Options> >
+{
+    typedef QuaternionBase<Map<const Quaternion<_Scalar>, _Options> > Base;
+
+  public:
+    typedef _Scalar Scalar;
+    typedef typename internal::traits<Map>::Coefficients Coefficients;
+    EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
+    using Base::operator*=;
+
+    /** Constructs a Mapped Quaternion object from the pointer \a coeffs
+      *
+      * The pointer \a coeffs must reference the four coefficients of the quaternion in the following order:
+      * \code *coeffs == {x, y, z, w} \endcode
+      *
+      * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
+    EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {}
+
+    inline const Coefficients& coeffs() const { return m_coeffs;}
+
+  protected:
+    const Coefficients m_coeffs;
+};
+
+/** \brief Expression of a quaternion from a memory buffer
+  *
+  * \param _Scalar the type of the Quaternion coefficients
+  * \param _Options see class Map
+  *
+  * This is a specialization of class Map for Quaternion. It allows viewing a
+  * memory buffer of 4 scalars as an Eigen Quaternion object.
+  *
+  * \sa class Map, class Quaternion, class QuaternionBase
+  */
+template<typename _Scalar, int _Options>
+class Map<Quaternion<_Scalar>, _Options >
+  : public QuaternionBase<Map<Quaternion<_Scalar>, _Options> >
+{
+    typedef QuaternionBase<Map<Quaternion<_Scalar>, _Options> > Base;
+
+  public:
+    typedef _Scalar Scalar;
+    typedef typename internal::traits<Map>::Coefficients Coefficients;
+    EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
+    using Base::operator*=;
+
+    /** Constructs a Mapped Quaternion object from the pointer \a coeffs
+      *
+      * The pointer \a coeffs must reference the four coefficients of the quaternion in the following order:
+      * \code *coeffs == {x, y, z, w} \endcode
+      *
+      * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
+    EIGEN_STRONG_INLINE Map(Scalar* coeffs) : m_coeffs(coeffs) {}
+
+    inline Coefficients& coeffs() { return m_coeffs; }
+    inline const Coefficients& coeffs() const { return m_coeffs; }
+
+  protected:
+    Coefficients m_coeffs;
+};
+
+/** \ingroup Geometry_Module
+  * Map an unaligned array of single precision scalar as a quaternion */
+typedef Map<Quaternion<float>, 0>         QuaternionMapf;
+/** \ingroup Geometry_Module
+  * Map an unaligned array of double precision scalar as a quaternion */
+typedef Map<Quaternion<double>, 0>        QuaternionMapd;
+/** \ingroup Geometry_Module
+  * Map a 16-byte aligned array of single precision scalars as a quaternion */
+typedef Map<Quaternion<float>, Aligned>   QuaternionMapAlignedf;
+/** \ingroup Geometry_Module
+  * Map a 16-byte aligned array of double precision scalars as a quaternion */
+typedef Map<Quaternion<double>, Aligned>  QuaternionMapAlignedd;
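+
+// Usage sketch for the Map typedefs above (the buffer stores [x, y, z, w]):
+//   float data[4] = {0.f, 0.f, 0.f, 1.f};      // identity quaternion, w stored last
+//   QuaternionMapf qm(data);                   // view the raw buffer as a quaternion
+//   Vector3f r = qm * Vector3f::UnitX();       // use it like any other Quaternionf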
+
+/***************************************************************************
+* Implementation of QuaternionBase methods
+***************************************************************************/
+
+// Generic Quaternion * Quaternion product
+// This product can be specialized for a given architecture via the Arch template argument.
+namespace internal {
+template<int Arch, class Derived1, class Derived2, typename Scalar, int _Options> struct quat_product
+{
+  static EIGEN_STRONG_INLINE Quaternion<Scalar> run(const QuaternionBase<Derived1>& a, const QuaternionBase<Derived2>& b){
+    return Quaternion<Scalar>
+    (
+      a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),
+      a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),
+      a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),
+      a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x()
+    );
+  }
+};
+}
+
+/** \returns the concatenation of two rotations as a quaternion-quaternion product */
+template <class Derived>
+template <class OtherDerived>
+EIGEN_STRONG_INLINE Quaternion<typename internal::traits<Derived>::Scalar>
+QuaternionBase<Derived>::operator* (const QuaternionBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename OtherDerived::Scalar>::value),
+   YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+  return internal::quat_product<Architecture::Target, Derived, OtherDerived,
+                         typename internal::traits<Derived>::Scalar,
+                         internal::traits<Derived>::IsAligned && internal::traits<OtherDerived>::IsAligned>::run(*this, other);
+}
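+
+// Usage sketch for the product above: (q1 * q2) applied to a vector rotates by q2
+// first and then by q1, matching the matrix convention R1 * R2.
+//   Quaternionf q1(AngleAxisf(0.5f, Vector3f::UnitZ()));
+//   Quaternionf q2(AngleAxisf(0.5f, Vector3f::UnitX()));
+//   Quaternionf q12 = q1 * q2;                 // q12 * v == q1 * (q2 * v)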
+
+/** \sa operator*(Quaternion) */
+template <class Derived>
+template <class OtherDerived>
+EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const QuaternionBase<OtherDerived>& other)
+{
+  derived() = derived() * other.derived();
+  return derived();
+}
+
+/** Rotation of a vector by a quaternion.
+  * \remarks If the quaternion is used to rotate several points (>1)
+  * then it is much more efficient to first convert it to a 3x3 Matrix.
+  * Comparison of the operation cost for n transformations:
+  *   - Quaternion:     30n
+  *   - Via a Matrix3: 24 + 15n
+  */
+template <class Derived>
+EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3
+QuaternionBase<Derived>::_transformVector(Vector3 v) const
+{
+    // Note that this algorithm comes from the optimization by hand
+    // of the conversion to a Matrix followed by a Matrix/Vector product.
+    // It appears to be much faster than the common algorithm found
+    // in the literature (30 versus 39 flops). It also requires two
+    // Vector3 as temporaries.
+    Vector3 uv = this->vec().cross(v);
+    uv += uv;
+    return v + this->w() * uv + this->vec().cross(uv);
+}
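+
+// Usage sketch for the remark above: when many points are rotated, convert once.
+//   Matrix3f R = q.toRotationMatrix();         // one conversion (~24 flops)
+//   for (int i = 0; i < n; ++i)
+//     out[i] = R * in[i];                      // 15 flops per point
+//   // versus out[i] = q * in[i];              // ~30 flops per point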
+
+template<class Derived>
+EIGEN_STRONG_INLINE QuaternionBase<Derived>& QuaternionBase<Derived>::operator=(const QuaternionBase<Derived>& other)
+{
+  coeffs() = other.coeffs();
+  return derived();
+}
+
+template<class Derived>
+template<class OtherDerived>
+EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const QuaternionBase<OtherDerived>& other)
+{
+  coeffs() = other.coeffs();
+  return derived();
+}
+
+/** Sets \c *this from the angle-axis \a aa and returns a reference to \c *this
+  */
+template<class Derived>
+EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const AngleAxisType& aa)
+{
+  Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings
+  this->w() = internal::cos(ha);
+  this->vec() = internal::sin(ha) * aa.axis();
+  return derived();
+}
+
+/** Set \c *this from the expression \a xpr:
+  *   - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion
+  *   - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be a rotation matrix
+  *     and \a xpr is converted to a quaternion
+  */
+
+template<class Derived>
+template<class MatrixDerived>
+inline Derived& QuaternionBase<Derived>::operator=(const MatrixBase<MatrixDerived>& xpr)
+{
+  EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename MatrixDerived::Scalar>::value),
+   YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+  internal::quaternionbase_assign_impl<MatrixDerived>::run(*this, xpr.derived());
+  return derived();
+}
+
+/** Convert the quaternion to a 3x3 rotation matrix. The quaternion is required to
+  * be normalized, otherwise the result is undefined.
+  */
+template<class Derived>
+inline typename QuaternionBase<Derived>::Matrix3
+QuaternionBase<Derived>::toRotationMatrix(void) const
+{
+  // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!)
+  // if not inlined then the cost of the return by value is huge ~ +35%,
+  // however, not inlining this function is an order of magnitude slower, so
+  // it has to be inlined, and so the return by value is not an issue
+  Matrix3 res;
+
+  const Scalar tx  = Scalar(2)*this->x();
+  const Scalar ty  = Scalar(2)*this->y();
+  const Scalar tz  = Scalar(2)*this->z();
+  const Scalar twx = tx*this->w();
+  const Scalar twy = ty*this->w();
+  const Scalar twz = tz*this->w();
+  const Scalar txx = tx*this->x();
+  const Scalar txy = ty*this->x();
+  const Scalar txz = tz*this->x();
+  const Scalar tyy = ty*this->y();
+  const Scalar tyz = tz*this->y();
+  const Scalar tzz = tz*this->z();
+
+  res.coeffRef(0,0) = Scalar(1)-(tyy+tzz);
+  res.coeffRef(0,1) = txy-twz;
+  res.coeffRef(0,2) = txz+twy;
+  res.coeffRef(1,0) = txy+twz;
+  res.coeffRef(1,1) = Scalar(1)-(txx+tzz);
+  res.coeffRef(1,2) = tyz-twx;
+  res.coeffRef(2,0) = txz-twy;
+  res.coeffRef(2,1) = tyz+twx;
+  res.coeffRef(2,2) = Scalar(1)-(txx+tyy);
+
+  return res;
+}
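+
+// Usage sketch for toRotationMatrix() above: normalize before converting.
+//   Quaternionf q(2.f, 0.f, 0.f, 0.f);               // not normalized
+//   Matrix3f R = q.normalized().toRotationMatrix();  // here R is the identity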
+
+/** Sets \c *this to be a quaternion representing a rotation between
+  * the two arbitrary vectors \a a and \a b. In other words, the built
+  * rotation represents a rotation sending the line of direction \a a
+  * to the line of direction \a b, both lines passing through the origin.
+  *
+  * \returns a reference to \c *this.
+  *
+  * Note that the two input vectors do \b not have to be normalized, and
+  * do not need to have the same norm.
+  */
+template<class Derived>
+template<typename Derived1, typename Derived2>
+inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
+{
+  using std::max;
+  Vector3 v0 = a.normalized();
+  Vector3 v1 = b.normalized();
+  Scalar c = v1.dot(v0);
+
+  // if dot == -1, vectors are nearly opposites
+  // => accurately compute the rotation axis by computing the
+  //    intersection of the two planes. This is done by solving:
+  //       x^T v0 = 0
+  //       x^T v1 = 0
+  //    under the constraint:
+  //       ||x|| = 1
+  //    which yields a singular value problem
+  if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
+  {
+    c = max<Scalar>(c,-1);
+    Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
+    JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
+    Vector3 axis = svd.matrixV().col(2);
+
+    Scalar w2 = (Scalar(1)+c)*Scalar(0.5);
+    this->w() = internal::sqrt(w2);
+    this->vec() = axis * internal::sqrt(Scalar(1) - w2);
+    return derived();
+  }
+  Vector3 axis = v0.cross(v1);
+  Scalar s = internal::sqrt((Scalar(1)+c)*Scalar(2));
+  Scalar invs = Scalar(1)/s;
+  this->vec() = axis * invs;
+  this->w() = s * Scalar(0.5);
+
+  return derived();
+}
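+
+// Usage sketch for setFromTwoVectors() above (the inputs need not be normalized):
+//   Quaternionf q;
+//   q.setFromTwoVectors(Vector3f::UnitX(), Vector3f(1.f, 1.f, 0.f));
+//   // q * Vector3f::UnitX() now points along the direction (1, 1, 0)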
+
+
+/** Returns a quaternion representing a rotation between
+  * the two arbitrary vectors \a a and \a b. In other words, the built
+  * rotation represents a rotation sending the line of direction \a a
+  * to the line of direction \a b, both lines passing through the origin.
+  *
+  * \returns resulting quaternion
+  *
+  * Note that the two input vectors do \b not have to be normalized, and
+  * do not need to have the same norm.
+  */
+template<typename Scalar, int Options>
+template<typename Derived1, typename Derived2>
+Quaternion<Scalar,Options> Quaternion<Scalar,Options>::FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
+{
+    Quaternion quat;
+    quat.setFromTwoVectors(a, b);
+    return quat;
+}
+
+
+/** \returns the multiplicative inverse of \c *this.
+  * Note that in most cases, i.e., if you simply want the opposite rotation
+  * and/or the quaternion is normalized, then it is enough to use the conjugate.
+  *
+  * \sa QuaternionBase::conjugate()
+  */
+template <class Derived>
+inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Derived>::inverse() const
+{
+  // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite()  ??
+  Scalar n2 = this->squaredNorm();
+  if (n2 > 0)
+    return Quaternion<Scalar>(conjugate().coeffs() / n2);
+  else
+  {
+    // return an invalid result to flag the error
+    return Quaternion<Scalar>(Coefficients::Zero());
+  }
+}
+
+/** \returns the conjugate of \c *this, which is equal to the multiplicative inverse
+  * if the quaternion is normalized.
+  * The conjugate of a quaternion represents the opposite rotation.
+  *
+  * \sa QuaternionBase::inverse()
+  */
+template <class Derived>
+inline Quaternion<typename internal::traits<Derived>::Scalar>
+QuaternionBase<Derived>::conjugate() const
+{
+  return Quaternion<Scalar>(this->w(),-this->x(),-this->y(),-this->z());
+}
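+
+// Usage sketch: for a normalized quaternion, conjugate() equals inverse() but
+// avoids the division by the squared norm.
+//   Quaternionf qn = q.normalized();
+//   Quaternionf qinv = qn.conjugate();         // represents the opposite rotation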
+
+/** \returns the angle (in radians) between two rotations
+  * \sa dot()
+  */
+template <class Derived>
+template <class OtherDerived>
+inline typename internal::traits<Derived>::Scalar
+QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
+{
+  using std::acos;
+  double d = internal::abs(this->dot(other));
+  if (d>=1.0)
+    return Scalar(0);
+  return static_cast<Scalar>(2 * acos(d));
+}
+
+/** \returns the spherical linear interpolation between the two quaternions
+  * \c *this and \a other at the parameter \a t
+  */
+template <class Derived>
+template <class OtherDerived>
+Quaternion<typename internal::traits<Derived>::Scalar>
+QuaternionBase<Derived>::slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const
+{
+  using std::acos;
+  static const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon();
+  Scalar d = this->dot(other);
+  Scalar absD = internal::abs(d);
+
+  Scalar scale0;
+  Scalar scale1;
+
+  if(absD>=one)
+  {
+    scale0 = Scalar(1) - t;
+    scale1 = t;
+  }
+  else
+  {
+    // theta is the angle between the 2 quaternions
+    Scalar theta = acos(absD);
+    Scalar sinTheta = internal::sin(theta);
+
+    scale0 = internal::sin( ( Scalar(1) - t ) * theta) / sinTheta;
+    scale1 = internal::sin( ( t * theta) ) / sinTheta;
+  }
+  if(d<0) scale1 = -scale1;
+
+  return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
+}
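+
+// Usage sketch for slerp() above: interpolate between two orientations.
+//   Quaternionf qa, qb;                        // assumed normalized
+//   Quaternionf qh = qa.slerp(0.5f, qb);       // halfway along the shortest arc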
+
+namespace internal {
+
+// set from a rotation matrix
+template<typename Other>
+struct quaternionbase_assign_impl<Other,3,3>
+{
+  typedef typename Other::Scalar Scalar;
+  typedef DenseIndex Index;
+  template<class Derived> static inline void run(QuaternionBase<Derived>& q, const Other& mat)
+  {
+    // This algorithm comes from  "Quaternion Calculus and Fast Animation",
+    // Ken Shoemake, 1987 SIGGRAPH course notes
+    Scalar t = mat.trace();
+    if (t > Scalar(0))
+    {
+      t = sqrt(t + Scalar(1.0));
+      q.w() = Scalar(0.5)*t;
+      t = Scalar(0.5)/t;
+      q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t;
+      q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t;
+      q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t;
+    }
+    else
+    {
+      DenseIndex i = 0;
+      if (mat.coeff(1,1) > mat.coeff(0,0))
+        i = 1;
+      if (mat.coeff(2,2) > mat.coeff(i,i))
+        i = 2;
+      DenseIndex j = (i+1)%3;
+      DenseIndex k = (j+1)%3;
+
+      t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
+      q.coeffs().coeffRef(i) = Scalar(0.5) * t;
+      t = Scalar(0.5)/t;
+      q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t;
+      q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t;
+      q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t;
+    }
+  }
+};
+
+// set from a vector of coefficients assumed to be a quaternion
+template<typename Other>
+struct quaternionbase_assign_impl<Other,4,1>
+{
+  typedef typename Other::Scalar Scalar;
+  template<class Derived> static inline void run(QuaternionBase<Derived>& q, const Other& vec)
+  {
+    q.coeffs() = vec;
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_QUATERNION_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Geometry/Rotation2D.h b/resources/3rdParty/eigen/Eigen/src/Geometry/Rotation2D.h
new file mode 100644
index 000000000..868e2ef31
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Geometry/Rotation2D.h
@@ -0,0 +1,154 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ROTATION2D_H
+#define EIGEN_ROTATION2D_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Rotation2D
+  *
+  * \brief Represents a rotation/orientation in a 2 dimensional space.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  *
+  * This class is equivalent to a single scalar representing a counter-clockwise rotation
+  * as a single angle in radians. It provides some additional features such as the automatic
+  * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar
+  * interface to Quaternion in order to facilitate the writing of generic algorithms
+  * dealing with rotations.
+  *
+  * \sa class Quaternion, class Transform
+  */
+
+namespace internal {
+
+template<typename _Scalar> struct traits<Rotation2D<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+} // end namespace internal
+
+template<typename _Scalar>
+class Rotation2D : public RotationBase<Rotation2D<_Scalar>,2>
+{
+  typedef RotationBase<Rotation2D<_Scalar>,2> Base;
+
+public:
+
+  using Base::operator*;
+
+  enum { Dim = 2 };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef Matrix<Scalar,2,1> Vector2;
+  typedef Matrix<Scalar,2,2> Matrix2;
+
+protected:
+
+  Scalar m_angle;
+
+public:
+
+  /** Constructs a 2D counter-clockwise rotation from the angle \a a in radians. */
+  inline Rotation2D(Scalar a) : m_angle(a) {}
+
+  /** \returns the rotation angle */
+  inline Scalar angle() const { return m_angle; }
+
+  /** \returns a read-write reference to the rotation angle */
+  inline Scalar& angle() { return m_angle; }
+
+  /** \returns the inverse rotation */
+  inline Rotation2D inverse() const { return -m_angle; }
+
+  /** Concatenates two rotations */
+  inline Rotation2D operator*(const Rotation2D& other) const
+  { return m_angle + other.m_angle; }
+
+  /** Concatenates two rotations */
+  inline Rotation2D& operator*=(const Rotation2D& other)
+  { m_angle += other.m_angle; return *this; }
+
+  /** Applies the rotation to a 2D vector */
+  Vector2 operator* (const Vector2& vec) const
+  { return toRotationMatrix() * vec; }
+
+  template<typename Derived>
+  Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
+  Matrix2 toRotationMatrix(void) const;
+
+  /** \returns the spherical interpolation between \c *this and \a other using
+    * parameter \a t. It is in fact equivalent to a linear interpolation.
+    */
+  inline Rotation2D slerp(Scalar t, const Rotation2D& other) const
+  { return m_angle * (1-t) + other.angle() * t; }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Rotation2D(const Rotation2D<OtherScalarType>& other)
+  {
+    m_angle = Scalar(other.angle());
+  }
+
+  static inline Rotation2D Identity() { return Rotation2D(0); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Rotation2D& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  { return internal::isApprox(m_angle,other.m_angle, prec); }
+};
+
+/** \ingroup Geometry_Module
+  * single precision 2D rotation type */
+typedef Rotation2D<float> Rotation2Df;
+/** \ingroup Geometry_Module
+  * double precision 2D rotation type */
+typedef Rotation2D<double> Rotation2Dd;
+
+/** Set \c *this from a 2x2 rotation matrix \a mat.
+  * In other words, this function extracts the rotation angle
+  * from the rotation matrix.
+  */
+template<typename Scalar>
+template<typename Derived>
+Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
+{
+  EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_angle = internal::atan2(mat.coeff(1,0), mat.coeff(0,0));
+  return *this;
+}
+
+/** Constructs and \returns an equivalent 2x2 rotation matrix.
+  */
+template<typename Scalar>
+typename Rotation2D<Scalar>::Matrix2
+Rotation2D<Scalar>::toRotationMatrix(void) const
+{
+  Scalar sinA = internal::sin(m_angle);
+  Scalar cosA = internal::cos(m_angle);
+  return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
+}
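+
+// Usage sketch: compose two planar rotations and apply them to a vector.
+//   Rotation2Df ra(0.25f), rb(0.5f);             // angles in radians
+//   Vector2f v = (ra * rb) * Vector2f::UnitX();  // rotation by 0.75 rad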
+
+} // end namespace Eigen
+
+#endif // EIGEN_ROTATION2D_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/RotationBase.h b/resources/3rdParty/eigen/Eigen/src/Geometry/RotationBase.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Geometry/RotationBase.h
rename to resources/3rdParty/eigen/Eigen/src/Geometry/RotationBase.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Geometry/Scaling.h b/resources/3rdParty/eigen/Eigen/src/Geometry/Scaling.h
new file mode 100644
index 000000000..8edcac31c
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Geometry/Scaling.h
@@ -0,0 +1,166 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SCALING_H
+#define EIGEN_SCALING_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Scaling
+  *
+  * \brief Represents a generic uniform scaling transformation
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  *
+  * This class represents a uniform scaling transformation. It is the return
+  * type of Scaling(Scalar), and most of the time this is the only way it
+  * is used. In particular, this class is not meant to store a scaling transformation,
+  * but rather to simplify the construction and update of Transform objects.
+  *
+  * To represent an axis aligned scaling, use the DiagonalMatrix class.
+  *
+  * \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform
+  */
+template<typename _Scalar>
+class UniformScaling
+{
+public:
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+
+protected:
+
+  Scalar m_factor;
+
+public:
+
+  /** Default constructor without initialization. */
+  UniformScaling() {}
+  /** Constructs and initializes a uniform scaling transformation */
+  explicit inline UniformScaling(const Scalar& s) : m_factor(s) {}
+
+  inline const Scalar& factor() const { return m_factor; }
+  inline Scalar& factor() { return m_factor; }
+
+  /** Concatenates two uniform scalings */
+  inline UniformScaling operator* (const UniformScaling& other) const
+  { return UniformScaling(m_factor * other.factor()); }
+
+  /** Concatenates a uniform scaling and a translation */
+  template<int Dim>
+  inline Transform<Scalar,Dim,Affine> operator* (const Translation<Scalar,Dim>& t) const;
+
+  /** Concatenates a uniform scaling and an affine transformation */
+  template<int Dim, int Mode, int Options>
+  inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator* (const Transform<Scalar,Dim, Mode, Options>& t) const
+  {
+    Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> res = t;
+    res.prescale(factor());
+    return res;
+  }
+
+  /** Concatenates a uniform scaling and a linear transformation matrix */
+  // TODO returns an expression
+  template<typename Derived>
+  inline typename internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const
+  { return other * m_factor; }
+
+  template<typename Derived,int Dim>
+  inline Matrix<Scalar,Dim,Dim> operator*(const RotationBase<Derived,Dim>& r) const
+  { return r.toRotationMatrix() * m_factor; }
+
+  /** \returns the inverse scaling */
+  inline UniformScaling inverse() const
+  { return UniformScaling(Scalar(1)/m_factor); }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline UniformScaling<NewScalarType> cast() const
+  { return UniformScaling<NewScalarType>(NewScalarType(m_factor)); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit UniformScaling(const UniformScaling<OtherScalarType>& other)
+  { m_factor = Scalar(other.factor()); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const UniformScaling& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  { return internal::isApprox(m_factor, other.factor(), prec); }
+
+};
+
+/** Concatenates a linear transformation matrix and a uniform scaling */
+// NOTE this operator is defined in MatrixBase and not as a friend function
+// of UniformScaling to fix an internal crash of Intel's ICC
+template<typename Derived> typename MatrixBase<Derived>::ScalarMultipleReturnType
+MatrixBase<Derived>::operator*(const UniformScaling<Scalar>& s) const
+{ return derived() * s.factor(); }
+
+/** Constructs a uniform scaling from scale factor \a s */
+static inline UniformScaling<float> Scaling(float s) { return UniformScaling<float>(s); }
+/** Constructs a uniform scaling from scale factor \a s */
+static inline UniformScaling<double> Scaling(double s) { return UniformScaling<double>(s); }
+/** Constructs a uniform scaling from scale factor \a s */
+template<typename RealScalar>
+static inline UniformScaling<std::complex<RealScalar> > Scaling(const std::complex<RealScalar>& s)
+{ return UniformScaling<std::complex<RealScalar> >(s); }
+
+/** Constructs a 2D axis aligned scaling */
+template<typename Scalar>
+static inline DiagonalMatrix<Scalar,2> Scaling(Scalar sx, Scalar sy)
+{ return DiagonalMatrix<Scalar,2>(sx, sy); }
+/** Constructs a 3D axis aligned scaling */
+template<typename Scalar>
+static inline DiagonalMatrix<Scalar,3> Scaling(Scalar sx, Scalar sy, Scalar sz)
+{ return DiagonalMatrix<Scalar,3>(sx, sy, sz); }
+
+/** Constructs an axis aligned scaling expression from the vector expression \a coeffs.
+  * This is an alias for coeffs.asDiagonal().
+  */
+template<typename Derived>
+static inline const DiagonalWrapper<const Derived> Scaling(const MatrixBase<Derived>& coeffs)
+{ return coeffs.asDiagonal(); }
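+
+// Usage sketch for the Scaling() helpers above: uniform versus axis aligned scaling.
+//   UniformScaling<float> u = Scaling(2.f);              // uniform factor 2
+//   DiagonalMatrix<float,3> d = Scaling(1.f, 2.f, 3.f);  // per-axis factors
+//   Vector3f w = u * (d * Vector3f::Ones());             // yields (2, 4, 6)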
+
+/** \addtogroup Geometry_Module */
+//@{
+/** \deprecated */
+typedef DiagonalMatrix<float, 2> AlignedScaling2f;
+/** \deprecated */
+typedef DiagonalMatrix<double,2> AlignedScaling2d;
+/** \deprecated */
+typedef DiagonalMatrix<float, 3> AlignedScaling3f;
+/** \deprecated */
+typedef DiagonalMatrix<double,3> AlignedScaling3d;
+//@}
+
+template<typename Scalar>
+template<int Dim>
+inline Transform<Scalar,Dim,Affine>
+UniformScaling<Scalar>::operator* (const Translation<Scalar,Dim>& t) const
+{
+  Transform<Scalar,Dim,Affine> res;
+  res.matrix().setZero();
+  res.linear().diagonal().fill(factor());
+  res.translation() = factor() * t.vector();
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SCALING_H
diff --git a/resources/3rdParty/eigen/Eigen/src/Geometry/Transform.h b/resources/3rdParty/eigen/Eigen/src/Geometry/Transform.h
new file mode 100644
index 000000000..4c1ef8eaa
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Geometry/Transform.h
@@ -0,0 +1,1440 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRANSFORM_H
+#define EIGEN_TRANSFORM_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Transform>
+struct transform_traits
+{
+  enum
+  {
+    Dim = Transform::Dim,
+    HDim = Transform::HDim,
+    Mode = Transform::Mode,
+    IsProjective = (int(Mode)==int(Projective))
+  };
+};
+
+template< typename TransformType,
+          typename MatrixType,
+          int Case = transform_traits<TransformType>::IsProjective ? 0
+                   : int(MatrixType::RowsAtCompileTime) == int(transform_traits<TransformType>::HDim) ? 1
+                   : 2>
+struct transform_right_product_impl;
+
+template< typename Other,
+          int Mode,
+          int Options,
+          int Dim,
+          int HDim,
+          int OtherRows=Other::RowsAtCompileTime,
+          int OtherCols=Other::ColsAtCompileTime>
+struct transform_left_product_impl;
+
+template< typename Lhs,
+          typename Rhs,
+          bool AnyProjective = 
+            transform_traits<Lhs>::IsProjective ||
+            transform_traits<Rhs>::IsProjective>
+struct transform_transform_product_impl;
+
+template< typename Other,
+          int Mode,
+          int Options,
+          int Dim,
+          int HDim,
+          int OtherRows=Other::RowsAtCompileTime,
+          int OtherCols=Other::ColsAtCompileTime>
+struct transform_construct_from_matrix;
+
+template<typename TransformType> struct transform_take_affine_part;
+
+} // end namespace internal
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Transform
+  *
+  * \brief Represents a homogeneous transformation in an N dimensional space
+  *
+  * \tparam _Scalar the scalar type, i.e., the type of the coefficients
+  * \tparam _Dim the dimension of the space
+  * \tparam _Mode the type of the transformation. Can be:
+  *              - #Affine: the transformation is stored as a (Dim+1)^2 matrix,
+  *                         where the last row is assumed to be [0 ... 0 1].
+  *              - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix.
+  *              - #Projective: the transformation is stored as a (Dim+1)^2 matrix
+  *                             without any assumption.
+  * \tparam _Options has the same meaning as in class Matrix. It allows to specify DontAlign and/or RowMajor.
+  *                  These Options are passed directly to the underlying matrix type.
+  *
+  * The homography is internally represented and stored by a matrix which
+  * is available through the matrix() method. To understand the behavior of
+  * this class you have to think of a Transform object as its internal
+  * matrix representation. The chosen convention is right multiply:
+  *
+  * \code v' = T * v \endcode
+  *
+  * Therefore, an affine transformation matrix M is shaped like this:
+  *
+  * \f$ \left( \begin{array}{cc}
+  * linear & translation\\
+  * 0 ... 0 & 1
+  * \end{array} \right) \f$
+  *
+  * Note that for a projective transformation the last row can be anything,
+  * and then the interpretation of different parts might be slightly different.
+  *
+  * However, unlike a plain matrix, the Transform class provides many features
+  * simplifying both its assembly and usage. In particular, it can be composed
+  * with any other transformations (Transform,Translation,RotationBase,Matrix)
+  * and can be directly used to transform implicit homogeneous vectors. All these
+  * operations are handled via the operator*. For the composition of transformations,
+  * the principle is to first convert the right/left hand sides of the product
+  * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product.
+  * Of course, internally, operator* tries to perform the minimal number of operations
+  * according to the nature of each term. Likewise, when applying the transform
+  * to non homogeneous vectors, the latter are automatically promoted to homogeneous
+  * ones before doing the matrix product. The conversions to homogeneous representations
+  * are performed as follows:
+  *
+  * \b Translation t (Dim)x(1):
+  * \f$ \left( \begin{array}{cc}
+  * I & t \\
+  * 0\,...\,0 & 1
+  * \end{array} \right) \f$
+  *
+  * \b Rotation R (Dim)x(Dim):
+  * \f$ \left( \begin{array}{cc}
+  * R & 0\\
+  * 0\,...\,0 & 1
+  * \end{array} \right) \f$
+  *
+  * \b Linear \b Matrix L (Dim)x(Dim):
+  * \f$ \left( \begin{array}{cc}
+  * L & 0\\
+  * 0\,...\,0 & 1
+  * \end{array} \right) \f$
+  *
+  * \b Affine \b Matrix A (Dim)x(Dim+1):
+  * \f$ \left( \begin{array}{c}
+  * A\\
+  * 0\,...\,0\,1
+  * \end{array} \right) \f$
+  *
+  * \b Column \b vector v (Dim)x(1):
+  * \f$ \left( \begin{array}{c}
+  * v\\
+  * 1
+  * \end{array} \right) \f$
+  *
+  * \b Set \b of \b column \b vectors V1...Vn (Dim)x(n):
+  * \f$ \left( \begin{array}{ccc}
+  * v_1 & ... & v_n\\
+  * 1 & ... & 1
+  * \end{array} \right) \f$
+  *
+  * The concatenation of a Transform object with any kind of other transformation
+  * always returns a Transform object.
+  *
+  * A little exception to the "as pure matrix product" rule is the case of the
+  * transformation of non homogeneous vectors by an affine transformation. In
+  * that case the last matrix row can be ignored, and the product returns non
+  * homogeneous vectors.
+  *
+  * Since, for instance, a Dim x Dim matrix is interpreted as a linear transformation,
+  * it is not possible to directly transform Dim vectors stored in a Dim x Dim matrix.
+  * The solution is either to use a Dim x Dynamic matrix or explicitly request a
+  * vector transformation by making the vectors homogeneous:
+  * \code
+  * m' = T * m.colwise().homogeneous();
+  * \endcode
+  * Note that there is zero overhead.
+  *
+  * Conversion methods from/to Qt's QMatrix and QTransform are available if the
+  * preprocessor token EIGEN_QT_SUPPORT is defined.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TRANSFORM_PLUGIN.
+  *
+  * \sa class Matrix, class Quaternion
+  */
+template<typename _Scalar, int _Dim, int _Mode, int _Options>
+class Transform
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))
+  enum {
+    Mode = _Mode,
+    Options = _Options,
+    Dim = _Dim,     ///< space dimension in which the transformation holds
+    HDim = _Dim+1,  ///< size of a respective homogeneous vector
+    Rows = int(Mode)==(AffineCompact) ? Dim : HDim
+  };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef DenseIndex Index;
+  /** type of the matrix used to represent the transformation */
+  typedef typename internal::make_proper_matrix_type<Scalar,Rows,HDim,Options>::type MatrixType;
+  /** constified MatrixType */
+  typedef const MatrixType ConstMatrixType;
+  /** type of the matrix used to represent the linear part of the transformation */
+  typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
+  /** type of read/write reference to the linear part of the transformation */
+  typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact)> LinearPart;
+  /** type of read reference to the linear part of the transformation */
+  typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact)> ConstLinearPart;
+  /** type of read/write reference to the affine part of the transformation */
+  typedef typename internal::conditional<int(Mode)==int(AffineCompact),
+                              MatrixType&,
+                              Block<MatrixType,Dim,HDim> >::type AffinePart;
+  /** type of read reference to the affine part of the transformation */
+  typedef typename internal::conditional<int(Mode)==int(AffineCompact),
+                              const MatrixType&,
+                              const Block<const MatrixType,Dim,HDim> >::type ConstAffinePart;
+  /** type of a vector */
+  typedef Matrix<Scalar,Dim,1> VectorType;
+  /** type of a read/write reference to the translation part of the rotation */
+  typedef Block<MatrixType,Dim,1,int(Mode)==(AffineCompact)> TranslationPart;
+  /** type of a read reference to the translation part of the rotation */
+  typedef const Block<ConstMatrixType,Dim,1,int(Mode)==(AffineCompact)> ConstTranslationPart;
+  /** corresponding translation type */
+  typedef Translation<Scalar,Dim> TranslationType;
+  
+  // this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0
+  enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) };
+  /** The return type of the product between a diagonal matrix and a transform */
+  typedef Transform<Scalar,Dim,TransformTimeDiagonalMode> TransformTimeDiagonalReturnType;
+
+protected:
+
+  MatrixType m_matrix;
+
+public:
+
+  /** Default constructor without initialization of the meaningful coefficients.
+    * If Mode==Affine, then the last row is set to [0 ... 0 1] */
+  inline Transform()
+  {
+    check_template_params();
+    if (int(Mode)==Affine)
+      makeAffine();
+  }
+
+  inline Transform(const Transform& other)
+  {
+    check_template_params();
+    m_matrix = other.m_matrix;
+  }
+
+  inline explicit Transform(const TranslationType& t)
+  {
+    check_template_params();
+    *this = t;
+  }
+  inline explicit Transform(const UniformScaling<Scalar>& s)
+  {
+    check_template_params();
+    *this = s;
+  }
+  template<typename Derived>
+  inline explicit Transform(const RotationBase<Derived, Dim>& r)
+  {
+    check_template_params();
+    *this = r;
+  }
+
+  inline Transform& operator=(const Transform& other)
+  { m_matrix = other.m_matrix; return *this; }
+
+  typedef internal::transform_take_affine_part<Transform> take_affine_part;
+
+  /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */
+  template<typename OtherDerived>
+  inline explicit Transform(const EigenBase<OtherDerived>& other)
+  {
+    EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value),
+      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY);
+
+    check_template_params();
+    internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());
+  }
+
+  /** Set \c *this from a Dim^2 or (Dim+1)^2 matrix. */
+  template<typename OtherDerived>
+  inline Transform& operator=(const EigenBase<OtherDerived>& other)
+  {
+    EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value),
+      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY);
+
+    internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());
+    return *this;
+  }
+  
+  template<int OtherOptions>
+  inline Transform(const Transform<Scalar,Dim,Mode,OtherOptions>& other)
+  {
+    check_template_params();
+    // only the options change, we can directly copy the matrices
+    m_matrix = other.matrix();
+  }
+
+  template<int OtherMode,int OtherOptions>
+  inline Transform(const Transform<Scalar,Dim,OtherMode,OtherOptions>& other)
+  {
+    check_template_params();
+    // prevent conversions as:
+    // Affine | AffineCompact | Isometry = Projective
+    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)),
+                        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
+
+    // prevent conversions as:
+    // Isometry = Affine | AffineCompact
+    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)),
+                        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
+
+    enum { ModeIsAffineCompact = Mode == int(AffineCompact),
+           OtherModeIsAffineCompact = OtherMode == int(AffineCompact)
+    };
+
+    if(ModeIsAffineCompact == OtherModeIsAffineCompact)
+    {
+      // We need the block expression because the code is compiled for all
+      // combinations of transformations and will trigger a compile time error
+      // if one tries to assign the matrices directly
+      m_matrix.template block<Dim,Dim+1>(0,0) = other.matrix().template block<Dim,Dim+1>(0,0);
+      makeAffine();
+    }
+    else if(OtherModeIsAffineCompact)
+    {
+      typedef typename Transform<Scalar,Dim,OtherMode,OtherOptions>::MatrixType OtherMatrixType;
+      internal::transform_construct_from_matrix<OtherMatrixType,Mode,Options,Dim,HDim>::run(this, other.matrix());
+    }
+    else
+    {
+      // here we know that Mode == AffineCompact and OtherMode != AffineCompact.
+      // if OtherMode were Projective, the static assert above would already have caught it.
+      // So the only possibility is that OtherMode == Affine
+      linear() = other.linear();
+      translation() = other.translation();
+    }
+  }
+
+  template<typename OtherDerived>
+  Transform(const ReturnByValue<OtherDerived>& other)
+  {
+    check_template_params();
+    other.evalTo(*this);
+  }
+
+  template<typename OtherDerived>
+  Transform& operator=(const ReturnByValue<OtherDerived>& other)
+  {
+    other.evalTo(*this);
+    return *this;
+  }
+
+  #ifdef EIGEN_QT_SUPPORT
+  inline Transform(const QMatrix& other);
+  inline Transform& operator=(const QMatrix& other);
+  inline QMatrix toQMatrix(void) const;
+  inline Transform(const QTransform& other);
+  inline Transform& operator=(const QTransform& other);
+  inline QTransform toQTransform(void) const;
+  #endif
+
+  /** shortcut for m_matrix(row,col);
+    * \sa MatrixBase::operator(Index,Index) const */
+  inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); }
+  /** shortcut for m_matrix(row,col);
+    * \sa MatrixBase::operator(Index,Index) */
+  inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); }
+
+  /** \returns a read-only expression of the transformation matrix */
+  inline const MatrixType& matrix() const { return m_matrix; }
+  /** \returns a writable expression of the transformation matrix */
+  inline MatrixType& matrix() { return m_matrix; }
+
+  /** \returns a read-only expression of the linear part of the transformation */
+  inline ConstLinearPart linear() const { return ConstLinearPart(m_matrix,0,0); }
+  /** \returns a writable expression of the linear part of the transformation */
+  inline LinearPart linear() { return LinearPart(m_matrix,0,0); }
+
+  /** \returns a read-only expression of the Dim x HDim affine part of the transformation */
+  inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); }
+  /** \returns a writable expression of the Dim x HDim affine part of the transformation */
+  inline AffinePart affine() { return take_affine_part::run(m_matrix); }
+
+  /** \returns a read-only expression of the translation vector of the transformation */
+  inline ConstTranslationPart translation() const { return ConstTranslationPart(m_matrix,0,Dim); }
+  /** \returns a writable expression of the translation vector of the transformation */
+  inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); }
+
+  /** \returns an expression of the product between the transform \c *this and a matrix expression \a other
+    *
+    * The right hand side \a other might be either:
+    * \li a vector of size Dim,
+    * \li an homogeneous vector of size Dim+1,
+    * \li a set of vectors of size Dim x Dynamic,
+    * \li a set of homogeneous vectors of size Dim+1 x Dynamic,
+    * \li a linear transformation matrix of size Dim x Dim,
+    * \li an affine transformation matrix of size Dim x Dim+1,
+    * \li a transformation matrix of size Dim+1 x Dim+1.
+    */
+  // note: this function is defined here because some compilers cannot find the respective declaration
+  template<typename OtherDerived>
+  EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl<Transform, OtherDerived>::ResultType
+  operator * (const EigenBase<OtherDerived> &other) const
+  { return internal::transform_right_product_impl<Transform, OtherDerived>::run(*this,other.derived()); }
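+
+  // Usage sketch for the overload above: a single point and a whole point set.
+  //   Affine3f T;                                // assumed initialized elsewhere
+  //   Vector3f p = T * Vector3f(1.f, 2.f, 3.f);  // a vector of size Dim
+  //   Matrix<float,3,Dynamic> pts, out;          // one point per column
+  //   out = T * pts;                             // a set of vectors of size Dim x Dynamic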
+
+  /** \returns the product expression of a transformation matrix \a a times a transform \a b
+    *
+    * The left hand side \a a might be either:
+    * \li a linear transformation matrix of size Dim x Dim,
+    * \li an affine transformation matrix of size Dim x Dim+1,
+    * \li a general transformation matrix of size Dim+1 x Dim+1.
+    */
+  template<typename OtherDerived> friend
+  inline const typename internal::transform_left_product_impl<OtherDerived,Mode,Options,_Dim,_Dim+1>::ResultType
+    operator * (const EigenBase<OtherDerived> &a, const Transform &b)
+  { return internal::transform_left_product_impl<OtherDerived,Mode,Options,Dim,HDim>::run(a.derived(),b); }
+
+  /** \returns The product expression of a transform \a a times a diagonal matrix \a b
+    *
+    * The rhs diagonal matrix is interpreted as an affine scaling transformation. The
+    * product results in a Transform of the same type (mode) as the lhs only if the lhs
+    * mode is not an isometry. If the lhs is an isometry, the returned transform is an affinity.
+    */
+  template<typename DiagonalDerived>
+  inline const TransformTimeDiagonalReturnType
+    operator * (const DiagonalBase<DiagonalDerived> &b) const
+  {
+    TransformTimeDiagonalReturnType res(*this);
+    res.linear() *= b;
+    return res;
+  }
+
+  /** \returns The product expression of a diagonal matrix \a a times a transform \a b
+    *
+    * The lhs diagonal matrix is interpreted as an affine scaling transformation. The
+    * product results in a Transform of the same type (mode) as the rhs transform only if its
+    * mode is not an isometry. If the rhs is an isometry, the returned transform is an affinity.
+    */
+  template<typename DiagonalDerived>
+  friend inline TransformTimeDiagonalReturnType
+    operator * (const DiagonalBase<DiagonalDerived> &a, const Transform &b)
+  {
+    TransformTimeDiagonalReturnType res;
+    res.linear().noalias() = a*b.linear();
+    res.translation().noalias() = a*b.translation();
+    if (Mode!=int(AffineCompact))
+      res.matrix().row(Dim) = b.matrix().row(Dim);
+    return res;
+  }
+
+  template<typename OtherDerived>
+  inline Transform& operator*=(const EigenBase<OtherDerived>& other) { return *this = *this * other; }
+
+  /** Concatenates two transformations */
+  inline const Transform operator * (const Transform& other) const
+  {
+    return internal::transform_transform_product_impl<Transform,Transform>::run(*this,other);
+  }
+  
+  #ifdef __INTEL_COMPILER
+private:
+  // this intermediate structure permits to workaround a bug in ICC 11:
+  //   error: template instantiation resulted in unexpected function type of "Eigen::Transform<double, 3, 32, 0>
+  //             (const Eigen::Transform<double, 3, 2, 0> &) const"
+  //  (the meaning of a name may have changed since the template declaration -- the type of the template is:
+  // "Eigen::internal::transform_transform_product_impl<Eigen::Transform<double, 3, 32, 0>,
+  //     Eigen::Transform<double, 3, Mode, Options>, <expression>>::ResultType (const Eigen::Transform<double, 3, Mode, Options> &) const")
+  // 
+  template<int OtherMode,int OtherOptions> struct icc_11_workaround
+  {
+    typedef internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> > ProductType;
+    typedef typename ProductType::ResultType ResultType;
+  };
+  
+public:
+  /** Concatenates two different transformations */
+  template<int OtherMode,int OtherOptions>
+  inline typename icc_11_workaround<OtherMode,OtherOptions>::ResultType
+    operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const
+  {
+    typedef typename icc_11_workaround<OtherMode,OtherOptions>::ProductType ProductType;
+    return ProductType::run(*this,other);
+  }
+  #else
+  /** Concatenates two different transformations */
+  template<int OtherMode,int OtherOptions>
+  inline typename internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::ResultType
+    operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const
+  {
+    return internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::run(*this,other);
+  }
+  #endif
+
+  /** \sa MatrixBase::setIdentity() */
+  void setIdentity() { m_matrix.setIdentity(); }
+
+  /**
+   * \brief Returns an identity transformation.
+   * \todo In the future this function should be returning a Transform expression.
+   */
+  static const Transform Identity()
+  {
+    return Transform(MatrixType::Identity());
+  }
+
+  template<typename OtherDerived>
+  inline Transform& scale(const MatrixBase<OtherDerived> &other);
+
+  template<typename OtherDerived>
+  inline Transform& prescale(const MatrixBase<OtherDerived> &other);
+
+  inline Transform& scale(Scalar s);
+  inline Transform& prescale(Scalar s);
+
+  template<typename OtherDerived>
+  inline Transform& translate(const MatrixBase<OtherDerived> &other);
+
+  template<typename OtherDerived>
+  inline Transform& pretranslate(const MatrixBase<OtherDerived> &other);
+
+  template<typename RotationType>
+  inline Transform& rotate(const RotationType& rotation);
+
+  template<typename RotationType>
+  inline Transform& prerotate(const RotationType& rotation);
+
+  Transform& shear(Scalar sx, Scalar sy);
+  Transform& preshear(Scalar sx, Scalar sy);
+
+  inline Transform& operator=(const TranslationType& t);
+  inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }
+  inline Transform operator*(const TranslationType& t) const;
+
+  inline Transform& operator=(const UniformScaling<Scalar>& t);
+  inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }
+  inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry)> operator*(const UniformScaling<Scalar>& s) const
+  {
+    Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry),Options> res = *this;
+    res.scale(s.factor());
+    return res;
+  }
+
+  inline Transform& operator*=(const DiagonalMatrix<Scalar,Dim>& s) { linear() *= s; return *this; }
+
+  template<typename Derived>
+  inline Transform& operator=(const RotationBase<Derived,Dim>& r);
+  template<typename Derived>
+  inline Transform& operator*=(const RotationBase<Derived,Dim>& r) { return rotate(r.toRotationMatrix()); }
+  template<typename Derived>
+  inline Transform operator*(const RotationBase<Derived,Dim>& r) const;
+
+  const LinearMatrixType rotation() const;
+  template<typename RotationMatrixType, typename ScalingMatrixType>
+  void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const;
+  template<typename ScalingMatrixType, typename RotationMatrixType>
+  void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const;
+
+  template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
+  Transform& fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
+    const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale);
+
+  inline Transform inverse(TransformTraits traits = (TransformTraits)Mode) const;
+
+  /** \returns a const pointer to the column major internal matrix */
+  const Scalar* data() const { return m_matrix.data(); }
+  /** \returns a non-const pointer to the column major internal matrix */
+  Scalar* data() { return m_matrix.data(); }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type cast() const
+  { return typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Transform(const Transform<OtherScalarType,Dim,Mode,Options>& other)
+  {
+    check_template_params();
+    m_matrix = other.matrix().template cast<Scalar>();
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Transform& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_matrix.isApprox(other.m_matrix, prec); }
+
+  /** Sets the last row to [0 ... 0 1]
+    */
+  void makeAffine()
+  {
+    if(int(Mode)!=int(AffineCompact))
+    {
+      matrix().template block<1,Dim>(Dim,0).setZero();
+      matrix().coeffRef(Dim,Dim) = Scalar(1);
+    }
+  }
+
+  /** \internal
+    * \returns the Dim x Dim linear part if the transformation is affine,
+    *          and the HDim x Dim part for projective transformations.
+    */
+  inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt()
+  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }
+  /** \internal
+    * \returns the Dim x Dim linear part if the transformation is affine,
+    *          and the HDim x Dim part for projective transformations.
+    */
+  inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt() const
+  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }
+
+  /** \internal
+    * \returns the translation part if the transformation is affine,
+    *          and the last column for projective transformations.
+    */
+  inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt()
+  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }
+  /** \internal
+    * \returns the translation part if the transformation is affine,
+    *          and the last column for projective transformations.
+    */
+  inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt() const
+  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }
+
+
+  #ifdef EIGEN_TRANSFORM_PLUGIN
+  #include EIGEN_TRANSFORM_PLUGIN
+  #endif
+  
+protected:
+  #ifndef EIGEN_PARSED_BY_DOXYGEN
+    static EIGEN_STRONG_INLINE void check_template_params()
+    {
+      EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)
+    }
+  #endif
+
+};
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,Isometry> Isometry2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,Isometry> Isometry3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,Isometry> Isometry2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,Isometry> Isometry3d;
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,Affine> Affine2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,Affine> Affine3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,Affine> Affine2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,Affine> Affine3d;
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,AffineCompact> AffineCompact2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,AffineCompact> AffineCompact3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,AffineCompact> AffineCompact2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,AffineCompact> AffineCompact3d;
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,Projective> Projective2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,Projective> Projective3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,Projective> Projective2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,Projective> Projective3d;
+
+/**************************
+*** Optional QT support ***
+**************************/
+
+#ifdef EIGEN_QT_SUPPORT
+/** Initializes \c *this from a QMatrix assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode,int Options>
+Transform<Scalar,Dim,Mode,Options>::Transform(const QMatrix& other)
+{
+  check_template_params();
+  *this = other;
+}
+
+/** Set \c *this from a QMatrix assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode,int Options>
+Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QMatrix& other)
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_matrix << other.m11(), other.m21(), other.dx(),
+              other.m12(), other.m22(), other.dy(),
+              0, 0, 1;
+  return *this;
+}
+
+/** \returns a QMatrix from \c *this assuming the dimension is 2.
+  *
+  * \warning this conversion might lose data if \c *this is not affine
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+QMatrix Transform<Scalar,Dim,Mode,Options>::toQMatrix(void) const
+{
+  check_template_params();
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
+                 m_matrix.coeff(0,1), m_matrix.coeff(1,1),
+                 m_matrix.coeff(0,2), m_matrix.coeff(1,2));
+}
+
+/** Initializes \c *this from a QTransform assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode,int Options>
+Transform<Scalar,Dim,Mode,Options>::Transform(const QTransform& other)
+{
+  check_template_params();
+  *this = other;
+}
+
+/** Set \c *this from a QTransform assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QTransform& other)
+{
+  check_template_params();
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  if (Mode == int(AffineCompact))
+    m_matrix << other.m11(), other.m21(), other.dx(),
+                other.m12(), other.m22(), other.dy();
+  else
+    m_matrix << other.m11(), other.m21(), other.dx(),
+                other.m12(), other.m22(), other.dy(),
+                other.m13(), other.m23(), other.m33();
+  return *this;
+}
+
+/** \returns a QTransform from \c *this assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+QTransform Transform<Scalar,Dim,Mode,Options>::toQTransform(void) const
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  if (Mode == int(AffineCompact))
+    return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
+                      m_matrix.coeff(0,1), m_matrix.coeff(1,1),
+                      m_matrix.coeff(0,2), m_matrix.coeff(1,2));
+  else
+    return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0),
+                      m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1),
+                      m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2));
+}
+#endif
+
+/*********************
+*** Procedural API ***
+*********************/
+
+/** Applies on the right the non-uniform scale transformation represented
+  * by the vector \a other to \c *this and returns a reference to \c *this.
+  * \sa prescale()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::scale(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  linearExt().noalias() = (linearExt() * other.asDiagonal());
+  return *this;
+}
+
+/** Applies on the right a uniform scale of a factor \a s to \c *this
+  * and returns a reference to \c *this.
+  * \sa prescale(Scalar)
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(Scalar s)
+{
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  linearExt() *= s;
+  return *this;
+}
+
+/** Applies on the left the non-uniform scale transformation represented
+  * by the vector \a other to \c *this and returns a reference to \c *this.
+  * \sa scale()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::prescale(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  m_matrix.template block<Dim,HDim>(0,0).noalias() = (other.asDiagonal() * m_matrix.template block<Dim,HDim>(0,0));
+  return *this;
+}
+
+/** Applies on the left a uniform scale of a factor \a s to \c *this
+  * and returns a reference to \c *this.
+  * \sa scale(Scalar)
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(Scalar s)
+{
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  m_matrix.template topRows<Dim>() *= s;
+  return *this;
+}
+
+/** Applies on the right the translation matrix represented by the vector \a other
+  * to \c *this and returns a reference to \c *this.
+  * \sa pretranslate()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::translate(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  translationExt() += linearExt() * other;
+  return *this;
+}
+
+/** Applies on the left the translation matrix represented by the vector \a other
+  * to \c *this and returns a reference to \c *this.
+  * \sa translate()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::pretranslate(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  if(int(Mode)==int(Projective))
+    affine() += other * m_matrix.row(Dim);
+  else
+    translation() += other;
+  return *this;
+}
+
+/** Applies on the right the rotation represented by the rotation \a rotation
+  * to \c *this and returns a reference to \c *this.
+  *
+  * The template parameter \a RotationType is the type of the rotation which
+  * must be known by internal::toRotationMatrix<>.
+  *
+  * Natively supported types include:
+  *   - any scalar (2D),
+  *   - a Dim x Dim matrix expression,
+  *   - a Quaternion (3D),
+  *   - an AngleAxis (3D)
+  *
+  * This mechanism is easily extendable to support user types such as Euler angles,
+  * or a pair of Quaternions for 4D rotations.
+  *
+  * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType)
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename RotationType>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::rotate(const RotationType& rotation)
+{
+  linearExt() *= internal::toRotationMatrix<Scalar,Dim>(rotation);
+  return *this;
+}
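+
+// A minimal usage sketch of the procedural API above (illustrative only; the
+// transform t, the angle and the factors below are arbitrary values, not part
+// of this file):
+//
+//   Eigen::Affine3f t = Eigen::Affine3f::Identity();
+//   t.translate(Eigen::Vector3f(1.f, 2.f, 3.f));                   // right-multiply a translation
+//   t.rotate(Eigen::AngleAxisf(0.785f, Eigen::Vector3f::UnitZ())); // ~45 degrees about z
+//   t.scale(2.f);                                                  // right-multiply a uniform scale
+//   Eigen::Vector3f p = t * Eigen::Vector3f::UnitX();              // apply to a point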
+
+/** Applies on the left the rotation represented by the rotation \a rotation
+  * to \c *this and returns a reference to \c *this.
+  *
+  * See rotate() for further details.
+  *
+  * \sa rotate()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename RotationType>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::prerotate(const RotationType& rotation)
+{
+  m_matrix.template block<Dim,HDim>(0,0) = internal::toRotationMatrix<Scalar,Dim>(rotation)
+                                         * m_matrix.template block<Dim,HDim>(0,0);
+  return *this;
+}
+
+/** Applies on the right the shear transformation represented
+  * by the shear factors \a sx and \a sy to \c *this and returns a reference to \c *this.
+  * \warning 2D only.
+  * \sa preshear()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::shear(Scalar sx, Scalar sy)
+{
+  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  VectorType tmp = linear().col(0)*sy + linear().col(1);
+  linear() << linear().col(0) + linear().col(1)*sx, tmp;
+  return *this;
+}
+
+/** Applies on the left the shear transformation represented
+  * by the shear factors \a sx and \a sy to \c *this and returns a reference to \c *this.
+  * \warning 2D only.
+  * \sa shear()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::preshear(Scalar sx, Scalar sy)
+{
+  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  m_matrix.template block<Dim,HDim>(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block<Dim,HDim>(0,0);
+  return *this;
+}
+
+/******************************************************
+*** Scaling, Translation and Rotation compatibility ***
+******************************************************/
+
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const TranslationType& t)
+{
+  linear().setIdentity();
+  translation() = t.vector();
+  makeAffine();
+  return *this;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const TranslationType& t) const
+{
+  Transform res = *this;
+  res.translate(t.vector());
+  return res;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const UniformScaling<Scalar>& s)
+{
+  m_matrix.setZero();
+  linear().diagonal().fill(s.factor());
+  makeAffine();
+  return *this;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename Derived>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const RotationBase<Derived,Dim>& r)
+{
+  linear() = internal::toRotationMatrix<Scalar,Dim>(r);
+  translation().setZero();
+  makeAffine();
+  return *this;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename Derived>
+inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const RotationBase<Derived,Dim>& r) const
+{
+  Transform res = *this;
+  res.rotate(r.derived());
+  return res;
+}
+
+/************************
+*** Special functions ***
+************************/
+
+/** \returns the rotation part of the transformation
+  *
+  *
+  * \svd_module
+  *
+  * \sa computeRotationScaling(), computeScalingRotation(), class SVD
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+const typename Transform<Scalar,Dim,Mode,Options>::LinearMatrixType
+Transform<Scalar,Dim,Mode,Options>::rotation() const
+{
+  LinearMatrixType result;
+  computeRotationScaling(&result, (LinearMatrixType*)0);
+  return result;
+}
+
+
+/** decomposes the linear part of the transformation as a product rotation x scaling;
+  * the scaling is not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  *
+  *
+  * \svd_module
+  *
+  * \sa computeScalingRotation(), rotation(), class SVD
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename RotationMatrixType, typename ScalingMatrixType>
+void Transform<Scalar,Dim,Mode,Options>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const
+{
+  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
+
+  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+  VectorType sv(svd.singularValues());
+  sv.coeffRef(0) *= x;
+  if(scaling) scaling->lazyAssign(svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint());
+  if(rotation)
+  {
+    LinearMatrixType m(svd.matrixU());
+    m.col(0) /= x;
+    rotation->lazyAssign(m * svd.matrixV().adjoint());
+  }
+}
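+
+// A short illustration of the decomposition above (hypothetical values, not
+// part of this file): for a pure scaling the rotation factor comes out as the
+// identity.
+//
+//   Eigen::Affine3d t = Eigen::Affine3d::Identity();
+//   t.linear() = 2.0 * Eigen::Matrix3d::Identity(); // pure scaling by 2
+//   Eigen::Matrix3d R, S;
+//   t.computeRotationScaling(&R, &S);               // t.linear() == R * S
+//   // here R is (numerically) the identity and S == 2 * Matrix3d::Identity()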
+
+/** decomposes the linear part of the transformation as a product scaling x rotation;
+  * the scaling is not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  *
+  *
+  * \svd_module
+  *
+  * \sa computeRotationScaling(), rotation(), class SVD
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename ScalingMatrixType, typename RotationMatrixType>
+void Transform<Scalar,Dim,Mode,Options>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const
+{
+  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
+
+  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+  VectorType sv(svd.singularValues());
+  sv.coeffRef(0) *= x;
+  if(scaling) scaling->lazyAssign(svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint());
+  if(rotation)
+  {
+    LinearMatrixType m(svd.matrixU());
+    m.col(0) /= x;
+    rotation->lazyAssign(m * svd.matrixV().adjoint());
+  }
+}
+
+/** Convenient method to set \c *this from a position, orientation and scale
+  * of a 3D object.
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
+  const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale)
+{
+  linear() = internal::toRotationMatrix<Scalar,Dim>(orientation);
+  linear() *= scale.asDiagonal();
+  translation() = position;
+  makeAffine();
+  return *this;
+}
+
+namespace internal {
+
+// selector needed to avoid taking the inverse of a 3x4 matrix
+template<typename TransformType, int Mode=TransformType::Mode>
+struct projective_transform_inverse
+{
+  static inline void run(const TransformType&, TransformType&)
+  {}
+};
+
+template<typename TransformType>
+struct projective_transform_inverse<TransformType, Projective>
+{
+  static inline void run(const TransformType& m, TransformType& res)
+  {
+    res.matrix() = m.matrix().inverse();
+  }
+};
+
+} // end namespace internal
+
+
+/**
+  *
+  * \returns the inverse transformation according to some given knowledge
+  * on \c *this.
+  *
+  * \param hint allows optimizing the inversion process when the transformation
+  * is known not to be a general transformation (optional). The possible values are:
+  *  - #Projective if the transformation is not necessarily affine, i.e., if the
+  *    last row is not guaranteed to be [0 ... 0 1]
+  *  - #Affine if the last row can be assumed to be [0 ... 0 1]
+  *  - #Isometry if the transformation is only a concatenation of translations
+  *    and rotations.
+  *  The default is the template class parameter \c Mode.
+  *
+  * \warning unless \a hint is always set to #Isometry, this function
+  * requires the generic inverse method of MatrixBase defined in the LU module. If
+  * you forget to include this module, then you will get hard-to-debug linking errors.
+  *
+  * \sa MatrixBase::inverse()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>
+Transform<Scalar,Dim,Mode,Options>::inverse(TransformTraits hint) const
+{
+  Transform res;
+  if (hint == Projective)
+  {
+    internal::projective_transform_inverse<Transform>::run(*this, res);
+  }
+  else
+  {
+    if (hint == Isometry)
+    {
+      res.matrix().template topLeftCorner<Dim,Dim>() = linear().transpose();
+    }
+    else if(hint&Affine)
+    {
+      res.matrix().template topLeftCorner<Dim,Dim>() = linear().inverse();
+    }
+    else
+    {
+      eigen_assert(false && "Invalid transform traits in Transform::Inverse");
+    }
+    // translation and remaining parts
+    res.matrix().template topRightCorner<Dim,1>()
+      = - res.matrix().template topLeftCorner<Dim,Dim>() * translation();
+    res.makeAffine(); // we do need this, because in the beginning res is uninitialized
+  }
+  return res;
+}
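+
+// A usage sketch of the hint mechanism described above (illustrative only;
+// the transforms and values below are arbitrary):
+//
+//   Eigen::Isometry3d iso = Eigen::Isometry3d::Identity();
+//   iso.rotate(Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitY()))
+//      .translate(Eigen::Vector3d(1.0, 0.0, 0.0));
+//   Eigen::Isometry3d iso_inv = iso.inverse();   // hint defaults to Isometry: transpose + translation
+//
+//   Eigen::Projective3d proj(iso.matrix());      // treat it as a general 4x4 transform
+//   Eigen::Projective3d proj_inv = proj.inverse(Eigen::Projective); // full matrix inverse (needs Eigen/LU)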
+
+namespace internal {
+
+/*****************************************************
+*** Specializations of take affine part            ***
+*****************************************************/
+
+template<typename TransformType> struct transform_take_affine_part {
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef typename TransformType::AffinePart AffinePart;
+  typedef typename TransformType::ConstAffinePart ConstAffinePart;
+  static inline AffinePart run(MatrixType& m)
+  { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }
+  static inline ConstAffinePart run(const MatrixType& m)
+  { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }
+};
+
+template<typename Scalar, int Dim, int Options>
+struct transform_take_affine_part<Transform<Scalar,Dim,AffineCompact, Options> > {
+  typedef typename Transform<Scalar,Dim,AffineCompact,Options>::MatrixType MatrixType;
+  static inline MatrixType& run(MatrixType& m) { return m; }
+  static inline const MatrixType& run(const MatrixType& m) { return m; }
+};
+
+/*****************************************************
+*** Specializations of construct from matrix       ***
+*****************************************************/
+
+template<typename Other, int Mode, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,Dim>
+{
+  static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
+  {
+    transform->linear() = other;
+    transform->translation().setZero();
+    transform->makeAffine();
+  }
+};
+
+template<typename Other, int Mode, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,HDim>
+{
+  static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
+  {
+    transform->affine() = other;
+    transform->makeAffine();
+  }
+};
+
+template<typename Other, int Mode, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, HDim,HDim>
+{
+  static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
+  { transform->matrix() = other; }
+};
+
+template<typename Other, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, AffineCompact,Options,Dim,HDim, HDim,HDim>
+{
+  static inline void run(Transform<typename Other::Scalar,Dim,AffineCompact,Options> *transform, const Other& other)
+  { transform->matrix() = other.template block<Dim,HDim>(0,0); }
+};
+
+/**********************************************************
+***   Specializations of operator* with rhs EigenBase   ***
+**********************************************************/
+
+template<int LhsMode,int RhsMode>
+struct transform_product_result
+{
+  enum 
+  { 
+    Mode =
+      (LhsMode == (int)Projective    || RhsMode == (int)Projective    ) ? Projective :
+      (LhsMode == (int)Affine        || RhsMode == (int)Affine        ) ? Affine :
+      (LhsMode == (int)AffineCompact || RhsMode == (int)AffineCompact ) ? AffineCompact :
+      (LhsMode == (int)Isometry      || RhsMode == (int)Isometry      ) ? Isometry : Projective
+  };
+};
+
+template< typename TransformType, typename MatrixType >
+struct transform_right_product_impl< TransformType, MatrixType, 0 >
+{
+  typedef typename MatrixType::PlainObject ResultType;
+
+  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
+  {
+    return T.matrix() * other;
+  }
+};
+
+template< typename TransformType, typename MatrixType >
+struct transform_right_product_impl< TransformType, MatrixType, 1 >
+{
+  enum { 
+    Dim = TransformType::Dim, 
+    HDim = TransformType::HDim,
+    OtherRows = MatrixType::RowsAtCompileTime,
+    OtherCols = MatrixType::ColsAtCompileTime
+  };
+
+  typedef typename MatrixType::PlainObject ResultType;
+
+  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
+  {
+    EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
+
+    typedef Block<ResultType, Dim, OtherCols, int(MatrixType::RowsAtCompileTime)==Dim> TopLeftLhs;
+
+    ResultType res(other.rows(),other.cols());
+    TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other;
+    res.row(OtherRows-1) = other.row(OtherRows-1);
+    
+    return res;
+  }
+};
+
+template< typename TransformType, typename MatrixType >
+struct transform_right_product_impl< TransformType, MatrixType, 2 >
+{
+  enum { 
+    Dim = TransformType::Dim, 
+    HDim = TransformType::HDim,
+    OtherRows = MatrixType::RowsAtCompileTime,
+    OtherCols = MatrixType::ColsAtCompileTime
+  };
+
+  typedef typename MatrixType::PlainObject ResultType;
+
+  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
+  {
+    EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
+
+    typedef Block<ResultType, Dim, OtherCols, true> TopLeftLhs;
+    ResultType res(Replicate<typename TransformType::ConstTranslationPart, 1, OtherCols>(T.translation(),1,other.cols()));
+    TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() += T.linear() * other;
+
+    return res;
+  }
+};
+
+/**********************************************************
+***   Specializations of operator* with lhs EigenBase   ***
+**********************************************************/
+
+// generic HDim x HDim matrix * T => Projective
+template<typename Other,int Mode, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, HDim,HDim>
+{
+  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;
+  static ResultType run(const Other& other,const TransformType& tr)
+  { return ResultType(other * tr.matrix()); }
+};
+
+// generic HDim x HDim matrix * AffineCompact => Projective
+template<typename Other, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, HDim,HDim>
+{
+  typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;
+  static ResultType run(const Other& other,const TransformType& tr)
+  {
+    ResultType res;
+    res.matrix().noalias() = other.template block<HDim,Dim>(0,0) * tr.matrix();
+    res.matrix().col(Dim) += other.col(Dim);
+    return res;
+  }
+};
+
+// affine matrix * T
+template<typename Other,int Mode, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,HDim>
+{
+  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef TransformType ResultType;
+  static ResultType run(const Other& other,const TransformType& tr)
+  {
+    ResultType res;
+    res.affine().noalias() = other * tr.matrix();
+    res.matrix().row(Dim) = tr.matrix().row(Dim);
+    return res;
+  }
+};
+
+// affine matrix * AffineCompact
+template<typename Other, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, Dim,HDim>
+{
+  typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef TransformType ResultType;
+  static ResultType run(const Other& other,const TransformType& tr)
+  {
+    ResultType res;
+    res.matrix().noalias() = other.template block<Dim,Dim>(0,0) * tr.matrix();
+    res.translation() += other.col(Dim);
+    return res;
+  }
+};
+
+// linear matrix * T
+template<typename Other,int Mode, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,Dim>
+{
+  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef TransformType ResultType;
+  static ResultType run(const Other& other, const TransformType& tr)
+  {
+    TransformType res;
+    if(Mode!=int(AffineCompact))
+      res.matrix().row(Dim) = tr.matrix().row(Dim);
+    res.matrix().template topRows<Dim>().noalias()
+      = other * tr.matrix().template topRows<Dim>();
+    return res;
+  }
+};
+
+/**********************************************************
+*** Specializations of operator* with another Transform ***
+**********************************************************/
+
+template<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>
+struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,false >
+{
+  enum { ResultMode = transform_product_result<LhsMode,RhsMode>::Mode };
+  typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;
+  typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;
+  typedef Transform<Scalar,Dim,ResultMode,LhsOptions> ResultType;
+  static ResultType run(const Lhs& lhs, const Rhs& rhs)
+  {
+    ResultType res;
+    res.linear() = lhs.linear() * rhs.linear();
+    res.translation() = lhs.linear() * rhs.translation() + lhs.translation();
+    res.makeAffine();
+    return res;
+  }
+};
+
+template<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>
+struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,true >
+{
+  typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;
+  typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;
+  typedef Transform<Scalar,Dim,Projective> ResultType;
+  static ResultType run(const Lhs& lhs, const Rhs& rhs)
+  {
+    return ResultType( lhs.matrix() * rhs.matrix() );
+  }
+};
+
+template<typename Scalar, int Dim, int LhsOptions, int RhsOptions>
+struct transform_transform_product_impl<Transform<Scalar,Dim,AffineCompact,LhsOptions>,Transform<Scalar,Dim,Projective,RhsOptions>,true >
+{
+  typedef Transform<Scalar,Dim,AffineCompact,LhsOptions> Lhs;
+  typedef Transform<Scalar,Dim,Projective,RhsOptions> Rhs;
+  typedef Transform<Scalar,Dim,Projective> ResultType;
+  static ResultType run(const Lhs& lhs, const Rhs& rhs)
+  {
+    ResultType res;
+    res.matrix().template topRows<Dim>() = lhs.matrix() * rhs.matrix();
+    res.matrix().row(Dim) = rhs.matrix().row(Dim);
+    return res;
+  }
+};
+
+template<typename Scalar, int Dim, int LhsOptions, int RhsOptions>
+struct transform_transform_product_impl<Transform<Scalar,Dim,Projective,LhsOptions>,Transform<Scalar,Dim,AffineCompact,RhsOptions>,true >
+{
+  typedef Transform<Scalar,Dim,Projective,LhsOptions> Lhs;
+  typedef Transform<Scalar,Dim,AffineCompact,RhsOptions> Rhs;
+  typedef Transform<Scalar,Dim,Projective> ResultType;
+  static ResultType run(const Lhs& lhs, const Rhs& rhs)
+  {
+    ResultType res(lhs.matrix().template leftCols<Dim>() * rhs.matrix());
+    res.matrix().col(Dim) += lhs.matrix().col(Dim);
+    return res;
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRANSFORM_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/Translation.h b/resources/3rdParty/eigen/Eigen/src/Geometry/Translation.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Geometry/Translation.h
rename to resources/3rdParty/eigen/Eigen/src/Geometry/Translation.h
diff --git a/resources/3rdParty/eigen/Eigen/src/Geometry/Umeyama.h b/resources/3rdParty/eigen/Eigen/src/Geometry/Umeyama.h
new file mode 100644
index 000000000..ac0939cde
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Geometry/Umeyama.h
@@ -0,0 +1,172 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_UMEYAMA_H
+#define EIGEN_UMEYAMA_H
+
+// This file requires the user to include 
+// * Eigen/Core
+// * Eigen/LU 
+// * Eigen/SVD
+// * Eigen/Array
+
+namespace Eigen { 
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+// These helpers are required because they allow the use of mixed types as parameters
+// for umeyama(). The problem with mixed parameters is that the return type
+// cannot trivially be deduced when float and double types are mixed.
+namespace internal {
+
+// Compile time return type deduction for different MatrixBase types.
+// Different here means different alignment and parameters but the same underlying
+// real scalar type.
+template<typename MatrixType, typename OtherMatrixType>
+struct umeyama_transform_matrix_type
+{
+  enum {
+    MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime),
+
+    // When possible we want to choose some small fixed size value since the result
+    // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want.
+    HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? Dynamic : int(MinRowsAtCompileTime)+1
+  };
+
+  typedef Matrix<typename traits<MatrixType>::Scalar,
+    HomogeneousDimension,
+    HomogeneousDimension,
+    AutoAlign | (traits<MatrixType>::Flags & RowMajorBit ? RowMajor : ColMajor),
+    HomogeneousDimension,
+    HomogeneousDimension
+  > type;
+};
+
+}
+
+#endif
+
+/**
+* \geometry_module \ingroup Geometry_Module
+*
+* \brief Returns the transformation between two point sets.
+*
+* The algorithm is based on:
+* "Least-squares estimation of transformation parameters between two point patterns",
+* Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573
+*
+* It estimates parameters \f$ c, \mathbf{R}, \f$ and \f$ \mathbf{t} \f$ such that
+* \f{align*}
+*   \frac{1}{n} \sum_{i=1}^n \vert\vert y_i - (c\mathbf{R}x_i + \mathbf{t}) \vert\vert_2^2
+* \f}
+* is minimized.
+*
+* The algorithm is based on the analysis of the covariance matrix
+* \f$ \Sigma_{\mathbf{x}\mathbf{y}} \in \mathbb{R}^{d \times d} \f$
+* of the input point sets \f$ \mathbf{x} \f$ and \f$ \mathbf{y} \f$, where
+* \f$d\f$ corresponds to the dimension (which is typically small).
+* The analysis involves an SVD with a complexity of \f$O(d^3)\f$,
+* though the actual computational effort lies in the covariance
+* matrix computation which has an asymptotic lower bound of \f$O(dm)\f$ when 
+* the input point sets have dimension \f$d \times m\f$.
+*
+* Currently the method works only for floating-point matrices.
+*
+* \todo Should the return type of umeyama() become a Transform?
+*
+* \param src Source points \f$ \mathbf{x} = \left( x_1, \hdots, x_n \right) \f$.
+* \param dst Destination points \f$ \mathbf{y} = \left( y_1, \hdots, y_n \right) \f$.
+* \param with_scaling Sets \f$ c=1 \f$ when <code>false</code> is passed.
+* \return The homogeneous transformation 
+* \f{align*}
+*   T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix}
+* \f}
+* minimizing the residual above. This transformation is always returned as an 
+* Eigen::Matrix.
+*/
+template <typename Derived, typename OtherDerived>
+typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type
+umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, bool with_scaling = true)
+{
+  typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;
+  typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef typename Derived::Index Index;
+
+  EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
+  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value),
+    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+  enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };
+
+  typedef Matrix<Scalar, Dimension, 1> VectorType;
+  typedef Matrix<Scalar, Dimension, Dimension> MatrixType;
+  typedef typename internal::plain_matrix_type_row_major<Derived>::type RowMajorMatrixType;
+
+  const Index m = src.rows(); // dimension
+  const Index n = src.cols(); // number of measurements
+
+  // required for demeaning ...
+  const RealScalar one_over_n = 1 / static_cast<RealScalar>(n);
+
+  // computation of mean
+  const VectorType src_mean = src.rowwise().sum() * one_over_n;
+  const VectorType dst_mean = dst.rowwise().sum() * one_over_n;
+
+  // demeaning of src and dst points
+  const RowMajorMatrixType src_demean = src.colwise() - src_mean;
+  const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean;
+
+  // Eq. (36)-(37)
+  const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n;
+
+  // Eq. (38)
+  const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose();
+
+  JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV);
+
+  // Initialize the resulting transformation with an identity matrix...
+  TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1);
+
+  // Eq. (39)
+  VectorType S = VectorType::Ones(m);
+  if (sigma.determinant()<0) S(m-1) = -1;
+
+  // Eq. (40) and (43)
+  const VectorType& d = svd.singularValues();
+  Index rank = 0; for (Index i=0; i<m; ++i) if (!internal::isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank;
+  if (rank == m-1) {
+    if ( svd.matrixU().determinant() * svd.matrixV().determinant() > 0 ) {
+      Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose();
+    } else {
+      const Scalar s = S(m-1); S(m-1) = -1;
+      Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
+      S(m-1) = s;
+    }
+  } else {
+    Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
+  }
+
+  // Eq. (42)
+  const Scalar c = 1/src_var * svd.singularValues().dot(S);
+
+  // Eq. (41)
+  // Note that we first assign dst_mean to the destination so that there is no need
+  // for a temporary.
+  Rt.col(m).head(m) = dst_mean;
+  Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean;
+
+  if (with_scaling) Rt.block(0,0,m,m) *= c;
+
+  return Rt;
+}
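+
+// A minimal usage sketch of umeyama() (illustrative only; the point sets and
+// the ground-truth transform below are arbitrary test data):
+//
+//   Eigen::Matrix3Xd src = Eigen::Matrix3Xd::Random(3, 20);
+//   Eigen::Affine3d truth = Eigen::Affine3d::Identity();
+//   truth.translate(Eigen::Vector3d(0.5, -1.0, 2.0))
+//        .rotate(Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitX()));
+//   Eigen::Matrix3Xd dst = truth * src;
+//   Eigen::Matrix4d T = Eigen::umeyama(src, dst, /*with_scaling=*/false);
+//   // T should match truth.matrix() up to numerical noise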
+
+} // end namespace Eigen
+
+#endif // EIGEN_UMEYAMA_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/arch/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Geometry/arch/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Geometry/arch/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Geometry/arch/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h b/resources/3rdParty/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h
rename to resources/3rdParty/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Householder/BlockHouseholder.h b/resources/3rdParty/eigen/Eigen/src/Householder/BlockHouseholder.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Householder/BlockHouseholder.h
rename to resources/3rdParty/eigen/Eigen/src/Householder/BlockHouseholder.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Householder/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Householder/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Householder/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Householder/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/Householder/Householder.h b/resources/3rdParty/eigen/Eigen/src/Householder/Householder.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Householder/Householder.h
rename to resources/3rdParty/eigen/Eigen/src/Householder/Householder.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Householder/HouseholderSequence.h b/resources/3rdParty/eigen/Eigen/src/Householder/HouseholderSequence.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Householder/HouseholderSequence.h
rename to resources/3rdParty/eigen/Eigen/src/Householder/HouseholderSequence.h
diff --git a/resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
rename to resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
diff --git a/resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
new file mode 100644
index 000000000..126341be8
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -0,0 +1,254 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BICGSTAB_H
+#define EIGEN_BICGSTAB_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \internal Low-level bi conjugate gradient stabilized algorithm
+  * \param mat The matrix A
+  * \param rhs The right hand side vector b
+  * \param x On input an initial solution, on output the computed solution.
+  * \param precond A preconditioner being able to efficiently solve for an
+  *                approximation of Ax=b (regardless of b)
+  * \param iters On input the max number of iterations, on output the number of performed iterations.
+  * \param tol_error On input the tolerance error, on output an estimation of the relative error.
+  * \return false in the case of numerical issue, for example a breakdown of BiCGSTAB. 
+  */
+template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
+              const Preconditioner& precond, int& iters,
+              typename Dest::RealScalar& tol_error)
+{
+  using std::sqrt;
+  using std::abs;
+  typedef typename Dest::RealScalar RealScalar;
+  typedef typename Dest::Scalar Scalar;
+  typedef Matrix<Scalar,Dynamic,1> VectorType;
+  RealScalar tol = tol_error;
+  int maxIters = iters;
+
+  int n = mat.cols();
+  VectorType r  = rhs - mat * x;
+  VectorType r0 = r;
+  
+  RealScalar r0_sqnorm = r0.squaredNorm();
+  Scalar rho    = 1;
+  Scalar alpha  = 1;
+  Scalar w      = 1;
+  
+  VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);
+  VectorType y(n),  z(n);
+  VectorType kt(n), ks(n);
+
+  VectorType s(n), t(n);
+
+  RealScalar tol2 = tol*tol;
+  int i = 0;
+
+  while ( r.squaredNorm()/r0_sqnorm > tol2 && i<maxIters )
+  {
+    Scalar rho_old = rho;
+
+    rho = r0.dot(r);
+    if (rho == Scalar(0)) return false; /* New search directions cannot be found */
+    Scalar beta = (rho/rho_old) * (alpha / w);
+    p = r + beta * (p - w * v);
+    
+    y = precond.solve(p);
+    
+    v.noalias() = mat * y;
+
+    alpha = rho / r0.dot(v);
+    s = r - alpha * v;
+
+    z = precond.solve(s);
+    t.noalias() = mat * z;
+
+    w = t.dot(s) / t.squaredNorm();
+    x += alpha * y + w * z;
+    r = s - w * t;
+    ++i;
+  }
+  tol_error = sqrt(r.squaredNorm()/r0_sqnorm);
+  iters = i;
+  return true; 
+}
+
+}
+
+template< typename _MatrixType,
+          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class BiCGSTAB;
+
+namespace internal {
+
+template< typename _MatrixType, typename _Preconditioner>
+struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
+{
+  typedef _MatrixType MatrixType;
+  typedef _Preconditioner Preconditioner;
+};
+
+}
+
+/** \ingroup IterativeLinearSolvers_Module
+  * \brief A bi conjugate gradient stabilized solver for sparse square problems
+  *
+  * This class allows solving sparse linear problems of the form A.x = b using a bi conjugate gradient
+  * stabilized algorithm. The vectors x and b can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
+  * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+  *
+  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
+  * and NumTraits<Scalar>::epsilon() for the tolerance.
+  * 
+  * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
+  * \code
+  * int n = 10000;
+  * VectorXd x(n), b(n);
+  * SparseMatrix<double> A(n,n);
+  * // fill A and b
+  * BiCGSTAB<SparseMatrix<double> > solver;
+  * solver.compute(A);
+  * x = solver.solve(b);
+  * std::cout << "#iterations:     " << solver.iterations() << std::endl;
+  * std::cout << "estimated error: " << solver.error()      << std::endl;
+  * // update b, and solve again
+  * x = solver.solve(b);
+  * \endcode
+  * 
+  * By default the iterations start with x=0 as an initial guess of the solution.
+  * One can control the start using the solveWithGuess() method. Here is a step by
+  * step execution example starting with a random guess and printing the evolution
+  * of the estimated error:
+  * \code
+  * x = VectorXd::Random(n);
+  * solver.setMaxIterations(1);
+  * int i = 0;
+  * do {
+  *   x = solver.solveWithGuess(b,x);
+  *   std::cout << i << " : " << solver.error() << std::endl;
+  *   ++i;
+  * } while (solver.info()!=Success && i<100);
+  * \endcode
+  * Note that such a step-by-step execution is slightly slower.
+  * 
+  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+  */
+template< typename _MatrixType, typename _Preconditioner>
+class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> >
+{
+  typedef IterativeSolverBase<BiCGSTAB> Base;
+  using Base::mp_matrix;
+  using Base::m_error;
+  using Base::m_iterations;
+  using Base::m_info;
+  using Base::m_isInitialized;
+public:
+  typedef _MatrixType MatrixType;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef _Preconditioner Preconditioner;
+
+public:
+
+  /** Default constructor. */
+  BiCGSTAB() : Base() {}
+
+  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+    * 
+    * This constructor is a shortcut for the default constructor followed
+    * by a call to compute().
+    * 
+    * \warning this class stores a reference to the matrix A as well as some
+    * precomputed values that depend on it. Therefore, if \a A is changed
+    * this class becomes invalid. Call compute() to update it with the new
+    * matrix A, or modify a copy of A.
+    */
+  BiCGSTAB(const MatrixType& A) : Base(A) {}
+
+  ~BiCGSTAB() {}
+  
+  /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+    * and \a x0 as an initial solution.
+    *
+    * \sa compute()
+    */
+  template<typename Rhs,typename Guess>
+  inline const internal::solve_retval_with_guess<BiCGSTAB, Rhs, Guess>
+  solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+  {
+    eigen_assert(m_isInitialized && "BiCGSTAB is not initialized.");
+    eigen_assert(Base::rows()==b.rows()
+              && "BiCGSTAB::solve(): invalid number of rows of the right hand side matrix b");
+    return internal::solve_retval_with_guess
+            <BiCGSTAB, Rhs, Guess>(*this, b.derived(), x0);
+  }
+  
+  /** \internal */
+  template<typename Rhs,typename Dest>
+  void _solveWithGuess(const Rhs& b, Dest& x) const
+  {    
+    bool failed = false;
+    for(int j=0; j<b.cols(); ++j)
+    {
+      m_iterations = Base::maxIterations();
+      m_error = Base::m_tolerance;
+      
+      typename Dest::ColXpr xj(x,j);
+      if(!internal::bicgstab(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_error))
+        failed = true;
+    }
+    m_info = failed ? NumericalIssue
+           : m_error <= Base::m_tolerance ? Success
+           : NoConvergence;
+    m_isInitialized = true;
+  }
+
+  /** \internal */
+  template<typename Rhs,typename Dest>
+  void _solve(const Rhs& b, Dest& x) const
+  {
+    x.setZero();
+    _solveWithGuess(b,x);
+  }
+
+protected:
+
+};
+
+
+namespace internal {
+
+  template<typename _MatrixType, typename _Preconditioner, typename Rhs>
+struct solve_retval<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
+  : solve_retval_base<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
+{
+  typedef BiCGSTAB<_MatrixType, _Preconditioner> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BICGSTAB_H
diff --git a/resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
rename to resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
diff --git a/resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
new file mode 100644
index 000000000..224304f0e
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
@@ -0,0 +1,466 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_INCOMPLETE_LUT_H
+#define EIGEN_INCOMPLETE_LUT_H
+
+namespace Eigen { 
+
+/**
+ * \brief Incomplete LU factorization with dual-threshold strategy
+ * During the numerical factorization, two dropping rules are used :
+ *  1) any element whose magnitude is less than some tolerance is dropped.
+ *    This tolerance is obtained by multiplying the input tolerance @p droptol 
+ *    by the average magnitude of all the original elements in the current row.
+ *  2) After the elimination of the row, only the @p fill largest elements in 
+ *    the L part and the @p fill largest elements in the U part are kept 
+ *    (in addition to the diagonal element). Note that @p fill is computed from 
+ *    the input parameter @p fillfactor which is used as the ratio to control the fill-in 
+ *    relative to the initial number of nonzero elements.
+ * 
+ * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
+ * and when @p fill=n/2 with @p droptol being different from zero. 
+ * 
+ * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, 
+ *              Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
+ * 
+ * NOTE : The following implementation is derived from the ILUT implementation
+ * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota 
+ *  released under the terms of the GNU LGPL: 
+ *    http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
+ * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
+ * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
+ *   http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
+ * alternatively, on GMANE:
+ *   http://comments.gmane.org/gmane.comp.lib.eigen/3302
+ */
+template <typename _Scalar>
+class IncompleteLUT : internal::noncopyable
+{
+    typedef _Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef Matrix<Scalar,Dynamic,1> Vector;
+    typedef SparseMatrix<Scalar,RowMajor> FactorType;
+    typedef SparseMatrix<Scalar,ColMajor> PermutType;
+    typedef typename FactorType::Index Index;
+
+  public:
+    typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+    
+    IncompleteLUT()
+      : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),
+        m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false)
+    {}
+    
+    template<typename MatrixType>
+    IncompleteLUT(const MatrixType& mat, RealScalar droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
+      : m_droptol(droptol),m_fillfactor(fillfactor),
+        m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false)
+    {
+      eigen_assert(fillfactor != 0);
+      compute(mat); 
+    }
+    
+    Index rows() const { return m_lu.rows(); }
+    
+    Index cols() const { return m_lu.cols(); }
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
+      return m_info;
+    }
+    
+    template<typename MatrixType>
+    void analyzePattern(const MatrixType& amat);
+    
+    template<typename MatrixType>
+    void factorize(const MatrixType& amat);
+    
+    /**
+      * Compute an incomplete LU factorization with dual threshold on the matrix \p amat.
+      * No pivoting is done in this version.
+      * 
+      **/
+    template<typename MatrixType>
+    IncompleteLUT<Scalar>& compute(const MatrixType& amat)
+    {
+      analyzePattern(amat); 
+      factorize(amat);
+      eigen_assert(m_factorizationIsOk == true); 
+      m_isInitialized = true;
+      return *this;
+    }
+
+    void setDroptol(RealScalar droptol); 
+    void setFillfactor(int fillfactor); 
+    
+    template<typename Rhs, typename Dest>
+    void _solve(const Rhs& b, Dest& x) const
+    {
+      x = m_Pinv * b;  
+      x = m_lu.template triangularView<UnitLower>().solve(x);
+      x = m_lu.template triangularView<Upper>().solve(x);
+      x = m_P * x; 
+    }
+
+    template<typename Rhs> inline const internal::solve_retval<IncompleteLUT, Rhs>
+     solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
+      eigen_assert(cols()==b.rows()
+                && "IncompleteLUT::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<IncompleteLUT, Rhs>(*this, b.derived());
+    }
+
+protected:
+
+    template <typename VectorV, typename VectorI>
+    int QuickSplit(VectorV &row, VectorI &ind, int ncut);
+
+
+    /** keeps off-diagonal entries; drops diagonal entries */
+    struct keep_diag {
+      inline bool operator() (const Index& row, const Index& col, const Scalar&) const
+      {
+        return row!=col;
+      }
+    };
+
+protected:
+
+    FactorType m_lu;
+    RealScalar m_droptol;
+    int m_fillfactor;
+    bool m_analysisIsOk;
+    bool m_factorizationIsOk;
+    bool m_isInitialized;
+    ComputationInfo m_info;
+    PermutationMatrix<Dynamic,Dynamic,Index> m_P;     // Fill-reducing permutation
+    PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv;  // Inverse permutation
+};
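+
+// A usage sketch of IncompleteLUT as a preconditioner for an iterative solver
+// from this module (illustrative only; BiCGSTAB is defined in BiCGSTAB.h, and
+// the sizes and tolerances below are arbitrary):
+//
+//   Eigen::SparseMatrix<double> A(1000, 1000);
+//   Eigen::VectorXd b(1000), x;
+//   // ... fill A and b ...
+//   Eigen::BiCGSTAB<Eigen::SparseMatrix<double>, Eigen::IncompleteLUT<double> > solver;
+//   solver.preconditioner().setDroptol(1e-5);
+//   solver.preconditioner().setFillfactor(20);
+//   solver.compute(A);
+//   x = solver.solve(b);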
+
+/**
+ * Set control parameter droptol
+ *  \param droptol   Drop any element whose magnitude is less than this tolerance 
+ **/ 
+template<typename Scalar>
+void IncompleteLUT<Scalar>::setDroptol(RealScalar droptol)
+{
+  this->m_droptol = droptol;   
+}
+
+/**
+ * Set control parameter fillfactor
+ * \param fillfactor  This is used to compute the number @p fill_in of largest elements to keep on each row. 
+ **/ 
+template<typename Scalar>
+void IncompleteLUT<Scalar>::setFillfactor(int fillfactor)
+{
+  this->m_fillfactor = fillfactor;   
+}
+
+
+/**
+ * Compute a quick-sort split of a vector.
+ * On output, the vector row is permuted such that its elements satisfy
+ * abs(row(i)) >= abs(row(ncut)) if i<ncut
+ * abs(row(i)) <= abs(row(ncut)) if i>ncut 
+ * \param row The vector of values
+ * \param ind The array of index for the elements in @p row
+ * \param ncut  The number of largest elements to keep
+ **/ 
+template <typename Scalar>
+template <typename VectorV, typename VectorI>
+int IncompleteLUT<Scalar>::QuickSplit(VectorV &row, VectorI &ind, int ncut)
+{
+  using std::swap;
+  int mid;
+  int n = row.size(); /* length of the vector */
+  int first, last ; 
+  
+  ncut--; /* to fit the zero-based indices */
+  first = 0; 
+  last = n-1; 
+  if (ncut < first || ncut > last ) return 0;
+  
+  do {
+    mid = first; 
+    RealScalar abskey = std::abs(row(mid)); 
+    for (int j = first + 1; j <= last; j++) {
+      if ( std::abs(row(j)) > abskey) {
+        ++mid;
+        swap(row(mid), row(j));
+        swap(ind(mid), ind(j));
+      }
+    }
+    /* Interchange for the pivot element */
+    swap(row(mid), row(first));
+    swap(ind(mid), ind(first));
+    
+    if (mid > ncut) last = mid - 1;
+    else if (mid < ncut ) first = mid + 1; 
+  } while (mid != ncut );
+  
+  return 0; /* mid is equal to ncut */ 
+}
+
+template <typename Scalar>
+template<typename _MatrixType>
+void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
+{
+  // Compute the Fill-reducing permutation
+  SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
+  SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
+  // Symmetrize the pattern
+  // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
+  //       On the other hand, for a really non-symmetric pattern, mat2*mat1 should be preferred...
+  SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
+  AtA.prune(keep_diag());
+  internal::minimum_degree_ordering<Scalar, Index>(AtA, m_P);  // Then compute the AMD ordering...
+
+  m_Pinv  = m_P.inverse(); // ... and the inverse permutation
+
+  m_analysisIsOk = true;
+}
+
+template <typename Scalar>
+template<typename _MatrixType>
+void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
+{
+  using std::sqrt;
+  using std::swap;
+  using std::abs;
+
+  eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
+  int n = amat.cols();  // Size of the matrix
+  m_lu.resize(n,n);
+  // Declare Working vectors and variables
+  Vector u(n) ;     // real values of the row -- maximum size is n --
+  VectorXi ju(n);   // column position of the values in u -- maximum size  is n
+  VectorXi jr(n);   // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1
+
+  // Apply the fill-reducing permutation
+  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+  SparseMatrix<Scalar,RowMajor, Index> mat;
+  mat = amat.twistedBy(m_Pinv);
+
+  // Initialization
+  jr.fill(-1);
+  ju.fill(0);
+  u.fill(0);
+
+  // number of largest elements to keep in each row:
+  int fill_in =   static_cast<int> (amat.nonZeros()*m_fillfactor)/n+1;
+  if (fill_in > n) fill_in = n;
+
+  // number of largest nonzero elements to keep in the L and the U part of the current row:
+  int nnzL = fill_in/2;
+  int nnzU = nnzL;
+  m_lu.reserve(n * (nnzL + nnzU + 1));
+
+  // global loop over the rows of the sparse matrix
+  for (int ii = 0; ii < n; ii++)
+  {
+    // 1 - copy the lower and the upper part of the row i of mat in the working vector u
+
+    int sizeu = 1; // number of nonzero elements in the upper part of the current row
+    int sizel = 0; // number of nonzero elements in the lower part of the current row
+    ju(ii)    = ii;
+    u(ii)     = 0;
+    jr(ii)    = ii;
+    RealScalar rownorm = 0;
+
+    typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
+    for (; j_it; ++j_it)
+    {
+      int k = j_it.index();
+      if (k < ii)
+      {
+        // copy the lower part
+        ju(sizel) = k;
+        u(sizel) = j_it.value();
+        jr(k) = sizel;
+        ++sizel;
+      }
+      else if (k == ii)
+      {
+        u(ii) = j_it.value();
+      }
+      else
+      {
+        // copy the upper part
+        int jpos = ii + sizeu;
+        ju(jpos) = k;
+        u(jpos) = j_it.value();
+        jr(k) = jpos;
+        ++sizeu;
+      }
+      rownorm += internal::abs2(j_it.value());
+    }
+
+    // 2 - detect possible zero row
+    if(rownorm==0)
+    {
+      m_info = NumericalIssue;
+      return;
+    }
+    // Take the 2-norm of the current row as a relative tolerance
+    rownorm = sqrt(rownorm);
+
+    // 3 - eliminate the previous nonzero rows
+    int jj = 0;
+    int len = 0;
+    while (jj < sizel)
+    {
+      // In order to eliminate in the correct order,
+      // we must first select the smallest column index among ju(jj:sizel)
+      int k;
+      int minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
+      k += jj;
+      if (minrow != ju(jj))
+      {
+        // swap the two locations
+        int j = ju(jj);
+        swap(ju(jj), ju(k));
+        jr(minrow) = jj;   jr(j) = k;
+        swap(u(jj), u(k));
+      }
+      // Reset this location
+      jr(minrow) = -1;
+
+      // Start elimination
+      typename FactorType::InnerIterator ki_it(m_lu, minrow);
+      while (ki_it && ki_it.index() < minrow) ++ki_it;
+      eigen_internal_assert(ki_it && ki_it.col()==minrow);
+      Scalar fact = u(jj) / ki_it.value();
+
+      // drop too small elements
+      if(abs(fact) <= m_droptol)
+      {
+        jj++;
+        continue;
+      }
+
+      // linear combination of the current row ii and the row minrow
+      ++ki_it;
+      for (; ki_it; ++ki_it)
+      {
+        Scalar prod = fact * ki_it.value();
+        int j       = ki_it.index();
+        int jpos    = jr(j);
+        if (jpos == -1) // fill-in element
+        {
+          int newpos;
+          if (j >= ii) // dealing with the upper part
+          {
+            newpos = ii + sizeu;
+            sizeu++;
+            eigen_internal_assert(sizeu<=n);
+          }
+          else // dealing with the lower part
+          {
+            newpos = sizel;
+            sizel++;
+            eigen_internal_assert(sizel<=ii);
+          }
+          ju(newpos) = j;
+          u(newpos) = -prod;
+          jr(j) = newpos;
+        }
+        else
+          u(jpos) -= prod;
+      }
+      // store the pivot element
+      u(len) = fact;
+      ju(len) = minrow;
+      ++len;
+
+      jj++;
+    } // end of the elimination on the row ii
+
+    // reset the upper part of the pointer jr to zero
+    for(int k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
+
+    // 4 - partially sort and insert the elements in the m_lu matrix
+
+    // sort the L-part of the row
+    sizel = len;
+    len = (std::min)(sizel, nnzL);
+    typename Vector::SegmentReturnType ul(u.segment(0, sizel));
+    typename VectorXi::SegmentReturnType jul(ju.segment(0, sizel));
+    QuickSplit(ul, jul, len);
+
+    // store the nnzL largest elements of the L part
+    m_lu.startVec(ii);
+    for(int k = 0; k < len; k++)
+      m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
+
+    // store the diagonal element
+    // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)
+    if (u(ii) == Scalar(0))
+      u(ii) = sqrt(m_droptol) * rownorm;
+    m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);
+
+    // sort the U-part of the row
+    // apply the dropping rule first
+    len = 0;
+    for(int k = 1; k < sizeu; k++)
+    {
+      if(abs(u(ii+k)) > m_droptol * rownorm )
+      {
+        ++len;
+        u(ii + len)  = u(ii + k);
+        ju(ii + len) = ju(ii + k);
+      }
+    }
+    sizeu = len + 1; // +1 to take into account the diagonal element
+    len = (std::min)(sizeu, nnzU);
+    typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
+    typename VectorXi::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
+    QuickSplit(uu, juu, len);
+
+    // store the largest elements of the U part
+    for(int k = ii + 1; k < ii + len; k++)
+      m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
+  }
+
+  m_lu.finalize();
+  m_lu.makeCompressed();
+
+  m_factorizationIsOk = true;
+  m_info = Success;
+}
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<IncompleteLUT<_MatrixType>, Rhs>
+  : solve_retval_base<IncompleteLUT<_MatrixType>, Rhs>
+{
+  typedef IncompleteLUT<_MatrixType> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_INCOMPLETE_LUT_H
+
diff --git a/resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
rename to resources/3rdParty/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
diff --git a/resources/3rdparty/eigen/Eigen/src/Jacobi/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/Jacobi/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/Jacobi/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/Jacobi/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/Eigen/src/Jacobi/Jacobi.h b/resources/3rdParty/eigen/Eigen/src/Jacobi/Jacobi.h
new file mode 100644
index 000000000..a9c17dcdf
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/Jacobi/Jacobi.h
@@ -0,0 +1,420 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_JACOBI_H
+#define EIGEN_JACOBI_H
+
+namespace Eigen { 
+
+/** \ingroup Jacobi_Module
+  * \jacobi_module
+  * \class JacobiRotation
+  * \brief Rotation given by a cosine-sine pair.
+  *
+  * This class represents a Jacobi or Givens rotation.
+  * This is a 2D rotation in the plane \c J of angle \f$ \theta \f$ defined by
+  * its cosine \c c and sine \c s as follow:
+  * \f$ J = \left ( \begin{array}{cc} c & \overline s \\ -s  & \overline c \end{array} \right ) \f$
+  *
+  * You can apply the respective counter-clockwise rotation to a column vector \c v by
+  * applying its adjoint on the left: \f$ v = J^* v \f$ that translates to the following Eigen code:
+  * \code
+  * v.applyOnTheLeft(J.adjoint());
+  * \endcode
+  *
+  * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+  */
+template<typename Scalar> class JacobiRotation
+{
+  public:
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    /** Default constructor without any initialization. */
+    JacobiRotation() {}
+
+    /** Construct a planar rotation from a cosine-sine pair (\a c, \a s). */
+    JacobiRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {}
+
+    Scalar& c() { return m_c; }
+    Scalar c() const { return m_c; }
+    Scalar& s() { return m_s; }
+    Scalar s() const { return m_s; }
+
+    /** Concatenates two planar rotations */
+    JacobiRotation operator*(const JacobiRotation& other)
+    {
+      return JacobiRotation(m_c * other.m_c - internal::conj(m_s) * other.m_s,
+                            internal::conj(m_c * internal::conj(other.m_s) + internal::conj(m_s) * internal::conj(other.m_c)));
+    }
+
+    /** Returns the transposed transformation */
+    JacobiRotation transpose() const { return JacobiRotation(m_c, -internal::conj(m_s)); }
+
+    /** Returns the adjoint transformation */
+    JacobiRotation adjoint() const { return JacobiRotation(internal::conj(m_c), -m_s); }
+
+    template<typename Derived>
+    bool makeJacobi(const MatrixBase<Derived>&, typename Derived::Index p, typename Derived::Index q);
+    bool makeJacobi(RealScalar x, Scalar y, RealScalar z);
+
+    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
+
+  protected:
+    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::true_type);
+    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::false_type);
+
+    Scalar m_c, m_s;
+};
+
+/** Makes \c *this a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix
+  * \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$
+  *
+  * \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+  */
+template<typename Scalar>
+bool JacobiRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  if(y == Scalar(0))
+  {
+    m_c = Scalar(1);
+    m_s = Scalar(0);
+    return false;
+  }
+  else
+  {
+    RealScalar tau = (x-z)/(RealScalar(2)*internal::abs(y));
+    RealScalar w = internal::sqrt(internal::abs2(tau) + RealScalar(1));
+    RealScalar t;
+    if(tau>RealScalar(0))
+    {
+      t = RealScalar(1) / (tau + w);
+    }
+    else
+    {
+      t = RealScalar(1) / (tau - w);
+    }
+    RealScalar sign_t = t > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
+    RealScalar n = RealScalar(1) / internal::sqrt(internal::abs2(t)+RealScalar(1));
+    m_s = - sign_t * (internal::conj(y) / internal::abs(y)) * internal::abs(t) * n;
+    m_c = n;
+    return true;
+  }
+}
+
+/** Makes \c *this a Jacobi rotation \c J such that applying \a J on both the right and left sides of the 2x2 selfadjoint matrix
+  * \f$ B = \left ( \begin{array}{cc} \text{this}_{pp} & \text{this}_{pq} \\ (\text{this}_{pq})^* & \text{this}_{qq} \end{array} \right )\f$ yields
+  * a diagonal matrix \f$ A = J^* B J \f$
+  *
+  * Example: \include Jacobi_makeJacobi.cpp
+  * Output: \verbinclude Jacobi_makeJacobi.out
+  *
+  * \sa JacobiRotation::makeJacobi(RealScalar, Scalar, RealScalar), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+  */
+template<typename Scalar>
+template<typename Derived>
+inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, typename Derived::Index p, typename Derived::Index q)
+{
+  return makeJacobi(internal::real(m.coeff(p,p)), m.coeff(p,q), internal::real(m.coeff(q,q)));
+}
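+
+/* A minimal usage sketch (illustration only): diagonalizing a 2x2 selfadjoint matrix
+ * with the rotation computed by makeJacobi, following the pattern documented above.
+ *
+ * \code
+ * Matrix2d B;
+ * B << 2, 1,
+ *      1, 3;
+ * JacobiRotation<double> J;
+ * J.makeJacobi(B, 0, 1);           // uses B(0,0), B(0,1) and B(1,1)
+ * B.applyOnTheLeft(0, 1, J.adjoint());
+ * B.applyOnTheRight(0, 1, J);      // B is now numerically diagonal: B <- J^* B J
+ * \endcode
+ */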
+
+/** Makes \c *this a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector
+  * \f$ V = \left ( \begin{array}{c} p \\ q \end{array} \right )\f$ yields:
+  * \f$ G^* V = \left ( \begin{array}{c} r \\ 0 \end{array} \right )\f$.
+  *
+  * The value of \a z is returned if \a z is not null (the default is null).
+  * Also note that G is built such that the cosine is always real.
+  *
+  * Example: \include Jacobi_makeGivens.cpp
+  * Output: \verbinclude Jacobi_makeGivens.out
+  *
+  * This function implements the continuous Givens rotation generation algorithm
+  * found in Anderson (2000), Discontinuous Plane Rotations and the Symmetric Eigenvalue Problem.
+  * LAPACK Working Note 150, University of Tennessee, UT-CS-00-454, December 4, 2000.
+  *
+  * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+  */
+template<typename Scalar>
+void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* z)
+{
+  makeGivens(p, q, z, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type());
+}
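+
+/* A minimal usage sketch (illustration only): zeroing the second entry of a 2-vector
+ * with a Givens rotation, as described in the documentation above.
+ *
+ * \code
+ * Vector2d v(3.0, 4.0);
+ * JacobiRotation<double> G;
+ * G.makeGivens(v.x(), v.y());          // G is built such that G^* v = (r, 0)^T
+ * v.applyOnTheLeft(0, 1, G.adjoint()); // v becomes approximately (5, 0)
+ * \endcode
+ */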
+
+
+// specialization for complexes
+template<typename Scalar>
+void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type)
+{
+  if(q==Scalar(0))
+  {
+    m_c = internal::real(p)<0 ? Scalar(-1) : Scalar(1);
+    m_s = 0;
+    if(r) *r = m_c * p;
+  }
+  else if(p==Scalar(0))
+  {
+    m_c = 0;
+    m_s = -q/internal::abs(q);
+    if(r) *r = internal::abs(q);
+  }
+  else
+  {
+    RealScalar p1 = internal::norm1(p);
+    RealScalar q1 = internal::norm1(q);
+    if(p1>=q1)
+    {
+      Scalar ps = p / p1;
+      RealScalar p2 = internal::abs2(ps);
+      Scalar qs = q / p1;
+      RealScalar q2 = internal::abs2(qs);
+
+      RealScalar u = internal::sqrt(RealScalar(1) + q2/p2);
+      if(internal::real(p)<RealScalar(0))
+        u = -u;
+
+      m_c = Scalar(1)/u;
+      m_s = -qs*internal::conj(ps)*(m_c/p2);
+      if(r) *r = p * u;
+    }
+    else
+    {
+      Scalar ps = p / q1;
+      RealScalar p2 = internal::abs2(ps);
+      Scalar qs = q / q1;
+      RealScalar q2 = internal::abs2(qs);
+
+      RealScalar u = q1 * internal::sqrt(p2 + q2);
+      if(internal::real(p)<RealScalar(0))
+        u = -u;
+
+      p1 = internal::abs(p);
+      ps = p/p1;
+      m_c = p1/u;
+      m_s = -internal::conj(ps) * (q/u);
+      if(r) *r = ps * u;
+    }
+  }
+}
+
+// specialization for reals
+template<typename Scalar>
+void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type)
+{
+
+  if(q==Scalar(0))
+  {
+    m_c = p<Scalar(0) ? Scalar(-1) : Scalar(1);
+    m_s = Scalar(0);
+    if(r) *r = internal::abs(p);
+  }
+  else if(p==Scalar(0))
+  {
+    m_c = Scalar(0);
+    m_s = q<Scalar(0) ? Scalar(1) : Scalar(-1);
+    if(r) *r = internal::abs(q);
+  }
+  else if(internal::abs(p) > internal::abs(q))
+  {
+    Scalar t = q/p;
+    Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t));
+    if(p<Scalar(0))
+      u = -u;
+    m_c = Scalar(1)/u;
+    m_s = -t * m_c;
+    if(r) *r = p * u;
+  }
+  else
+  {
+    Scalar t = p/q;
+    Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t));
+    if(q<Scalar(0))
+      u = -u;
+    m_s = -Scalar(1)/u;
+    m_c = -t * m_s;
+    if(r) *r = q * u;
+  }
+
+}
+
+/****************************************************************************************
+*   Implementation of MatrixBase methods
+****************************************************************************************/
+
+/** \jacobi_module
+  * Applies the clockwise 2D rotation \a j to the set of 2D vectors of coordinates \a x and \a y:
+  * \f$ \left ( \begin{array}{cc} x \\ y \end{array} \right )  =  J \left ( \begin{array}{cc} x \\ y \end{array} \right ) \f$
+  *
+  * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+  */
+namespace internal {
+template<typename VectorX, typename VectorY, typename OtherScalar>
+void apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation<OtherScalar>& j);
+}
+
+/** \jacobi_module
+  * Applies the rotation in the plane \a j to the rows \a p and \a q of \c *this, i.e., it computes B = J * B,
+  * with \f$ B = \left ( \begin{array}{cc} \text{*this.row}(p) \\ \text{*this.row}(q) \end{array} \right ) \f$.
+  *
+  * \sa class JacobiRotation, MatrixBase::applyOnTheRight(), internal::apply_rotation_in_the_plane()
+  */
+template<typename Derived>
+template<typename OtherScalar>
+inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j)
+{
+  RowXpr x(this->row(p));
+  RowXpr y(this->row(q));
+  internal::apply_rotation_in_the_plane(x, y, j);
+}
+
+/** \ingroup Jacobi_Module
+  * Applies the rotation in the plane \a j to the columns \a p and \a q of \c *this, i.e., it computes B = B * J
+  * with \f$ B = \left ( \begin{array}{cc} \text{*this.col}(p) & \text{*this.col}(q) \end{array} \right ) \f$.
+  *
+  * \sa class JacobiRotation, MatrixBase::applyOnTheLeft(), internal::apply_rotation_in_the_plane()
+  */
+template<typename Derived>
+template<typename OtherScalar>
+inline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j)
+{
+  ColXpr x(this->col(p));
+  ColXpr y(this->col(q));
+  internal::apply_rotation_in_the_plane(x, y, j.transpose());
+}
+
+namespace internal {
+template<typename VectorX, typename VectorY, typename OtherScalar>
+void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation<OtherScalar>& j)
+{
+  typedef typename VectorX::Index Index;
+  typedef typename VectorX::Scalar Scalar;
+  enum { PacketSize = packet_traits<Scalar>::size };
+  typedef typename packet_traits<Scalar>::type Packet;
+  eigen_assert(_x.size() == _y.size());
+  Index size = _x.size();
+  Index incrx = _x.innerStride();
+  Index incry = _y.innerStride();
+
+  Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0);
+  Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0);
+
+  /*** dynamic-size vectorized paths ***/
+
+  if(VectorX::SizeAtCompileTime == Dynamic &&
+    (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
+    ((incrx==1 && incry==1) || PacketSize == 1))
+  {
+    // both vectors are sequentially stored in memory => vectorization
+    enum { Peeling = 2 };
+
+    Index alignedStart = internal::first_aligned(y, size);
+    Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
+
+    const Packet pc = pset1<Packet>(j.c());
+    const Packet ps = pset1<Packet>(j.s());
+    conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
+
+    for(Index i=0; i<alignedStart; ++i)
+    {
+      Scalar xi = x[i];
+      Scalar yi = y[i];
+      x[i] =  j.c() * xi + conj(j.s()) * yi;
+      y[i] = -j.s() * xi + conj(j.c()) * yi;
+    }
+
+    Scalar* EIGEN_RESTRICT px = x + alignedStart;
+    Scalar* EIGEN_RESTRICT py = y + alignedStart;
+
+    if(internal::first_aligned(x, size)==alignedStart)
+    {
+      for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)
+      {
+        Packet xi = pload<Packet>(px);
+        Packet yi = pload<Packet>(py);
+        pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+        pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+        px += PacketSize;
+        py += PacketSize;
+      }
+    }
+    else
+    {
+      Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);
+      for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)
+      {
+        Packet xi   = ploadu<Packet>(px);
+        Packet xi1  = ploadu<Packet>(px+PacketSize);
+        Packet yi   = pload <Packet>(py);
+        Packet yi1  = pload <Packet>(py+PacketSize);
+        pstoreu(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+        pstoreu(px+PacketSize, padd(pmul(pc,xi1),pcj.pmul(ps,yi1)));
+        pstore (py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+        pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pmul(ps,xi1)));
+        px += Peeling*PacketSize;
+        py += Peeling*PacketSize;
+      }
+      if(alignedEnd!=peelingEnd)
+      {
+        Packet xi = ploadu<Packet>(x+peelingEnd);
+        Packet yi = pload <Packet>(y+peelingEnd);
+        pstoreu(x+peelingEnd, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+        pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+      }
+    }
+
+    for(Index i=alignedEnd; i<size; ++i)
+    {
+      Scalar xi = x[i];
+      Scalar yi = y[i];
+      x[i] =  j.c() * xi + conj(j.s()) * yi;
+      y[i] = -j.s() * xi + conj(j.c()) * yi;
+    }
+  }
+
+  /*** fixed-size vectorized path ***/
+  else if(VectorX::SizeAtCompileTime != Dynamic &&
+          (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
+          (VectorX::Flags & VectorY::Flags & AlignedBit))
+  {
+    const Packet pc = pset1<Packet>(j.c());
+    const Packet ps = pset1<Packet>(j.s());
+    conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
+    Scalar* EIGEN_RESTRICT px = x;
+    Scalar* EIGEN_RESTRICT py = y;
+    for(Index i=0; i<size; i+=PacketSize)
+    {
+      Packet xi = pload<Packet>(px);
+      Packet yi = pload<Packet>(py);
+      pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+      pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+      px += PacketSize;
+      py += PacketSize;
+    }
+  }
+
+  /*** non-vectorized path ***/
+  else
+  {
+    for(Index i=0; i<size; ++i)
+    {
+      Scalar xi = *x;
+      Scalar yi = *y;
+      *x =  j.c() * xi + conj(j.s()) * yi;
+      *y = -j.s() * xi + conj(j.c()) * yi;
+      x += incrx;
+      y += incry;
+    }
+  }
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBI_H
diff --git a/resources/3rdparty/eigen/Eigen/src/LU/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/LU/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/LU/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/LU/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/LU/Determinant.h b/resources/3rdParty/eigen/Eigen/src/LU/Determinant.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/LU/Determinant.h
rename to resources/3rdParty/eigen/Eigen/src/LU/Determinant.h
diff --git a/resources/3rdparty/eigen/Eigen/src/LU/FullPivLU.h b/resources/3rdParty/eigen/Eigen/src/LU/FullPivLU.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/LU/FullPivLU.h
rename to resources/3rdParty/eigen/Eigen/src/LU/FullPivLU.h
diff --git a/resources/3rdparty/eigen/Eigen/src/LU/Inverse.h b/resources/3rdParty/eigen/Eigen/src/LU/Inverse.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/LU/Inverse.h
rename to resources/3rdParty/eigen/Eigen/src/LU/Inverse.h
diff --git a/resources/3rdparty/eigen/Eigen/src/LU/PartialPivLU.h b/resources/3rdParty/eigen/Eigen/src/LU/PartialPivLU.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/LU/PartialPivLU.h
rename to resources/3rdParty/eigen/Eigen/src/LU/PartialPivLU.h
diff --git a/resources/3rdparty/eigen/Eigen/src/LU/PartialPivLU_MKL.h b/resources/3rdParty/eigen/Eigen/src/LU/PartialPivLU_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/LU/PartialPivLU_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/LU/PartialPivLU_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/LU/arch/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/LU/arch/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/LU/arch/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/LU/arch/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/LU/arch/Inverse_SSE.h b/resources/3rdParty/eigen/Eigen/src/LU/arch/Inverse_SSE.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/LU/arch/Inverse_SSE.h
rename to resources/3rdParty/eigen/Eigen/src/LU/arch/Inverse_SSE.h
diff --git a/resources/3rdparty/eigen/Eigen/src/OrderingMethods/Amd.h b/resources/3rdParty/eigen/Eigen/src/OrderingMethods/Amd.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/OrderingMethods/Amd.h
rename to resources/3rdParty/eigen/Eigen/src/OrderingMethods/Amd.h
diff --git a/resources/3rdparty/eigen/Eigen/src/OrderingMethods/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/OrderingMethods/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/OrderingMethods/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/OrderingMethods/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/PaStiXSupport/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/PaStiXSupport/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/PaStiXSupport/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/PaStiXSupport/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h b/resources/3rdParty/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h
rename to resources/3rdParty/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h
diff --git a/resources/3rdparty/eigen/Eigen/src/PardisoSupport/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/PardisoSupport/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/PardisoSupport/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/PardisoSupport/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/Eigen/src/PardisoSupport/PardisoSupport.h b/resources/3rdParty/eigen/Eigen/src/PardisoSupport/PardisoSupport.h
new file mode 100644
index 000000000..d623bf518
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/PardisoSupport/PardisoSupport.h
@@ -0,0 +1,615 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL PARDISO
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_PARDISOSUPPORT_H
+#define EIGEN_PARDISOSUPPORT_H
+
+namespace Eigen { 
+
+template<typename _MatrixType> class PardisoLU;
+template<typename _MatrixType, int Options=Upper> class PardisoLLT;
+template<typename _MatrixType, int Options=Upper> class PardisoLDLT;
+
+namespace internal
+{
+  template<typename Index>
+  struct pardiso_run_selector
+  {
+    static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
+                      Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
+    {
+      Index error = 0;
+      ::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
+      return error;
+    }
+  };
+  template<>
+  struct pardiso_run_selector<long long int>
+  {
+    typedef long long int Index;
+    static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
+                      Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
+    {
+      Index error = 0;
+      ::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
+      return error;
+    }
+  };
+
+  template<class Pardiso> struct pardiso_traits;
+
+  template<typename _MatrixType>
+  struct pardiso_traits< PardisoLU<_MatrixType> >
+  {
+    typedef _MatrixType MatrixType;
+    typedef typename _MatrixType::Scalar Scalar;
+    typedef typename _MatrixType::RealScalar RealScalar;
+    typedef typename _MatrixType::Index Index;
+  };
+
+  template<typename _MatrixType, int Options>
+  struct pardiso_traits< PardisoLLT<_MatrixType, Options> >
+  {
+    typedef _MatrixType MatrixType;
+    typedef typename _MatrixType::Scalar Scalar;
+    typedef typename _MatrixType::RealScalar RealScalar;
+    typedef typename _MatrixType::Index Index;
+  };
+
+  template<typename _MatrixType, int Options>
+  struct pardiso_traits< PardisoLDLT<_MatrixType, Options> >
+  {
+    typedef _MatrixType MatrixType;
+    typedef typename _MatrixType::Scalar Scalar;
+    typedef typename _MatrixType::RealScalar RealScalar;
+    typedef typename _MatrixType::Index Index;    
+  };
+
+}
+
+template<class Derived>
+class PardisoImpl
+{
+    typedef internal::pardiso_traits<Derived> Traits;
+  public:
+    typedef typename Traits::MatrixType MatrixType;
+    typedef typename Traits::Scalar Scalar;
+    typedef typename Traits::RealScalar RealScalar;
+    typedef typename Traits::Index Index;
+    typedef SparseMatrix<Scalar,RowMajor,Index> SparseMatrixType;
+    typedef Matrix<Scalar,Dynamic,1> VectorType;
+    typedef Matrix<Index, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+    typedef Matrix<Index, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+    typedef Array<Index,64,1,DontAlign> ParameterType;
+    enum {
+      ScalarIsComplex = NumTraits<Scalar>::IsComplex
+    };
+
+    PardisoImpl()
+    {
+      eigen_assert((sizeof(Index) >= sizeof(_INTEGER_t) && sizeof(Index) <= 8) && "Non-supported index type");
+      m_iparm.setZero();
+      m_msglvl = 0; // No output
+      m_initialized = false;
+    }
+
+    ~PardisoImpl()
+    {
+      pardisoRelease();
+    }
+
+    inline Index cols() const { return m_size; }
+    inline Index rows() const { return m_size; }
+  
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_initialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+
+    /** \warning for advanced usage only.
+      * \returns a reference to the parameter array controlling PARDISO.
+      * See the PARDISO manual to know how to use it. */
+    ParameterType& pardisoParameterArray()
+    {
+      return m_iparm;
+    }
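+
+    /* A minimal sketch of advanced usage (illustration only; parameter meanings are
+     * those of the MKL PARDISO manual, and A is assumed to be a user-provided
+     * SparseMatrix<double>):
+     *
+     * \code
+     * PardisoLU<SparseMatrix<double> > solver;
+     * solver.pardisoParameterArray()[7] = 4; // e.g. allow more iterative refinement steps
+     * solver.compute(A);
+     * \endcode
+     */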
+    
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      * 
+      * \sa factorize()
+      */
+    Derived& analyzePattern(const MatrixType& matrix);
+    
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    Derived& factorize(const MatrixType& matrix);
+
+    Derived& compute(const MatrixType& matrix);
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<PardisoImpl, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_initialized && "Pardiso solver is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<PardisoImpl, Rhs>(*this, b.derived());
+    }
+
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<PardisoImpl, Rhs>
+    solve(const SparseMatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_initialized && "Pardiso solver is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::sparse_solve_retval<PardisoImpl, Rhs>(*this, b.derived());
+    }
+
+    Derived& derived()
+    {
+      return *static_cast<Derived*>(this);
+    }
+    const Derived& derived() const
+    {
+      return *static_cast<const Derived*>(this);
+    }
+
+    template<typename BDerived, typename XDerived>
+    bool _solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const;
+
+    /** \internal */
+    template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
+    void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+    {
+      eigen_assert(m_size==b.rows());
+
+      // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
+      static const int NbColsAtOnce = 4;
+      int rhsCols = b.cols();
+      int size = b.rows();
+      // Pardiso cannot solve in-place,
+      // so we need two temporaries
+      Eigen::Matrix<DestScalar,Dynamic,Dynamic,ColMajor> tmp_rhs(size,rhsCols);
+      Eigen::Matrix<DestScalar,Dynamic,Dynamic,ColMajor> tmp_res(size,rhsCols);
+      for(int k=0; k<rhsCols; k+=NbColsAtOnce)
+      {
+        int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
+        tmp_rhs.leftCols(actualCols) = b.middleCols(k,actualCols);
+        tmp_res.leftCols(actualCols) = derived().solve(tmp_rhs.leftCols(actualCols));
+        dest.middleCols(k,actualCols) = tmp_res.leftCols(actualCols).sparseView();
+      }
+    }
+
+  protected:
+    void pardisoRelease()
+    {
+      if(m_initialized) // Factorization ran at least once
+      {
+        internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, -1, m_size, 0, 0, 0, m_perm.data(), 0,
+                                                   m_iparm.data(), m_msglvl, 0, 0);
+      }
+    }
+
+    void pardisoInit(int type)
+    {
+      m_type = type;
+      bool symmetric = abs(m_type) < 10;
+      m_iparm[0] = 1;   // No solver default
+      m_iparm[1] = 3;   // use Metis for the ordering
+      m_iparm[2] = 1;   // Numbers of processors, value of OMP_NUM_THREADS
+      m_iparm[3] = 0;   // No iterative-direct algorithm
+      m_iparm[4] = 0;   // No user fill-in reducing permutation
+      m_iparm[5] = 0;   // Write solution into x
+      m_iparm[6] = 0;   // Not in use
+      m_iparm[7] = 2;   // Max numbers of iterative refinement steps
+      m_iparm[8] = 0;   // Not in use
+      m_iparm[9] = 13;  // Perturb the pivot elements with 1E-13
+      m_iparm[10] = symmetric ? 0 : 1; // Use nonsymmetric permutation and scaling MPS
+      m_iparm[11] = 0;  // Not in use
+      m_iparm[12] = symmetric ? 0 : 1;  // Maximum weighted matching algorithm is switched-off (default for symmetric).
+                                        // Try m_iparm[12] = 1 in case of inappropriate accuracy
+      m_iparm[13] = 0;  // Output: Number of perturbed pivots
+      m_iparm[14] = 0;  // Not in use
+      m_iparm[15] = 0;  // Not in use
+      m_iparm[16] = 0;  // Not in use
+      m_iparm[17] = -1; // Output: Number of nonzeros in the factor LU
+      m_iparm[18] = -1; // Output: Mflops for LU factorization
+      m_iparm[19] = 0;  // Output: Numbers of CG Iterations
+      
+      m_iparm[20] = 0;  // 1x1 pivoting
+      m_iparm[26] = 0;  // No matrix checker
+      m_iparm[27] = (sizeof(RealScalar) == 4) ? 1 : 0;
+      m_iparm[34] = 1;  // C indexing
+      m_iparm[59] = 1;  // Automatic switch between In-Core and Out-of-Core modes
+    }
+
+  protected:
+    // cached data to reduce reallocation, etc.
+    
+    void manageErrorCode(Index error)
+    {
+      switch(error)
+      {
+        case 0:
+          m_info = Success;
+          break;
+        case -4:
+        case -7:
+          m_info = NumericalIssue;
+          break;
+        default:
+          m_info = InvalidInput;
+      }
+    }
+
+    mutable SparseMatrixType m_matrix;
+    ComputationInfo m_info;
+    bool m_initialized, m_analysisIsOk, m_factorizationIsOk;
+    Index m_type, m_msglvl;
+    mutable void *m_pt[64];
+    mutable ParameterType m_iparm;
+    mutable IntColVectorType m_perm;
+    Index m_size;
+    
+  private:
+    PardisoImpl(PardisoImpl &) {}
+};
+
+template<class Derived>
+Derived& PardisoImpl<Derived>::compute(const MatrixType& a)
+{
+  m_size = a.rows();
+  eigen_assert(a.rows() == a.cols());
+
+  pardisoRelease();
+  memset(m_pt, 0, sizeof(m_pt));
+  m_perm.setZero(m_size);
+  derived().getMatrix(a);
+  
+  Index error;
+  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 12, m_size,
+                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+                                                     m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
+
+  manageErrorCode(error);
+  m_analysisIsOk = true;
+  m_factorizationIsOk = true;
+  m_initialized = true;
+  return derived();
+}
+
+template<class Derived>
+Derived& PardisoImpl<Derived>::analyzePattern(const MatrixType& a)
+{
+  m_size = a.rows();
+  eigen_assert(m_size == a.cols());
+
+  pardisoRelease();
+  memset(m_pt, 0, sizeof(m_pt));
+  m_perm.setZero(m_size);
+  derived().getMatrix(a);
+  
+  Index error;
+  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 11, m_size,
+                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+                                                     m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
+  
+  manageErrorCode(error);
+  m_analysisIsOk = true;
+  m_factorizationIsOk = false;
+  m_initialized = true;
+  return derived();
+}
+
+template<class Derived>
+Derived& PardisoImpl<Derived>::factorize(const MatrixType& a)
+{
+  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+  eigen_assert(m_size == a.rows() && m_size == a.cols());
+  
+  derived().getMatrix(a);
+
+  Index error;  
+  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 22, m_size,
+                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+                                                     m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
+  
+  manageErrorCode(error);
+  m_factorizationIsOk = true;
+  return derived();
+}
+
+template<class Base>
+template<typename BDerived,typename XDerived>
+bool PardisoImpl<Base>::_solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const
+{
+  if(m_iparm[0] == 0) // Factorization was not computed
+    return false;
+
+  //Index n = m_matrix.rows();
+  Index nrhs = Index(b.cols());
+  eigen_assert(m_size==b.rows());
+  eigen_assert(((MatrixBase<BDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major right hand sides are not supported");
+  eigen_assert(((MatrixBase<XDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major matrices of unknowns are not supported");
+  eigen_assert(((nrhs == 1) || b.outerStride() == b.rows()));
+
+
+//  switch (transposed) {
+//    case SvNoTrans    : m_iparm[11] = 0 ; break;
+//    case SvTranspose  : m_iparm[11] = 2 ; break;
+//    case SvAdjoint    : m_iparm[11] = 1 ; break;
+//    default:
+//      //std::cerr << "Eigen: transposition  option \"" << transposed << "\" not supported by the PARDISO backend\n";
+//      m_iparm[11] = 0;
+//  }
+
+  Scalar* rhs_ptr = const_cast<Scalar*>(b.derived().data());
+  Matrix<Scalar,Dynamic,Dynamic,ColMajor> tmp;
+  
+  // Pardiso cannot solve in-place
+  if(rhs_ptr == x.derived().data())
+  {
+    tmp = b;
+    rhs_ptr = tmp.data();
+  }
+  
+  Index error;
+  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 33, m_size,
+                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+                                                     m_perm.data(), nrhs, m_iparm.data(), m_msglvl,
+                                                     rhs_ptr, x.derived().data());
+
+  return error==0;
+}
+
+
+/** \ingroup PardisoSupport_Module
+  * \class PardisoLU
+  * \brief A sparse direct LU factorization and solver based on the PARDISO library
+  *
+  * This class allows solving A.X = B sparse linear problems via a direct LU factorization
+  * using the Intel MKL PARDISO library. The sparse matrix A must be square and invertible.
+  * The vectors or matrices X and B can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  *
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename MatrixType>
+class PardisoLU : public PardisoImpl< PardisoLU<MatrixType> >
+{
+  protected:
+    typedef PardisoImpl< PardisoLU<MatrixType> > Base;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::RealScalar RealScalar;
+    using Base::pardisoInit;
+    using Base::m_matrix;
+    friend class PardisoImpl< PardisoLU<MatrixType> >;
+
+  public:
+
+    using Base::compute;
+    using Base::solve;
+
+    PardisoLU()
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? 13 : 11);
+    }
+
+    PardisoLU(const MatrixType& matrix)
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? 13 : 11);
+      compute(matrix);
+    }
+  protected:
+    void getMatrix(const MatrixType& matrix)
+    {
+      m_matrix = matrix;
+    }
+    
+  private:
+    PardisoLU(PardisoLU& ) {}
+};
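+
+/* A minimal usage sketch (illustration only; requires Intel MKL, and assumes a
+ * user-provided square, invertible SparseMatrix<double> A and a matching VectorXd b):
+ *
+ * \code
+ * PardisoLU<SparseMatrix<double> > solver;
+ * solver.compute(A);                 // analyzePattern() followed by factorize()
+ * if(solver.info() == Success)
+ * {
+ *   VectorXd x = solver.solve(b);
+ *   // use x here
+ * }
+ * \endcode
+ */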
+
+/** \ingroup PardisoSupport_Module
+  * \class PardisoLLT
+  * \brief A sparse direct Cholesky (LLT) factorization and solver based on the PARDISO library
+  *
+  * This class allows solving A.X = B sparse linear problems via an LL^T Cholesky factorization
+  * using the Intel MKL PARDISO library. The sparse matrix A must be selfadjoint and positive definite.
+  * The vectors or matrices X and B can be either dense or sparse.
+  *
+  * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam UpLo can be any bitwise combination of Upper, Lower. The default is Upper, meaning only the upper triangular part has to be used.
+  *         Upper|Lower can be used to tell both triangular parts can be used as input.
+  *
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename MatrixType, int _UpLo>
+class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
+{
+  protected:
+    typedef PardisoImpl< PardisoLLT<MatrixType,_UpLo> > Base;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::Index Index;
+    typedef typename Base::RealScalar RealScalar;
+    using Base::pardisoInit;
+    using Base::m_matrix;
+    friend class PardisoImpl< PardisoLLT<MatrixType,_UpLo> >;
+
+  public:
+
+    enum { UpLo = _UpLo };
+    using Base::compute;
+    using Base::solve;
+
+    PardisoLLT()
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? 4 : 2);
+    }
+
+    PardisoLLT(const MatrixType& matrix)
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? 4 : 2);
+      compute(matrix);
+    }
+    
+  protected:
+    
+    void getMatrix(const MatrixType& matrix)
+    {
+      // PARDISO supports only upper, row-major matrices
+      PermutationMatrix<Dynamic,Dynamic,Index> p_null;
+      m_matrix.resize(matrix.rows(), matrix.cols());
+      m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
+    }
+    
+  private:
+    PardisoLLT(PardisoLLT& ) {}
+};
+
+/** \ingroup PardisoSupport_Module
+  * \class PardisoLDLT
+  * \brief A sparse direct Cholesky (LDLT) factorization and solver based on the PARDISO library
+  *
+  * This class allows solving A.X = B sparse linear problems via an LDL^T Cholesky factorization
+  * using the Intel MKL PARDISO library. The sparse matrix A is assumed to be selfadjoint and positive definite.
+  * For complex matrices, A can also be symmetric only, see the \a Options template parameter.
+  * The vectors or matrices X and B can be either dense or sparse.
+  *
+  * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam Options can be any bitwise combination of Upper, Lower, and Symmetric. The default is Upper, meaning only the upper triangular part has to be used.
+  *         Symmetric can be used for symmetric, non-selfadjoint complex matrices, the default being to assume a selfadjoint matrix.
+  *         Upper|Lower can be used to tell both triangular parts can be used as input.
+  *
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename MatrixType, int Options>
+class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >
+{
+  protected:
+    typedef PardisoImpl< PardisoLDLT<MatrixType,Options> > Base;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::Index Index;
+    typedef typename Base::RealScalar RealScalar;
+    using Base::pardisoInit;
+    using Base::m_matrix;
+    friend class PardisoImpl< PardisoLDLT<MatrixType,Options> >;
+
+  public:
+
+    using Base::compute;
+    using Base::solve;
+    enum { UpLo = Options&(Upper|Lower) };
+
+    PardisoLDLT()
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2);
+    }
+
+    PardisoLDLT(const MatrixType& matrix)
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2);
+      compute(matrix);
+    }
+    
+    void getMatrix(const MatrixType& matrix)
+    {
+      // PARDISO supports only upper, row-major matrices
+      PermutationMatrix<Dynamic,Dynamic,Index> p_null;
+      m_matrix.resize(matrix.rows(), matrix.cols());
+      m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
+    }
+    
+  private:
+    PardisoLDLT(PardisoLDLT& ) {}
+};
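+
+/* A minimal usage sketch (illustration only; requires Intel MKL, and assumes a
+ * user-provided selfadjoint SparseMatrix<double> A whose lower triangular part is
+ * stored, plus a matching VectorXd b): the Options template parameter selects which
+ * triangular part of A is referenced.
+ *
+ * \code
+ * PardisoLDLT<SparseMatrix<double>, Lower> ldlt;
+ * ldlt.compute(A);                   // only the lower triangular part of A is read
+ * VectorXd x = ldlt.solve(b);
+ * \endcode
+ */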
+
+namespace internal {
+  
+template<typename _Derived, typename Rhs>
+struct solve_retval<PardisoImpl<_Derived>, Rhs>
+  : solve_retval_base<PardisoImpl<_Derived>, Rhs>
+{
+  typedef PardisoImpl<_Derived> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+template<typename Derived, typename Rhs>
+struct sparse_solve_retval<PardisoImpl<Derived>, Rhs>
+  : sparse_solve_retval_base<PardisoImpl<Derived>, Rhs>
+{
+  typedef PardisoImpl<Derived> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec().derived()._solve_sparse(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARDISOSUPPORT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/QR/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/QR/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/QR/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/QR/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/QR/ColPivHouseholderQR.h b/resources/3rdParty/eigen/Eigen/src/QR/ColPivHouseholderQR.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/QR/ColPivHouseholderQR.h
rename to resources/3rdParty/eigen/Eigen/src/QR/ColPivHouseholderQR.h
diff --git a/resources/3rdparty/eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h b/resources/3rdParty/eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/QR/FullPivHouseholderQR.h b/resources/3rdParty/eigen/Eigen/src/QR/FullPivHouseholderQR.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/QR/FullPivHouseholderQR.h
rename to resources/3rdParty/eigen/Eigen/src/QR/FullPivHouseholderQR.h
diff --git a/resources/3rdParty/eigen/Eigen/src/QR/HouseholderQR.h b/resources/3rdParty/eigen/Eigen/src/QR/HouseholderQR.h
new file mode 100644
index 000000000..5bcb32c1e
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/QR/HouseholderQR.h
@@ -0,0 +1,343 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2010 Vincent Lejeune
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_QR_H
+#define EIGEN_QR_H
+
+namespace Eigen { 
+
+/** \ingroup QR_Module
+  *
+  *
+  * \class HouseholderQR
+  *
+  * \brief Householder QR decomposition of a matrix
+  *
+  * \param MatrixType the type of the matrix of which we are computing the QR decomposition
+  *
+  * This class performs a QR decomposition of a matrix \b A into matrices \b Q and \b R
+  * such that 
+  * \f[
+  *  \mathbf{A} = \mathbf{Q} \, \mathbf{R}
+  * \f]
+  * by using Householder transformations. Here, \b Q is a unitary matrix and \b R is an upper triangular matrix.
+  * The result is stored in a compact way compatible with LAPACK.
+  *
+  * Note that no pivoting is performed. This is \b not a rank-revealing decomposition.
+  * If you want that feature, use FullPivHouseholderQR or ColPivHouseholderQR instead.
+  *
+  * This Householder QR decomposition is faster, but less numerically stable and less feature-rich than
+  * FullPivHouseholderQR or ColPivHouseholderQR.
+  *
+  * \sa MatrixBase::householderQr()
+  */
+template<typename _MatrixType> class HouseholderQR
+{
+  public:
+
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
+    typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
+    typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
+    typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType;
+
+    /**
+    * \brief Default Constructor.
+    *
+    * The default constructor is useful in cases in which the user intends to
+    * perform decompositions via HouseholderQR::compute(const MatrixType&).
+    */
+    HouseholderQR() : m_qr(), m_hCoeffs(), m_temp(), m_isInitialized(false) {}
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa HouseholderQR()
+      */
+    HouseholderQR(Index rows, Index cols)
+      : m_qr(rows, cols),
+        m_hCoeffs((std::min)(rows,cols)),
+        m_temp(cols),
+        m_isInitialized(false) {}
+
+    HouseholderQR(const MatrixType& matrix)
+      : m_qr(matrix.rows(), matrix.cols()),
+        m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
+        m_temp(matrix.cols()),
+        m_isInitialized(false)
+    {
+      compute(matrix);
+    }
+
+    /** This method finds a solution x to the equation Ax=b, where A is the matrix of which
+      * *this is the QR decomposition, if any exists.
+      *
+      * \param b the right-hand-side of the equation to solve.
+      *
+      * \returns a solution.
+      *
+      * \note The case where b is a matrix is not yet implemented. Also, this
+      *       code is space inefficient.
+      *
+      * \note_about_checking_solutions
+      *
+      * \note_about_arbitrary_choice_of_solution
+      *
+      * Example: \include HouseholderQR_solve.cpp
+      * Output: \verbinclude HouseholderQR_solve.out
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<HouseholderQR, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+      return internal::solve_retval<HouseholderQR, Rhs>(*this, b.derived());
+    }
+
+    HouseholderSequenceType householderQ() const
+    {
+      eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+      return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
+    }
+
+    /** \returns a reference to the matrix where the Householder QR decomposition is stored
+      * in a LAPACK-compatible way.
+      */
+    const MatrixType& matrixQR() const
+    {
+        eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+        return m_qr;
+    }
+
+    HouseholderQR& compute(const MatrixType& matrix);
+
+    /** \returns the absolute value of the determinant of the matrix of which
+      * *this is the QR decomposition. It has only linear complexity
+      * (that is, O(n) where n is the dimension of the square matrix)
+      * as the QR decomposition has already been computed.
+      *
+      * \note This is only for square matrices.
+      *
+      * \warning a determinant can be very big or small, so for matrices
+      * of large enough dimension, there is a risk of overflow/underflow.
+      * One way to work around that is to use logAbsDeterminant() instead.
+      *
+      * \sa logAbsDeterminant(), MatrixBase::determinant()
+      */
+    typename MatrixType::RealScalar absDeterminant() const;
+
+    /** \returns the natural log of the absolute value of the determinant of the matrix of which
+      * *this is the QR decomposition. It has only linear complexity
+      * (that is, O(n) where n is the dimension of the square matrix)
+      * as the QR decomposition has already been computed.
+      *
+      * \note This is only for square matrices.
+      *
+      * \note This method is useful to work around the risk of overflow/underflow that's inherent
+      * to determinant computation.
+      *
+      * \sa absDeterminant(), MatrixBase::determinant()
+      */
+    typename MatrixType::RealScalar logAbsDeterminant() const;
+
+    inline Index rows() const { return m_qr.rows(); }
+    inline Index cols() const { return m_qr.cols(); }
+    const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
+
+  protected:
+    MatrixType m_qr;
+    HCoeffsType m_hCoeffs;
+    RowVectorType m_temp;
+    bool m_isInitialized;
+};
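+
+/* A minimal usage sketch (illustration only; assumes a user-provided, reasonably
+ * well-conditioned square MatrixXd A and a matching VectorXd b):
+ *
+ * \code
+ * HouseholderQR<MatrixXd> qr(A);
+ * VectorXd x = qr.solve(b);                  // solves A x = b (no pivoting)
+ * MatrixXd Q = qr.householderQ();            // Q as an explicit dense matrix
+ * MatrixXd R = qr.matrixQR().triangularView<Upper>();
+ * \endcode
+ */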
+
+template<typename MatrixType>
+typename MatrixType::RealScalar HouseholderQR<MatrixType>::absDeterminant() const
+{
+  eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+  eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+  return internal::abs(m_qr.diagonal().prod());
+}
+
+template<typename MatrixType>
+typename MatrixType::RealScalar HouseholderQR<MatrixType>::logAbsDeterminant() const
+{
+  eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+  eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+  return m_qr.diagonal().cwiseAbs().array().log().sum();
+}
+
+namespace internal {
+
+/** \internal */
+template<typename MatrixQR, typename HCoeffs>
+void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0)
+{
+  typedef typename MatrixQR::Index Index;
+  typedef typename MatrixQR::Scalar Scalar;
+  typedef typename MatrixQR::RealScalar RealScalar;
+  Index rows = mat.rows();
+  Index cols = mat.cols();
+  Index size = (std::min)(rows,cols);
+
+  eigen_assert(hCoeffs.size() == size);
+
+  typedef Matrix<Scalar,MatrixQR::ColsAtCompileTime,1> TempType;
+  TempType tempVector;
+  if(tempData==0)
+  {
+    tempVector.resize(cols);
+    tempData = tempVector.data();
+  }
+
+  for(Index k = 0; k < size; ++k)
+  {
+    Index remainingRows = rows - k;
+    Index remainingCols = cols - k - 1;
+
+    RealScalar beta;
+    mat.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeffs.coeffRef(k), beta);
+    mat.coeffRef(k,k) = beta;
+
+    // apply H to remaining part of m_qr from the left
+    mat.bottomRightCorner(remainingRows, remainingCols)
+        .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), hCoeffs.coeffRef(k), tempData+k+1);
+  }
+}
+
+/** \internal */
+template<typename MatrixQR, typename HCoeffs>
+void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,
+                                       typename MatrixQR::Index maxBlockSize=32,
+                                       typename MatrixQR::Scalar* tempData = 0)
+{
+  typedef typename MatrixQR::Index Index;
+  typedef typename MatrixQR::Scalar Scalar;
+  typedef typename MatrixQR::RealScalar RealScalar;
+  typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;
+
+  Index rows = mat.rows();
+  Index cols = mat.cols();
+  Index size = (std::min)(rows, cols);
+
+  typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
+  TempType tempVector;
+  if(tempData==0)
+  {
+    tempVector.resize(cols);
+    tempData = tempVector.data();
+  }
+
+  Index blockSize = (std::min)(maxBlockSize,size);
+
+  Index k = 0;
+  for (k = 0; k < size; k += blockSize)
+  {
+    Index bs = (std::min)(size-k,blockSize);  // actual size of the block
+    Index tcols = cols - k - bs;            // trailing columns
+    Index brows = rows-k;                   // rows of the block
+
+    // Partition the matrix:
+    //        A00 | A01 | A02
+    // mat  = A10 | A11 | A12
+    //        A20 | A21 | A22
+    // Perform the QR decomposition of the panel [A11^T A21^T]^T, then update the
+    // trailing columns [A12^T A22^T]^T using level-3 operations.
+    // Finally, the algorithm continues on A22.
+
+    BlockType A11_21 = mat.block(k,k,brows,bs);
+    Block<HCoeffs,Dynamic,1> hCoeffsSegment = hCoeffs.segment(k,bs);
+
+    householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData);
+
+    if(tcols)
+    {
+      BlockType A21_22 = mat.block(k,k+bs,brows,tcols);
+      apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment.adjoint());
+    }
+  }
+}
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
+  : solve_retval_base<HouseholderQR<_MatrixType>, Rhs>
+{
+  EIGEN_MAKE_SOLVE_HELPERS(HouseholderQR<_MatrixType>,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    const Index rows = dec().rows(), cols = dec().cols();
+    const Index rank = (std::min)(rows, cols);
+    eigen_assert(rhs().rows() == rows);
+
+    typename Rhs::PlainObject c(rhs());
+
+    // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
+    c.applyOnTheLeft(householderSequence(
+      dec().matrixQR().leftCols(rank),
+      dec().hCoeffs().head(rank)).transpose()
+    );
+
+    dec().matrixQR()
+       .topLeftCorner(rank, rank)
+       .template triangularView<Upper>()
+       .solveInPlace(c.topRows(rank));
+
+    dst.topRows(rank) = c.topRows(rank);
+    dst.bottomRows(cols-rank).setZero();
+  }
+};
+
+} // end namespace internal
+
+template<typename MatrixType>
+HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
+{
+  Index rows = matrix.rows();
+  Index cols = matrix.cols();
+  Index size = (std::min)(rows,cols);
+
+  m_qr = matrix;
+  m_hCoeffs.resize(size);
+
+  m_temp.resize(cols);
+
+  internal::householder_qr_inplace_blocked(m_qr, m_hCoeffs, 48, m_temp.data());
+
+  m_isInitialized = true;
+  return *this;
+}
+
+/** \return the Householder QR decomposition of \c *this.
+  *
+  * \sa class HouseholderQR
+  */
+template<typename Derived>
+const HouseholderQR<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::householderQr() const
+{
+  return HouseholderQR<PlainObject>(eval());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_QR_H
diff --git a/resources/3rdparty/eigen/Eigen/src/QR/HouseholderQR_MKL.h b/resources/3rdParty/eigen/Eigen/src/QR/HouseholderQR_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/QR/HouseholderQR_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/QR/HouseholderQR_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SVD/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/SVD/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SVD/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/SVD/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/Eigen/src/SVD/JacobiSVD.h b/resources/3rdParty/eigen/Eigen/src/SVD/JacobiSVD.h
new file mode 100644
index 000000000..a7dbf0737
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SVD/JacobiSVD.h
@@ -0,0 +1,867 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_JACOBISVD_H
+#define EIGEN_JACOBISVD_H
+
+namespace Eigen { 
+
+namespace internal {
+// forward declaration (needed by ICC)
+// the empty body is required by MSVC
+template<typename MatrixType, int QRPreconditioner,
+         bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
+struct svd_precondition_2x2_block_to_be_real {};
+
+/*** QR preconditioners (R-SVD)
+ ***
+ *** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
+ *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
+ *** JacobiSVD which by itself is only able to work on square matrices.
+ ***/
+
+enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+struct qr_preconditioner_should_do_anything
+{
+  enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
+             MatrixType::ColsAtCompileTime != Dynamic &&
+             MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
+         b = MatrixType::RowsAtCompileTime != Dynamic &&
+             MatrixType::ColsAtCompileTime != Dynamic &&
+             MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
+         ret = !( (QRPreconditioner == NoQRPreconditioner) ||
+                  (Case == PreconditionIfMoreColsThanRows && bool(a)) ||
+                  (Case == PreconditionIfMoreRowsThanCols && bool(b)) )
+  };
+};
+
+template<typename MatrixType, int QRPreconditioner, int Case,
+         bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
+> struct qr_preconditioner_impl {};
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
+{
+public:
+  typedef typename MatrixType::Index Index;
+  void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
+  bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
+  {
+    return false;
+  }
+};
+
+/*** preconditioner using FullPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  enum
+  {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
+  };
+  typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
+
+  void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+  {
+    if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+    {
+      m_qr = FullPivHouseholderQR<MatrixType>(svd.rows(), svd.cols());
+    }
+    if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+  }
+
+  bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.rows() > matrix.cols())
+    {
+      m_qr.compute(matrix);
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+      if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
+      if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+      return true;
+    }
+    return false;
+  }
+private:
+  FullPivHouseholderQR<MatrixType> m_qr;
+  WorkspaceType m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  enum
+  {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+    Options = MatrixType::Options
+  };
+  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+          TransposeTypeWithSameStorageOrder;
+
+  void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+  {
+    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+    {
+      m_qr = FullPivHouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+    }
+    m_adjoint.resize(svd.cols(), svd.rows());
+    if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+  }
+
+  bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.cols() > matrix.rows())
+    {
+      m_adjoint = matrix.adjoint();
+      m_qr.compute(m_adjoint);
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+      if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
+      if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+      return true;
+    }
+    else return false;
+  }
+private:
+  FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+  TransposeTypeWithSameStorageOrder m_adjoint;
+  typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using ColPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+
+  void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+  {
+    if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+    {
+      m_qr = ColPivHouseholderQR<MatrixType>(svd.rows(), svd.cols());
+    }
+    if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+    else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+  }
+
+  bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.rows() > matrix.cols())
+    {
+      m_qr.compute(matrix);
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+      if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+      else if(svd.m_computeThinU)
+      {
+        svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+      }
+      if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+      return true;
+    }
+    return false;
+  }
+
+private:
+  ColPivHouseholderQR<MatrixType> m_qr;
+  typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  enum
+  {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+    Options = MatrixType::Options
+  };
+
+  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+          TransposeTypeWithSameStorageOrder;
+
+  void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+  {
+    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+    {
+      m_qr = ColPivHouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+    }
+    if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+    else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+    m_adjoint.resize(svd.cols(), svd.rows());
+  }
+
+  bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.cols() > matrix.rows())
+    {
+      m_adjoint = matrix.adjoint();
+      m_qr.compute(m_adjoint);
+
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+      if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+      else if(svd.m_computeThinV)
+      {
+        svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+      }
+      if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+      return true;
+    }
+    else return false;
+  }
+
+private:
+  ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+  TransposeTypeWithSameStorageOrder m_adjoint;
+  typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using HouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+
+  void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+  {
+    if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+    {
+      m_qr = HouseholderQR<MatrixType>(svd.rows(), svd.cols());
+    }
+    if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+    else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+  }
+
+  bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.rows() > matrix.cols())
+    {
+      m_qr.compute(matrix);
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+      if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+      else if(svd.m_computeThinU)
+      {
+        svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+      }
+      if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
+      return true;
+    }
+    return false;
+  }
+private:
+  HouseholderQR<MatrixType> m_qr;
+  typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  enum
+  {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+    Options = MatrixType::Options
+  };
+
+  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+          TransposeTypeWithSameStorageOrder;
+
+  void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+  {
+    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+    {
+      m_qr = HouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+    }
+    if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+    else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+    m_adjoint.resize(svd.cols(), svd.rows());
+  }
+
+  bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.cols() > matrix.rows())
+    {
+      m_adjoint = matrix.adjoint();
+      m_qr.compute(m_adjoint);
+
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+      if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+      else if(svd.m_computeThinV)
+      {
+        svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+      }
+      if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
+      return true;
+    }
+    else return false;
+  }
+
+private:
+  HouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+  TransposeTypeWithSameStorageOrder m_adjoint;
+  typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** 2x2 SVD implementation
+ ***
+ *** JacobiSVD works by performing a series of 2x2 SVD subproblems
+ ***/
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
+{
+  typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+  typedef typename SVD::Index Index;
+  static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
+};
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
+{
+  typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef typename SVD::Index Index;
+  static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
+  {
+    Scalar z;
+    JacobiRotation<Scalar> rot;
+    RealScalar n = sqrt(abs2(work_matrix.coeff(p,p)) + abs2(work_matrix.coeff(q,p)));
+    if(n==0)
+    {
+      z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+      work_matrix.row(p) *= z;
+      if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
+      z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+      work_matrix.row(q) *= z;
+      if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+    }
+    else
+    {
+      rot.c() = conj(work_matrix.coeff(p,p)) / n;
+      rot.s() = work_matrix.coeff(q,p) / n;
+      work_matrix.applyOnTheLeft(p,q,rot);
+      if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
+      if(work_matrix.coeff(p,q) != Scalar(0))
+      {
+        Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+        work_matrix.col(q) *= z;
+        if(svd.computeV()) svd.m_matrixV.col(q) *= z;
+      }
+      if(work_matrix.coeff(q,q) != Scalar(0))
+      {
+        z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+        work_matrix.row(q) *= z;
+        if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+      }
+    }
+  }
+};
+
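+// Editor's note (summarizing the routine below; not part of the original source):
+// real_2x2_jacobi_svd computes a 2x2 real SVD in two steps. A first rotation rot1,
+// built from t = m00 + m11 and d = m10 - m01, cancels the antisymmetric part of the
+// block so that it becomes symmetric; a second rotation, obtained with makeJacobi(),
+// then diagonalizes that symmetric block. The two rotations are combined into the
+// returned pair (*j_left, *j_right), which the caller applies on the left and right.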
+template<typename MatrixType, typename RealScalar, typename Index>
+void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
+                            JacobiRotation<RealScalar> *j_left,
+                            JacobiRotation<RealScalar> *j_right)
+{
+  Matrix<RealScalar,2,2> m;
+  m << real(matrix.coeff(p,p)), real(matrix.coeff(p,q)),
+       real(matrix.coeff(q,p)), real(matrix.coeff(q,q));
+  JacobiRotation<RealScalar> rot1;
+  RealScalar t = m.coeff(0,0) + m.coeff(1,1);
+  RealScalar d = m.coeff(1,0) - m.coeff(0,1);
+  if(t == RealScalar(0))
+  {
+    rot1.c() = RealScalar(0);
+    rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
+  }
+  else
+  {
+    RealScalar u = d / t;
+    rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + abs2(u));
+    rot1.s() = rot1.c() * u;
+  }
+  m.applyOnTheLeft(0,1,rot1);
+  j_right->makeJacobi(m,0,1);
+  *j_left  = rot1 * j_right->transpose();
+}
+
+} // end namespace internal
+
+/** \ingroup SVD_Module
+  *
+  *
+  * \class JacobiSVD
+  *
+  * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
+  *
+  * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+  * \param QRPreconditioner this optional parameter allows specifying the type of QR decomposition that will be used internally
+  *                        for the R-SVD step for non-square matrices. See the discussion of possible values below.
+  *
+  * The SVD decomposition consists of decomposing any n-by-p matrix \a A as a product
+  *   \f[ A = U S V^* \f]
+  * where \a U is an n-by-n unitary, \a V is a p-by-p unitary, and \a S is an n-by-p real positive matrix which is zero outside of its main diagonal;
+  * the diagonal entries of \a S are known as the \em singular \em values of \a A, and the columns of \a U and \a V are known as the left
+  * and right \em singular \em vectors of \a A respectively.
+  *
+  * Singular values are always sorted in decreasing order.
+  *
+  * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
+  *
+  * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In the case of a rectangular n-by-p matrix, letting \a m be the
+  * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
+  * singular vectors. Asking for \em thin \a U or \a V means asking for only their first \a m columns to be formed. So \a U is then an n-by-m matrix,
+  * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least-squares) solving.
+  *
+  * Here's an example demonstrating basic usage:
+  * \include JacobiSVD_basic.cpp
+  * Output: \verbinclude JacobiSVD_basic.out
+  *
+  * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
+  * bidiagonalizing SVD algorithms for large square matrices; however, its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
+  * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
+  * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
+  *
+  * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+  * terminate in finite (and reasonable) time.
+  *
+  * The possible values for QRPreconditioner are listed below; a short usage sketch follows the list.
+  * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
+  * \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
+  *     Contrary to the other QRs, it doesn't allow computing thin unitaries.
+  * \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
+  *     This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
+  *     is inherently non-pivoting). However, the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
+  *     process is more reliable than the optimized bidiagonal SVD iterations.
+  * \li NoQRPreconditioner lets you skip the QR preconditioner entirely. This is useful if you know that you will only be computing
+  *     JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
+  *     faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD always checks
+  *     whether QR preconditioning is needed before applying it anyway.
+  *
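+  * As a brief, non-normative usage sketch of the preconditioner choice (the matrix and
+  * variable names below are placeholders only):
+  * \code
+  * MatrixXf m = MatrixXf::Random(4,2);
+  * // default preconditioner (ColPivHouseholderQRPreconditioner), thin unitaries:
+  * JacobiSVD<MatrixXf> svd1(m, ComputeThinU | ComputeThinV);
+  * // explicit preconditioner; FullPivHouseholderQR only supports full unitaries:
+  * JacobiSVD<MatrixXf, FullPivHouseholderQRPreconditioner> svd2(m, ComputeFullU | ComputeFullV);
+  * \endcode
+  *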
+  * \sa MatrixBase::jacobiSvd()
+  */
+template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
+{
+  public:
+
+    typedef _MatrixType MatrixType;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+      MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+      MatrixOptions = MatrixType::Options
+    };
+
+    typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+                   MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+            MatrixUType;
+    typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+                   MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+            MatrixVType;
+    typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+    typedef typename internal::plain_row_type<MatrixType>::type RowType;
+    typedef typename internal::plain_col_type<MatrixType>::type ColType;
+    typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
+                   MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
+            WorkMatrixType;
+
+    /** \brief Default Constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via JacobiSVD::compute(const MatrixType&).
+      */
+    JacobiSVD()
+      : m_isInitialized(false),
+        m_isAllocated(false),
+        m_computationOptions(0),
+        m_rows(-1), m_cols(-1)
+    {}
+
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem size.
+      * \sa JacobiSVD()
+      */
+    JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+      : m_isInitialized(false),
+        m_isAllocated(false),
+        m_computationOptions(0),
+        m_rows(-1), m_cols(-1)
+    {
+      allocate(rows, cols, computationOptions);
+    }
+
+    /** \brief Constructor performing the decomposition of the given matrix.
+     *
+     * \param matrix the matrix to decompose
+     * \param computationOptions optional parameter allowing you to specify whether full or thin U or V unitaries should be computed.
+     *                           By default, none is computed. This is a bit-field whose possible bits are #ComputeFullU, #ComputeThinU,
+     *                           #ComputeFullV, #ComputeThinV.
+     *
+     * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
+     * available with the (non-default) FullPivHouseholderQR preconditioner.
+     */
+    JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+      : m_isInitialized(false),
+        m_isAllocated(false),
+        m_computationOptions(0),
+        m_rows(-1), m_cols(-1)
+    {
+      compute(matrix, computationOptions);
+    }
+
+    /** \brief Method performing the decomposition of the given matrix using custom options.
+     *
+     * \param matrix the matrix to decompose
+     * \param computationOptions optional parameter allowing you to specify whether full or thin U or V unitaries should be computed.
+     *                           By default, none is computed. This is a bit-field whose possible bits are #ComputeFullU, #ComputeThinU,
+     *                           #ComputeFullV, #ComputeThinV.
+     *
+     * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
+     * available with the (non-default) FullPivHouseholderQR preconditioner.
+     */
+    JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+    /** \brief Method performing the decomposition of the given matrix using current options.
+     *
+     * \param matrix the matrix to decompose
+     *
+     * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+     */
+    JacobiSVD& compute(const MatrixType& matrix)
+    {
+      return compute(matrix, m_computationOptions);
+    }
+
+    /** \returns the \a U matrix.
+     *
+     * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+     * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU.
+     *
+     * The first \a m columns of \a U are the left singular vectors of the matrix being decomposed.
+     *
+     * This method asserts that you asked for \a U to be computed.
+     */
+    const MatrixUType& matrixU() const
+    {
+      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+      eigen_assert(computeU() && "This JacobiSVD decomposition didn't compute U. Did you ask for it?");
+      return m_matrixU;
+    }
+
+    /** \returns the \a V matrix.
+     *
+     * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+     * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for #ComputeThinV.
+     *
+     * The first \a m columns of \a V are the right singular vectors of the matrix being decomposed.
+     *
+     * This method asserts that you asked for \a V to be computed.
+     */
+    const MatrixVType& matrixV() const
+    {
+      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+      eigen_assert(computeV() && "This JacobiSVD decomposition didn't compute V. Did you ask for it?");
+      return m_matrixV;
+    }
+
+    /** \returns the vector of singular values.
+     *
+     * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
+     * returned vector has size \a m. Singular values are always sorted in decreasing order.
+     */
+    const SingularValuesType& singularValues() const
+    {
+      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+      return m_singularValues;
+    }
+
+    /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
+    inline bool computeU() const { return m_computeFullU || m_computeThinU; }
+    /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
+    inline bool computeV() const { return m_computeFullV || m_computeThinV; }
+
+    /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+      *
+      * \param b the right-hand-side of the equation to solve.
+      *
+      * \note Solving requires both U and V to be computed. Thin U and V are enough; there is no need for full U or V.
+      *
+      * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
+      * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
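+      *
+      * A minimal least-squares sketch (non-normative; names are illustrative only), assuming
+      * thin U and V were requested:
+      * \code
+      * MatrixXd A = MatrixXd::Random(6,3);
+      * VectorXd b = VectorXd::Random(6);
+      * JacobiSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);
+      * VectorXd x = svd.solve(b);   // minimizes the norm of A*x - b
+      * \endcode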
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<JacobiSVD, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+      eigen_assert(computeU() && computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
+      return internal::solve_retval<JacobiSVD, Rhs>(*this, b.derived());
+    }
+
+    /** \returns the number of singular values that are not exactly 0 */
+    Index nonzeroSingularValues() const
+    {
+      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+      return m_nonzeroSingularValues;
+    }
+
+    inline Index rows() const { return m_rows; }
+    inline Index cols() const { return m_cols; }
+
+  private:
+    void allocate(Index rows, Index cols, unsigned int computationOptions);
+
+  protected:
+    MatrixUType m_matrixU;
+    MatrixVType m_matrixV;
+    SingularValuesType m_singularValues;
+    WorkMatrixType m_workMatrix;
+    bool m_isInitialized, m_isAllocated;
+    bool m_computeFullU, m_computeThinU;
+    bool m_computeFullV, m_computeThinV;
+    unsigned int m_computationOptions;
+    Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
+
+    template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
+    friend struct internal::svd_precondition_2x2_block_to_be_real;
+    template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
+    friend struct internal::qr_preconditioner_impl;
+
+    internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
+    internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
+};
+
+template<typename MatrixType, int QRPreconditioner>
+void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+  eigen_assert(rows >= 0 && cols >= 0);
+
+  if (m_isAllocated &&
+      rows == m_rows &&
+      cols == m_cols &&
+      computationOptions == m_computationOptions)
+  {
+    return;
+  }
+
+  m_rows = rows;
+  m_cols = cols;
+  m_isInitialized = false;
+  m_isAllocated = true;
+  m_computationOptions = computationOptions;
+  m_computeFullU = (computationOptions & ComputeFullU) != 0;
+  m_computeThinU = (computationOptions & ComputeThinU) != 0;
+  m_computeFullV = (computationOptions & ComputeFullV) != 0;
+  m_computeThinV = (computationOptions & ComputeThinV) != 0;
+  eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
+  eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
+  eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
+              "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
+  if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
+  {
+      eigen_assert(!(m_computeThinU || m_computeThinV) &&
+              "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
+              "Use the ColPivHouseholderQR preconditioner instead.");
+  }
+  m_diagSize = (std::min)(m_rows, m_cols);
+  m_singularValues.resize(m_diagSize);
+  m_matrixU.resize(m_rows, m_computeFullU ? m_rows
+                          : m_computeThinU ? m_diagSize
+                          : 0);
+  m_matrixV.resize(m_cols, m_computeFullV ? m_cols
+                          : m_computeThinV ? m_diagSize
+                          : 0);
+  m_workMatrix.resize(m_diagSize, m_diagSize);
+  
+  if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
+  if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
+}
+
+template<typename MatrixType, int QRPreconditioner>
+JacobiSVD<MatrixType, QRPreconditioner>&
+JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
+{
+  allocate(matrix.rows(), matrix.cols(), computationOptions);
+
+  // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
+  // only worsening the precision of U and V as we accumulate more rotations
+  const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
+
+  // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
+  const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
+
+  /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
+
+  if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix))
+  {
+    m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize);
+    if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
+    if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
+    if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
+    if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
+  }
+
+  /*** step 2. The main Jacobi SVD iteration. ***/
+
+  bool finished = false;
+  while(!finished)
+  {
+    finished = true;
+
+    // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
+
+    for(Index p = 1; p < m_diagSize; ++p)
+    {
+      for(Index q = 0; q < p; ++q)
+      {
+        // if this 2x2 sub-matrix is not diagonal already...
+        // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaNs don't
+        // keep us iterating forever. Similarly, small denormal numbers are considered zero.
+        using std::max;
+        RealScalar threshold = (max)(considerAsZero, precision * (max)(internal::abs(m_workMatrix.coeff(p,p)),
+                                                                       internal::abs(m_workMatrix.coeff(q,q))));
+        if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p))) > threshold)
+        {
+          finished = false;
+
+          // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
+          internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
+          JacobiRotation<RealScalar> j_left, j_right;
+          internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
+
+          // accumulate resulting Jacobi rotations
+          m_workMatrix.applyOnTheLeft(p,q,j_left);
+          if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
+
+          m_workMatrix.applyOnTheRight(p,q,j_right);
+          if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
+        }
+      }
+    }
+  }
+
+  /*** step 3. The work matrix is now diagonal, so make its diagonal entries non-negative so that they are the singular values ***/
+
+  for(Index i = 0; i < m_diagSize; ++i)
+  {
+    RealScalar a = internal::abs(m_workMatrix.coeff(i,i));
+    m_singularValues.coeffRef(i) = a;
+    if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
+  }
+
+  /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
+
+  m_nonzeroSingularValues = m_diagSize;
+  for(Index i = 0; i < m_diagSize; i++)
+  {
+    Index pos;
+    RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos);
+    if(maxRemainingSingularValue == RealScalar(0))
+    {
+      m_nonzeroSingularValues = i;
+      break;
+    }
+    if(pos)
+    {
+      pos += i;
+      std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));
+      if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));
+      if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
+    }
+  }
+
+  m_isInitialized = true;
+  return *this;
+}
+
+namespace internal {
+template<typename _MatrixType, int QRPreconditioner, typename Rhs>
+struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+  : solve_retval_base<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+{
+  typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType;
+  EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    eigen_assert(rhs().rows() == dec().rows());
+
+    // A = U S V^*
+    // So A^{-1} = V S^{-1} U^*
+
+    Index diagSize = (std::min)(dec().rows(), dec().cols());
+    typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);
+
+    Index nonzeroSingVals = dec().nonzeroSingularValues();
+    invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
+    invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
+
+    dst = dec().matrixV().leftCols(diagSize)
+        * invertedSingVals.asDiagonal()
+        * dec().matrixU().leftCols(diagSize).adjoint()
+        * rhs();
+  }
+};
+} // end namespace internal
+
+/** \svd_module
+  *
+  * \return the singular value decomposition of \c *this computed by two-sided
+  * Jacobi transformations.
+  *
+  * \sa class JacobiSVD
+  */
+template<typename Derived>
+JacobiSVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
+{
+  return JacobiSVD<PlainObject>(*this, computationOptions);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBISVD_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SVD/JacobiSVD_MKL.h b/resources/3rdParty/eigen/Eigen/src/SVD/JacobiSVD_MKL.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SVD/JacobiSVD_MKL.h
rename to resources/3rdParty/eigen/Eigen/src/SVD/JacobiSVD_MKL.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SVD/UpperBidiagonalization.h b/resources/3rdParty/eigen/Eigen/src/SVD/UpperBidiagonalization.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SVD/UpperBidiagonalization.h
rename to resources/3rdParty/eigen/Eigen/src/SVD/UpperBidiagonalization.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCholesky/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/SparseCholesky/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCholesky/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/SparseCholesky/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h b/resources/3rdParty/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/AmbiVector.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/AmbiVector.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/AmbiVector.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/AmbiVector.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/SparseCore/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/Eigen/src/SparseCore/CompressedStorage.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/CompressedStorage.h
new file mode 100644
index 000000000..85a998aff
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SparseCore/CompressedStorage.h
@@ -0,0 +1,233 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPRESSED_STORAGE_H
+#define EIGEN_COMPRESSED_STORAGE_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \internal
+  * Stores a sparse set of values as a list of values and a list of indices.
+  *
+  */
+template<typename _Scalar,typename _Index>
+class CompressedStorage
+{
+  public:
+
+    typedef _Scalar Scalar;
+    typedef _Index Index;
+
+  protected:
+
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+  public:
+
+    CompressedStorage()
+      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+    {}
+
+    CompressedStorage(size_t size)
+      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+    {
+      resize(size);
+    }
+
+    CompressedStorage(const CompressedStorage& other)
+      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+    {
+      *this = other;
+    }
+
+    CompressedStorage& operator=(const CompressedStorage& other)
+    {
+      resize(other.size());
+      memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
+      memcpy(m_indices, other.m_indices, m_size * sizeof(Index));
+      return *this;
+    }
+
+    void swap(CompressedStorage& other)
+    {
+      std::swap(m_values, other.m_values);
+      std::swap(m_indices, other.m_indices);
+      std::swap(m_size, other.m_size);
+      std::swap(m_allocatedSize, other.m_allocatedSize);
+    }
+
+    ~CompressedStorage()
+    {
+      delete[] m_values;
+      delete[] m_indices;
+    }
+
+    void reserve(size_t size)
+    {
+      size_t newAllocatedSize = m_size + size;
+      if (newAllocatedSize > m_allocatedSize)
+        reallocate(newAllocatedSize);
+    }
+
+    void squeeze()
+    {
+      if (m_allocatedSize>m_size)
+        reallocate(m_size);
+    }
+
+    void resize(size_t size, float reserveSizeFactor = 0)
+    {
+      if (m_allocatedSize<size)
+        reallocate(size + size_t(reserveSizeFactor*size));
+      m_size = size;
+    }
+
+    void append(const Scalar& v, Index i)
+    {
+      Index id = static_cast<Index>(m_size);
+      resize(m_size+1, 1);
+      m_values[id] = v;
+      m_indices[id] = i;
+    }
+
+    inline size_t size() const { return m_size; }
+    inline size_t allocatedSize() const { return m_allocatedSize; }
+    inline void clear() { m_size = 0; }
+
+    inline Scalar& value(size_t i) { return m_values[i]; }
+    inline const Scalar& value(size_t i) const { return m_values[i]; }
+
+    inline Index& index(size_t i) { return m_indices[i]; }
+    inline const Index& index(size_t i) const { return m_indices[i]; }
+
+    static CompressedStorage Map(Index* indices, Scalar* values, size_t size)
+    {
+      CompressedStorage res;
+      res.m_indices = indices;
+      res.m_values = values;
+      res.m_allocatedSize = res.m_size = size;
+      return res;
+    }
+
+    /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
+    inline Index searchLowerIndex(Index key) const
+    {
+      return searchLowerIndex(0, m_size, key);
+    }
+
+    /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
+    inline Index searchLowerIndex(size_t start, size_t end, Index key) const
+    {
+      while(end>start)
+      {
+        size_t mid = (end+start)>>1;
+        if (m_indices[mid]<key)
+          start = mid+1;
+        else
+          end = mid;
+      }
+      return static_cast<Index>(start);
+    }
+
+    /** \returns the stored value at index \a key.
+      * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
+    inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const
+    {
+      if (m_size==0)
+        return defaultValue;
+      else if (key==m_indices[m_size-1])
+        return m_values[m_size-1];
+      // ^^  optimization: let's first check if it is the last coefficient
+      // (very common in high level algorithms)
+      const size_t id = searchLowerIndex(0,m_size-1,key);
+      return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+    }
+
+    /** Like at(), but the search is performed in the range [start,end) */
+    inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const
+    {
+      if (start>=end)
+        return defaultValue;
+      else if (end>start && key==m_indices[end-1])
+        return m_values[end-1];
+      // ^^  optimization: let's first check if it is the last coefficient
+      // (very common in high level algorithms)
+      const size_t id = searchLowerIndex(start,end-1,key);
+      return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+    }
+
+    /** \returns a reference to the value at index \a key
+      * If the value does not exist, then the value \a defaultValue is inserted
+      * such that the keys are sorted. */
+    inline Scalar& atWithInsertion(Index key, Scalar defaultValue = Scalar(0))
+    {
+      size_t id = searchLowerIndex(0,m_size,key);
+      if (id>=m_size || m_indices[id]!=key)
+      {
+        resize(m_size+1,1);
+        for (size_t j=m_size-1; j>id; --j)
+        {
+          m_indices[j] = m_indices[j-1];
+          m_values[j] = m_values[j-1];
+        }
+        m_indices[id] = key;
+        m_values[id] = defaultValue;
+      }
+      return m_values[id];
+    }
+
+    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+    {
+      size_t k = 0;
+      size_t n = size();
+      for (size_t i=0; i<n; ++i)
+      {
+        if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
+        {
+          value(k) = value(i);
+          index(k) = index(i);
+          ++k;
+        }
+      }
+      resize(k,0);
+    }
+
+  protected:
+
+    inline void reallocate(size_t size)
+    {
+      Scalar* newValues  = new Scalar[size];
+      Index* newIndices = new Index[size];
+      size_t copySize = (std::min)(size, m_size);
+      // copy
+      internal::smart_copy(m_values, m_values+copySize, newValues);
+      internal::smart_copy(m_indices, m_indices+copySize, newIndices);
+      // delete old stuff
+      delete[] m_values;
+      delete[] m_indices;
+      m_values = newValues;
+      m_indices = newIndices;
+      m_allocatedSize = size;
+    }
+
+  protected:
+    Scalar* m_values;
+    Index* m_indices;
+    size_t m_size;
+    size_t m_allocatedSize;
+
+};
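+
+// --- Illustrative usage sketch (editor's addition, not part of Eigen) -----------------
+// CompressedStorage is an internal helper; this hypothetical function only shows how the
+// append()/at()/searchLowerIndex() API declared above fits together. It assumes indices
+// are appended in increasing order, as the sparse containers do.
+inline void compressed_storage_usage_sketch()
+{
+  CompressedStorage<double, int> cs;
+  cs.reserve(3);
+  cs.append(1.0, 2);                 // value 1.0 at index 2
+  cs.append(3.0, 5);                 // value 3.0 at index 5
+  double v = cs.at(5);               // == 3.0
+  double w = cs.at(4, -1.0);         // index 4 absent: returns the default (-1.0)
+  int pos = cs.searchLowerIndex(5);  // largest k with all preceding indices < 5, here 1
+  (void)v; (void)w; (void)pos;       // silence unused-variable warnings
+}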
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPRESSED_STORAGE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/CoreIterators.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/CoreIterators.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/CoreIterators.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/CoreIterators.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseAssign.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseAssign.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/SparseAssign.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/SparseAssign.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseBlock.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseBlock.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/SparseBlock.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/SparseBlock.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
diff --git a/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseDenseProduct.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseDenseProduct.h
new file mode 100644
index 000000000..6f32940d6
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -0,0 +1,300 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEDENSEPRODUCT_H
+#define EIGEN_SPARSEDENSEPRODUCT_H
+
+namespace Eigen { 
+
+template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductReturnType
+{
+  typedef SparseTimeDenseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1>
+{
+  typedef SparseDenseOuterProduct<Lhs,Rhs,false> Type;
+};
+
+template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType
+{
+  typedef DenseTimeSparseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1>
+{
+  typedef SparseDenseOuterProduct<Rhs,Lhs,true> Type;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, bool Tr>
+struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+  typedef Sparse StorageKind;
+  typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
+                                            typename traits<Rhs>::Scalar>::ReturnType Scalar;
+  typedef typename Lhs::Index Index;
+  typedef typename Lhs::Nested LhsNested;
+  typedef typename Rhs::Nested RhsNested;
+  typedef typename remove_all<LhsNested>::type _LhsNested;
+  typedef typename remove_all<RhsNested>::type _RhsNested;
+
+  enum {
+    LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost,
+    RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost,
+
+    RowsAtCompileTime    = Tr ? int(traits<Rhs>::RowsAtCompileTime)     : int(traits<Lhs>::RowsAtCompileTime),
+    ColsAtCompileTime    = Tr ? int(traits<Lhs>::ColsAtCompileTime)     : int(traits<Rhs>::ColsAtCompileTime),
+    MaxRowsAtCompileTime = Tr ? int(traits<Rhs>::MaxRowsAtCompileTime)  : int(traits<Lhs>::MaxRowsAtCompileTime),
+    MaxColsAtCompileTime = Tr ? int(traits<Lhs>::MaxColsAtCompileTime)  : int(traits<Rhs>::MaxColsAtCompileTime),
+
+    Flags = Tr ? RowMajorBit : 0,
+
+    CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits<Scalar>::MulCost
+  };
+};
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs, bool Tr>
+class SparseDenseOuterProduct
+ : public SparseMatrixBase<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+  public:
+
+    typedef SparseMatrixBase<SparseDenseOuterProduct> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct)
+    typedef internal::traits<SparseDenseOuterProduct> Traits;
+
+  private:
+
+    typedef typename Traits::LhsNested LhsNested;
+    typedef typename Traits::RhsNested RhsNested;
+    typedef typename Traits::_LhsNested _LhsNested;
+    typedef typename Traits::_RhsNested _RhsNested;
+
+  public:
+
+    class InnerIterator;
+
+    EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs)
+      : m_lhs(lhs), m_rhs(rhs)
+    {
+      EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+    }
+
+    EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs)
+      : m_lhs(lhs), m_rhs(rhs)
+    {
+      EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); }
+
+    EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+    EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+  protected:
+    LhsNested m_lhs;
+    RhsNested m_rhs;
+};
+
+template<typename Lhs, typename Rhs, bool Transpose>
+class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
+{
+    typedef typename _LhsNested::InnerIterator Base;
+  public:
+    EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
+      : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
+    {
+    }
+
+    inline Index outer() const { return m_outer; }
+    inline Index row() const { return Transpose ? Base::row() : m_outer; }
+    inline Index col() const { return Transpose ? m_outer : Base::row(); }
+
+    inline Scalar value() const { return Base::value() * m_factor; }
+
+  protected:
+    int m_outer;
+    Scalar m_factor;
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<SparseTimeDenseProduct<Lhs,Rhs> >
+ : traits<ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+  typedef Dense StorageKind;
+  typedef MatrixXpr XprKind;
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
+         int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
+         bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
+struct sparse_time_dense_product_impl;
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, true>
+{
+  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef typename Lhs::Index Index;
+  typedef typename Lhs::InnerIterator LhsInnerIterator;
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  {
+    for(Index c=0; c<rhs.cols(); ++c)
+    {
+      int n = lhs.outerSize();
+      for(Index j=0; j<n; ++j)
+      {
+        typename Res::Scalar tmp(0);
+        for(LhsInnerIterator it(lhs,j); it ;++it)
+          tmp += it.value() * rhs.coeff(it.index(),c);
+        res.coeffRef(j,c) = alpha * tmp;
+      }
+    }
+  }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, true>
+{
+  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef typename Lhs::InnerIterator LhsInnerIterator;
+  typedef typename Lhs::Index Index;
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  {
+    for(Index c=0; c<rhs.cols(); ++c)
+    {
+      for(Index j=0; j<lhs.outerSize(); ++j)
+      {
+        typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
+        for(LhsInnerIterator it(lhs,j); it ;++it)
+          res.coeffRef(it.index(),c) += it.value() * rhs_j;
+      }
+    }
+  }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, false>
+{
+  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef typename Lhs::InnerIterator LhsInnerIterator;
+  typedef typename Lhs::Index Index;
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  {
+    for(Index j=0; j<lhs.outerSize(); ++j)
+    {
+      typename Res::RowXpr res_j(res.row(j));
+      for(LhsInnerIterator it(lhs,j); it ;++it)
+        res_j += (alpha*it.value()) * rhs.row(it.index());
+    }
+  }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, false>
+{
+  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef typename Lhs::InnerIterator LhsInnerIterator;
+  typedef typename Lhs::Index Index;
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  {
+    for(Index j=0; j<lhs.outerSize(); ++j)
+    {
+      typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
+      for(LhsInnerIterator it(lhs,j); it ;++it)
+        res.row(it.index()) += (alpha*it.value()) * rhs_j;
+    }
+  }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
+inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
+{
+  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType>::run(lhs, rhs, res, alpha);
+}
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseTimeDenseProduct
+  : public ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct)
+
+    SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {}
+
+    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    {
+      internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
+    }
+
+  private:
+    SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&);
+};
+
+
+// dense = dense * sparse
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<DenseTimeSparseProduct<Lhs,Rhs> >
+ : traits<ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+  typedef Dense StorageKind;
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class DenseTimeSparseProduct
+  : public ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct)
+
+    DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {}
+
+    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    {
+      Transpose<const _LhsNested> lhs_t(m_lhs);
+      Transpose<const _RhsNested> rhs_t(m_rhs);
+      Transpose<Dest> dest_t(dest);
+      internal::sparse_time_dense_product(rhs_t, lhs_t, dest_t, alpha);
+    }
+
+  private:
+    DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
+};
+
+// sparse * dense
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+  return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
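+
+// A minimal usage sketch of the operator defined above (assuming a filled
+// column-major SparseMatrix<double> A and a dense VectorXd x of matching size):
+//   Eigen::VectorXd y = A * x;   // evaluates through SparseTimeDenseProduct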
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEDENSEPRODUCT_H
diff --git a/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h
new file mode 100644
index 000000000..ccba02124
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h
@@ -0,0 +1,192 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+
+namespace Eigen { 
+
+// The product of a diagonal matrix with a sparse matrix can be easily
+// implemented using expression templates.
+// We have to consider two very different cases:
+// 1 - diag * row-major sparse
+//     => each inner vector <=> scalar * sparse vector product
+//     => so we can reuse CwiseUnaryOp::InnerIterator
+// 2 - diag * col-major sparse
+//     => each inner vector <=> dense vector * sparse vector cwise product
+//     => again, we can reuse the specialization of CwiseBinaryOp::InnerIterator
+//        for that particular case
+// The two other cases are symmetric.
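+//
+// For instance (a usage sketch, assuming a SparseMatrix<double> S and a dense
+// VectorXd d whose size matches the corresponding dimension of S):
+//   Eigen::SparseMatrix<double> R1 = d.asDiagonal() * S;   // scales the rows of S
+//   Eigen::SparseMatrix<double> R2 = S * d.asDiagonal();   // scales the columns of S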
+
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<SparseDiagonalProduct<Lhs, Rhs> >
+{
+  typedef typename remove_all<Lhs>::type _Lhs;
+  typedef typename remove_all<Rhs>::type _Rhs;
+  typedef typename _Lhs::Scalar Scalar;
+  typedef typename promote_index_type<typename traits<Lhs>::Index,
+                                         typename traits<Rhs>::Index>::type Index;
+  typedef Sparse StorageKind;
+  typedef MatrixXpr XprKind;
+  enum {
+    RowsAtCompileTime = _Lhs::RowsAtCompileTime,
+    ColsAtCompileTime = _Rhs::ColsAtCompileTime,
+
+    MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime,
+
+    SparseFlags = is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags),
+    Flags = (SparseFlags&RowMajorBit),
+    CoeffReadCost = Dynamic
+  };
+};
+
+enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor};
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType, int RhsMode, int LhsMode>
+class sparse_diagonal_product_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseDiagonalProduct
+  : public SparseMatrixBase<SparseDiagonalProduct<Lhs,Rhs> >,
+    internal::no_assignment_operator
+{
+    typedef typename Lhs::Nested LhsNested;
+    typedef typename Rhs::Nested RhsNested;
+
+    typedef typename internal::remove_all<LhsNested>::type _LhsNested;
+    typedef typename internal::remove_all<RhsNested>::type _RhsNested;
+
+    enum {
+      LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal
+              : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor,
+      RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal
+              : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor
+    };
+
+  public:
+
+    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
+
+    typedef internal::sparse_diagonal_product_inner_iterator_selector
+                <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+
+    EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
+      : m_lhs(lhs), m_rhs(rhs)
+    {
+      eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+    EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+    EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+  protected:
+    LhsNested m_lhs;
+    RhsNested m_rhs;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
+  : public CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator
+{
+    typedef typename CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator Base;
+    typedef typename Lhs::Index Index;
+  public:
+    inline sparse_diagonal_product_inner_iterator_selector(
+              const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
+    {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
+  : public CwiseBinaryOp<
+      scalar_product_op<typename Lhs::Scalar>,
+      SparseInnerVectorSet<Rhs,1>,
+      typename Lhs::DiagonalVectorType>::InnerIterator
+{
+    typedef typename CwiseBinaryOp<
+      scalar_product_op<typename Lhs::Scalar>,
+      SparseInnerVectorSet<Rhs,1>,
+      typename Lhs::DiagonalVectorType>::InnerIterator Base;
+    typedef typename Lhs::Index Index;
+    Index m_outer;
+  public:
+    inline sparse_diagonal_product_inner_iterator_selector(
+              const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0), m_outer(outer)
+    {}
+    
+    inline Index outer() const { return m_outer; }
+    inline Index col() const { return m_outer; }
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseColMajor,SDP_IsDiagonal>
+  : public CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator
+{
+    typedef typename CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator Base;
+    typedef typename Lhs::Index Index;
+  public:
+    inline sparse_diagonal_product_inner_iterator_selector(
+              const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
+    {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
+  : public CwiseBinaryOp<
+      scalar_product_op<typename Rhs::Scalar>,
+      SparseInnerVectorSet<Lhs,1>,
+      Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
+{
+    typedef typename CwiseBinaryOp<
+      scalar_product_op<typename Rhs::Scalar>,
+      SparseInnerVectorSet<Lhs,1>,
+      Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+    typedef typename Lhs::Index Index;
+    Index m_outer;
+  public:
+    inline sparse_diagonal_product_inner_iterator_selector(
+              const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0), m_outer(outer)
+    {}
+    
+    inline Index outer() const { return m_outer; }
+    inline Index row() const { return m_outer; }
+};
+
+} // end namespace internal
+
+// SparseMatrixBase functions
+
+template<typename Derived>
+template<typename OtherDerived>
+const SparseDiagonalProduct<Derived,OtherDerived>
+SparseMatrixBase<Derived>::operator*(const DiagonalBase<OtherDerived> &other) const
+{
+  return SparseDiagonalProduct<Derived,OtherDerived>(this->derived(), other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseDot.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseDot.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/SparseDot.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/SparseDot.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseFuzzy.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseFuzzy.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/SparseFuzzy.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/SparseFuzzy.h
diff --git a/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseMatrix.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseMatrix.h
new file mode 100644
index 000000000..fc3749b5f
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseMatrix.h
@@ -0,0 +1,1134 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEMATRIX_H
+#define EIGEN_SPARSEMATRIX_H
+
+namespace Eigen { 
+
+/** \ingroup SparseCore_Module
+  *
+  * \class SparseMatrix
+  *
+  * \brief A versatile sparse matrix representation
+  *
+  * This class implements a more versatile variant of the common \em compressed row/column storage format.
+  * Each column's (resp. row's) non-zeros are stored as pairs of values and associated row (resp. column) indices.
+  * All the non-zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
+  * space in between the non-zeros of two successive columns (resp. rows) so that insertion of a new non-zero
+  * can be done with limited memory reallocation and copying.
+  *
+  * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
+  * compatible with many libraries.
+  *
+  * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
+  *
+  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+  * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
+  *                 is RowMajor. The default is 0 which means column-major.
+  * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
+  */
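+
+// A minimal usage sketch (assuming double coefficients, the default column-major
+// storage, and placeholder values for rows, cols, i, j and v):
+//   Eigen::SparseMatrix<double> A(rows, cols);
+//   A.reserve(Eigen::VectorXi::Constant(cols, 6));   // expect ~6 non-zeros per column
+//   A.insert(i, j) = v;                              // once per non-zero entry
+//   A.makeCompressed();                              // switch to the compressed format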
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseMatrix<_Scalar, _Options, _Index> >
+{
+  typedef _Scalar Scalar;
+  typedef _Index Index;
+  typedef Sparse StorageKind;
+  typedef MatrixXpr XprKind;
+  enum {
+    RowsAtCompileTime = Dynamic,
+    ColsAtCompileTime = Dynamic,
+    MaxRowsAtCompileTime = Dynamic,
+    MaxColsAtCompileTime = Dynamic,
+    Flags = _Options | NestByRefBit | LvalueBit,
+    CoeffReadCost = NumTraits<Scalar>::ReadCost,
+    SupportedAccessPatterns = InnerRandomAccessPattern
+  };
+};
+
+template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
+struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
+{
+  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+
+  typedef _Scalar Scalar;
+  typedef Dense StorageKind;
+  typedef _Index Index;
+  typedef MatrixXpr XprKind;
+
+  enum {
+    RowsAtCompileTime = Dynamic,
+    ColsAtCompileTime = 1,
+    MaxRowsAtCompileTime = Dynamic,
+    MaxColsAtCompileTime = 1,
+    Flags = 0,
+    CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
+  };
+};
+
+} // end namespace internal
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseMatrix
+  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
+{
+  public:
+    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
+    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
+    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
+
+    typedef MappedSparseMatrix<Scalar,Flags> Map;
+    using Base::IsRowMajor;
+    typedef internal::CompressedStorage<Scalar,Index> Storage;
+    enum {
+      Options = _Options
+    };
+
+  protected:
+
+    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+    Index m_outerSize;
+    Index m_innerSize;
+    Index* m_outerIndex;
+    Index* m_innerNonZeros;     // optional, if null then the data is compressed
+    Storage m_data;
+    
+    Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
+    const  Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
+
+  public:
+    
+    /** \returns whether \c *this is in compressed form. */
+    inline bool isCompressed() const { return m_innerNonZeros==0; }
+
+    /** \returns the number of rows of the matrix */
+    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+    /** \returns the number of columns of the matrix */
+    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+
+    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
+    inline Index innerSize() const { return m_innerSize; }
+    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
+    inline Index outerSize() const { return m_outerSize; }
+    
+    /** \returns a const pointer to the array of values.
+      * This function is aimed at interoperability with other libraries.
+      * \sa innerIndexPtr(), outerIndexPtr() */
+    inline const Scalar* valuePtr() const { return &m_data.value(0); }
+    /** \returns a non-const pointer to the array of values.
+      * This function is aimed at interoperability with other libraries.
+      * \sa innerIndexPtr(), outerIndexPtr() */
+    inline Scalar* valuePtr() { return &m_data.value(0); }
+
+    /** \returns a const pointer to the array of inner indices.
+      * This function is aimed at interoperability with other libraries.
+      * \sa valuePtr(), outerIndexPtr() */
+    inline const Index* innerIndexPtr() const { return &m_data.index(0); }
+    /** \returns a non-const pointer to the array of inner indices.
+      * This function is aimed at interoperability with other libraries.
+      * \sa valuePtr(), outerIndexPtr() */
+    inline Index* innerIndexPtr() { return &m_data.index(0); }
+
+    /** \returns a const pointer to the array of the starting positions of the inner vectors.
+      * This function is aimed at interoperability with other libraries.
+      * \sa valuePtr(), innerIndexPtr() */
+    inline const Index* outerIndexPtr() const { return m_outerIndex; }
+    /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
+      * This function is aimed at interoperability with other libraries.
+      * \sa valuePtr(), innerIndexPtr() */
+    inline Index* outerIndexPtr() { return m_outerIndex; }
+
+    /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
+      * This function is aimed at interoperability with other libraries.
+      * \warning it returns the null pointer 0 in compressed mode */
+    inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
+    /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
+      * This function is aimed at interoperability with other libraries.
+      * \warning it returns the null pointer 0 in compressed mode */
+    inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
+
+    /** \internal */
+    inline Storage& data() { return m_data; }
+    /** \internal */
+    inline const Storage& data() const { return m_data; }
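+
+    // An interoperability sketch (assuming a compressed, column-major
+    // SparseMatrix<double> A): the three arrays below form a standard CSC layout.
+    //   const double* values = A.valuePtr();        // nonZeros() stored values
+    //   const int*    inner  = A.innerIndexPtr();   // row index of each stored value
+    //   const int*    outer  = A.outerIndexPtr();   // cols()+1 column start offsets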
+
+    /** \returns the value of the matrix at position \a i, \a j
+      * This function returns Scalar(0) if the element is an explicit \em zero */
+    inline Scalar coeff(Index row, Index col) const
+    {
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
+      return m_data.atInRange(m_outerIndex[outer], end, inner);
+    }
+
+    /** \returns a non-const reference to the value of the matrix at position \a i, \a j
+      *
+      * If the element does not exist then it is inserted via the insert(Index,Index) function
+      * which itself turns the matrix into a non-compressed form if it was not already.
+      *
+      * This is an O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
+      * function if the element does not already exist.
+      */
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+
+      Index start = m_outerIndex[outer];
+      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
+      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+      if(end<=start)
+        return insert(row,col);
+      const Index p = m_data.searchLowerIndex(start,end-1,inner);
+      if((p<end) && (m_data.index(p)==inner))
+        return m_data.value(p);
+      else
+        return insert(row,col);
+    }
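+
+    // A lookup/update sketch (assuming a SparseMatrix<double> A and in-range indices i, j):
+    //   double x = A.coeff(i, j);    // read-only: returns 0 if (i,j) is not stored
+    //   A.coeffRef(i, j) += 1.0;     // read-write: inserts the entry if it does not exist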
+
+    /** \returns a reference to a new non-zero coefficient with coordinates \a row x \a col.
+      * The non-zero coefficient must \b not already exist.
+      *
+      * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
+      * mode while reserving room for 2 non-zeros per inner vector. It is strongly recommended to first
+      * call reserve(const SizesType &) to reserve a more appropriate number of elements per
+      * inner vector that better matches your scenario.
+      *
+      * This function performs a sorted insertion in O(1) if the elements of each inner vector are
+      * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
+      *
+      */
+    EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
+    {
+      if(isCompressed())
+      {
+        reserve(VectorXi::Constant(outerSize(), 2));
+      }
+      return insertUncompressed(row,col);
+    }
+
+  public:
+
+    class InnerIterator;
+    class ReverseInnerIterator;
+
+    /** Removes all non-zeros but keeps the allocated memory */
+    inline void setZero()
+    {
+      m_data.clear();
+      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+      if(m_innerNonZeros)
+        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
+    }
+
+    /** \returns the number of non zero coefficients */
+    inline Index nonZeros() const
+    {
+      if(m_innerNonZeros)
+        return innerNonZeros().sum();
+      return static_cast<Index>(m_data.size());
+    }
+
+    /** Preallocates \a reserveSize non zeros.
+      *
+      * Precondition: the matrix must be in compressed mode. */
+    inline void reserve(Index reserveSize)
+    {
+      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
+      m_data.reserve(reserveSize);
+    }
+    
+    #ifdef EIGEN_PARSED_BY_DOXYGEN
+    /** Preallocates \a reserveSize[\c j] non-zeros for each column (resp. row) \c j.
+      *
+      * This function turns the matrix into non-compressed mode. */
+    template<class SizesType>
+    inline void reserve(const SizesType& reserveSizes);
+    #else
+    template<class SizesType>
+    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
+    {
+      EIGEN_UNUSED_VARIABLE(enableif);
+      reserveInnerVectors(reserveSizes);
+    }
+    template<class SizesType>
+    inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
+    #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
+        typename
+    #endif
+        SizesType::Scalar())
+    {
+      EIGEN_UNUSED_VARIABLE(enableif);
+      reserveInnerVectors(reserveSizes);
+    }
+    #endif // EIGEN_PARSED_BY_DOXYGEN
+  protected:
+    template<class SizesType>
+    inline void reserveInnerVectors(const SizesType& reserveSizes)
+    {
+      
+      if(isCompressed())
+      {
+        std::size_t totalReserveSize = 0;
+        // turn the matrix into non-compressed mode
+        m_innerNonZeros = new Index[m_outerSize];
+        
+        // temporarily use m_innerNonZeros to hold the new starting points.
+        Index* newOuterIndex = m_innerNonZeros;
+        
+        Index count = 0;
+        for(Index j=0; j<m_outerSize; ++j)
+        {
+          newOuterIndex[j] = count;
+          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
+          totalReserveSize += reserveSizes[j];
+        }
+        m_data.reserve(totalReserveSize);
+        std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
+        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
+        {
+          ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
+          for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+          {
+            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
+            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
+          }
+          previousOuterIndex = m_outerIndex[j];
+          m_outerIndex[j] = newOuterIndex[j];
+          m_innerNonZeros[j] = innerNNZ;
+        }
+        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
+        
+        m_data.resize(m_outerIndex[m_outerSize]);
+      }
+      else
+      {
+        Index* newOuterIndex = new Index[m_outerSize+1];
+        Index count = 0;
+        for(Index j=0; j<m_outerSize; ++j)
+        {
+          newOuterIndex[j] = count;
+          Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
+          Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
+          count += toReserve + m_innerNonZeros[j];
+        }
+        newOuterIndex[m_outerSize] = count;
+        
+        m_data.resize(count);
+        for(ptrdiff_t j=m_outerSize-1; j>=0; --j)
+        {
+          std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
+          if(offset>0)
+          {
+            std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
+            for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+            {
+              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
+              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
+            }
+          }
+        }
+        
+        std::swap(m_outerIndex, newOuterIndex);
+        delete[] newOuterIndex;
+      }
+      
+    }
+  public:
+
+    //--- low level purely coherent filling ---
+
+    /** \internal
+      * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+      * - the nonzero does not already exist
+      * - the new coefficient is the last one according to the storage order
+      *
+      * Before filling a given inner vector you must call the startVec(Index) function.
+      *
+      * After an insertion session, you should call the finalize() function.
+      *
+      * \sa insert, insertBackByOuterInner, startVec */
+    inline Scalar& insertBack(Index row, Index col)
+    {
+      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+    }
+
+    /** \internal
+      * \sa insertBack, startVec */
+    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+    {
+      eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
+      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
+      Index p = m_outerIndex[outer+1];
+      ++m_outerIndex[outer+1];
+      m_data.append(0, inner);
+      return m_data.value(p);
+    }
+
+    /** \internal
+      * \warning use it only if you know what you are doing */
+    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
+    {
+      Index p = m_outerIndex[outer+1];
+      ++m_outerIndex[outer+1];
+      m_data.append(0, inner);
+      return m_data.value(p);
+    }
+
+    /** \internal
+      * \sa insertBack, insertBackByOuterInner */
+    inline void startVec(Index outer)
+    {
+      eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
+      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
+      m_outerIndex[outer+1] = m_outerIndex[outer];
+    }
+
+    /** \internal
+      * Must be called after inserting a set of non zero entries using the low level compressed API.
+      */
+    inline void finalize()
+    {
+      if(isCompressed())
+      {
+        Index size = static_cast<Index>(m_data.size());
+        Index i = m_outerSize;
+        // find the last filled column
+        while (i>=0 && m_outerIndex[i]==0)
+          --i;
+        ++i;
+        while (i<=m_outerSize)
+        {
+          m_outerIndex[i] = size;
+          ++i;
+        }
+      }
+    }
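+
+    // A schematic sketch of the low level compressed filling protocol above
+    // (assuming a freshly resized, compressed SparseMatrix<double> A):
+    //   for (int j = 0; j < A.outerSize(); ++j)
+    //   {
+    //     A.startVec(j);
+    //     // append each non-zero of inner vector j in increasing inner index i:
+    //     A.insertBackByOuterInner(j, i) = v;
+    //   }
+    //   A.finalize();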
+
+    //---
+
+    template<typename InputIterators>
+    void setFromTriplets(const InputIterators& begin, const InputIterators& end);
+
+    void sumupDuplicates();
+
+    //---
+    
+    /** \internal
+      * same as insert(Index,Index) except that the indices are given relative to the storage order */
+    EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i)
+    {
+      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
+    }
+
+    /** Turns the matrix into the \em compressed format.
+      */
+    void makeCompressed()
+    {
+      if(isCompressed())
+        return;
+      
+      Index oldStart = m_outerIndex[1];
+      m_outerIndex[1] = m_innerNonZeros[0];
+      for(Index j=1; j<m_outerSize; ++j)
+      {
+        Index nextOldStart = m_outerIndex[j+1];
+        std::ptrdiff_t offset = oldStart - m_outerIndex[j];
+        if(offset>0)
+        {
+          for(Index k=0; k<m_innerNonZeros[j]; ++k)
+          {
+            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
+            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
+          }
+        }
+        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
+        oldStart = nextOldStart;
+      }
+      delete[] m_innerNonZeros;
+      m_innerNonZeros = 0;
+      m_data.resize(m_outerIndex[m_outerSize]);
+      m_data.squeeze();
+    }
+
+    /** Suppresses all non-zeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
+    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+    {
+      prune(default_prunning_func(reference,epsilon));
+    }
+    
+    /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
+      * The functor type \a KeepFunc must implement the following function:
+      * \code
+      * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
+      * \endcode
+      * \sa prune(Scalar,RealScalar)
+      */
+    template<typename KeepFunc>
+    void prune(const KeepFunc& keep = KeepFunc())
+    {
+      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
+      // TODO also implement a unit test
+      makeCompressed();
+
+      Index k = 0;
+      for(Index j=0; j<m_outerSize; ++j)
+      {
+        Index previousStart = m_outerIndex[j];
+        m_outerIndex[j] = k;
+        Index end = m_outerIndex[j+1];
+        for(Index i=previousStart; i<end; ++i)
+        {
+          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
+          {
+            m_data.value(k) = m_data.value(i);
+            m_data.index(k) = m_data.index(i);
+            ++k;
+          }
+        }
+      }
+      m_outerIndex[m_outerSize] = k;
+      m_data.resize(k,0);
+    }
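+
+    // A pruning sketch (assuming a SparseMatrix<double> A): drop entries much smaller
+    // than 1.0, or keep only the strictly upper triangular part via a custom functor.
+    //   A.prune(1.0);
+    //   struct KeepUpper {
+    //     bool operator()(const int& row, const int& col, const double&) const
+    //     { return row < col; }
+    //   };
+    //   A.prune(KeepUpper());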
+
+    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
+      * \sa resizeNonZeros(Index), reserve(), setZero()
+      */
+    void resize(Index rows, Index cols)
+    {
+      const Index outerSize = IsRowMajor ? rows : cols;
+      m_innerSize = IsRowMajor ? cols : rows;
+      m_data.clear();
+      if (m_outerSize != outerSize || m_outerSize==0)
+      {
+        delete[] m_outerIndex;
+        m_outerIndex = new Index [outerSize+1];
+        m_outerSize = outerSize;
+      }
+      if(m_innerNonZeros)
+      {
+        delete[] m_innerNonZeros;
+        m_innerNonZeros = 0;
+      }
+      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+    }
+
+    /** \internal
+      * Resize the nonzero vector to \a size */
+    void resizeNonZeros(Index size)
+    {
+      // TODO remove this function
+      m_data.resize(size);
+    }
+
+    /** \returns a const expression of the diagonal coefficients */
+    const Diagonal<const SparseMatrix> diagonal() const { return *this; }
+
+    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
+    inline SparseMatrix()
+      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      resize(0, 0);
+    }
+
+    /** Constructs a \a rows \c x \a cols empty matrix */
+    inline SparseMatrix(Index rows, Index cols)
+      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      resize(rows, cols);
+    }
+
+    /** Constructs a sparse matrix from the sparse expression \a other */
+    template<typename OtherDerived>
+    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      *this = other.derived();
+    }
+
+    /** Copy constructor (it performs a deep copy) */
+    inline SparseMatrix(const SparseMatrix& other)
+      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      *this = other.derived();
+    }
+
+    /** \brief Copy constructor with in-place evaluation */
+    template<typename OtherDerived>
+    SparseMatrix(const ReturnByValue<OtherDerived>& other)
+      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      initAssignment(other);
+      other.evalTo(*this);
+    }
+
+    /** Swaps the content of two sparse matrices of the same type.
+      * This is a fast operation that simply swaps the underlying pointers and parameters. */
+    inline void swap(SparseMatrix& other)
+    {
+      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+      std::swap(m_outerIndex, other.m_outerIndex);
+      std::swap(m_innerSize, other.m_innerSize);
+      std::swap(m_outerSize, other.m_outerSize);
+      std::swap(m_innerNonZeros, other.m_innerNonZeros);
+      m_data.swap(other.m_data);
+    }
+
+    inline SparseMatrix& operator=(const SparseMatrix& other)
+    {
+      if (other.isRValue())
+      {
+        swap(other.const_cast_derived());
+      }
+      else
+      {
+        initAssignment(other);
+        if(other.isCompressed())
+        {
+          memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
+          m_data = other.m_data;
+        }
+        else
+        {
+          Base::operator=(other);
+        }
+      }
+      return *this;
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename Lhs, typename Rhs>
+    inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+    { return Base::operator=(product); }
+    
+    template<typename OtherDerived>
+    inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
+    {
+      initAssignment(other);
+      return Base::operator=(other.derived());
+    }
+    
+    template<typename OtherDerived>
+    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
+    { return Base::operator=(other.derived()); }
+    #endif
+
+    template<typename OtherDerived>
+    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
+    {
+      const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+      if (needToTranspose)
+      {
+        // two passes algorithm:
+        //  1 - compute the number of coeffs per dest inner vector
+        //  2 - do the actual copy/eval
+        // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
+        typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+        typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+        OtherCopy otherCopy(other.derived());
+
+        SparseMatrix dest(other.rows(),other.cols());
+        Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
+
+        // pass 1
+        // FIXME the above copy could be merged with that pass
+        for (Index j=0; j<otherCopy.outerSize(); ++j)
+          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+            ++dest.m_outerIndex[it.index()];
+
+        // prefix sum
+        Index count = 0;
+        VectorXi positions(dest.outerSize());
+        for (Index j=0; j<dest.outerSize(); ++j)
+        {
+          Index tmp = dest.m_outerIndex[j];
+          dest.m_outerIndex[j] = count;
+          positions[j] = count;
+          count += tmp;
+        }
+        dest.m_outerIndex[dest.outerSize()] = count;
+        // alloc
+        dest.m_data.resize(count);
+        // pass 2
+        for (Index j=0; j<otherCopy.outerSize(); ++j)
+        {
+          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+          {
+            Index pos = positions[it.index()]++;
+            dest.m_data.index(pos) = j;
+            dest.m_data.value(pos) = it.value();
+          }
+        }
+        this->swap(dest);
+        return *this;
+      }
+      else
+      {
+        if(other.isRValue())
+          initAssignment(other.derived());
+        // there is no special optimization
+        return Base::operator=(other.derived());
+      }
+    }
+
+    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
+    {
+      EIGEN_DBG_SPARSE(
+        s << "Nonzero entries:\n";
+        if(m.isCompressed())
+          for (Index i=0; i<m.nonZeros(); ++i)
+            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+        else
+          for (Index i=0; i<m.outerSize(); ++i)
+          {
+            int p = m.m_outerIndex[i];
+            int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
+            Index k=p;
+            for (; k<pe; ++k)
+              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
+            for (; k<m.m_outerIndex[i+1]; ++k)
+              s << "(_,_) ";
+          }
+        s << std::endl;
+        s << std::endl;
+        s << "Outer pointers:\n";
+        for (Index i=0; i<m.outerSize(); ++i)
+          s << m.m_outerIndex[i] << " ";
+        s << " $" << std::endl;
+        if(!m.isCompressed())
+        {
+          s << "Inner non zeros:\n";
+          for (Index i=0; i<m.outerSize(); ++i)
+            s << m.m_innerNonZeros[i] << " ";
+          s << " $" << std::endl;
+        }
+        s << std::endl;
+      );
+      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
+      return s;
+    }
+
+    /** Destructor */
+    inline ~SparseMatrix()
+    {
+      delete[] m_outerIndex;
+      delete[] m_innerNonZeros;
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** Overloaded for performance */
+    Scalar sum() const;
+#endif
+    
+#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
+#     include EIGEN_SPARSEMATRIX_PLUGIN
+#   endif
+
+protected:
+
+    template<typename Other>
+    void initAssignment(const Other& other)
+    {
+      resize(other.rows(), other.cols());
+      if(m_innerNonZeros)
+      {
+        delete[] m_innerNonZeros;
+        m_innerNonZeros = 0;
+      }
+    }
+
+    /** \internal
+      * \sa insert(Index,Index) */
+    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
+    {
+      eigen_assert(isCompressed());
+
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+
+      Index previousOuter = outer;
+      if (m_outerIndex[outer+1]==0)
+      {
+        // we start a new inner vector
+        while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
+        {
+          m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+          --previousOuter;
+        }
+        m_outerIndex[outer+1] = m_outerIndex[outer];
+      }
+
+      // here we have to handle the tricky case where the outerIndex array
+      // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
+      // the 2nd inner vector...
+      bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+                    && (size_t(m_outerIndex[outer+1]) == m_data.size());
+
+      size_t startId = m_outerIndex[outer];
+      // FIXME let's make sure sizeof(long int) == sizeof(size_t)
+      size_t p = m_outerIndex[outer+1];
+      ++m_outerIndex[outer+1];
+
+      float reallocRatio = 1;
+      if (m_data.allocatedSize()<=m_data.size())
+      {
+        // if there is no preallocated memory, let's reserve a minimum of 32 elements
+        if (m_data.size()==0)
+        {
+          m_data.reserve(32);
+        }
+        else
+        {
+          // we need to reallocate the data, to reduce multiple reallocations
+          // we use a smart resize algorithm based on the current filling ratio
+          // in addition, we use float to avoid integer overflows
+          float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
+          reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
+          // furthermore we bound the realloc ratio to:
+          //   1) reduce multiple minor reallocations when the matrix is almost filled
+          //   2) avoid allocating too much memory when the matrix is almost empty
+          reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
+        }
+      }
+      m_data.resize(m_data.size()+1,reallocRatio);
+
+      if (!isLastVec)
+      {
+        if (previousOuter==-1)
+        {
+          // oops wrong guess.
+          // let's correct the outer offsets
+          for (Index k=0; k<=(outer+1); ++k)
+            m_outerIndex[k] = 0;
+          Index k=outer+1;
+          while(m_outerIndex[k]==0)
+            m_outerIndex[k++] = 1;
+          while (k<=m_outerSize && m_outerIndex[k]!=0)
+            m_outerIndex[k++]++;
+          p = 0;
+          --k;
+          k = m_outerIndex[k]-1;
+          while (k>0)
+          {
+            m_data.index(k) = m_data.index(k-1);
+            m_data.value(k) = m_data.value(k-1);
+            k--;
+          }
+        }
+        else
+        {
+          // we are not inserting into the last inner vec
+          // update outer indices:
+          Index j = outer+2;
+          while (j<=m_outerSize && m_outerIndex[j]!=0)
+            m_outerIndex[j++]++;
+          --j;
+          // shift data of last vecs:
+          Index k = m_outerIndex[j]-1;
+          while (k>=Index(p))
+          {
+            m_data.index(k) = m_data.index(k-1);
+            m_data.value(k) = m_data.value(k-1);
+            k--;
+          }
+        }
+      }
+
+      while ( (p > startId) && (m_data.index(p-1) > inner) )
+      {
+        m_data.index(p) = m_data.index(p-1);
+        m_data.value(p) = m_data.value(p-1);
+        --p;
+      }
+
+      m_data.index(p) = inner;
+      return (m_data.value(p) = 0);
+    }
+
+    /** \internal
+      * A vector object that is equal to 0 everywhere except at position \a i, where its value is \a v */
+    class SingletonVector
+    {
+        Index m_index;
+        Index m_value;
+      public:
+        typedef Index value_type;
+        SingletonVector(Index i, Index v)
+          : m_index(i), m_value(v)
+        {}
+
+        Index operator[](Index i) const { return i==m_index ? m_value : 0; }
+    };
+
+    /** \internal
+      * \sa insert(Index,Index) */
+    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
+    {
+      eigen_assert(!isCompressed());
+
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+
+      std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
+      std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
+      if(innerNNZ>=room)
+      {
+        // this inner vector is full, we need to reallocate the whole buffer :(
+        reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
+      }
+
+      Index startId = m_outerIndex[outer];
+      Index p = startId + m_innerNonZeros[outer];
+      while ( (p > startId) && (m_data.index(p-1) > inner) )
+      {
+        m_data.index(p) = m_data.index(p-1);
+        m_data.value(p) = m_data.value(p-1);
+        --p;
+      }
+      eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
+
+      m_innerNonZeros[outer]++;
+
+      m_data.index(p) = inner;
+      return (m_data.value(p) = 0);
+    }
+
+public:
+    /** \internal
+      * \sa insert(Index,Index) */
+    inline Scalar& insertBackUncompressed(Index row, Index col)
+    {
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+
+      eigen_assert(!isCompressed());
+      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
+
+      Index p = m_outerIndex[outer] + m_innerNonZeros[outer];
+      m_innerNonZeros[outer]++;
+      m_data.index(p) = inner;
+      return (m_data.value(p) = 0);
+    }
+
+private:
+  static void check_template_parameters()
+  {
+    EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+  }
+
+  struct default_prunning_func {
+    default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
+    inline bool operator() (const Index&, const Index&, const Scalar& value) const
+    {
+      return !internal::isMuchSmallerThan(value, reference, epsilon);
+    }
+    Scalar reference;
+    RealScalar epsilon;
+  };
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
+{
+  public:
+    InnerIterator(const SparseMatrix& mat, Index outer)
+      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
+    {
+      if(mat.isCompressed())
+        m_end = mat.m_outerIndex[outer+1];
+      else
+        m_end = m_id + mat.m_innerNonZeros[outer];
+    }
+
+    inline InnerIterator& operator++() { m_id++; return *this; }
+
+    inline const Scalar& value() const { return m_values[m_id]; }
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
+
+    inline Index index() const { return m_indices[m_id]; }
+    inline Index outer() const { return m_outer; }
+    inline Index row() const { return IsRowMajor ? m_outer : index(); }
+    inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+    inline operator bool() const { return (m_id < m_end); }
+
+  protected:
+    const Scalar* m_values;
+    const Index* m_indices;
+    const Index m_outer;
+    Index m_id;
+    Index m_end;
+};
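+
+// An iteration sketch (assuming a column-major SparseMatrix<double> A): visit every
+// stored non-zero one outer vector (here: one column) at a time.
+//   double sum = 0;
+//   for (int j = 0; j < A.outerSize(); ++j)
+//     for (Eigen::SparseMatrix<double>::InnerIterator it(A, j); it; ++it)
+//       sum += it.value();   // it.row(), it.col() and it.index() are also available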
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
+{
+  public:
+    ReverseInnerIterator(const SparseMatrix& mat, Index outer)
+      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
+    {
+      if(mat.isCompressed())
+        m_id = mat.m_outerIndex[outer+1];
+      else
+        m_id = m_start + mat.m_innerNonZeros[outer];
+    }
+
+    inline ReverseInnerIterator& operator--() { --m_id; return *this; }
+
+    inline const Scalar& value() const { return m_values[m_id-1]; }
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
+
+    inline Index index() const { return m_indices[m_id-1]; }
+    inline Index outer() const { return m_outer; }
+    inline Index row() const { return IsRowMajor ? m_outer : index(); }
+    inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+    inline operator bool() const { return (m_id > m_start); }
+
+  protected:
+    const Scalar* m_values;
+    const Index* m_indices;
+    const Index m_outer;
+    Index m_id;
+    const Index m_start;
+};
+
+namespace internal {
+
+template<typename InputIterator, typename SparseMatrixType>
+void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
+{
+  EIGEN_UNUSED_VARIABLE(Options);
+  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
+  typedef typename SparseMatrixType::Scalar Scalar;
+  typedef typename SparseMatrixType::Index Index;
+  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
+
+  // pass 1: count the nnz per inner-vector
+  VectorXi wi(trMat.outerSize());
+  wi.setZero();
+  for(InputIterator it(begin); it!=end; ++it)
+    wi(IsRowMajor ? it->col() : it->row())++;
+
+  // pass 2: insert all the elements into trMat
+  trMat.reserve(wi);
+  for(InputIterator it(begin); it!=end; ++it)
+    trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
+
+  // pass 3:
+  trMat.sumupDuplicates();
+
+  // pass 4: transposed copy -> implicit sorting
+  mat = trMat;
+}
+
+}
+
+
+/** Fills the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
+  *
+  * A \em triplet is a tuple (i,j,value) defining a non-zero element.
+  * The input list of triplets does not have to be sorted, and can contain duplicate elements.
+  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
+  * This is an \em O(n) operation, with \em n the number of triplet elements.
+  * The initial contents of \c *this are destroyed.
+  * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
+  * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
+  *
+  * The \a InputIterators value_type must provide the following interface:
+  * \code
+  * Scalar value() const; // the value
+  * Index row() const;    // the row index i
+  * Index col() const;    // the column index j
+  * \endcode
+  * See for instance the Eigen::Triplet template class.
+  *
+  * Here is a typical usage example:
+  * \code
+    typedef Triplet<double> T;
+    std::vector<T> tripletList;
+    tripletList.reserve(estimation_of_entries);
+    for(...)
+    {
+      // ...
+      tripletList.push_back(T(i,j,v_ij));
+    }
+    SparseMatrixType m(rows,cols);
+    m.setFromTriplets(tripletList.begin(), tripletList.end());
+    // m is ready to go!
+  * \endcode
+  *
+  * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
+  * an abstract iterator over a complex data structure that would be expensive to evaluate. The triplets should rather
+  * be explicitly stored in a std::vector, for instance.
+  */
+template<typename Scalar, int _Options, typename _Index>
+template<typename InputIterators>
+void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
+{
+  internal::set_from_triplets(begin, end, *this);
+}
+
+/** \internal */
+template<typename Scalar, int _Options, typename _Index>
+void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
+{
+  eigen_assert(!isCompressed());
+  // TODO, in practice we should be able to use m_innerNonZeros for that task
+  VectorXi wi(innerSize());
+  wi.fill(-1);
+  Index count = 0;
+  // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
+  for(int j=0; j<outerSize(); ++j)
+  {
+    Index start   = count;
+    Index oldEnd  = m_outerIndex[j]+m_innerNonZeros[j];
+    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
+    {
+      Index i = m_data.index(k);
+      if(wi(i)>=start)
+      {
+        // we have already seen this entry => accumulate it
+        m_data.value(wi(i)) += m_data.value(k);
+      }
+      else
+      {
+        m_data.value(count) = m_data.value(k);
+        m_data.index(count) = m_data.index(k);
+        wi(i) = count;
+        ++count;
+      }
+    }
+    m_outerIndex[j] = start;
+  }
+  m_outerIndex[m_outerSize] = count;
+
+  // turn the matrix into compressed form
+  delete[] m_innerNonZeros;
+  m_innerNonZeros = 0;
+  m_data.resize(m_outerIndex[m_outerSize]);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEMATRIX_H
diff --git a/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseMatrixBase.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseMatrixBase.h
new file mode 100644
index 000000000..9a1258097
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -0,0 +1,458 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEMATRIXBASE_H
+#define EIGEN_SPARSEMATRIXBASE_H
+
+namespace Eigen { 
+
+/** \ingroup SparseCore_Module
+  *
+  * \class SparseMatrixBase
+  *
+  * \brief Base class of all sparse matrices and sparse expressions
+  *
+  * \tparam Derived
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
+  */
+template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
+{
+  public:
+
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::add_const_on_value_type_if_arithmetic<
+                         typename internal::packet_traits<Scalar>::type
+                     >::type PacketReturnType;
+
+    typedef SparseMatrixBase StorageBaseType;
+    typedef EigenBase<Derived> Base;
+    
+    template<typename OtherDerived>
+    Derived& operator=(const EigenBase<OtherDerived> &other)
+    {
+      other.derived().evalTo(derived());
+      return derived();
+    }
+
+    enum {
+
+      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+        /**< The number of rows at compile-time. This is just a copy of the value provided
+          * by the \a Derived type. If a value is not known at compile-time,
+          * it is set to the \a Dynamic constant.
+          * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
+
+      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+        /**< The number of columns at compile-time. This is just a copy of the value provided
+          * by the \a Derived type. If a value is not known at compile-time,
+          * it is set to the \a Dynamic constant.
+          * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
+
+
+      SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
+                                                   internal::traits<Derived>::ColsAtCompileTime>::ret),
+        /**< This is equal to the number of coefficients, i.e. the number of
+          * rows times the number of columns, or to \a Dynamic if this is not
+          * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
+
+      MaxRowsAtCompileTime = RowsAtCompileTime,
+      MaxColsAtCompileTime = ColsAtCompileTime,
+
+      MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
+                                                      MaxColsAtCompileTime>::ret),
+
+      IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
+        /**< This is set to true if either the number of rows or the number of
+          * columns is known at compile-time to be equal to 1. Indeed, in that case,
+          * we are dealing with a column-vector (if there is only one column) or with
+          * a row-vector (if there is only one row). */
+
+      Flags = internal::traits<Derived>::Flags,
+        /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
+          * constructed from this one. See the \ref flags "list of flags".
+          */
+
+      CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+        /**< This is a rough measure of how expensive it is to read one coefficient from
+          * this expression.
+          */
+
+      IsRowMajor = Flags&RowMajorBit ? 1 : 0,
+
+      #ifndef EIGEN_PARSED_BY_DOXYGEN
+      _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
+      #endif
+    };
+
+    /** \internal the return type of MatrixBase::adjoint() */
+    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+                        CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
+                        Transpose<const Derived>
+                     >::type AdjointReturnType;
+
+
+    typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainObject;
+
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is the "real scalar" type; if the \a Scalar type is already real numbers
+      * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
+      * \a Scalar is \a std::complex<T> then RealScalar is \a T.
+      *
+      * \sa class NumTraits
+      */
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    /** \internal the return type of coeff()
+      */
+    typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;
+
+    /** \internal Represents a matrix with all coefficients equal to one another. */
+    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
+
+    /** type of the equivalent square matrix */
+    typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
+                          EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+
+    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    inline Derived& derived() { return *static_cast<Derived*>(this); }
+    inline Derived& const_cast_derived() const
+    { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
+#   include "../plugins/CommonCwiseUnaryOps.h"
+#   include "../plugins/CommonCwiseBinaryOps.h"
+#   include "../plugins/MatrixCwiseUnaryOps.h"
+#   include "../plugins/MatrixCwiseBinaryOps.h"
+#   ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
+#     include EIGEN_SPARSEMATRIXBASE_PLUGIN
+#   endif
+#   undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
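+    /* A minimal sketch of the plugin mechanism used above: defining
+       EIGEN_SPARSEMATRIXBASE_PLUGIN before any Eigen include injects a user
+       header into this class. The file name and member below are hypothetical:
+
+         // my_sparse_addons.h -- declared as a member of SparseMatrixBase
+         inline Index diagonalSize() const { return (std::min)(rows(), cols()); }
+
+         // user code, before including any Eigen header
+         #define EIGEN_SPARSEMATRIXBASE_PLUGIN "my_sparse_addons.h"
+         #include <Eigen/Sparse>
+    */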
+
+
+    /** \returns the number of rows. \sa cols() */
+    inline Index rows() const { return derived().rows(); }
+    /** \returns the number of columns. \sa rows() */
+    inline Index cols() const { return derived().cols(); }
+    /** \returns the number of coefficients, which is \a rows()*cols().
+      * \sa rows(), cols(). */
+    inline Index size() const { return rows() * cols(); }
+    /** \returns the number of nonzero coefficients which is in practice the number
+      * of stored coefficients. */
+    inline Index nonZeros() const { return derived().nonZeros(); }
+    /** \returns true if either the number of rows or the number of columns is equal to 1.
+      * In other words, this function returns
+      * \code rows()==1 || cols()==1 \endcode
+      * \sa rows(), cols(), IsVectorAtCompileTime. */
+    inline bool isVector() const { return rows()==1 || cols()==1; }
+    /** \returns the size of the storage major dimension,
+      * i.e., the number of columns for a column major matrix, and the number of rows otherwise */
+    Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
+    /** \returns the size of the inner dimension according to the storage order,
+      * i.e., the number of rows for a column major matrix, and the number of columns otherwise */
+    Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
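+    /* A small illustration of outerSize()/innerSize() (the 4x7 size is an
+       illustrative assumption):
+
+         Eigen::SparseMatrix<double> a(4,7);                   // column major (default)
+         Eigen::SparseMatrix<double,Eigen::RowMajor> b(4,7);   // row major
+         // a.outerSize() == 7, a.innerSize() == 4
+         // b.outerSize() == 4, b.innerSize() == 7
+    */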
+
+    bool isRValue() const { return m_isRValue; }
+    Derived& markAsRValue() { m_isRValue = true; return derived(); }
+
+    SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }
+
+    
+    template<typename OtherDerived>
+    Derived& operator=(const ReturnByValue<OtherDerived>& other)
+    {
+      other.evalTo(derived());
+      return derived();
+    }
+
+
+    template<typename OtherDerived>
+    inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
+    {
+      return assign(other.derived());
+    }
+
+    inline Derived& operator=(const Derived& other)
+    {
+//       if (other.isRValue())
+//         derived().swap(other.const_cast_derived());
+//       else
+      return assign(other.derived());
+    }
+
+  protected:
+
+    template<typename OtherDerived>
+    inline Derived& assign(const OtherDerived& other)
+    {
+      const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+      const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
+      if ((!transpose) && other.isRValue())
+      {
+        // eval without temporary
+        derived().resize(other.rows(), other.cols());
+        derived().setZero();
+        derived().reserve((std::max)(this->rows(),this->cols())*2);
+        for (Index j=0; j<outerSize; ++j)
+        {
+          derived().startVec(j);
+          for (typename OtherDerived::InnerIterator it(other, j); it; ++it)
+          {
+            Scalar v = it.value();
+            derived().insertBackByOuterInner(j,it.index()) = v;
+          }
+        }
+        derived().finalize();
+      }
+      else
+      {
+        assignGeneric(other);
+      }
+      return derived();
+    }
+
+    template<typename OtherDerived>
+    inline void assignGeneric(const OtherDerived& other)
+    {
+      //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+      eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+                  (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
+                  "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
+      enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
+
+      const Index outerSize = other.outerSize();
+      //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
+      // thanks to shallow copies, we always eval to a temporary
+      Derived temp(other.rows(), other.cols());
+
+      temp.reserve((std::max)(this->rows(),this->cols())*2);
+      for (Index j=0; j<outerSize; ++j)
+      {
+        temp.startVec(j);
+        for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
+        {
+          Scalar v = it.value();
+          temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
+        }
+      }
+      temp.finalize();
+
+      derived() = temp.markAsRValue();
+    }
+
+  public:
+
+    template<typename Lhs, typename Rhs>
+    inline Derived& operator=(const SparseSparseProduct<Lhs,Rhs>& product);
+
+    friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
+    {
+      typedef typename Derived::Nested Nested;
+      typedef typename internal::remove_all<Nested>::type NestedCleaned;
+
+      if (Flags&RowMajorBit)
+      {
+        const Nested nm(m.derived());
+        for (Index row=0; row<nm.outerSize(); ++row)
+        {
+          Index col = 0;
+          for (typename NestedCleaned::InnerIterator it(nm.derived(), row); it; ++it)
+          {
+            for ( ; col<it.index(); ++col)
+              s << "0 ";
+            s << it.value() << " ";
+            ++col;
+          }
+          for ( ; col<m.cols(); ++col)
+            s << "0 ";
+          s << std::endl;
+        }
+      }
+      else
+      {
+        const Nested nm(m.derived());
+        if (m.cols() == 1) {
+          Index row = 0;
+          for (typename NestedCleaned::InnerIterator it(nm.derived(), 0); it; ++it)
+          {
+            for ( ; row<it.index(); ++row)
+              s << "0" << std::endl;
+            s << it.value() << std::endl;
+            ++row;
+          }
+          for ( ; row<m.rows(); ++row)
+            s << "0" << std::endl;
+        }
+        else
+        {
+          SparseMatrix<Scalar, RowMajor> trans = m;
+          s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajor> >&>(trans);
+        }
+      }
+      return s;
+    }
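+    /* The stream operator above prints the matrix densely, i.e. every
+       coefficient including the zeros. A short sketch (size and value are
+       illustrative assumptions):
+
+         Eigen::SparseMatrix<double> m(2,2);
+         m.insert(0,0) = 1.0;
+         std::cout << m;   // writes the full 2x2 array, row by row, zeros included
+    */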
+
+    template<typename OtherDerived>
+    Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
+    template<typename OtherDerived>
+    Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
+
+    Derived& operator*=(const Scalar& other);
+    Derived& operator/=(const Scalar& other);
+
+    #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
+      CwiseBinaryOp< \
+        internal::scalar_product_op< \
+          typename internal::scalar_product_traits< \
+            typename internal::traits<Derived>::Scalar, \
+            typename internal::traits<OtherDerived>::Scalar \
+          >::ReturnType \
+        >, \
+        Derived, \
+        OtherDerived \
+      >
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+    cwiseProduct(const MatrixBase<OtherDerived> &other) const;
+
+    // sparse * sparse
+    template<typename OtherDerived>
+    const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+    operator*(const SparseMatrixBase<OtherDerived> &other) const;
+
+    // sparse * diagonal
+    template<typename OtherDerived>
+    const SparseDiagonalProduct<Derived,OtherDerived>
+    operator*(const DiagonalBase<OtherDerived> &other) const;
+
+    // diagonal * sparse
+    template<typename OtherDerived> friend
+    const SparseDiagonalProduct<OtherDerived,Derived>
+    operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
+    { return SparseDiagonalProduct<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
+
+    /** dense * sparse (return a dense object unless it is an outer product) */
+    template<typename OtherDerived> friend
+    const typename DenseSparseProductReturnType<OtherDerived,Derived>::Type
+    operator*(const MatrixBase<OtherDerived>& lhs, const Derived& rhs)
+    { return typename DenseSparseProductReturnType<OtherDerived,Derived>::Type(lhs.derived(),rhs); }
+
+    /** sparse * dense (returns a dense object unless it is an outer product) */
+    template<typename OtherDerived>
+    const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+    operator*(const MatrixBase<OtherDerived> &other) const;
+    
+     /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
+    SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
+    {
+      return SparseSymmetricPermutationProduct<Derived,Upper|Lower>(derived(), perm);
+    }
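+    /* A minimal usage sketch of twistedBy() (the size n and the permutation are
+       illustrative assumptions); the returned expression can be assigned to a
+       SparseMatrix:
+
+         const int n = 10;
+         Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic,int> P(n);
+         P.setIdentity();                  // or any permutation of 0..n-1
+         Eigen::SparseMatrix<double> H(n,n), twisted(n,n);
+         // ... fill H ...
+         twisted = H.twistedBy(P);         // evaluates P * H * P^-1
+    */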
+
+    template<typename OtherDerived>
+    Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
+
+    #ifdef EIGEN2_SUPPORT
+    // deprecated
+    template<typename OtherDerived>
+    typename internal::plain_matrix_type_column_major<OtherDerived>::type
+    solveTriangular(const MatrixBase<OtherDerived>& other) const;
+
+    // deprecated
+    template<typename OtherDerived>
+    void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
+    #endif // EIGEN2_SUPPORT
+
+    template<int Mode>
+    inline const SparseTriangularView<Derived, Mode> triangularView() const;
+
+    template<unsigned int UpLo> inline const SparseSelfAdjointView<Derived, UpLo> selfadjointView() const;
+    template<unsigned int UpLo> inline SparseSelfAdjointView<Derived, UpLo> selfadjointView();
+
+    template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
+    template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
+    RealScalar squaredNorm() const;
+    RealScalar norm()  const;
+
+    Transpose<Derived> transpose() { return derived(); }
+    const Transpose<const Derived> transpose() const { return derived(); }
+    const AdjointReturnType adjoint() const { return transpose(); }
+
+    // sub-vector
+    SparseInnerVectorSet<Derived,1> row(Index i);
+    const SparseInnerVectorSet<Derived,1> row(Index i) const;
+    SparseInnerVectorSet<Derived,1> col(Index j);
+    const SparseInnerVectorSet<Derived,1> col(Index j) const;
+    SparseInnerVectorSet<Derived,1> innerVector(Index outer);
+    const SparseInnerVectorSet<Derived,1> innerVector(Index outer) const;
+
+    // set of sub-vectors
+    SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size);
+    const SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size) const;
+    SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size);
+    const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const;
+    
+    SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size);
+    const SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size) const;
+    SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size);
+    const SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size) const;
+    SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize);
+    const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const;
+
+      /** \internal use operator= */
+      template<typename DenseDerived>
+      void evalTo(MatrixBase<DenseDerived>& dst) const
+      {
+        dst.setZero();
+        for (Index j=0; j<outerSize(); ++j)
+          for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+            dst.coeffRef(i.row(),i.col()) = i.value();
+      }
+
+      Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+      {
+        return derived();
+      }
+
+    template<typename OtherDerived>
+    bool isApprox(const SparseMatrixBase<OtherDerived>& other,
+                  RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+    { return toDense().isApprox(other.toDense(),prec); }
+
+    template<typename OtherDerived>
+    bool isApprox(const MatrixBase<OtherDerived>& other,
+                  RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+    { return toDense().isApprox(other,prec); }
+
+    /** \returns the matrix or vector obtained by evaluating this expression.
+      *
+      * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
+      * a const reference, in order to avoid a useless copy.
+      */
+    inline const typename internal::eval<Derived>::type eval() const
+    { return typename internal::eval<Derived>::type(derived()); }
+
+    Scalar sum() const;
+
+  protected:
+
+    bool m_isRValue;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEMATRIXBASE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparsePermutation.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparsePermutation.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/SparsePermutation.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/SparsePermutation.h
diff --git a/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseProduct.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseProduct.h
new file mode 100644
index 000000000..6a555b834
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseProduct.h
@@ -0,0 +1,186 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEPRODUCT_H
+#define EIGEN_SPARSEPRODUCT_H
+
+namespace Eigen { 
+
+template<typename Lhs, typename Rhs>
+struct SparseSparseProductReturnType
+{
+  typedef typename internal::traits<Lhs>::Scalar Scalar;
+  enum {
+    LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
+    RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
+    TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
+    TransposeLhs = LhsRowMajor && (!RhsRowMajor)
+  };
+
+  typedef typename internal::conditional<TransposeLhs,
+    SparseMatrix<Scalar,0>,
+    typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
+
+  typedef typename internal::conditional<TransposeRhs,
+    SparseMatrix<Scalar,0>,
+    typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
+
+  typedef SparseSparseProduct<LhsNested, RhsNested> Type;
+};
+
+namespace internal {
+template<typename LhsNested, typename RhsNested>
+struct traits<SparseSparseProduct<LhsNested, RhsNested> >
+{
+  typedef MatrixXpr XprKind;
+  // clean the nested types:
+  typedef typename remove_all<LhsNested>::type _LhsNested;
+  typedef typename remove_all<RhsNested>::type _RhsNested;
+  typedef typename _LhsNested::Scalar Scalar;
+  typedef typename promote_index_type<typename traits<_LhsNested>::Index,
+                                         typename traits<_RhsNested>::Index>::type Index;
+
+  enum {
+    LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+    RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+    LhsFlags = _LhsNested::Flags,
+    RhsFlags = _RhsNested::Flags,
+
+    RowsAtCompileTime    = _LhsNested::RowsAtCompileTime,
+    ColsAtCompileTime    = _RhsNested::ColsAtCompileTime,
+    MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
+
+    InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+
+    EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
+
+    RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
+
+    Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
+          | EvalBeforeAssigningBit
+          | EvalBeforeNestingBit,
+
+    CoeffReadCost = Dynamic
+  };
+
+  typedef Sparse StorageKind;
+};
+
+} // end namespace internal
+
+template<typename LhsNested, typename RhsNested>
+class SparseSparseProduct : internal::no_assignment_operator,
+  public SparseMatrixBase<SparseSparseProduct<LhsNested, RhsNested> >
+{
+  public:
+
+    typedef SparseMatrixBase<SparseSparseProduct> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct)
+
+  private:
+
+    typedef typename internal::traits<SparseSparseProduct>::_LhsNested _LhsNested;
+    typedef typename internal::traits<SparseSparseProduct>::_RhsNested _RhsNested;
+
+  public:
+
+    template<typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs)
+      : m_lhs(lhs), m_rhs(rhs), m_tolerance(0), m_conservative(true)
+    {
+      init();
+    }
+
+    template<typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, RealScalar tolerance)
+      : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false)
+    {
+      init();
+    }
+
+    SparseSparseProduct pruned(Scalar reference = 0, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision()) const
+    {
+      return SparseSparseProduct(m_lhs,m_rhs,internal::abs(reference)*epsilon);
+    }
+
+    template<typename Dest>
+    void evalTo(Dest& result) const
+    {
+      if(m_conservative)
+        internal::conservative_sparse_sparse_product_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result);
+      else
+        internal::sparse_sparse_product_with_pruning_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result,m_tolerance);
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+    EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+    EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+  protected:
+    void init()
+    {
+      eigen_assert(m_lhs.cols() == m_rhs.rows());
+
+      enum {
+        ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic
+                      || _RhsNested::RowsAtCompileTime==Dynamic
+                      || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime),
+        AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
+        SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested)
+      };
+      // note to the lost user:
+      //    * for a dot product use: v1.dot(v2)
+      //    * for a coeff-wise product use: v1.cwise()*v2
+      EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+        INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+      EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+        INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+      EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+    }
+
+    LhsNested m_lhs;
+    RhsNested m_rhs;
+    RealScalar m_tolerance;
+    bool m_conservative;
+};
+
+// sparse = sparse * sparse
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+inline Derived& SparseMatrixBase<Derived>::operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+{
+  product.evalTo(derived());
+  return derived();
+}
+
+/** \returns an expression of the product of two sparse matrices.
+  * By default, a conservative product preserving the symbolic non zeros is performed.
+  * Automatic pruning of the small values can be achieved by calling the pruned() function,
+  * in which case a totally different product algorithm is employed:
+  * \code
+  * C = (A*B).pruned();             // suppress numerical zeros (exact)
+  * C = (A*B).pruned(ref);
+  * C = (A*B).pruned(ref,epsilon);
+  * \endcode
+  * where \c ref is a meaningful non zero reference value.
+  * */
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+{
+  return typename SparseSparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
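+/* A minimal usage sketch of the two product flavours documented above (sizes,
+   scalar type and the pruning reference are illustrative assumptions):
+
+     Eigen::SparseMatrix<double> A(100,100), B(100,100), C(100,100);
+     // ... fill A and B ...
+     C = A * B;                        // conservative product, keeps symbolic non zeros
+     C = (A * B).pruned();             // additionally drops exact numerical zeros
+     C = (A * B).pruned(1.0, 1e-12);   // drops values whose magnitude is below 1.0 * 1e-12
+*/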
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEPRODUCT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseRedux.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseRedux.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/SparseRedux.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/SparseRedux.h
diff --git a/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h
new file mode 100644
index 000000000..86ec0a6c5
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -0,0 +1,480 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
+#define EIGEN_SPARSE_SELFADJOINTVIEW_H
+
+namespace Eigen { 
+
+/** \ingroup SparseCore_Module
+  * \class SparseSelfAdjointView
+  *
+  * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
+  *
+  * \param MatrixType the type of the sparse matrix storing the coefficients
+  * \param UpLo can be either \c #Lower or \c #Upper
+  *
+  * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
+  * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
+  * and most of the time this is the only way that it is used.
+  *
+  * \sa SparseMatrixBase::selfadjointView()
+  */
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct;
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct;
+
+namespace internal {
+  
+template<typename MatrixType, unsigned int UpLo>
+struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+}
+
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
+  : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
+{
+  public:
+
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Index,Dynamic,1> VectorI;
+    typedef typename MatrixType::Nested MatrixTypeNested;
+    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+    inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
+    {
+      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
+    }
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+
+    /** \internal \returns a reference to the nested matrix */
+    const _MatrixTypeNested& matrix() const { return m_matrix; }
+    _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
+
+    /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
+    template<typename OtherDerived>
+    SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
+    operator*(const MatrixBase<OtherDerived>& rhs) const
+    {
+      return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
+    }
+
+    /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
+    template<typename OtherDerived> friend
+    DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
+    operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+    {
+      return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
+    }
+
+    /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
+      * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
+      *
+      * \returns a reference to \c *this
+      *
+      * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
+      * call this function with u.adjoint().
+      */
+    template<typename DerivedU>
+    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+    
+    /** \internal triggered by sparse_matrix = SparseSelfAdjointView; */
+    template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
+    {
+      internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
+    }
+    
+    template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
+    {
+      // TODO directly evaluate into _dest;
+      SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
+      internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
+      _dest = tmp;
+    }
+    
+    /** \returns an expression of P H P^-1 */
+    SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
+    {
+      return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
+    }
+    
+    template<typename SrcMatrixType,int SrcUpLo>
+    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
+    {
+      permutedMatrix.evalTo(*this);
+      return *this;
+    }
+
+
+    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
+    {
+      PermutationMatrix<Dynamic> pnull;
+      return *this = src.twistedBy(pnull);
+    }
+
+    template<typename SrcMatrixType,unsigned int SrcUpLo>
+    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcUpLo>& src)
+    {
+      PermutationMatrix<Dynamic> pnull;
+      return *this = src.twistedBy(pnull);
+    }
+    
+
+    // const SparseLLT<PlainObject, UpLo> llt() const;
+    // const SparseLDLT<PlainObject, UpLo> ldlt() const;
+
+  protected:
+
+    typename MatrixType::Nested m_matrix;
+    mutable VectorI m_countPerRow;
+    mutable VectorI m_countPerCol;
+};
+
+/***************************************************************************
+* Implementation of SparseMatrixBase methods
+***************************************************************************/
+
+template<typename Derived>
+template<unsigned int UpLo>
+const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
+{
+  return derived();
+}
+
+template<typename Derived>
+template<unsigned int UpLo>
+SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
+{
+  return derived();
+}
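+/* A minimal usage sketch of selfadjointView() (size and values are illustrative
+   assumptions): only the stored triangular half is referenced, the other half
+   is inferred by symmetry.
+
+     const int n = 10;
+     Eigen::SparseMatrix<double> L(n,n);            // only the lower half is filled
+     Eigen::VectorXd x(n), y(n);
+     // ... fill L and x ...
+     y = L.selfadjointView<Eigen::Lower>() * x;     // full symmetric matrix-vector product
+     Eigen::SparseMatrix<double> full(n,n);
+     full = L.selfadjointView<Eigen::Lower>();      // expands to the full symmetric matrix
+*/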
+
+/***************************************************************************
+* Implementation of SparseSelfAdjointView methods
+***************************************************************************/
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename DerivedU>
+SparseSelfAdjointView<MatrixType,UpLo>&
+SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha)
+{
+  SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
+  if(alpha==Scalar(0))
+    m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
+  else
+    m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();
+
+  return *this;
+}
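+/* A minimal usage sketch of rankUpdate() (sizes and the alpha value are
+   illustrative assumptions); it accumulates alpha * u * u.adjoint() into the
+   stored triangular half:
+
+     const int n = 10;
+     Eigen::SparseMatrix<double> A(n,n);            // holds the lower half of a symmetric matrix
+     Eigen::SparseMatrix<double> u(n,3);
+     // ... fill A and u ...
+     A.selfadjointView<Eigen::Lower>().rankUpdate(u, 0.5);   // A += 0.5 * u * u^T (lower part)
+*/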
+
+/***************************************************************************
+* Implementation of sparse self-adjoint time dense matrix
+***************************************************************************/
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{
+  typedef Dense StorageKind;
+};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct
+  : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)
+
+    SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {}
+
+    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    {
+      // TODO use alpha
+      eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
+      typedef typename internal::remove_all<Lhs>::type _Lhs;
+      typedef typename internal::remove_all<Rhs>::type _Rhs;
+      typedef typename _Lhs::InnerIterator LhsInnerIterator;
+      enum {
+        LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
+        ProcessFirstHalf =
+                 ((UpLo&(Upper|Lower))==(Upper|Lower))
+              || ( (UpLo&Upper) && !LhsIsRowMajor)
+              || ( (UpLo&Lower) && LhsIsRowMajor),
+        ProcessSecondHalf = !ProcessFirstHalf
+      };
+      for (Index j=0; j<m_lhs.outerSize(); ++j)
+      {
+        LhsInnerIterator i(m_lhs,j);
+        if (ProcessSecondHalf)
+        {
+          while (i && i.index()<j) ++i;
+          if(i && i.index()==j)
+          {
+            dest.row(j) += i.value() * m_rhs.row(j);
+            ++i;
+          }
+        }
+        for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+        {
+          Index a = LhsIsRowMajor ? j : i.index();
+          Index b = LhsIsRowMajor ? i.index() : j;
+          typename Lhs::Scalar v = i.value();
+          dest.row(a) += (v) * m_rhs.row(b);
+          dest.row(b) += internal::conj(v) * m_rhs.row(a);
+        }
+        if (ProcessFirstHalf && i && (i.index()==j))
+          dest.row(j) += i.value() * m_rhs.row(j);
+      }
+    }
+
+  private:
+    SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&);
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct
+  : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)
+
+    DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {}
+
+    template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, Scalar /*alpha*/) const
+    {
+      // TODO
+    }
+
+  private:
+    DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
+};
+
+/***************************************************************************
+* Implementation of symmetric copies and permutations
+***************************************************************************/
+namespace internal {
+  
+template<typename MatrixType, int UpLo>
+struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
+  typedef Matrix<Index,Dynamic,1> VectorI;
+  
+  Dest& dest(_dest.derived());
+  enum {
+    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
+  };
+  
+  Index size = mat.rows();
+  VectorI count;
+  count.resize(size);
+  count.setZero();
+  dest.resize(size,size);
+  for(Index j = 0; j<size; ++j)
+  {
+    Index jp = perm ? perm[j] : j;
+    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    {
+      Index i = it.index();
+      Index r = it.row();
+      Index c = it.col();
+      Index ip = perm ? perm[i] : i;
+      if(UpLo==(Upper|Lower))
+        count[StorageOrderMatch ? jp : ip]++;
+      else if(r==c)
+        count[ip]++;
+      else if(( UpLo==Lower && r>c) || ( UpLo==Upper && r<c))
+      {
+        count[ip]++;
+        count[jp]++;
+      }
+    }
+  }
+  Index nnz = count.sum();
+  
+  // reserve space
+  dest.resizeNonZeros(nnz);
+  dest.outerIndexPtr()[0] = 0;
+  for(Index j=0; j<size; ++j)
+    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
+  for(Index j=0; j<size; ++j)
+    count[j] = dest.outerIndexPtr()[j];
+  
+  // copy data
+  for(Index j = 0; j<size; ++j)
+  {
+    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    {
+      Index i = it.index();
+      Index r = it.row();
+      Index c = it.col();
+      
+      Index jp = perm ? perm[j] : j;
+      Index ip = perm ? perm[i] : i;
+      
+      if(UpLo==(Upper|Lower))
+      {
+        Index k = count[StorageOrderMatch ? jp : ip]++;
+        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
+        dest.valuePtr()[k] = it.value();
+      }
+      else if(r==c)
+      {
+        Index k = count[ip]++;
+        dest.innerIndexPtr()[k] = ip;
+        dest.valuePtr()[k] = it.value();
+      }
+      else if(( (UpLo&Lower)==Lower && r>c) || ( (UpLo&Upper)==Upper && r<c))
+      {
+        if(!StorageOrderMatch)
+          std::swap(ip,jp);
+        Index k = count[jp]++;
+        dest.innerIndexPtr()[k] = ip;
+        dest.valuePtr()[k] = it.value();
+        k = count[ip]++;
+        dest.innerIndexPtr()[k] = jp;
+        dest.valuePtr()[k] = internal::conj(it.value());
+      }
+    }
+  }
+}
+
+template<int _SrcUpLo,int _DstUpLo,typename MatrixType,int DstOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  SparseMatrix<Scalar,DstOrder,Index>& dest(_dest.derived());
+  typedef Matrix<Index,Dynamic,1> VectorI;
+  enum {
+    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
+    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
+    DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo,
+    SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo
+  };
+  
+  Index size = mat.rows();
+  VectorI count(size);
+  count.setZero();
+  dest.resize(size,size);
+  for(Index j = 0; j<size; ++j)
+  {
+    Index jp = perm ? perm[j] : j;
+    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    {
+      Index i = it.index();
+      if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+        continue;
+                  
+      Index ip = perm ? perm[i] : i;
+      count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+    }
+  }
+  dest.outerIndexPtr()[0] = 0;
+  for(Index j=0; j<size; ++j)
+    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
+  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
+  for(Index j=0; j<size; ++j)
+    count[j] = dest.outerIndexPtr()[j];
+  
+  for(Index j = 0; j<size; ++j)
+  {
+    
+    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    {
+      Index i = it.index();
+      if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+        continue;
+                  
+      Index jp = perm ? perm[j] : j;
+      Index ip = perm? perm[i] : i;
+      
+      Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+      dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
+      
+      if(!StorageOrderMatch) std::swap(ip,jp);
+      if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
+        dest.valuePtr()[k] = conj(it.value());
+      else
+        dest.valuePtr()[k] = it.value();
+    }
+  }
+}
+
+}
+
+template<typename MatrixType,int UpLo>
+class SparseSymmetricPermutationProduct
+  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
+{
+  public:
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+  protected:
+    typedef PermutationMatrix<Dynamic,Dynamic,Index> Perm;
+  public:
+    typedef Matrix<Index,Dynamic,1> VectorI;
+    typedef typename MatrixType::Nested MatrixTypeNested;
+    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+    
+    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
+      : m_matrix(mat), m_perm(perm)
+    {}
+    
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+    
+    template<typename DestScalar, int Options, typename DstIndex>
+    void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
+    {
+      internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
+    }
+    
+    template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
+    {
+      internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
+    }
+    
+  protected:
+    MatrixTypeNested m_matrix;
+    const Perm& m_perm;
+
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
diff --git a/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
new file mode 100644
index 000000000..2438ac573
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -0,0 +1,149 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
+#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
+
+namespace Eigen { 
+
+namespace internal {
+
+
+// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
+template<typename Lhs, typename Rhs, typename ResultType>
+static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, typename ResultType::RealScalar tolerance)
+{
+  // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
+
+  typedef typename remove_all<Lhs>::type::Scalar Scalar;
+  typedef typename remove_all<Lhs>::type::Index Index;
+
+  // make sure to call innerSize/outerSize since we fake the storage order.
+  Index rows = lhs.innerSize();
+  Index cols = rhs.outerSize();
+  //int size = lhs.outerSize();
+  eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+  // allocate a temporary buffer
+  AmbiVector<Scalar,Index> tempVector(rows);
+
+  // estimate the number of non zero entries
+  // given a rhs column containing Y non zeros, we assume that the corresponding Y columns
+  // of the lhs differ on average by one non zero, thus the number of non zeros of
+  // the product of a rhs column with the lhs is X+Y, where X is the average number of non zeros
+  // per column of the lhs.
+  // Therefore, we estimate nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
+  Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
+
+  // mimics a resizeByInnerOuter:
+  if(ResultType::IsRowMajor)
+    res.resize(cols, rows);
+  else
+    res.resize(rows, cols);
+
+  res.reserve(estimated_nnz_prod);
+  double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
+  for (Index j=0; j<cols; ++j)
+  {
+    // FIXME:
+    //double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
+    // let's do a more accurate determination of the nnz ratio for the current column j of res
+    tempVector.init(ratioColRes);
+    tempVector.setZero();
+    for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+    {
+      // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
+      tempVector.restart();
+      Scalar x = rhsIt.value();
+      for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
+      {
+        tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
+      }
+    }
+    res.startVec(j);
+    for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector,tolerance); it; ++it)
+      res.insertBackByOuterInner(j,it.index()) = it.value();
+  }
+  res.finalize();
+}
+
+template<typename Lhs, typename Rhs, typename ResultType,
+  int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+  int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+  int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct sparse_sparse_product_with_pruning_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+  typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+  typedef typename ResultType::RealScalar RealScalar;
+
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  {
+    typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+    internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
+    res.swap(_res);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+  typedef typename ResultType::RealScalar RealScalar;
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  {
+    // we need a col-major matrix to hold the result
+    typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+    SparseTemporaryType _res(res.rows(), res.cols());
+    internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
+    res = _res;
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+  typedef typename ResultType::RealScalar RealScalar;
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  {
+    // let's transpose the product to get a column x column product
+    typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+    internal::sparse_sparse_product_with_pruning_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res, tolerance);
+    res.swap(_res);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+  typedef typename ResultType::RealScalar RealScalar;
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  {
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+    ColMajorMatrix colLhs(lhs);
+    ColMajorMatrix colRhs(rhs);
+    internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res, tolerance);
+
+    // let's transpose the product to get a column x column product
+//     typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+//     SparseTemporaryType _res(res.cols(), res.rows());
+//     sparse_sparse_product_with_pruning_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
+//     res = _res.transpose();
+  }
+};
+
+// NOTE the 2 other cases (col row *) must never occur since they are caught
+// by ProductReturnType which transforms it to (col col *) by evaluating rhs.
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
diff --git a/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseTranspose.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseTranspose.h
new file mode 100644
index 000000000..273f9de68
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseTranspose.h
@@ -0,0 +1,61 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSETRANSPOSE_H
+#define EIGEN_SPARSETRANSPOSE_H
+
+namespace Eigen { 
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
+  : public SparseMatrixBase<Transpose<MatrixType> >
+{
+    typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested;
+  public:
+
+    EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+
+    class InnerIterator;
+    class ReverseInnerIterator;
+
+    inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
+};
+
+// NOTE: VC10 triggers an ICE if we don't put typename TransposeImpl<MatrixType,Sparse>:: in front of Index;
+// a typedef typename TransposeImpl<MatrixType,Sparse>::Index Index;
+// does not fix the issue.
+// An alternative is to define the nested class in the parent class itself.
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerIterator
+  : public _MatrixTypeNested::InnerIterator
+{
+    typedef typename _MatrixTypeNested::InnerIterator Base;
+  public:
+
+    EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer)
+      : Base(trans.derived().nestedExpression(), outer)
+    {}
+    inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
+    inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+};
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
+  : public _MatrixTypeNested::ReverseInnerIterator
+{
+    typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+  public:
+
+    EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer)
+      : Base(xpr.derived().nestedExpression(), outer)
+    {}
+    inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
+    inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSETRANSPOSE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseTriangularView.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseTriangularView.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/SparseTriangularView.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/SparseTriangularView.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseUtil.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseUtil.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/SparseUtil.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/SparseUtil.h
diff --git a/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseVector.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseVector.h
new file mode 100644
index 000000000..c952f6540
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseVector.h
@@ -0,0 +1,398 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEVECTOR_H
+#define EIGEN_SPARSEVECTOR_H
+
+namespace Eigen { 
+
+/** \ingroup SparseCore_Module
+  * \class SparseVector
+  *
+  * \brief a sparse vector class
+  *
+  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+  *
+  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
+  */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseVector<_Scalar, _Options, _Index> >
+{
+  typedef _Scalar Scalar;
+  typedef _Index Index;
+  typedef Sparse StorageKind;
+  typedef MatrixXpr XprKind;
+  enum {
+    IsColVector = (_Options & RowMajorBit) ? 0 : 1,
+
+    RowsAtCompileTime = IsColVector ? Dynamic : 1,
+    ColsAtCompileTime = IsColVector ? 1 : Dynamic,
+    MaxRowsAtCompileTime = RowsAtCompileTime,
+    MaxColsAtCompileTime = ColsAtCompileTime,
+    Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit),
+    CoeffReadCost = NumTraits<Scalar>::ReadCost,
+    SupportedAccessPatterns = InnerRandomAccessPattern
+  };
+};
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseVector
+  : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
+{
+  public:
+    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
+    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
+    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
+
+  protected:
+  public:
+
+    typedef SparseMatrixBase<SparseVector> SparseBase;
+    enum { IsColVector = internal::traits<SparseVector>::IsColVector };
+    
+    enum {
+      Options = _Options
+    };
+
+    internal::CompressedStorage<Scalar,Index> m_data;
+    Index m_size;
+
+    internal::CompressedStorage<Scalar,Index>& _data() { return m_data; }
+    const internal::CompressedStorage<Scalar,Index>& _data() const { return m_data; }
+
+  public:
+
+    EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
+    EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
+    EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
+    EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
+
+    EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); }
+    EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); }
+
+    EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); }
+    EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); }
+
+    inline Scalar coeff(Index row, Index col) const
+    {
+      eigen_assert((IsColVector ? col : row)==0);
+      return coeff(IsColVector ? row : col);
+    }
+    inline Scalar coeff(Index i) const { return m_data.at(i); }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      eigen_assert((IsColVector ? col : row)==0);
+      return coeffRef(IsColVector ? row : col);
+    }
+
+    /** \returns a reference to the coefficient value at given index \a i.
+      * This operation involves a log(rho*size) binary search. If the coefficient does not
+      * exist yet, then a sorted insertion into a sequential buffer is performed.
+      *
+      * This insertion might be very costly if the number of nonzeros above \a i is large.
+      */
+    inline Scalar& coeffRef(Index i)
+    {
+      return m_data.atWithInsertion(i);
+    }
+
+  public:
+
+    class InnerIterator;
+    class ReverseInnerIterator;
+
+    inline void setZero() { m_data.clear(); }
+
+    /** \returns the number of non zero coefficients */
+    inline Index nonZeros() const  { return static_cast<Index>(m_data.size()); }
+
+    inline void startVec(Index outer)
+    {
+      EIGEN_UNUSED_VARIABLE(outer);
+      eigen_assert(outer==0);
+    }
+
+    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+    {
+      EIGEN_UNUSED_VARIABLE(outer);
+      eigen_assert(outer==0);
+      return insertBack(inner);
+    }
+    inline Scalar& insertBack(Index i)
+    {
+      m_data.append(0, i);
+      return m_data.value(m_data.size()-1);
+    }
+
+    inline Scalar& insert(Index row, Index col)
+    {
+      Index inner = IsColVector ? row : col;
+      Index outer = IsColVector ? col : row;
+      eigen_assert(outer==0);
+      return insert(inner);
+    }
+    Scalar& insert(Index i)
+    {
+      Index startId = 0;
+      Index p = Index(m_data.size()) - 1;
+      // TODO smart realloc
+      m_data.resize(p+2,1);
+
+      while ( (p >= startId) && (m_data.index(p) > i) )
+      {
+        m_data.index(p+1) = m_data.index(p);
+        m_data.value(p+1) = m_data.value(p);
+        --p;
+      }
+      m_data.index(p+1) = i;
+      m_data.value(p+1) = 0;
+      return m_data.value(p+1);
+    }
+
+    /** Reserves room for \a reserveSize non-zero coefficients. */
+    inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
+
+
+    inline void finalize() {}
+
+    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+    {
+      m_data.prune(reference,epsilon);
+    }
+
+    void resize(Index rows, Index cols)
+    {
+      eigen_assert(rows==1 || cols==1);
+      resize(IsColVector ? rows : cols);
+    }
+
+    void resize(Index newSize)
+    {
+      m_size = newSize;
+      m_data.clear();
+    }
+
+    void resizeNonZeros(Index size) { m_data.resize(size); }
+
+    inline SparseVector() : m_size(0) { resize(0); }
+
+    inline SparseVector(Index size) : m_size(0) { resize(size); }
+
+    inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
+
+    template<typename OtherDerived>
+    inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
+      : m_size(0)
+    {
+      *this = other.derived();
+    }
+
+    inline SparseVector(const SparseVector& other)
+      : m_size(0)
+    {
+      *this = other.derived();
+    }
+
+    inline void swap(SparseVector& other)
+    {
+      std::swap(m_size, other.m_size);
+      m_data.swap(other.m_data);
+    }
+
+    inline SparseVector& operator=(const SparseVector& other)
+    {
+      if (other.isRValue())
+      {
+        swap(other.const_cast_derived());
+      }
+      else
+      {
+        resize(other.size());
+        m_data = other.m_data;
+      }
+      return *this;
+    }
+
+    template<typename OtherDerived>
+    inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
+    {
+      if (int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime))
+        return assign(other.transpose());
+      else
+        return assign(other);
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename Lhs, typename Rhs>
+    inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+    {
+      return Base::operator=(product);
+    }
+    #endif
+
+    friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
+    {
+      for (Index i=0; i<m.nonZeros(); ++i)
+        s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+      s << std::endl;
+      return s;
+    }
+
+    /** Destructor */
+    inline ~SparseVector() {}
+
+    /** Overloaded for performance */
+    Scalar sum() const;
+
+  public:
+
+    /** \deprecated use setZero() and reserve() */
+    EIGEN_DEPRECATED void startFill(Index reserve)
+    {
+      setZero();
+      m_data.reserve(reserve);
+    }
+
+    /** \deprecated use insertBack(Index,Index) */
+    EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
+    {
+      eigen_assert(r==0 || c==0);
+      return fill(IsColVector ? r : c);
+    }
+
+    /** \deprecated use insertBack(Index) */
+    EIGEN_DEPRECATED Scalar& fill(Index i)
+    {
+      m_data.append(0, i);
+      return m_data.value(m_data.size()-1);
+    }
+
+    /** \deprecated use insert(Index,Index) */
+    EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
+    {
+      eigen_assert(r==0 || c==0);
+      return fillrand(IsColVector ? r : c);
+    }
+
+    /** \deprecated use insert(Index) */
+    EIGEN_DEPRECATED Scalar& fillrand(Index i)
+    {
+      return insert(i);
+    }
+
+    /** \deprecated use finalize() */
+    EIGEN_DEPRECATED void endFill() {}
+    
+#   ifdef EIGEN_SPARSEVECTOR_PLUGIN
+#     include EIGEN_SPARSEVECTOR_PLUGIN
+#   endif
+
+protected:
+    template<typename OtherDerived>
+    EIGEN_DONT_INLINE SparseVector& assign(const SparseMatrixBase<OtherDerived>& _other)
+    {
+      const OtherDerived& other(_other.derived());
+      const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+      if(needToTranspose)
+      {
+        Index size = other.size();
+        Index nnz = other.nonZeros();
+        resize(size);
+        reserve(nnz);
+        for(Index i=0; i<size; ++i)
+        {
+          typename OtherDerived::InnerIterator it(other, i);
+          if(it)
+              insert(i) = it.value();
+        }
+        return *this;
+      }
+      else
+      {
+        // there is no special optimization
+        return Base::operator=(other);
+      }
+    }
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseVector<Scalar,_Options,_Index>::InnerIterator
+{
+  public:
+    InnerIterator(const SparseVector& vec, Index outer=0)
+      : m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+    {
+      EIGEN_UNUSED_VARIABLE(outer);
+      eigen_assert(outer==0);
+    }
+
+    InnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
+      : m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+    {}
+
+    inline InnerIterator& operator++() { m_id++; return *this; }
+
+    inline Scalar value() const { return m_data.value(m_id); }
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
+
+    inline Index index() const { return m_data.index(m_id); }
+    inline Index row() const { return IsColVector ? index() : 0; }
+    inline Index col() const { return IsColVector ? 0 : index(); }
+
+    inline operator bool() const { return (m_id < m_end); }
+
+  protected:
+    const internal::CompressedStorage<Scalar,Index>& m_data;
+    Index m_id;
+    const Index m_end;
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
+{
+  public:
+    ReverseInnerIterator(const SparseVector& vec, Index outer=0)
+      : m_data(vec.m_data), m_id(static_cast<Index>(m_data.size())), m_start(0)
+    {
+      EIGEN_UNUSED_VARIABLE(outer);
+      eigen_assert(outer==0);
+    }
+
+    ReverseInnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
+      : m_data(data), m_id(static_cast<Index>(m_data.size())), m_start(0)
+    {}
+
+    inline ReverseInnerIterator& operator--() { m_id--; return *this; }
+
+    inline Scalar value() const { return m_data.value(m_id-1); }
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id-1)); }
+
+    inline Index index() const { return m_data.index(m_id-1); }
+    inline Index row() const { return IsColVector ? index() : 0; }
+    inline Index col() const { return IsColVector ? 0 : index(); }
+
+    inline operator bool() const { return (m_id > m_start); }
+
+  protected:
+    const internal::CompressedStorage<Scalar,Index>& m_data;
+    Index m_id;
+    const Index m_start;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEVECTOR_H
diff --git a/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseView.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseView.h
new file mode 100644
index 000000000..8b0b9ea03
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SparseCore/SparseView.h
@@ -0,0 +1,98 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEVIEW_H
+#define EIGEN_SPARSEVIEW_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename MatrixType>
+struct traits<SparseView<MatrixType> > : traits<MatrixType>
+{
+  typedef int Index;
+  typedef Sparse StorageKind;
+  enum {
+    Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
+  };
+};
+
+} // end namespace internal
+
+template<typename MatrixType>
+class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
+{
+  typedef typename MatrixType::Nested MatrixTypeNested;
+  typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+public:
+  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
+
+  SparseView(const MatrixType& mat, const Scalar& m_reference = Scalar(0),
+             typename NumTraits<Scalar>::Real m_epsilon = NumTraits<Scalar>::dummy_precision()) : 
+    m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {}
+
+  class InnerIterator;
+
+  inline Index rows() const { return m_matrix.rows(); }
+  inline Index cols() const { return m_matrix.cols(); }
+
+  inline Index innerSize() const { return m_matrix.innerSize(); }
+  inline Index outerSize() const { return m_matrix.outerSize(); }
+
+protected:
+  MatrixTypeNested m_matrix;
+  Scalar m_reference;
+  typename NumTraits<Scalar>::Real m_epsilon;
+};
+
+template<typename MatrixType>
+class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
+{
+public:
+  typedef typename _MatrixTypeNested::InnerIterator IterBase;
+  InnerIterator(const SparseView& view, Index outer) :
+  IterBase(view.m_matrix, outer), m_view(view)
+  {
+    incrementToNonZero();
+  }
+
+  EIGEN_STRONG_INLINE InnerIterator& operator++()
+  {
+    IterBase::operator++();
+    incrementToNonZero();
+    return *this;
+  }
+
+  using IterBase::value;
+
+protected:
+  const SparseView& m_view;
+
+private:
+  void incrementToNonZero()
+  {
+    while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon))
+    {
+      IterBase::operator++();
+    }
+  }
+};
+
+template<typename Derived>
+const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& m_reference,
+                                                          typename NumTraits<Scalar>::Real m_epsilon) const
+{
+  return SparseView<Derived>(derived(), m_reference, m_epsilon);
+}
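+
+// A minimal usage sketch (illustrative only, not part of the upstream sources):
+// the SparseView returned by MatrixBase::sparseView() can be assigned to a SparseMatrix,
+// keeping only the coefficients that are not "much smaller" than the reference value.
+//
+//   MatrixXd D = MatrixXd::Random(10,10);
+//   SparseMatrix<double> S = D.sparseView();          // default reference (0) and epsilon
+//   SparseMatrix<double> T = D.sparseView(1.0, 1e-3); // custom reference and epsilon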
+
+} // end namespace Eigen
+
+#endif
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/TriangularSolver.h b/resources/3rdParty/eigen/Eigen/src/SparseCore/TriangularSolver.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SparseCore/TriangularSolver.h
rename to resources/3rdParty/eigen/Eigen/src/SparseCore/TriangularSolver.h
diff --git a/resources/3rdparty/eigen/Eigen/src/StlSupport/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/StlSupport/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/StlSupport/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/StlSupport/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/StlSupport/StdDeque.h b/resources/3rdParty/eigen/Eigen/src/StlSupport/StdDeque.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/StlSupport/StdDeque.h
rename to resources/3rdParty/eigen/Eigen/src/StlSupport/StdDeque.h
diff --git a/resources/3rdparty/eigen/Eigen/src/StlSupport/StdList.h b/resources/3rdParty/eigen/Eigen/src/StlSupport/StdList.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/StlSupport/StdList.h
rename to resources/3rdParty/eigen/Eigen/src/StlSupport/StdList.h
diff --git a/resources/3rdparty/eigen/Eigen/src/StlSupport/StdVector.h b/resources/3rdParty/eigen/Eigen/src/StlSupport/StdVector.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/StlSupport/StdVector.h
rename to resources/3rdParty/eigen/Eigen/src/StlSupport/StdVector.h
diff --git a/resources/3rdparty/eigen/Eigen/src/StlSupport/details.h b/resources/3rdParty/eigen/Eigen/src/StlSupport/details.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/StlSupport/details.h
rename to resources/3rdParty/eigen/Eigen/src/StlSupport/details.h
diff --git a/resources/3rdparty/eigen/Eigen/src/SuperLUSupport/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/SuperLUSupport/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/SuperLUSupport/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/SuperLUSupport/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h b/resources/3rdParty/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h
new file mode 100644
index 000000000..d8a54e18c
--- /dev/null
+++ b/resources/3rdParty/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h
@@ -0,0 +1,1025 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SUPERLUSUPPORT_H
+#define EIGEN_SUPERLUSUPPORT_H
+
+namespace Eigen { 
+
+#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE)		\
+    extern "C" {                                                                                          \
+      typedef struct { FLOATTYPE for_lu; FLOATTYPE total_needed; int expansions; } PREFIX##mem_usage_t;   \
+      extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *,                  \
+                                char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *,           \
+                                void *, int, SuperMatrix *, SuperMatrix *,                                \
+                                FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *,                       \
+                                PREFIX##mem_usage_t *, SuperLUStat_t *, int *);                           \
+    }                                                                                                     \
+    inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A,                                \
+         int *perm_c, int *perm_r, int *etree, char *equed,                                               \
+         FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L,                                                      \
+         SuperMatrix *U, void *work, int lwork,                                                           \
+         SuperMatrix *B, SuperMatrix *X,                                                                  \
+         FLOATTYPE *recip_pivot_growth,                                                                   \
+         FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr,                                              \
+         SuperLUStat_t *stats, int *info, KEYTYPE) {                                                      \
+    PREFIX##mem_usage_t mem_usage;                                                                        \
+    PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L,                                      \
+         U, work, lwork, B, X, recip_pivot_growth, rcond,                                                 \
+         ferr, berr, &mem_usage, stats, info);                                                            \
+    return mem_usage.for_lu; /* bytes used by the factor storage */                                       \
+  }
+
+DECL_GSSVX(s,float,float)
+DECL_GSSVX(c,float,std::complex<float>)
+DECL_GSSVX(d,double,double)
+DECL_GSSVX(z,double,std::complex<double>)
+
+#ifdef MILU_ALPHA
+#define EIGEN_SUPERLU_HAS_ILU
+#endif
+
+#ifdef EIGEN_SUPERLU_HAS_ILU
+
+// similarly for the incomplete factorization using gsisx
+#define DECL_GSISX(PREFIX,FLOATTYPE,KEYTYPE)                                                    \
+    extern "C" {                                                                                \
+      extern void PREFIX##gsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *,        \
+                         char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *,        \
+                         void *, int, SuperMatrix *, SuperMatrix *, FLOATTYPE *, FLOATTYPE *,   \
+                         PREFIX##mem_usage_t *, SuperLUStat_t *, int *);                        \
+    }                                                                                           \
+    inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A,                      \
+         int *perm_c, int *perm_r, int *etree, char *equed,                                     \
+         FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L,                                            \
+         SuperMatrix *U, void *work, int lwork,                                                 \
+         SuperMatrix *B, SuperMatrix *X,                                                        \
+         FLOATTYPE *recip_pivot_growth,                                                         \
+         FLOATTYPE *rcond,                                                                      \
+         SuperLUStat_t *stats, int *info, KEYTYPE) {                                            \
+    PREFIX##mem_usage_t mem_usage;                                                              \
+    PREFIX##gsisx(options, A, perm_c, perm_r, etree, equed, R, C, L,                            \
+         U, work, lwork, B, X, recip_pivot_growth, rcond,                                       \
+         &mem_usage, stats, info);                                                              \
+    return mem_usage.for_lu; /* bytes used by the factor storage */                             \
+  }
+
+DECL_GSISX(s,float,float)
+DECL_GSISX(c,float,std::complex<float>)
+DECL_GSISX(d,double,double)
+DECL_GSISX(z,double,std::complex<double>)
+
+#endif
+
+template<typename MatrixType>
+struct SluMatrixMapHelper;
+
+/** \internal
+  *
+  * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices
+  * and dense matrices. Supernodal and other fancy formats are not supported by this wrapper.
+  *
+  * This wrapper class mainly aims to avoid the need for dynamic allocation of the storage structure.
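+  *
+  * A minimal sketch of the intended use (illustrative only):
+  * \code
+  * SparseMatrix<double> A(100,100);
+  * // ... fill A ...
+  * SluMatrix slu = SluMatrix::Map(A);  // wraps A's arrays in a SuperMatrix, no copy is made
+  * \endcode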
+  */
+struct SluMatrix : SuperMatrix
+{
+  SluMatrix()
+  {
+    Store = &storage;
+  }
+
+  SluMatrix(const SluMatrix& other)
+    : SuperMatrix(other)
+  {
+    Store = &storage;
+    storage = other.storage;
+  }
+
+  SluMatrix& operator=(const SluMatrix& other)
+  {
+    SuperMatrix::operator=(static_cast<const SuperMatrix&>(other));
+    Store = &storage;
+    storage = other.storage;
+    return *this;
+  }
+
+  struct
+  {
+    union {int nnz;int lda;};
+    void *values;
+    int *innerInd;
+    int *outerInd;
+  } storage;
+
+  void setStorageType(Stype_t t)
+  {
+    Stype = t;
+    if (t==SLU_NC || t==SLU_NR || t==SLU_DN)
+      Store = &storage;
+    else
+    {
+      eigen_assert(false && "storage type not supported");
+      Store = 0;
+    }
+  }
+
+  template<typename Scalar>
+  void setScalarType()
+  {
+    if (internal::is_same<Scalar,float>::value)
+      Dtype = SLU_S;
+    else if (internal::is_same<Scalar,double>::value)
+      Dtype = SLU_D;
+    else if (internal::is_same<Scalar,std::complex<float> >::value)
+      Dtype = SLU_C;
+    else if (internal::is_same<Scalar,std::complex<double> >::value)
+      Dtype = SLU_Z;
+    else
+    {
+      eigen_assert(false && "Scalar type not supported by SuperLU");
+    }
+  }
+
+  template<typename MatrixType>
+  static SluMatrix Map(MatrixBase<MatrixType>& _mat)
+  {
+    MatrixType& mat(_mat.derived());
+    eigen_assert( ((MatrixType::Flags&RowMajorBit)!=RowMajorBit) && "row-major dense matrices are not supported by SuperLU");
+    SluMatrix res;
+    res.setStorageType(SLU_DN);
+    res.setScalarType<typename MatrixType::Scalar>();
+    res.Mtype     = SLU_GE;
+
+    res.nrow      = mat.rows();
+    res.ncol      = mat.cols();
+
+    res.storage.lda       = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride();
+    res.storage.values    = mat.data();
+    return res;
+  }
+
+  template<typename MatrixType>
+  static SluMatrix Map(SparseMatrixBase<MatrixType>& mat)
+  {
+    SluMatrix res;
+    if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+    {
+      res.setStorageType(SLU_NR);
+      res.nrow      = mat.cols();
+      res.ncol      = mat.rows();
+    }
+    else
+    {
+      res.setStorageType(SLU_NC);
+      res.nrow      = mat.rows();
+      res.ncol      = mat.cols();
+    }
+
+    res.Mtype       = SLU_GE;
+
+    res.storage.nnz       = mat.nonZeros();
+    res.storage.values    = mat.derived().valuePtr();
+    res.storage.innerInd  = mat.derived().innerIndexPtr();
+    res.storage.outerInd  = mat.derived().outerIndexPtr();
+
+    res.setScalarType<typename MatrixType::Scalar>();
+
+    // FIXME the following is not very accurate
+    if (MatrixType::Flags & Upper)
+      res.Mtype = SLU_TRU;
+    if (MatrixType::Flags & Lower)
+      res.Mtype = SLU_TRL;
+
+    eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
+
+    return res;
+  }
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
+struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >
+{
+  typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
+  static void run(MatrixType& mat, SluMatrix& res)
+  {
+    eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices are not supported by SuperLU");
+    res.setStorageType(SLU_DN);
+    res.setScalarType<Scalar>();
+    res.Mtype     = SLU_GE;
+
+    res.nrow      = mat.rows();
+    res.ncol      = mat.cols();
+
+    res.storage.lda       = mat.outerStride();
+    res.storage.values    = mat.data();
+  }
+};
+
+template<typename Derived>
+struct SluMatrixMapHelper<SparseMatrixBase<Derived> >
+{
+  typedef Derived MatrixType;
+  static void run(MatrixType& mat, SluMatrix& res)
+  {
+    if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+    {
+      res.setStorageType(SLU_NR);
+      res.nrow      = mat.cols();
+      res.ncol      = mat.rows();
+    }
+    else
+    {
+      res.setStorageType(SLU_NC);
+      res.nrow      = mat.rows();
+      res.ncol      = mat.cols();
+    }
+
+    res.Mtype       = SLU_GE;
+
+    res.storage.nnz       = mat.nonZeros();
+    res.storage.values    = mat.valuePtr();
+    res.storage.innerInd  = mat.innerIndexPtr();
+    res.storage.outerInd  = mat.outerIndexPtr();
+
+    res.setScalarType<typename MatrixType::Scalar>();
+
+    // FIXME the following is not very accurate
+    if (MatrixType::Flags & Upper)
+      res.Mtype = SLU_TRU;
+    if (MatrixType::Flags & Lower)
+      res.Mtype = SLU_TRL;
+
+    eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
+  }
+};
+
+namespace internal {
+
+template<typename MatrixType>
+SluMatrix asSluMatrix(MatrixType& mat)
+{
+  return SluMatrix::Map(mat);
+}
+
+/** View a SuperLU matrix as an Eigen expression */
+template<typename Scalar, int Flags, typename Index>
+MappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat)
+{
+  eigen_assert(((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR)
+         || ((Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC));
+
+  Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;
+
+  return MappedSparseMatrix<Scalar,Flags,Index>(
+    sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize],
+    sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) );
+}
+
+} // end namespace internal
+
+/** \ingroup SuperLUSupport_Module
+  * \class SuperLUBase
+  * \brief The base class for the direct and incomplete LU factorization of SuperLU
+  */
+template<typename _MatrixType, typename Derived>
+class SuperLUBase : internal::noncopyable
+{
+  public:
+    typedef _MatrixType MatrixType;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Scalar,Dynamic,1> Vector;
+    typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+    typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;    
+    typedef SparseMatrix<Scalar> LUMatrixType;
+
+  public:
+
+    SuperLUBase() {}
+
+    ~SuperLUBase()
+    {
+      clearFactors();
+    }
+    
+    Derived& derived() { return *static_cast<Derived*>(this); }
+    const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+    
+    /** \returns a reference to the SuperLU options object used to configure the SuperLU algorithms. */
+    inline superlu_options_t& options() { return m_sluOptions; }
+    
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the factorization reported a numerical problem.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+
+    /** Computes the sparse LU decomposition of \a matrix */
+    void compute(const MatrixType& matrix)
+    {
+      derived().analyzePattern(matrix);
+      derived().factorize(matrix);
+    }
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<SuperLUBase, Rhs> solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "SuperLU is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<SuperLUBase, Rhs>(*this, b.derived());
+    }
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+//     template<typename Rhs>
+//     inline const internal::sparse_solve_retval<SuperLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
+//     {
+//       eigen_assert(m_isInitialized && "SuperLU is not initialized.");
+//       eigen_assert(rows()==b.rows()
+//                 && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
+//       return internal::sparse_solve_retval<SuperLU, Rhs>(*this, b.derived());
+//     }
+    
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      * 
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& /*matrix*/)
+    {
+      m_isInitialized = true;
+      m_info = Success;
+      m_analysisIsOk = true;
+      m_factorizationIsOk = false;
+    }
+    
+    template<typename Stream>
+    void dumpMemory(Stream& s)
+    {}
+    
+  protected:
+    
+    void initFactorization(const MatrixType& a)
+    {
+      set_default_options(&this->m_sluOptions);
+      
+      const int size = a.rows();
+      m_matrix = a;
+
+      m_sluA = internal::asSluMatrix(m_matrix);
+      clearFactors();
+
+      m_p.resize(size);
+      m_q.resize(size);
+      m_sluRscale.resize(size);
+      m_sluCscale.resize(size);
+      m_sluEtree.resize(size);
+
+      // set empty B and X
+      m_sluB.setStorageType(SLU_DN);
+      m_sluB.setScalarType<Scalar>();
+      m_sluB.Mtype          = SLU_GE;
+      m_sluB.storage.values = 0;
+      m_sluB.nrow           = 0;
+      m_sluB.ncol           = 0;
+      m_sluB.storage.lda    = size;
+      m_sluX                = m_sluB;
+      
+      m_extractedDataAreDirty = true;
+    }
+    
+    void init()
+    {
+      m_info = InvalidInput;
+      m_isInitialized = false;
+      m_sluL.Store = 0;
+      m_sluU.Store = 0;
+    }
+    
+    void extractData() const;
+
+    void clearFactors()
+    {
+      if(m_sluL.Store)
+        Destroy_SuperNode_Matrix(&m_sluL);
+      if(m_sluU.Store)
+        Destroy_CompCol_Matrix(&m_sluU);
+
+      m_sluL.Store = 0;
+      m_sluU.Store = 0;
+
+      memset(&m_sluL,0,sizeof m_sluL);
+      memset(&m_sluU,0,sizeof m_sluU);
+    }
+
+    // cached data to reduce reallocation, etc.
+    mutable LUMatrixType m_l;
+    mutable LUMatrixType m_u;
+    mutable IntColVectorType m_p;
+    mutable IntRowVectorType m_q;
+
+    mutable LUMatrixType m_matrix;  // copy of the factorized matrix
+    mutable SluMatrix m_sluA;
+    mutable SuperMatrix m_sluL, m_sluU;
+    mutable SluMatrix m_sluB, m_sluX;
+    mutable SuperLUStat_t m_sluStat;
+    mutable superlu_options_t m_sluOptions;
+    mutable std::vector<int> m_sluEtree;
+    mutable Matrix<RealScalar,Dynamic,1> m_sluRscale, m_sluCscale;
+    mutable Matrix<RealScalar,Dynamic,1> m_sluFerr, m_sluBerr;
+    mutable char m_sluEqued;
+
+    mutable ComputationInfo m_info;
+    bool m_isInitialized;
+    int m_factorizationIsOk;
+    int m_analysisIsOk;
+    mutable bool m_extractedDataAreDirty;
+    
+  private:
+    SuperLUBase(SuperLUBase& ) { }
+};
+
+
+/** \ingroup SuperLUSupport_Module
+  * \class SuperLU
+  * \brief A sparse direct LU factorization and solver based on the SuperLU library
+  *
+  * This class allows solving sparse linear problems A.X = B via a direct LU factorization
+  * using the SuperLU library. The sparse matrix A must be square and invertible. The vectors or matrices
+  * X and B can be either dense or sparse.
+  *
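+  * A minimal usage sketch (illustrative only; the matrix and right-hand side are assumed
+  * to be filled elsewhere):
+  * \code
+  * SparseMatrix<double> A(100,100);
+  * VectorXd b(100), x;
+  * // ... fill A and b ...
+  * SuperLU<SparseMatrix<double> > solver(A);
+  * if(solver.info() == Success)
+  *   x = solver.solve(b);
+  * \endcode
+  *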
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  *
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename _MatrixType>
+class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> >
+{
+  public:
+    typedef SuperLUBase<_MatrixType,SuperLU> Base;
+    typedef _MatrixType MatrixType;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::RealScalar RealScalar;
+    typedef typename Base::Index Index;
+    typedef typename Base::IntRowVectorType IntRowVectorType;
+    typedef typename Base::IntColVectorType IntColVectorType;    
+    typedef typename Base::LUMatrixType LUMatrixType;
+    typedef TriangularView<LUMatrixType, Lower|UnitDiag>  LMatrixType;
+    typedef TriangularView<LUMatrixType,  Upper>           UMatrixType;
+
+  public:
+
+    SuperLU() : Base() { init(); }
+
+    SuperLU(const MatrixType& matrix) : Base()
+    {
+      init();
+      Base::compute(matrix);
+    }
+
+    ~SuperLU()
+    {
+    }
+    
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      * 
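+      * A minimal sketch of this reuse pattern (illustrative only; A1 and A2 are assumed to
+      * share the same sparsity pattern):
+      * \code
+      * SuperLU<SparseMatrix<double> > solver;
+      * solver.analyzePattern(A1);                    // symbolic step, done once
+      * solver.factorize(A1);  x1 = solver.solve(b1);
+      * solver.factorize(A2);  x2 = solver.solve(b2); // only the numeric step is redone
+      * \endcode
+      *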
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& matrix)
+    {
+      m_info = InvalidInput;
+      m_isInitialized = false;
+      Base::analyzePattern(matrix);
+    }
+    
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& matrix);
+    
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal */
+    template<typename Rhs,typename Dest>
+    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
+    #endif // EIGEN_PARSED_BY_DOXYGEN
+    
+    inline const LMatrixType& matrixL() const
+    {
+      if (m_extractedDataAreDirty) this->extractData();
+      return m_l;
+    }
+
+    inline const UMatrixType& matrixU() const
+    {
+      if (m_extractedDataAreDirty) this->extractData();
+      return m_u;
+    }
+
+    inline const IntColVectorType& permutationP() const
+    {
+      if (m_extractedDataAreDirty) this->extractData();
+      return m_p;
+    }
+
+    inline const IntRowVectorType& permutationQ() const
+    {
+      if (m_extractedDataAreDirty) this->extractData();
+      return m_q;
+    }
+    
+    Scalar determinant() const;
+    
+  protected:
+    
+    using Base::m_matrix;
+    using Base::m_sluOptions;
+    using Base::m_sluA;
+    using Base::m_sluB;
+    using Base::m_sluX;
+    using Base::m_p;
+    using Base::m_q;
+    using Base::m_sluEtree;
+    using Base::m_sluEqued;
+    using Base::m_sluRscale;
+    using Base::m_sluCscale;
+    using Base::m_sluL;
+    using Base::m_sluU;
+    using Base::m_sluStat;
+    using Base::m_sluFerr;
+    using Base::m_sluBerr;
+    using Base::m_l;
+    using Base::m_u;
+    
+    using Base::m_analysisIsOk;
+    using Base::m_factorizationIsOk;
+    using Base::m_extractedDataAreDirty;
+    using Base::m_isInitialized;
+    using Base::m_info;
+    
+    void init()
+    {
+      Base::init();
+      
+      set_default_options(&this->m_sluOptions);
+      m_sluOptions.PrintStat        = NO;
+      m_sluOptions.ConditionNumber  = NO;
+      m_sluOptions.Trans            = NOTRANS;
+      m_sluOptions.ColPerm          = COLAMD;
+    }
+    
+    
+  private:
+    SuperLU(SuperLU& ) { }
+};
+
+template<typename MatrixType>
+void SuperLU<MatrixType>::factorize(const MatrixType& a)
+{
+  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+  if(!m_analysisIsOk)
+  {
+    m_info = InvalidInput;
+    return;
+  }
+  
+  this->initFactorization(a);
+  
+  int info = 0;
+  RealScalar recip_pivot_growth, rcond;
+  RealScalar ferr, berr;
+
+  StatInit(&m_sluStat);
+  SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+                &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+                &m_sluL, &m_sluU,
+                NULL, 0,
+                &m_sluB, &m_sluX,
+                &recip_pivot_growth, &rcond,
+                &ferr, &berr,
+                &m_sluStat, &info, Scalar());
+  StatFree(&m_sluStat);
+
+  m_extractedDataAreDirty = true;
+
+  // FIXME how to better check for errors ???
+  m_info = info == 0 ? Success : NumericalIssue;
+  m_factorizationIsOk = true;
+}
+
+template<typename MatrixType>
+template<typename Rhs,typename Dest>
+void SuperLU<MatrixType>::_solve(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
+{
+  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+
+  const int size = m_matrix.rows();
+  const int rhsCols = b.cols();
+  eigen_assert(size==b.rows());
+
+  m_sluOptions.Trans = NOTRANS;
+  m_sluOptions.Fact = FACTORED;
+  m_sluOptions.IterRefine = NOREFINE;
+  
+
+  m_sluFerr.resize(rhsCols);
+  m_sluBerr.resize(rhsCols);
+  m_sluB = SluMatrix::Map(b.const_cast_derived());
+  m_sluX = SluMatrix::Map(x.derived());
+  
+  typename Rhs::PlainObject b_cpy;
+  if(m_sluEqued!='N')
+  {
+    b_cpy = b;
+    m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());  
+  }
+
+  StatInit(&m_sluStat);
+  int info = 0;
+  RealScalar recip_pivot_growth, rcond;
+  SuperLU_gssvx(&m_sluOptions, &m_sluA,
+                m_q.data(), m_p.data(),
+                &m_sluEtree[0], &m_sluEqued,
+                &m_sluRscale[0], &m_sluCscale[0],
+                &m_sluL, &m_sluU,
+                NULL, 0,
+                &m_sluB, &m_sluX,
+                &recip_pivot_growth, &rcond,
+                &m_sluFerr[0], &m_sluBerr[0],
+                &m_sluStat, &info, Scalar());
+  StatFree(&m_sluStat);
+  m_info = info==0 ? Success : NumericalIssue;
+}
+
+// the code of this extractData() function has been adapted from SuperLU's Matlab support code,
+//
+//  Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+//
+//  THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+//  EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+//
+template<typename MatrixType, typename Derived>
+void SuperLUBase<MatrixType,Derived>::extractData() const
+{
+  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for extracting factors, you must first call either compute() or analyzePattern()/factorize()");
+  if (m_extractedDataAreDirty)
+  {
+    int         upper;
+    int         fsupc, istart, nsupr;
+    int         lastl = 0, lastu = 0;
+    SCformat    *Lstore = static_cast<SCformat*>(m_sluL.Store);
+    NCformat    *Ustore = static_cast<NCformat*>(m_sluU.Store);
+    Scalar      *SNptr;
+
+    const int size = m_matrix.rows();
+    m_l.resize(size,size);
+    m_l.resizeNonZeros(Lstore->nnz);
+    m_u.resize(size,size);
+    m_u.resizeNonZeros(Ustore->nnz);
+
+    int* Lcol = m_l.outerIndexPtr();
+    int* Lrow = m_l.innerIndexPtr();
+    Scalar* Lval = m_l.valuePtr();
+
+    int* Ucol = m_u.outerIndexPtr();
+    int* Urow = m_u.innerIndexPtr();
+    Scalar* Uval = m_u.valuePtr();
+
+    Lcol[0] = 0;
+    Ucol[0] = 0;
+
+    /* for each supernode */
+    for (int k = 0; k <= Lstore->nsuper; ++k)
+    {
+      fsupc   = L_FST_SUPC(k);
+      istart  = L_SUB_START(fsupc);
+      nsupr   = L_SUB_START(fsupc+1) - istart;
+      upper   = 1;
+
+      /* for each column in the supernode */
+      for (int j = fsupc; j < L_FST_SUPC(k+1); ++j)
+      {
+        SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)];
+
+        /* Extract U */
+        for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i)
+        {
+          Uval[lastu] = ((Scalar*)Ustore->nzval)[i];
+          /* Matlab doesn't like explicit zero. */
+          if (Uval[lastu] != 0.0)
+            Urow[lastu++] = U_SUB(i);
+        }
+        for (int i = 0; i < upper; ++i)
+        {
+          /* upper triangle in the supernode */
+          Uval[lastu] = SNptr[i];
+          /* Matlab doesn't like explicit zero. */
+          if (Uval[lastu] != 0.0)
+            Urow[lastu++] = L_SUB(istart+i);
+        }
+        Ucol[j+1] = lastu;
+
+        /* Extract L */
+        Lval[lastl] = 1.0; /* unit diagonal */
+        Lrow[lastl++] = L_SUB(istart + upper - 1);
+        for (int i = upper; i < nsupr; ++i)
+        {
+          Lval[lastl] = SNptr[i];
+          /* Matlab doesn't like explicit zero. */
+          if (Lval[lastl] != 0.0)
+            Lrow[lastl++] = L_SUB(istart+i);
+        }
+        Lcol[j+1] = lastl;
+
+        ++upper;
+      } /* for j ... */
+
+    } /* for k ... */
+
+    // squeeze the matrices:
+    m_l.resizeNonZeros(lastl);
+    m_u.resizeNonZeros(lastu);
+
+    m_extractedDataAreDirty = false;
+  }
+}
+
+template<typename MatrixType>
+typename SuperLU<MatrixType>::Scalar SuperLU<MatrixType>::determinant() const
+{
+  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for computing the determinant, you must first call either compute() or analyzePattern()/factorize()");
+  
+  if (m_extractedDataAreDirty)
+    this->extractData();
+
+  Scalar det = Scalar(1);
+  for (int j=0; j<m_u.cols(); ++j)
+  {
+    if (m_u.outerIndexPtr()[j+1]-m_u.outerIndexPtr()[j] > 0)
+    {
+      int lastId = m_u.outerIndexPtr()[j+1]-1;
+      eigen_assert(m_u.innerIndexPtr()[lastId]<=j);
+      if (m_u.innerIndexPtr()[lastId]==j)
+        det *= m_u.valuePtr()[lastId];
+    }
+  }
+  if(m_sluEqued!='N')
+    return det/m_sluRscale.prod()/m_sluCscale.prod();
+  else
+    return det;
+}
+
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+#define EIGEN_SUPERLU_HAS_ILU
+#endif
+
+#ifdef EIGEN_SUPERLU_HAS_ILU
+
+/** \ingroup SuperLUSupport_Module
+  * \class SuperILU
+  * \brief A sparse direct \b incomplete LU factorization and solver based on the SuperLU library
+  *
+  * This class allows computing an approximate solution of sparse linear problems A.X = B via an incomplete LU factorization
+  * using the SuperLU library. This class is intended to be used as a preconditioner for iterative linear solvers.
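+  *
+  * A minimal usage sketch (illustrative only; the matrix and right-hand side are assumed
+  * to be filled elsewhere):
+  * \code
+  * SparseMatrix<double> A(100,100);
+  * VectorXd b(100), x;
+  * // ... fill A and b ...
+  * SuperILU<SparseMatrix<double> > ilu(A);
+  * if(ilu.info() == Success)
+  *   x = ilu.solve(b);   // approximate solution of A.x = b
+  * \endcode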
+  *
+  * \warning This class requires SuperLU 4 or later.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  *
+  * \sa \ref TutorialSparseDirectSolvers, class ConjugateGradient, class BiCGSTAB
+  */
+
+template<typename _MatrixType>
+class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> >
+{
+  public:
+    typedef SuperLUBase<_MatrixType,SuperILU> Base;
+    typedef _MatrixType MatrixType;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::RealScalar RealScalar;
+    typedef typename Base::Index Index;
+
+  public:
+
+    SuperILU() : Base() { init(); }
+
+    SuperILU(const MatrixType& matrix) : Base()
+    {
+      init();
+      Base::compute(matrix);
+    }
+
+    ~SuperILU()
+    {
+    }
+    
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      * 
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& matrix)
+    {
+      Base::analyzePattern(matrix);
+    }
+    
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& matrix);
+    
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal */
+    template<typename Rhs,typename Dest>
+    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
+    #endif // EIGEN_PARSED_BY_DOXYGEN
+    
+  protected:
+    
+    using Base::m_matrix;
+    using Base::m_sluOptions;
+    using Base::m_sluA;
+    using Base::m_sluB;
+    using Base::m_sluX;
+    using Base::m_p;
+    using Base::m_q;
+    using Base::m_sluEtree;
+    using Base::m_sluEqued;
+    using Base::m_sluRscale;
+    using Base::m_sluCscale;
+    using Base::m_sluL;
+    using Base::m_sluU;
+    using Base::m_sluStat;
+    using Base::m_sluFerr;
+    using Base::m_sluBerr;
+    using Base::m_l;
+    using Base::m_u;
+    
+    using Base::m_analysisIsOk;
+    using Base::m_factorizationIsOk;
+    using Base::m_extractedDataAreDirty;
+    using Base::m_isInitialized;
+    using Base::m_info;
+
+    void init()
+    {
+      Base::init();
+      
+      ilu_set_default_options(&m_sluOptions);
+      m_sluOptions.PrintStat        = NO;
+      m_sluOptions.ConditionNumber  = NO;
+      m_sluOptions.Trans            = NOTRANS;
+      m_sluOptions.ColPerm          = MMD_AT_PLUS_A;
+      
+      // no attempt to preserve column sum
+      m_sluOptions.ILU_MILU = SILU;
+      // only basic ILU(k) support -- no direct control over memory consumption
+      // better to use ILU_DropRule = DROP_BASIC | DROP_AREA
+      // and set ILU_FillFactor to max memory growth
+      m_sluOptions.ILU_DropRule = DROP_BASIC;
+      m_sluOptions.ILU_DropTol = NumTraits<Scalar>::dummy_precision()*10;
+    }
+    
+  private:
+    SuperILU(SuperILU& ) { }
+};
+
+template<typename MatrixType>
+void SuperILU<MatrixType>::factorize(const MatrixType& a)
+{
+  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+  if(!m_analysisIsOk)
+  {
+    m_info = InvalidInput;
+    return;
+  }
+  
+  this->initFactorization(a);
+
+  int info = 0;
+  RealScalar recip_pivot_growth, rcond;
+
+  StatInit(&m_sluStat);
+  SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+                &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+                &m_sluL, &m_sluU,
+                NULL, 0,
+                &m_sluB, &m_sluX,
+                &recip_pivot_growth, &rcond,
+                &m_sluStat, &info, Scalar());
+  StatFree(&m_sluStat);
+
+  // FIXME how to better check for errors ???
+  m_info = info == 0 ? Success : NumericalIssue;
+  m_factorizationIsOk = true;
+}
+
+template<typename MatrixType>
+template<typename Rhs,typename Dest>
+void SuperILU<MatrixType>::_solve(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
+{
+  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+
+  const int size = m_matrix.rows();
+  const int rhsCols = b.cols();
+  eigen_assert(size==b.rows());
+
+  m_sluOptions.Trans = NOTRANS;
+  m_sluOptions.Fact = FACTORED;
+  m_sluOptions.IterRefine = NOREFINE;
+
+  m_sluFerr.resize(rhsCols);
+  m_sluBerr.resize(rhsCols);
+  m_sluB = SluMatrix::Map(b.const_cast_derived());
+  m_sluX = SluMatrix::Map(x.derived());
+
+  typename Rhs::PlainObject b_cpy;
+  if(m_sluEqued!='N')
+  {
+    b_cpy = b;
+    m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());  
+  }
+  
+  int info = 0;
+  RealScalar recip_pivot_growth, rcond;
+
+  StatInit(&m_sluStat);
+  SuperLU_gsisx(&m_sluOptions, &m_sluA,
+                m_q.data(), m_p.data(),
+                &m_sluEtree[0], &m_sluEqued,
+                &m_sluRscale[0], &m_sluCscale[0],
+                &m_sluL, &m_sluU,
+                NULL, 0,
+                &m_sluB, &m_sluX,
+                &recip_pivot_growth, &rcond,
+                &m_sluStat, &info, Scalar());
+  StatFree(&m_sluStat);
+
+  m_info = info==0 ? Success : NumericalIssue;
+}
+#endif
+
+namespace internal {
+  
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
+  : solve_retval_base<SuperLUBase<_MatrixType,Derived>, Rhs>
+{
+  typedef SuperLUBase<_MatrixType,Derived> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec().derived()._solve(rhs(),dst);
+  }
+};
+
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct sparse_solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
+  : sparse_solve_retval_base<SuperLUBase<_MatrixType,Derived>, Rhs>
+{
+  typedef SuperLUBase<_MatrixType,Derived> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec().derived()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SUPERLUSUPPORT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/UmfPackSupport/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/UmfPackSupport/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/UmfPackSupport/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/UmfPackSupport/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h b/resources/3rdParty/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h
rename to resources/3rdParty/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h
diff --git a/resources/3rdparty/eigen/Eigen/src/misc/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/misc/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/misc/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/misc/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/misc/Image.h b/resources/3rdParty/eigen/Eigen/src/misc/Image.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/misc/Image.h
rename to resources/3rdParty/eigen/Eigen/src/misc/Image.h
diff --git a/resources/3rdparty/eigen/Eigen/src/misc/Kernel.h b/resources/3rdParty/eigen/Eigen/src/misc/Kernel.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/misc/Kernel.h
rename to resources/3rdParty/eigen/Eigen/src/misc/Kernel.h
diff --git a/resources/3rdparty/eigen/Eigen/src/misc/Solve.h b/resources/3rdParty/eigen/Eigen/src/misc/Solve.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/misc/Solve.h
rename to resources/3rdParty/eigen/Eigen/src/misc/Solve.h
diff --git a/resources/3rdparty/eigen/Eigen/src/misc/SparseSolve.h b/resources/3rdParty/eigen/Eigen/src/misc/SparseSolve.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/misc/SparseSolve.h
rename to resources/3rdParty/eigen/Eigen/src/misc/SparseSolve.h
diff --git a/resources/3rdparty/eigen/Eigen/src/misc/blas.h b/resources/3rdParty/eigen/Eigen/src/misc/blas.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/misc/blas.h
rename to resources/3rdParty/eigen/Eigen/src/misc/blas.h
diff --git a/resources/3rdparty/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/resources/3rdParty/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h
rename to resources/3rdParty/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h
diff --git a/resources/3rdparty/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/resources/3rdParty/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h
rename to resources/3rdParty/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h
diff --git a/resources/3rdparty/eigen/Eigen/src/plugins/BlockMethods.h b/resources/3rdParty/eigen/Eigen/src/plugins/BlockMethods.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/plugins/BlockMethods.h
rename to resources/3rdParty/eigen/Eigen/src/plugins/BlockMethods.h
diff --git a/resources/3rdparty/eigen/Eigen/src/plugins/CMakeLists.txt b/resources/3rdParty/eigen/Eigen/src/plugins/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/plugins/CMakeLists.txt
rename to resources/3rdParty/eigen/Eigen/src/plugins/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h b/resources/3rdParty/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h
rename to resources/3rdParty/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h
diff --git a/resources/3rdparty/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h b/resources/3rdParty/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h
rename to resources/3rdParty/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h
diff --git a/resources/3rdparty/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/resources/3rdParty/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h
rename to resources/3rdParty/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h
diff --git a/resources/3rdparty/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h b/resources/3rdParty/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h
similarity index 100%
rename from resources/3rdparty/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h
rename to resources/3rdParty/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h
diff --git a/resources/3rdparty/eigen/INSTALL b/resources/3rdParty/eigen/INSTALL
similarity index 100%
rename from resources/3rdparty/eigen/INSTALL
rename to resources/3rdParty/eigen/INSTALL
diff --git a/resources/3rdparty/eigen/bench/BenchSparseUtil.h b/resources/3rdParty/eigen/bench/BenchSparseUtil.h
similarity index 100%
rename from resources/3rdparty/eigen/bench/BenchSparseUtil.h
rename to resources/3rdParty/eigen/bench/BenchSparseUtil.h
diff --git a/resources/3rdparty/eigen/bench/BenchTimer.h b/resources/3rdParty/eigen/bench/BenchTimer.h
similarity index 100%
rename from resources/3rdparty/eigen/bench/BenchTimer.h
rename to resources/3rdParty/eigen/bench/BenchTimer.h
diff --git a/resources/3rdparty/eigen/bench/BenchUtil.h b/resources/3rdParty/eigen/bench/BenchUtil.h
similarity index 100%
rename from resources/3rdparty/eigen/bench/BenchUtil.h
rename to resources/3rdParty/eigen/bench/BenchUtil.h
diff --git a/resources/3rdparty/eigen/bench/README.txt b/resources/3rdParty/eigen/bench/README.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/README.txt
rename to resources/3rdParty/eigen/bench/README.txt
diff --git a/resources/3rdparty/eigen/bench/basicbench.cxxlist b/resources/3rdParty/eigen/bench/basicbench.cxxlist
similarity index 100%
rename from resources/3rdparty/eigen/bench/basicbench.cxxlist
rename to resources/3rdParty/eigen/bench/basicbench.cxxlist
diff --git a/resources/3rdparty/eigen/bench/basicbenchmark.cpp b/resources/3rdParty/eigen/bench/basicbenchmark.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/basicbenchmark.cpp
rename to resources/3rdParty/eigen/bench/basicbenchmark.cpp
diff --git a/resources/3rdparty/eigen/bench/basicbenchmark.h b/resources/3rdParty/eigen/bench/basicbenchmark.h
similarity index 100%
rename from resources/3rdparty/eigen/bench/basicbenchmark.h
rename to resources/3rdParty/eigen/bench/basicbenchmark.h
diff --git a/resources/3rdparty/eigen/bench/benchBlasGemm.cpp b/resources/3rdParty/eigen/bench/benchBlasGemm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/benchBlasGemm.cpp
rename to resources/3rdParty/eigen/bench/benchBlasGemm.cpp
diff --git a/resources/3rdparty/eigen/bench/benchCholesky.cpp b/resources/3rdParty/eigen/bench/benchCholesky.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/benchCholesky.cpp
rename to resources/3rdParty/eigen/bench/benchCholesky.cpp
diff --git a/resources/3rdparty/eigen/bench/benchEigenSolver.cpp b/resources/3rdParty/eigen/bench/benchEigenSolver.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/benchEigenSolver.cpp
rename to resources/3rdParty/eigen/bench/benchEigenSolver.cpp
diff --git a/resources/3rdparty/eigen/bench/benchFFT.cpp b/resources/3rdParty/eigen/bench/benchFFT.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/benchFFT.cpp
rename to resources/3rdParty/eigen/bench/benchFFT.cpp
diff --git a/resources/3rdparty/eigen/bench/benchVecAdd.cpp b/resources/3rdParty/eigen/bench/benchVecAdd.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/benchVecAdd.cpp
rename to resources/3rdParty/eigen/bench/benchVecAdd.cpp
diff --git a/resources/3rdParty/eigen/bench/bench_gemm.cpp b/resources/3rdParty/eigen/bench/bench_gemm.cpp
new file mode 100644
index 000000000..98ac34e20
--- /dev/null
+++ b/resources/3rdParty/eigen/bench/bench_gemm.cpp
@@ -0,0 +1,271 @@
+
+// g++-4.4 bench_gemm.cpp -I .. -O2 -DNDEBUG -lrt -fopenmp && OMP_NUM_THREADS=2  ./a.out
+// icpc bench_gemm.cpp -I .. -O3 -DNDEBUG -lrt -openmp  && OMP_NUM_THREADS=2  ./a.out
+
+#include <iostream>
+#include <Eigen/Core>
+#include <bench/BenchTimer.h>
+
+using namespace std;
+using namespace Eigen;
+
+#ifndef SCALAR
+// #define SCALAR std::complex<float>
+#define SCALAR float
+#endif
+
+typedef SCALAR Scalar;
+typedef NumTraits<Scalar>::Real RealScalar;
+typedef Matrix<RealScalar,Dynamic,Dynamic> A;
+typedef Matrix</*Real*/Scalar,Dynamic,Dynamic> B;
+typedef Matrix<Scalar,Dynamic,Dynamic> C;
+typedef Matrix<RealScalar,Dynamic,Dynamic> M;
+
+#ifdef HAVE_BLAS
+
+extern "C" {
+  #include <bench/btl/libs/C_BLAS/blas.h>
+}
+
+static float fone = 1;
+static float fzero = 0;
+static double done = 1;
+static double szero = 0;
+static std::complex<float> cfone = 1;
+static std::complex<float> cfzero = 0;
+static std::complex<double> cdone = 1;
+static std::complex<double> cdzero = 0;
+static char notrans = 'N';
+static char trans = 'T';  
+static char nonunit = 'N';
+static char lower = 'L';
+static char right = 'R';
+static int intone = 1;
+
+void blas_gemm(const MatrixXf& a, const MatrixXf& b, MatrixXf& c)
+{
+  int M = c.rows(); int N = c.cols(); int K = a.cols();
+  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
+
+  sgemm_(&notrans,&notrans,&M,&N,&K,&fone,
+         const_cast<float*>(a.data()),&lda,
+         const_cast<float*>(b.data()),&ldb,&fone,
+         c.data(),&ldc);
+}
+
+EIGEN_DONT_INLINE void blas_gemm(const MatrixXd& a, const MatrixXd& b, MatrixXd& c)
+{
+  int M = c.rows(); int N = c.cols(); int K = a.cols();
+  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
+
+  dgemm_(&notrans,&notrans,&M,&N,&K,&done,
+         const_cast<double*>(a.data()),&lda,
+         const_cast<double*>(b.data()),&ldb,&done,
+         c.data(),&ldc);
+}
+
+void blas_gemm(const MatrixXcf& a, const MatrixXcf& b, MatrixXcf& c)
+{
+  int M = c.rows(); int N = c.cols(); int K = a.cols();
+  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
+
+  cgemm_(&notrans,&notrans,&M,&N,&K,(float*)&cfone,
+         const_cast<float*>((const float*)a.data()),&lda,
+         const_cast<float*>((const float*)b.data()),&ldb,(float*)&cfone,
+         (float*)c.data(),&ldc);
+}
+
+void blas_gemm(const MatrixXcd& a, const MatrixXcd& b, MatrixXcd& c)
+{
+  int M = c.rows(); int N = c.cols(); int K = a.cols();
+  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
+
+  zgemm_(&notrans,&notrans,&M,&N,&K,(double*)&cdone,
+         const_cast<double*>((const double*)a.data()),&lda,
+         const_cast<double*>((const double*)b.data()),&ldb,(double*)&cdone,
+         (double*)c.data(),&ldc);
+}
+
+
+
+#endif
+
+void matlab_cplx_cplx(const M& ar, const M& ai, const M& br, const M& bi, M& cr, M& ci)
+{
+  cr.noalias() += ar * br;
+  cr.noalias() -= ai * bi;
+  ci.noalias() += ar * bi;
+  ci.noalias() += ai * br;
+}
+
+void matlab_real_cplx(const M& a, const M& br, const M& bi, M& cr, M& ci)
+{
+  cr.noalias() += a * br;
+  ci.noalias() += a * bi;
+}
+
+void matlab_cplx_real(const M& ar, const M& ai, const M& b, M& cr, M& ci)
+{
+  cr.noalias() += ar * b;
+  ci.noalias() += ai * b;
+}
+
+template<typename A, typename B, typename C>
+EIGEN_DONT_INLINE void gemm(const A& a, const B& b, C& c)
+{
+ c.noalias() += a * b;
+}
+
+int main(int argc, char ** argv)
+{
+  std::ptrdiff_t l1 = internal::queryL1CacheSize();
+  std::ptrdiff_t l2 = internal::queryTopLevelCacheSize();
+  std::cout << "L1 cache size     = " << (l1>0 ? l1/1024 : -1) << " KB\n";
+  std::cout << "L2/L3 cache size  = " << (l2>0 ? l2/1024 : -1) << " KB\n";
+  typedef internal::gebp_traits<Scalar,Scalar> Traits;
+  std::cout << "Register blocking = " << Traits::mr << " x " << Traits::nr << "\n";
+
+  int rep = 1;    // number of repetitions per try
+  int tries = 2;  // number of tries, we keep the best
+
+  int s = 2048;
+  int cache_size = -1;
+
+  bool need_help = false;
+  for (int i=1; i<argc; ++i)
+  {
+    if(argv[i][0]=='s')
+      s = atoi(argv[i]+1);
+    else if(argv[i][0]=='c')
+      cache_size = atoi(argv[i]+1);
+    else if(argv[i][0]=='t')
+      tries = atoi(argv[i]+1);
+    else if(argv[i][0]=='p')
+      rep = atoi(argv[i]+1);
+    else
+      need_help = true;
+  }
+
+  if(need_help)
+  {
+    std::cout << argv[0] << " s<matrix size> c<cache size> t<nb tries> p<nb repeats>\n";
+    return 1;
+  }
+
+  if(cache_size>0)
+    setCpuCacheSizes(cache_size,96*cache_size);
+
+  int m = s;
+  int n = s;
+  int p = s;
+  A a(m,p); a.setRandom();
+  B b(p,n); b.setRandom();
+  C c(m,n); c.setOnes();
+  C rc = c;
+
+  std::cout << "Matrix sizes = " << m << "x" << p << " * " << p << "x" << n << "\n";
+  std::ptrdiff_t mc(m), nc(n), kc(p);
+  internal::computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
+  std::cout << "blocking size (mc x kc) = " << mc << " x " << kc << "\n";
+
+  C r = c;
+
+  // check the parallel product is correct
+  #if defined EIGEN_HAS_OPENMP
+  int procs = omp_get_max_threads();
+  if(procs>1)
+  {
+    #ifdef HAVE_BLAS
+    blas_gemm(a,b,r);
+    #else
+    omp_set_num_threads(1);
+    r.noalias() += a * b;
+    omp_set_num_threads(procs);
+    #endif
+    c.noalias() += a * b;
+    if(!r.isApprox(c)) std::cerr << "Warning, your parallel product is crap!\n\n";
+  }
+  #elif defined HAVE_BLAS
+    blas_gemm(a,b,r);
+    c.noalias() += a * b;
+    if(!r.isApprox(c)) std::cerr << "Warning, your product is crap!\n\n";
+  #else
+    gemm(a,b,c);
+    r.noalias() += a.cast<Scalar>() * b.cast<Scalar>();
+    if(!r.isApprox(c)) std::cerr << "Warning, your product is crap!\n\n";
+  #endif
+
+  #ifdef HAVE_BLAS
+  BenchTimer tblas;
+  c = rc;
+  BENCH(tblas, tries, rep, blas_gemm(a,b,c));
+  std::cout << "blas  cpu         " << tblas.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/tblas.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << tblas.total(CPU_TIMER)  << "s)\n";
+  std::cout << "blas  real        " << tblas.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/tblas.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << tblas.total(REAL_TIMER) << "s)\n";
+  #endif
+
+  BenchTimer tmt;
+  c = rc;
+  BENCH(tmt, tries, rep, gemm(a,b,c));
+  std::cout << "eigen cpu         " << tmt.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << tmt.total(CPU_TIMER)  << "s)\n";
+  std::cout << "eigen real        " << tmt.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";
+
+  #ifdef EIGEN_HAS_OPENMP
+  if(procs>1)
+  {
+    BenchTimer tmono;
+    omp_set_num_threads(1);
+    Eigen::internal::setNbThreads(1);
+    c = rc;
+    BENCH(tmono, tries, rep, gemm(a,b,c));
+    std::cout << "eigen mono cpu    " << tmono.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/tmono.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << tmono.total(CPU_TIMER)  << "s)\n";
+    std::cout << "eigen mono real   " << tmono.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/tmono.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << tmono.total(REAL_TIMER) << "s)\n";
+    std::cout << "mt speed up x" << tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER)  << " => " << (100.0*tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER))/procs << "%\n";
+  }
+  #endif
+  
+  #ifdef DECOUPLED
+  if((NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
+  {
+    M ar(m,p); ar.setRandom();
+    M ai(m,p); ai.setRandom();
+    M br(p,n); br.setRandom();
+    M bi(p,n); bi.setRandom();
+    M cr(m,n); cr.setRandom();
+    M ci(m,n); ci.setRandom();
+    
+    BenchTimer t;
+    BENCH(t, tries, rep, matlab_cplx_cplx(ar,ai,br,bi,cr,ci));
+    std::cout << "\"matlab\" cpu    " << t.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << t.total(CPU_TIMER)  << "s)\n";
+    std::cout << "\"matlab\" real   " << t.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
+  }
+  if((!NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
+  {
+    M a(m,p);  a.setRandom();
+    M br(p,n); br.setRandom();
+    M bi(p,n); bi.setRandom();
+    M cr(m,n); cr.setRandom();
+    M ci(m,n); ci.setRandom();
+    
+    BenchTimer t;
+    BENCH(t, tries, rep, matlab_real_cplx(a,br,bi,cr,ci));
+    std::cout << "\"matlab\" cpu    " << t.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << t.total(CPU_TIMER)  << "s)\n";
+    std::cout << "\"matlab\" real   " << t.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
+  }
+  if((NumTraits<A::Scalar>::IsComplex) && (!NumTraits<B::Scalar>::IsComplex))
+  {
+    M ar(m,p); ar.setRandom();
+    M ai(m,p); ai.setRandom();
+    M b(p,n);  b.setRandom();
+    M cr(m,n); cr.setRandom();
+    M ci(m,n); ci.setRandom();
+    
+    BenchTimer t;
+    BENCH(t, tries, rep, matlab_cplx_real(ar,ai,b,cr,ci));
+    std::cout << "\"matlab\" cpu    " << t.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << t.total(CPU_TIMER)  << "s)\n";
+    std::cout << "\"matlab\" real   " << t.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
+  }
+  #endif
+
+  return 0;
+}
+
diff --git a/resources/3rdparty/eigen/bench/bench_multi_compilers.sh b/resources/3rdParty/eigen/bench/bench_multi_compilers.sh
similarity index 100%
rename from resources/3rdparty/eigen/bench/bench_multi_compilers.sh
rename to resources/3rdParty/eigen/bench/bench_multi_compilers.sh
diff --git a/resources/3rdparty/eigen/bench/bench_norm.cpp b/resources/3rdParty/eigen/bench/bench_norm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/bench_norm.cpp
rename to resources/3rdParty/eigen/bench/bench_norm.cpp
diff --git a/resources/3rdparty/eigen/bench/bench_reverse.cpp b/resources/3rdParty/eigen/bench/bench_reverse.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/bench_reverse.cpp
rename to resources/3rdParty/eigen/bench/bench_reverse.cpp
diff --git a/resources/3rdparty/eigen/bench/bench_sum.cpp b/resources/3rdParty/eigen/bench/bench_sum.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/bench_sum.cpp
rename to resources/3rdParty/eigen/bench/bench_sum.cpp
diff --git a/resources/3rdparty/eigen/bench/bench_unrolling b/resources/3rdParty/eigen/bench/bench_unrolling
similarity index 100%
rename from resources/3rdparty/eigen/bench/bench_unrolling
rename to resources/3rdParty/eigen/bench/bench_unrolling
diff --git a/resources/3rdparty/eigen/bench/benchmark.cpp b/resources/3rdParty/eigen/bench/benchmark.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/benchmark.cpp
rename to resources/3rdParty/eigen/bench/benchmark.cpp
diff --git a/resources/3rdparty/eigen/bench/benchmarkSlice.cpp b/resources/3rdParty/eigen/bench/benchmarkSlice.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/benchmarkSlice.cpp
rename to resources/3rdParty/eigen/bench/benchmarkSlice.cpp
diff --git a/resources/3rdparty/eigen/bench/benchmarkX.cpp b/resources/3rdParty/eigen/bench/benchmarkX.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/benchmarkX.cpp
rename to resources/3rdParty/eigen/bench/benchmarkX.cpp
diff --git a/resources/3rdparty/eigen/bench/benchmarkXcwise.cpp b/resources/3rdParty/eigen/bench/benchmarkXcwise.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/benchmarkXcwise.cpp
rename to resources/3rdParty/eigen/bench/benchmarkXcwise.cpp
diff --git a/resources/3rdparty/eigen/bench/benchmark_suite b/resources/3rdParty/eigen/bench/benchmark_suite
similarity index 100%
rename from resources/3rdparty/eigen/bench/benchmark_suite
rename to resources/3rdParty/eigen/bench/benchmark_suite
diff --git a/resources/3rdparty/eigen/bench/btl/CMakeLists.txt b/resources/3rdParty/eigen/bench/btl/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/CMakeLists.txt
rename to resources/3rdParty/eigen/bench/btl/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/bench/btl/COPYING b/resources/3rdParty/eigen/bench/btl/COPYING
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/COPYING
rename to resources/3rdParty/eigen/bench/btl/COPYING
diff --git a/resources/3rdparty/eigen/bench/btl/README b/resources/3rdParty/eigen/bench/btl/README
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/README
rename to resources/3rdParty/eigen/bench/btl/README
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_aat_product.hh b/resources/3rdParty/eigen/bench/btl/actions/action_aat_product.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_aat_product.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_aat_product.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_ata_product.hh b/resources/3rdParty/eigen/bench/btl/actions/action_ata_product.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_ata_product.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_ata_product.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_atv_product.hh b/resources/3rdParty/eigen/bench/btl/actions/action_atv_product.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_atv_product.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_atv_product.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_axpby.hh b/resources/3rdParty/eigen/bench/btl/actions/action_axpby.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_axpby.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_axpby.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_axpy.hh b/resources/3rdParty/eigen/bench/btl/actions/action_axpy.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_axpy.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_axpy.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_cholesky.hh b/resources/3rdParty/eigen/bench/btl/actions/action_cholesky.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_cholesky.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_cholesky.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_ger.hh b/resources/3rdParty/eigen/bench/btl/actions/action_ger.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_ger.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_ger.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_hessenberg.hh b/resources/3rdParty/eigen/bench/btl/actions/action_hessenberg.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_hessenberg.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_hessenberg.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_lu_decomp.hh b/resources/3rdParty/eigen/bench/btl/actions/action_lu_decomp.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_lu_decomp.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_lu_decomp.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_lu_solve.hh b/resources/3rdParty/eigen/bench/btl/actions/action_lu_solve.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_lu_solve.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_lu_solve.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_matrix_matrix_product.hh b/resources/3rdParty/eigen/bench/btl/actions/action_matrix_matrix_product.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_matrix_matrix_product.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_matrix_matrix_product.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_matrix_matrix_product_bis.hh b/resources/3rdParty/eigen/bench/btl/actions/action_matrix_matrix_product_bis.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_matrix_matrix_product_bis.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_matrix_matrix_product_bis.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_matrix_vector_product.hh b/resources/3rdParty/eigen/bench/btl/actions/action_matrix_vector_product.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_matrix_vector_product.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_matrix_vector_product.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_partial_lu.hh b/resources/3rdParty/eigen/bench/btl/actions/action_partial_lu.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_partial_lu.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_partial_lu.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_rot.hh b/resources/3rdParty/eigen/bench/btl/actions/action_rot.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_rot.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_rot.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_symv.hh b/resources/3rdParty/eigen/bench/btl/actions/action_symv.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_symv.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_symv.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_syr2.hh b/resources/3rdParty/eigen/bench/btl/actions/action_syr2.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_syr2.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_syr2.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_trisolve.hh b/resources/3rdParty/eigen/bench/btl/actions/action_trisolve.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_trisolve.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_trisolve.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_trisolve_matrix.hh b/resources/3rdParty/eigen/bench/btl/actions/action_trisolve_matrix.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_trisolve_matrix.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_trisolve_matrix.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/action_trmm.hh b/resources/3rdParty/eigen/bench/btl/actions/action_trmm.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/action_trmm.hh
rename to resources/3rdParty/eigen/bench/btl/actions/action_trmm.hh
diff --git a/resources/3rdparty/eigen/bench/btl/actions/basic_actions.hh b/resources/3rdParty/eigen/bench/btl/actions/basic_actions.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/actions/basic_actions.hh
rename to resources/3rdParty/eigen/bench/btl/actions/basic_actions.hh
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/FindACML.cmake b/resources/3rdParty/eigen/bench/btl/cmake/FindACML.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/FindACML.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/FindACML.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/FindATLAS.cmake b/resources/3rdParty/eigen/bench/btl/cmake/FindATLAS.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/FindATLAS.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/FindATLAS.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/FindBlitz.cmake b/resources/3rdParty/eigen/bench/btl/cmake/FindBlitz.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/FindBlitz.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/FindBlitz.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/FindCBLAS.cmake b/resources/3rdParty/eigen/bench/btl/cmake/FindCBLAS.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/FindCBLAS.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/FindCBLAS.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/FindGMM.cmake b/resources/3rdParty/eigen/bench/btl/cmake/FindGMM.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/FindGMM.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/FindGMM.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/FindGOTO.cmake b/resources/3rdParty/eigen/bench/btl/cmake/FindGOTO.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/FindGOTO.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/FindGOTO.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/FindGOTO2.cmake b/resources/3rdParty/eigen/bench/btl/cmake/FindGOTO2.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/FindGOTO2.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/FindGOTO2.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/FindMKL.cmake b/resources/3rdParty/eigen/bench/btl/cmake/FindMKL.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/FindMKL.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/FindMKL.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/FindMTL4.cmake b/resources/3rdParty/eigen/bench/btl/cmake/FindMTL4.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/FindMTL4.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/FindMTL4.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/FindPackageHandleStandardArgs.cmake b/resources/3rdParty/eigen/bench/btl/cmake/FindPackageHandleStandardArgs.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/FindPackageHandleStandardArgs.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/FindPackageHandleStandardArgs.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/FindTvmet.cmake b/resources/3rdParty/eigen/bench/btl/cmake/FindTvmet.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/FindTvmet.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/FindTvmet.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/cmake/MacroOptionalAddSubdirectory.cmake b/resources/3rdParty/eigen/bench/btl/cmake/MacroOptionalAddSubdirectory.cmake
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/cmake/MacroOptionalAddSubdirectory.cmake
rename to resources/3rdParty/eigen/bench/btl/cmake/MacroOptionalAddSubdirectory.cmake
diff --git a/resources/3rdparty/eigen/bench/btl/data/CMakeLists.txt b/resources/3rdParty/eigen/bench/btl/data/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/CMakeLists.txt
rename to resources/3rdParty/eigen/bench/btl/data/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/bench/btl/data/action_settings.txt b/resources/3rdParty/eigen/bench/btl/data/action_settings.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/action_settings.txt
rename to resources/3rdParty/eigen/bench/btl/data/action_settings.txt
diff --git a/resources/3rdparty/eigen/bench/btl/data/gnuplot_common_settings.hh b/resources/3rdParty/eigen/bench/btl/data/gnuplot_common_settings.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/gnuplot_common_settings.hh
rename to resources/3rdParty/eigen/bench/btl/data/gnuplot_common_settings.hh
diff --git a/resources/3rdparty/eigen/bench/btl/data/go_mean b/resources/3rdParty/eigen/bench/btl/data/go_mean
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/go_mean
rename to resources/3rdParty/eigen/bench/btl/data/go_mean
diff --git a/resources/3rdparty/eigen/bench/btl/data/mean.cxx b/resources/3rdParty/eigen/bench/btl/data/mean.cxx
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/mean.cxx
rename to resources/3rdParty/eigen/bench/btl/data/mean.cxx
diff --git a/resources/3rdparty/eigen/bench/btl/data/mk_gnuplot_script.sh b/resources/3rdParty/eigen/bench/btl/data/mk_gnuplot_script.sh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/mk_gnuplot_script.sh
rename to resources/3rdParty/eigen/bench/btl/data/mk_gnuplot_script.sh
diff --git a/resources/3rdparty/eigen/bench/btl/data/mk_mean_script.sh b/resources/3rdParty/eigen/bench/btl/data/mk_mean_script.sh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/mk_mean_script.sh
rename to resources/3rdParty/eigen/bench/btl/data/mk_mean_script.sh
diff --git a/resources/3rdparty/eigen/bench/btl/data/mk_new_gnuplot.sh b/resources/3rdParty/eigen/bench/btl/data/mk_new_gnuplot.sh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/mk_new_gnuplot.sh
rename to resources/3rdParty/eigen/bench/btl/data/mk_new_gnuplot.sh
diff --git a/resources/3rdparty/eigen/bench/btl/data/perlib_plot_settings.txt b/resources/3rdParty/eigen/bench/btl/data/perlib_plot_settings.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/perlib_plot_settings.txt
rename to resources/3rdParty/eigen/bench/btl/data/perlib_plot_settings.txt
diff --git a/resources/3rdparty/eigen/bench/btl/data/regularize.cxx b/resources/3rdParty/eigen/bench/btl/data/regularize.cxx
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/regularize.cxx
rename to resources/3rdParty/eigen/bench/btl/data/regularize.cxx
diff --git a/resources/3rdparty/eigen/bench/btl/data/smooth.cxx b/resources/3rdParty/eigen/bench/btl/data/smooth.cxx
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/smooth.cxx
rename to resources/3rdParty/eigen/bench/btl/data/smooth.cxx
diff --git a/resources/3rdparty/eigen/bench/btl/data/smooth_all.sh b/resources/3rdParty/eigen/bench/btl/data/smooth_all.sh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/data/smooth_all.sh
rename to resources/3rdParty/eigen/bench/btl/data/smooth_all.sh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/bench.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/bench.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/bench.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/bench.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/bench_parameter.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/bench_parameter.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/bench_parameter.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/bench_parameter.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/btl.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/btl.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/btl.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/btl.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/init/init_function.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/init/init_function.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/init/init_function.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/init/init_function.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/init/init_matrix.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/init/init_matrix.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/init/init_matrix.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/init/init_matrix.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/init/init_vector.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/init/init_vector.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/init/init_vector.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/init/init_vector.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/static/bench_static.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/static/bench_static.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/static/bench_static.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/static/bench_static.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/static/intel_bench_fixed_size.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/static/intel_bench_fixed_size.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/static/intel_bench_fixed_size.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/static/intel_bench_fixed_size.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/static/static_size_generator.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/static/static_size_generator.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/static/static_size_generator.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/static/static_size_generator.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/timers/STL_perf_analyzer.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/timers/STL_perf_analyzer.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/timers/STL_perf_analyzer.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/timers/STL_perf_analyzer.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/timers/STL_timer.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/timers/STL_timer.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/timers/STL_timer.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/timers/STL_timer.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/timers/mixed_perf_analyzer.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/timers/mixed_perf_analyzer.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/timers/mixed_perf_analyzer.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/timers/mixed_perf_analyzer.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/timers/portable_perf_analyzer.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/timers/portable_perf_analyzer.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/timers/portable_perf_analyzer.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/timers/portable_perf_analyzer.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/timers/portable_perf_analyzer_old.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/timers/portable_perf_analyzer_old.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/timers/portable_perf_analyzer_old.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/timers/portable_perf_analyzer_old.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/timers/portable_timer.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/timers/portable_timer.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/timers/portable_timer.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/timers/portable_timer.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/timers/x86_perf_analyzer.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/timers/x86_perf_analyzer.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/timers/x86_perf_analyzer.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/timers/x86_perf_analyzer.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/timers/x86_timer.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/timers/x86_timer.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/timers/x86_timer.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/timers/x86_timer.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/utils/size_lin_log.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/utils/size_lin_log.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/utils/size_lin_log.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/utils/size_lin_log.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/utils/size_log.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/utils/size_log.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/utils/size_log.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/utils/size_log.hh
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/utils/utilities.h b/resources/3rdParty/eigen/bench/btl/generic_bench/utils/utilities.h
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/utils/utilities.h
rename to resources/3rdParty/eigen/bench/btl/generic_bench/utils/utilities.h
diff --git a/resources/3rdparty/eigen/bench/btl/generic_bench/utils/xy_file.hh b/resources/3rdParty/eigen/bench/btl/generic_bench/utils/xy_file.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/generic_bench/utils/xy_file.hh
rename to resources/3rdParty/eigen/bench/btl/generic_bench/utils/xy_file.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/BLAS/CMakeLists.txt b/resources/3rdParty/eigen/bench/btl/libs/BLAS/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/BLAS/CMakeLists.txt
rename to resources/3rdParty/eigen/bench/btl/libs/BLAS/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/bench/btl/libs/BLAS/blas.h b/resources/3rdParty/eigen/bench/btl/libs/BLAS/blas.h
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/BLAS/blas.h
rename to resources/3rdParty/eigen/bench/btl/libs/BLAS/blas.h
diff --git a/resources/3rdparty/eigen/bench/btl/libs/BLAS/blas_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/BLAS/blas_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/BLAS/blas_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/BLAS/blas_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/BLAS/blas_interface_impl.hh b/resources/3rdParty/eigen/bench/btl/libs/BLAS/blas_interface_impl.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/BLAS/blas_interface_impl.hh
rename to resources/3rdParty/eigen/bench/btl/libs/BLAS/blas_interface_impl.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/BLAS/c_interface_base.h b/resources/3rdParty/eigen/bench/btl/libs/BLAS/c_interface_base.h
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/BLAS/c_interface_base.h
rename to resources/3rdParty/eigen/bench/btl/libs/BLAS/c_interface_base.h
diff --git a/resources/3rdparty/eigen/bench/btl/libs/BLAS/main.cpp b/resources/3rdParty/eigen/bench/btl/libs/BLAS/main.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/BLAS/main.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/BLAS/main.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/STL/CMakeLists.txt b/resources/3rdParty/eigen/bench/btl/libs/STL/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/STL/CMakeLists.txt
rename to resources/3rdParty/eigen/bench/btl/libs/STL/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/bench/btl/libs/STL/STL_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/STL/STL_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/STL/STL_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/STL/STL_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/STL/main.cpp b/resources/3rdParty/eigen/bench/btl/libs/STL/main.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/STL/main.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/STL/main.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/blitz/CMakeLists.txt b/resources/3rdParty/eigen/bench/btl/libs/blitz/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/blitz/CMakeLists.txt
rename to resources/3rdParty/eigen/bench/btl/libs/blitz/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/bench/btl/libs/blitz/blitz_LU_solve_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/blitz/blitz_LU_solve_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/blitz/blitz_LU_solve_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/blitz/blitz_LU_solve_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/blitz/blitz_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/blitz/blitz_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/blitz/blitz_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/blitz/blitz_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/blitz/btl_blitz.cpp b/resources/3rdParty/eigen/bench/btl/libs/blitz/btl_blitz.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/blitz/btl_blitz.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/blitz/btl_blitz.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/blitz/btl_tiny_blitz.cpp b/resources/3rdParty/eigen/bench/btl/libs/blitz/btl_tiny_blitz.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/blitz/btl_tiny_blitz.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/blitz/btl_tiny_blitz.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/blitz/tiny_blitz_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/blitz/tiny_blitz_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/blitz/tiny_blitz_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/blitz/tiny_blitz_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen2/CMakeLists.txt b/resources/3rdParty/eigen/bench/btl/libs/eigen2/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen2/CMakeLists.txt
rename to resources/3rdParty/eigen/bench/btl/libs/eigen2/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen2/btl_tiny_eigen2.cpp b/resources/3rdParty/eigen/bench/btl/libs/eigen2/btl_tiny_eigen2.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen2/btl_tiny_eigen2.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/eigen2/btl_tiny_eigen2.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen2/eigen2_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/eigen2/eigen2_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen2/eigen2_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/eigen2/eigen2_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen2/main_adv.cpp b/resources/3rdParty/eigen/bench/btl/libs/eigen2/main_adv.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen2/main_adv.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/eigen2/main_adv.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen2/main_linear.cpp b/resources/3rdParty/eigen/bench/btl/libs/eigen2/main_linear.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen2/main_linear.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/eigen2/main_linear.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen2/main_matmat.cpp b/resources/3rdParty/eigen/bench/btl/libs/eigen2/main_matmat.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen2/main_matmat.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/eigen2/main_matmat.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen2/main_vecmat.cpp b/resources/3rdParty/eigen/bench/btl/libs/eigen2/main_vecmat.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen2/main_vecmat.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/eigen2/main_vecmat.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen3/CMakeLists.txt b/resources/3rdParty/eigen/bench/btl/libs/eigen3/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen3/CMakeLists.txt
rename to resources/3rdParty/eigen/bench/btl/libs/eigen3/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen3/btl_tiny_eigen3.cpp b/resources/3rdParty/eigen/bench/btl/libs/eigen3/btl_tiny_eigen3.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen3/btl_tiny_eigen3.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/eigen3/btl_tiny_eigen3.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen3/eigen3_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/eigen3/eigen3_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen3/eigen3_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/eigen3/eigen3_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen3/main_adv.cpp b/resources/3rdParty/eigen/bench/btl/libs/eigen3/main_adv.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen3/main_adv.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/eigen3/main_adv.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen3/main_linear.cpp b/resources/3rdParty/eigen/bench/btl/libs/eigen3/main_linear.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen3/main_linear.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/eigen3/main_linear.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen3/main_matmat.cpp b/resources/3rdParty/eigen/bench/btl/libs/eigen3/main_matmat.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen3/main_matmat.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/eigen3/main_matmat.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/eigen3/main_vecmat.cpp b/resources/3rdParty/eigen/bench/btl/libs/eigen3/main_vecmat.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/eigen3/main_vecmat.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/eigen3/main_vecmat.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/gmm/CMakeLists.txt b/resources/3rdParty/eigen/bench/btl/libs/gmm/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/gmm/CMakeLists.txt
rename to resources/3rdParty/eigen/bench/btl/libs/gmm/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/bench/btl/libs/gmm/gmm_LU_solve_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/gmm/gmm_LU_solve_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/gmm/gmm_LU_solve_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/gmm/gmm_LU_solve_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/gmm/gmm_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/gmm/gmm_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/gmm/gmm_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/gmm/gmm_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/gmm/main.cpp b/resources/3rdParty/eigen/bench/btl/libs/gmm/main.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/gmm/main.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/gmm/main.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/mtl4/.kdbgrc.main b/resources/3rdParty/eigen/bench/btl/libs/mtl4/.kdbgrc.main
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/mtl4/.kdbgrc.main
rename to resources/3rdParty/eigen/bench/btl/libs/mtl4/.kdbgrc.main
diff --git a/resources/3rdparty/eigen/bench/btl/libs/mtl4/CMakeLists.txt b/resources/3rdParty/eigen/bench/btl/libs/mtl4/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/mtl4/CMakeLists.txt
rename to resources/3rdParty/eigen/bench/btl/libs/mtl4/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/bench/btl/libs/mtl4/main.cpp b/resources/3rdParty/eigen/bench/btl/libs/mtl4/main.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/mtl4/main.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/mtl4/main.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/mtl4/mtl4_LU_solve_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/mtl4/mtl4_LU_solve_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/mtl4/mtl4_LU_solve_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/mtl4/mtl4_LU_solve_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/mtl4/mtl4_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/mtl4/mtl4_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/mtl4/mtl4_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/mtl4/mtl4_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/tvmet/CMakeLists.txt b/resources/3rdParty/eigen/bench/btl/libs/tvmet/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/tvmet/CMakeLists.txt
rename to resources/3rdParty/eigen/bench/btl/libs/tvmet/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/bench/btl/libs/tvmet/main.cpp b/resources/3rdParty/eigen/bench/btl/libs/tvmet/main.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/tvmet/main.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/tvmet/main.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/tvmet/tvmet_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/tvmet/tvmet_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/tvmet/tvmet_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/tvmet/tvmet_interface.hh
diff --git a/resources/3rdparty/eigen/bench/btl/libs/ublas/CMakeLists.txt b/resources/3rdParty/eigen/bench/btl/libs/ublas/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/ublas/CMakeLists.txt
rename to resources/3rdParty/eigen/bench/btl/libs/ublas/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/bench/btl/libs/ublas/main.cpp b/resources/3rdParty/eigen/bench/btl/libs/ublas/main.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/ublas/main.cpp
rename to resources/3rdParty/eigen/bench/btl/libs/ublas/main.cpp
diff --git a/resources/3rdparty/eigen/bench/btl/libs/ublas/ublas_interface.hh b/resources/3rdParty/eigen/bench/btl/libs/ublas/ublas_interface.hh
similarity index 100%
rename from resources/3rdparty/eigen/bench/btl/libs/ublas/ublas_interface.hh
rename to resources/3rdParty/eigen/bench/btl/libs/ublas/ublas_interface.hh
diff --git a/resources/3rdparty/eigen/bench/check_cache_queries.cpp b/resources/3rdParty/eigen/bench/check_cache_queries.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/check_cache_queries.cpp
rename to resources/3rdParty/eigen/bench/check_cache_queries.cpp
diff --git a/resources/3rdparty/eigen/bench/eig33.cpp b/resources/3rdParty/eigen/bench/eig33.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/eig33.cpp
rename to resources/3rdParty/eigen/bench/eig33.cpp
diff --git a/resources/3rdparty/eigen/bench/geometry.cpp b/resources/3rdParty/eigen/bench/geometry.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/geometry.cpp
rename to resources/3rdParty/eigen/bench/geometry.cpp
diff --git a/resources/3rdparty/eigen/bench/product_threshold.cpp b/resources/3rdParty/eigen/bench/product_threshold.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/product_threshold.cpp
rename to resources/3rdParty/eigen/bench/product_threshold.cpp
diff --git a/resources/3rdparty/eigen/bench/quat_slerp.cpp b/resources/3rdParty/eigen/bench/quat_slerp.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/quat_slerp.cpp
rename to resources/3rdParty/eigen/bench/quat_slerp.cpp
diff --git a/resources/3rdparty/eigen/bench/quatmul.cpp b/resources/3rdParty/eigen/bench/quatmul.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/quatmul.cpp
rename to resources/3rdParty/eigen/bench/quatmul.cpp
diff --git a/resources/3rdparty/eigen/bench/sparse_cholesky.cpp b/resources/3rdParty/eigen/bench/sparse_cholesky.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/sparse_cholesky.cpp
rename to resources/3rdParty/eigen/bench/sparse_cholesky.cpp
diff --git a/resources/3rdparty/eigen/bench/sparse_dense_product.cpp b/resources/3rdParty/eigen/bench/sparse_dense_product.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/sparse_dense_product.cpp
rename to resources/3rdParty/eigen/bench/sparse_dense_product.cpp
diff --git a/resources/3rdparty/eigen/bench/sparse_lu.cpp b/resources/3rdParty/eigen/bench/sparse_lu.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/sparse_lu.cpp
rename to resources/3rdParty/eigen/bench/sparse_lu.cpp
diff --git a/resources/3rdparty/eigen/bench/sparse_product.cpp b/resources/3rdParty/eigen/bench/sparse_product.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/sparse_product.cpp
rename to resources/3rdParty/eigen/bench/sparse_product.cpp
diff --git a/resources/3rdparty/eigen/bench/sparse_randomsetter.cpp b/resources/3rdParty/eigen/bench/sparse_randomsetter.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/sparse_randomsetter.cpp
rename to resources/3rdParty/eigen/bench/sparse_randomsetter.cpp
diff --git a/resources/3rdparty/eigen/bench/sparse_setter.cpp b/resources/3rdParty/eigen/bench/sparse_setter.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/sparse_setter.cpp
rename to resources/3rdParty/eigen/bench/sparse_setter.cpp
diff --git a/resources/3rdparty/eigen/bench/sparse_transpose.cpp b/resources/3rdParty/eigen/bench/sparse_transpose.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/sparse_transpose.cpp
rename to resources/3rdParty/eigen/bench/sparse_transpose.cpp
diff --git a/resources/3rdparty/eigen/bench/sparse_trisolver.cpp b/resources/3rdParty/eigen/bench/sparse_trisolver.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/sparse_trisolver.cpp
rename to resources/3rdParty/eigen/bench/sparse_trisolver.cpp
diff --git a/resources/3rdParty/eigen/bench/spbench/CMakeLists.txt b/resources/3rdParty/eigen/bench/spbench/CMakeLists.txt
new file mode 100644
index 000000000..079912266
--- /dev/null
+++ b/resources/3rdParty/eigen/bench/spbench/CMakeLists.txt
@@ -0,0 +1,65 @@
+
+
+set(BLAS_FOUND TRUE)
+set(LAPACK_FOUND TRUE)
+set(BLAS_LIBRARIES eigen_blas_static)
+set(LAPACK_LIBRARIES eigen_lapack_static)
+
+set(SPARSE_LIBS "")
+
+# find_library(PARDISO_LIBRARIES pardiso412-GNU450-X86-64)
+# if(PARDISO_LIBRARIES)
+#   add_definitions("-DEIGEN_PARDISO_SUPPORT")
+#   set(SPARSE_LIBS ${SPARSE_LIBS} ${PARDISO_LIBRARIES})
+# endif(PARDISO_LIBRARIES)
+
+find_package(Cholmod)
+if(CHOLMOD_FOUND AND BLAS_FOUND AND LAPACK_FOUND)
+  add_definitions("-DEIGEN_CHOLMOD_SUPPORT")
+  include_directories(${CHOLMOD_INCLUDES})
+  set(SPARSE_LIBS ${SPARSE_LIBS} ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES})
+  set(CHOLMOD_ALL_LIBS  ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES})
+endif()
+
+find_package(Umfpack)
+if(UMFPACK_FOUND AND BLAS_FOUND)
+  add_definitions("-DEIGEN_UMFPACK_SUPPORT")
+  include_directories(${UMFPACK_INCLUDES})
+  set(SPARSE_LIBS ${SPARSE_LIBS} ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES})
+  set(UMFPACK_ALL_LIBS ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES})
+endif()
+
+find_package(SuperLU)
+if(SUPERLU_FOUND AND BLAS_FOUND)
+  add_definitions("-DEIGEN_SUPERLU_SUPPORT")
+  include_directories(${SUPERLU_INCLUDES})
+  set(SPARSE_LIBS ${SPARSE_LIBS} ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES})
+  set(SUPERLU_ALL_LIBS ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES})
+endif()
+
+
+find_package(Pastix)
+find_package(Scotch)
+find_package(Metis)
+if(PASTIX_FOUND AND BLAS_FOUND)
+  add_definitions("-DEIGEN_PASTIX_SUPPORT")
+  include_directories(${PASTIX_INCLUDES})
+  if(SCOTCH_FOUND)
+    include_directories(${SCOTCH_INCLUDES})
+    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${SCOTCH_LIBRARIES})
+  elseif(METIS_FOUND)
+    include_directories(${METIS_INCLUDES})
+    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${METIS_LIBRARIES})  
+  endif(SCOTCH_FOUND)
+  set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES} ${ORDERING_LIBRARIES} ${BLAS_LIBRARIES})
+  set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES} ${BLAS_LIBRARIES})
+endif(PASTIX_FOUND AND BLAS_FOUND)
+
+find_library(RT_LIBRARY rt)
+if(RT_LIBRARY)
+  set(SPARSE_LIBS ${SPARSE_LIBS} ${RT_LIBRARY})
+endif(RT_LIBRARY)
+
+add_executable(spbenchsolver spbenchsolver.cpp)
+target_link_libraries (spbenchsolver ${SPARSE_LIBS})
+
diff --git a/resources/3rdParty/eigen/bench/spbench/spbenchsolver.cpp b/resources/3rdParty/eigen/bench/spbench/spbenchsolver.cpp
new file mode 100644
index 000000000..830542ff1
--- /dev/null
+++ b/resources/3rdParty/eigen/bench/spbench/spbenchsolver.cpp
@@ -0,0 +1,90 @@
+#include <bench/spbench/spbenchsolver.h>
+
+void bench_printhelp()
+{
+    cout<< " \nbenchsolver : performs a benchmark of all the solvers available in Eigen \n\n";
+    cout<< " MATRIX FOLDER : \n";
+    cout<< " The matrices for the benchmark should be collected in a folder specified with an environment variable EIGEN_MATRIXDIR \n";
+    cout<< " This folder should contain the subfolders real/ and complex/ : \n";
+    cout<< " The matrices are stored using the matrix market coordinate format \n";
+    cout<< " The matrix and associated right-hand side (rhs) files are named respectively \n";
+    cout<< " as MatrixName.mtx and MatrixName_b.mtx. If the rhs does not exist, a random one is generated. \n";
+    cout<< " If a matrix is SPD, the matrix should be named as MatrixName_SPD.mtx \n";
+    cout<< " If a true solution exists, it should be named as MatrixName_x.mtx; \n"     ;
+    cout<< " it will be used to compute the norm of the error relative to the computed solutions\n\n";
+    cout<< " OPTIONS : \n"; 
+    cout<< " -h or --help \n    print this help and return\n\n";
+    cout<< " -d matrixdir \n    Use matrixdir as the matrix folder instead of the one specified in the environment variable EIGEN_MATRIXDIR\n\n"; 
+    cout<< " -o outputfile.html \n    Output the statistics to a html file \n\n";
+    cout<< " --eps <RelErr> Sets the relative tolerance for iterative solvers (default 1e-08) \n\n";
+    cout<< " --maxits <MaxIts> Sets the maximum number of iterations (default 1000) \n\n";
+    
+}
+int main(int argc, char ** args)
+{
+  
+  bool help = ( get_options(argc, args, "-h") || get_options(argc, args, "--help") );
+  if(help) {
+    bench_printhelp();
+    return 0;
+  }
+
+  // Get the location of the test matrices
+  string matrix_dir;
+  if (!get_options(argc, args, "-d", &matrix_dir))
+  {
+    if(getenv("EIGEN_MATRIXDIR") == NULL){
+      std::cerr << "Please, specify the location of the matrices with -d mat_folder or the environment variable EIGEN_MATRIXDIR \n";
+      std::cerr << " Run with --help to see the list of all the available options \n";
+      return -1;
+    }
+    matrix_dir = getenv("EIGEN_MATRIXDIR");
+  }
+     
+  std::ofstream statbuf;
+  string statFile ;
+  
+  // Get the file to write the statistics
+  bool statFileExists = get_options(argc, args, "-o", &statFile);
+  if(statFileExists)
+  {
+    statbuf.open(statFile.c_str(), std::ios::out);
+    if(statbuf.good()){
+      statFileExists = true; 
+      printStatheader(statbuf);
+      statbuf.close();
+    }
+    else {
+      std::cerr << "Unable to open the provided file for writing... \n";
+      statFileExists = false;
+    }
+  }       
+  
+  // Get the maximum number of iterations and the tolerance
+  int maxiters = 1000; 
+  double tol = 1e-08; 
+  string inval; 
+  if (get_options(argc, args, "--eps", &inval))
+    tol = atof(inval.c_str()); 
+  if(get_options(argc, args, "--maxits", &inval))
+    maxiters = atoi(inval.c_str()); 
+  
+  string current_dir; 
+  // Test the matrices in %EIGEN_MATRIXDIR/real
+  current_dir = matrix_dir + "/real"; 
+  Browse_Matrices<double>(current_dir, statFileExists, statFile,maxiters, tol);
+  
+  // Test the matrices in %EIGEN_MATRIXDIR/complex
+  current_dir = matrix_dir + "/complex"; 
+  Browse_Matrices<std::complex<double> >(current_dir, statFileExists, statFile, maxiters, tol); 
+  
+  if(statFileExists)
+  {
+    statbuf.open(statFile.c_str(), std::ios::app); 
+    statbuf << "</TABLE> \n";
+    cout << "\n Output written in " << statFile << " ...\n";
+    statbuf.close();
+  }
+
+  return 0;
+}
+
+      
diff --git a/resources/3rdParty/eigen/bench/spbench/spbenchsolver.h b/resources/3rdParty/eigen/bench/spbench/spbenchsolver.h
new file mode 100644
index 000000000..609c7c39d
--- /dev/null
+++ b/resources/3rdParty/eigen/bench/spbench/spbenchsolver.h
@@ -0,0 +1,533 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#include <iostream>
+#include <fstream>
+#include "Eigen/SparseCore"
+#include <bench/BenchTimer.h>
+#include <cstdlib>
+#include <string>
+#include <Eigen/Cholesky>
+#include <Eigen/Jacobi>
+#include <Eigen/Householder>
+#include <Eigen/IterativeLinearSolvers>
+#include <unsupported/Eigen/IterativeSolvers>
+#include <Eigen/LU>
+#include <unsupported/Eigen/SparseExtra>
+
+#ifdef EIGEN_CHOLMOD_SUPPORT
+#include <Eigen/CholmodSupport>
+#endif
+
+#ifdef EIGEN_UMFPACK_SUPPORT
+#include <Eigen/UmfPackSupport>
+#endif
+
+#ifdef EIGEN_PARDISO_SUPPORT
+#include <Eigen/PardisoSupport>
+#endif
+
+#ifdef EIGEN_SUPERLU_SUPPORT
+#include <Eigen/SuperLUSupport>
+#endif
+
+#ifdef EIGEN_PASTIX_SUPPORT
+#include <Eigen/PaStiXSupport>
+#endif
+
+// CONSTANTS
+#define EIGEN_UMFPACK  0
+#define EIGEN_SUPERLU  1
+#define EIGEN_PASTIX  2
+#define EIGEN_PARDISO  3
+#define EIGEN_BICGSTAB  4
+#define EIGEN_BICGSTAB_ILUT  5
+#define EIGEN_GMRES 6
+#define EIGEN_GMRES_ILUT 7
+#define EIGEN_SIMPLICIAL_LDLT  8
+#define EIGEN_CHOLMOD_LDLT  9
+#define EIGEN_PASTIX_LDLT  10
+#define EIGEN_PARDISO_LDLT  11
+#define EIGEN_SIMPLICIAL_LLT  12
+#define EIGEN_CHOLMOD_SUPERNODAL_LLT  13
+#define EIGEN_CHOLMOD_SIMPLICIAL_LLT  14
+#define EIGEN_PASTIX_LLT  15
+#define EIGEN_PARDISO_LLT  16
+#define EIGEN_CG  17
+#define EIGEN_CG_PRECOND  18
+#define EIGEN_ALL_SOLVERS  19
+
+using namespace Eigen;
+using namespace std; 
+
+struct Stats{
+  ComputationInfo info;
+  double total_time;
+  double compute_time;
+  double solve_time; 
+  double rel_error;
+  int memory_used; 
+  int iterations;
+  int isavail; 
+  int isIterative;
+}; 
+
+// Global variables for input parameters
+int MaximumIters; // Maximum number of iterations
+double RelErr; // Relative error of the computed solution
+
+template<typename T> inline typename NumTraits<T>::Real test_precision() { return NumTraits<T>::dummy_precision(); }
+template<> inline float test_precision<float>() { return 1e-3f; }                                                             
+template<> inline double test_precision<double>() { return 1e-6; }                                                            
+template<> inline float test_precision<std::complex<float> >() { return test_precision<float>(); }
+template<> inline double test_precision<std::complex<double> >() { return test_precision<double>(); }
+
+void printStatheader(std::ofstream& out)
+{
+  int LUcnt = 0; 
+  string LUlist =" ", LLTlist = "<TH > LLT", LDLTlist = "<TH > LDLT ";
+  
+#ifdef EIGEN_UMFPACK_SUPPORT
+  LUlist += "<TH > UMFPACK "; LUcnt++;
+#endif
+#ifdef EIGEN_SUPERLU_SUPPORT
+  LUlist += "<TH > SUPERLU "; LUcnt++;
+#endif
+#ifdef EIGEN_CHOLMOD_SUPPORT
+  LLTlist += "<TH > CHOLMOD SP LLT<TH > CHOLMOD LLT"; 
+  LDLTlist += "<TH>CHOLMOD LDLT"; 
+#endif
+#ifdef EIGEN_PARDISO_SUPPORT
+  LUlist += "<TH > PARDISO LU";  LUcnt++;
+  LLTlist += "<TH > PARDISO LLT"; 
+  LDLTlist += "<TH > PARDISO LDLT";
+#endif
+#ifdef EIGEN_PASTIX_SUPPORT
+  LUlist += "<TH > PASTIX LU";  LUcnt++;
+  LLTlist += "<TH > PASTIX LLT"; 
+  LDLTlist += "<TH > PASTIX LDLT";
+#endif
+  
+  out << "<TABLE border=\"1\" >\n ";
+  out << "<TR><TH>Matrix <TH> N <TH> NNZ <TH> ";
+  if (LUcnt) out << LUlist;
+  out << " <TH >BiCGSTAB <TH >BiCGSTAB+ILUT"<< "<TH >GMRES+ILUT" <<LDLTlist << LLTlist <<  "<TH> CG "<< std::endl;
+}
+
+
+template<typename Solver, typename Scalar>
+Stats call_solver(Solver &solver, const typename Solver::MatrixType& A, const Matrix<Scalar, Dynamic, 1>& b, const Matrix<Scalar, Dynamic, 1>& refX)
+{
+  Stats stat; 
+  Matrix<Scalar, Dynamic, 1> x; 
+  BenchTimer timer; 
+  timer.reset();
+  timer.start();
+  solver.compute(A); 
+  if (solver.info() != Success)
+  {
+    stat.info = NumericalIssue;
+    std::cerr << "Solver failed ... \n";
+    return stat;
+  }
+  timer.stop(); 
+  stat.compute_time = timer.value();
+  
+  timer.reset();
+  timer.start();
+  x = solver.solve(b); 
+  if (solver.info() == NumericalIssue)
+  {
+    stat.info = NumericalIssue;
+    std::cerr << "Solver failed ... \n";
+    return stat;
+  }
+  
+  timer.stop();
+  stat.solve_time = timer.value();
+  stat.total_time = stat.solve_time + stat.compute_time;
+  stat.memory_used = 0; 
+  // Verify the relative error
+  if(refX.size() != 0)
+    stat.rel_error = (refX - x).norm()/refX.norm();
+  else 
+  {
+    // Compute the relative residual norm
+    Matrix<Scalar, Dynamic, 1> temp; 
+    temp = A * x; 
+    stat.rel_error = (b-temp).norm()/b.norm();
+  }
+  if ( stat.rel_error > RelErr )
+  {
+    stat.info = NoConvergence; 
+    return stat;
+  }
+  else 
+  {
+    stat.info = Success;
+    return stat; 
+  }
+}
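+// Illustrative sketch of how call_solver is used by SelectSolvers below; the
+// matrix A, right-hand side b and reference solution refX are assumed to be
+// already loaded:
+//
+//   SimplicialLDLT<SparseMatrix<double>, Lower> solver;
+//   Stats s = call_solver(solver, A, b, refX);
+//   // s.compute_time / s.solve_time / s.total_time hold the timings and
+//   // s.rel_error the error w.r.t. refX (or the residual norm if refX is empty).
+//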
+
+template<typename Solver, typename Scalar>
+Stats call_directsolver(Solver& solver, const typename Solver::MatrixType& A, const Matrix<Scalar, Dynamic, 1>& b, const Matrix<Scalar, Dynamic, 1>& refX)
+{
+    Stats stat;
+    stat = call_solver(solver, A, b, refX);
+    return stat;
+}
+
+template<typename Solver, typename Scalar>
+Stats call_itersolver(Solver &solver, const typename Solver::MatrixType& A, const Matrix<Scalar, Dynamic, 1>& b, const Matrix<Scalar, Dynamic, 1>& refX)
+{
+  Stats stat;
+  solver.setTolerance(RelErr); 
+  solver.setMaxIterations(MaximumIters);
+  
+  stat = call_solver(solver, A, b, refX); 
+  stat.iterations = solver.iterations();
+  return stat; 
+}
+
+inline void printStatItem(Stats *stat, int solver_id, int& best_time_id, double& best_time_val)
+{
+  stat[solver_id].isavail = 1;  
+  
+  if (stat[solver_id].info == NumericalIssue)
+  {
+    cout << " SOLVER FAILED ... Probably a numerical issue \n";
+    return;
+  }
+  if (stat[solver_id].info == NoConvergence){
+    cout << "REL. ERROR " <<  stat[solver_id].rel_error;
+    if(stat[solver_id].isIterative == 1)
+      cout << " (" << stat[solver_id].iterations << ") \n"; 
+    return;
+  }
+  
+  // Record the best CPU time 
+  if (!best_time_val) 
+  {
+    best_time_val = stat[solver_id].total_time;
+    best_time_id = solver_id;
+  }
+  else if (stat[solver_id].total_time < best_time_val)
+  {
+    best_time_val = stat[solver_id].total_time;
+    best_time_id = solver_id; 
+  }
+  // Print statistics to standard output
+  if (stat[solver_id].info == Success){
+    cout<< "COMPUTE TIME : " << stat[solver_id].compute_time<< " \n";
+    cout<< "SOLVE TIME : " << stat[solver_id].solve_time<< " \n";
+    cout<< "TOTAL TIME : " << stat[solver_id].total_time<< " \n";
+    cout << "REL. ERROR : " << stat[solver_id].rel_error ;
+    if(stat[solver_id].isIterative == 1) {
+      cout << " (" << stat[solver_id].iterations << ") ";
+    }
+    cout << std::endl;
+  }
+    
+}
+
+
+/* Print the results from all solvers corresponding to a particular matrix.
+ * The best CPU time is highlighted (red background) in the html output.
+ */
+inline void printHtmlStatLine(Stats *stat, int best_time_id, string& statline)
+{
+  
+  string markup;
+  ostringstream compute,solve,total,error;
+  for (int i = 0; i < EIGEN_ALL_SOLVERS; i++) 
+  {
+    if (stat[i].isavail == 0) continue;
+    if(i == best_time_id)
+      markup = "<TD style=\"background-color:red\">";
+    else
+      markup = "<TD>";
+    
+    if (stat[i].info == Success){
+      compute << markup << stat[i].compute_time;
+      solve << markup << stat[i].solve_time;
+      total << markup << stat[i].total_time; 
+      error << " <TD> " << stat[i].rel_error;
+      if(stat[i].isIterative == 1) {
+        error << " (" << stat[i].iterations << ") ";
+      }
+    }
+    else {
+      compute << " <TD> -" ;
+      solve << " <TD> -" ;
+      total << " <TD> -" ;
+      if(stat[i].info == NoConvergence){
+        error << " <TD> "<< stat[i].rel_error ;
+        if(stat[i].isIterative == 1)
+          error << " (" << stat[i].iterations << ") "; 
+      }
+      else    error << " <TD> - ";
+    }
+  }
+  
+  statline = "<TH>Compute Time " + compute.str() + "\n" 
+                        +  "<TR><TH>Solve Time " + solve.str() + "\n" 
+                        +  "<TR><TH>Total Time " + total.str() + "\n" 
+                        +"<TR><TH>Error(Iter)" + error.str() + "\n"; 
+  
+}
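+// Shape of one per-matrix block in the html output (the leading row is opened
+// by Browse_Matrices below, followed by the four lines built here; the numbers
+// are hypothetical):
+//
+//   <TR><TH rowspan="4">MatrixName <TD rowspan="4"> N <TD rowspan="4"> NNZ
+//   <TH>Compute Time <TD>0.12 <TD>0.34 ...
+//   <TR><TH>Solve Time <TD>0.01 <TD>0.02 ...
+//   <TR><TH>Total Time <TD>0.13 <TD>0.36 ...
+//   <TR><TH>Error(Iter) <TD>1.2e-12 <TD>3.4e-10 (25) ...
+//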
+
+template <typename Scalar>
+int SelectSolvers(const SparseMatrix<Scalar>&A, unsigned int sym, Matrix<Scalar, Dynamic, 1>& b, const Matrix<Scalar, Dynamic, 1>& refX, Stats *stat)
+{
+  typedef SparseMatrix<Scalar, ColMajor> SpMat; 
+  // First, deal with Nonsymmetric and symmetric matrices
+  int best_time_id = 0; 
+  double best_time_val = 0.0;
+  //UMFPACK
+  #ifdef EIGEN_UMFPACK_SUPPORT
+  {
+    cout << "Solving with UMFPACK LU ... \n"; 
+    UmfPackLU<SpMat> solver; 
+    stat[EIGEN_UMFPACK] = call_directsolver(solver, A, b, refX); 
+    printStatItem(stat, EIGEN_UMFPACK, best_time_id, best_time_val); 
+  }
+  #endif
+    //SuperLU
+  #ifdef EIGEN_SUPERLU_SUPPORT
+  {
+    cout << "\nSolving with SUPERLU ... \n"; 
+    SuperLU<SpMat> solver;
+    stat[EIGEN_SUPERLU] = call_directsolver(solver, A, b, refX); 
+    printStatItem(stat, EIGEN_SUPERLU, best_time_id, best_time_val); 
+  }
+  #endif
+    
+   // PaStix LU
+  #ifdef EIGEN_PASTIX_SUPPORT
+  {
+    cout << "\nSolving with PASTIX LU ... \n"; 
+    PastixLU<SpMat> solver; 
+    stat[EIGEN_PASTIX] = call_directsolver(solver, A, b, refX) ;
+    printStatItem(stat, EIGEN_PASTIX, best_time_id, best_time_val); 
+  }
+  #endif
+
+   //PARDISO LU
+  #ifdef EIGEN_PARDISO_SUPPORT
+  {
+    cout << "\nSolving with PARDISO LU ... \n"; 
+    PardisoLU<SpMat>  solver; 
+    stat[EIGEN_PARDISO] = call_directsolver(solver, A, b, refX);
+    printStatItem(stat, EIGEN_PARDISO, best_time_id, best_time_val); 
+  }
+  #endif
+
+
+  
+  //BiCGSTAB
+  {
+    cout << "\nSolving with BiCGSTAB ... \n"; 
+    BiCGSTAB<SpMat> solver; 
+    stat[EIGEN_BICGSTAB] = call_itersolver(solver, A, b, refX);
+    stat[EIGEN_BICGSTAB].isIterative = 1;
+    printStatItem(stat, EIGEN_BICGSTAB, best_time_id, best_time_val); 
+  }
+  //BiCGSTAB+ILUT
+  {
+    cout << "\nSolving with BiCGSTAB and ILUT ... \n"; 
+    BiCGSTAB<SpMat, IncompleteLUT<Scalar> > solver; 
+    stat[EIGEN_BICGSTAB_ILUT] = call_itersolver(solver, A, b, refX);
+    stat[EIGEN_BICGSTAB_ILUT].isIterative = 1;
+    printStatItem(stat, EIGEN_BICGSTAB_ILUT, best_time_id, best_time_val); 
+  }
+  
+   
+  //GMRES
+//   {
+//     cout << "\nSolving with GMRES ... \n"; 
+//     GMRES<SpMat> solver; 
+//     stat[EIGEN_GMRES] = call_itersolver(solver, A, b, refX);
+//     stat[EIGEN_GMRES].isIterative = 1;
+//     printStatItem(stat, EIGEN_GMRES, best_time_id, best_time_val); 
+//   }
+  //GMRES+ILUT
+  {
+    cout << "\nSolving with GMRES and ILUT ... \n"; 
+    GMRES<SpMat, IncompleteLUT<Scalar> > solver; 
+    stat[EIGEN_GMRES_ILUT] = call_itersolver(solver, A, b, refX);
+    stat[EIGEN_GMRES_ILUT].isIterative = 1;
+    printStatItem(stat, EIGEN_GMRES_ILUT, best_time_id, best_time_val); 
+  }
+  
+  // Hermitian matrices, not necessarily positive definite
+  if (sym != NonSymmetric)
+  {
+    // Internal Cholesky
+    {
+      cout << "\nSolving with Simplicial LDLT ... \n"; 
+      SimplicialLDLT<SpMat, Lower> solver;
+      stat[EIGEN_SIMPLICIAL_LDLT] = call_directsolver(solver, A, b, refX); 
+      printStatItem(stat, EIGEN_SIMPLICIAL_LDLT, best_time_id, best_time_val); 
+    }
+    
+    // CHOLMOD
+    #ifdef EIGEN_CHOLMOD_SUPPORT
+    {
+      cout << "\nSolving with CHOLMOD LDLT ... \n"; 
+      CholmodDecomposition<SpMat, Lower> solver;
+      solver.setMode(CholmodLDLt);
+      stat[EIGEN_CHOLMOD_LDLT] =  call_directsolver(solver, A, b, refX);
+      printStatItem(stat,EIGEN_CHOLMOD_LDLT, best_time_id, best_time_val); 
+    }
+    #endif
+    
+    //PASTIX LDLT
+    #ifdef EIGEN_PASTIX_SUPPORT
+    {
+      cout << "\nSolving with PASTIX LDLT ... \n"; 
+      PastixLDLT<SpMat, Lower> solver; 
+      stat[EIGEN_PASTIX_LDLT] = call_directsolver(solver, A, b, refX);
+      printStatItem(stat,EIGEN_PASTIX_LDLT, best_time_id, best_time_val); 
+    }
+    #endif
+    
+    //PARDISO LDLT
+    #ifdef EIGEN_PARDISO_SUPPORT
+    {
+      cout << "\nSolving with PARDISO LDLT ... \n"; 
+      PardisoLDLT<SpMat, Lower> solver; 
+      stat[EIGEN_PARDISO_LDLT] = call_directsolver(solver, A, b, refX); 
+      printStatItem(stat,EIGEN_PARDISO_LDLT, best_time_id, best_time_val); 
+    }
+    #endif
+  }
+
+   // Now, symmetric POSITIVE DEFINITE matrices
+  if (sym == SPD)
+  {
+    
+    //Internal Sparse Cholesky
+    {
+      cout << "\nSolving with SIMPLICIAL LLT ... \n"; 
+      SimplicialLLT<SpMat, Lower> solver; 
+      stat[EIGEN_SIMPLICIAL_LLT] = call_directsolver(solver, A, b, refX); 
+      printStatItem(stat,EIGEN_SIMPLICIAL_LLT, best_time_id, best_time_val); 
+    }
+    
+    // CHOLMOD
+    #ifdef EIGEN_CHOLMOD_SUPPORT
+    {
+      // CholMOD SuperNodal LLT
+      cout << "\nSolving with CHOLMOD LLT (Supernodal)... \n"; 
+      CholmodDecomposition<SpMat, Lower> solver;
+      solver.setMode(CholmodSupernodalLLt);
+      stat[EIGEN_CHOLMOD_SUPERNODAL_LLT] = call_directsolver(solver, A, b, refX);
+      printStatItem(stat,EIGEN_CHOLMOD_SUPERNODAL_LLT, best_time_id, best_time_val); 
+      // CholMod Simplicial LLT
+      cout << "\nSolving with CHOLMOD LLT (Simplicial) ... \n"; 
+      solver.setMode(CholmodSimplicialLLt);
+      stat[EIGEN_CHOLMOD_SIMPLICIAL_LLT] = call_directsolver(solver, A, b, refX);
+      printStatItem(stat,EIGEN_CHOLMOD_SIMPLICIAL_LLT, best_time_id, best_time_val); 
+    }
+    #endif
+    
+    //PASTIX LLT
+    #ifdef EIGEN_PASTIX_SUPPORT
+    {
+      cout << "\nSolving with PASTIX LLT ... \n"; 
+      PastixLLT<SpMat, Lower> solver; 
+      stat[EIGEN_PASTIX_LLT] =  call_directsolver(solver, A, b, refX);
+      printStatItem(stat,EIGEN_PASTIX_LLT, best_time_id, best_time_val); 
+    }
+    #endif
+    
+    //PARDISO LLT
+    #ifdef EIGEN_PARDISO_SUPPORT
+    {
+      cout << "\nSolving with PARDISO LLT ... \n"; 
+      PardisoLLT<SpMat, Lower> solver; 
+      stat[EIGEN_PARDISO_LLT] = call_directsolver(solver, A, b, refX);
+      printStatItem(stat,EIGEN_PARDISO_LLT, best_time_id, best_time_val); 
+    }
+    #endif
+    
+    // Internal CG
+    {
+      cout << "\nSolving with CG ... \n"; 
+      ConjugateGradient<SpMat, Lower> solver; 
+      stat[EIGEN_CG] = call_itersolver(solver, A, b, refX);
+      stat[EIGEN_CG].isIterative = 1;
+      printStatItem(stat,EIGEN_CG, best_time_id, best_time_val); 
+    }
+    //CG+IdentityPreconditioner
+//     {
+//       cout << "\nSolving with CG and IdentityPreconditioner ... \n"; 
+//       ConjugateGradient<SpMat, Lower, IdentityPreconditioner> solver; 
+//       stat[EIGEN_CG_PRECOND] = call_itersolver(solver, A, b, refX);
+//       stat[EIGEN_CG_PRECOND].isIterative = 1;
+//       printStatItem(stat,EIGEN_CG_PRECOND, best_time_id, best_time_val); 
+//     }
+  } // End SPD matrices 
+  
+  return best_time_id;
+}
+
+/* Browse all the matrices available in the specified folder 
+ * and solve the associated linear system.
+ * The results of each solve are printed to the standard output
+ * and optionally to the provided html file
+ */
+template <typename Scalar>
+void Browse_Matrices(const string folder, bool statFileExists, std::string& statFile, int maxiters, double tol)
+{
+  MaximumIters = maxiters; // Maximum number of iterations, global variable 
+  RelErr = tol;  //Relative residual error  as stopping criterion for iterative solvers
+  MatrixMarketIterator<Scalar> it(folder);
+  Stats stat[EIGEN_ALL_SOLVERS];
+  for ( ; it; ++it)
+  {    
+    for (int i = 0; i < EIGEN_ALL_SOLVERS; i++)
+    {
+      stat[i].isavail = 0;
+      stat[i].isIterative = 0;
+    }
+    
+    int best_time_id;
+    cout<< "\n\n===================================================== \n";
+    cout<< " ======  SOLVING WITH MATRIX " << it.matname() << " ====\n";
+    cout<< " =================================================== \n\n";
+    Matrix<Scalar, Dynamic, 1> refX;
+    if(it.hasrefX()) refX = it.refX();
+    best_time_id = SelectSolvers<Scalar>(it.matrix(), it.sym(), it.rhs(), refX, &stat[0]);
+    
+    if(statFileExists)
+    {
+      string statline;
+      printHtmlStatLine(&stat[0], best_time_id, statline); 
+      std::ofstream statbuf(statFile.c_str(), std::ios::app);
+      statbuf << "<TR><TH rowspan=\"4\">" << it.matname() << " <TD rowspan=\"4\"> "
+      << it.matrix().rows() << " <TD rowspan=\"4\"> " << it.matrix().nonZeros()<< " "<< statline ;
+      statbuf.close();
+    }
+  } 
+} 
+
+bool get_options(int argc, char **args, string option, string* value=0)
+{
+  int idx = 1, found=false; 
+  while (idx<argc && !found){
+    if (option.compare(args[idx]) == 0){
+      found = true; 
+      if(value) *value = args[idx+1];
+    }
+    idx+=2;
+  }
+  return found; 
+}
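+// Note on get_options above: the scan advances two arguments at a time
+// (idx += 2), so the command line is expected to be a sequence of
+// "option value" pairs, e.g. "-d /data/matrices -o stats.html"; a value-less
+// flag such as -h is only seen when it falls on one of the scanned positions.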
diff --git a/resources/3rdparty/eigen/bench/spmv.cpp b/resources/3rdParty/eigen/bench/spmv.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/spmv.cpp
rename to resources/3rdParty/eigen/bench/spmv.cpp
diff --git a/resources/3rdparty/eigen/bench/vdw_new.cpp b/resources/3rdParty/eigen/bench/vdw_new.cpp
similarity index 100%
rename from resources/3rdparty/eigen/bench/vdw_new.cpp
rename to resources/3rdParty/eigen/bench/vdw_new.cpp
diff --git a/resources/3rdparty/eigen/blas/BandTriangularSolver.h b/resources/3rdParty/eigen/blas/BandTriangularSolver.h
similarity index 100%
rename from resources/3rdparty/eigen/blas/BandTriangularSolver.h
rename to resources/3rdParty/eigen/blas/BandTriangularSolver.h
diff --git a/resources/3rdParty/eigen/blas/CMakeLists.txt b/resources/3rdParty/eigen/blas/CMakeLists.txt
new file mode 100644
index 000000000..453d5874c
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/CMakeLists.txt
@@ -0,0 +1,57 @@
+
+project(EigenBlas CXX)
+
+include("../cmake/language_support.cmake")
+
+workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS)
+
+if(EIGEN_Fortran_COMPILER_WORKS)
+  enable_language(Fortran OPTIONAL)
+endif()
+
+add_custom_target(blas)
+
+set(EigenBlas_SRCS single.cpp double.cpp complex_single.cpp complex_double.cpp xerbla.cpp)
+
+if(EIGEN_Fortran_COMPILER_WORKS)
+
+set(EigenBlas_SRCS ${EigenBlas_SRCS}
+    complexdots.f
+    srotm.f srotmg.f drotm.f drotmg.f
+    lsame.f   chpr2.f  dspmv.f    dtpsv.f ssbmv.f  sspr.f   stpmv.f
+    zhpr2.f  chbmv.f  chpr.f   ctpmv.f     dspr2.f  sspmv.f    stpsv.f
+    zhbmv.f  zhpr.f   ztpmv.f chpmv.f   ctpsv.f    dsbmv.f  dspr.f   dtpmv.f   sspr2.f
+    zhpmv.f    ztpsv.f
+    dtbmv.f stbmv.f ctbmv.f ztbmv.f
+)
+else()
+
+message(WARNING " No fortran compiler has been detected, the blas build will be incomplete.")
+
+endif()
+
+add_library(eigen_blas_static ${EigenBlas_SRCS})
+add_library(eigen_blas SHARED ${EigenBlas_SRCS})
+
+if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
+  target_link_libraries(eigen_blas_static ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
+  target_link_libraries(eigen_blas        ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
+endif()
+
+add_dependencies(blas eigen_blas eigen_blas_static)
+
+install(TARGETS eigen_blas eigen_blas_static
+        RUNTIME DESTINATION bin
+        LIBRARY DESTINATION lib
+        ARCHIVE DESTINATION lib)
+
+if(EIGEN_Fortran_COMPILER_WORKS)
+
+if(EIGEN_LEAVE_TEST_IN_ALL_TARGET)
+  add_subdirectory(testing) # can't do EXCLUDE_FROM_ALL here, breaks CTest
+else()
+  add_subdirectory(testing EXCLUDE_FROM_ALL)
+endif()
+
+endif()
+
diff --git a/resources/3rdparty/eigen/blas/README.txt b/resources/3rdParty/eigen/blas/README.txt
similarity index 100%
rename from resources/3rdparty/eigen/blas/README.txt
rename to resources/3rdParty/eigen/blas/README.txt
diff --git a/resources/3rdparty/eigen/blas/chbmv.f b/resources/3rdParty/eigen/blas/chbmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/chbmv.f
rename to resources/3rdParty/eigen/blas/chbmv.f
diff --git a/resources/3rdparty/eigen/blas/chpmv.f b/resources/3rdParty/eigen/blas/chpmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/chpmv.f
rename to resources/3rdParty/eigen/blas/chpmv.f
diff --git a/resources/3rdParty/eigen/blas/chpr.f b/resources/3rdParty/eigen/blas/chpr.f
new file mode 100644
index 000000000..11bd5c6ee
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/chpr.f
@@ -0,0 +1,220 @@
+      SUBROUTINE CHPR(UPLO,N,ALPHA,X,INCX,AP)
+*     .. Scalar Arguments ..
+      REAL ALPHA
+      INTEGER INCX,N
+      CHARACTER UPLO
+*     ..
+*     .. Array Arguments ..
+      COMPLEX AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  CHPR    performs the hermitian rank 1 operation
+*
+*     A := alpha*x*conjg( x' ) + A,
+*
+*  where alpha is a real scalar, x is an n element vector and A is an
+*  n by n hermitian matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the upper or lower
+*           triangular part of the matrix A is supplied in the packed
+*           array AP as follows:
+*
+*              UPLO = 'U' or 'u'   The upper triangular part of A is
+*                                  supplied in AP.
+*
+*              UPLO = 'L' or 'l'   The lower triangular part of A is
+*                                  supplied in AP.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  ALPHA  - REAL            .
+*           On entry, ALPHA specifies the scalar alpha.
+*           Unchanged on exit.
+*
+*  X      - COMPLEX          array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x.
+*           Unchanged on exit.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  AP     - COMPLEX          array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular part of the hermitian matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 )
+*           and a( 2, 2 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the upper triangular part of the
+*           updated matrix.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular part of the hermitian matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 )
+*           and a( 3, 1 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the lower triangular part of the
+*           updated matrix.
+*           Note that the imaginary parts of the diagonal elements need
+*           not be set, they are assumed to be zero, and on exit they
+*           are set to zero.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      COMPLEX ZERO
+      PARAMETER (ZERO= (0.0E+0,0.0E+0))
+*     ..
+*     .. Local Scalars ..
+      COMPLEX TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*     .. Intrinsic Functions ..
+      INTRINSIC CONJG,REAL
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (N.LT.0) THEN
+          INFO = 2
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 5
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('CHPR  ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF ((N.EQ.0) .OR. (ALPHA.EQ.REAL(ZERO))) RETURN
+*
+*     Set the start point in X if the increment is not unity.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of the array AP
+*     are accessed sequentially with one pass through AP.
+*
+      KK = 1
+      IF (LSAME(UPLO,'U')) THEN
+*
+*        Form  A  when upper triangle is stored in AP.
+*
+          IF (INCX.EQ.1) THEN
+              DO 20 J = 1,N
+                  IF (X(J).NE.ZERO) THEN
+                      TEMP = ALPHA*CONJG(X(J))
+                      K = KK
+                      DO 10 I = 1,J - 1
+                          AP(K) = AP(K) + X(I)*TEMP
+                          K = K + 1
+   10                 CONTINUE
+                      AP(KK+J-1) = REAL(AP(KK+J-1)) + REAL(X(J)*TEMP)
+                  ELSE
+                      AP(KK+J-1) = REAL(AP(KK+J-1))
+                  END IF
+                  KK = KK + J
+   20         CONTINUE
+          ELSE
+              JX = KX
+              DO 40 J = 1,N
+                  IF (X(JX).NE.ZERO) THEN
+                      TEMP = ALPHA*CONJG(X(JX))
+                      IX = KX
+                      DO 30 K = KK,KK + J - 2
+                          AP(K) = AP(K) + X(IX)*TEMP
+                          IX = IX + INCX
+   30                 CONTINUE
+                      AP(KK+J-1) = REAL(AP(KK+J-1)) + REAL(X(JX)*TEMP)
+                  ELSE
+                      AP(KK+J-1) = REAL(AP(KK+J-1))
+                  END IF
+                  JX = JX + INCX
+                  KK = KK + J
+   40         CONTINUE
+          END IF
+      ELSE
+*
+*        Form  A  when lower triangle is stored in AP.
+*
+          IF (INCX.EQ.1) THEN
+              DO 60 J = 1,N
+                  IF (X(J).NE.ZERO) THEN
+                      TEMP = ALPHA*CONJG(X(J))
+                      AP(KK) = REAL(AP(KK)) + REAL(TEMP*X(J))
+                      K = KK + 1
+                      DO 50 I = J + 1,N
+                          AP(K) = AP(K) + X(I)*TEMP
+                          K = K + 1
+   50                 CONTINUE
+                  ELSE
+                      AP(KK) = REAL(AP(KK))
+                  END IF
+                  KK = KK + N - J + 1
+   60         CONTINUE
+          ELSE
+              JX = KX
+              DO 80 J = 1,N
+                  IF (X(JX).NE.ZERO) THEN
+                      TEMP = ALPHA*CONJG(X(JX))
+                      AP(KK) = REAL(AP(KK)) + REAL(TEMP*X(JX))
+                      IX = JX
+                      DO 70 K = KK + 1,KK + N - J
+                          IX = IX + INCX
+                          AP(K) = AP(K) + X(IX)*TEMP
+   70                 CONTINUE
+                  ELSE
+                      AP(KK) = REAL(AP(KK))
+                  END IF
+                  JX = JX + INCX
+                  KK = KK + N - J + 1
+   80         CONTINUE
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of CHPR  .
+*
+      END
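+*
+*     Illustrative note on the packed storage used above (UPLO = 'U'):
+*     column J of A occupies AP(KK)..AP(KK+J-1) with KK advancing by J
+*     per column, so a(I,J) with I.LE.J is stored at AP(I + J*(J-1)/2).
+*     For example, a 3 by 3 matrix is packed as
+*        AP = ( a(1,1), a(1,2), a(2,2), a(1,3), a(2,3), a(3,3) ).
+*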
diff --git a/resources/3rdParty/eigen/blas/chpr2.f b/resources/3rdParty/eigen/blas/chpr2.f
new file mode 100644
index 000000000..a0020ef3e
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/chpr2.f
@@ -0,0 +1,255 @@
+      SUBROUTINE CHPR2(UPLO,N,ALPHA,X,INCX,Y,INCY,AP)
+*     .. Scalar Arguments ..
+      COMPLEX ALPHA
+      INTEGER INCX,INCY,N
+      CHARACTER UPLO
+*     ..
+*     .. Array Arguments ..
+      COMPLEX AP(*),X(*),Y(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  CHPR2  performs the hermitian rank 2 operation
+*
+*     A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A,
+*
+*  where alpha is a scalar, x and y are n element vectors and A is an
+*  n by n hermitian matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the upper or lower
+*           triangular part of the matrix A is supplied in the packed
+*           array AP as follows:
+*
+*              UPLO = 'U' or 'u'   The upper triangular part of A is
+*                                  supplied in AP.
+*
+*              UPLO = 'L' or 'l'   The lower triangular part of A is
+*                                  supplied in AP.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  ALPHA  - COMPLEX         .
+*           On entry, ALPHA specifies the scalar alpha.
+*           Unchanged on exit.
+*
+*  X      - COMPLEX          array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x.
+*           Unchanged on exit.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Y      - COMPLEX          array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCY ) ).
+*           Before entry, the incremented array Y must contain the n
+*           element vector y.
+*           Unchanged on exit.
+*
+*  INCY   - INTEGER.
+*           On entry, INCY specifies the increment for the elements of
+*           Y. INCY must not be zero.
+*           Unchanged on exit.
+*
+*  AP     - COMPLEX          array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular part of the hermitian matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 )
+*           and a( 2, 2 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the upper triangular part of the
+*           updated matrix.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular part of the hermitian matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 )
+*           and a( 3, 1 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the lower triangular part of the
+*           updated matrix.
+*           Note that the imaginary parts of the diagonal elements need
+*           not be set, they are assumed to be zero, and on exit they
+*           are set to zero.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      COMPLEX ZERO
+      PARAMETER (ZERO= (0.0E+0,0.0E+0))
+*     ..
+*     .. Local Scalars ..
+      COMPLEX TEMP1,TEMP2
+      INTEGER I,INFO,IX,IY,J,JX,JY,K,KK,KX,KY
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*     .. Intrinsic Functions ..
+      INTRINSIC CONJG,REAL
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (N.LT.0) THEN
+          INFO = 2
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 5
+      ELSE IF (INCY.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('CHPR2 ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF ((N.EQ.0) .OR. (ALPHA.EQ.ZERO)) RETURN
+*
+*     Set up the start points in X and Y if the increments are not both
+*     unity.
+*
+      IF ((INCX.NE.1) .OR. (INCY.NE.1)) THEN
+          IF (INCX.GT.0) THEN
+              KX = 1
+          ELSE
+              KX = 1 - (N-1)*INCX
+          END IF
+          IF (INCY.GT.0) THEN
+              KY = 1
+          ELSE
+              KY = 1 - (N-1)*INCY
+          END IF
+          JX = KX
+          JY = KY
+      END IF
+*
+*     Start the operations. In this version the elements of the array AP
+*     are accessed sequentially with one pass through AP.
+*
+      KK = 1
+      IF (LSAME(UPLO,'U')) THEN
+*
+*        Form  A  when upper triangle is stored in AP.
+*
+          IF ((INCX.EQ.1) .AND. (INCY.EQ.1)) THEN
+              DO 20 J = 1,N
+                  IF ((X(J).NE.ZERO) .OR. (Y(J).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*CONJG(Y(J))
+                      TEMP2 = CONJG(ALPHA*X(J))
+                      K = KK
+                      DO 10 I = 1,J - 1
+                          AP(K) = AP(K) + X(I)*TEMP1 + Y(I)*TEMP2
+                          K = K + 1
+   10                 CONTINUE
+                      AP(KK+J-1) = REAL(AP(KK+J-1)) +
+     +                             REAL(X(J)*TEMP1+Y(J)*TEMP2)
+                  ELSE
+                      AP(KK+J-1) = REAL(AP(KK+J-1))
+                  END IF
+                  KK = KK + J
+   20         CONTINUE
+          ELSE
+              DO 40 J = 1,N
+                  IF ((X(JX).NE.ZERO) .OR. (Y(JY).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*CONJG(Y(JY))
+                      TEMP2 = CONJG(ALPHA*X(JX))
+                      IX = KX
+                      IY = KY
+                      DO 30 K = KK,KK + J - 2
+                          AP(K) = AP(K) + X(IX)*TEMP1 + Y(IY)*TEMP2
+                          IX = IX + INCX
+                          IY = IY + INCY
+   30                 CONTINUE
+                      AP(KK+J-1) = REAL(AP(KK+J-1)) +
+     +                             REAL(X(JX)*TEMP1+Y(JY)*TEMP2)
+                  ELSE
+                      AP(KK+J-1) = REAL(AP(KK+J-1))
+                  END IF
+                  JX = JX + INCX
+                  JY = JY + INCY
+                  KK = KK + J
+   40         CONTINUE
+          END IF
+      ELSE
+*
+*        Form  A  when lower triangle is stored in AP.
+*
+          IF ((INCX.EQ.1) .AND. (INCY.EQ.1)) THEN
+              DO 60 J = 1,N
+                  IF ((X(J).NE.ZERO) .OR. (Y(J).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*CONJG(Y(J))
+                      TEMP2 = CONJG(ALPHA*X(J))
+                      AP(KK) = REAL(AP(KK)) +
+     +                         REAL(X(J)*TEMP1+Y(J)*TEMP2)
+                      K = KK + 1
+                      DO 50 I = J + 1,N
+                          AP(K) = AP(K) + X(I)*TEMP1 + Y(I)*TEMP2
+                          K = K + 1
+   50                 CONTINUE
+                  ELSE
+                      AP(KK) = REAL(AP(KK))
+                  END IF
+                  KK = KK + N - J + 1
+   60         CONTINUE
+          ELSE
+              DO 80 J = 1,N
+                  IF ((X(JX).NE.ZERO) .OR. (Y(JY).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*CONJG(Y(JY))
+                      TEMP2 = CONJG(ALPHA*X(JX))
+                      AP(KK) = REAL(AP(KK)) +
+     +                         REAL(X(JX)*TEMP1+Y(JY)*TEMP2)
+                      IX = JX
+                      IY = JY
+                      DO 70 K = KK + 1,KK + N - J
+                          IX = IX + INCX
+                          IY = IY + INCY
+                          AP(K) = AP(K) + X(IX)*TEMP1 + Y(IY)*TEMP2
+   70                 CONTINUE
+                  ELSE
+                      AP(KK) = REAL(AP(KK))
+                  END IF
+                  JX = JX + INCX
+                  JY = JY + INCY
+                  KK = KK + N - J + 1
+   80         CONTINUE
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of CHPR2 .
+*
+      END
diff --git a/resources/3rdParty/eigen/blas/common.h b/resources/3rdParty/eigen/blas/common.h
new file mode 100644
index 000000000..b598c4e45
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/common.h
@@ -0,0 +1,140 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BLAS_COMMON_H
+#define EIGEN_BLAS_COMMON_H
+
+#include <iostream>
+#include <complex>
+
+#ifndef SCALAR
+#error the token SCALAR must be defined to compile this file
+#endif
+
+#include <Eigen/src/misc/blas.h>
+
+
+#define NOTR    0
+#define TR      1
+#define ADJ     2
+
+#define LEFT    0
+#define RIGHT   1
+
+#define UP      0
+#define LO      1
+
+#define NUNIT   0
+#define UNIT    1
+
+#define INVALID 0xff
+
+#define OP(X)   (   ((X)=='N' || (X)=='n') ? NOTR   \
+                  : ((X)=='T' || (X)=='t') ? TR     \
+                  : ((X)=='C' || (X)=='c') ? ADJ    \
+                  : INVALID)
+
+#define SIDE(X) (   ((X)=='L' || (X)=='l') ? LEFT   \
+                  : ((X)=='R' || (X)=='r') ? RIGHT  \
+                  : INVALID)
+
+#define UPLO(X) (   ((X)=='U' || (X)=='u') ? UP     \
+                  : ((X)=='L' || (X)=='l') ? LO     \
+                  : INVALID)
+
+#define DIAG(X) (   ((X)=='N' || (X)=='n') ? NUNIT  \
+                  : ((X)=='U' || (X)=='u') ? UNIT   \
+                  : INVALID)
+
+
+inline bool check_op(const char* op)
+{
+  return OP(*op)!=0xff;
+}
+
+inline bool check_side(const char* side)
+{
+  return SIDE(*side)!=0xff;
+}
+
+inline bool check_uplo(const char* uplo)
+{
+  return UPLO(*uplo)!=0xff;
+}
+
+#include <Eigen/Core>
+#include <Eigen/Jacobi>
+
+
+namespace Eigen {
+#include "BandTriangularSolver.h"
+}
+
+using namespace Eigen;
+
+typedef SCALAR Scalar;
+typedef NumTraits<Scalar>::Real RealScalar;
+typedef std::complex<RealScalar> Complex;
+
+enum
+{
+  IsComplex = Eigen::NumTraits<SCALAR>::IsComplex,
+  Conj = IsComplex
+};
+
+typedef Matrix<Scalar,Dynamic,Dynamic,ColMajor> PlainMatrixType;
+typedef Map<Matrix<Scalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > MatrixType;
+typedef Map<Matrix<Scalar,Dynamic,1>, 0, InnerStride<Dynamic> > StridedVectorType;
+typedef Map<Matrix<Scalar,Dynamic,1> > CompactVectorType;
+
+template<typename T>
+Map<Matrix<T,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> >
+matrix(T* data, int rows, int cols, int stride)
+{
+  return Map<Matrix<T,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> >(data, rows, cols, OuterStride<>(stride));
+}
+
+template<typename T>
+Map<Matrix<T,Dynamic,1>, 0, InnerStride<Dynamic> > vector(T* data, int size, int incr)
+{
+  return Map<Matrix<T,Dynamic,1>, 0, InnerStride<Dynamic> >(data, size, InnerStride<Dynamic>(incr));
+}
+
+template<typename T>
+Map<Matrix<T,Dynamic,1> > vector(T* data, int size)
+{
+  return Map<Matrix<T,Dynamic,1> >(data, size);
+}
+
+template<typename T>
+T* get_compact_vector(T* x, int n, int incx)
+{
+  if(incx==1)
+    return x;
+
+  T* ret = new Scalar[n];
+  if(incx<0) vector(ret,n) = vector(x,n,-incx).reverse();
+  else       vector(ret,n) = vector(x,n, incx);
+  return ret;
+}
+
+template<typename T>
+T* copy_back(T* x_cpy, T* x, int n, int incx)
+{
+  if(x_cpy==x)
+    return 0;
+
+  if(incx<0) vector(x,n,-incx).reverse() = vector(x_cpy,n);
+  else       vector(x,n, incx)           = vector(x_cpy,n);
+  return x_cpy;
+}
+
+#define EIGEN_BLAS_FUNC(X) EIGEN_CAT(SCALAR_SUFFIX,X##_)
+
+#endif // EIGEN_BLAS_COMMON_H
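+// Illustrative sketch of the helpers above (hypothetical values, assuming this
+// translation unit is compiled with SCALAR == double): a BLAS vector of length
+// n with increment incx is accessed through a strided Eigen Map, and
+// get_compact_vector() copies it to contiguous storage (reversed when incx is
+// negative) so the kernels can assume unit stride:
+//
+//   double x[8]; int n = 4, incx = 2;             // every other entry is used
+//   vector(x, n, incx).setOnes();                 // strided view, no copy
+//   double* xc = get_compact_vector(x, n, incx);  // contiguous copy if incx != 1
+//   vector(xc, n) *= 2.0;                         // unit-stride view of the copy
+//   delete[] copy_back(xc, x, n, incx);           // write back and free the copy
+//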
diff --git a/resources/3rdparty/eigen/blas/complex_double.cpp b/resources/3rdParty/eigen/blas/complex_double.cpp
similarity index 100%
rename from resources/3rdparty/eigen/blas/complex_double.cpp
rename to resources/3rdParty/eigen/blas/complex_double.cpp
diff --git a/resources/3rdparty/eigen/blas/complex_single.cpp b/resources/3rdParty/eigen/blas/complex_single.cpp
similarity index 100%
rename from resources/3rdparty/eigen/blas/complex_single.cpp
rename to resources/3rdParty/eigen/blas/complex_single.cpp
diff --git a/resources/3rdparty/eigen/blas/complexdots.f b/resources/3rdParty/eigen/blas/complexdots.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/complexdots.f
rename to resources/3rdParty/eigen/blas/complexdots.f
diff --git a/resources/3rdparty/eigen/blas/ctbmv.f b/resources/3rdParty/eigen/blas/ctbmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/ctbmv.f
rename to resources/3rdParty/eigen/blas/ctbmv.f
diff --git a/resources/3rdParty/eigen/blas/ctpmv.f b/resources/3rdParty/eigen/blas/ctpmv.f
new file mode 100644
index 000000000..b63742ccb
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/ctpmv.f
@@ -0,0 +1,329 @@
+      SUBROUTINE CTPMV(UPLO,TRANS,DIAG,N,AP,X,INCX)
+*     .. Scalar Arguments ..
+      INTEGER INCX,N
+      CHARACTER DIAG,TRANS,UPLO
+*     ..
+*     .. Array Arguments ..
+      COMPLEX AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  CTPMV  performs one of the matrix-vector operations
+*
+*     x := A*x,   or   x := A'*x,   or   x := conjg( A' )*x,
+*
+*  where x is an n element vector and  A is an n by n unit, or non-unit,
+*  upper or lower triangular matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the matrix is an upper or
+*           lower triangular matrix as follows:
+*
+*              UPLO = 'U' or 'u'   A is an upper triangular matrix.
+*
+*              UPLO = 'L' or 'l'   A is a lower triangular matrix.
+*
+*           Unchanged on exit.
+*
+*  TRANS  - CHARACTER*1.
+*           On entry, TRANS specifies the operation to be performed as
+*           follows:
+*
+*              TRANS = 'N' or 'n'   x := A*x.
+*
+*              TRANS = 'T' or 't'   x := A'*x.
+*
+*              TRANS = 'C' or 'c'   x := conjg( A' )*x.
+*
+*           Unchanged on exit.
+*
+*  DIAG   - CHARACTER*1.
+*           On entry, DIAG specifies whether or not A is unit
+*           triangular as follows:
+*
+*              DIAG = 'U' or 'u'   A is assumed to be unit triangular.
+*
+*              DIAG = 'N' or 'n'   A is not assumed to be unit
+*                                  triangular.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  AP     - COMPLEX          array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 )
+*           respectively, and so on.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 )
+*           respectively, and so on.
+*           Note that when  DIAG = 'U' or 'u', the diagonal elements of
+*           A are not referenced, but are assumed to be unity.
+*           Unchanged on exit.
+*
+*  X      - COMPLEX          array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x. On exit, X is overwritten with the
+*           transformed vector x.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      COMPLEX ZERO
+      PARAMETER (ZERO= (0.0E+0,0.0E+0))
+*     ..
+*     .. Local Scalars ..
+      COMPLEX TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+      LOGICAL NOCONJ,NOUNIT
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*     .. Intrinsic Functions ..
+      INTRINSIC CONJG
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND.
+     +         .NOT.LSAME(TRANS,'C')) THEN
+          INFO = 2
+      ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN
+          INFO = 3
+      ELSE IF (N.LT.0) THEN
+          INFO = 4
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('CTPMV ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF (N.EQ.0) RETURN
+*
+      NOCONJ = LSAME(TRANS,'T')
+      NOUNIT = LSAME(DIAG,'N')
+*
+*     Set up the start point in X if the increment is not unity. This
+*     will be  ( N - 1 )*INCX  too small for descending loops.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of AP are
+*     accessed sequentially with one pass through AP.
+*
+      IF (LSAME(TRANS,'N')) THEN
+*
+*        Form  x:= A*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 20 J = 1,N
+                      IF (X(J).NE.ZERO) THEN
+                          TEMP = X(J)
+                          K = KK
+                          DO 10 I = 1,J - 1
+                              X(I) = X(I) + TEMP*AP(K)
+                              K = K + 1
+   10                     CONTINUE
+                          IF (NOUNIT) X(J) = X(J)*AP(KK+J-1)
+                      END IF
+                      KK = KK + J
+   20             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 40 J = 1,N
+                      IF (X(JX).NE.ZERO) THEN
+                          TEMP = X(JX)
+                          IX = KX
+                          DO 30 K = KK,KK + J - 2
+                              X(IX) = X(IX) + TEMP*AP(K)
+                              IX = IX + INCX
+   30                     CONTINUE
+                          IF (NOUNIT) X(JX) = X(JX)*AP(KK+J-1)
+                      END IF
+                      JX = JX + INCX
+                      KK = KK + J
+   40             CONTINUE
+              END IF
+          ELSE
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 60 J = N,1,-1
+                      IF (X(J).NE.ZERO) THEN
+                          TEMP = X(J)
+                          K = KK
+                          DO 50 I = N,J + 1,-1
+                              X(I) = X(I) + TEMP*AP(K)
+                              K = K - 1
+   50                     CONTINUE
+                          IF (NOUNIT) X(J) = X(J)*AP(KK-N+J)
+                      END IF
+                      KK = KK - (N-J+1)
+   60             CONTINUE
+              ELSE
+                  KX = KX + (N-1)*INCX
+                  JX = KX
+                  DO 80 J = N,1,-1
+                      IF (X(JX).NE.ZERO) THEN
+                          TEMP = X(JX)
+                          IX = KX
+                          DO 70 K = KK,KK - (N- (J+1)),-1
+                              X(IX) = X(IX) + TEMP*AP(K)
+                              IX = IX - INCX
+   70                     CONTINUE
+                          IF (NOUNIT) X(JX) = X(JX)*AP(KK-N+J)
+                      END IF
+                      JX = JX - INCX
+                      KK = KK - (N-J+1)
+   80             CONTINUE
+              END IF
+          END IF
+      ELSE
+*
+*        Form  x := A'*x  or  x := conjg( A' )*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 110 J = N,1,-1
+                      TEMP = X(J)
+                      K = KK - 1
+                      IF (NOCONJ) THEN
+                          IF (NOUNIT) TEMP = TEMP*AP(KK)
+                          DO 90 I = J - 1,1,-1
+                              TEMP = TEMP + AP(K)*X(I)
+                              K = K - 1
+   90                     CONTINUE
+                      ELSE
+                          IF (NOUNIT) TEMP = TEMP*CONJG(AP(KK))
+                          DO 100 I = J - 1,1,-1
+                              TEMP = TEMP + CONJG(AP(K))*X(I)
+                              K = K - 1
+  100                     CONTINUE
+                      END IF
+                      X(J) = TEMP
+                      KK = KK - J
+  110             CONTINUE
+              ELSE
+                  JX = KX + (N-1)*INCX
+                  DO 140 J = N,1,-1
+                      TEMP = X(JX)
+                      IX = JX
+                      IF (NOCONJ) THEN
+                          IF (NOUNIT) TEMP = TEMP*AP(KK)
+                          DO 120 K = KK - 1,KK - J + 1,-1
+                              IX = IX - INCX
+                              TEMP = TEMP + AP(K)*X(IX)
+  120                     CONTINUE
+                      ELSE
+                          IF (NOUNIT) TEMP = TEMP*CONJG(AP(KK))
+                          DO 130 K = KK - 1,KK - J + 1,-1
+                              IX = IX - INCX
+                              TEMP = TEMP + CONJG(AP(K))*X(IX)
+  130                     CONTINUE
+                      END IF
+                      X(JX) = TEMP
+                      JX = JX - INCX
+                      KK = KK - J
+  140             CONTINUE
+              END IF
+          ELSE
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 170 J = 1,N
+                      TEMP = X(J)
+                      K = KK + 1
+                      IF (NOCONJ) THEN
+                          IF (NOUNIT) TEMP = TEMP*AP(KK)
+                          DO 150 I = J + 1,N
+                              TEMP = TEMP + AP(K)*X(I)
+                              K = K + 1
+  150                     CONTINUE
+                      ELSE
+                          IF (NOUNIT) TEMP = TEMP*CONJG(AP(KK))
+                          DO 160 I = J + 1,N
+                              TEMP = TEMP + CONJG(AP(K))*X(I)
+                              K = K + 1
+  160                     CONTINUE
+                      END IF
+                      X(J) = TEMP
+                      KK = KK + (N-J+1)
+  170             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 200 J = 1,N
+                      TEMP = X(JX)
+                      IX = JX
+                      IF (NOCONJ) THEN
+                          IF (NOUNIT) TEMP = TEMP*AP(KK)
+                          DO 180 K = KK + 1,KK + N - J
+                              IX = IX + INCX
+                              TEMP = TEMP + AP(K)*X(IX)
+  180                     CONTINUE
+                      ELSE
+                          IF (NOUNIT) TEMP = TEMP*CONJG(AP(KK))
+                          DO 190 K = KK + 1,KK + N - J
+                              IX = IX + INCX
+                              TEMP = TEMP + CONJG(AP(K))*X(IX)
+  190                     CONTINUE
+                      END IF
+                      X(JX) = TEMP
+                      JX = JX + INCX
+                      KK = KK + (N-J+1)
+  200             CONTINUE
+              END IF
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of CTPMV .
+*
+      END
diff --git a/resources/3rdParty/eigen/blas/ctpsv.f b/resources/3rdParty/eigen/blas/ctpsv.f
new file mode 100644
index 000000000..1804797ea
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/ctpsv.f
@@ -0,0 +1,332 @@
+      SUBROUTINE CTPSV(UPLO,TRANS,DIAG,N,AP,X,INCX)
+*     .. Scalar Arguments ..
+      INTEGER INCX,N
+      CHARACTER DIAG,TRANS,UPLO
+*     ..
+*     .. Array Arguments ..
+      COMPLEX AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  CTPSV  solves one of the systems of equations
+*
+*     A*x = b,   or   A'*x = b,   or   conjg( A' )*x = b,
+*
+*  where b and x are n element vectors and A is an n by n unit, or
+*  non-unit, upper or lower triangular matrix, supplied in packed form.
+*
+*  No test for singularity or near-singularity is included in this
+*  routine. Such tests must be performed before calling this routine.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the matrix is an upper or
+*           lower triangular matrix as follows:
+*
+*              UPLO = 'U' or 'u'   A is an upper triangular matrix.
+*
+*              UPLO = 'L' or 'l'   A is a lower triangular matrix.
+*
+*           Unchanged on exit.
+*
+*  TRANS  - CHARACTER*1.
+*           On entry, TRANS specifies the equations to be solved as
+*           follows:
+*
+*              TRANS = 'N' or 'n'   A*x = b.
+*
+*              TRANS = 'T' or 't'   A'*x = b.
+*
+*              TRANS = 'C' or 'c'   conjg( A' )*x = b.
+*
+*           Unchanged on exit.
+*
+*  DIAG   - CHARACTER*1.
+*           On entry, DIAG specifies whether or not A is unit
+*           triangular as follows:
+*
+*              DIAG = 'U' or 'u'   A is assumed to be unit triangular.
+*
+*              DIAG = 'N' or 'n'   A is not assumed to be unit
+*                                  triangular.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  AP     - COMPLEX          array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 )
+*           respectively, and so on.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 )
+*           respectively, and so on.
+*           Note that when  DIAG = 'U' or 'u', the diagonal elements of
+*           A are not referenced, but are assumed to be unity.
+*           Unchanged on exit.
+*
+*  X      - COMPLEX          array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element right-hand side vector b. On exit, X is overwritten
+*           with the solution vector x.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      COMPLEX ZERO
+      PARAMETER (ZERO= (0.0E+0,0.0E+0))
+*     ..
+*     .. Local Scalars ..
+      COMPLEX TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+      LOGICAL NOCONJ,NOUNIT
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*     .. Intrinsic Functions ..
+      INTRINSIC CONJG
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND.
+     +         .NOT.LSAME(TRANS,'C')) THEN
+          INFO = 2
+      ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN
+          INFO = 3
+      ELSE IF (N.LT.0) THEN
+          INFO = 4
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('CTPSV ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF (N.EQ.0) RETURN
+*
+      NOCONJ = LSAME(TRANS,'T')
+      NOUNIT = LSAME(DIAG,'N')
+*
+*     Set up the start point in X if the increment is not unity. This
+*     will be  ( N - 1 )*INCX  too small for descending loops.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of AP are
+*     accessed sequentially with one pass through AP.
+*
+      IF (LSAME(TRANS,'N')) THEN
+*
+*        Form  x := inv( A )*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 20 J = N,1,-1
+                      IF (X(J).NE.ZERO) THEN
+                          IF (NOUNIT) X(J) = X(J)/AP(KK)
+                          TEMP = X(J)
+                          K = KK - 1
+                          DO 10 I = J - 1,1,-1
+                              X(I) = X(I) - TEMP*AP(K)
+                              K = K - 1
+   10                     CONTINUE
+                      END IF
+                      KK = KK - J
+   20             CONTINUE
+              ELSE
+                  JX = KX + (N-1)*INCX
+                  DO 40 J = N,1,-1
+                      IF (X(JX).NE.ZERO) THEN
+                          IF (NOUNIT) X(JX) = X(JX)/AP(KK)
+                          TEMP = X(JX)
+                          IX = JX
+                          DO 30 K = KK - 1,KK - J + 1,-1
+                              IX = IX - INCX
+                              X(IX) = X(IX) - TEMP*AP(K)
+   30                     CONTINUE
+                      END IF
+                      JX = JX - INCX
+                      KK = KK - J
+   40             CONTINUE
+              END IF
+          ELSE
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 60 J = 1,N
+                      IF (X(J).NE.ZERO) THEN
+                          IF (NOUNIT) X(J) = X(J)/AP(KK)
+                          TEMP = X(J)
+                          K = KK + 1
+                          DO 50 I = J + 1,N
+                              X(I) = X(I) - TEMP*AP(K)
+                              K = K + 1
+   50                     CONTINUE
+                      END IF
+                      KK = KK + (N-J+1)
+   60             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 80 J = 1,N
+                      IF (X(JX).NE.ZERO) THEN
+                          IF (NOUNIT) X(JX) = X(JX)/AP(KK)
+                          TEMP = X(JX)
+                          IX = JX
+                          DO 70 K = KK + 1,KK + N - J
+                              IX = IX + INCX
+                              X(IX) = X(IX) - TEMP*AP(K)
+   70                     CONTINUE
+                      END IF
+                      JX = JX + INCX
+                      KK = KK + (N-J+1)
+   80             CONTINUE
+              END IF
+          END IF
+      ELSE
+*
+*        Form  x := inv( A' )*x  or  x := inv( conjg( A' ) )*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 110 J = 1,N
+                      TEMP = X(J)
+                      K = KK
+                      IF (NOCONJ) THEN
+                          DO 90 I = 1,J - 1
+                              TEMP = TEMP - AP(K)*X(I)
+                              K = K + 1
+   90                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/AP(KK+J-1)
+                      ELSE
+                          DO 100 I = 1,J - 1
+                              TEMP = TEMP - CONJG(AP(K))*X(I)
+                              K = K + 1
+  100                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/CONJG(AP(KK+J-1))
+                      END IF
+                      X(J) = TEMP
+                      KK = KK + J
+  110             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 140 J = 1,N
+                      TEMP = X(JX)
+                      IX = KX
+                      IF (NOCONJ) THEN
+                          DO 120 K = KK,KK + J - 2
+                              TEMP = TEMP - AP(K)*X(IX)
+                              IX = IX + INCX
+  120                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/AP(KK+J-1)
+                      ELSE
+                          DO 130 K = KK,KK + J - 2
+                              TEMP = TEMP - CONJG(AP(K))*X(IX)
+                              IX = IX + INCX
+  130                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/CONJG(AP(KK+J-1))
+                      END IF
+                      X(JX) = TEMP
+                      JX = JX + INCX
+                      KK = KK + J
+  140             CONTINUE
+              END IF
+          ELSE
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 170 J = N,1,-1
+                      TEMP = X(J)
+                      K = KK
+                      IF (NOCONJ) THEN
+                          DO 150 I = N,J + 1,-1
+                              TEMP = TEMP - AP(K)*X(I)
+                              K = K - 1
+  150                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/AP(KK-N+J)
+                      ELSE
+                          DO 160 I = N,J + 1,-1
+                              TEMP = TEMP - CONJG(AP(K))*X(I)
+                              K = K - 1
+  160                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/CONJG(AP(KK-N+J))
+                      END IF
+                      X(J) = TEMP
+                      KK = KK - (N-J+1)
+  170             CONTINUE
+              ELSE
+                  KX = KX + (N-1)*INCX
+                  JX = KX
+                  DO 200 J = N,1,-1
+                      TEMP = X(JX)
+                      IX = KX
+                      IF (NOCONJ) THEN
+                          DO 180 K = KK,KK - (N- (J+1)),-1
+                              TEMP = TEMP - AP(K)*X(IX)
+                              IX = IX - INCX
+  180                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/AP(KK-N+J)
+                      ELSE
+                          DO 190 K = KK,KK - (N- (J+1)),-1
+                              TEMP = TEMP - CONJG(AP(K))*X(IX)
+                              IX = IX - INCX
+  190                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/CONJG(AP(KK-N+J))
+                      END IF
+                      X(JX) = TEMP
+                      JX = JX - INCX
+                      KK = KK - (N-J+1)
+  200             CONTINUE
+              END IF
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of CTPSV .
+*
+      END
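
The packed layout documented above stores each column of the triangle contiguously, so the TRANS = 'N' back-substitution can be written compactly against a 0-based array. The following C++ sketch (illustration only, not part of the patch; the function name and container choice are mine) mirrors the UPLO = 'U', INCX = 1 branch of CTPSV for a non-unit diagonal:

    #include <complex>
    #include <vector>

    // Solve A*x = b for an upper-triangular A stored in the packed layout
    // documented above, i.e. ap[j*(j+1)/2 + i] holds a(i,j) for i <= j
    // (0-based). Mirrors the TRANS='N', UPLO='U', INCX=1 branch of CTPSV.
    void packed_upper_solve(int n,
                            const std::vector<std::complex<float> >& ap,
                            std::vector<std::complex<float> >& x)
    {
      for (int j = n - 1; j >= 0; --j) {
        const int kk = j * (j + 1) / 2;     // start of column j in AP
        x[j] /= ap[kk + j];                 // divide by the diagonal a(j,j)
        for (int i = 0; i < j; ++i)         // eliminate x(j) from rows above
          x[i] -= x[j] * ap[kk + i];
      }
    }
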
diff --git a/resources/3rdParty/eigen/blas/double.cpp b/resources/3rdParty/eigen/blas/double.cpp
new file mode 100644
index 000000000..cad2f63ec
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/double.cpp
@@ -0,0 +1,19 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define SCALAR        double
+#define SCALAR_SUFFIX d
+#define SCALAR_SUFFIX_UP "D"
+#define ISCOMPLEX     0
+
+#include "level1_impl.h"
+#include "level1_real_impl.h"
+#include "level2_impl.h"
+#include "level2_real_impl.h"
+#include "level3_impl.h"
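
double.cpp shows the pattern used to instantiate the BLAS entry points for one scalar type: a handful of macros select the scalar, the routine-name suffix, and whether the type is complex, and the shared level-1/2/3 implementation headers are then included. Presumably the other precisions are configured the same way; a sketch of what a single-precision translation unit would look like under that assumption (contents inferred, not taken from this patch):

    // Sketch (assumption): configure the shared implementation headers for
    // float, producing the s-prefixed BLAS entry points.
    #define SCALAR        float
    #define SCALAR_SUFFIX s
    #define SCALAR_SUFFIX_UP "S"
    #define ISCOMPLEX     0

    #include "level1_impl.h"
    #include "level1_real_impl.h"
    #include "level2_impl.h"
    #include "level2_real_impl.h"
    #include "level3_impl.h"
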
diff --git a/resources/3rdparty/eigen/blas/drotm.f b/resources/3rdParty/eigen/blas/drotm.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/drotm.f
rename to resources/3rdParty/eigen/blas/drotm.f
diff --git a/resources/3rdparty/eigen/blas/drotmg.f b/resources/3rdParty/eigen/blas/drotmg.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/drotmg.f
rename to resources/3rdParty/eigen/blas/drotmg.f
diff --git a/resources/3rdparty/eigen/blas/dsbmv.f b/resources/3rdParty/eigen/blas/dsbmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/dsbmv.f
rename to resources/3rdParty/eigen/blas/dsbmv.f
diff --git a/resources/3rdparty/eigen/blas/dspmv.f b/resources/3rdParty/eigen/blas/dspmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/dspmv.f
rename to resources/3rdParty/eigen/blas/dspmv.f
diff --git a/resources/3rdParty/eigen/blas/dspr.f b/resources/3rdParty/eigen/blas/dspr.f
new file mode 100644
index 000000000..538e4f76b
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/dspr.f
@@ -0,0 +1,202 @@
+      SUBROUTINE DSPR(UPLO,N,ALPHA,X,INCX,AP)
+*     .. Scalar Arguments ..
+      DOUBLE PRECISION ALPHA
+      INTEGER INCX,N
+      CHARACTER UPLO
+*     ..
+*     .. Array Arguments ..
+      DOUBLE PRECISION AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  DSPR    performs the symmetric rank 1 operation
+*
+*     A := alpha*x*x' + A,
+*
+*  where alpha is a real scalar, x is an n element vector and A is an
+*  n by n symmetric matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the upper or lower
+*           triangular part of the matrix A is supplied in the packed
+*           array AP as follows:
+*
+*              UPLO = 'U' or 'u'   The upper triangular part of A is
+*                                  supplied in AP.
+*
+*              UPLO = 'L' or 'l'   The lower triangular part of A is
+*                                  supplied in AP.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  ALPHA  - DOUBLE PRECISION.
+*           On entry, ALPHA specifies the scalar alpha.
+*           Unchanged on exit.
+*
+*  X      - DOUBLE PRECISION array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x.
+*           Unchanged on exit.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  AP     - DOUBLE PRECISION array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular part of the symmetric matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 )
+*           and a( 2, 2 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the upper triangular part of the
+*           updated matrix.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular part of the symmetric matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 )
+*           and a( 3, 1 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the lower triangular part of the
+*           updated matrix.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      DOUBLE PRECISION ZERO
+      PARAMETER (ZERO=0.0D+0)
+*     ..
+*     .. Local Scalars ..
+      DOUBLE PRECISION TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (N.LT.0) THEN
+          INFO = 2
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 5
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('DSPR  ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF ((N.EQ.0) .OR. (ALPHA.EQ.ZERO)) RETURN
+*
+*     Set the start point in X if the increment is not unity.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of the array AP
+*     are accessed sequentially with one pass through AP.
+*
+      KK = 1
+      IF (LSAME(UPLO,'U')) THEN
+*
+*        Form  A  when upper triangle is stored in AP.
+*
+          IF (INCX.EQ.1) THEN
+              DO 20 J = 1,N
+                  IF (X(J).NE.ZERO) THEN
+                      TEMP = ALPHA*X(J)
+                      K = KK
+                      DO 10 I = 1,J
+                          AP(K) = AP(K) + X(I)*TEMP
+                          K = K + 1
+   10                 CONTINUE
+                  END IF
+                  KK = KK + J
+   20         CONTINUE
+          ELSE
+              JX = KX
+              DO 40 J = 1,N
+                  IF (X(JX).NE.ZERO) THEN
+                      TEMP = ALPHA*X(JX)
+                      IX = KX
+                      DO 30 K = KK,KK + J - 1
+                          AP(K) = AP(K) + X(IX)*TEMP
+                          IX = IX + INCX
+   30                 CONTINUE
+                  END IF
+                  JX = JX + INCX
+                  KK = KK + J
+   40         CONTINUE
+          END IF
+      ELSE
+*
+*        Form  A  when lower triangle is stored in AP.
+*
+          IF (INCX.EQ.1) THEN
+              DO 60 J = 1,N
+                  IF (X(J).NE.ZERO) THEN
+                      TEMP = ALPHA*X(J)
+                      K = KK
+                      DO 50 I = J,N
+                          AP(K) = AP(K) + X(I)*TEMP
+                          K = K + 1
+   50                 CONTINUE
+                  END IF
+                  KK = KK + N - J + 1
+   60         CONTINUE
+          ELSE
+              JX = KX
+              DO 80 J = 1,N
+                  IF (X(JX).NE.ZERO) THEN
+                      TEMP = ALPHA*X(JX)
+                      IX = JX
+                      DO 70 K = KK,KK + N - J
+                          AP(K) = AP(K) + X(IX)*TEMP
+                          IX = IX + INCX
+   70                 CONTINUE
+                  END IF
+                  JX = JX + INCX
+                  KK = KK + N - J + 1
+   80         CONTINUE
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of DSPR  .
+*
+      END
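
The AP argument description above fixes the packed layout used by DSPR and the other packed routines in this patch: each column of the selected triangle is stored contiguously. A small helper (illustration only; the 0-based index formulas are derived from the comments above) makes that mapping explicit:

    #include <cassert>

    // 0-based index of a(i,j) in the packed array AP.
    // Upper ('U'): column j holds a(0..j, j)   -> j*(j+1)/2 + i,            i <= j
    // Lower ('L'): column j holds a(j..n-1, j) -> j*n - j*(j-1)/2 + (i - j), i >= j
    inline int packed_index(char uplo, int n, int i, int j)
    {
      if (uplo == 'U' || uplo == 'u') {
        assert(i <= j);
        return j * (j + 1) / 2 + i;
      }
      assert(i >= j && j < n);
      return j * n - j * (j - 1) / 2 + (i - j);
    }

For example, with UPLO = 'U' the element a(2,2) of the header comments (1-based) lands at packed_index('U', n, 1, 1) = 2, matching AP(3).
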
diff --git a/resources/3rdParty/eigen/blas/dspr2.f b/resources/3rdParty/eigen/blas/dspr2.f
new file mode 100644
index 000000000..6f6b54a8c
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/dspr2.f
@@ -0,0 +1,233 @@
+      SUBROUTINE DSPR2(UPLO,N,ALPHA,X,INCX,Y,INCY,AP)
+*     .. Scalar Arguments ..
+      DOUBLE PRECISION ALPHA
+      INTEGER INCX,INCY,N
+      CHARACTER UPLO
+*     ..
+*     .. Array Arguments ..
+      DOUBLE PRECISION AP(*),X(*),Y(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  DSPR2  performs the symmetric rank 2 operation
+*
+*     A := alpha*x*y' + alpha*y*x' + A,
+*
+*  where alpha is a scalar, x and y are n element vectors and A is an
+*  n by n symmetric matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the upper or lower
+*           triangular part of the matrix A is supplied in the packed
+*           array AP as follows:
+*
+*              UPLO = 'U' or 'u'   The upper triangular part of A is
+*                                  supplied in AP.
+*
+*              UPLO = 'L' or 'l'   The lower triangular part of A is
+*                                  supplied in AP.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  ALPHA  - DOUBLE PRECISION.
+*           On entry, ALPHA specifies the scalar alpha.
+*           Unchanged on exit.
+*
+*  X      - DOUBLE PRECISION array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x.
+*           Unchanged on exit.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Y      - DOUBLE PRECISION array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCY ) ).
+*           Before entry, the incremented array Y must contain the n
+*           element vector y.
+*           Unchanged on exit.
+*
+*  INCY   - INTEGER.
+*           On entry, INCY specifies the increment for the elements of
+*           Y. INCY must not be zero.
+*           Unchanged on exit.
+*
+*  AP     - DOUBLE PRECISION array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular part of the symmetric matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 )
+*           and a( 2, 2 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the upper triangular part of the
+*           updated matrix.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular part of the symmetric matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 )
+*           and a( 3, 1 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the lower triangular part of the
+*           updated matrix.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      DOUBLE PRECISION ZERO
+      PARAMETER (ZERO=0.0D+0)
+*     ..
+*     .. Local Scalars ..
+      DOUBLE PRECISION TEMP1,TEMP2
+      INTEGER I,INFO,IX,IY,J,JX,JY,K,KK,KX,KY
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (N.LT.0) THEN
+          INFO = 2
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 5
+      ELSE IF (INCY.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('DSPR2 ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF ((N.EQ.0) .OR. (ALPHA.EQ.ZERO)) RETURN
+*
+*     Set up the start points in X and Y if the increments are not both
+*     unity.
+*
+      IF ((INCX.NE.1) .OR. (INCY.NE.1)) THEN
+          IF (INCX.GT.0) THEN
+              KX = 1
+          ELSE
+              KX = 1 - (N-1)*INCX
+          END IF
+          IF (INCY.GT.0) THEN
+              KY = 1
+          ELSE
+              KY = 1 - (N-1)*INCY
+          END IF
+          JX = KX
+          JY = KY
+      END IF
+*
+*     Start the operations. In this version the elements of the array AP
+*     are accessed sequentially with one pass through AP.
+*
+      KK = 1
+      IF (LSAME(UPLO,'U')) THEN
+*
+*        Form  A  when upper triangle is stored in AP.
+*
+          IF ((INCX.EQ.1) .AND. (INCY.EQ.1)) THEN
+              DO 20 J = 1,N
+                  IF ((X(J).NE.ZERO) .OR. (Y(J).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*Y(J)
+                      TEMP2 = ALPHA*X(J)
+                      K = KK
+                      DO 10 I = 1,J
+                          AP(K) = AP(K) + X(I)*TEMP1 + Y(I)*TEMP2
+                          K = K + 1
+   10                 CONTINUE
+                  END IF
+                  KK = KK + J
+   20         CONTINUE
+          ELSE
+              DO 40 J = 1,N
+                  IF ((X(JX).NE.ZERO) .OR. (Y(JY).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*Y(JY)
+                      TEMP2 = ALPHA*X(JX)
+                      IX = KX
+                      IY = KY
+                      DO 30 K = KK,KK + J - 1
+                          AP(K) = AP(K) + X(IX)*TEMP1 + Y(IY)*TEMP2
+                          IX = IX + INCX
+                          IY = IY + INCY
+   30                 CONTINUE
+                  END IF
+                  JX = JX + INCX
+                  JY = JY + INCY
+                  KK = KK + J
+   40         CONTINUE
+          END IF
+      ELSE
+*
+*        Form  A  when lower triangle is stored in AP.
+*
+          IF ((INCX.EQ.1) .AND. (INCY.EQ.1)) THEN
+              DO 60 J = 1,N
+                  IF ((X(J).NE.ZERO) .OR. (Y(J).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*Y(J)
+                      TEMP2 = ALPHA*X(J)
+                      K = KK
+                      DO 50 I = J,N
+                          AP(K) = AP(K) + X(I)*TEMP1 + Y(I)*TEMP2
+                          K = K + 1
+   50                 CONTINUE
+                  END IF
+                  KK = KK + N - J + 1
+   60         CONTINUE
+          ELSE
+              DO 80 J = 1,N
+                  IF ((X(JX).NE.ZERO) .OR. (Y(JY).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*Y(JY)
+                      TEMP2 = ALPHA*X(JX)
+                      IX = JX
+                      IY = JY
+                      DO 70 K = KK,KK + N - J
+                          AP(K) = AP(K) + X(IX)*TEMP1 + Y(IY)*TEMP2
+                          IX = IX + INCX
+                          IY = IY + INCY
+   70                 CONTINUE
+                  END IF
+                  JX = JX + INCX
+                  JY = JY + INCY
+                  KK = KK + N - J + 1
+   80         CONTINUE
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of DSPR2 .
+*
+      END
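
On dense storage the same rank-2 update is a one-liner in Eigen, which is a convenient reference when checking the packed routine. A minimal sketch (not part of the BLAS build; it assumes a dense column-major A rather than the packed AP used above):

    #include <Eigen/Dense>
    using namespace Eigen;

    // Dense-storage equivalent of the DSPR2 update
    //     A := alpha*x*y' + alpha*y*x' + A,
    // touching only the selected triangle of A.
    void dense_spr2(double alpha, const VectorXd& x, const VectorXd& y, MatrixXd& A)
    {
      // For real scalars, rankUpdate(x, y, alpha) adds alpha*(x*y' + y*x')
      // to the chosen triangle, which is exactly the operation documented above.
      A.selfadjointView<Lower>().rankUpdate(x, y, alpha);
    }
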
diff --git a/resources/3rdparty/eigen/blas/dtbmv.f b/resources/3rdParty/eigen/blas/dtbmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/dtbmv.f
rename to resources/3rdParty/eigen/blas/dtbmv.f
diff --git a/resources/3rdParty/eigen/blas/dtpmv.f b/resources/3rdParty/eigen/blas/dtpmv.f
new file mode 100644
index 000000000..c5bc112dc
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/dtpmv.f
@@ -0,0 +1,293 @@
+      SUBROUTINE DTPMV(UPLO,TRANS,DIAG,N,AP,X,INCX)
+*     .. Scalar Arguments ..
+      INTEGER INCX,N
+      CHARACTER DIAG,TRANS,UPLO
+*     ..
+*     .. Array Arguments ..
+      DOUBLE PRECISION AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  DTPMV  performs one of the matrix-vector operations
+*
+*     x := A*x,   or   x := A'*x,
+*
+*  where x is an n element vector and  A is an n by n unit, or non-unit,
+*  upper or lower triangular matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the matrix is an upper or
+*           lower triangular matrix as follows:
+*
+*              UPLO = 'U' or 'u'   A is an upper triangular matrix.
+*
+*              UPLO = 'L' or 'l'   A is a lower triangular matrix.
+*
+*           Unchanged on exit.
+*
+*  TRANS  - CHARACTER*1.
+*           On entry, TRANS specifies the operation to be performed as
+*           follows:
+*
+*              TRANS = 'N' or 'n'   x := A*x.
+*
+*              TRANS = 'T' or 't'   x := A'*x.
+*
+*              TRANS = 'C' or 'c'   x := A'*x.
+*
+*           Unchanged on exit.
+*
+*  DIAG   - CHARACTER*1.
+*           On entry, DIAG specifies whether or not A is unit
+*           triangular as follows:
+*
+*              DIAG = 'U' or 'u'   A is assumed to be unit triangular.
+*
+*              DIAG = 'N' or 'n'   A is not assumed to be unit
+*                                  triangular.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  AP     - DOUBLE PRECISION array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 )
+*           respectively, and so on.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 )
+*           respectively, and so on.
+*           Note that when  DIAG = 'U' or 'u', the diagonal elements of
+*           A are not referenced, but are assumed to be unity.
+*           Unchanged on exit.
+*
+*  X      - DOUBLE PRECISION array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x. On exit, X is overwritten with the
+*           transformed vector x.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      DOUBLE PRECISION ZERO
+      PARAMETER (ZERO=0.0D+0)
+*     ..
+*     .. Local Scalars ..
+      DOUBLE PRECISION TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+      LOGICAL NOUNIT
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND.
+     +         .NOT.LSAME(TRANS,'C')) THEN
+          INFO = 2
+      ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN
+          INFO = 3
+      ELSE IF (N.LT.0) THEN
+          INFO = 4
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('DTPMV ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF (N.EQ.0) RETURN
+*
+      NOUNIT = LSAME(DIAG,'N')
+*
+*     Set up the start point in X if the increment is not unity. This
+*     will be  ( N - 1 )*INCX  too small for descending loops.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of AP are
+*     accessed sequentially with one pass through AP.
+*
+      IF (LSAME(TRANS,'N')) THEN
+*
+*        Form  x := A*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 20 J = 1,N
+                      IF (X(J).NE.ZERO) THEN
+                          TEMP = X(J)
+                          K = KK
+                          DO 10 I = 1,J - 1
+                              X(I) = X(I) + TEMP*AP(K)
+                              K = K + 1
+   10                     CONTINUE
+                          IF (NOUNIT) X(J) = X(J)*AP(KK+J-1)
+                      END IF
+                      KK = KK + J
+   20             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 40 J = 1,N
+                      IF (X(JX).NE.ZERO) THEN
+                          TEMP = X(JX)
+                          IX = KX
+                          DO 30 K = KK,KK + J - 2
+                              X(IX) = X(IX) + TEMP*AP(K)
+                              IX = IX + INCX
+   30                     CONTINUE
+                          IF (NOUNIT) X(JX) = X(JX)*AP(KK+J-1)
+                      END IF
+                      JX = JX + INCX
+                      KK = KK + J
+   40             CONTINUE
+              END IF
+          ELSE
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 60 J = N,1,-1
+                      IF (X(J).NE.ZERO) THEN
+                          TEMP = X(J)
+                          K = KK
+                          DO 50 I = N,J + 1,-1
+                              X(I) = X(I) + TEMP*AP(K)
+                              K = K - 1
+   50                     CONTINUE
+                          IF (NOUNIT) X(J) = X(J)*AP(KK-N+J)
+                      END IF
+                      KK = KK - (N-J+1)
+   60             CONTINUE
+              ELSE
+                  KX = KX + (N-1)*INCX
+                  JX = KX
+                  DO 80 J = N,1,-1
+                      IF (X(JX).NE.ZERO) THEN
+                          TEMP = X(JX)
+                          IX = KX
+                          DO 70 K = KK,KK - (N- (J+1)),-1
+                              X(IX) = X(IX) + TEMP*AP(K)
+                              IX = IX - INCX
+   70                     CONTINUE
+                          IF (NOUNIT) X(JX) = X(JX)*AP(KK-N+J)
+                      END IF
+                      JX = JX - INCX
+                      KK = KK - (N-J+1)
+   80             CONTINUE
+              END IF
+          END IF
+      ELSE
+*
+*        Form  x := A'*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 100 J = N,1,-1
+                      TEMP = X(J)
+                      IF (NOUNIT) TEMP = TEMP*AP(KK)
+                      K = KK - 1
+                      DO 90 I = J - 1,1,-1
+                          TEMP = TEMP + AP(K)*X(I)
+                          K = K - 1
+   90                 CONTINUE
+                      X(J) = TEMP
+                      KK = KK - J
+  100             CONTINUE
+              ELSE
+                  JX = KX + (N-1)*INCX
+                  DO 120 J = N,1,-1
+                      TEMP = X(JX)
+                      IX = JX
+                      IF (NOUNIT) TEMP = TEMP*AP(KK)
+                      DO 110 K = KK - 1,KK - J + 1,-1
+                          IX = IX - INCX
+                          TEMP = TEMP + AP(K)*X(IX)
+  110                 CONTINUE
+                      X(JX) = TEMP
+                      JX = JX - INCX
+                      KK = KK - J
+  120             CONTINUE
+              END IF
+          ELSE
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 140 J = 1,N
+                      TEMP = X(J)
+                      IF (NOUNIT) TEMP = TEMP*AP(KK)
+                      K = KK + 1
+                      DO 130 I = J + 1,N
+                          TEMP = TEMP + AP(K)*X(I)
+                          K = K + 1
+  130                 CONTINUE
+                      X(J) = TEMP
+                      KK = KK + (N-J+1)
+  140             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 160 J = 1,N
+                      TEMP = X(JX)
+                      IX = JX
+                      IF (NOUNIT) TEMP = TEMP*AP(KK)
+                      DO 150 K = KK + 1,KK + N - J
+                          IX = IX + INCX
+                          TEMP = TEMP + AP(K)*X(IX)
+  150                 CONTINUE
+                      X(JX) = TEMP
+                      JX = JX + INCX
+                      KK = KK + (N-J+1)
+  160             CONTINUE
+              END IF
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of DTPMV .
+*
+      END
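
For DTPMV the dense-storage counterpart is a triangular matrix-vector product; Eigen's triangularView expresses the same x := A*x while reading only one triangle. A sketch for the UPLO = 'U', TRANS = 'N', DIAG = 'N' case (dense A assumed, illustration only):

    #include <Eigen/Dense>
    using namespace Eigen;

    // x := A*x, reading only the upper triangle of A (dense analogue of
    // DTPMV with UPLO='U', TRANS='N', DIAG='N'); an explicit temporary
    // keeps the in-place update free of aliasing.
    void dense_tpmv_upper(const MatrixXd& A, VectorXd& x)
    {
      VectorXd tmp = A.triangularView<Upper>() * x;
      x = tmp;
    }
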
diff --git a/resources/3rdParty/eigen/blas/dtpsv.f b/resources/3rdParty/eigen/blas/dtpsv.f
new file mode 100644
index 000000000..c7e58d32f
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/dtpsv.f
@@ -0,0 +1,296 @@
+      SUBROUTINE DTPSV(UPLO,TRANS,DIAG,N,AP,X,INCX)
+*     .. Scalar Arguments ..
+      INTEGER INCX,N
+      CHARACTER DIAG,TRANS,UPLO
+*     ..
+*     .. Array Arguments ..
+      DOUBLE PRECISION AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  DTPSV  solves one of the systems of equations
+*
+*     A*x = b,   or   A'*x = b,
+*
+*  where b and x are n element vectors and A is an n by n unit, or
+*  non-unit, upper or lower triangular matrix, supplied in packed form.
+*
+*  No test for singularity or near-singularity is included in this
+*  routine. Such tests must be performed before calling this routine.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the matrix is an upper or
+*           lower triangular matrix as follows:
+*
+*              UPLO = 'U' or 'u'   A is an upper triangular matrix.
+*
+*              UPLO = 'L' or 'l'   A is a lower triangular matrix.
+*
+*           Unchanged on exit.
+*
+*  TRANS  - CHARACTER*1.
+*           On entry, TRANS specifies the equations to be solved as
+*           follows:
+*
+*              TRANS = 'N' or 'n'   A*x = b.
+*
+*              TRANS = 'T' or 't'   A'*x = b.
+*
+*              TRANS = 'C' or 'c'   A'*x = b.
+*
+*           Unchanged on exit.
+*
+*  DIAG   - CHARACTER*1.
+*           On entry, DIAG specifies whether or not A is unit
+*           triangular as follows:
+*
+*              DIAG = 'U' or 'u'   A is assumed to be unit triangular.
+*
+*              DIAG = 'N' or 'n'   A is not assumed to be unit
+*                                  triangular.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  AP     - DOUBLE PRECISION array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 )
+*           respectively, and so on.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 )
+*           respectively, and so on.
+*           Note that when  DIAG = 'U' or 'u', the diagonal elements of
+*           A are not referenced, but are assumed to be unity.
+*           Unchanged on exit.
+*
+*  X      - DOUBLE PRECISION array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element right-hand side vector b. On exit, X is overwritten
+*           with the solution vector x.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      DOUBLE PRECISION ZERO
+      PARAMETER (ZERO=0.0D+0)
+*     ..
+*     .. Local Scalars ..
+      DOUBLE PRECISION TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+      LOGICAL NOUNIT
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND.
+     +         .NOT.LSAME(TRANS,'C')) THEN
+          INFO = 2
+      ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN
+          INFO = 3
+      ELSE IF (N.LT.0) THEN
+          INFO = 4
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('DTPSV ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF (N.EQ.0) RETURN
+*
+      NOUNIT = LSAME(DIAG,'N')
+*
+*     Set up the start point in X if the increment is not unity. This
+*     will be  ( N - 1 )*INCX  too small for descending loops.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of AP are
+*     accessed sequentially with one pass through AP.
+*
+      IF (LSAME(TRANS,'N')) THEN
+*
+*        Form  x := inv( A )*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 20 J = N,1,-1
+                      IF (X(J).NE.ZERO) THEN
+                          IF (NOUNIT) X(J) = X(J)/AP(KK)
+                          TEMP = X(J)
+                          K = KK - 1
+                          DO 10 I = J - 1,1,-1
+                              X(I) = X(I) - TEMP*AP(K)
+                              K = K - 1
+   10                     CONTINUE
+                      END IF
+                      KK = KK - J
+   20             CONTINUE
+              ELSE
+                  JX = KX + (N-1)*INCX
+                  DO 40 J = N,1,-1
+                      IF (X(JX).NE.ZERO) THEN
+                          IF (NOUNIT) X(JX) = X(JX)/AP(KK)
+                          TEMP = X(JX)
+                          IX = JX
+                          DO 30 K = KK - 1,KK - J + 1,-1
+                              IX = IX - INCX
+                              X(IX) = X(IX) - TEMP*AP(K)
+   30                     CONTINUE
+                      END IF
+                      JX = JX - INCX
+                      KK = KK - J
+   40             CONTINUE
+              END IF
+          ELSE
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 60 J = 1,N
+                      IF (X(J).NE.ZERO) THEN
+                          IF (NOUNIT) X(J) = X(J)/AP(KK)
+                          TEMP = X(J)
+                          K = KK + 1
+                          DO 50 I = J + 1,N
+                              X(I) = X(I) - TEMP*AP(K)
+                              K = K + 1
+   50                     CONTINUE
+                      END IF
+                      KK = KK + (N-J+1)
+   60             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 80 J = 1,N
+                      IF (X(JX).NE.ZERO) THEN
+                          IF (NOUNIT) X(JX) = X(JX)/AP(KK)
+                          TEMP = X(JX)
+                          IX = JX
+                          DO 70 K = KK + 1,KK + N - J
+                              IX = IX + INCX
+                              X(IX) = X(IX) - TEMP*AP(K)
+   70                     CONTINUE
+                      END IF
+                      JX = JX + INCX
+                      KK = KK + (N-J+1)
+   80             CONTINUE
+              END IF
+          END IF
+      ELSE
+*
+*        Form  x := inv( A' )*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 100 J = 1,N
+                      TEMP = X(J)
+                      K = KK
+                      DO 90 I = 1,J - 1
+                          TEMP = TEMP - AP(K)*X(I)
+                          K = K + 1
+   90                 CONTINUE
+                      IF (NOUNIT) TEMP = TEMP/AP(KK+J-1)
+                      X(J) = TEMP
+                      KK = KK + J
+  100             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 120 J = 1,N
+                      TEMP = X(JX)
+                      IX = KX
+                      DO 110 K = KK,KK + J - 2
+                          TEMP = TEMP - AP(K)*X(IX)
+                          IX = IX + INCX
+  110                 CONTINUE
+                      IF (NOUNIT) TEMP = TEMP/AP(KK+J-1)
+                      X(JX) = TEMP
+                      JX = JX + INCX
+                      KK = KK + J
+  120             CONTINUE
+              END IF
+          ELSE
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 140 J = N,1,-1
+                      TEMP = X(J)
+                      K = KK
+                      DO 130 I = N,J + 1,-1
+                          TEMP = TEMP - AP(K)*X(I)
+                          K = K - 1
+  130                 CONTINUE
+                      IF (NOUNIT) TEMP = TEMP/AP(KK-N+J)
+                      X(J) = TEMP
+                      KK = KK - (N-J+1)
+  140             CONTINUE
+              ELSE
+                  KX = KX + (N-1)*INCX
+                  JX = KX
+                  DO 160 J = N,1,-1
+                      TEMP = X(JX)
+                      IX = KX
+                      DO 150 K = KK,KK - (N- (J+1)),-1
+                          TEMP = TEMP - AP(K)*X(IX)
+                          IX = IX - INCX
+  150                 CONTINUE
+                      IF (NOUNIT) TEMP = TEMP/AP(KK-N+J)
+                      X(JX) = TEMP
+                      JX = JX - INCX
+                      KK = KK - (N-J+1)
+  160             CONTINUE
+              END IF
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of DTPSV .
+*
+      END
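
All of these routines share the increment convention described in the "Set up the start point" comments: for INCX < 0 the start point KX = 1 - (N-1)*INCX places the logical first element x(1) at the high end of the buffer. A small helper restating that addressing rule (the 0-based conversion is mine, for illustration):

    #include <vector>

    // Logical element x(i), i = 1..n, lives at X(kx + (i-1)*incx), where
    // kx = 1 for incx > 0 and kx = 1 - (n-1)*incx for incx < 0, as in the
    // header comments above.
    double logical_element(const std::vector<double>& X, int n, int incx, int i)
    {
      const int kx = (incx > 0) ? 1 : 1 - (n - 1) * incx;  // 1-based start point
      return X[(kx - 1) + (i - 1) * incx];                  // convert to 0-based
    }
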
diff --git a/resources/3rdparty/eigen/blas/level1_cplx_impl.h b/resources/3rdParty/eigen/blas/level1_cplx_impl.h
similarity index 100%
rename from resources/3rdparty/eigen/blas/level1_cplx_impl.h
rename to resources/3rdParty/eigen/blas/level1_cplx_impl.h
diff --git a/resources/3rdparty/eigen/blas/level1_impl.h b/resources/3rdParty/eigen/blas/level1_impl.h
similarity index 100%
rename from resources/3rdparty/eigen/blas/level1_impl.h
rename to resources/3rdParty/eigen/blas/level1_impl.h
diff --git a/resources/3rdparty/eigen/blas/level1_real_impl.h b/resources/3rdParty/eigen/blas/level1_real_impl.h
similarity index 100%
rename from resources/3rdparty/eigen/blas/level1_real_impl.h
rename to resources/3rdParty/eigen/blas/level1_real_impl.h
diff --git a/resources/3rdParty/eigen/blas/level2_cplx_impl.h b/resources/3rdParty/eigen/blas/level2_cplx_impl.h
new file mode 100644
index 000000000..7878f2a16
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/level2_cplx_impl.h
@@ -0,0 +1,270 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "common.h"
+
+/**  ZHEMV  performs the matrix-vector  operation
+  *
+  *     y := alpha*A*x + beta*y,
+  *
+  *  where alpha and beta are scalars, x and y are n element vectors and
+  *  A is an n by n hermitian matrix.
+  */
+int EIGEN_BLAS_FUNC(hemv)(char *uplo, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *px, int *incx, RealScalar *pbeta, RealScalar *py, int *incy)
+{
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  Scalar* y = reinterpret_cast<Scalar*>(py);
+  Scalar alpha  = *reinterpret_cast<Scalar*>(palpha);
+  Scalar beta   = *reinterpret_cast<Scalar*>(pbeta);
+
+  // check arguments
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)        info = 1;
+  else if(*n<0)                   info = 2;
+  else if(*lda<std::max(1,*n))    info = 5;
+  else if(*incx==0)               info = 7;
+  else if(*incy==0)               info = 10;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"HEMV ",&info,6);
+
+  if(*n==0)
+    return 1;
+
+  Scalar* actual_x = get_compact_vector(x,*n,*incx);
+  Scalar* actual_y = get_compact_vector(y,*n,*incy);
+
+  if(beta!=Scalar(1))
+  {
+    if(beta==Scalar(0)) vector(actual_y, *n).setZero();
+    else                vector(actual_y, *n) *= beta;
+  }
+
+  if(alpha!=Scalar(0))
+  {
+    // TODO performs a direct call to the underlying implementation function
+         if(UPLO(*uplo)==UP) vector(actual_y,*n).noalias() += matrix(a,*n,*n,*lda).selfadjointView<Upper>() * (alpha * vector(actual_x,*n));
+    else if(UPLO(*uplo)==LO) vector(actual_y,*n).noalias() += matrix(a,*n,*n,*lda).selfadjointView<Lower>() * (alpha * vector(actual_x,*n));
+  }
+
+  if(actual_x!=x) delete[] actual_x;
+  if(actual_y!=y) delete[] copy_back(actual_y,y,*n,*incy);
+
+  return 1;
+}
+
+/**  ZHBMV  performs the matrix-vector  operation
+  *
+  *     y := alpha*A*x + beta*y,
+  *
+  *  where alpha and beta are scalars, x and y are n element vectors and
+  *  A is an n by n hermitian band matrix, with k super-diagonals.
+  */
+// int EIGEN_BLAS_FUNC(hbmv)(char *uplo, int *n, int *k, RealScalar *alpha, RealScalar *a, int *lda,
+//                           RealScalar *x, int *incx, RealScalar *beta, RealScalar *y, int *incy)
+// {
+//   return 1;
+// }
+
+/**  ZHPMV  performs the matrix-vector operation
+  *
+  *     y := alpha*A*x + beta*y,
+  *
+  *  where alpha and beta are scalars, x and y are n element vectors and
+  *  A is an n by n hermitian matrix, supplied in packed form.
+  */
+// int EIGEN_BLAS_FUNC(hpmv)(char *uplo, int *n, RealScalar *alpha, RealScalar *ap, RealScalar *x, int *incx, RealScalar *beta, RealScalar *y, int *incy)
+// {
+//   return 1;
+// }
+
+/**  ZHPR    performs the hermitian rank 1 operation
+  *
+  *     A := alpha*x*conjg( x' ) + A,
+  *
+  *  where alpha is a real scalar, x is an n element vector and A is an
+  *  n by n hermitian matrix, supplied in packed form.
+  */
+// int EIGEN_BLAS_FUNC(hpr)(char *uplo, int *n, RealScalar *alpha, RealScalar *x, int *incx, RealScalar *ap)
+// {
+//   return 1;
+// }
+
+/**  ZHPR2  performs the hermitian rank 2 operation
+  *
+  *     A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A,
+  *
+  *  where alpha is a scalar, x and y are n element vectors and A is an
+  *  n by n hermitian matrix, supplied in packed form.
+  */
+// int EIGEN_BLAS_FUNC(hpr2)(char *uplo, int *n, RealScalar *palpha, RealScalar *x, int *incx, RealScalar *y, int *incy, RealScalar *ap)
+// {
+//   return 1;
+// }
+
+/**  ZHER   performs the hermitian rank 1 operation
+  *
+  *     A := alpha*x*conjg( x' ) + A,
+  *
+  *  where alpha is a real scalar, x is an n element vector and A is an
+  *  n by n hermitian matrix.
+  */
+int EIGEN_BLAS_FUNC(her)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *pa, int *lda)
+{
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  RealScalar alpha = *reinterpret_cast<RealScalar*>(palpha);
+
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)                                            info = 1;
+  else if(*n<0)                                                       info = 2;
+  else if(*incx==0)                                                   info = 5;
+  else if(*lda<std::max(1,*n))                                        info = 7;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"HER  ",&info,6);
+
+  if(alpha==RealScalar(0))
+    return 1;
+
+  Scalar* x_cpy = get_compact_vector(x, *n, *incx);
+
+  // TODO perform direct calls to underlying implementation
+//   if(UPLO(*uplo)==LO)       matrix(a,*n,*n,*lda).selfadjointView<Lower>().rankUpdate(vector(x_cpy,*n), alpha);
+//   else if(UPLO(*uplo)==UP)  matrix(a,*n,*n,*lda).selfadjointView<Upper>().rankUpdate(vector(x_cpy,*n), alpha);
+
+  if(UPLO(*uplo)==LO)
+    for(int j=0;j<*n;++j)
+      matrix(a,*n,*n,*lda).col(j).tail(*n-j) += alpha * internal::conj(x_cpy[j]) * vector(x_cpy+j,*n-j);
+  else
+    for(int j=0;j<*n;++j)
+      matrix(a,*n,*n,*lda).col(j).head(j+1) += alpha * internal::conj(x_cpy[j]) * vector(x_cpy,j+1);
+
+  matrix(a,*n,*n,*lda).diagonal().imag().setZero();
+
+  if(x_cpy!=x)  delete[] x_cpy;
+
+  return 1;
+}
+
+/**  ZHER2  performs the hermitian rank 2 operation
+  *
+  *     A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A,
+  *
+  *  where alpha is a scalar, x and y are n element vectors and A is an n
+  *  by n hermitian matrix.
+  */
+int EIGEN_BLAS_FUNC(her2)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pa, int *lda)
+{
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  Scalar* y = reinterpret_cast<Scalar*>(py);
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)                                            info = 1;
+  else if(*n<0)                                                       info = 2;
+  else if(*incx==0)                                                   info = 5;
+  else if(*incy==0)                                                   info = 7;
+  else if(*lda<std::max(1,*n))                                        info = 9;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"HER2 ",&info,6);
+
+  if(alpha==Scalar(0))
+    return 1;
+
+  Scalar* x_cpy = get_compact_vector(x, *n, *incx);
+  Scalar* y_cpy = get_compact_vector(y, *n, *incy);
+
+  // TODO perform direct calls to underlying implementation
+  if(UPLO(*uplo)==LO)       matrix(a,*n,*n,*lda).selfadjointView<Lower>().rankUpdate(vector(x_cpy,*n),vector(y_cpy,*n),alpha);
+  else if(UPLO(*uplo)==UP)  matrix(a,*n,*n,*lda).selfadjointView<Upper>().rankUpdate(vector(x_cpy,*n),vector(y_cpy,*n),alpha);
+
+  matrix(a,*n,*n,*lda).diagonal().imag().setZero();
+
+  if(x_cpy!=x)  delete[] x_cpy;
+  if(y_cpy!=y)  delete[] y_cpy;
+
+  return 1;
+}
+
+/**  ZGERU  performs the rank 1 operation
+  *
+  *     A := alpha*x*y' + A,
+  *
+  *  where alpha is a scalar, x is an m element vector, y is an n element
+  *  vector and A is an m by n matrix.
+  */
+int EIGEN_BLAS_FUNC(geru)(int *m, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pa, int *lda)
+{
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  Scalar* y = reinterpret_cast<Scalar*>(py);
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+
+  int info = 0;
+       if(*m<0)                                                       info = 1;
+  else if(*n<0)                                                       info = 2;
+  else if(*incx==0)                                                   info = 5;
+  else if(*incy==0)                                                   info = 7;
+  else if(*lda<std::max(1,*m))                                        info = 9;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"GERU ",&info,6);
+
+  if(alpha==Scalar(0))
+    return 1;
+
+  Scalar* x_cpy = get_compact_vector(x,*m,*incx);
+  Scalar* y_cpy = get_compact_vector(y,*n,*incy);
+
+  // TODO perform direct calls to underlying implementation
+  matrix(a,*m,*n,*lda) += alpha * vector(x_cpy,*m) * vector(y_cpy,*n).transpose();
+
+  if(x_cpy!=x)  delete[] x_cpy;
+  if(y_cpy!=y)  delete[] y_cpy;
+
+  return 1;
+}
+
+/**  ZGERC  performs the rank 1 operation
+  *
+  *     A := alpha*x*conjg( y' ) + A,
+  *
+  *  where alpha is a scalar, x is an m element vector, y is an n element
+  *  vector and A is an m by n matrix.
+  */
+int EIGEN_BLAS_FUNC(gerc)(int *m, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pa, int *lda)
+{
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  Scalar* y = reinterpret_cast<Scalar*>(py);
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+
+  int info = 0;
+       if(*m<0)                                                       info = 1;
+  else if(*n<0)                                                       info = 2;
+  else if(*incx==0)                                                   info = 5;
+  else if(*incy==0)                                                   info = 7;
+  else if(*lda<std::max(1,*m))                                        info = 9;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"GERC ",&info,6);
+
+  if(alpha==Scalar(0))
+    return 1;
+
+  Scalar* x_cpy = get_compact_vector(x,*m,*incx);
+  Scalar* y_cpy = get_compact_vector(y,*n,*incy);
+
+  // TODO perform direct calls to underlying implementation
+  matrix(a,*m,*n,*lda) += alpha * vector(x_cpy,*m) * vector(y_cpy,*n).adjoint();
+
+  if(x_cpy!=x)  delete[] x_cpy;
+  if(y_cpy!=y)  delete[] y_cpy;
+
+  return 1;
+}
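
The EIGEN_BLAS_FUNC definitions above become ordinary Fortran-callable BLAS symbols once a scalar suffix is chosen by the including translation unit (here 'z' or 'c'). A hedged caller-side sketch, assuming the common convention of a trailing underscore in the exported symbol name (that detail depends on the platform and build and is not guaranteed by this patch):

    #include <complex>

    // Assumed Fortran-style prototype for the double-complex gerc entry point.
    extern "C" void zgerc_(int* m, int* n, std::complex<double>* alpha,
                           std::complex<double>* x, int* incx,
                           std::complex<double>* y, int* incy,
                           std::complex<double>* a, int* lda);

    // A := alpha*x*conj(y') + A on a dense column-major m-by-n matrix a.
    void rank1_update(int m, int n, std::complex<double> alpha,
                      std::complex<double>* x, std::complex<double>* y,
                      std::complex<double>* a)
    {
      int incx = 1, incy = 1, lda = m;
      zgerc_(&m, &n, &alpha, x, &incx, y, &incy, a, &lda);
    }
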
diff --git a/resources/3rdParty/eigen/blas/level2_impl.h b/resources/3rdParty/eigen/blas/level2_impl.h
new file mode 100644
index 000000000..7099cf96d
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/level2_impl.h
@@ -0,0 +1,457 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "common.h"
+
+int EIGEN_BLAS_FUNC(gemv)(char *opa, int *m, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *incb, RealScalar *pbeta, RealScalar *pc, int *incc)
+{
+  typedef void (*functype)(int, int, const Scalar *, int, const Scalar *, int , Scalar *, int, Scalar);
+  static functype func[4];
+
+  static bool init = false;
+  if(!init)
+  {
+    for(int k=0; k<4; ++k)
+      func[k] = 0;
+
+    func[NOTR] = (internal::general_matrix_vector_product<int,Scalar,ColMajor,false,Scalar,false>::run);
+    func[TR  ] = (internal::general_matrix_vector_product<int,Scalar,RowMajor,false,Scalar,false>::run);
+    func[ADJ ] = (internal::general_matrix_vector_product<int,Scalar,RowMajor,Conj, Scalar,false>::run);
+
+    init = true;
+  }
+
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* b = reinterpret_cast<Scalar*>(pb);
+  Scalar* c = reinterpret_cast<Scalar*>(pc);
+  Scalar alpha  = *reinterpret_cast<Scalar*>(palpha);
+  Scalar beta   = *reinterpret_cast<Scalar*>(pbeta);
+
+  // check arguments
+  int info = 0;
+  if(OP(*opa)==INVALID)           info = 1;
+  else if(*m<0)                   info = 2;
+  else if(*n<0)                   info = 3;
+  else if(*lda<std::max(1,*m))    info = 6;
+  else if(*incb==0)               info = 8;
+  else if(*incc==0)               info = 11;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"GEMV ",&info,6);
+
+  if(*m==0 || *n==0 || (alpha==Scalar(0) && beta==Scalar(1)))
+    return 0;
+
+  int actual_m = *m;
+  int actual_n = *n;
+  if(OP(*opa)!=NOTR)
+    std::swap(actual_m,actual_n);
+
+  Scalar* actual_b = get_compact_vector(b,actual_n,*incb);
+  Scalar* actual_c = get_compact_vector(c,actual_m,*incc);
+
+  if(beta!=Scalar(1))
+  {
+    if(beta==Scalar(0)) vector(actual_c, actual_m).setZero();
+    else                vector(actual_c, actual_m) *= beta;
+  }
+
+  int code = OP(*opa);
+  func[code](actual_m, actual_n, a, *lda, actual_b, 1, actual_c, 1, alpha);
+
+  if(actual_b!=b) delete[] actual_b;
+  if(actual_c!=c) delete[] copy_back(actual_c,c,actual_m,*incc);
+
+  return 1;
+}
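+
+// Usage sketch (assuming EIGEN_BLAS_FUNC(gemv) expands to the usual
+// Fortran-style symbol, e.g. dgemv_ for double; column-major storage):
+//
+//   char trans = 'N';
+//   int m = 2, n = 2, one = 1;
+//   double alpha = 1.0, beta = 0.0;
+//   double A[4] = {1, 3, 2, 4};          // [[1,2],[3,4]] stored column-major
+//   double x[2] = {1, 1}, y[2];
+//   dgemv_(&trans, &m, &n, &alpha, A, &m, x, &one, &beta, y, &one);
+//   // y == {3, 7}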
+
+int EIGEN_BLAS_FUNC(trsv)(char *uplo, char *opa, char *diag, int *n, RealScalar *pa, int *lda, RealScalar *pb, int *incb)
+{
+  typedef void (*functype)(int, const Scalar *, int, Scalar *);
+  static functype func[16];
+
+  static bool init = false;
+  if(!init)
+  {
+    for(int k=0; k<16; ++k)
+      func[k] = 0;
+
+    func[NOTR  | (UP << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|0,       false,ColMajor>::run);
+    func[TR    | (UP << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|0,       false,RowMajor>::run);
+    func[ADJ   | (UP << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|0,       Conj, RowMajor>::run);
+
+    func[NOTR  | (LO << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|0,       false,ColMajor>::run);
+    func[TR    | (LO << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|0,       false,RowMajor>::run);
+    func[ADJ   | (LO << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|0,       Conj, RowMajor>::run);
+
+    func[NOTR  | (UP << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|UnitDiag,false,ColMajor>::run);
+    func[TR    | (UP << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|UnitDiag,false,RowMajor>::run);
+    func[ADJ   | (UP << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|UnitDiag,Conj, RowMajor>::run);
+
+    func[NOTR  | (LO << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|UnitDiag,false,ColMajor>::run);
+    func[TR    | (LO << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|UnitDiag,false,RowMajor>::run);
+    func[ADJ   | (LO << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|UnitDiag,Conj, RowMajor>::run);
+
+    init = true;
+  }
+
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* b = reinterpret_cast<Scalar*>(pb);
+
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)                                            info = 1;
+  else if(OP(*opa)==INVALID)                                          info = 2;
+  else if(DIAG(*diag)==INVALID)                                       info = 3;
+  else if(*n<0)                                                       info = 4;
+  else if(*lda<std::max(1,*n))                                        info = 6;
+  else if(*incb==0)                                                   info = 8;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"TRSV ",&info,6);
+
+  Scalar* actual_b = get_compact_vector(b,*n,*incb);
+
+  int code = OP(*opa) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
+  func[code](*n, a, *lda, actual_b);
+
+  if(actual_b!=b) delete[] copy_back(actual_b,b,*n,*incb);
+
+  return 0;
+}
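+
+// Dispatch-table encoding used above (and in trmv/tbsv below): the slot index
+// packs the three option characters as
+//
+//   code = OP(op) | (UPLO(uplo) << 2) | (DIAG(diag) << 3)
+//
+// For example op='T', uplo='L', diag='N' yields TR | (LO << 2) | (NUNIT << 3),
+// the slot registered above with the row-major Upper solver, since solving
+// with the transpose of a lower triangular matrix amounts to an upper
+// triangular solve on the transposed (row-major) storage.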
+
+
+
+int EIGEN_BLAS_FUNC(trmv)(char *uplo, char *opa, char *diag, int *n, RealScalar *pa, int *lda, RealScalar *pb, int *incb)
+{
+  typedef void (*functype)(int, int, const Scalar *, int, const Scalar *, int, Scalar *, int, Scalar);
+  static functype func[16];
+
+  static bool init = false;
+  if(!init)
+  {
+    for(int k=0; k<16; ++k)
+      func[k] = 0;
+
+    func[NOTR  | (UP << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Upper|0,       Scalar,false,Scalar,false,ColMajor>::run);
+    func[TR    | (UP << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Lower|0,       Scalar,false,Scalar,false,RowMajor>::run);
+    func[ADJ   | (UP << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Lower|0,       Scalar,Conj, Scalar,false,RowMajor>::run);
+
+    func[NOTR  | (LO << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Lower|0,       Scalar,false,Scalar,false,ColMajor>::run);
+    func[TR    | (LO << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Upper|0,       Scalar,false,Scalar,false,RowMajor>::run);
+    func[ADJ   | (LO << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Upper|0,       Scalar,Conj, Scalar,false,RowMajor>::run);
+
+    func[NOTR  | (UP << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Upper|UnitDiag,Scalar,false,Scalar,false,ColMajor>::run);
+    func[TR    | (UP << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Lower|UnitDiag,Scalar,false,Scalar,false,RowMajor>::run);
+    func[ADJ   | (UP << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Lower|UnitDiag,Scalar,Conj, Scalar,false,RowMajor>::run);
+
+    func[NOTR  | (LO << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Lower|UnitDiag,Scalar,false,Scalar,false,ColMajor>::run);
+    func[TR    | (LO << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Upper|UnitDiag,Scalar,false,Scalar,false,RowMajor>::run);
+    func[ADJ   | (LO << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Upper|UnitDiag,Scalar,Conj, Scalar,false,RowMajor>::run);
+
+    init = true;
+  }
+
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* b = reinterpret_cast<Scalar*>(pb);
+
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)                                            info = 1;
+  else if(OP(*opa)==INVALID)                                          info = 2;
+  else if(DIAG(*diag)==INVALID)                                       info = 3;
+  else if(*n<0)                                                       info = 4;
+  else if(*lda<std::max(1,*n))                                        info = 6;
+  else if(*incb==0)                                                   info = 8;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"TRMV ",&info,6);
+
+  if(*n==0)
+    return 1;
+
+  Scalar* actual_b = get_compact_vector(b,*n,*incb);
+  Matrix<Scalar,Dynamic,1> res(*n);
+  res.setZero();
+
+  int code = OP(*opa) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
+  if(code>=16 || func[code]==0)
+    return 0;
+
+  func[code](*n, *n, a, *lda, actual_b, 1, res.data(), 1, Scalar(1));
+
+  copy_back(res.data(),b,*n,*incb);
+  if(actual_b!=b) delete[] actual_b;
+
+  return 0;
+}
+
+/**  GBMV  performs one of the matrix-vector operations
+  *
+  *     y := alpha*A*x + beta*y,   or   y := alpha*A'*x + beta*y,
+  *
+  *  where alpha and beta are scalars, x and y are vectors and A is an
+  *  m by n band matrix, with kl sub-diagonals and ku super-diagonals.
+  */
+int EIGEN_BLAS_FUNC(gbmv)(char *trans, int *m, int *n, int *kl, int *ku, RealScalar *palpha, RealScalar *pa, int *lda,
+                          RealScalar *px, int *incx, RealScalar *pbeta, RealScalar *py, int *incy)
+{
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  Scalar* y = reinterpret_cast<Scalar*>(py);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+  Scalar beta = *reinterpret_cast<Scalar*>(pbeta);
+  int coeff_rows = *kl+*ku+1;
+  
+  int info = 0;
+       if(OP(*trans)==INVALID)                                        info = 1;
+  else if(*m<0)                                                       info = 2;
+  else if(*n<0)                                                       info = 3;
+  else if(*kl<0)                                                      info = 4;
+  else if(*ku<0)                                                      info = 5;
+  else if(*lda<coeff_rows)                                            info = 8;
+  else if(*incx==0)                                                   info = 10;
+  else if(*incy==0)                                                   info = 13;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"GBMV ",&info,6);
+  
+  if(*m==0 || *n==0 || (alpha==Scalar(0) && beta==Scalar(1)))
+    return 0;
+  
+  int actual_m = *m;
+  int actual_n = *n;
+  if(OP(*trans)!=NOTR)
+    std::swap(actual_m,actual_n);
+  
+  Scalar* actual_x = get_compact_vector(x,actual_n,*incx);
+  Scalar* actual_y = get_compact_vector(y,actual_m,*incy);
+  
+  if(beta!=Scalar(1))
+  {
+    if(beta==Scalar(0)) vector(actual_y, actual_m).setZero();
+    else                vector(actual_y, actual_m) *= beta;
+  }
+  
+  MatrixType mat_coeffs(a,coeff_rows,*n,*lda);
+  
+  int nb = std::min(*n,(*m)+(*ku));
+  for(int j=0; j<nb; ++j)
+  {
+    int start = std::max(0,j - *ku);
+    int end = std::min((*m)-1,j + *kl);
+    int len = end - start + 1;
+    int offset = (*ku) - j + start;
+    if(OP(*trans)==NOTR)
+      vector(actual_y+start,len) += (alpha*actual_x[j]) * mat_coeffs.col(j).segment(offset,len);
+    else if(OP(*trans)==TR)
+      actual_y[j] += alpha * ( mat_coeffs.col(j).segment(offset,len).transpose() * vector(actual_x+start,len) ).value();
+    else
+      actual_y[j] += alpha * ( mat_coeffs.col(j).segment(offset,len).adjoint()   * vector(actual_x+start,len) ).value();
+  }    
+  
+  if(actual_x!=x) delete[] actual_x;
+  if(actual_y!=y) delete[] copy_back(actual_y,y,actual_m,*incy);
+  
+  return 0;
+}
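+
+// Band-storage recap for the loop above: column j of the coefficient array
+// stores A(j-ku .. j+kl, j), with the main diagonal in row ku. Hence for
+// column j the touched rows of A run from start = max(0, j-ku) to
+// end = min(m-1, j+kl), and they sit at offset ku - j + start within that
+// column, which is exactly the segment(offset,len) used above.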
+
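+// NOTE: the tbmv routine below is an unfinished draft, hence the #if 0: it
+// still refers to *m, *trans, alpha, actual_y and incy, none of which exist
+// in the tbmv signature, so it would not compile if enabled.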
+#if 0
+/**  TBMV  performs one of the matrix-vector operations
+  *
+  *     x := A*x,   or   x := A'*x,
+  *
+  *  where x is an n element vector and  A is an n by n unit, or non-unit,
+  *  upper or lower triangular band matrix, with ( k + 1 ) diagonals.
+  */
+int EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealScalar *pa, int *lda, RealScalar *px, int *incx)
+{
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  int coeff_rows = *k + 1;
+  
+  int info = 0;
+       if(UPLO(*uplo)==INVALID)                                       info = 1;
+  else if(OP(*opa)==INVALID)                                          info = 2;
+  else if(DIAG(*diag)==INVALID)                                       info = 3;
+  else if(*n<0)                                                       info = 4;
+  else if(*k<0)                                                       info = 5;
+  else if(*lda<coeff_rows)                                            info = 7;
+  else if(*incx==0)                                                   info = 9;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"TBMV ",&info,6);
+  
+  if(*n==0)
+    return 0;
+  
+  int actual_n = *n;
+  
+  Scalar* actual_x = get_compact_vector(x,actual_n,*incx);
+  
+  MatrixType mat_coeffs(a,coeff_rows,*n,*lda);
+  
+  int ku = UPLO(*uplo)==UPPER ? *k : 0;
+  int kl = UPLO(*uplo)==LOWER ? *k : 0;
+  
+  for(int j=0; j<*n; ++j)
+  {
+    int start = std::max(0,j - ku);
+    int end = std::min((*m)-1,j + kl);
+    int len = end - start + 1;
+    int offset = (ku) - j + start;
+    
+    if(OP(*trans)==NOTR)
+      vector(actual_y+start,len) += (alpha*actual_x[j]) * mat_coeffs.col(j).segment(offset,len);
+    else if(OP(*trans)==TR)
+      actual_y[j] += alpha * ( mat_coeffs.col(j).segment(offset,len).transpose() * vector(actual_x+start,len) ).value();
+    else
+      actual_y[j] += alpha * ( mat_coeffs.col(j).segment(offset,len).adjoint()   * vector(actual_x+start,len) ).value();
+  }    
+  
+  if(actual_x!=x) delete[] actual_x;
+  if(actual_y!=y) delete[] copy_back(actual_y,y,actual_m,*incy);
+  
+  return 0;
+}
+#endif
+
+/**  DTBSV  solves one of the systems of equations
+  *
+  *     A*x = b,   or   A'*x = b,
+  *
+  *  where b and x are n element vectors and A is an n by n unit, or
+  *  non-unit, upper or lower triangular band matrix, with ( k + 1 )
+  *  diagonals.
+  *
+  *  No test for singularity or near-singularity is included in this
+  *  routine. Such tests must be performed before calling this routine.
+  */
+int EIGEN_BLAS_FUNC(tbsv)(char *uplo, char *op, char *diag, int *n, int *k, RealScalar *pa, int *lda, RealScalar *px, int *incx)
+{
+  typedef void (*functype)(int, int, const Scalar *, int, Scalar *);
+  static functype func[16];
+
+  static bool init = false;
+  if(!init)
+  {
+    for(int k=0; k<16; ++k)
+      func[k] = 0;
+
+    func[NOTR  | (UP << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Upper|0,       Scalar,false,Scalar,ColMajor>::run);
+    func[TR    | (UP << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Lower|0,       Scalar,false,Scalar,RowMajor>::run);
+    func[ADJ   | (UP << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Lower|0,       Scalar,Conj, Scalar,RowMajor>::run);
+
+    func[NOTR  | (LO << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Lower|0,       Scalar,false,Scalar,ColMajor>::run);
+    func[TR    | (LO << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Upper|0,       Scalar,false,Scalar,RowMajor>::run);
+    func[ADJ   | (LO << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Upper|0,       Scalar,Conj, Scalar,RowMajor>::run);
+
+    func[NOTR  | (UP << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Upper|UnitDiag,Scalar,false,Scalar,ColMajor>::run);
+    func[TR    | (UP << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Lower|UnitDiag,Scalar,false,Scalar,RowMajor>::run);
+    func[ADJ   | (UP << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Lower|UnitDiag,Scalar,Conj, Scalar,RowMajor>::run);
+
+    func[NOTR  | (LO << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Lower|UnitDiag,Scalar,false,Scalar,ColMajor>::run);
+    func[TR    | (LO << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Upper|UnitDiag,Scalar,false,Scalar,RowMajor>::run);
+    func[ADJ   | (LO << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Upper|UnitDiag,Scalar,Conj, Scalar,RowMajor>::run);
+
+    init = true;
+  }
+
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  int coeff_rows = *k+1;
+  
+  int info = 0;
+       if(UPLO(*uplo)==INVALID)                                       info = 1;
+  else if(OP(*op)==INVALID)                                           info = 2;
+  else if(DIAG(*diag)==INVALID)                                       info = 3;
+  else if(*n<0)                                                       info = 4;
+  else if(*k<0)                                                       info = 5;
+  else if(*lda<coeff_rows)                                            info = 7;
+  else if(*incx==0)                                                   info = 9;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"TBSV ",&info,6);
+  
+  if(*n==0 || (*k==0 && DIAG(*diag)==UNIT))
+    return 0;
+  
+  int actual_n = *n;
+ 
+  Scalar* actual_x = get_compact_vector(x,actual_n,*incx);
+  
+  int code = OP(*op) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
+  if(code>=16 || func[code]==0)
+    return 0;
+
+  func[code](*n, *k, a, *lda, actual_x);
+  
+  if(actual_x!=x) delete[] copy_back(actual_x,x,actual_n,*incx);
+  
+  return 0;
+}
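+
+// Usage sketch of the standard TBSV band storage (assuming the Fortran-style
+// symbol, e.g. dtbsv_ for double): solve U*x = b in place for the 3x3 upper
+// bidiagonal matrix U = [[2,1,0],[0,2,1],[0,0,2]] stored with k = 1
+// super-diagonal (row 0 of the band holds the super-diagonal, row 1 the
+// diagonal):
+//
+//   char uplo = 'U', trans = 'N', diag = 'N';
+//   int n = 3, k = 1, lda = 2, one = 1;
+//   double ab[2*3] = {0, 2,  1, 2,  1, 2};
+//   double x[3]    = {3, 3, 2};   // right-hand side, overwritten with x
+//   dtbsv_(&uplo, &trans, &diag, &n, &k, ab, &lda, x, &one);
+//   // x == {1, 1, 1}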
+
+/**  DTPMV  performs one of the matrix-vector operations
+  *
+  *     x := A*x,   or   x := A'*x,
+  *
+  *  where x is an n element vector and  A is an n by n unit, or non-unit,
+  *  upper or lower triangular matrix, supplied in packed form.
+  */
+// int EIGEN_BLAS_FUNC(tpmv)(char *uplo, char *trans, char *diag, int *n, RealScalar *ap, RealScalar *x, int *incx)
+// {
+//   return 1;
+// }
+
+/**  DTPSV  solves one of the systems of equations
+  *
+  *     A*x = b,   or   A'*x = b,
+  *
+  *  where b and x are n element vectors and A is an n by n unit, or
+  *  non-unit, upper or lower triangular matrix, supplied in packed form.
+  *
+  *  No test for singularity or near-singularity is included in this
+  *  routine. Such tests must be performed before calling this routine.
+  */
+// int EIGEN_BLAS_FUNC(tpsv)(char *uplo, char *trans, char *diag, int *n, RealScalar *ap, RealScalar *x, int *incx)
+// {
+//   return 1;
+// }
+
+/**  DGER   performs the rank 1 operation
+  *
+  *     A := alpha*x*y' + A,
+  *
+  *  where alpha is a scalar, x is an m element vector, y is an n element
+  *  vector and A is an m by n matrix.
+  */
+int EIGEN_BLAS_FUNC(ger)(int *m, int *n, Scalar *palpha, Scalar *px, int *incx, Scalar *py, int *incy, Scalar *pa, int *lda)
+{
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  Scalar* y = reinterpret_cast<Scalar*>(py);
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+
+  int info = 0;
+       if(*m<0)                                                       info = 1;
+  else if(*n<0)                                                       info = 2;
+  else if(*incx==0)                                                   info = 5;
+  else if(*incy==0)                                                   info = 7;
+  else if(*lda<std::max(1,*m))                                        info = 9;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"GER  ",&info,6);
+
+  if(alpha==Scalar(0))
+    return 1;
+
+  Scalar* x_cpy = get_compact_vector(x,*m,*incx);
+  Scalar* y_cpy = get_compact_vector(y,*n,*incy);
+
+  // TODO perform direct calls to underlying implementation
+  matrix(a,*m,*n,*lda) += alpha * vector(x_cpy,*m) * vector(y_cpy,*n).adjoint();
+
+  if(x_cpy!=x)  delete[] x_cpy;
+  if(y_cpy!=y)  delete[] y_cpy;
+
+  return 1;
+}
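+
+// Usage sketch (assuming the Fortran-style symbol, e.g. dger_ for double):
+//
+//   int m = 2, n = 2, one = 1;
+//   double alpha = 1.0;
+//   double x[2] = {1, 2}, y[2] = {3, 4};
+//   double A[4] = {0, 0, 0, 0};           // 2x2, column-major
+//   dger_(&m, &n, &alpha, x, &one, y, &one, A, &m);
+//   // A == {3, 6, 4, 8}, i.e. [[3,4],[6,8]] = x*y^T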
+
+
diff --git a/resources/3rdParty/eigen/blas/level2_real_impl.h b/resources/3rdParty/eigen/blas/level2_real_impl.h
new file mode 100644
index 000000000..cd8332973
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/level2_real_impl.h
@@ -0,0 +1,210 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "common.h"
+
+// y = alpha*A*x + beta*y
+int EIGEN_BLAS_FUNC(symv) (char *uplo, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *px, int *incx, RealScalar *pbeta, RealScalar *py, int *incy)
+{
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  Scalar* y = reinterpret_cast<Scalar*>(py);
+  Scalar alpha  = *reinterpret_cast<Scalar*>(palpha);
+  Scalar beta   = *reinterpret_cast<Scalar*>(pbeta);
+
+  // check arguments
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)        info = 1;
+  else if(*n<0)                   info = 2;
+  else if(*lda<std::max(1,*n))    info = 5;
+  else if(*incx==0)               info = 7;
+  else if(*incy==0)               info = 10;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"SYMV ",&info,6);
+
+  if(*n==0)
+    return 0;
+
+  Scalar* actual_x = get_compact_vector(x,*n,*incx);
+  Scalar* actual_y = get_compact_vector(y,*n,*incy);
+
+  if(beta!=Scalar(1))
+  {
+    if(beta==Scalar(0)) vector(actual_y, *n).setZero();
+    else                vector(actual_y, *n) *= beta;
+  }
+
+  // TODO performs a direct call to the underlying implementation function
+       if(UPLO(*uplo)==UP) vector(actual_y,*n).noalias() += matrix(a,*n,*n,*lda).selfadjointView<Upper>() * (alpha * vector(actual_x,*n));
+  else if(UPLO(*uplo)==LO) vector(actual_y,*n).noalias() += matrix(a,*n,*n,*lda).selfadjointView<Lower>() * (alpha * vector(actual_x,*n));
+
+  if(actual_x!=x) delete[] actual_x;
+  if(actual_y!=y) delete[] copy_back(actual_y,y,*n,*incy);
+
+  return 1;
+}
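+
+// Only the triangle selected by uplo is read from A: the selfadjointView
+// expressions above implicitly mirror it, matching the SYMV contract that
+// the opposite triangle need not be set.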
+
+// C := alpha*x*x' + C
+int EIGEN_BLAS_FUNC(syr)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *pc, int *ldc)
+{
+
+//   typedef void (*functype)(int, const Scalar *, int, Scalar *, int, Scalar);
+//   static functype func[2];
+
+//   static bool init = false;
+//   if(!init)
+//   {
+//     for(int k=0; k<2; ++k)
+//       func[k] = 0;
+//
+//     func[UP] = (internal::selfadjoint_product<Scalar,ColMajor,ColMajor,false,UpperTriangular>::run);
+//     func[LO] = (internal::selfadjoint_product<Scalar,ColMajor,ColMajor,false,LowerTriangular>::run);
+
+//     init = true;
+//   }
+
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  Scalar* c = reinterpret_cast<Scalar*>(pc);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)                                            info = 1;
+  else if(*n<0)                                                       info = 2;
+  else if(*incx==0)                                                   info = 5;
+  else if(*ldc<std::max(1,*n))                                        info = 7;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"SYR  ",&info,6);
+
+  if(*n==0 || alpha==Scalar(0)) return 1;
+
+  // if the increment is not 1, let's copy it to a temporary vector to enable vectorization
+  Scalar* x_cpy = get_compact_vector(x,*n,*incx);
+
+  Matrix<Scalar,Dynamic,Dynamic> m2(matrix(c,*n,*n,*ldc));
+  
+  // TODO check why this is not accurate enough for lapack tests
+//   if(UPLO(*uplo)==LO)       matrix(c,*n,*n,*ldc).selfadjointView<Lower>().rankUpdate(vector(x_cpy,*n), alpha);
+//   else if(UPLO(*uplo)==UP)  matrix(c,*n,*n,*ldc).selfadjointView<Upper>().rankUpdate(vector(x_cpy,*n), alpha);
+
+  if(UPLO(*uplo)==LO)
+    for(int j=0;j<*n;++j)
+      matrix(c,*n,*n,*ldc).col(j).tail(*n-j) += alpha * x_cpy[j] * vector(x_cpy+j,*n-j);
+  else
+    for(int j=0;j<*n;++j)
+      matrix(c,*n,*n,*ldc).col(j).head(j+1) += alpha * x_cpy[j] * vector(x_cpy,j+1);
+
+  if(x_cpy!=x)  delete[] x_cpy;
+
+  return 1;
+}
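+
+// The loop above is A += alpha * x * x^T restricted to the requested
+// triangle, done one column at a time; e.g. in the lower case column j
+// receives alpha * x[j] * x[j..n-1]. The commented-out rankUpdate() calls
+// compute the same update but are bypassed because of the accuracy concern
+// noted in the TODO above.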
+
+// C := alpha*x*y' + alpha*y*x' + C
+int EIGEN_BLAS_FUNC(syr2)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pc, int *ldc)
+{
+//   typedef void (*functype)(int, const Scalar *, int, const Scalar *, int, Scalar *, int, Scalar);
+//   static functype func[2];
+//
+//   static bool init = false;
+//   if(!init)
+//   {
+//     for(int k=0; k<2; ++k)
+//       func[k] = 0;
+//
+//     func[UP] = (internal::selfadjoint_product<Scalar,ColMajor,ColMajor,false,UpperTriangular>::run);
+//     func[LO] = (internal::selfadjoint_product<Scalar,ColMajor,ColMajor,false,LowerTriangular>::run);
+//
+//     init = true;
+//   }
+
+  Scalar* x = reinterpret_cast<Scalar*>(px);
+  Scalar* y = reinterpret_cast<Scalar*>(py);
+  Scalar* c = reinterpret_cast<Scalar*>(pc);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)                                            info = 1;
+  else if(*n<0)                                                       info = 2;
+  else if(*incx==0)                                                   info = 5;
+  else if(*incy==0)                                                   info = 7;
+  else if(*ldc<std::max(1,*n))                                        info = 9;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"SYR2 ",&info,6);
+
+  if(alpha==Scalar(0))
+    return 1;
+
+  Scalar* x_cpy = get_compact_vector(x,*n,*incx);
+  Scalar* y_cpy = get_compact_vector(y,*n,*incy);
+
+  // TODO perform direct calls to underlying implementation
+  if(UPLO(*uplo)==LO)       matrix(c,*n,*n,*ldc).selfadjointView<Lower>().rankUpdate(vector(x_cpy,*n), vector(y_cpy,*n), alpha);
+  else if(UPLO(*uplo)==UP)  matrix(c,*n,*n,*ldc).selfadjointView<Upper>().rankUpdate(vector(x_cpy,*n), vector(y_cpy,*n), alpha);
+
+  if(x_cpy!=x)  delete[] x_cpy;
+  if(y_cpy!=y)  delete[] y_cpy;
+
+//   int code = UPLO(*uplo);
+//   if(code>=2 || func[code]==0)
+//     return 0;
+
+//   func[code](*n, a, *inca, b, *incb, c, *ldc, alpha);
+  return 1;
+}
+
+/**  DSBMV  performs the matrix-vector  operation
+  *
+  *     y := alpha*A*x + beta*y,
+  *
+  *  where alpha and beta are scalars, x and y are n element vectors and
+  *  A is an n by n symmetric band matrix, with k super-diagonals.
+  */
+// int EIGEN_BLAS_FUNC(sbmv)( char *uplo, int *n, int *k, RealScalar *alpha, RealScalar *a, int *lda,
+//                            RealScalar *x, int *incx, RealScalar *beta, RealScalar *y, int *incy)
+// {
+//   return 1;
+// }
+
+
+/**  DSPMV  performs the matrix-vector operation
+  *
+  *     y := alpha*A*x + beta*y,
+  *
+  *  where alpha and beta are scalars, x and y are n element vectors and
+  *  A is an n by n symmetric matrix, supplied in packed form.
+  *
+  */
+// int EIGEN_BLAS_FUNC(spmv)(char *uplo, int *n, RealScalar *alpha, RealScalar *ap, RealScalar *x, int *incx, RealScalar *beta, RealScalar *y, int *incy)
+// {
+//   return 1;
+// }
+
+/**  DSPR    performs the symmetric rank 1 operation
+  *
+  *     A := alpha*x*x' + A,
+  *
+  *  where alpha is a real scalar, x is an n element vector and A is an
+  *  n by n symmetric matrix, supplied in packed form.
+  */
+// int EIGEN_BLAS_FUNC(spr)(char *uplo, int *n, Scalar *alpha, Scalar *x, int *incx, Scalar *ap)
+// {
+//   return 1;
+// }
+
+/**  DSPR2  performs the symmetric rank 2 operation
+  *
+  *     A := alpha*x*y' + alpha*y*x' + A,
+  *
+  *  where alpha is a scalar, x and y are n element vectors and A is an
+  *  n by n symmetric matrix, supplied in packed form.
+  */
+// int EIGEN_BLAS_FUNC(spr2)(char *uplo, int *n, RealScalar *alpha, RealScalar *x, int *incx, RealScalar *y, int *incy, RealScalar *ap)
+// {
+//   return 1;
+// }
+
diff --git a/resources/3rdParty/eigen/blas/level3_impl.h b/resources/3rdParty/eigen/blas/level3_impl.h
new file mode 100644
index 000000000..2371f25c3
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/level3_impl.h
@@ -0,0 +1,632 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "common.h"
+
+int EIGEN_BLAS_FUNC(gemm)(char *opa, char *opb, int *m, int *n, int *k, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb, RealScalar *pbeta, RealScalar *pc, int *ldc)
+{
+//   std::cerr << "in gemm " << *opa << " " << *opb << " " << *m << " " << *n << " " << *k << " " << *lda << " " << *ldb << " " << *ldc << " " << *palpha << " " << *pbeta << "\n";
+  typedef void (*functype)(DenseIndex, DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, Scalar, internal::level3_blocking<Scalar,Scalar>&, Eigen::internal::GemmParallelInfo<DenseIndex>*);
+  static functype func[12];
+
+  static bool init = false;
+  if(!init)
+  {
+    for(int k=0; k<12; ++k)
+      func[k] = 0;
+    func[NOTR  | (NOTR << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,ColMajor,false,Scalar,ColMajor,false,ColMajor>::run);
+    func[TR    | (NOTR << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,false,Scalar,ColMajor,false,ColMajor>::run);
+    func[ADJ   | (NOTR << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,ColMajor,false,ColMajor>::run);
+    func[NOTR  | (TR   << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,false,ColMajor>::run);
+    func[TR    | (TR   << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,false,Scalar,RowMajor,false,ColMajor>::run);
+    func[ADJ   | (TR   << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,RowMajor,false,ColMajor>::run);
+    func[NOTR  | (ADJ  << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,Conj, ColMajor>::run);
+    func[TR    | (ADJ  << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,false,Scalar,RowMajor,Conj, ColMajor>::run);
+    func[ADJ   | (ADJ  << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,RowMajor,Conj, ColMajor>::run);
+    init = true;
+  }
+
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* b = reinterpret_cast<Scalar*>(pb);
+  Scalar* c = reinterpret_cast<Scalar*>(pc);
+  Scalar alpha  = *reinterpret_cast<Scalar*>(palpha);
+  Scalar beta   = *reinterpret_cast<Scalar*>(pbeta);
+
+  int info = 0;
+  if(OP(*opa)==INVALID)                                               info = 1;
+  else if(OP(*opb)==INVALID)                                          info = 2;
+  else if(*m<0)                                                       info = 3;
+  else if(*n<0)                                                       info = 4;
+  else if(*k<0)                                                       info = 5;
+  else if(*lda<std::max(1,(OP(*opa)==NOTR)?*m:*k))                    info = 8;
+  else if(*ldb<std::max(1,(OP(*opb)==NOTR)?*k:*n))                    info = 10;
+  else if(*ldc<std::max(1,*m))                                        info = 13;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"GEMM ",&info,6);
+
+  if(beta!=Scalar(1))
+  {
+    if(beta==Scalar(0)) matrix(c, *m, *n, *ldc).setZero();
+    else                matrix(c, *m, *n, *ldc) *= beta;
+  }
+
+  internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic> blocking(*m,*n,*k);
+
+  int code = OP(*opa) | (OP(*opb) << 2);
+  func[code](*m, *n, *k, a, *lda, b, *ldb, c, *ldc, alpha, blocking, 0);
+  return 0;
+}
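+
+// Usage sketch (assuming EIGEN_BLAS_FUNC(gemm) expands to the usual
+// Fortran-style symbol, e.g. dgemm_ for double; all matrices column-major):
+//
+//   char trans = 'N';
+//   int n = 2, lda = 2;
+//   double alpha = 1.0, beta = 0.0;
+//   double A[4] = {1, 3, 2, 4};   // [[1,2],[3,4]]
+//   double B[4] = {1, 0, 0, 1};   // identity
+//   double C[4];
+//   dgemm_(&trans, &trans, &n, &n, &n, &alpha, A, &lda, B, &lda, &beta, C, &lda);
+//   // C == A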
+
+int EIGEN_BLAS_FUNC(trsm)(char *side, char *uplo, char *opa, char *diag, int *m, int *n, RealScalar *palpha,  RealScalar *pa, int *lda, RealScalar *pb, int *ldb)
+{
+//   std::cerr << "in trsm " << *side << " " << *uplo << " " << *opa << " " << *diag << " " << *m << "," << *n << " " << *palpha << " " << *lda << " " << *ldb<< "\n";
+  typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, internal::level3_blocking<Scalar,Scalar>&);
+  static functype func[32];
+
+  static bool init = false;
+  if(!init)
+  {
+    for(int k=0; k<32; ++k)
+      func[k] = 0;
+
+    func[NOTR  | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|0,          false,ColMajor,ColMajor>::run);
+    func[TR    | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|0,          false,RowMajor,ColMajor>::run);
+    func[ADJ   | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|0,          Conj, RowMajor,ColMajor>::run);
+
+    func[NOTR  | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|0,          false,ColMajor,ColMajor>::run);
+    func[TR    | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|0,          false,RowMajor,ColMajor>::run);
+    func[ADJ   | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|0,          Conj, RowMajor,ColMajor>::run);
+
+    func[NOTR  | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|0,          false,ColMajor,ColMajor>::run);
+    func[TR    | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|0,          false,RowMajor,ColMajor>::run);
+    func[ADJ   | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|0,          Conj, RowMajor,ColMajor>::run);
+
+    func[NOTR  | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|0,          false,ColMajor,ColMajor>::run);
+    func[TR    | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|0,          false,RowMajor,ColMajor>::run);
+    func[ADJ   | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|0,          Conj, RowMajor,ColMajor>::run);
+
+
+    func[NOTR  | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|UnitDiag,false,ColMajor,ColMajor>::run);
+    func[TR    | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|UnitDiag,false,RowMajor,ColMajor>::run);
+    func[ADJ   | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|UnitDiag,Conj, RowMajor,ColMajor>::run);
+
+    func[NOTR  | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|UnitDiag,false,ColMajor,ColMajor>::run);
+    func[TR    | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|UnitDiag,false,RowMajor,ColMajor>::run);
+    func[ADJ   | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|UnitDiag,Conj, RowMajor,ColMajor>::run);
+
+    func[NOTR  | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|UnitDiag,false,ColMajor,ColMajor>::run);
+    func[TR    | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|UnitDiag,false,RowMajor,ColMajor>::run);
+    func[ADJ   | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|UnitDiag,Conj, RowMajor,ColMajor>::run);
+
+    func[NOTR  | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|UnitDiag,false,ColMajor,ColMajor>::run);
+    func[TR    | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|UnitDiag,false,RowMajor,ColMajor>::run);
+    func[ADJ   | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|UnitDiag,Conj, RowMajor,ColMajor>::run);
+
+    init = true;
+  }
+
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* b = reinterpret_cast<Scalar*>(pb);
+  Scalar  alpha = *reinterpret_cast<Scalar*>(palpha);
+
+  int info = 0;
+  if(SIDE(*side)==INVALID)                                            info = 1;
+  else if(UPLO(*uplo)==INVALID)                                       info = 2;
+  else if(OP(*opa)==INVALID)                                          info = 3;
+  else if(DIAG(*diag)==INVALID)                                       info = 4;
+  else if(*m<0)                                                       info = 5;
+  else if(*n<0)                                                       info = 6;
+  else if(*lda<std::max(1,(SIDE(*side)==LEFT)?*m:*n))                 info = 9;
+  else if(*ldb<std::max(1,*m))                                        info = 11;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"TRSM ",&info,6);
+
+  int code = OP(*opa) | (SIDE(*side) << 2) | (UPLO(*uplo) << 3) | (DIAG(*diag) << 4);
+  
+  if(SIDE(*side)==LEFT)
+  {
+    internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic,4> blocking(*m,*n,*m);
+    func[code](*m, *n, a, *lda, b, *ldb, blocking);
+  }
+  else
+  {
+    internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic,4> blocking(*m,*n,*n);
+    func[code](*n, *m, a, *lda, b, *ldb, blocking);
+  }
+
+  if(alpha!=Scalar(1))
+    matrix(b,*m,*n,*ldb) *= alpha;
+
+  return 0;
+}
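+
+// Note: TRSM solves op(A)*X = alpha*B (or X*op(A) = alpha*B) in place in B.
+// For a nonsingular triangular A the solve is linear in the right-hand side,
+// so scaling B by alpha after the solve, as done above, is equivalent to
+// scaling the right-hand side first.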
+
+
+// b = alpha*op(a)*b  for side = 'L'or'l'
+// b = alpha*b*op(a)  for side = 'R'or'r'
+int EIGEN_BLAS_FUNC(trmm)(char *side, char *uplo, char *opa, char *diag, int *m, int *n, RealScalar *palpha,  RealScalar *pa, int *lda, RealScalar *pb, int *ldb)
+{
+//   std::cerr << "in trmm " << *side << " " << *uplo << " " << *opa << " " << *diag << " " << *m << " " << *n << " " << *lda << " " << *ldb << " " << *palpha << "\n";
+  typedef void (*functype)(DenseIndex, DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, Scalar, internal::level3_blocking<Scalar,Scalar>&);
+  static functype func[32];
+  static bool init = false;
+  if(!init)
+  {
+    for(int k=0; k<32; ++k)
+      func[k] = 0;
+
+    func[NOTR  | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          true, ColMajor,false,ColMajor,false,ColMajor>::run);
+    func[TR    | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          true, RowMajor,false,ColMajor,false,ColMajor>::run);
+    func[ADJ   | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          true, RowMajor,Conj, ColMajor,false,ColMajor>::run);
+
+    func[NOTR  | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          false,ColMajor,false,ColMajor,false,ColMajor>::run);
+    func[TR    | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          false,ColMajor,false,RowMajor,false,ColMajor>::run);
+    func[ADJ   | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          false,ColMajor,false,RowMajor,Conj, ColMajor>::run);
+
+    func[NOTR  | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          true, ColMajor,false,ColMajor,false,ColMajor>::run);
+    func[TR    | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          true, RowMajor,false,ColMajor,false,ColMajor>::run);
+    func[ADJ   | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          true, RowMajor,Conj, ColMajor,false,ColMajor>::run);
+
+    func[NOTR  | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          false,ColMajor,false,ColMajor,false,ColMajor>::run);
+    func[TR    | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          false,ColMajor,false,RowMajor,false,ColMajor>::run);
+    func[ADJ   | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          false,ColMajor,false,RowMajor,Conj, ColMajor>::run);
+
+    func[NOTR  | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,true, ColMajor,false,ColMajor,false,ColMajor>::run);
+    func[TR    | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,true, RowMajor,false,ColMajor,false,ColMajor>::run);
+    func[ADJ   | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,true, RowMajor,Conj, ColMajor,false,ColMajor>::run);
+
+    func[NOTR  | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,false,ColMajor,false,ColMajor,false,ColMajor>::run);
+    func[TR    | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,false,ColMajor,false,RowMajor,false,ColMajor>::run);
+    func[ADJ   | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,false,ColMajor,false,RowMajor,Conj, ColMajor>::run);
+
+    func[NOTR  | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,true, ColMajor,false,ColMajor,false,ColMajor>::run);
+    func[TR    | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,true, RowMajor,false,ColMajor,false,ColMajor>::run);
+    func[ADJ   | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,true, RowMajor,Conj, ColMajor,false,ColMajor>::run);
+
+    func[NOTR  | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,false,ColMajor,false,ColMajor,false,ColMajor>::run);
+    func[TR    | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,false,ColMajor,false,RowMajor,false,ColMajor>::run);
+    func[ADJ   | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,false,ColMajor,false,RowMajor,Conj, ColMajor>::run);
+
+    init = true;
+  }
+
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* b = reinterpret_cast<Scalar*>(pb);
+  Scalar  alpha = *reinterpret_cast<Scalar*>(palpha);
+
+  int info = 0;
+  if(SIDE(*side)==INVALID)                                            info = 1;
+  else if(UPLO(*uplo)==INVALID)                                       info = 2;
+  else if(OP(*opa)==INVALID)                                          info = 3;
+  else if(DIAG(*diag)==INVALID)                                       info = 4;
+  else if(*m<0)                                                       info = 5;
+  else if(*n<0)                                                       info = 6;
+  else if(*lda<std::max(1,(SIDE(*side)==LEFT)?*m:*n))                 info = 9;
+  else if(*ldb<std::max(1,*m))                                        info = 11;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"TRMM ",&info,6);
+
+  int code = OP(*opa) | (SIDE(*side) << 2) | (UPLO(*uplo) << 3) | (DIAG(*diag) << 4);
+
+  if(*m==0 || *n==0)
+    return 1;
+
+  // FIXME find a way to avoid this copy
+  Matrix<Scalar,Dynamic,Dynamic,ColMajor> tmp = matrix(b,*m,*n,*ldb);
+  matrix(b,*m,*n,*ldb).setZero();
+
+  if(SIDE(*side)==LEFT)
+  {
+    internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic,4> blocking(*m,*n,*m);
+    func[code](*m, *n, *m, a, *lda, tmp.data(), tmp.outerStride(), b, *ldb, alpha, blocking);
+  }
+  else
+  {
+    internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic,4> blocking(*m,*n,*n);
+    func[code](*m, *n, *n, tmp.data(), tmp.outerStride(), a, *lda, b, *ldb, alpha, blocking);
+  }
+  return 1;
+}
+
+// c = alpha*a*b + beta*c  for side = 'L'or'l'
+// c = alpha*b*a + beta*c  for side = 'R'or'r'
+int EIGEN_BLAS_FUNC(symm)(char *side, char *uplo, int *m, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb, RealScalar *pbeta, RealScalar *pc, int *ldc)
+{
+//   std::cerr << "in symm " << *side << " " << *uplo << " " << *m << "x" << *n << " lda:" << *lda << " ldb:" << *ldb << " ldc:" << *ldc << " alpha:" << *palpha << " beta:" << *pbeta << "\n";
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* b = reinterpret_cast<Scalar*>(pb);
+  Scalar* c = reinterpret_cast<Scalar*>(pc);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+  Scalar beta  = *reinterpret_cast<Scalar*>(pbeta);
+
+  int info = 0;
+  if(SIDE(*side)==INVALID)                                            info = 1;
+  else if(UPLO(*uplo)==INVALID)                                       info = 2;
+  else if(*m<0)                                                       info = 3;
+  else if(*n<0)                                                       info = 4;
+  else if(*lda<std::max(1,(SIDE(*side)==LEFT)?*m:*n))                 info = 7;
+  else if(*ldb<std::max(1,*m))                                        info = 9;
+  else if(*ldc<std::max(1,*m))                                        info = 12;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"SYMM ",&info,6);
+
+  if(beta!=Scalar(1))
+  {
+    if(beta==Scalar(0)) matrix(c, *m, *n, *ldc).setZero();
+    else                matrix(c, *m, *n, *ldc) *= beta;
+  }
+
+  if(*m==0 || *n==0)
+  {
+    return 1;
+  }
+
+  #if ISCOMPLEX
+  // FIXME add support for symmetric complex matrix
+  int size = (SIDE(*side)==LEFT) ? (*m) : (*n);
+  Matrix<Scalar,Dynamic,Dynamic,ColMajor> matA(size,size);
+  if(UPLO(*uplo)==UP)
+  {
+    matA.triangularView<Upper>() = matrix(a,size,size,*lda);
+    matA.triangularView<Lower>() = matrix(a,size,size,*lda).transpose();
+  }
+  else if(UPLO(*uplo)==LO)
+  {
+    matA.triangularView<Lower>() = matrix(a,size,size,*lda);
+    matA.triangularView<Upper>() = matrix(a,size,size,*lda).transpose();
+  }
+  if(SIDE(*side)==LEFT)
+    matrix(c, *m, *n, *ldc) += alpha * matA * matrix(b, *m, *n, *ldb);
+  else if(SIDE(*side)==RIGHT)
+    matrix(c, *m, *n, *ldc) += alpha * matrix(b, *m, *n, *ldb) * matA;
+  #else
+  if(SIDE(*side)==LEFT)
+    if(UPLO(*uplo)==UP)       internal::product_selfadjoint_matrix<Scalar, DenseIndex, RowMajor,true,false, ColMajor,false,false, ColMajor>::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha);
+    else if(UPLO(*uplo)==LO)  internal::product_selfadjoint_matrix<Scalar, DenseIndex, ColMajor,true,false, ColMajor,false,false, ColMajor>::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha);
+    else                      return 0;
+  else if(SIDE(*side)==RIGHT)
+    if(UPLO(*uplo)==UP)       internal::product_selfadjoint_matrix<Scalar, DenseIndex, ColMajor,false,false, RowMajor,true,false, ColMajor>::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha);
+    else if(UPLO(*uplo)==LO)  internal::product_selfadjoint_matrix<Scalar, DenseIndex, ColMajor,false,false, ColMajor,true,false, ColMajor>::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha);
+    else                      return 0;
+  else
+    return 0;
+  #endif
+
+  return 0;
+}
+
+// c = alpha*a*a' + beta*c  for op = 'N'or'n'
+// c = alpha*a'*a + beta*c  for op = 'T'or't','C'or'c'
+int EIGEN_BLAS_FUNC(syrk)(char *uplo, char *op, int *n, int *k, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pbeta, RealScalar *pc, int *ldc)
+{
+//   std::cerr << "in syrk " << *uplo << " " << *op << " " << *n << " " << *k << " " << *palpha << " " << *lda << " " << *pbeta << " " << *ldc << "\n";
+  typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, Scalar);
+  static functype func[8];
+
+  static bool init = false;
+  if(!init)
+  {
+    for(int k=0; k<8; ++k)
+      func[k] = 0;
+
+    func[NOTR  | (UP << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,ColMajor,Conj, Upper>::run);
+    func[TR    | (UP << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,false,Scalar,ColMajor,ColMajor,Conj, Upper>::run);
+    func[ADJ   | (UP << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,ColMajor,ColMajor,false,Upper>::run);
+
+    func[NOTR  | (LO << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,ColMajor,Conj, Lower>::run);
+    func[TR    | (LO << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,false,Scalar,ColMajor,ColMajor,Conj, Lower>::run);
+    func[ADJ   | (LO << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,ColMajor,ColMajor,false,Lower>::run);
+
+    init = true;
+  }
+
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* c = reinterpret_cast<Scalar*>(pc);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+  Scalar beta  = *reinterpret_cast<Scalar*>(pbeta);
+
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)                                            info = 1;
+  else if(OP(*op)==INVALID)                                           info = 2;
+  else if(*n<0)                                                       info = 3;
+  else if(*k<0)                                                       info = 4;
+  else if(*lda<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 7;
+  else if(*ldc<std::max(1,*n))                                        info = 10;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"SYRK ",&info,6);
+
+  if(beta!=Scalar(1))
+  {
+    if(UPLO(*uplo)==UP)
+      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Upper>().setZero();
+      else                matrix(c, *n, *n, *ldc).triangularView<Upper>() *= beta;
+    else
+      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Lower>().setZero();
+      else                matrix(c, *n, *n, *ldc).triangularView<Lower>() *= beta;
+  }
+
+  #if ISCOMPLEX
+  // FIXME add support for symmetric complex matrix
+  if(UPLO(*uplo)==UP)
+  {
+    if(OP(*op)==NOTR)
+      matrix(c, *n, *n, *ldc).triangularView<Upper>() += alpha * matrix(a,*n,*k,*lda) * matrix(a,*n,*k,*lda).transpose();
+    else
+      matrix(c, *n, *n, *ldc).triangularView<Upper>() += alpha * matrix(a,*k,*n,*lda).transpose() * matrix(a,*k,*n,*lda);
+  }
+  else
+  {
+    if(OP(*op)==NOTR)
+      matrix(c, *n, *n, *ldc).triangularView<Lower>() += alpha * matrix(a,*n,*k,*lda) * matrix(a,*n,*k,*lda).transpose();
+    else
+      matrix(c, *n, *n, *ldc).triangularView<Lower>() += alpha * matrix(a,*k,*n,*lda).transpose() * matrix(a,*k,*n,*lda);
+  }
+  #else
+  int code = OP(*op) | (UPLO(*uplo) << 2);
+  func[code](*n, *k, a, *lda, a, *lda, c, *ldc, alpha);
+  #endif
+
+  return 0;
+}
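+
+// For complex scalars the ISCOMPLEX branch above falls back to plain Eigen
+// expressions: SYRK is a symmetric (not Hermitian) update, C += alpha*A*A^T
+// rather than alpha*A*A^H, which the optimized kernels used in the real case
+// do not cover here (see the FIXME above).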
+
+// c = alpha*a*b' + alpha*b*a' + beta*c  for op = 'N'or'n'
+// c = alpha*a'*b + alpha*b'*a + beta*c  for op = 'T'or't'
+int EIGEN_BLAS_FUNC(syr2k)(char *uplo, char *op, int *n, int *k, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb, RealScalar *pbeta, RealScalar *pc, int *ldc)
+{
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* b = reinterpret_cast<Scalar*>(pb);
+  Scalar* c = reinterpret_cast<Scalar*>(pc);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+  Scalar beta  = *reinterpret_cast<Scalar*>(pbeta);
+
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)                                            info = 1;
+  else if(OP(*op)==INVALID)                                           info = 2;
+  else if(*n<0)                                                       info = 3;
+  else if(*k<0)                                                       info = 4;
+  else if(*lda<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 7;
+  else if(*ldb<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 9;
+  else if(*ldc<std::max(1,*n))                                        info = 12;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"SYR2K",&info,6);
+
+  if(beta!=Scalar(1))
+  {
+    if(UPLO(*uplo)==UP)
+      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Upper>().setZero();
+      else                matrix(c, *n, *n, *ldc).triangularView<Upper>() *= beta;
+    else
+      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Lower>().setZero();
+      else                matrix(c, *n, *n, *ldc).triangularView<Lower>() *= beta;
+  }
+
+  if(*k==0)
+    return 1;
+
+  if(OP(*op)==NOTR)
+  {
+    if(UPLO(*uplo)==UP)
+    {
+      matrix(c, *n, *n, *ldc).triangularView<Upper>()
+        += alpha*matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).transpose()
+        +  alpha*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).transpose();
+    }
+    else if(UPLO(*uplo)==LO)
+      matrix(c, *n, *n, *ldc).triangularView<Lower>()
+        += alpha*matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).transpose()
+        +  alpha*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).transpose();
+  }
+  else if(OP(*op)==TR || OP(*op)==ADJ)
+  {
+    if(UPLO(*uplo)==UP)
+      matrix(c, *n, *n, *ldc).triangularView<Upper>()
+        += alpha*matrix(a, *k, *n, *lda).transpose()*matrix(b, *k, *n, *ldb)
+        +  alpha*matrix(b, *k, *n, *ldb).transpose()*matrix(a, *k, *n, *lda);
+    else if(UPLO(*uplo)==LO)
+      matrix(c, *n, *n, *ldc).triangularView<Lower>()
+        += alpha*matrix(a, *k, *n, *lda).transpose()*matrix(b, *k, *n, *ldb)
+        +  alpha*matrix(b, *k, *n, *ldb).transpose()*matrix(a, *k, *n, *lda);
+  }
+
+  return 0;
+}
+
+
+#if ISCOMPLEX
+
+// c = alpha*a*b + beta*c  for side = 'L'or'l'
+// c = alpha*b*a + beta*c  for side = 'R'or'r'
+int EIGEN_BLAS_FUNC(hemm)(char *side, char *uplo, int *m, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb, RealScalar *pbeta, RealScalar *pc, int *ldc)
+{
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* b = reinterpret_cast<Scalar*>(pb);
+  Scalar* c = reinterpret_cast<Scalar*>(pc);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+  Scalar beta  = *reinterpret_cast<Scalar*>(pbeta);
+
+//   std::cerr << "in hemm " << *side << " " << *uplo << " " << *m << " " << *n << " " << alpha << " " << *lda << " " << beta << " " << *ldc << "\n";
+
+  int info = 0;
+  if(SIDE(*side)==INVALID)                                            info = 1;
+  else if(UPLO(*uplo)==INVALID)                                       info = 2;
+  else if(*m<0)                                                       info = 3;
+  else if(*n<0)                                                       info = 4;
+  else if(*lda<std::max(1,(SIDE(*side)==LEFT)?*m:*n))                 info = 7;
+  else if(*ldb<std::max(1,*m))                                        info = 9;
+  else if(*ldc<std::max(1,*m))                                        info = 12;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"HEMM ",&info,6);
+
+  if(beta==Scalar(0))       matrix(c, *m, *n, *ldc).setZero();
+  else if(beta!=Scalar(1))  matrix(c, *m, *n, *ldc) *= beta;
+
+  if(*m==0 || *n==0)
+  {
+    return 1;
+  }
+
+  if(SIDE(*side)==LEFT)
+  {
+    if(UPLO(*uplo)==UP)       internal::product_selfadjoint_matrix<Scalar,DenseIndex,RowMajor,true,Conj,  ColMajor,false,false, ColMajor>
+                                ::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha);
+    else if(UPLO(*uplo)==LO)  internal::product_selfadjoint_matrix<Scalar,DenseIndex,ColMajor,true,false, ColMajor,false,false, ColMajor>
+                                ::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha);
+    else                      return 0;
+  }
+  else if(SIDE(*side)==RIGHT)
+  {
+    if(UPLO(*uplo)==UP)       matrix(c,*m,*n,*ldc) += alpha * matrix(b,*m,*n,*ldb) * matrix(a,*n,*n,*lda).selfadjointView<Upper>();/*internal::product_selfadjoint_matrix<Scalar,DenseIndex,ColMajor,false,false, RowMajor,true,Conj,  ColMajor>
+                                ::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha);*/
+    else if(UPLO(*uplo)==LO)  internal::product_selfadjoint_matrix<Scalar,DenseIndex,ColMajor,false,false, ColMajor,true,false, ColMajor>
+                                ::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha);
+    else                      return 0;
+  }
+  else
+  {
+    return 0;
+  }
+
+  return 0;
+}
+
+// c = alpha*a*conj(a') + beta*c  for op = 'N'or'n'
+// c = alpha*conj(a')*a + beta*c  for op  = 'C'or'c'
+int EIGEN_BLAS_FUNC(herk)(char *uplo, char *op, int *n, int *k, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pbeta, RealScalar *pc, int *ldc)
+{
+  typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, Scalar);
+  static functype func[8];
+
+  static bool init = false;
+  if(!init)
+  {
+    for(int k=0; k<8; ++k)
+      func[k] = 0;
+
+    func[NOTR  | (UP << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,Conj, ColMajor,Upper>::run);
+    func[ADJ   | (UP << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,ColMajor,false,ColMajor,Upper>::run);
+
+    func[NOTR  | (LO << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,Conj, ColMajor,Lower>::run);
+    func[ADJ   | (LO << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,ColMajor,false,ColMajor,Lower>::run);
+
+    init = true;
+  }
+
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* c = reinterpret_cast<Scalar*>(pc);
+  RealScalar alpha = *palpha;
+  RealScalar beta  = *pbeta;
+
+//   std::cerr << "in herk " << *uplo << " " << *op << " " << *n << " " << *k << " " << alpha << " " << *lda << " " << beta << " " << *ldc << "\n";
+
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)                                            info = 1;
+  else if((OP(*op)==INVALID) || (OP(*op)==TR))                        info = 2;
+  else if(*n<0)                                                       info = 3;
+  else if(*k<0)                                                       info = 4;
+  else if(*lda<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 7;
+  else if(*ldc<std::max(1,*n))                                        info = 10;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"HERK ",&info,6);
+
+  int code = OP(*op) | (UPLO(*uplo) << 2);
+
+  if(beta!=RealScalar(1))
+  {
+    if(UPLO(*uplo)==UP)
+      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Upper>().setZero();
+      else                matrix(c, *n, *n, *ldc).triangularView<StrictlyUpper>() *= beta;
+    else
+      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Lower>().setZero();
+      else                matrix(c, *n, *n, *ldc).triangularView<StrictlyLower>() *= beta;
+  
+    if(beta!=Scalar(0))
+    {
+      matrix(c, *n, *n, *ldc).diagonal().real() *= beta;
+      matrix(c, *n, *n, *ldc).diagonal().imag().setZero();
+    }
+  }
+
+  if(*k>0 && alpha!=RealScalar(0))
+  {
+    func[code](*n, *k, a, *lda, a, *lda, c, *ldc, alpha);
+    matrix(c, *n, *n, *ldc).diagonal().imag().setZero();
+  }
+  return 0;
+}
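
For illustration only (not part of the patch): herk, like the other routines in this file, selects its kernel through a small function-pointer table indexed by code = OP(*op) | (UPLO(*uplo) << 2), so the operation code occupies the low bits and the triangle selector sits two bits higher. The stand-alone sketch below reproduces that dispatch pattern; the enum values and kernel bodies are stand-ins, not Eigen's actual definitions.

// Sketch of the func[OP | (UPLO << 2)] dispatch pattern (stand-in values).
#include <cstdio>

enum { NOTR = 0, TR = 1, ADJ = 2 };   // assumed operation codes
enum { UP = 0, LO = 1 };              // assumed triangle codes

typedef void (*kernel_t)(int n, int k);

void upper_notrans(int n, int k) { std::printf("C(upper) += A*A^H, n=%d k=%d\n", n, k); }
void upper_adj(int n, int k)     { std::printf("C(upper) += A^H*A, n=%d k=%d\n", n, k); }
void lower_notrans(int n, int k) { std::printf("C(lower) += A*A^H, n=%d k=%d\n", n, k); }
void lower_adj(int n, int k)     { std::printf("C(lower) += A^H*A, n=%d k=%d\n", n, k); }

int main()
{
  kernel_t func[8] = {0};
  func[NOTR | (UP << 2)] = upper_notrans;
  func[ADJ  | (UP << 2)] = upper_adj;
  func[NOTR | (LO << 2)] = lower_notrans;
  func[ADJ  | (LO << 2)] = lower_adj;

  int op = ADJ, uplo = LO;            // e.g. HERK('L','C',...)
  int code = op | (uplo << 2);        // same packing as in the wrapper
  if (func[code]) func[code](4, 7);   // dispatch to the selected kernel
  return 0;
}
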
+
+// c = alpha*a*conj(b') + conj(alpha)*b*conj(a') + beta*c,  for op = 'N'or'n'
+// c = alpha*conj(a')*b + conj(alpha)*conj(b')*a + beta*c,  for op = 'C'or'c'
+int EIGEN_BLAS_FUNC(her2k)(char *uplo, char *op, int *n, int *k, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb, RealScalar *pbeta, RealScalar *pc, int *ldc)
+{
+  Scalar* a = reinterpret_cast<Scalar*>(pa);
+  Scalar* b = reinterpret_cast<Scalar*>(pb);
+  Scalar* c = reinterpret_cast<Scalar*>(pc);
+  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
+  RealScalar beta  = *pbeta;
+
+  int info = 0;
+  if(UPLO(*uplo)==INVALID)                                            info = 1;
+  else if((OP(*op)==INVALID) || (OP(*op)==TR))                        info = 2;
+  else if(*n<0)                                                       info = 3;
+  else if(*k<0)                                                       info = 4;
+  else if(*lda<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 7;
+  else if(*ldb<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 9;
+  else if(*ldc<std::max(1,*n))                                        info = 12;
+  if(info)
+    return xerbla_(SCALAR_SUFFIX_UP"HER2K",&info,6);
+
+  if(beta!=RealScalar(1))
+  {
+    if(UPLO(*uplo)==UP)
+      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Upper>().setZero();
+      else                matrix(c, *n, *n, *ldc).triangularView<StrictlyUpper>() *= beta;
+    else
+      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Lower>().setZero();
+      else                matrix(c, *n, *n, *ldc).triangularView<StrictlyLower>() *= beta;
+
+    if(beta!=Scalar(0))
+    {
+      matrix(c, *n, *n, *ldc).diagonal().real() *= beta;
+      matrix(c, *n, *n, *ldc).diagonal().imag().setZero();
+    }
+  }
+  else if(*k>0 && alpha!=Scalar(0))
+    matrix(c, *n, *n, *ldc).diagonal().imag().setZero();
+
+  if(*k==0)
+    return 1;
+
+  if(OP(*op)==NOTR)
+  {
+    if(UPLO(*uplo)==UP)
+    {
+      matrix(c, *n, *n, *ldc).triangularView<Upper>()
+        +=         alpha *matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).adjoint()
+        +  internal::conj(alpha)*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).adjoint();
+    }
+    else if(UPLO(*uplo)==LO)
+      matrix(c, *n, *n, *ldc).triangularView<Lower>()
+        += alpha*matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).adjoint()
+        +  internal::conj(alpha)*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).adjoint();
+  }
+  else if(OP(*op)==ADJ)
+  {
+    if(UPLO(*uplo)==UP)
+      matrix(c, *n, *n, *ldc).triangularView<Upper>()
+        += alpha*matrix(a, *k, *n, *lda).adjoint()*matrix(b, *k, *n, *ldb)
+        +  internal::conj(alpha)*matrix(b, *k, *n, *ldb).adjoint()*matrix(a, *k, *n, *lda);
+    else if(UPLO(*uplo)==LO)
+      matrix(c, *n, *n, *ldc).triangularView<Lower>()
+        += alpha*matrix(a, *k, *n, *lda).adjoint()*matrix(b, *k, *n, *ldb)
+        +  internal::conj(alpha)*matrix(b, *k, *n, *ldb).adjoint()*matrix(a, *k, *n, *lda);
+  }
+
+  return 1;
+}
+
+#endif // ISCOMPLEX
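
For illustration only (not part of the patch): her2k can force the imaginary part of C's diagonal to zero because each diagonal entry of alpha*A*B^H + conj(alpha)*B*A^H has the form z + conj(z), which is real; the update itself can only deposit round-off noise there. A tiny stand-alone check of that identity with std::complex; the numeric values are arbitrary.

// Diagonal entries of alpha*A*B^H + conj(alpha)*B*A^H are z + conj(z), i.e. real.
#include <complex>
#include <cstdio>

int main()
{
  typedef std::complex<float> C;
  C alpha(0.7f, -1.3f), a(2.0f, 0.5f), b(-1.0f, 3.0f);

  C z = alpha * a * std::conj(b);   // contribution of the alpha*A*B^H term
  C diag = z + std::conj(z);        // plus the conj(alpha)*B*A^H term
  std::printf("diag = (%g, %g)\n", diag.real(), diag.imag());  // imaginary part is 0
  return 0;
}
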
diff --git a/resources/3rdparty/eigen/blas/lsame.f b/resources/3rdParty/eigen/blas/lsame.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/lsame.f
rename to resources/3rdParty/eigen/blas/lsame.f
diff --git a/resources/3rdParty/eigen/blas/single.cpp b/resources/3rdParty/eigen/blas/single.cpp
new file mode 100644
index 000000000..1b7775aed
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/single.cpp
@@ -0,0 +1,19 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define SCALAR        float
+#define SCALAR_SUFFIX s
+#define SCALAR_SUFFIX_UP "S"
+#define ISCOMPLEX     0
+
+#include "level1_impl.h"
+#include "level1_real_impl.h"
+#include "level2_impl.h"
+#include "level2_real_impl.h"
+#include "level3_impl.h"
diff --git a/resources/3rdparty/eigen/blas/srotm.f b/resources/3rdParty/eigen/blas/srotm.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/srotm.f
rename to resources/3rdParty/eigen/blas/srotm.f
diff --git a/resources/3rdparty/eigen/blas/srotmg.f b/resources/3rdParty/eigen/blas/srotmg.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/srotmg.f
rename to resources/3rdParty/eigen/blas/srotmg.f
diff --git a/resources/3rdparty/eigen/blas/ssbmv.f b/resources/3rdParty/eigen/blas/ssbmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/ssbmv.f
rename to resources/3rdParty/eigen/blas/ssbmv.f
diff --git a/resources/3rdparty/eigen/blas/sspmv.f b/resources/3rdParty/eigen/blas/sspmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/sspmv.f
rename to resources/3rdParty/eigen/blas/sspmv.f
diff --git a/resources/3rdParty/eigen/blas/sspr.f b/resources/3rdParty/eigen/blas/sspr.f
new file mode 100644
index 000000000..bae92612e
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/sspr.f
@@ -0,0 +1,202 @@
+      SUBROUTINE SSPR(UPLO,N,ALPHA,X,INCX,AP)
+*     .. Scalar Arguments ..
+      REAL ALPHA
+      INTEGER INCX,N
+      CHARACTER UPLO
+*     ..
+*     .. Array Arguments ..
+      REAL AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  SSPR    performs the symmetric rank 1 operation
+*
+*     A := alpha*x*x' + A,
+*
+*  where alpha is a real scalar, x is an n element vector and A is an
+*  n by n symmetric matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the upper or lower
+*           triangular part of the matrix A is supplied in the packed
+*           array AP as follows:
+*
+*              UPLO = 'U' or 'u'   The upper triangular part of A is
+*                                  supplied in AP.
+*
+*              UPLO = 'L' or 'l'   The lower triangular part of A is
+*                                  supplied in AP.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  ALPHA  - REAL            .
+*           On entry, ALPHA specifies the scalar alpha.
+*           Unchanged on exit.
+*
+*  X      - REAL             array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x.
+*           Unchanged on exit.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  AP     - REAL             array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular part of the symmetric matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 )
+*           and a( 2, 2 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the upper triangular part of the
+*           updated matrix.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular part of the symmetric matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 )
+*           and a( 3, 1 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the lower triangular part of the
+*           updated matrix.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      REAL ZERO
+      PARAMETER (ZERO=0.0E+0)
+*     ..
+*     .. Local Scalars ..
+      REAL TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (N.LT.0) THEN
+          INFO = 2
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 5
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('SSPR  ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF ((N.EQ.0) .OR. (ALPHA.EQ.ZERO)) RETURN
+*
+*     Set the start point in X if the increment is not unity.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of the array AP
+*     are accessed sequentially with one pass through AP.
+*
+      KK = 1
+      IF (LSAME(UPLO,'U')) THEN
+*
+*        Form  A  when upper triangle is stored in AP.
+*
+          IF (INCX.EQ.1) THEN
+              DO 20 J = 1,N
+                  IF (X(J).NE.ZERO) THEN
+                      TEMP = ALPHA*X(J)
+                      K = KK
+                      DO 10 I = 1,J
+                          AP(K) = AP(K) + X(I)*TEMP
+                          K = K + 1
+   10                 CONTINUE
+                  END IF
+                  KK = KK + J
+   20         CONTINUE
+          ELSE
+              JX = KX
+              DO 40 J = 1,N
+                  IF (X(JX).NE.ZERO) THEN
+                      TEMP = ALPHA*X(JX)
+                      IX = KX
+                      DO 30 K = KK,KK + J - 1
+                          AP(K) = AP(K) + X(IX)*TEMP
+                          IX = IX + INCX
+   30                 CONTINUE
+                  END IF
+                  JX = JX + INCX
+                  KK = KK + J
+   40         CONTINUE
+          END IF
+      ELSE
+*
+*        Form  A  when lower triangle is stored in AP.
+*
+          IF (INCX.EQ.1) THEN
+              DO 60 J = 1,N
+                  IF (X(J).NE.ZERO) THEN
+                      TEMP = ALPHA*X(J)
+                      K = KK
+                      DO 50 I = J,N
+                          AP(K) = AP(K) + X(I)*TEMP
+                          K = K + 1
+   50                 CONTINUE
+                  END IF
+                  KK = KK + N - J + 1
+   60         CONTINUE
+          ELSE
+              JX = KX
+              DO 80 J = 1,N
+                  IF (X(JX).NE.ZERO) THEN
+                      TEMP = ALPHA*X(JX)
+                      IX = JX
+                      DO 70 K = KK,KK + N - J
+                          AP(K) = AP(K) + X(IX)*TEMP
+                          IX = IX + INCX
+   70                 CONTINUE
+                  END IF
+                  JX = JX + INCX
+                  KK = KK + N - J + 1
+   80         CONTINUE
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of SSPR  .
+*
+      END
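
For illustration only (not part of the patch): SSPR's packed layout stores the upper triangle column by column, so column J occupies AP(KK) .. AP(KK+J-1) and KK advances by J per column. The stand-alone C++ sketch below mirrors the unit-increment upper-triangle branch of A := alpha*x*x' + A; the function and variable names are illustrative only.

// Sketch of SSPR's unit-increment, upper-triangle branch in C++ (0-based).
#include <cstdio>
#include <vector>

void sspr_upper(int n, float alpha, const std::vector<float>& x, std::vector<float>& ap)
{
  int kk = 0;                        // 0-based analogue of KK = 1
  for (int j = 0; j < n; ++j) {      // one pass over the packed columns
    if (x[j] != 0.0f) {
      float temp = alpha * x[j];
      for (int i = 0; i <= j; ++i)   // rows 0..j of column j
        ap[kk + i] += x[i] * temp;
    }
    kk += j + 1;                     // column j holds j+1 packed entries
  }
}

int main()
{
  int n = 3;
  std::vector<float> x(3);
  x[0] = 1.0f; x[1] = 2.0f; x[2] = 3.0f;
  std::vector<float> ap(n * (n + 1) / 2, 0.0f);   // packed upper triangle of A = 0
  sspr_upper(n, 0.5f, x, ap);
  for (int k = 0; k < (int)ap.size(); ++k) std::printf("%g ", ap[k]);
  std::printf("\n");                 // prints 0.5 1 2 1.5 3 4.5
  return 0;
}
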
diff --git a/resources/3rdParty/eigen/blas/sspr2.f b/resources/3rdParty/eigen/blas/sspr2.f
new file mode 100644
index 000000000..cd27c734b
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/sspr2.f
@@ -0,0 +1,233 @@
+      SUBROUTINE SSPR2(UPLO,N,ALPHA,X,INCX,Y,INCY,AP)
+*     .. Scalar Arguments ..
+      REAL ALPHA
+      INTEGER INCX,INCY,N
+      CHARACTER UPLO
+*     ..
+*     .. Array Arguments ..
+      REAL AP(*),X(*),Y(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  SSPR2  performs the symmetric rank 2 operation
+*
+*     A := alpha*x*y' + alpha*y*x' + A,
+*
+*  where alpha is a scalar, x and y are n element vectors and A is an
+*  n by n symmetric matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the upper or lower
+*           triangular part of the matrix A is supplied in the packed
+*           array AP as follows:
+*
+*              UPLO = 'U' or 'u'   The upper triangular part of A is
+*                                  supplied in AP.
+*
+*              UPLO = 'L' or 'l'   The lower triangular part of A is
+*                                  supplied in AP.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  ALPHA  - REAL            .
+*           On entry, ALPHA specifies the scalar alpha.
+*           Unchanged on exit.
+*
+*  X      - REAL             array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x.
+*           Unchanged on exit.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Y      - REAL             array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCY ) ).
+*           Before entry, the incremented array Y must contain the n
+*           element vector y.
+*           Unchanged on exit.
+*
+*  INCY   - INTEGER.
+*           On entry, INCY specifies the increment for the elements of
+*           Y. INCY must not be zero.
+*           Unchanged on exit.
+*
+*  AP     - REAL             array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular part of the symmetric matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 )
+*           and a( 2, 2 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the upper triangular part of the
+*           updated matrix.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular part of the symmetric matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 )
+*           and a( 3, 1 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the lower triangular part of the
+*           updated matrix.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      REAL ZERO
+      PARAMETER (ZERO=0.0E+0)
+*     ..
+*     .. Local Scalars ..
+      REAL TEMP1,TEMP2
+      INTEGER I,INFO,IX,IY,J,JX,JY,K,KK,KX,KY
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (N.LT.0) THEN
+          INFO = 2
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 5
+      ELSE IF (INCY.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('SSPR2 ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF ((N.EQ.0) .OR. (ALPHA.EQ.ZERO)) RETURN
+*
+*     Set up the start points in X and Y if the increments are not both
+*     unity.
+*
+      IF ((INCX.NE.1) .OR. (INCY.NE.1)) THEN
+          IF (INCX.GT.0) THEN
+              KX = 1
+          ELSE
+              KX = 1 - (N-1)*INCX
+          END IF
+          IF (INCY.GT.0) THEN
+              KY = 1
+          ELSE
+              KY = 1 - (N-1)*INCY
+          END IF
+          JX = KX
+          JY = KY
+      END IF
+*
+*     Start the operations. In this version the elements of the array AP
+*     are accessed sequentially with one pass through AP.
+*
+      KK = 1
+      IF (LSAME(UPLO,'U')) THEN
+*
+*        Form  A  when upper triangle is stored in AP.
+*
+          IF ((INCX.EQ.1) .AND. (INCY.EQ.1)) THEN
+              DO 20 J = 1,N
+                  IF ((X(J).NE.ZERO) .OR. (Y(J).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*Y(J)
+                      TEMP2 = ALPHA*X(J)
+                      K = KK
+                      DO 10 I = 1,J
+                          AP(K) = AP(K) + X(I)*TEMP1 + Y(I)*TEMP2
+                          K = K + 1
+   10                 CONTINUE
+                  END IF
+                  KK = KK + J
+   20         CONTINUE
+          ELSE
+              DO 40 J = 1,N
+                  IF ((X(JX).NE.ZERO) .OR. (Y(JY).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*Y(JY)
+                      TEMP2 = ALPHA*X(JX)
+                      IX = KX
+                      IY = KY
+                      DO 30 K = KK,KK + J - 1
+                          AP(K) = AP(K) + X(IX)*TEMP1 + Y(IY)*TEMP2
+                          IX = IX + INCX
+                          IY = IY + INCY
+   30                 CONTINUE
+                  END IF
+                  JX = JX + INCX
+                  JY = JY + INCY
+                  KK = KK + J
+   40         CONTINUE
+          END IF
+      ELSE
+*
+*        Form  A  when lower triangle is stored in AP.
+*
+          IF ((INCX.EQ.1) .AND. (INCY.EQ.1)) THEN
+              DO 60 J = 1,N
+                  IF ((X(J).NE.ZERO) .OR. (Y(J).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*Y(J)
+                      TEMP2 = ALPHA*X(J)
+                      K = KK
+                      DO 50 I = J,N
+                          AP(K) = AP(K) + X(I)*TEMP1 + Y(I)*TEMP2
+                          K = K + 1
+   50                 CONTINUE
+                  END IF
+                  KK = KK + N - J + 1
+   60         CONTINUE
+          ELSE
+              DO 80 J = 1,N
+                  IF ((X(JX).NE.ZERO) .OR. (Y(JY).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*Y(JY)
+                      TEMP2 = ALPHA*X(JX)
+                      IX = JX
+                      IY = JY
+                      DO 70 K = KK,KK + N - J
+                          AP(K) = AP(K) + X(IX)*TEMP1 + Y(IY)*TEMP2
+                          IX = IX + INCX
+                          IY = IY + INCY
+   70                 CONTINUE
+                  END IF
+                  JX = JX + INCX
+                  JY = JY + INCY
+                  KK = KK + N - J + 1
+   80         CONTINUE
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of SSPR2 .
+*
+      END
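
For illustration only (not part of the patch): when INCX or INCY is not unity, SSPR2 (like the other routines here) first computes start points KX and KY so that a negative increment walks the stored vector backwards, i.e. logical element 1 sits at the far end of the array. A small stand-alone sketch of that start-point rule for one vector; the values are arbitrary.

// Sketch of the BLAS start-point rule KX = 1 - (N-1)*INCX (0-based here).
#include <cstdio>

int main()
{
  float x[4] = {10.0f, 20.0f, 30.0f, 40.0f};   // storage order
  int n = 4, incx = -1;

  int kx = (incx > 0) ? 0 : -(n - 1) * incx;   // 0-based start point
  int ix = kx;
  for (int j = 1; j <= n; ++j) {               // logical elements x_1 .. x_n
    std::printf("x_%d = %g\n", j, x[ix]);
    ix += incx;
  }
  // prints 40, 30, 20, 10: with a negative increment the logical vector
  // is the stored array read back to front.
  return 0;
}
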
diff --git a/resources/3rdparty/eigen/blas/stbmv.f b/resources/3rdParty/eigen/blas/stbmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/stbmv.f
rename to resources/3rdParty/eigen/blas/stbmv.f
diff --git a/resources/3rdParty/eigen/blas/stpmv.f b/resources/3rdParty/eigen/blas/stpmv.f
new file mode 100644
index 000000000..71ea49a36
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/stpmv.f
@@ -0,0 +1,293 @@
+      SUBROUTINE STPMV(UPLO,TRANS,DIAG,N,AP,X,INCX)
+*     .. Scalar Arguments ..
+      INTEGER INCX,N
+      CHARACTER DIAG,TRANS,UPLO
+*     ..
+*     .. Array Arguments ..
+      REAL AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  STPMV  performs one of the matrix-vector operations
+*
+*     x := A*x,   or   x := A'*x,
+*
+*  where x is an n element vector and  A is an n by n unit, or non-unit,
+*  upper or lower triangular matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the matrix is an upper or
+*           lower triangular matrix as follows:
+*
+*              UPLO = 'U' or 'u'   A is an upper triangular matrix.
+*
+*              UPLO = 'L' or 'l'   A is a lower triangular matrix.
+*
+*           Unchanged on exit.
+*
+*  TRANS  - CHARACTER*1.
+*           On entry, TRANS specifies the operation to be performed as
+*           follows:
+*
+*              TRANS = 'N' or 'n'   x := A*x.
+*
+*              TRANS = 'T' or 't'   x := A'*x.
+*
+*              TRANS = 'C' or 'c'   x := A'*x.
+*
+*           Unchanged on exit.
+*
+*  DIAG   - CHARACTER*1.
+*           On entry, DIAG specifies whether or not A is unit
+*           triangular as follows:
+*
+*              DIAG = 'U' or 'u'   A is assumed to be unit triangular.
+*
+*              DIAG = 'N' or 'n'   A is not assumed to be unit
+*                                  triangular.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  AP     - REAL             array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 )
+*           respectively, and so on.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 )
+*           respectively, and so on.
+*           Note that when  DIAG = 'U' or 'u', the diagonal elements of
+*           A are not referenced, but are assumed to be unity.
+*           Unchanged on exit.
+*
+*  X      - REAL             array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x. On exit, X is overwritten with the
+*           transformed vector x.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      REAL ZERO
+      PARAMETER (ZERO=0.0E+0)
+*     ..
+*     .. Local Scalars ..
+      REAL TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+      LOGICAL NOUNIT
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND.
+     +         .NOT.LSAME(TRANS,'C')) THEN
+          INFO = 2
+      ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN
+          INFO = 3
+      ELSE IF (N.LT.0) THEN
+          INFO = 4
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('STPMV ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF (N.EQ.0) RETURN
+*
+      NOUNIT = LSAME(DIAG,'N')
+*
+*     Set up the start point in X if the increment is not unity. This
+*     will be  ( N - 1 )*INCX  too small for descending loops.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of AP are
+*     accessed sequentially with one pass through AP.
+*
+      IF (LSAME(TRANS,'N')) THEN
+*
+*        Form  x:= A*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 20 J = 1,N
+                      IF (X(J).NE.ZERO) THEN
+                          TEMP = X(J)
+                          K = KK
+                          DO 10 I = 1,J - 1
+                              X(I) = X(I) + TEMP*AP(K)
+                              K = K + 1
+   10                     CONTINUE
+                          IF (NOUNIT) X(J) = X(J)*AP(KK+J-1)
+                      END IF
+                      KK = KK + J
+   20             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 40 J = 1,N
+                      IF (X(JX).NE.ZERO) THEN
+                          TEMP = X(JX)
+                          IX = KX
+                          DO 30 K = KK,KK + J - 2
+                              X(IX) = X(IX) + TEMP*AP(K)
+                              IX = IX + INCX
+   30                     CONTINUE
+                          IF (NOUNIT) X(JX) = X(JX)*AP(KK+J-1)
+                      END IF
+                      JX = JX + INCX
+                      KK = KK + J
+   40             CONTINUE
+              END IF
+          ELSE
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 60 J = N,1,-1
+                      IF (X(J).NE.ZERO) THEN
+                          TEMP = X(J)
+                          K = KK
+                          DO 50 I = N,J + 1,-1
+                              X(I) = X(I) + TEMP*AP(K)
+                              K = K - 1
+   50                     CONTINUE
+                          IF (NOUNIT) X(J) = X(J)*AP(KK-N+J)
+                      END IF
+                      KK = KK - (N-J+1)
+   60             CONTINUE
+              ELSE
+                  KX = KX + (N-1)*INCX
+                  JX = KX
+                  DO 80 J = N,1,-1
+                      IF (X(JX).NE.ZERO) THEN
+                          TEMP = X(JX)
+                          IX = KX
+                          DO 70 K = KK,KK - (N- (J+1)),-1
+                              X(IX) = X(IX) + TEMP*AP(K)
+                              IX = IX - INCX
+   70                     CONTINUE
+                          IF (NOUNIT) X(JX) = X(JX)*AP(KK-N+J)
+                      END IF
+                      JX = JX - INCX
+                      KK = KK - (N-J+1)
+   80             CONTINUE
+              END IF
+          END IF
+      ELSE
+*
+*        Form  x := A'*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 100 J = N,1,-1
+                      TEMP = X(J)
+                      IF (NOUNIT) TEMP = TEMP*AP(KK)
+                      K = KK - 1
+                      DO 90 I = J - 1,1,-1
+                          TEMP = TEMP + AP(K)*X(I)
+                          K = K - 1
+   90                 CONTINUE
+                      X(J) = TEMP
+                      KK = KK - J
+  100             CONTINUE
+              ELSE
+                  JX = KX + (N-1)*INCX
+                  DO 120 J = N,1,-1
+                      TEMP = X(JX)
+                      IX = JX
+                      IF (NOUNIT) TEMP = TEMP*AP(KK)
+                      DO 110 K = KK - 1,KK - J + 1,-1
+                          IX = IX - INCX
+                          TEMP = TEMP + AP(K)*X(IX)
+  110                 CONTINUE
+                      X(JX) = TEMP
+                      JX = JX - INCX
+                      KK = KK - J
+  120             CONTINUE
+              END IF
+          ELSE
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 140 J = 1,N
+                      TEMP = X(J)
+                      IF (NOUNIT) TEMP = TEMP*AP(KK)
+                      K = KK + 1
+                      DO 130 I = J + 1,N
+                          TEMP = TEMP + AP(K)*X(I)
+                          K = K + 1
+  130                 CONTINUE
+                      X(J) = TEMP
+                      KK = KK + (N-J+1)
+  140             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 160 J = 1,N
+                      TEMP = X(JX)
+                      IX = JX
+                      IF (NOUNIT) TEMP = TEMP*AP(KK)
+                      DO 150 K = KK + 1,KK + N - J
+                          IX = IX + INCX
+                          TEMP = TEMP + AP(K)*X(IX)
+  150                 CONTINUE
+                      X(JX) = TEMP
+                      JX = JX + INCX
+                      KK = KK + (N-J+1)
+  160             CONTINUE
+              END IF
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of STPMV .
+*
+      END
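
For illustration only (not part of the patch): the stand-alone C++ sketch below mirrors STPMV's UPLO='U', TRANS='N', unit-increment branch, x := A*x with A packed upper-triangular and updated in place. Processing column j only reads x(1..j-1) before scaling x(j), so no temporary copy of x is needed. Function and variable names are illustrative only.

// Sketch of STPMV's 'U','N' unit-increment branch in C++ (0-based).
#include <cstdio>
#include <vector>

void stpmv_upper_notrans(int n, const std::vector<float>& ap, std::vector<float>& x, bool nounit)
{
  int kk = 0;
  for (int j = 0; j < n; ++j) {
    if (x[j] != 0.0f) {
      float temp = x[j];
      for (int i = 0; i < j; ++i)    // strictly upper part of column j
        x[i] += temp * ap[kk + i];
      if (nounit)                    // nounit mirrors DIAG = 'N'
        x[j] *= ap[kk + j];          // diagonal entry a(j,j)
    }
    kk += j + 1;
  }
}

int main()
{
  // A = [1 2; 0 3] packed as {1, 2, 3}; x = (1, 1) gives A*x = (3, 3).
  std::vector<float> ap(3); ap[0] = 1.0f; ap[1] = 2.0f; ap[2] = 3.0f;
  std::vector<float> x(2, 1.0f);
  stpmv_upper_notrans(2, ap, x, true);
  std::printf("%g %g\n", x[0], x[1]);
  return 0;
}
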
diff --git a/resources/3rdParty/eigen/blas/stpsv.f b/resources/3rdParty/eigen/blas/stpsv.f
new file mode 100644
index 000000000..7d95efbde
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/stpsv.f
@@ -0,0 +1,296 @@
+      SUBROUTINE STPSV(UPLO,TRANS,DIAG,N,AP,X,INCX)
+*     .. Scalar Arguments ..
+      INTEGER INCX,N
+      CHARACTER DIAG,TRANS,UPLO
+*     ..
+*     .. Array Arguments ..
+      REAL AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  STPSV  solves one of the systems of equations
+*
+*     A*x = b,   or   A'*x = b,
+*
+*  where b and x are n element vectors and A is an n by n unit, or
+*  non-unit, upper or lower triangular matrix, supplied in packed form.
+*
+*  No test for singularity or near-singularity is included in this
+*  routine. Such tests must be performed before calling this routine.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the matrix is an upper or
+*           lower triangular matrix as follows:
+*
+*              UPLO = 'U' or 'u'   A is an upper triangular matrix.
+*
+*              UPLO = 'L' or 'l'   A is a lower triangular matrix.
+*
+*           Unchanged on exit.
+*
+*  TRANS  - CHARACTER*1.
+*           On entry, TRANS specifies the equations to be solved as
+*           follows:
+*
+*              TRANS = 'N' or 'n'   A*x = b.
+*
+*              TRANS = 'T' or 't'   A'*x = b.
+*
+*              TRANS = 'C' or 'c'   A'*x = b.
+*
+*           Unchanged on exit.
+*
+*  DIAG   - CHARACTER*1.
+*           On entry, DIAG specifies whether or not A is unit
+*           triangular as follows:
+*
+*              DIAG = 'U' or 'u'   A is assumed to be unit triangular.
+*
+*              DIAG = 'N' or 'n'   A is not assumed to be unit
+*                                  triangular.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  AP     - REAL             array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 )
+*           respectively, and so on.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 )
+*           respectively, and so on.
+*           Note that when  DIAG = 'U' or 'u', the diagonal elements of
+*           A are not referenced, but are assumed to be unity.
+*           Unchanged on exit.
+*
+*  X      - REAL             array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element right-hand side vector b. On exit, X is overwritten
+*           with the solution vector x.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      REAL ZERO
+      PARAMETER (ZERO=0.0E+0)
+*     ..
+*     .. Local Scalars ..
+      REAL TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+      LOGICAL NOUNIT
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND.
+     +         .NOT.LSAME(TRANS,'C')) THEN
+          INFO = 2
+      ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN
+          INFO = 3
+      ELSE IF (N.LT.0) THEN
+          INFO = 4
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('STPSV ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF (N.EQ.0) RETURN
+*
+      NOUNIT = LSAME(DIAG,'N')
+*
+*     Set up the start point in X if the increment is not unity. This
+*     will be  ( N - 1 )*INCX  too small for descending loops.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of AP are
+*     accessed sequentially with one pass through AP.
+*
+      IF (LSAME(TRANS,'N')) THEN
+*
+*        Form  x := inv( A )*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 20 J = N,1,-1
+                      IF (X(J).NE.ZERO) THEN
+                          IF (NOUNIT) X(J) = X(J)/AP(KK)
+                          TEMP = X(J)
+                          K = KK - 1
+                          DO 10 I = J - 1,1,-1
+                              X(I) = X(I) - TEMP*AP(K)
+                              K = K - 1
+   10                     CONTINUE
+                      END IF
+                      KK = KK - J
+   20             CONTINUE
+              ELSE
+                  JX = KX + (N-1)*INCX
+                  DO 40 J = N,1,-1
+                      IF (X(JX).NE.ZERO) THEN
+                          IF (NOUNIT) X(JX) = X(JX)/AP(KK)
+                          TEMP = X(JX)
+                          IX = JX
+                          DO 30 K = KK - 1,KK - J + 1,-1
+                              IX = IX - INCX
+                              X(IX) = X(IX) - TEMP*AP(K)
+   30                     CONTINUE
+                      END IF
+                      JX = JX - INCX
+                      KK = KK - J
+   40             CONTINUE
+              END IF
+          ELSE
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 60 J = 1,N
+                      IF (X(J).NE.ZERO) THEN
+                          IF (NOUNIT) X(J) = X(J)/AP(KK)
+                          TEMP = X(J)
+                          K = KK + 1
+                          DO 50 I = J + 1,N
+                              X(I) = X(I) - TEMP*AP(K)
+                              K = K + 1
+   50                     CONTINUE
+                      END IF
+                      KK = KK + (N-J+1)
+   60             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 80 J = 1,N
+                      IF (X(JX).NE.ZERO) THEN
+                          IF (NOUNIT) X(JX) = X(JX)/AP(KK)
+                          TEMP = X(JX)
+                          IX = JX
+                          DO 70 K = KK + 1,KK + N - J
+                              IX = IX + INCX
+                              X(IX) = X(IX) - TEMP*AP(K)
+   70                     CONTINUE
+                      END IF
+                      JX = JX + INCX
+                      KK = KK + (N-J+1)
+   80             CONTINUE
+              END IF
+          END IF
+      ELSE
+*
+*        Form  x := inv( A' )*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 100 J = 1,N
+                      TEMP = X(J)
+                      K = KK
+                      DO 90 I = 1,J - 1
+                          TEMP = TEMP - AP(K)*X(I)
+                          K = K + 1
+   90                 CONTINUE
+                      IF (NOUNIT) TEMP = TEMP/AP(KK+J-1)
+                      X(J) = TEMP
+                      KK = KK + J
+  100             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 120 J = 1,N
+                      TEMP = X(JX)
+                      IX = KX
+                      DO 110 K = KK,KK + J - 2
+                          TEMP = TEMP - AP(K)*X(IX)
+                          IX = IX + INCX
+  110                 CONTINUE
+                      IF (NOUNIT) TEMP = TEMP/AP(KK+J-1)
+                      X(JX) = TEMP
+                      JX = JX + INCX
+                      KK = KK + J
+  120             CONTINUE
+              END IF
+          ELSE
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 140 J = N,1,-1
+                      TEMP = X(J)
+                      K = KK
+                      DO 130 I = N,J + 1,-1
+                          TEMP = TEMP - AP(K)*X(I)
+                          K = K - 1
+  130                 CONTINUE
+                      IF (NOUNIT) TEMP = TEMP/AP(KK-N+J)
+                      X(J) = TEMP
+                      KK = KK - (N-J+1)
+  140             CONTINUE
+              ELSE
+                  KX = KX + (N-1)*INCX
+                  JX = KX
+                  DO 160 J = N,1,-1
+                      TEMP = X(JX)
+                      IX = KX
+                      DO 150 K = KK,KK - (N- (J+1)),-1
+                          TEMP = TEMP - AP(K)*X(IX)
+                          IX = IX - INCX
+  150                 CONTINUE
+                      IF (NOUNIT) TEMP = TEMP/AP(KK-N+J)
+                      X(JX) = TEMP
+                      JX = JX - INCX
+                      KK = KK - (N-J+1)
+  160             CONTINUE
+              END IF
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of STPSV .
+*
+      END
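
For illustration only (not part of the patch): the stand-alone C++ sketch below mirrors STPSV's UPLO='U', TRANS='N', unit-increment branch, which solves A*x = b in place by back-substituting from the last column down and dividing by the diagonal only when DIAG='N'. Function and variable names are illustrative only.

// Sketch of STPSV's 'U','N' unit-increment branch in C++ (0-based).
#include <cstdio>
#include <vector>

void stpsv_upper_notrans(int n, const std::vector<float>& ap, std::vector<float>& x, bool nounit)
{
  int kk = n * (n + 1) / 2;          // one past the last packed entry
  for (int j = n - 1; j >= 0; --j) {
    kk -= j + 1;                     // column j starts at ap[kk]
    if (x[j] != 0.0f) {
      if (nounit)                    // nounit mirrors DIAG = 'N'
        x[j] /= ap[kk + j];          // divide by a(j,j)
      float temp = x[j];
      for (int i = 0; i < j; ++i)    // eliminate x_j from the rows above
        x[i] -= temp * ap[kk + i];
    }
  }
}

int main()
{
  // A = [1 2; 0 3], b = (3, 3)  =>  x = (1, 1).
  std::vector<float> ap(3); ap[0] = 1.0f; ap[1] = 2.0f; ap[2] = 3.0f;
  std::vector<float> x(2, 3.0f);
  stpsv_upper_notrans(2, ap, x, true);
  std::printf("%g %g\n", x[0], x[1]);
  return 0;
}
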
diff --git a/resources/3rdparty/eigen/blas/testing/CMakeLists.txt b/resources/3rdParty/eigen/blas/testing/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/CMakeLists.txt
rename to resources/3rdParty/eigen/blas/testing/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/blas/testing/cblat1.f b/resources/3rdParty/eigen/blas/testing/cblat1.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/cblat1.f
rename to resources/3rdParty/eigen/blas/testing/cblat1.f
diff --git a/resources/3rdparty/eigen/blas/testing/cblat2.dat b/resources/3rdParty/eigen/blas/testing/cblat2.dat
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/cblat2.dat
rename to resources/3rdParty/eigen/blas/testing/cblat2.dat
diff --git a/resources/3rdparty/eigen/blas/testing/cblat2.f b/resources/3rdParty/eigen/blas/testing/cblat2.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/cblat2.f
rename to resources/3rdParty/eigen/blas/testing/cblat2.f
diff --git a/resources/3rdparty/eigen/blas/testing/cblat3.dat b/resources/3rdParty/eigen/blas/testing/cblat3.dat
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/cblat3.dat
rename to resources/3rdParty/eigen/blas/testing/cblat3.dat
diff --git a/resources/3rdparty/eigen/blas/testing/cblat3.f b/resources/3rdParty/eigen/blas/testing/cblat3.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/cblat3.f
rename to resources/3rdParty/eigen/blas/testing/cblat3.f
diff --git a/resources/3rdParty/eigen/blas/testing/dblat1.f b/resources/3rdParty/eigen/blas/testing/dblat1.f
new file mode 100644
index 000000000..5a45d69f4
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/testing/dblat1.f
@@ -0,0 +1,769 @@
+      PROGRAM DBLAT1
+*     Test program for the DOUBLE PRECISION Level 1 BLAS.
+*     Based upon the original BLAS test routine together with:
+*     F06EAF Example Program Text
+*     .. Parameters ..
+      INTEGER          NOUT
+      PARAMETER        (NOUT=6)
+*     .. Scalars in Common ..
+      INTEGER          ICASE, INCX, INCY, MODE, N
+      LOGICAL          PASS
+*     .. Local Scalars ..
+      DOUBLE PRECISION SFAC
+      INTEGER          IC
+*     .. External Subroutines ..
+      EXTERNAL         CHECK0, CHECK1, CHECK2, CHECK3, HEADER
+*     .. Common blocks ..
+      COMMON           /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA             SFAC/9.765625D-4/
+*     .. Executable Statements ..
+      WRITE (NOUT,99999)
+      DO 20 IC = 1, 10
+         ICASE = IC
+         CALL HEADER
+*
+*        .. Initialize  PASS,  INCX,  INCY, and MODE for a new case. ..
+*        .. the value 9999 for INCX, INCY or MODE will appear in the ..
+*        .. detailed  output, if any, for cases  that do not involve ..
+*        .. these parameters ..
+*
+         PASS = .TRUE.
+         INCX = 9999
+         INCY = 9999
+         MODE = 9999
+         IF (ICASE.EQ.3) THEN
+            CALL CHECK0(SFAC)
+         ELSE IF (ICASE.EQ.7 .OR. ICASE.EQ.8 .OR. ICASE.EQ.9 .OR.
+     +            ICASE.EQ.10) THEN
+            CALL CHECK1(SFAC)
+         ELSE IF (ICASE.EQ.1 .OR. ICASE.EQ.2 .OR. ICASE.EQ.5 .OR.
+     +            ICASE.EQ.6) THEN
+            CALL CHECK2(SFAC)
+         ELSE IF (ICASE.EQ.4) THEN
+            CALL CHECK3(SFAC)
+         END IF
+*        -- Print
+         IF (PASS) WRITE (NOUT,99998)
+   20 CONTINUE
+      STOP
+*
+99999 FORMAT (' Real BLAS Test Program Results',/1X)
+99998 FORMAT ('                                    ----- PASS -----')
+      END
+      SUBROUTINE HEADER
+*     .. Parameters ..
+      INTEGER          NOUT
+      PARAMETER        (NOUT=6)
+*     .. Scalars in Common ..
+      INTEGER          ICASE, INCX, INCY, MODE, N
+      LOGICAL          PASS
+*     .. Local Arrays ..
+      CHARACTER*6      L(10)
+*     .. Common blocks ..
+      COMMON           /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA             L(1)/' DDOT '/
+      DATA             L(2)/'DAXPY '/
+      DATA             L(3)/'DROTG '/
+      DATA             L(4)/' DROT '/
+      DATA             L(5)/'DCOPY '/
+      DATA             L(6)/'DSWAP '/
+      DATA             L(7)/'DNRM2 '/
+      DATA             L(8)/'DASUM '/
+      DATA             L(9)/'DSCAL '/
+      DATA             L(10)/'IDAMAX'/
+*     .. Executable Statements ..
+      WRITE (NOUT,99999) ICASE, L(ICASE)
+      RETURN
+*
+99999 FORMAT (/' Test of subprogram number',I3,12X,A6)
+      END
+      SUBROUTINE CHECK0(SFAC)
+*     .. Parameters ..
+      INTEGER           NOUT
+      PARAMETER         (NOUT=6)
+*     .. Scalar Arguments ..
+      DOUBLE PRECISION  SFAC
+*     .. Scalars in Common ..
+      INTEGER           ICASE, INCX, INCY, MODE, N
+      LOGICAL           PASS
+*     .. Local Scalars ..
+      DOUBLE PRECISION  D12, SA, SB, SC, SS
+      INTEGER           K
+*     .. Local Arrays ..
+      DOUBLE PRECISION  DA1(8), DATRUE(8), DB1(8), DBTRUE(8), DC1(8),
+     +                  DS1(8)
+*     .. External Subroutines ..
+      EXTERNAL          DROTG, STEST1
+*     .. Common blocks ..
+      COMMON            /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA              DA1/0.3D0, 0.4D0, -0.3D0, -0.4D0, -0.3D0, 0.0D0,
+     +                  0.0D0, 1.0D0/
+      DATA              DB1/0.4D0, 0.3D0, 0.4D0, 0.3D0, -0.4D0, 0.0D0,
+     +                  1.0D0, 0.0D0/
+      DATA              DC1/0.6D0, 0.8D0, -0.6D0, 0.8D0, 0.6D0, 1.0D0,
+     +                  0.0D0, 1.0D0/
+      DATA              DS1/0.8D0, 0.6D0, 0.8D0, -0.6D0, 0.8D0, 0.0D0,
+     +                  1.0D0, 0.0D0/
+      DATA              DATRUE/0.5D0, 0.5D0, 0.5D0, -0.5D0, -0.5D0,
+     +                  0.0D0, 1.0D0, 1.0D0/
+      DATA              DBTRUE/0.0D0, 0.6D0, 0.0D0, -0.6D0, 0.0D0,
+     +                  0.0D0, 1.0D0, 0.0D0/
+      DATA              D12/4096.0D0/
+*     .. Executable Statements ..
+*
+*     Compute true values which cannot be prestored
+*     in decimal notation
+*
+      DBTRUE(1) = 1.0D0/0.6D0
+      DBTRUE(3) = -1.0D0/0.6D0
+      DBTRUE(5) = 1.0D0/0.6D0
+*
+      DO 20 K = 1, 8
+*        .. Set N=K for identification in output if any ..
+         N = K
+         IF (ICASE.EQ.3) THEN
+*           .. DROTG ..
+            IF (K.GT.8) GO TO 40
+            SA = DA1(K)
+            SB = DB1(K)
+            CALL DROTG(SA,SB,SC,SS)
+            CALL STEST1(SA,DATRUE(K),DATRUE(K),SFAC)
+            CALL STEST1(SB,DBTRUE(K),DBTRUE(K),SFAC)
+            CALL STEST1(SC,DC1(K),DC1(K),SFAC)
+            CALL STEST1(SS,DS1(K),DS1(K),SFAC)
+         ELSE
+            WRITE (NOUT,*) ' Shouldn''t be here in CHECK0'
+            STOP
+         END IF
+   20 CONTINUE
+   40 RETURN
+      END
+      SUBROUTINE CHECK1(SFAC)
+*     .. Parameters ..
+      INTEGER           NOUT
+      PARAMETER         (NOUT=6)
+*     .. Scalar Arguments ..
+      DOUBLE PRECISION  SFAC
+*     .. Scalars in Common ..
+      INTEGER           ICASE, INCX, INCY, MODE, N
+      LOGICAL           PASS
+*     .. Local Scalars ..
+      INTEGER           I, LEN, NP1
+*     .. Local Arrays ..
+      DOUBLE PRECISION  DTRUE1(5), DTRUE3(5), DTRUE5(8,5,2), DV(8,5,2),
+     +                  SA(10), STEMP(1), STRUE(8), SX(8)
+      INTEGER           ITRUE2(5)
+*     .. External Functions ..
+      DOUBLE PRECISION  DASUM, DNRM2
+      INTEGER           IDAMAX
+      EXTERNAL          DASUM, DNRM2, IDAMAX
+*     .. External Subroutines ..
+      EXTERNAL          ITEST1, DSCAL, STEST, STEST1
+*     .. Intrinsic Functions ..
+      INTRINSIC         MAX
+*     .. Common blocks ..
+      COMMON            /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA              SA/0.3D0, -1.0D0, 0.0D0, 1.0D0, 0.3D0, 0.3D0,
+     +                  0.3D0, 0.3D0, 0.3D0, 0.3D0/
+      DATA              DV/0.1D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0,
+     +                  2.0D0, 2.0D0, 0.3D0, 3.0D0, 3.0D0, 3.0D0, 3.0D0,
+     +                  3.0D0, 3.0D0, 3.0D0, 0.3D0, -0.4D0, 4.0D0,
+     +                  4.0D0, 4.0D0, 4.0D0, 4.0D0, 4.0D0, 0.2D0,
+     +                  -0.6D0, 0.3D0, 5.0D0, 5.0D0, 5.0D0, 5.0D0,
+     +                  5.0D0, 0.1D0, -0.3D0, 0.5D0, -0.1D0, 6.0D0,
+     +                  6.0D0, 6.0D0, 6.0D0, 0.1D0, 8.0D0, 8.0D0, 8.0D0,
+     +                  8.0D0, 8.0D0, 8.0D0, 8.0D0, 0.3D0, 9.0D0, 9.0D0,
+     +                  9.0D0, 9.0D0, 9.0D0, 9.0D0, 9.0D0, 0.3D0, 2.0D0,
+     +                  -0.4D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0,
+     +                  0.2D0, 3.0D0, -0.6D0, 5.0D0, 0.3D0, 2.0D0,
+     +                  2.0D0, 2.0D0, 0.1D0, 4.0D0, -0.3D0, 6.0D0,
+     +                  -0.5D0, 7.0D0, -0.1D0, 3.0D0/
+      DATA              DTRUE1/0.0D0, 0.3D0, 0.5D0, 0.7D0, 0.6D0/
+      DATA              DTRUE3/0.0D0, 0.3D0, 0.7D0, 1.1D0, 1.0D0/
+      DATA              DTRUE5/0.10D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0,
+     +                  2.0D0, 2.0D0, 2.0D0, -0.3D0, 3.0D0, 3.0D0,
+     +                  3.0D0, 3.0D0, 3.0D0, 3.0D0, 3.0D0, 0.0D0, 0.0D0,
+     +                  4.0D0, 4.0D0, 4.0D0, 4.0D0, 4.0D0, 4.0D0,
+     +                  0.20D0, -0.60D0, 0.30D0, 5.0D0, 5.0D0, 5.0D0,
+     +                  5.0D0, 5.0D0, 0.03D0, -0.09D0, 0.15D0, -0.03D0,
+     +                  6.0D0, 6.0D0, 6.0D0, 6.0D0, 0.10D0, 8.0D0,
+     +                  8.0D0, 8.0D0, 8.0D0, 8.0D0, 8.0D0, 8.0D0,
+     +                  0.09D0, 9.0D0, 9.0D0, 9.0D0, 9.0D0, 9.0D0,
+     +                  9.0D0, 9.0D0, 0.09D0, 2.0D0, -0.12D0, 2.0D0,
+     +                  2.0D0, 2.0D0, 2.0D0, 2.0D0, 0.06D0, 3.0D0,
+     +                  -0.18D0, 5.0D0, 0.09D0, 2.0D0, 2.0D0, 2.0D0,
+     +                  0.03D0, 4.0D0, -0.09D0, 6.0D0, -0.15D0, 7.0D0,
+     +                  -0.03D0, 3.0D0/
+      DATA              ITRUE2/0, 1, 2, 2, 3/
+*     .. Executable Statements ..
+      DO 80 INCX = 1, 2
+         DO 60 NP1 = 1, 5
+            N = NP1 - 1
+            LEN = 2*MAX(N,1)
+*           .. Set vector arguments ..
+            DO 20 I = 1, LEN
+               SX(I) = DV(I,NP1,INCX)
+   20       CONTINUE
+*
+            IF (ICASE.EQ.7) THEN
+*              .. DNRM2 ..
+               STEMP(1) = DTRUE1(NP1)
+               CALL STEST1(DNRM2(N,SX,INCX),STEMP,STEMP,SFAC)
+            ELSE IF (ICASE.EQ.8) THEN
+*              .. DASUM ..
+               STEMP(1) = DTRUE3(NP1)
+               CALL STEST1(DASUM(N,SX,INCX),STEMP,STEMP,SFAC)
+            ELSE IF (ICASE.EQ.9) THEN
+*              .. DSCAL ..
+               CALL DSCAL(N,SA((INCX-1)*5+NP1),SX,INCX)
+               DO 40 I = 1, LEN
+                  STRUE(I) = DTRUE5(I,NP1,INCX)
+   40          CONTINUE
+               CALL STEST(LEN,SX,STRUE,STRUE,SFAC)
+            ELSE IF (ICASE.EQ.10) THEN
+*              .. IDAMAX ..
+               CALL ITEST1(IDAMAX(N,SX,INCX),ITRUE2(NP1))
+            ELSE
+               WRITE (NOUT,*) ' Shouldn''t be here in CHECK1'
+               STOP
+            END IF
+   60    CONTINUE
+   80 CONTINUE
+      RETURN
+      END
+      SUBROUTINE CHECK2(SFAC)
+*     .. Parameters ..
+      INTEGER           NOUT
+      PARAMETER         (NOUT=6)
+*     .. Scalar Arguments ..
+      DOUBLE PRECISION  SFAC
+*     .. Scalars in Common ..
+      INTEGER           ICASE, INCX, INCY, MODE, N
+      LOGICAL           PASS
+*     .. Local Scalars ..
+      DOUBLE PRECISION  SA, SC, SS
+      INTEGER           I, J, KI, KN, KSIZE, LENX, LENY, MX, MY
+*     .. Local Arrays ..
+      DOUBLE PRECISION  DT10X(7,4,4), DT10Y(7,4,4), DT7(4,4),
+     +                  DT8(7,4,4), DT9X(7,4,4), DT9Y(7,4,4), DX1(7),
+     +                  DY1(7), SSIZE1(4), SSIZE2(14,2), STX(7), STY(7),
+     +                  SX(7), SY(7)
+      INTEGER           INCXS(4), INCYS(4), LENS(4,2), NS(4)
+*     .. External Functions ..
+      DOUBLE PRECISION  DDOT
+      EXTERNAL          DDOT
+*     .. External Subroutines ..
+      EXTERNAL          DAXPY, DCOPY, DSWAP, STEST, STEST1
+*     .. Intrinsic Functions ..
+      INTRINSIC         ABS, MIN
+*     .. Common blocks ..
+      COMMON            /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA              SA/0.3D0/
+      DATA              INCXS/1, 2, -2, -1/
+      DATA              INCYS/1, -2, 1, -2/
+      DATA              LENS/1, 1, 2, 4, 1, 1, 3, 7/
+      DATA              NS/0, 1, 2, 4/
+      DATA              DX1/0.6D0, 0.1D0, -0.5D0, 0.8D0, 0.9D0, -0.3D0,
+     +                  -0.4D0/
+      DATA              DY1/0.5D0, -0.9D0, 0.3D0, 0.7D0, -0.6D0, 0.2D0,
+     +                  0.8D0/
+      DATA              SC, SS/0.8D0, 0.6D0/
+      DATA              DT7/0.0D0, 0.30D0, 0.21D0, 0.62D0, 0.0D0,
+     +                  0.30D0, -0.07D0, 0.85D0, 0.0D0, 0.30D0, -0.79D0,
+     +                  -0.74D0, 0.0D0, 0.30D0, 0.33D0, 1.27D0/
+      DATA              DT8/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.68D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.68D0, -0.87D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.68D0, -0.87D0, 0.15D0,
+     +                  0.94D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.68D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.35D0, -0.9D0, 0.48D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.38D0, -0.9D0, 0.57D0, 0.7D0, -0.75D0,
+     +                  0.2D0, 0.98D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.68D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.35D0, -0.72D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.38D0,
+     +                  -0.63D0, 0.15D0, 0.88D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.68D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.68D0, -0.9D0, 0.33D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.68D0, -0.9D0, 0.33D0, 0.7D0,
+     +                  -0.75D0, 0.2D0, 1.04D0/
+      DATA              DT9X/0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.78D0, -0.46D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.78D0, -0.46D0, -0.22D0,
+     +                  1.06D0, 0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.78D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.66D0, 0.1D0, -0.1D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.96D0, 0.1D0, -0.76D0, 0.8D0, 0.90D0,
+     +                  -0.3D0, -0.02D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.78D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.06D0, 0.1D0,
+     +                  -0.1D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.90D0,
+     +                  0.1D0, -0.22D0, 0.8D0, 0.18D0, -0.3D0, -0.02D0,
+     +                  0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.78D0, 0.26D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.78D0, 0.26D0, -0.76D0, 1.12D0,
+     +                  0.0D0, 0.0D0, 0.0D0/
+      DATA              DT9Y/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.54D0,
+     +                  0.08D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.04D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.7D0,
+     +                  -0.9D0, -0.12D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.64D0, -0.9D0, -0.30D0, 0.7D0, -0.18D0, 0.2D0,
+     +                  0.28D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.7D0, -1.08D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.64D0, -1.26D0,
+     +                  0.54D0, 0.20D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.7D0,
+     +                  -0.18D0, 0.2D0, 0.16D0/
+      DATA              DT10X/0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.5D0, -0.9D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.5D0, -0.9D0, 0.3D0, 0.7D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.3D0, 0.1D0, 0.5D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.8D0, 0.1D0, -0.6D0,
+     +                  0.8D0, 0.3D0, -0.3D0, 0.5D0, 0.6D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.9D0,
+     +                  0.1D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.7D0,
+     +                  0.1D0, 0.3D0, 0.8D0, -0.9D0, -0.3D0, 0.5D0,
+     +                  0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.5D0, 0.3D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.5D0, 0.3D0, -0.6D0, 0.8D0, 0.0D0, 0.0D0,
+     +                  0.0D0/
+      DATA              DT10Y/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.6D0, 0.1D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.6D0, 0.1D0, -0.5D0, 0.8D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, -0.5D0, -0.9D0, 0.6D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, -0.4D0, -0.9D0, 0.9D0,
+     +                  0.7D0, -0.5D0, 0.2D0, 0.6D0, 0.5D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.5D0,
+     +                  0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  -0.4D0, 0.9D0, -0.5D0, 0.6D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.6D0, -0.9D0, 0.1D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.6D0, -0.9D0, 0.1D0, 0.7D0,
+     +                  -0.5D0, 0.2D0, 0.8D0/
+      DATA              SSIZE1/0.0D0, 0.3D0, 1.6D0, 3.2D0/
+      DATA              SSIZE2/0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0,
+     +                  1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0,
+     +                  1.17D0, 1.17D0, 1.17D0/
+*     .. Executable Statements ..
+*
+      DO 120 KI = 1, 4
+         INCX = INCXS(KI)
+         INCY = INCYS(KI)
+         MX = ABS(INCX)
+         MY = ABS(INCY)
+*
+         DO 100 KN = 1, 4
+            N = NS(KN)
+            KSIZE = MIN(2,KN)
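+*           .. KSIZE = 1 only when N = 0 (KN = 1); it selects the ..
+*           .. all-zero first column of SSIZE2 used below ..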
+            LENX = LENS(KN,MX)
+            LENY = LENS(KN,MY)
+*           .. Initialize all argument arrays ..
+            DO 20 I = 1, 7
+               SX(I) = DX1(I)
+               SY(I) = DY1(I)
+   20       CONTINUE
+*
+            IF (ICASE.EQ.1) THEN
+*              .. DDOT ..
+               CALL STEST1(DDOT(N,SX,INCX,SY,INCY),DT7(KN,KI),SSIZE1(KN)
+     +                     ,SFAC)
+            ELSE IF (ICASE.EQ.2) THEN
+*              .. DAXPY ..
+               CALL DAXPY(N,SA,SX,INCX,SY,INCY)
+               DO 40 J = 1, LENY
+                  STY(J) = DT8(J,KN,KI)
+   40          CONTINUE
+               CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC)
+            ELSE IF (ICASE.EQ.5) THEN
+*              .. DCOPY ..
+               DO 60 I = 1, 7
+                  STY(I) = DT10Y(I,KN,KI)
+   60          CONTINUE
+               CALL DCOPY(N,SX,INCX,SY,INCY)
+               CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0D0)
+            ELSE IF (ICASE.EQ.6) THEN
+*              .. DSWAP ..
+               CALL DSWAP(N,SX,INCX,SY,INCY)
+               DO 80 I = 1, 7
+                  STX(I) = DT10X(I,KN,KI)
+                  STY(I) = DT10Y(I,KN,KI)
+   80          CONTINUE
+               CALL STEST(LENX,SX,STX,SSIZE2(1,1),1.0D0)
+               CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0D0)
+            ELSE
+               WRITE (NOUT,*) ' Shouldn''t be here in CHECK2'
+               STOP
+            END IF
+  100    CONTINUE
+  120 CONTINUE
+      RETURN
+      END
+      SUBROUTINE CHECK3(SFAC)
+*     .. Parameters ..
+      INTEGER           NOUT
+      PARAMETER         (NOUT=6)
+*     .. Scalar Arguments ..
+      DOUBLE PRECISION  SFAC
+*     .. Scalars in Common ..
+      INTEGER           ICASE, INCX, INCY, MODE, N
+      LOGICAL           PASS
+*     .. Local Scalars ..
+      DOUBLE PRECISION  SA, SC, SS
+      INTEGER           I, K, KI, KN, KSIZE, LENX, LENY, MX, MY
+*     .. Local Arrays ..
+      DOUBLE PRECISION  COPYX(5), COPYY(5), DT9X(7,4,4), DT9Y(7,4,4),
+     +                  DX1(7), DY1(7), MWPC(11), MWPS(11), MWPSTX(5),
+     +                  MWPSTY(5), MWPTX(11,5), MWPTY(11,5), MWPX(5),
+     +                  MWPY(5), SSIZE2(14,2), STX(7), STY(7), SX(7),
+     +                  SY(7)
+      INTEGER           INCXS(4), INCYS(4), LENS(4,2), MWPINX(11),
+     +                  MWPINY(11), MWPN(11), NS(4)
+*     .. External Subroutines ..
+      EXTERNAL          DROT, STEST
+*     .. Intrinsic Functions ..
+      INTRINSIC         ABS, MIN
+*     .. Common blocks ..
+      COMMON            /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA              SA/0.3D0/
+      DATA              INCXS/1, 2, -2, -1/
+      DATA              INCYS/1, -2, 1, -2/
+      DATA              LENS/1, 1, 2, 4, 1, 1, 3, 7/
+      DATA              NS/0, 1, 2, 4/
+      DATA              DX1/0.6D0, 0.1D0, -0.5D0, 0.8D0, 0.9D0, -0.3D0,
+     +                  -0.4D0/
+      DATA              DY1/0.5D0, -0.9D0, 0.3D0, 0.7D0, -0.6D0, 0.2D0,
+     +                  0.8D0/
+      DATA              SC, SS/0.8D0, 0.6D0/
+      DATA              DT9X/0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.78D0, -0.46D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.78D0, -0.46D0, -0.22D0,
+     +                  1.06D0, 0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.78D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.66D0, 0.1D0, -0.1D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.96D0, 0.1D0, -0.76D0, 0.8D0, 0.90D0,
+     +                  -0.3D0, -0.02D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.78D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.06D0, 0.1D0,
+     +                  -0.1D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.90D0,
+     +                  0.1D0, -0.22D0, 0.8D0, 0.18D0, -0.3D0, -0.02D0,
+     +                  0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.78D0, 0.26D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.78D0, 0.26D0, -0.76D0, 1.12D0,
+     +                  0.0D0, 0.0D0, 0.0D0/
+      DATA              DT9Y/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.54D0,
+     +                  0.08D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.04D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.7D0,
+     +                  -0.9D0, -0.12D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.64D0, -0.9D0, -0.30D0, 0.7D0, -0.18D0, 0.2D0,
+     +                  0.28D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.7D0, -1.08D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.64D0, -1.26D0,
+     +                  0.54D0, 0.20D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.7D0,
+     +                  -0.18D0, 0.2D0, 0.16D0/
+      DATA              SSIZE2/0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
+     +                  0.0D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0,
+     +                  1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0,
+     +                  1.17D0, 1.17D0, 1.17D0/
+*     .. Executable Statements ..
+*
+      DO 60 KI = 1, 4
+         INCX = INCXS(KI)
+         INCY = INCYS(KI)
+         MX = ABS(INCX)
+         MY = ABS(INCY)
+*
+         DO 40 KN = 1, 4
+            N = NS(KN)
+            KSIZE = MIN(2,KN)
+            LENX = LENS(KN,MX)
+            LENY = LENS(KN,MY)
+*
+            IF (ICASE.EQ.4) THEN
+*              .. DROT ..
+               DO 20 I = 1, 7
+                  SX(I) = DX1(I)
+                  SY(I) = DY1(I)
+                  STX(I) = DT9X(I,KN,KI)
+                  STY(I) = DT9Y(I,KN,KI)
+   20          CONTINUE
+               CALL DROT(N,SX,INCX,SY,INCY,SC,SS)
+               CALL STEST(LENX,SX,STX,SSIZE2(1,KSIZE),SFAC)
+               CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC)
+            ELSE
+               WRITE (NOUT,*) ' Shouldn''t be here in CHECK3'
+               STOP
+            END IF
+   40    CONTINUE
+   60 CONTINUE
+*
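+*     .. Extra DROT tests: exact rotations (C,S) = (1,0), (0,1) and ..
+*     .. (0,-1) applied to X = Y = (1,...,5) with mixed increments; ..
+*     .. MWPTX and MWPTY below hold the expected results ..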
+      MWPC(1) = 1
+      DO 80 I = 2, 11
+         MWPC(I) = 0
+   80 CONTINUE
+      MWPS(1) = 0
+      DO 100 I = 2, 6
+         MWPS(I) = 1
+  100 CONTINUE
+      DO 120 I = 7, 11
+         MWPS(I) = -1
+  120 CONTINUE
+      MWPINX(1) = 1
+      MWPINX(2) = 1
+      MWPINX(3) = 1
+      MWPINX(4) = -1
+      MWPINX(5) = 1
+      MWPINX(6) = -1
+      MWPINX(7) = 1
+      MWPINX(8) = 1
+      MWPINX(9) = -1
+      MWPINX(10) = 1
+      MWPINX(11) = -1
+      MWPINY(1) = 1
+      MWPINY(2) = 1
+      MWPINY(3) = -1
+      MWPINY(4) = -1
+      MWPINY(5) = 2
+      MWPINY(6) = 1
+      MWPINY(7) = 1
+      MWPINY(8) = -1
+      MWPINY(9) = -1
+      MWPINY(10) = 2
+      MWPINY(11) = 1
+      DO 140 I = 1, 11
+         MWPN(I) = 5
+  140 CONTINUE
+      MWPN(5) = 3
+      MWPN(10) = 3
+      DO 160 I = 1, 5
+         MWPX(I) = I
+         MWPY(I) = I
+         MWPTX(1,I) = I
+         MWPTY(1,I) = I
+         MWPTX(2,I) = I
+         MWPTY(2,I) = -I
+         MWPTX(3,I) = 6 - I
+         MWPTY(3,I) = I - 6
+         MWPTX(4,I) = I
+         MWPTY(4,I) = -I
+         MWPTX(6,I) = 6 - I
+         MWPTY(6,I) = I - 6
+         MWPTX(7,I) = -I
+         MWPTY(7,I) = I
+         MWPTX(8,I) = I - 6
+         MWPTY(8,I) = 6 - I
+         MWPTX(9,I) = -I
+         MWPTY(9,I) = I
+         MWPTX(11,I) = I - 6
+         MWPTY(11,I) = 6 - I
+  160 CONTINUE
+      MWPTX(5,1) = 1
+      MWPTX(5,2) = 3
+      MWPTX(5,3) = 5
+      MWPTX(5,4) = 4
+      MWPTX(5,5) = 5
+      MWPTY(5,1) = -1
+      MWPTY(5,2) = 2
+      MWPTY(5,3) = -2
+      MWPTY(5,4) = 4
+      MWPTY(5,5) = -3
+      MWPTX(10,1) = -1
+      MWPTX(10,2) = -3
+      MWPTX(10,3) = -5
+      MWPTX(10,4) = 4
+      MWPTX(10,5) = 5
+      MWPTY(10,1) = 1
+      MWPTY(10,2) = 2
+      MWPTY(10,3) = 2
+      MWPTY(10,4) = 4
+      MWPTY(10,5) = 3
+      DO 200 I = 1, 11
+         INCX = MWPINX(I)
+         INCY = MWPINY(I)
+         DO 180 K = 1, 5
+            COPYX(K) = MWPX(K)
+            COPYY(K) = MWPY(K)
+            MWPSTX(K) = MWPTX(I,K)
+            MWPSTY(K) = MWPTY(I,K)
+  180    CONTINUE
+         CALL DROT(MWPN(I),COPYX,INCX,COPYY,INCY,MWPC(I),MWPS(I))
+         CALL STEST(5,COPYX,MWPSTX,MWPSTX,SFAC)
+         CALL STEST(5,COPYY,MWPSTY,MWPSTY,SFAC)
+  200 CONTINUE
+      RETURN
+      END
+      SUBROUTINE STEST(LEN,SCOMP,STRUE,SSIZE,SFAC)
+*     ********************************* STEST **************************
+*
+*     THIS SUBR COMPARES ARRAYS  SCOMP() AND STRUE() OF LENGTH LEN TO
+*     SEE IF THE TERM BY TERM DIFFERENCES, MULTIPLIED BY SFAC, ARE
+*     NEGLIGIBLE.
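+*     A DIFFERENCE SD = SCOMP(I) - STRUE(I) COUNTS AS NEGLIGIBLE WHEN
+*     ABS(SSIZE(I)) + ABS(SFAC*SD) COMPARES EQUAL TO ABS(SSIZE(I)) IN
+*     WORKING PRECISION.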
+*
+*     C. L. LAWSON, JPL, 1974 DEC 10
+*
+*     .. Parameters ..
+      INTEGER          NOUT
+      PARAMETER        (NOUT=6)
+*     .. Scalar Arguments ..
+      DOUBLE PRECISION SFAC
+      INTEGER          LEN
+*     .. Array Arguments ..
+      DOUBLE PRECISION SCOMP(LEN), SSIZE(LEN), STRUE(LEN)
+*     .. Scalars in Common ..
+      INTEGER          ICASE, INCX, INCY, MODE, N
+      LOGICAL          PASS
+*     .. Local Scalars ..
+      DOUBLE PRECISION SD
+      INTEGER          I
+*     .. External Functions ..
+      DOUBLE PRECISION SDIFF
+      EXTERNAL         SDIFF
+*     .. Intrinsic Functions ..
+      INTRINSIC        ABS
+*     .. Common blocks ..
+      COMMON           /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Executable Statements ..
+*
+      DO 40 I = 1, LEN
+         SD = SCOMP(I) - STRUE(I)
+         IF (SDIFF(ABS(SSIZE(I))+ABS(SFAC*SD),ABS(SSIZE(I))).EQ.0.0D0)
+     +       GO TO 40
+*
+*                             HERE    SCOMP(I) IS NOT CLOSE TO STRUE(I).
+*
+         IF ( .NOT. PASS) GO TO 20
+*                             PRINT FAIL MESSAGE AND HEADER.
+         PASS = .FALSE.
+         WRITE (NOUT,99999)
+         WRITE (NOUT,99998)
+   20    WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, I, SCOMP(I),
+     +     STRUE(I), SD, SSIZE(I)
+   40 CONTINUE
+      RETURN
+*
+99999 FORMAT ('                                       FAIL')
+99998 FORMAT (/' CASE  N INCX INCY MODE  I                            ',
+     +       ' COMP(I)                             TRUE(I)  DIFFERENCE',
+     +       '     SIZE(I)',/1X)
+99997 FORMAT (1X,I4,I3,3I5,I3,2D36.8,2D12.4)
+      END
+      SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
+*     ************************* STEST1 *****************************
+*
+*     THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
+*     REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
+*     ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
+*
+*     C.L. LAWSON, JPL, 1978 DEC 6
+*
+*     .. Scalar Arguments ..
+      DOUBLE PRECISION  SCOMP1, SFAC, STRUE1
+*     .. Array Arguments ..
+      DOUBLE PRECISION  SSIZE(*)
+*     .. Local Arrays ..
+      DOUBLE PRECISION  SCOMP(1), STRUE(1)
+*     .. External Subroutines ..
+      EXTERNAL          STEST
+*     .. Executable Statements ..
+*
+      SCOMP(1) = SCOMP1
+      STRUE(1) = STRUE1
+      CALL STEST(1,SCOMP,STRUE,SSIZE,SFAC)
+*
+      RETURN
+      END
+      DOUBLE PRECISION FUNCTION SDIFF(SA,SB)
+*     ********************************* SDIFF **************************
+*     COMPUTES DIFFERENCE OF TWO NUMBERS.  C. L. LAWSON, JPL 1974 FEB 15
+*
+*     .. Scalar Arguments ..
+      DOUBLE PRECISION                SA, SB
+*     .. Executable Statements ..
+      SDIFF = SA - SB
+      RETURN
+      END
+      SUBROUTINE ITEST1(ICOMP,ITRUE)
+*     ********************************* ITEST1 *************************
+*
+*     THIS SUBROUTINE COMPARES THE VARIABLES ICOMP AND ITRUE FOR
+*     EQUALITY.
+*     C. L. LAWSON, JPL, 1974 DEC 10
+*
+*     .. Parameters ..
+      INTEGER           NOUT
+      PARAMETER         (NOUT=6)
+*     .. Scalar Arguments ..
+      INTEGER           ICOMP, ITRUE
+*     .. Scalars in Common ..
+      INTEGER           ICASE, INCX, INCY, MODE, N
+      LOGICAL           PASS
+*     .. Local Scalars ..
+      INTEGER           ID
+*     .. Common blocks ..
+      COMMON            /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Executable Statements ..
+*
+      IF (ICOMP.EQ.ITRUE) GO TO 40
+*
+*                            HERE ICOMP IS NOT EQUAL TO ITRUE.
+*
+      IF ( .NOT. PASS) GO TO 20
+*                             PRINT FAIL MESSAGE AND HEADER.
+      PASS = .FALSE.
+      WRITE (NOUT,99999)
+      WRITE (NOUT,99998)
+   20 ID = ICOMP - ITRUE
+      WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, ICOMP, ITRUE, ID
+   40 CONTINUE
+      RETURN
+*
+99999 FORMAT ('                                       FAIL')
+99998 FORMAT (/' CASE  N INCX INCY MODE                               ',
+     +       ' COMP                                TRUE     DIFFERENCE',
+     +       /1X)
+99997 FORMAT (1X,I4,I3,3I5,2I36,I12)
+      END
diff --git a/resources/3rdparty/eigen/blas/testing/dblat2.dat b/resources/3rdParty/eigen/blas/testing/dblat2.dat
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/dblat2.dat
rename to resources/3rdParty/eigen/blas/testing/dblat2.dat
diff --git a/resources/3rdparty/eigen/blas/testing/dblat2.f b/resources/3rdParty/eigen/blas/testing/dblat2.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/dblat2.f
rename to resources/3rdParty/eigen/blas/testing/dblat2.f
diff --git a/resources/3rdparty/eigen/blas/testing/dblat3.dat b/resources/3rdParty/eigen/blas/testing/dblat3.dat
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/dblat3.dat
rename to resources/3rdParty/eigen/blas/testing/dblat3.dat
diff --git a/resources/3rdparty/eigen/blas/testing/dblat3.f b/resources/3rdParty/eigen/blas/testing/dblat3.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/dblat3.f
rename to resources/3rdParty/eigen/blas/testing/dblat3.f
diff --git a/resources/3rdparty/eigen/blas/testing/runblastest.sh b/resources/3rdParty/eigen/blas/testing/runblastest.sh
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/runblastest.sh
rename to resources/3rdParty/eigen/blas/testing/runblastest.sh
diff --git a/resources/3rdParty/eigen/blas/testing/sblat1.f b/resources/3rdParty/eigen/blas/testing/sblat1.f
new file mode 100644
index 000000000..a982d1852
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/testing/sblat1.f
@@ -0,0 +1,769 @@
+      PROGRAM SBLAT1
+*     Test program for the REAL Level 1 BLAS.
+*     Based upon the original BLAS test routine together with:
+*     F06EAF Example Program Text
+*     .. Parameters ..
+      INTEGER          NOUT
+      PARAMETER        (NOUT=6)
+*     .. Scalars in Common ..
+      INTEGER          ICASE, INCX, INCY, MODE, N
+      LOGICAL          PASS
+*     .. Local Scalars ..
+      REAL             SFAC
+      INTEGER          IC
+*     .. External Subroutines ..
+      EXTERNAL         CHECK0, CHECK1, CHECK2, CHECK3, HEADER
+*     .. Common blocks ..
+      COMMON           /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA             SFAC/9.765625E-4/
+*     .. Executable Statements ..
+      WRITE (NOUT,99999)
+      DO 20 IC = 1, 10
+         ICASE = IC
+         CALL HEADER
+*
+*        .. Initialize  PASS,  INCX,  INCY, and MODE for a new case. ..
+*        .. the value 9999 for INCX, INCY or MODE will appear in the ..
+*        .. detailed  output, if any, for cases  that do not involve ..
+*        .. these parameters ..
+*
+         PASS = .TRUE.
+         INCX = 9999
+         INCY = 9999
+         MODE = 9999
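+*        .. ICASE 3 exercises SROTG; cases 7-10 exercise SNRM2,    ..
+*        .. SASUM, SSCAL and ISAMAX; cases 1, 2, 5 and 6 exercise  ..
+*        .. SDOT, SAXPY, SCOPY and SSWAP; case 4 exercises SROT    ..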
+         IF (ICASE.EQ.3) THEN
+            CALL CHECK0(SFAC)
+         ELSE IF (ICASE.EQ.7 .OR. ICASE.EQ.8 .OR. ICASE.EQ.9 .OR.
+     +            ICASE.EQ.10) THEN
+            CALL CHECK1(SFAC)
+         ELSE IF (ICASE.EQ.1 .OR. ICASE.EQ.2 .OR. ICASE.EQ.5 .OR.
+     +            ICASE.EQ.6) THEN
+            CALL CHECK2(SFAC)
+         ELSE IF (ICASE.EQ.4) THEN
+            CALL CHECK3(SFAC)
+         END IF
+*        -- Print
+         IF (PASS) WRITE (NOUT,99998)
+   20 CONTINUE
+      STOP
+*
+99999 FORMAT (' Real BLAS Test Program Results',/1X)
+99998 FORMAT ('                                    ----- PASS -----')
+      END
+      SUBROUTINE HEADER
+*     .. Parameters ..
+      INTEGER          NOUT
+      PARAMETER        (NOUT=6)
+*     .. Scalars in Common ..
+      INTEGER          ICASE, INCX, INCY, MODE, N
+      LOGICAL          PASS
+*     .. Local Arrays ..
+      CHARACTER*6      L(10)
+*     .. Common blocks ..
+      COMMON           /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA             L(1)/' SDOT '/
+      DATA             L(2)/'SAXPY '/
+      DATA             L(3)/'SROTG '/
+      DATA             L(4)/' SROT '/
+      DATA             L(5)/'SCOPY '/
+      DATA             L(6)/'SSWAP '/
+      DATA             L(7)/'SNRM2 '/
+      DATA             L(8)/'SASUM '/
+      DATA             L(9)/'SSCAL '/
+      DATA             L(10)/'ISAMAX'/
+*     .. Executable Statements ..
+      WRITE (NOUT,99999) ICASE, L(ICASE)
+      RETURN
+*
+99999 FORMAT (/' Test of subprogram number',I3,12X,A6)
+      END
+      SUBROUTINE CHECK0(SFAC)
+*     .. Parameters ..
+      INTEGER           NOUT
+      PARAMETER         (NOUT=6)
+*     .. Scalar Arguments ..
+      REAL              SFAC
+*     .. Scalars in Common ..
+      INTEGER           ICASE, INCX, INCY, MODE, N
+      LOGICAL           PASS
+*     .. Local Scalars ..
+      REAL              D12, SA, SB, SC, SS
+      INTEGER           K
+*     .. Local Arrays ..
+      REAL              DA1(8), DATRUE(8), DB1(8), DBTRUE(8), DC1(8),
+     +                  DS1(8)
+*     .. External Subroutines ..
+      EXTERNAL          SROTG, STEST1
+*     .. Common blocks ..
+      COMMON            /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA              DA1/0.3E0, 0.4E0, -0.3E0, -0.4E0, -0.3E0, 0.0E0,
+     +                  0.0E0, 1.0E0/
+      DATA              DB1/0.4E0, 0.3E0, 0.4E0, 0.3E0, -0.4E0, 0.0E0,
+     +                  1.0E0, 0.0E0/
+      DATA              DC1/0.6E0, 0.8E0, -0.6E0, 0.8E0, 0.6E0, 1.0E0,
+     +                  0.0E0, 1.0E0/
+      DATA              DS1/0.8E0, 0.6E0, 0.8E0, -0.6E0, 0.8E0, 0.0E0,
+     +                  1.0E0, 0.0E0/
+      DATA              DATRUE/0.5E0, 0.5E0, 0.5E0, -0.5E0, -0.5E0,
+     +                  0.0E0, 1.0E0, 1.0E0/
+      DATA              DBTRUE/0.0E0, 0.6E0, 0.0E0, -0.6E0, 0.0E0,
+     +                  0.0E0, 1.0E0, 0.0E0/
+      DATA              D12/4096.0E0/
+*     .. Executable Statements ..
+*
+*     Compute true values which cannot be prestored
+*     in decimal notation
+*
+      DBTRUE(1) = 1.0E0/0.6E0
+      DBTRUE(3) = -1.0E0/0.6E0
+      DBTRUE(5) = 1.0E0/0.6E0
+*
+      DO 20 K = 1, 8
+*        .. Set N=K for identification in output if any ..
+         N = K
+         IF (ICASE.EQ.3) THEN
+*           .. SROTG ..
+            IF (K.GT.8) GO TO 40
+            SA = DA1(K)
+            SB = DB1(K)
+            CALL SROTG(SA,SB,SC,SS)
+            CALL STEST1(SA,DATRUE(K),DATRUE(K),SFAC)
+            CALL STEST1(SB,DBTRUE(K),DBTRUE(K),SFAC)
+            CALL STEST1(SC,DC1(K),DC1(K),SFAC)
+            CALL STEST1(SS,DS1(K),DS1(K),SFAC)
+         ELSE
+            WRITE (NOUT,*) ' Shouldn''t be here in CHECK0'
+            STOP
+         END IF
+   20 CONTINUE
+   40 RETURN
+      END
+      SUBROUTINE CHECK1(SFAC)
+*     .. Parameters ..
+      INTEGER           NOUT
+      PARAMETER         (NOUT=6)
+*     .. Scalar Arguments ..
+      REAL              SFAC
+*     .. Scalars in Common ..
+      INTEGER           ICASE, INCX, INCY, MODE, N
+      LOGICAL           PASS
+*     .. Local Scalars ..
+      INTEGER           I, LEN, NP1
+*     .. Local Arrays ..
+      REAL              DTRUE1(5), DTRUE3(5), DTRUE5(8,5,2), DV(8,5,2),
+     +                  SA(10), STEMP(1), STRUE(8), SX(8)
+      INTEGER           ITRUE2(5)
+*     .. External Functions ..
+      REAL              SASUM, SNRM2
+      INTEGER           ISAMAX
+      EXTERNAL          SASUM, SNRM2, ISAMAX
+*     .. External Subroutines ..
+      EXTERNAL          ITEST1, SSCAL, STEST, STEST1
+*     .. Intrinsic Functions ..
+      INTRINSIC         MAX
+*     .. Common blocks ..
+      COMMON            /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA              SA/0.3E0, -1.0E0, 0.0E0, 1.0E0, 0.3E0, 0.3E0,
+     +                  0.3E0, 0.3E0, 0.3E0, 0.3E0/
+      DATA              DV/0.1E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0,
+     +                  2.0E0, 2.0E0, 0.3E0, 3.0E0, 3.0E0, 3.0E0, 3.0E0,
+     +                  3.0E0, 3.0E0, 3.0E0, 0.3E0, -0.4E0, 4.0E0,
+     +                  4.0E0, 4.0E0, 4.0E0, 4.0E0, 4.0E0, 0.2E0,
+     +                  -0.6E0, 0.3E0, 5.0E0, 5.0E0, 5.0E0, 5.0E0,
+     +                  5.0E0, 0.1E0, -0.3E0, 0.5E0, -0.1E0, 6.0E0,
+     +                  6.0E0, 6.0E0, 6.0E0, 0.1E0, 8.0E0, 8.0E0, 8.0E0,
+     +                  8.0E0, 8.0E0, 8.0E0, 8.0E0, 0.3E0, 9.0E0, 9.0E0,
+     +                  9.0E0, 9.0E0, 9.0E0, 9.0E0, 9.0E0, 0.3E0, 2.0E0,
+     +                  -0.4E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0,
+     +                  0.2E0, 3.0E0, -0.6E0, 5.0E0, 0.3E0, 2.0E0,
+     +                  2.0E0, 2.0E0, 0.1E0, 4.0E0, -0.3E0, 6.0E0,
+     +                  -0.5E0, 7.0E0, -0.1E0, 3.0E0/
+      DATA              DTRUE1/0.0E0, 0.3E0, 0.5E0, 0.7E0, 0.6E0/
+      DATA              DTRUE3/0.0E0, 0.3E0, 0.7E0, 1.1E0, 1.0E0/
+      DATA              DTRUE5/0.10E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0,
+     +                  2.0E0, 2.0E0, 2.0E0, -0.3E0, 3.0E0, 3.0E0,
+     +                  3.0E0, 3.0E0, 3.0E0, 3.0E0, 3.0E0, 0.0E0, 0.0E0,
+     +                  4.0E0, 4.0E0, 4.0E0, 4.0E0, 4.0E0, 4.0E0,
+     +                  0.20E0, -0.60E0, 0.30E0, 5.0E0, 5.0E0, 5.0E0,
+     +                  5.0E0, 5.0E0, 0.03E0, -0.09E0, 0.15E0, -0.03E0,
+     +                  6.0E0, 6.0E0, 6.0E0, 6.0E0, 0.10E0, 8.0E0,
+     +                  8.0E0, 8.0E0, 8.0E0, 8.0E0, 8.0E0, 8.0E0,
+     +                  0.09E0, 9.0E0, 9.0E0, 9.0E0, 9.0E0, 9.0E0,
+     +                  9.0E0, 9.0E0, 0.09E0, 2.0E0, -0.12E0, 2.0E0,
+     +                  2.0E0, 2.0E0, 2.0E0, 2.0E0, 0.06E0, 3.0E0,
+     +                  -0.18E0, 5.0E0, 0.09E0, 2.0E0, 2.0E0, 2.0E0,
+     +                  0.03E0, 4.0E0, -0.09E0, 6.0E0, -0.15E0, 7.0E0,
+     +                  -0.03E0, 3.0E0/
+      DATA              ITRUE2/0, 1, 2, 2, 3/
+*     .. Executable Statements ..
+      DO 80 INCX = 1, 2
+         DO 60 NP1 = 1, 5
+            N = NP1 - 1
+            LEN = 2*MAX(N,1)
+*           .. Set vector arguments ..
+            DO 20 I = 1, LEN
+               SX(I) = DV(I,NP1,INCX)
+   20       CONTINUE
+*
+            IF (ICASE.EQ.7) THEN
+*              .. SNRM2 ..
+               STEMP(1) = DTRUE1(NP1)
+               CALL STEST1(SNRM2(N,SX,INCX),STEMP,STEMP,SFAC)
+            ELSE IF (ICASE.EQ.8) THEN
+*              .. SASUM ..
+               STEMP(1) = DTRUE3(NP1)
+               CALL STEST1(SASUM(N,SX,INCX),STEMP,STEMP,SFAC)
+            ELSE IF (ICASE.EQ.9) THEN
+*              .. SSCAL ..
+               CALL SSCAL(N,SA((INCX-1)*5+NP1),SX,INCX)
+               DO 40 I = 1, LEN
+                  STRUE(I) = DTRUE5(I,NP1,INCX)
+   40          CONTINUE
+               CALL STEST(LEN,SX,STRUE,STRUE,SFAC)
+            ELSE IF (ICASE.EQ.10) THEN
+*              .. ISAMAX ..
+               CALL ITEST1(ISAMAX(N,SX,INCX),ITRUE2(NP1))
+            ELSE
+               WRITE (NOUT,*) ' Shouldn''t be here in CHECK1'
+               STOP
+            END IF
+   60    CONTINUE
+   80 CONTINUE
+      RETURN
+      END
+      SUBROUTINE CHECK2(SFAC)
+*     .. Parameters ..
+      INTEGER           NOUT
+      PARAMETER         (NOUT=6)
+*     .. Scalar Arguments ..
+      REAL              SFAC
+*     .. Scalars in Common ..
+      INTEGER           ICASE, INCX, INCY, MODE, N
+      LOGICAL           PASS
+*     .. Local Scalars ..
+      REAL              SA, SC, SS
+      INTEGER           I, J, KI, KN, KSIZE, LENX, LENY, MX, MY
+*     .. Local Arrays ..
+      REAL              DT10X(7,4,4), DT10Y(7,4,4), DT7(4,4),
+     +                  DT8(7,4,4), DT9X(7,4,4), DT9Y(7,4,4), DX1(7),
+     +                  DY1(7), SSIZE1(4), SSIZE2(14,2), STX(7), STY(7),
+     +                  SX(7), SY(7)
+      INTEGER           INCXS(4), INCYS(4), LENS(4,2), NS(4)
+*     .. External Functions ..
+      REAL              SDOT
+      EXTERNAL          SDOT
+*     .. External Subroutines ..
+      EXTERNAL          SAXPY, SCOPY, SSWAP, STEST, STEST1
+*     .. Intrinsic Functions ..
+      INTRINSIC         ABS, MIN
+*     .. Common blocks ..
+      COMMON            /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA              SA/0.3E0/
+      DATA              INCXS/1, 2, -2, -1/
+      DATA              INCYS/1, -2, 1, -2/
+      DATA              LENS/1, 1, 2, 4, 1, 1, 3, 7/
+      DATA              NS/0, 1, 2, 4/
+      DATA              DX1/0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.9E0, -0.3E0,
+     +                  -0.4E0/
+      DATA              DY1/0.5E0, -0.9E0, 0.3E0, 0.7E0, -0.6E0, 0.2E0,
+     +                  0.8E0/
+      DATA              SC, SS/0.8E0, 0.6E0/
+      DATA              DT7/0.0E0, 0.30E0, 0.21E0, 0.62E0, 0.0E0,
+     +                  0.30E0, -0.07E0, 0.85E0, 0.0E0, 0.30E0, -0.79E0,
+     +                  -0.74E0, 0.0E0, 0.30E0, 0.33E0, 1.27E0/
+      DATA              DT8/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.68E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.68E0, -0.87E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.68E0, -0.87E0, 0.15E0,
+     +                  0.94E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.68E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.35E0, -0.9E0, 0.48E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.38E0, -0.9E0, 0.57E0, 0.7E0, -0.75E0,
+     +                  0.2E0, 0.98E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.68E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.35E0, -0.72E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.38E0,
+     +                  -0.63E0, 0.15E0, 0.88E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.68E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.68E0, -0.9E0, 0.33E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.68E0, -0.9E0, 0.33E0, 0.7E0,
+     +                  -0.75E0, 0.2E0, 1.04E0/
+      DATA              DT9X/0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.78E0, -0.46E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.78E0, -0.46E0, -0.22E0,
+     +                  1.06E0, 0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.78E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.66E0, 0.1E0, -0.1E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.96E0, 0.1E0, -0.76E0, 0.8E0, 0.90E0,
+     +                  -0.3E0, -0.02E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.78E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.06E0, 0.1E0,
+     +                  -0.1E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.90E0,
+     +                  0.1E0, -0.22E0, 0.8E0, 0.18E0, -0.3E0, -0.02E0,
+     +                  0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.78E0, 0.26E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.78E0, 0.26E0, -0.76E0, 1.12E0,
+     +                  0.0E0, 0.0E0, 0.0E0/
+      DATA              DT9Y/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.54E0,
+     +                  0.08E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.04E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.7E0,
+     +                  -0.9E0, -0.12E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.64E0, -0.9E0, -0.30E0, 0.7E0, -0.18E0, 0.2E0,
+     +                  0.28E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.7E0, -1.08E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.64E0, -1.26E0,
+     +                  0.54E0, 0.20E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.7E0,
+     +                  -0.18E0, 0.2E0, 0.16E0/
+      DATA              DT10X/0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.5E0, -0.9E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.5E0, -0.9E0, 0.3E0, 0.7E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.3E0, 0.1E0, 0.5E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.8E0, 0.1E0, -0.6E0,
+     +                  0.8E0, 0.3E0, -0.3E0, 0.5E0, 0.6E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.9E0,
+     +                  0.1E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.7E0,
+     +                  0.1E0, 0.3E0, 0.8E0, -0.9E0, -0.3E0, 0.5E0,
+     +                  0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.5E0, 0.3E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.5E0, 0.3E0, -0.6E0, 0.8E0, 0.0E0, 0.0E0,
+     +                  0.0E0/
+      DATA              DT10Y/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.6E0, 0.1E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, -0.5E0, -0.9E0, 0.6E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, -0.4E0, -0.9E0, 0.9E0,
+     +                  0.7E0, -0.5E0, 0.2E0, 0.6E0, 0.5E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.5E0,
+     +                  0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  -0.4E0, 0.9E0, -0.5E0, 0.6E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.6E0, -0.9E0, 0.1E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.6E0, -0.9E0, 0.1E0, 0.7E0,
+     +                  -0.5E0, 0.2E0, 0.8E0/
+      DATA              SSIZE1/0.0E0, 0.3E0, 1.6E0, 3.2E0/
+      DATA              SSIZE2/0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0,
+     +                  1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0,
+     +                  1.17E0, 1.17E0, 1.17E0/
+*     .. Executable Statements ..
+*
+      DO 120 KI = 1, 4
+         INCX = INCXS(KI)
+         INCY = INCYS(KI)
+         MX = ABS(INCX)
+         MY = ABS(INCY)
+*
+         DO 100 KN = 1, 4
+            N = NS(KN)
+            KSIZE = MIN(2,KN)
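+*           .. KSIZE = 1 only when N = 0 (KN = 1); it selects the ..
+*           .. all-zero first column of SSIZE2 used below ..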
+            LENX = LENS(KN,MX)
+            LENY = LENS(KN,MY)
+*           .. Initialize all argument arrays ..
+            DO 20 I = 1, 7
+               SX(I) = DX1(I)
+               SY(I) = DY1(I)
+   20       CONTINUE
+*
+            IF (ICASE.EQ.1) THEN
+*              .. SDOT ..
+               CALL STEST1(SDOT(N,SX,INCX,SY,INCY),DT7(KN,KI),SSIZE1(KN)
+     +                     ,SFAC)
+            ELSE IF (ICASE.EQ.2) THEN
+*              .. SAXPY ..
+               CALL SAXPY(N,SA,SX,INCX,SY,INCY)
+               DO 40 J = 1, LENY
+                  STY(J) = DT8(J,KN,KI)
+   40          CONTINUE
+               CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC)
+            ELSE IF (ICASE.EQ.5) THEN
+*              .. SCOPY ..
+               DO 60 I = 1, 7
+                  STY(I) = DT10Y(I,KN,KI)
+   60          CONTINUE
+               CALL SCOPY(N,SX,INCX,SY,INCY)
+               CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0E0)
+            ELSE IF (ICASE.EQ.6) THEN
+*              .. SSWAP ..
+               CALL SSWAP(N,SX,INCX,SY,INCY)
+               DO 80 I = 1, 7
+                  STX(I) = DT10X(I,KN,KI)
+                  STY(I) = DT10Y(I,KN,KI)
+   80          CONTINUE
+               CALL STEST(LENX,SX,STX,SSIZE2(1,1),1.0E0)
+               CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0E0)
+            ELSE
+               WRITE (NOUT,*) ' Shouldn''t be here in CHECK2'
+               STOP
+            END IF
+  100    CONTINUE
+  120 CONTINUE
+      RETURN
+      END
+      SUBROUTINE CHECK3(SFAC)
+*     .. Parameters ..
+      INTEGER           NOUT
+      PARAMETER         (NOUT=6)
+*     .. Scalar Arguments ..
+      REAL              SFAC
+*     .. Scalars in Common ..
+      INTEGER           ICASE, INCX, INCY, MODE, N
+      LOGICAL           PASS
+*     .. Local Scalars ..
+      REAL              SA, SC, SS
+      INTEGER           I, K, KI, KN, KSIZE, LENX, LENY, MX, MY
+*     .. Local Arrays ..
+      REAL              COPYX(5), COPYY(5), DT9X(7,4,4), DT9Y(7,4,4),
+     +                  DX1(7), DY1(7), MWPC(11), MWPS(11), MWPSTX(5),
+     +                  MWPSTY(5), MWPTX(11,5), MWPTY(11,5), MWPX(5),
+     +                  MWPY(5), SSIZE2(14,2), STX(7), STY(7), SX(7),
+     +                  SY(7)
+      INTEGER           INCXS(4), INCYS(4), LENS(4,2), MWPINX(11),
+     +                  MWPINY(11), MWPN(11), NS(4)
+*     .. External Subroutines ..
+      EXTERNAL          SROT, STEST
+*     .. Intrinsic Functions ..
+      INTRINSIC         ABS, MIN
+*     .. Common blocks ..
+      COMMON            /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Data statements ..
+      DATA              SA/0.3E0/
+      DATA              INCXS/1, 2, -2, -1/
+      DATA              INCYS/1, -2, 1, -2/
+      DATA              LENS/1, 1, 2, 4, 1, 1, 3, 7/
+      DATA              NS/0, 1, 2, 4/
+      DATA              DX1/0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.9E0, -0.3E0,
+     +                  -0.4E0/
+      DATA              DY1/0.5E0, -0.9E0, 0.3E0, 0.7E0, -0.6E0, 0.2E0,
+     +                  0.8E0/
+      DATA              SC, SS/0.8E0, 0.6E0/
+      DATA              DT9X/0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.78E0, -0.46E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.78E0, -0.46E0, -0.22E0,
+     +                  1.06E0, 0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.78E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.66E0, 0.1E0, -0.1E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.96E0, 0.1E0, -0.76E0, 0.8E0, 0.90E0,
+     +                  -0.3E0, -0.02E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.78E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.06E0, 0.1E0,
+     +                  -0.1E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.90E0,
+     +                  0.1E0, -0.22E0, 0.8E0, 0.18E0, -0.3E0, -0.02E0,
+     +                  0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.78E0, 0.26E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.78E0, 0.26E0, -0.76E0, 1.12E0,
+     +                  0.0E0, 0.0E0, 0.0E0/
+      DATA              DT9Y/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.54E0,
+     +                  0.08E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.04E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.7E0,
+     +                  -0.9E0, -0.12E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.64E0, -0.9E0, -0.30E0, 0.7E0, -0.18E0, 0.2E0,
+     +                  0.28E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.7E0, -1.08E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.64E0, -1.26E0,
+     +                  0.54E0, 0.20E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.7E0,
+     +                  -0.18E0, 0.2E0, 0.16E0/
+      DATA              SSIZE2/0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
+     +                  0.0E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0,
+     +                  1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0,
+     +                  1.17E0, 1.17E0, 1.17E0/
+*     .. Executable Statements ..
+*
+      DO 60 KI = 1, 4
+         INCX = INCXS(KI)
+         INCY = INCYS(KI)
+         MX = ABS(INCX)
+         MY = ABS(INCY)
+*
+         DO 40 KN = 1, 4
+            N = NS(KN)
+            KSIZE = MIN(2,KN)
+            LENX = LENS(KN,MX)
+            LENY = LENS(KN,MY)
+*
+            IF (ICASE.EQ.4) THEN
+*              .. SROT ..
+               DO 20 I = 1, 7
+                  SX(I) = DX1(I)
+                  SY(I) = DY1(I)
+                  STX(I) = DT9X(I,KN,KI)
+                  STY(I) = DT9Y(I,KN,KI)
+   20          CONTINUE
+               CALL SROT(N,SX,INCX,SY,INCY,SC,SS)
+               CALL STEST(LENX,SX,STX,SSIZE2(1,KSIZE),SFAC)
+               CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC)
+            ELSE
+               WRITE (NOUT,*) ' Shouldn''t be here in CHECK3'
+               STOP
+            END IF
+   40    CONTINUE
+   60 CONTINUE
+*
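+*     .. Extra SROT tests: exact rotations (C,S) = (1,0), (0,1) and ..
+*     .. (0,-1) applied to X = Y = (1,...,5) with mixed increments; ..
+*     .. MWPTX and MWPTY below hold the expected results ..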
+      MWPC(1) = 1
+      DO 80 I = 2, 11
+         MWPC(I) = 0
+   80 CONTINUE
+      MWPS(1) = 0
+      DO 100 I = 2, 6
+         MWPS(I) = 1
+  100 CONTINUE
+      DO 120 I = 7, 11
+         MWPS(I) = -1
+  120 CONTINUE
+      MWPINX(1) = 1
+      MWPINX(2) = 1
+      MWPINX(3) = 1
+      MWPINX(4) = -1
+      MWPINX(5) = 1
+      MWPINX(6) = -1
+      MWPINX(7) = 1
+      MWPINX(8) = 1
+      MWPINX(9) = -1
+      MWPINX(10) = 1
+      MWPINX(11) = -1
+      MWPINY(1) = 1
+      MWPINY(2) = 1
+      MWPINY(3) = -1
+      MWPINY(4) = -1
+      MWPINY(5) = 2
+      MWPINY(6) = 1
+      MWPINY(7) = 1
+      MWPINY(8) = -1
+      MWPINY(9) = -1
+      MWPINY(10) = 2
+      MWPINY(11) = 1
+      DO 140 I = 1, 11
+         MWPN(I) = 5
+  140 CONTINUE
+      MWPN(5) = 3
+      MWPN(10) = 3
+      DO 160 I = 1, 5
+         MWPX(I) = I
+         MWPY(I) = I
+         MWPTX(1,I) = I
+         MWPTY(1,I) = I
+         MWPTX(2,I) = I
+         MWPTY(2,I) = -I
+         MWPTX(3,I) = 6 - I
+         MWPTY(3,I) = I - 6
+         MWPTX(4,I) = I
+         MWPTY(4,I) = -I
+         MWPTX(6,I) = 6 - I
+         MWPTY(6,I) = I - 6
+         MWPTX(7,I) = -I
+         MWPTY(7,I) = I
+         MWPTX(8,I) = I - 6
+         MWPTY(8,I) = 6 - I
+         MWPTX(9,I) = -I
+         MWPTY(9,I) = I
+         MWPTX(11,I) = I - 6
+         MWPTY(11,I) = 6 - I
+  160 CONTINUE
+      MWPTX(5,1) = 1
+      MWPTX(5,2) = 3
+      MWPTX(5,3) = 5
+      MWPTX(5,4) = 4
+      MWPTX(5,5) = 5
+      MWPTY(5,1) = -1
+      MWPTY(5,2) = 2
+      MWPTY(5,3) = -2
+      MWPTY(5,4) = 4
+      MWPTY(5,5) = -3
+      MWPTX(10,1) = -1
+      MWPTX(10,2) = -3
+      MWPTX(10,3) = -5
+      MWPTX(10,4) = 4
+      MWPTX(10,5) = 5
+      MWPTY(10,1) = 1
+      MWPTY(10,2) = 2
+      MWPTY(10,3) = 2
+      MWPTY(10,4) = 4
+      MWPTY(10,5) = 3
+      DO 200 I = 1, 11
+         INCX = MWPINX(I)
+         INCY = MWPINY(I)
+         DO 180 K = 1, 5
+            COPYX(K) = MWPX(K)
+            COPYY(K) = MWPY(K)
+            MWPSTX(K) = MWPTX(I,K)
+            MWPSTY(K) = MWPTY(I,K)
+  180    CONTINUE
+         CALL SROT(MWPN(I),COPYX,INCX,COPYY,INCY,MWPC(I),MWPS(I))
+         CALL STEST(5,COPYX,MWPSTX,MWPSTX,SFAC)
+         CALL STEST(5,COPYY,MWPSTY,MWPSTY,SFAC)
+  200 CONTINUE
+      RETURN
+      END
+      SUBROUTINE STEST(LEN,SCOMP,STRUE,SSIZE,SFAC)
+*     ********************************* STEST **************************
+*
+*     THIS SUBR COMPARES ARRAYS  SCOMP() AND STRUE() OF LENGTH LEN TO
+*     SEE IF THE TERM BY TERM DIFFERENCES, MULTIPLIED BY SFAC, ARE
+*     NEGLIGIBLE.
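+*     A DIFFERENCE SD = SCOMP(I) - STRUE(I) COUNTS AS NEGLIGIBLE WHEN
+*     ABS(SSIZE(I)) + ABS(SFAC*SD) COMPARES EQUAL TO ABS(SSIZE(I)) IN
+*     WORKING PRECISION.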
+*
+*     C. L. LAWSON, JPL, 1974 DEC 10
+*
+*     .. Parameters ..
+      INTEGER          NOUT
+      PARAMETER        (NOUT=6)
+*     .. Scalar Arguments ..
+      REAL             SFAC
+      INTEGER          LEN
+*     .. Array Arguments ..
+      REAL             SCOMP(LEN), SSIZE(LEN), STRUE(LEN)
+*     .. Scalars in Common ..
+      INTEGER          ICASE, INCX, INCY, MODE, N
+      LOGICAL          PASS
+*     .. Local Scalars ..
+      REAL             SD
+      INTEGER          I
+*     .. External Functions ..
+      REAL             SDIFF
+      EXTERNAL         SDIFF
+*     .. Intrinsic Functions ..
+      INTRINSIC        ABS
+*     .. Common blocks ..
+      COMMON           /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Executable Statements ..
+*
+      DO 40 I = 1, LEN
+         SD = SCOMP(I) - STRUE(I)
+         IF (SDIFF(ABS(SSIZE(I))+ABS(SFAC*SD),ABS(SSIZE(I))).EQ.0.0E0)
+     +       GO TO 40
+*
+*                             HERE    SCOMP(I) IS NOT CLOSE TO STRUE(I).
+*
+         IF ( .NOT. PASS) GO TO 20
+*                             PRINT FAIL MESSAGE AND HEADER.
+         PASS = .FALSE.
+         WRITE (NOUT,99999)
+         WRITE (NOUT,99998)
+   20    WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, I, SCOMP(I),
+     +     STRUE(I), SD, SSIZE(I)
+   40 CONTINUE
+      RETURN
+*
+99999 FORMAT ('                                       FAIL')
+99998 FORMAT (/' CASE  N INCX INCY MODE  I                            ',
+     +       ' COMP(I)                             TRUE(I)  DIFFERENCE',
+     +       '     SIZE(I)',/1X)
+99997 FORMAT (1X,I4,I3,3I5,I3,2E36.8,2E12.4)
+      END
+      SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
+*     ************************* STEST1 *****************************
+*
+*     THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
+*     REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
+*     ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
+*
+*     C.L. LAWSON, JPL, 1978 DEC 6
+*
+*     .. Scalar Arguments ..
+      REAL              SCOMP1, SFAC, STRUE1
+*     .. Array Arguments ..
+      REAL              SSIZE(*)
+*     .. Local Arrays ..
+      REAL              SCOMP(1), STRUE(1)
+*     .. External Subroutines ..
+      EXTERNAL          STEST
+*     .. Executable Statements ..
+*
+      SCOMP(1) = SCOMP1
+      STRUE(1) = STRUE1
+      CALL STEST(1,SCOMP,STRUE,SSIZE,SFAC)
+*
+      RETURN
+      END
+      REAL             FUNCTION SDIFF(SA,SB)
+*     ********************************* SDIFF **************************
+*     COMPUTES DIFFERENCE OF TWO NUMBERS.  C. L. LAWSON, JPL 1974 FEB 15
+*
+*     .. Scalar Arguments ..
+      REAL                            SA, SB
+*     .. Executable Statements ..
+      SDIFF = SA - SB
+      RETURN
+      END
+      SUBROUTINE ITEST1(ICOMP,ITRUE)
+*     ********************************* ITEST1 *************************
+*
+*     THIS SUBROUTINE COMPARES THE VARIABLES ICOMP AND ITRUE FOR
+*     EQUALITY.
+*     C. L. LAWSON, JPL, 1974 DEC 10
+*
+*     .. Parameters ..
+      INTEGER           NOUT
+      PARAMETER         (NOUT=6)
+*     .. Scalar Arguments ..
+      INTEGER           ICOMP, ITRUE
+*     .. Scalars in Common ..
+      INTEGER           ICASE, INCX, INCY, MODE, N
+      LOGICAL           PASS
+*     .. Local Scalars ..
+      INTEGER           ID
+*     .. Common blocks ..
+      COMMON            /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+*     .. Executable Statements ..
+*
+      IF (ICOMP.EQ.ITRUE) GO TO 40
+*
+*                            HERE ICOMP IS NOT EQUAL TO ITRUE.
+*
+      IF ( .NOT. PASS) GO TO 20
+*                             PRINT FAIL MESSAGE AND HEADER.
+      PASS = .FALSE.
+      WRITE (NOUT,99999)
+      WRITE (NOUT,99998)
+   20 ID = ICOMP - ITRUE
+      WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, ICOMP, ITRUE, ID
+   40 CONTINUE
+      RETURN
+*
+99999 FORMAT ('                                       FAIL')
+99998 FORMAT (/' CASE  N INCX INCY MODE                               ',
+     +       ' COMP                                TRUE     DIFFERENCE',
+     +       /1X)
+99997 FORMAT (1X,I4,I3,3I5,2I36,I12)
+      END
diff --git a/resources/3rdparty/eigen/blas/testing/sblat2.dat b/resources/3rdParty/eigen/blas/testing/sblat2.dat
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/sblat2.dat
rename to resources/3rdParty/eigen/blas/testing/sblat2.dat
diff --git a/resources/3rdparty/eigen/blas/testing/sblat2.f b/resources/3rdParty/eigen/blas/testing/sblat2.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/sblat2.f
rename to resources/3rdParty/eigen/blas/testing/sblat2.f
diff --git a/resources/3rdparty/eigen/blas/testing/sblat3.dat b/resources/3rdParty/eigen/blas/testing/sblat3.dat
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/sblat3.dat
rename to resources/3rdParty/eigen/blas/testing/sblat3.dat
diff --git a/resources/3rdparty/eigen/blas/testing/sblat3.f b/resources/3rdParty/eigen/blas/testing/sblat3.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/sblat3.f
rename to resources/3rdParty/eigen/blas/testing/sblat3.f
diff --git a/resources/3rdparty/eigen/blas/testing/zblat1.f b/resources/3rdParty/eigen/blas/testing/zblat1.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/zblat1.f
rename to resources/3rdParty/eigen/blas/testing/zblat1.f
diff --git a/resources/3rdparty/eigen/blas/testing/zblat2.dat b/resources/3rdParty/eigen/blas/testing/zblat2.dat
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/zblat2.dat
rename to resources/3rdParty/eigen/blas/testing/zblat2.dat
diff --git a/resources/3rdparty/eigen/blas/testing/zblat2.f b/resources/3rdParty/eigen/blas/testing/zblat2.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/zblat2.f
rename to resources/3rdParty/eigen/blas/testing/zblat2.f
diff --git a/resources/3rdparty/eigen/blas/testing/zblat3.dat b/resources/3rdParty/eigen/blas/testing/zblat3.dat
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/zblat3.dat
rename to resources/3rdParty/eigen/blas/testing/zblat3.dat
diff --git a/resources/3rdparty/eigen/blas/testing/zblat3.f b/resources/3rdParty/eigen/blas/testing/zblat3.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/testing/zblat3.f
rename to resources/3rdParty/eigen/blas/testing/zblat3.f
diff --git a/resources/3rdparty/eigen/blas/xerbla.cpp b/resources/3rdParty/eigen/blas/xerbla.cpp
similarity index 100%
rename from resources/3rdparty/eigen/blas/xerbla.cpp
rename to resources/3rdParty/eigen/blas/xerbla.cpp
diff --git a/resources/3rdparty/eigen/blas/zhbmv.f b/resources/3rdParty/eigen/blas/zhbmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/zhbmv.f
rename to resources/3rdParty/eigen/blas/zhbmv.f
diff --git a/resources/3rdparty/eigen/blas/zhpmv.f b/resources/3rdParty/eigen/blas/zhpmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/zhpmv.f
rename to resources/3rdParty/eigen/blas/zhpmv.f
diff --git a/resources/3rdParty/eigen/blas/zhpr.f b/resources/3rdParty/eigen/blas/zhpr.f
new file mode 100644
index 000000000..40efbc7d5
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/zhpr.f
@@ -0,0 +1,220 @@
+      SUBROUTINE ZHPR(UPLO,N,ALPHA,X,INCX,AP)
+*     .. Scalar Arguments ..
+      DOUBLE PRECISION ALPHA
+      INTEGER INCX,N
+      CHARACTER UPLO
+*     ..
+*     .. Array Arguments ..
+      DOUBLE COMPLEX AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  ZHPR    performs the hermitian rank 1 operation
+*
+*     A := alpha*x*conjg( x' ) + A,
+*
+*  where alpha is a real scalar, x is an n element vector and A is an
+*  n by n hermitian matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the upper or lower
+*           triangular part of the matrix A is supplied in the packed
+*           array AP as follows:
+*
+*              UPLO = 'U' or 'u'   The upper triangular part of A is
+*                                  supplied in AP.
+*
+*              UPLO = 'L' or 'l'   The lower triangular part of A is
+*                                  supplied in AP.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  ALPHA  - DOUBLE PRECISION.
+*           On entry, ALPHA specifies the scalar alpha.
+*           Unchanged on exit.
+*
+*  X      - COMPLEX*16       array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x.
+*           Unchanged on exit.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  AP     - COMPLEX*16       array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular part of the hermitian matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 )
+*           and a( 2, 2 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the upper triangular part of the
+*           updated matrix.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular part of the hermitian matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 )
+*           and a( 3, 1 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the lower triangular part of the
+*           updated matrix.
+*           Note that the imaginary parts of the diagonal elements need
+*           not be set, they are assumed to be zero, and on exit they
+*           are set to zero.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      DOUBLE COMPLEX ZERO
+      PARAMETER (ZERO= (0.0D+0,0.0D+0))
+*     ..
+*     .. Local Scalars ..
+      DOUBLE COMPLEX TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*     .. Intrinsic Functions ..
+      INTRINSIC DBLE,DCONJG
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (N.LT.0) THEN
+          INFO = 2
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 5
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('ZHPR  ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF ((N.EQ.0) .OR. (ALPHA.EQ.DBLE(ZERO))) RETURN
+*
+*     Set the start point in X if the increment is not unity.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of the array AP
+*     are accessed sequentially with one pass through AP.
+*
+      KK = 1
+      IF (LSAME(UPLO,'U')) THEN
+*
+*        Form  A  when upper triangle is stored in AP.
+*
+          IF (INCX.EQ.1) THEN
+              DO 20 J = 1,N
+                  IF (X(J).NE.ZERO) THEN
+                      TEMP = ALPHA*DCONJG(X(J))
+                      K = KK
+                      DO 10 I = 1,J - 1
+                          AP(K) = AP(K) + X(I)*TEMP
+                          K = K + 1
+   10                 CONTINUE
+                      AP(KK+J-1) = DBLE(AP(KK+J-1)) + DBLE(X(J)*TEMP)
+                  ELSE
+                      AP(KK+J-1) = DBLE(AP(KK+J-1))
+                  END IF
+                  KK = KK + J
+   20         CONTINUE
+          ELSE
+              JX = KX
+              DO 40 J = 1,N
+                  IF (X(JX).NE.ZERO) THEN
+                      TEMP = ALPHA*DCONJG(X(JX))
+                      IX = KX
+                      DO 30 K = KK,KK + J - 2
+                          AP(K) = AP(K) + X(IX)*TEMP
+                          IX = IX + INCX
+   30                 CONTINUE
+                      AP(KK+J-1) = DBLE(AP(KK+J-1)) + DBLE(X(JX)*TEMP)
+                  ELSE
+                      AP(KK+J-1) = DBLE(AP(KK+J-1))
+                  END IF
+                  JX = JX + INCX
+                  KK = KK + J
+   40         CONTINUE
+          END IF
+      ELSE
+*
+*        Form  A  when lower triangle is stored in AP.
+*
+          IF (INCX.EQ.1) THEN
+              DO 60 J = 1,N
+                  IF (X(J).NE.ZERO) THEN
+                      TEMP = ALPHA*DCONJG(X(J))
+                      AP(KK) = DBLE(AP(KK)) + DBLE(TEMP*X(J))
+                      K = KK + 1
+                      DO 50 I = J + 1,N
+                          AP(K) = AP(K) + X(I)*TEMP
+                          K = K + 1
+   50                 CONTINUE
+                  ELSE
+                      AP(KK) = DBLE(AP(KK))
+                  END IF
+                  KK = KK + N - J + 1
+   60         CONTINUE
+          ELSE
+              JX = KX
+              DO 80 J = 1,N
+                  IF (X(JX).NE.ZERO) THEN
+                      TEMP = ALPHA*DCONJG(X(JX))
+                      AP(KK) = DBLE(AP(KK)) + DBLE(TEMP*X(JX))
+                      IX = JX
+                      DO 70 K = KK + 1,KK + N - J
+                          IX = IX + INCX
+                          AP(K) = AP(K) + X(IX)*TEMP
+   70                 CONTINUE
+                  ELSE
+                      AP(KK) = DBLE(AP(KK))
+                  END IF
+                  JX = JX + INCX
+                  KK = KK + N - J + 1
+   80         CONTINUE
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of ZHPR  .
+*
+      END
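The header above fully specifies both the operation and the packed layout, so the core of the routine is easy to restate outside Fortran. The following C++ sketch is an editor's illustration of the UPLO = 'U', INCX = 1 case only; it is not part of this patch, and the 0-based position of a(i,j) (i <= j) at ap[j*(j+1)/2 + i] is the one assumption it adds to what the header states.

    // Illustrative only, not the routine added by this patch: the packed
    // hermitian rank-1 update A := alpha*x*conj(x)^T + A for UPLO='U', INCX=1.
    // Assumption: 0-based indexing with a(i,j), i <= j, stored at ap[j*(j+1)/2 + i].
    #include <complex>
    #include <vector>

    using cplx = std::complex<double>;

    void packed_her_rank1_upper(int n, double alpha,
                                const std::vector<cplx>& x, std::vector<cplx>& ap)
    {
        for (int j = 0; j < n; ++j) {
            const cplx temp = alpha * std::conj(x[j]);
            const int kk = j * (j + 1) / 2;      // start of column j in ap
            for (int i = 0; i < j; ++i)
                ap[kk + i] += x[i] * temp;       // strict upper part of column j
            // the diagonal of a hermitian matrix stays real; keep only the real
            // part, exactly as the Fortran code does with DBLE()
            ap[kk + j] = cplx(ap[kk + j].real() + (x[j] * temp).real(), 0.0);
        }
    }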
diff --git a/resources/3rdParty/eigen/blas/zhpr2.f b/resources/3rdParty/eigen/blas/zhpr2.f
new file mode 100644
index 000000000..99977462e
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/zhpr2.f
@@ -0,0 +1,255 @@
+      SUBROUTINE ZHPR2(UPLO,N,ALPHA,X,INCX,Y,INCY,AP)
+*     .. Scalar Arguments ..
+      DOUBLE COMPLEX ALPHA
+      INTEGER INCX,INCY,N
+      CHARACTER UPLO
+*     ..
+*     .. Array Arguments ..
+      DOUBLE COMPLEX AP(*),X(*),Y(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  ZHPR2  performs the hermitian rank 2 operation
+*
+*     A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A,
+*
+*  where alpha is a scalar, x and y are n element vectors and A is an
+*  n by n hermitian matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the upper or lower
+*           triangular part of the matrix A is supplied in the packed
+*           array AP as follows:
+*
+*              UPLO = 'U' or 'u'   The upper triangular part of A is
+*                                  supplied in AP.
+*
+*              UPLO = 'L' or 'l'   The lower triangular part of A is
+*                                  supplied in AP.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  ALPHA  - COMPLEX*16      .
+*           On entry, ALPHA specifies the scalar alpha.
+*           Unchanged on exit.
+*
+*  X      - COMPLEX*16       array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x.
+*           Unchanged on exit.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Y      - COMPLEX*16       array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCY ) ).
+*           Before entry, the incremented array Y must contain the n
+*           element vector y.
+*           Unchanged on exit.
+*
+*  INCY   - INTEGER.
+*           On entry, INCY specifies the increment for the elements of
+*           Y. INCY must not be zero.
+*           Unchanged on exit.
+*
+*  AP     - COMPLEX*16       array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular part of the hermitian matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 )
+*           and a( 2, 2 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the upper triangular part of the
+*           updated matrix.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular part of the hermitian matrix
+*           packed sequentially, column by column, so that AP( 1 )
+*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 )
+*           and a( 3, 1 ) respectively, and so on. On exit, the array
+*           AP is overwritten by the lower triangular part of the
+*           updated matrix.
+*           Note that the imaginary parts of the diagonal elements need
+*           not be set, they are assumed to be zero, and on exit they
+*           are set to zero.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      DOUBLE COMPLEX ZERO
+      PARAMETER (ZERO= (0.0D+0,0.0D+0))
+*     ..
+*     .. Local Scalars ..
+      DOUBLE COMPLEX TEMP1,TEMP2
+      INTEGER I,INFO,IX,IY,J,JX,JY,K,KK,KX,KY
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*     .. Intrinsic Functions ..
+      INTRINSIC DBLE,DCONJG
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (N.LT.0) THEN
+          INFO = 2
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 5
+      ELSE IF (INCY.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('ZHPR2 ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF ((N.EQ.0) .OR. (ALPHA.EQ.ZERO)) RETURN
+*
+*     Set up the start points in X and Y if the increments are not both
+*     unity.
+*
+      IF ((INCX.NE.1) .OR. (INCY.NE.1)) THEN
+          IF (INCX.GT.0) THEN
+              KX = 1
+          ELSE
+              KX = 1 - (N-1)*INCX
+          END IF
+          IF (INCY.GT.0) THEN
+              KY = 1
+          ELSE
+              KY = 1 - (N-1)*INCY
+          END IF
+          JX = KX
+          JY = KY
+      END IF
+*
+*     Start the operations. In this version the elements of the array AP
+*     are accessed sequentially with one pass through AP.
+*
+      KK = 1
+      IF (LSAME(UPLO,'U')) THEN
+*
+*        Form  A  when upper triangle is stored in AP.
+*
+          IF ((INCX.EQ.1) .AND. (INCY.EQ.1)) THEN
+              DO 20 J = 1,N
+                  IF ((X(J).NE.ZERO) .OR. (Y(J).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*DCONJG(Y(J))
+                      TEMP2 = DCONJG(ALPHA*X(J))
+                      K = KK
+                      DO 10 I = 1,J - 1
+                          AP(K) = AP(K) + X(I)*TEMP1 + Y(I)*TEMP2
+                          K = K + 1
+   10                 CONTINUE
+                      AP(KK+J-1) = DBLE(AP(KK+J-1)) +
+     +                             DBLE(X(J)*TEMP1+Y(J)*TEMP2)
+                  ELSE
+                      AP(KK+J-1) = DBLE(AP(KK+J-1))
+                  END IF
+                  KK = KK + J
+   20         CONTINUE
+          ELSE
+              DO 40 J = 1,N
+                  IF ((X(JX).NE.ZERO) .OR. (Y(JY).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*DCONJG(Y(JY))
+                      TEMP2 = DCONJG(ALPHA*X(JX))
+                      IX = KX
+                      IY = KY
+                      DO 30 K = KK,KK + J - 2
+                          AP(K) = AP(K) + X(IX)*TEMP1 + Y(IY)*TEMP2
+                          IX = IX + INCX
+                          IY = IY + INCY
+   30                 CONTINUE
+                      AP(KK+J-1) = DBLE(AP(KK+J-1)) +
+     +                             DBLE(X(JX)*TEMP1+Y(JY)*TEMP2)
+                  ELSE
+                      AP(KK+J-1) = DBLE(AP(KK+J-1))
+                  END IF
+                  JX = JX + INCX
+                  JY = JY + INCY
+                  KK = KK + J
+   40         CONTINUE
+          END IF
+      ELSE
+*
+*        Form  A  when lower triangle is stored in AP.
+*
+          IF ((INCX.EQ.1) .AND. (INCY.EQ.1)) THEN
+              DO 60 J = 1,N
+                  IF ((X(J).NE.ZERO) .OR. (Y(J).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*DCONJG(Y(J))
+                      TEMP2 = DCONJG(ALPHA*X(J))
+                      AP(KK) = DBLE(AP(KK)) +
+     +                         DBLE(X(J)*TEMP1+Y(J)*TEMP2)
+                      K = KK + 1
+                      DO 50 I = J + 1,N
+                          AP(K) = AP(K) + X(I)*TEMP1 + Y(I)*TEMP2
+                          K = K + 1
+   50                 CONTINUE
+                  ELSE
+                      AP(KK) = DBLE(AP(KK))
+                  END IF
+                  KK = KK + N - J + 1
+   60         CONTINUE
+          ELSE
+              DO 80 J = 1,N
+                  IF ((X(JX).NE.ZERO) .OR. (Y(JY).NE.ZERO)) THEN
+                      TEMP1 = ALPHA*DCONJG(Y(JY))
+                      TEMP2 = DCONJG(ALPHA*X(JX))
+                      AP(KK) = DBLE(AP(KK)) +
+     +                         DBLE(X(JX)*TEMP1+Y(JY)*TEMP2)
+                      IX = JX
+                      IY = JY
+                      DO 70 K = KK + 1,KK + N - J
+                          IX = IX + INCX
+                          IY = IY + INCY
+                          AP(K) = AP(K) + X(IX)*TEMP1 + Y(IY)*TEMP2
+   70                 CONTINUE
+                  ELSE
+                      AP(KK) = DBLE(AP(KK))
+                  END IF
+                  JX = JX + INCX
+                  JY = JY + INCY
+                  KK = KK + N - J + 1
+   80         CONTINUE
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of ZHPR2 .
+*
+      END
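ZHPR2 shares the packed layout, so its UPLO = 'U', INCX = INCY = 1 case differs from the rank-1 sketch above only in forming two scaled vectors per column. Again an editor's illustration under the same 0-based indexing assumption, not the shipped code:

    // Illustrative only: A := alpha*x*conj(y)^T + conj(alpha)*y*conj(x)^T + A
    // for UPLO='U', INCX=INCY=1, same packed layout assumption as above.
    #include <complex>
    #include <vector>

    using cplx = std::complex<double>;

    void packed_her_rank2_upper(int n, cplx alpha,
                                const std::vector<cplx>& x, const std::vector<cplx>& y,
                                std::vector<cplx>& ap)
    {
        for (int j = 0; j < n; ++j) {
            const cplx t1 = alpha * std::conj(y[j]);
            const cplx t2 = std::conj(alpha * x[j]);
            const int kk = j * (j + 1) / 2;      // start of column j in ap
            for (int i = 0; i < j; ++i)
                ap[kk + i] += x[i] * t1 + y[i] * t2;
            // the combined diagonal contribution is real, so only its real part is kept
            ap[kk + j] = cplx(ap[kk + j].real() + (x[j] * t1 + y[j] * t2).real(), 0.0);
        }
    }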
diff --git a/resources/3rdparty/eigen/blas/ztbmv.f b/resources/3rdParty/eigen/blas/ztbmv.f
similarity index 100%
rename from resources/3rdparty/eigen/blas/ztbmv.f
rename to resources/3rdParty/eigen/blas/ztbmv.f
diff --git a/resources/3rdParty/eigen/blas/ztpmv.f b/resources/3rdParty/eigen/blas/ztpmv.f
new file mode 100644
index 000000000..5a7b3b8b7
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/ztpmv.f
@@ -0,0 +1,329 @@
+      SUBROUTINE ZTPMV(UPLO,TRANS,DIAG,N,AP,X,INCX)
+*     .. Scalar Arguments ..
+      INTEGER INCX,N
+      CHARACTER DIAG,TRANS,UPLO
+*     ..
+*     .. Array Arguments ..
+      DOUBLE COMPLEX AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  ZTPMV  performs one of the matrix-vector operations
+*
+*     x := A*x,   or   x := A'*x,   or   x := conjg( A' )*x,
+*
+*  where x is an n element vector and  A is an n by n unit, or non-unit,
+*  upper or lower triangular matrix, supplied in packed form.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the matrix is an upper or
+*           lower triangular matrix as follows:
+*
+*              UPLO = 'U' or 'u'   A is an upper triangular matrix.
+*
+*              UPLO = 'L' or 'l'   A is a lower triangular matrix.
+*
+*           Unchanged on exit.
+*
+*  TRANS  - CHARACTER*1.
+*           On entry, TRANS specifies the operation to be performed as
+*           follows:
+*
+*              TRANS = 'N' or 'n'   x := A*x.
+*
+*              TRANS = 'T' or 't'   x := A'*x.
+*
+*              TRANS = 'C' or 'c'   x := conjg( A' )*x.
+*
+*           Unchanged on exit.
+*
+*  DIAG   - CHARACTER*1.
+*           On entry, DIAG specifies whether or not A is unit
+*           triangular as follows:
+*
+*              DIAG = 'U' or 'u'   A is assumed to be unit triangular.
+*
+*              DIAG = 'N' or 'n'   A is not assumed to be unit
+*                                  triangular.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  AP     - COMPLEX*16       array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 )
+*           respectively, and so on.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 )
+*           respectively, and so on.
+*           Note that when  DIAG = 'U' or 'u', the diagonal elements of
+*           A are not referenced, but are assumed to be unity.
+*           Unchanged on exit.
+*
+*  X      - COMPLEX*16       array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element vector x. On exit, X is overwritten with the
+*           transformed vector x.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      DOUBLE COMPLEX ZERO
+      PARAMETER (ZERO= (0.0D+0,0.0D+0))
+*     ..
+*     .. Local Scalars ..
+      DOUBLE COMPLEX TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+      LOGICAL NOCONJ,NOUNIT
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*     .. Intrinsic Functions ..
+      INTRINSIC DCONJG
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND.
+     +         .NOT.LSAME(TRANS,'C')) THEN
+          INFO = 2
+      ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN
+          INFO = 3
+      ELSE IF (N.LT.0) THEN
+          INFO = 4
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('ZTPMV ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF (N.EQ.0) RETURN
+*
+      NOCONJ = LSAME(TRANS,'T')
+      NOUNIT = LSAME(DIAG,'N')
+*
+*     Set up the start point in X if the increment is not unity. This
+*     will be  ( N - 1 )*INCX  too small for descending loops.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of AP are
+*     accessed sequentially with one pass through AP.
+*
+      IF (LSAME(TRANS,'N')) THEN
+*
+*        Form  x:= A*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 20 J = 1,N
+                      IF (X(J).NE.ZERO) THEN
+                          TEMP = X(J)
+                          K = KK
+                          DO 10 I = 1,J - 1
+                              X(I) = X(I) + TEMP*AP(K)
+                              K = K + 1
+   10                     CONTINUE
+                          IF (NOUNIT) X(J) = X(J)*AP(KK+J-1)
+                      END IF
+                      KK = KK + J
+   20             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 40 J = 1,N
+                      IF (X(JX).NE.ZERO) THEN
+                          TEMP = X(JX)
+                          IX = KX
+                          DO 30 K = KK,KK + J - 2
+                              X(IX) = X(IX) + TEMP*AP(K)
+                              IX = IX + INCX
+   30                     CONTINUE
+                          IF (NOUNIT) X(JX) = X(JX)*AP(KK+J-1)
+                      END IF
+                      JX = JX + INCX
+                      KK = KK + J
+   40             CONTINUE
+              END IF
+          ELSE
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 60 J = N,1,-1
+                      IF (X(J).NE.ZERO) THEN
+                          TEMP = X(J)
+                          K = KK
+                          DO 50 I = N,J + 1,-1
+                              X(I) = X(I) + TEMP*AP(K)
+                              K = K - 1
+   50                     CONTINUE
+                          IF (NOUNIT) X(J) = X(J)*AP(KK-N+J)
+                      END IF
+                      KK = KK - (N-J+1)
+   60             CONTINUE
+              ELSE
+                  KX = KX + (N-1)*INCX
+                  JX = KX
+                  DO 80 J = N,1,-1
+                      IF (X(JX).NE.ZERO) THEN
+                          TEMP = X(JX)
+                          IX = KX
+                          DO 70 K = KK,KK - (N- (J+1)),-1
+                              X(IX) = X(IX) + TEMP*AP(K)
+                              IX = IX - INCX
+   70                     CONTINUE
+                          IF (NOUNIT) X(JX) = X(JX)*AP(KK-N+J)
+                      END IF
+                      JX = JX - INCX
+                      KK = KK - (N-J+1)
+   80             CONTINUE
+              END IF
+          END IF
+      ELSE
+*
+*        Form  x := A'*x  or  x := conjg( A' )*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 110 J = N,1,-1
+                      TEMP = X(J)
+                      K = KK - 1
+                      IF (NOCONJ) THEN
+                          IF (NOUNIT) TEMP = TEMP*AP(KK)
+                          DO 90 I = J - 1,1,-1
+                              TEMP = TEMP + AP(K)*X(I)
+                              K = K - 1
+   90                     CONTINUE
+                      ELSE
+                          IF (NOUNIT) TEMP = TEMP*DCONJG(AP(KK))
+                          DO 100 I = J - 1,1,-1
+                              TEMP = TEMP + DCONJG(AP(K))*X(I)
+                              K = K - 1
+  100                     CONTINUE
+                      END IF
+                      X(J) = TEMP
+                      KK = KK - J
+  110             CONTINUE
+              ELSE
+                  JX = KX + (N-1)*INCX
+                  DO 140 J = N,1,-1
+                      TEMP = X(JX)
+                      IX = JX
+                      IF (NOCONJ) THEN
+                          IF (NOUNIT) TEMP = TEMP*AP(KK)
+                          DO 120 K = KK - 1,KK - J + 1,-1
+                              IX = IX - INCX
+                              TEMP = TEMP + AP(K)*X(IX)
+  120                     CONTINUE
+                      ELSE
+                          IF (NOUNIT) TEMP = TEMP*DCONJG(AP(KK))
+                          DO 130 K = KK - 1,KK - J + 1,-1
+                              IX = IX - INCX
+                              TEMP = TEMP + DCONJG(AP(K))*X(IX)
+  130                     CONTINUE
+                      END IF
+                      X(JX) = TEMP
+                      JX = JX - INCX
+                      KK = KK - J
+  140             CONTINUE
+              END IF
+          ELSE
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 170 J = 1,N
+                      TEMP = X(J)
+                      K = KK + 1
+                      IF (NOCONJ) THEN
+                          IF (NOUNIT) TEMP = TEMP*AP(KK)
+                          DO 150 I = J + 1,N
+                              TEMP = TEMP + AP(K)*X(I)
+                              K = K + 1
+  150                     CONTINUE
+                      ELSE
+                          IF (NOUNIT) TEMP = TEMP*DCONJG(AP(KK))
+                          DO 160 I = J + 1,N
+                              TEMP = TEMP + DCONJG(AP(K))*X(I)
+                              K = K + 1
+  160                     CONTINUE
+                      END IF
+                      X(J) = TEMP
+                      KK = KK + (N-J+1)
+  170             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 200 J = 1,N
+                      TEMP = X(JX)
+                      IX = JX
+                      IF (NOCONJ) THEN
+                          IF (NOUNIT) TEMP = TEMP*AP(KK)
+                          DO 180 K = KK + 1,KK + N - J
+                              IX = IX + INCX
+                              TEMP = TEMP + AP(K)*X(IX)
+  180                     CONTINUE
+                      ELSE
+                          IF (NOUNIT) TEMP = TEMP*DCONJG(AP(KK))
+                          DO 190 K = KK + 1,KK + N - J
+                              IX = IX + INCX
+                              TEMP = TEMP + DCONJG(AP(K))*X(IX)
+  190                     CONTINUE
+                      END IF
+                      X(JX) = TEMP
+                      JX = JX + INCX
+                      KK = KK + (N-J+1)
+  200             CONTINUE
+              END IF
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of ZTPMV .
+*
+      END
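As with the packed updates, the UPLO = 'U', TRANS = 'N', DIAG = 'N', INCX = 1 branch of ZTPMV collapses to a short loop once the packed indexing is spelled out. An illustrative C++ restatement (same 0-based layout assumption, not part of the patch):

    // Illustrative only: x := A*x with A upper triangular, non-unit diagonal,
    // packed column by column; a(i,j), i <= j, assumed at ap[j*(j+1)/2 + i].
    #include <complex>
    #include <vector>

    using cplx = std::complex<double>;

    void packed_upper_trmv(int n, const std::vector<cplx>& ap, std::vector<cplx>& x)
    {
        for (int j = 0; j < n; ++j) {
            const int kk = j * (j + 1) / 2;      // start of column j in ap
            const cplx temp = x[j];              // still the original x(j) at this point
            for (int i = 0; i < j; ++i)
                x[i] += temp * ap[kk + i];       // scatter column j into the rows above
            x[j] *= ap[kk + j];                  // non-unit diagonal (DIAG='N')
        }
    }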
diff --git a/resources/3rdParty/eigen/blas/ztpsv.f b/resources/3rdParty/eigen/blas/ztpsv.f
new file mode 100644
index 000000000..b56e1d8c4
--- /dev/null
+++ b/resources/3rdParty/eigen/blas/ztpsv.f
@@ -0,0 +1,332 @@
+      SUBROUTINE ZTPSV(UPLO,TRANS,DIAG,N,AP,X,INCX)
+*     .. Scalar Arguments ..
+      INTEGER INCX,N
+      CHARACTER DIAG,TRANS,UPLO
+*     ..
+*     .. Array Arguments ..
+      DOUBLE COMPLEX AP(*),X(*)
+*     ..
+*
+*  Purpose
+*  =======
+*
+*  ZTPSV  solves one of the systems of equations
+*
+*     A*x = b,   or   A'*x = b,   or   conjg( A' )*x = b,
+*
+*  where b and x are n element vectors and A is an n by n unit, or
+*  non-unit, upper or lower triangular matrix, supplied in packed form.
+*
+*  No test for singularity or near-singularity is included in this
+*  routine. Such tests must be performed before calling this routine.
+*
+*  Arguments
+*  ==========
+*
+*  UPLO   - CHARACTER*1.
+*           On entry, UPLO specifies whether the matrix is an upper or
+*           lower triangular matrix as follows:
+*
+*              UPLO = 'U' or 'u'   A is an upper triangular matrix.
+*
+*              UPLO = 'L' or 'l'   A is a lower triangular matrix.
+*
+*           Unchanged on exit.
+*
+*  TRANS  - CHARACTER*1.
+*           On entry, TRANS specifies the equations to be solved as
+*           follows:
+*
+*              TRANS = 'N' or 'n'   A*x = b.
+*
+*              TRANS = 'T' or 't'   A'*x = b.
+*
+*              TRANS = 'C' or 'c'   conjg( A' )*x = b.
+*
+*           Unchanged on exit.
+*
+*  DIAG   - CHARACTER*1.
+*           On entry, DIAG specifies whether or not A is unit
+*           triangular as follows:
+*
+*              DIAG = 'U' or 'u'   A is assumed to be unit triangular.
+*
+*              DIAG = 'N' or 'n'   A is not assumed to be unit
+*                                  triangular.
+*
+*           Unchanged on exit.
+*
+*  N      - INTEGER.
+*           On entry, N specifies the order of the matrix A.
+*           N must be at least zero.
+*           Unchanged on exit.
+*
+*  AP     - COMPLEX*16       array of DIMENSION at least
+*           ( ( n*( n + 1 ) )/2 ).
+*           Before entry with  UPLO = 'U' or 'u', the array AP must
+*           contain the upper triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 )
+*           respectively, and so on.
+*           Before entry with UPLO = 'L' or 'l', the array AP must
+*           contain the lower triangular matrix packed sequentially,
+*           column by column, so that AP( 1 ) contains a( 1, 1 ),
+*           AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 )
+*           respectively, and so on.
+*           Note that when  DIAG = 'U' or 'u', the diagonal elements of
+*           A are not referenced, but are assumed to be unity.
+*           Unchanged on exit.
+*
+*  X      - COMPLEX*16       array of dimension at least
+*           ( 1 + ( n - 1 )*abs( INCX ) ).
+*           Before entry, the incremented array X must contain the n
+*           element right-hand side vector b. On exit, X is overwritten
+*           with the solution vector x.
+*
+*  INCX   - INTEGER.
+*           On entry, INCX specifies the increment for the elements of
+*           X. INCX must not be zero.
+*           Unchanged on exit.
+*
+*  Further Details
+*  ===============
+*
+*  Level 2 Blas routine.
+*
+*  -- Written on 22-October-1986.
+*     Jack Dongarra, Argonne National Lab.
+*     Jeremy Du Croz, Nag Central Office.
+*     Sven Hammarling, Nag Central Office.
+*     Richard Hanson, Sandia National Labs.
+*
+*  =====================================================================
+*
+*     .. Parameters ..
+      DOUBLE COMPLEX ZERO
+      PARAMETER (ZERO= (0.0D+0,0.0D+0))
+*     ..
+*     .. Local Scalars ..
+      DOUBLE COMPLEX TEMP
+      INTEGER I,INFO,IX,J,JX,K,KK,KX
+      LOGICAL NOCONJ,NOUNIT
+*     ..
+*     .. External Functions ..
+      LOGICAL LSAME
+      EXTERNAL LSAME
+*     ..
+*     .. External Subroutines ..
+      EXTERNAL XERBLA
+*     ..
+*     .. Intrinsic Functions ..
+      INTRINSIC DCONJG
+*     ..
+*
+*     Test the input parameters.
+*
+      INFO = 0
+      IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN
+          INFO = 1
+      ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND.
+     +         .NOT.LSAME(TRANS,'C')) THEN
+          INFO = 2
+      ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN
+          INFO = 3
+      ELSE IF (N.LT.0) THEN
+          INFO = 4
+      ELSE IF (INCX.EQ.0) THEN
+          INFO = 7
+      END IF
+      IF (INFO.NE.0) THEN
+          CALL XERBLA('ZTPSV ',INFO)
+          RETURN
+      END IF
+*
+*     Quick return if possible.
+*
+      IF (N.EQ.0) RETURN
+*
+      NOCONJ = LSAME(TRANS,'T')
+      NOUNIT = LSAME(DIAG,'N')
+*
+*     Set up the start point in X if the increment is not unity. This
+*     will be  ( N - 1 )*INCX  too small for descending loops.
+*
+      IF (INCX.LE.0) THEN
+          KX = 1 - (N-1)*INCX
+      ELSE IF (INCX.NE.1) THEN
+          KX = 1
+      END IF
+*
+*     Start the operations. In this version the elements of AP are
+*     accessed sequentially with one pass through AP.
+*
+      IF (LSAME(TRANS,'N')) THEN
+*
+*        Form  x := inv( A )*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 20 J = N,1,-1
+                      IF (X(J).NE.ZERO) THEN
+                          IF (NOUNIT) X(J) = X(J)/AP(KK)
+                          TEMP = X(J)
+                          K = KK - 1
+                          DO 10 I = J - 1,1,-1
+                              X(I) = X(I) - TEMP*AP(K)
+                              K = K - 1
+   10                     CONTINUE
+                      END IF
+                      KK = KK - J
+   20             CONTINUE
+              ELSE
+                  JX = KX + (N-1)*INCX
+                  DO 40 J = N,1,-1
+                      IF (X(JX).NE.ZERO) THEN
+                          IF (NOUNIT) X(JX) = X(JX)/AP(KK)
+                          TEMP = X(JX)
+                          IX = JX
+                          DO 30 K = KK - 1,KK - J + 1,-1
+                              IX = IX - INCX
+                              X(IX) = X(IX) - TEMP*AP(K)
+   30                     CONTINUE
+                      END IF
+                      JX = JX - INCX
+                      KK = KK - J
+   40             CONTINUE
+              END IF
+          ELSE
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 60 J = 1,N
+                      IF (X(J).NE.ZERO) THEN
+                          IF (NOUNIT) X(J) = X(J)/AP(KK)
+                          TEMP = X(J)
+                          K = KK + 1
+                          DO 50 I = J + 1,N
+                              X(I) = X(I) - TEMP*AP(K)
+                              K = K + 1
+   50                     CONTINUE
+                      END IF
+                      KK = KK + (N-J+1)
+   60             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 80 J = 1,N
+                      IF (X(JX).NE.ZERO) THEN
+                          IF (NOUNIT) X(JX) = X(JX)/AP(KK)
+                          TEMP = X(JX)
+                          IX = JX
+                          DO 70 K = KK + 1,KK + N - J
+                              IX = IX + INCX
+                              X(IX) = X(IX) - TEMP*AP(K)
+   70                     CONTINUE
+                      END IF
+                      JX = JX + INCX
+                      KK = KK + (N-J+1)
+   80             CONTINUE
+              END IF
+          END IF
+      ELSE
+*
+*        Form  x := inv( A' )*x  or  x := inv( conjg( A' ) )*x.
+*
+          IF (LSAME(UPLO,'U')) THEN
+              KK = 1
+              IF (INCX.EQ.1) THEN
+                  DO 110 J = 1,N
+                      TEMP = X(J)
+                      K = KK
+                      IF (NOCONJ) THEN
+                          DO 90 I = 1,J - 1
+                              TEMP = TEMP - AP(K)*X(I)
+                              K = K + 1
+   90                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/AP(KK+J-1)
+                      ELSE
+                          DO 100 I = 1,J - 1
+                              TEMP = TEMP - DCONJG(AP(K))*X(I)
+                              K = K + 1
+  100                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/DCONJG(AP(KK+J-1))
+                      END IF
+                      X(J) = TEMP
+                      KK = KK + J
+  110             CONTINUE
+              ELSE
+                  JX = KX
+                  DO 140 J = 1,N
+                      TEMP = X(JX)
+                      IX = KX
+                      IF (NOCONJ) THEN
+                          DO 120 K = KK,KK + J - 2
+                              TEMP = TEMP - AP(K)*X(IX)
+                              IX = IX + INCX
+  120                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/AP(KK+J-1)
+                      ELSE
+                          DO 130 K = KK,KK + J - 2
+                              TEMP = TEMP - DCONJG(AP(K))*X(IX)
+                              IX = IX + INCX
+  130                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/DCONJG(AP(KK+J-1))
+                      END IF
+                      X(JX) = TEMP
+                      JX = JX + INCX
+                      KK = KK + J
+  140             CONTINUE
+              END IF
+          ELSE
+              KK = (N* (N+1))/2
+              IF (INCX.EQ.1) THEN
+                  DO 170 J = N,1,-1
+                      TEMP = X(J)
+                      K = KK
+                      IF (NOCONJ) THEN
+                          DO 150 I = N,J + 1,-1
+                              TEMP = TEMP - AP(K)*X(I)
+                              K = K - 1
+  150                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/AP(KK-N+J)
+                      ELSE
+                          DO 160 I = N,J + 1,-1
+                              TEMP = TEMP - DCONJG(AP(K))*X(I)
+                              K = K - 1
+  160                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/DCONJG(AP(KK-N+J))
+                      END IF
+                      X(J) = TEMP
+                      KK = KK - (N-J+1)
+  170             CONTINUE
+              ELSE
+                  KX = KX + (N-1)*INCX
+                  JX = KX
+                  DO 200 J = N,1,-1
+                      TEMP = X(JX)
+                      IX = KX
+                      IF (NOCONJ) THEN
+                          DO 180 K = KK,KK - (N- (J+1)),-1
+                              TEMP = TEMP - AP(K)*X(IX)
+                              IX = IX - INCX
+  180                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/AP(KK-N+J)
+                      ELSE
+                          DO 190 K = KK,KK - (N- (J+1)),-1
+                              TEMP = TEMP - DCONJG(AP(K))*X(IX)
+                              IX = IX - INCX
+  190                     CONTINUE
+                          IF (NOUNIT) TEMP = TEMP/DCONJG(AP(KK-N+J))
+                      END IF
+                      X(JX) = TEMP
+                      JX = JX - INCX
+                      KK = KK - (N-J+1)
+  200             CONTINUE
+              END IF
+          END IF
+      END IF
+*
+      RETURN
+*
+*     End of ZTPSV .
+*
+      END
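ZTPSV is the matching triangular solve; for UPLO = 'U', TRANS = 'N', DIAG = 'N', INCX = 1 it is ordinary back-substitution over the packed columns. An illustrative sketch under the same indexing assumption (not the shipped routine):

    // Illustrative only: solve A*x = b in place, A upper triangular with a
    // non-unit diagonal, packed as above; b arrives in x and is overwritten.
    #include <complex>
    #include <vector>

    using cplx = std::complex<double>;

    void packed_upper_trsv(int n, const std::vector<cplx>& ap, std::vector<cplx>& x)
    {
        for (int j = n - 1; j >= 0; --j) {
            const int kk = j * (j + 1) / 2;      // start of column j in ap
            x[j] /= ap[kk + j];                  // divide by the diagonal (DIAG='N')
            const cplx temp = x[j];
            for (int i = 0; i < j; ++i)
                x[i] -= temp * ap[kk + i];       // eliminate column j from the rows above
        }
    }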
diff --git a/resources/3rdparty/eigen/cmake/CMakeDetermineVSServicePack.cmake b/resources/3rdParty/eigen/cmake/CMakeDetermineVSServicePack.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/CMakeDetermineVSServicePack.cmake
rename to resources/3rdParty/eigen/cmake/CMakeDetermineVSServicePack.cmake
diff --git a/resources/3rdparty/eigen/cmake/EigenConfigureTesting.cmake b/resources/3rdParty/eigen/cmake/EigenConfigureTesting.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/EigenConfigureTesting.cmake
rename to resources/3rdParty/eigen/cmake/EigenConfigureTesting.cmake
diff --git a/resources/3rdparty/eigen/cmake/EigenDetermineOSVersion.cmake b/resources/3rdParty/eigen/cmake/EigenDetermineOSVersion.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/EigenDetermineOSVersion.cmake
rename to resources/3rdParty/eigen/cmake/EigenDetermineOSVersion.cmake
diff --git a/resources/3rdparty/eigen/cmake/EigenTesting.cmake b/resources/3rdParty/eigen/cmake/EigenTesting.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/EigenTesting.cmake
rename to resources/3rdParty/eigen/cmake/EigenTesting.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindAdolc.cmake b/resources/3rdParty/eigen/cmake/FindAdolc.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindAdolc.cmake
rename to resources/3rdParty/eigen/cmake/FindAdolc.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindBLAS.cmake b/resources/3rdParty/eigen/cmake/FindBLAS.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindBLAS.cmake
rename to resources/3rdParty/eigen/cmake/FindBLAS.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindCholmod.cmake b/resources/3rdParty/eigen/cmake/FindCholmod.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindCholmod.cmake
rename to resources/3rdParty/eigen/cmake/FindCholmod.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindEigen2.cmake b/resources/3rdParty/eigen/cmake/FindEigen2.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindEigen2.cmake
rename to resources/3rdParty/eigen/cmake/FindEigen2.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindEigen3.cmake b/resources/3rdParty/eigen/cmake/FindEigen3.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindEigen3.cmake
rename to resources/3rdParty/eigen/cmake/FindEigen3.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindFFTW.cmake b/resources/3rdParty/eigen/cmake/FindFFTW.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindFFTW.cmake
rename to resources/3rdParty/eigen/cmake/FindFFTW.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindGLEW.cmake b/resources/3rdParty/eigen/cmake/FindGLEW.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindGLEW.cmake
rename to resources/3rdParty/eigen/cmake/FindGLEW.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindGMP.cmake b/resources/3rdParty/eigen/cmake/FindGMP.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindGMP.cmake
rename to resources/3rdParty/eigen/cmake/FindGMP.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindGSL.cmake b/resources/3rdParty/eigen/cmake/FindGSL.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindGSL.cmake
rename to resources/3rdParty/eigen/cmake/FindGSL.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindGoogleHash.cmake b/resources/3rdParty/eigen/cmake/FindGoogleHash.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindGoogleHash.cmake
rename to resources/3rdParty/eigen/cmake/FindGoogleHash.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindLAPACK.cmake b/resources/3rdParty/eigen/cmake/FindLAPACK.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindLAPACK.cmake
rename to resources/3rdParty/eigen/cmake/FindLAPACK.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindMPFR.cmake b/resources/3rdParty/eigen/cmake/FindMPFR.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindMPFR.cmake
rename to resources/3rdParty/eigen/cmake/FindMPFR.cmake
diff --git a/resources/3rdParty/eigen/cmake/FindMetis.cmake b/resources/3rdParty/eigen/cmake/FindMetis.cmake
new file mode 100644
index 000000000..e4d6ef258
--- /dev/null
+++ b/resources/3rdParty/eigen/cmake/FindMetis.cmake
@@ -0,0 +1,24 @@
+# Pastix requires METIS (partitioning and reordering tools)
+
+if (METIS_INCLUDES AND METIS_LIBRARIES)
+  set(METIS_FIND_QUIETLY TRUE)
+endif (METIS_INCLUDES AND METIS_LIBRARIES)
+
+find_path(METIS_INCLUDES 
+  NAMES 
+  metis.h 
+  PATHS 
+  $ENV{METISDIR} 
+  ${INCLUDE_INSTALL_DIR} 
+  PATH_SUFFIXES 
+  metis
+)
+
+
+find_library(METIS_LIBRARIES metis PATHS $ENV{METISDIR} ${LIB_INSTALL_DIR})
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(METIS DEFAULT_MSG
+                                  METIS_INCLUDES METIS_LIBRARIES)
+
+mark_as_advanced(METIS_INCLUDES METIS_LIBRARIES)
diff --git a/resources/3rdparty/eigen/cmake/FindPastix.cmake b/resources/3rdParty/eigen/cmake/FindPastix.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindPastix.cmake
rename to resources/3rdParty/eigen/cmake/FindPastix.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindScotch.cmake b/resources/3rdParty/eigen/cmake/FindScotch.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindScotch.cmake
rename to resources/3rdParty/eigen/cmake/FindScotch.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindStandardMathLibrary.cmake b/resources/3rdParty/eigen/cmake/FindStandardMathLibrary.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindStandardMathLibrary.cmake
rename to resources/3rdParty/eigen/cmake/FindStandardMathLibrary.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindSuperLU.cmake b/resources/3rdParty/eigen/cmake/FindSuperLU.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindSuperLU.cmake
rename to resources/3rdParty/eigen/cmake/FindSuperLU.cmake
diff --git a/resources/3rdparty/eigen/cmake/FindUmfpack.cmake b/resources/3rdParty/eigen/cmake/FindUmfpack.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/FindUmfpack.cmake
rename to resources/3rdParty/eigen/cmake/FindUmfpack.cmake
diff --git a/resources/3rdparty/eigen/cmake/RegexUtils.cmake b/resources/3rdParty/eigen/cmake/RegexUtils.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/RegexUtils.cmake
rename to resources/3rdParty/eigen/cmake/RegexUtils.cmake
diff --git a/resources/3rdparty/eigen/cmake/language_support.cmake b/resources/3rdParty/eigen/cmake/language_support.cmake
similarity index 100%
rename from resources/3rdparty/eigen/cmake/language_support.cmake
rename to resources/3rdParty/eigen/cmake/language_support.cmake
diff --git a/resources/3rdparty/eigen/debug/gdb/__init__.py b/resources/3rdParty/eigen/debug/gdb/__init__.py
similarity index 100%
rename from resources/3rdparty/eigen/debug/gdb/__init__.py
rename to resources/3rdParty/eigen/debug/gdb/__init__.py
diff --git a/resources/3rdparty/eigen/debug/gdb/printers.py b/resources/3rdParty/eigen/debug/gdb/printers.py
similarity index 100%
rename from resources/3rdparty/eigen/debug/gdb/printers.py
rename to resources/3rdParty/eigen/debug/gdb/printers.py
diff --git a/resources/3rdparty/eigen/debug/msvc/eigen_autoexp_part.dat b/resources/3rdParty/eigen/debug/msvc/eigen_autoexp_part.dat
similarity index 100%
rename from resources/3rdparty/eigen/debug/msvc/eigen_autoexp_part.dat
rename to resources/3rdParty/eigen/debug/msvc/eigen_autoexp_part.dat
diff --git a/resources/3rdparty/eigen/demos/CMakeLists.txt b/resources/3rdParty/eigen/demos/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/demos/CMakeLists.txt
rename to resources/3rdParty/eigen/demos/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/demos/mandelbrot/CMakeLists.txt b/resources/3rdParty/eigen/demos/mandelbrot/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/demos/mandelbrot/CMakeLists.txt
rename to resources/3rdParty/eigen/demos/mandelbrot/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/demos/mandelbrot/README b/resources/3rdParty/eigen/demos/mandelbrot/README
similarity index 100%
rename from resources/3rdparty/eigen/demos/mandelbrot/README
rename to resources/3rdParty/eigen/demos/mandelbrot/README
diff --git a/resources/3rdparty/eigen/demos/mandelbrot/mandelbrot.cpp b/resources/3rdParty/eigen/demos/mandelbrot/mandelbrot.cpp
similarity index 100%
rename from resources/3rdparty/eigen/demos/mandelbrot/mandelbrot.cpp
rename to resources/3rdParty/eigen/demos/mandelbrot/mandelbrot.cpp
diff --git a/resources/3rdparty/eigen/demos/mandelbrot/mandelbrot.h b/resources/3rdParty/eigen/demos/mandelbrot/mandelbrot.h
similarity index 100%
rename from resources/3rdparty/eigen/demos/mandelbrot/mandelbrot.h
rename to resources/3rdParty/eigen/demos/mandelbrot/mandelbrot.h
diff --git a/resources/3rdparty/eigen/demos/mix_eigen_and_c/README b/resources/3rdParty/eigen/demos/mix_eigen_and_c/README
similarity index 100%
rename from resources/3rdparty/eigen/demos/mix_eigen_and_c/README
rename to resources/3rdParty/eigen/demos/mix_eigen_and_c/README
diff --git a/resources/3rdparty/eigen/demos/mix_eigen_and_c/binary_library.cpp b/resources/3rdParty/eigen/demos/mix_eigen_and_c/binary_library.cpp
similarity index 100%
rename from resources/3rdparty/eigen/demos/mix_eigen_and_c/binary_library.cpp
rename to resources/3rdParty/eigen/demos/mix_eigen_and_c/binary_library.cpp
diff --git a/resources/3rdparty/eigen/demos/mix_eigen_and_c/binary_library.h b/resources/3rdParty/eigen/demos/mix_eigen_and_c/binary_library.h
similarity index 100%
rename from resources/3rdparty/eigen/demos/mix_eigen_and_c/binary_library.h
rename to resources/3rdParty/eigen/demos/mix_eigen_and_c/binary_library.h
diff --git a/resources/3rdparty/eigen/demos/mix_eigen_and_c/example.c b/resources/3rdParty/eigen/demos/mix_eigen_and_c/example.c
similarity index 100%
rename from resources/3rdparty/eigen/demos/mix_eigen_and_c/example.c
rename to resources/3rdParty/eigen/demos/mix_eigen_and_c/example.c
diff --git a/resources/3rdParty/eigen/demos/opengl/CMakeLists.txt b/resources/3rdParty/eigen/demos/opengl/CMakeLists.txt
new file mode 100644
index 000000000..299aa441d
--- /dev/null
+++ b/resources/3rdParty/eigen/demos/opengl/CMakeLists.txt
@@ -0,0 +1,28 @@
+find_package(Qt4)
+find_package(OpenGL)
+
+if(QT4_FOUND AND OPENGL_FOUND)
+
+  set(QT_USE_QTOPENGL TRUE)
+  include(${QT_USE_FILE})
+
+  set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
+  include_directories( ${QT_INCLUDE_DIR} )
+
+  set(quaternion_demo_SRCS  gpuhelper.cpp icosphere.cpp camera.cpp trackball.cpp quaternion_demo.cpp)
+
+  qt4_automoc(${quaternion_demo_SRCS})
+
+  add_executable(quaternion_demo ${quaternion_demo_SRCS})
+  add_dependencies(demos quaternion_demo)
+
+  target_link_libraries(quaternion_demo
+    ${QT_QTCORE_LIBRARY}    ${QT_QTGUI_LIBRARY}
+    ${QT_QTOPENGL_LIBRARY}  ${OPENGL_LIBRARIES} )
+
+else()
+
+  message(STATUS "OpenGL demo disabled because Qt4 and/or OpenGL have not been found.")
+
+endif()
\ No newline at end of file
diff --git a/resources/3rdparty/eigen/demos/opengl/README b/resources/3rdParty/eigen/demos/opengl/README
similarity index 100%
rename from resources/3rdparty/eigen/demos/opengl/README
rename to resources/3rdParty/eigen/demos/opengl/README
diff --git a/resources/3rdparty/eigen/demos/opengl/camera.cpp b/resources/3rdParty/eigen/demos/opengl/camera.cpp
similarity index 100%
rename from resources/3rdparty/eigen/demos/opengl/camera.cpp
rename to resources/3rdParty/eigen/demos/opengl/camera.cpp
diff --git a/resources/3rdparty/eigen/demos/opengl/camera.h b/resources/3rdParty/eigen/demos/opengl/camera.h
similarity index 100%
rename from resources/3rdparty/eigen/demos/opengl/camera.h
rename to resources/3rdParty/eigen/demos/opengl/camera.h
diff --git a/resources/3rdparty/eigen/demos/opengl/gpuhelper.cpp b/resources/3rdParty/eigen/demos/opengl/gpuhelper.cpp
similarity index 100%
rename from resources/3rdparty/eigen/demos/opengl/gpuhelper.cpp
rename to resources/3rdParty/eigen/demos/opengl/gpuhelper.cpp
diff --git a/resources/3rdparty/eigen/demos/opengl/gpuhelper.h b/resources/3rdParty/eigen/demos/opengl/gpuhelper.h
similarity index 100%
rename from resources/3rdparty/eigen/demos/opengl/gpuhelper.h
rename to resources/3rdParty/eigen/demos/opengl/gpuhelper.h
diff --git a/resources/3rdparty/eigen/demos/opengl/icosphere.cpp b/resources/3rdParty/eigen/demos/opengl/icosphere.cpp
similarity index 100%
rename from resources/3rdparty/eigen/demos/opengl/icosphere.cpp
rename to resources/3rdParty/eigen/demos/opengl/icosphere.cpp
diff --git a/resources/3rdparty/eigen/demos/opengl/icosphere.h b/resources/3rdParty/eigen/demos/opengl/icosphere.h
similarity index 100%
rename from resources/3rdparty/eigen/demos/opengl/icosphere.h
rename to resources/3rdParty/eigen/demos/opengl/icosphere.h
diff --git a/resources/3rdparty/eigen/demos/opengl/quaternion_demo.cpp b/resources/3rdParty/eigen/demos/opengl/quaternion_demo.cpp
similarity index 100%
rename from resources/3rdparty/eigen/demos/opengl/quaternion_demo.cpp
rename to resources/3rdParty/eigen/demos/opengl/quaternion_demo.cpp
diff --git a/resources/3rdparty/eigen/demos/opengl/quaternion_demo.h b/resources/3rdParty/eigen/demos/opengl/quaternion_demo.h
similarity index 100%
rename from resources/3rdparty/eigen/demos/opengl/quaternion_demo.h
rename to resources/3rdParty/eigen/demos/opengl/quaternion_demo.h
diff --git a/resources/3rdparty/eigen/demos/opengl/trackball.cpp b/resources/3rdParty/eigen/demos/opengl/trackball.cpp
similarity index 100%
rename from resources/3rdparty/eigen/demos/opengl/trackball.cpp
rename to resources/3rdParty/eigen/demos/opengl/trackball.cpp
diff --git a/resources/3rdparty/eigen/demos/opengl/trackball.h b/resources/3rdParty/eigen/demos/opengl/trackball.h
similarity index 100%
rename from resources/3rdparty/eigen/demos/opengl/trackball.h
rename to resources/3rdParty/eigen/demos/opengl/trackball.h
diff --git a/resources/3rdparty/eigen/doc/A05_PortingFrom2To3.dox b/resources/3rdParty/eigen/doc/A05_PortingFrom2To3.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/A05_PortingFrom2To3.dox
rename to resources/3rdParty/eigen/doc/A05_PortingFrom2To3.dox
diff --git a/resources/3rdparty/eigen/doc/A10_Eigen2SupportModes.dox b/resources/3rdParty/eigen/doc/A10_Eigen2SupportModes.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/A10_Eigen2SupportModes.dox
rename to resources/3rdParty/eigen/doc/A10_Eigen2SupportModes.dox
diff --git a/resources/3rdparty/eigen/doc/AsciiQuickReference.txt b/resources/3rdParty/eigen/doc/AsciiQuickReference.txt
similarity index 100%
rename from resources/3rdparty/eigen/doc/AsciiQuickReference.txt
rename to resources/3rdParty/eigen/doc/AsciiQuickReference.txt
diff --git a/resources/3rdparty/eigen/doc/B01_Experimental.dox b/resources/3rdParty/eigen/doc/B01_Experimental.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/B01_Experimental.dox
rename to resources/3rdParty/eigen/doc/B01_Experimental.dox
diff --git a/resources/3rdparty/eigen/doc/C00_QuickStartGuide.dox b/resources/3rdParty/eigen/doc/C00_QuickStartGuide.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/C00_QuickStartGuide.dox
rename to resources/3rdParty/eigen/doc/C00_QuickStartGuide.dox
diff --git a/resources/3rdparty/eigen/doc/C01_TutorialMatrixClass.dox b/resources/3rdParty/eigen/doc/C01_TutorialMatrixClass.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/C01_TutorialMatrixClass.dox
rename to resources/3rdParty/eigen/doc/C01_TutorialMatrixClass.dox
diff --git a/resources/3rdparty/eigen/doc/C02_TutorialMatrixArithmetic.dox b/resources/3rdParty/eigen/doc/C02_TutorialMatrixArithmetic.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/C02_TutorialMatrixArithmetic.dox
rename to resources/3rdParty/eigen/doc/C02_TutorialMatrixArithmetic.dox
diff --git a/resources/3rdparty/eigen/doc/C03_TutorialArrayClass.dox b/resources/3rdParty/eigen/doc/C03_TutorialArrayClass.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/C03_TutorialArrayClass.dox
rename to resources/3rdParty/eigen/doc/C03_TutorialArrayClass.dox
diff --git a/resources/3rdparty/eigen/doc/C04_TutorialBlockOperations.dox b/resources/3rdParty/eigen/doc/C04_TutorialBlockOperations.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/C04_TutorialBlockOperations.dox
rename to resources/3rdParty/eigen/doc/C04_TutorialBlockOperations.dox
diff --git a/resources/3rdparty/eigen/doc/C05_TutorialAdvancedInitialization.dox b/resources/3rdParty/eigen/doc/C05_TutorialAdvancedInitialization.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/C05_TutorialAdvancedInitialization.dox
rename to resources/3rdParty/eigen/doc/C05_TutorialAdvancedInitialization.dox
diff --git a/resources/3rdparty/eigen/doc/C06_TutorialLinearAlgebra.dox b/resources/3rdParty/eigen/doc/C06_TutorialLinearAlgebra.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/C06_TutorialLinearAlgebra.dox
rename to resources/3rdParty/eigen/doc/C06_TutorialLinearAlgebra.dox
diff --git a/resources/3rdparty/eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox b/resources/3rdParty/eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox
rename to resources/3rdParty/eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox
diff --git a/resources/3rdparty/eigen/doc/C08_TutorialGeometry.dox b/resources/3rdParty/eigen/doc/C08_TutorialGeometry.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/C08_TutorialGeometry.dox
rename to resources/3rdParty/eigen/doc/C08_TutorialGeometry.dox
diff --git a/resources/3rdParty/eigen/doc/C09_TutorialSparse.dox b/resources/3rdParty/eigen/doc/C09_TutorialSparse.dox
new file mode 100644
index 000000000..34154bd0d
--- /dev/null
+++ b/resources/3rdParty/eigen/doc/C09_TutorialSparse.dox
@@ -0,0 +1,455 @@
+namespace Eigen {
+
+/** \page TutorialSparse Tutorial page 9 - Sparse Matrix
+    \ingroup Tutorial
+
+\li \b Previous: \ref TutorialGeometry
+\li \b Next: \ref TutorialMapClass
+
+\b Table \b of \b contents \n
+  - \ref TutorialSparseIntro
+  - \ref TutorialSparseExample "Example"
+  - \ref TutorialSparseSparseMatrix
+  - \ref TutorialSparseFilling
+  - \ref TutorialSparseDirectSolvers
+  - \ref TutorialSparseFeatureSet
+    - \ref TutorialSparse_BasicOps
+    - \ref TutorialSparse_Products
+    - \ref TutorialSparse_TriangularSelfadjoint
+    - \ref TutorialSparse_Submat
+
+
+<hr>
+
+Manipulating and solving sparse problems involves various modules which are summarized below:
+
+<table class="manual">
+<tr><th>Module</th><th>Header file</th><th>Contents</th></tr>
+<tr><td>\link Sparse_Module SparseCore \endlink</td><td>\code#include <Eigen/SparseCore>\endcode</td><td>SparseMatrix and SparseVector classes, matrix assembly, basic sparse linear algebra (including sparse triangular solvers)</td></tr>
+<tr><td>\link SparseCholesky_Module SparseCholesky \endlink</td><td>\code#include <Eigen/SparseCholesky>\endcode</td><td>Direct sparse LLT and LDLT Cholesky factorization to solve sparse self-adjoint positive definite problems</td></tr>
+<tr><td>\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlink</td><td>\code#include <Eigen/IterativeLinearSolvers>\endcode</td><td>Iterative solvers to solve large general linear square problems (including self-adjoint positive definite problems)</td></tr>
+<tr><td></td><td>\code#include <Eigen/Sparse>\endcode</td><td>Includes all the above modules</td></tr>
+</table>
+
+\section TutorialSparseIntro Sparse matrix representation
+
+In many applications (e.g., finite element methods) it is common to deal with very large matrices where only a few coefficients are different from zero.  In such cases, memory consumption can be reduced and performance increased by using a specialized representation storing only the nonzero coefficients. Such a matrix is called a sparse matrix.
+
+\b The \b %SparseMatrix \b class
+
+The class SparseMatrix is the main sparse matrix representation of Eigen's sparse module; it offers high performance and low memory usage.
+It implements a more versatile variant of the widely-used Compressed Column (or Row) Storage scheme.
+It consists of four compact arrays:
+ - \c Values: stores the coefficient values of the non-zeros.
+ - \c InnerIndices: stores the row (resp. column) indices of the non-zeros.
+ - \c OuterStarts: stores for each column (resp. row) the index of the first non-zero in the previous two arrays.
+ - \c InnerNNZs: stores the number of non-zeros of each column (resp. row).
+The word \c inner refers to an \em inner \em vector that is a column for a column-major matrix, or a row for a row-major matrix.
+The word \c outer refers to the other direction.
+
+This storage scheme is best explained with an example. The following matrix
+<table class="manual">
+<tr><td> 0</td><td>3</td><td> 0</td><td>0</td><td> 0</td></tr>
+<tr><td>22</td><td>0</td><td> 0</td><td>0</td><td>17</td></tr>
+<tr><td> 7</td><td>5</td><td> 0</td><td>1</td><td> 0</td></tr>
+<tr><td> 0</td><td>0</td><td> 0</td><td>0</td><td> 0</td></tr>
+<tr><td> 0</td><td>0</td><td>14</td><td>0</td><td> 8</td></tr>
+</table>
+
+and one of its possible sparse, \b column \b major representations:
+<table class="manual">
+<tr><td>Values:</td>        <td>22</td><td>7</td><td>_</td><td>3</td><td>5</td><td>14</td><td>_</td><td>_</td><td>1</td><td>_</td><td>17</td><td>8</td></tr>
+<tr><td>InnerIndices:</td>  <td> 1</td><td>2</td><td>_</td><td>0</td><td>2</td><td> 4</td><td>_</td><td>_</td><td>2</td><td>_</td><td> 1</td><td>4</td></tr>
+</table>
+<table class="manual">
+<tr><td>OuterStarts:</td><td>0</td><td>3</td><td>5</td><td>8</td><td>10</td><td>\em 12 </td></tr>
+<tr><td>InnerNNZs:</td>    <td>2</td><td>2</td><td>1</td><td>1</td><td> 2</td><td></td></tr>
+</table>
+
+Currently, the elements of a given inner vector are guaranteed to be sorted by increasing inner indices.
+The \c "_" indicates available free space to quickly insert new elements.
+Assuming no reallocation is needed, the insertion of a random element is therefore in O(nnz_j) where nnz_j is the number of nonzeros of the respective inner vector.
+On the other hand, inserting elements with increasing inner indices in a given inner vector is much more efficient since this only requires increasing the respective \c InnerNNZs entry, which is an O(1) operation.
+
+The case where no empty space is available is a special case, and is referred to as the \em compressed mode.
+It corresponds to the widely used Compressed Column (or Row) Storage schemes (CCS or CRS).
+Any SparseMatrix can be turned into this form by calling the SparseMatrix::makeCompressed() function.
+In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because of the equality: \c InnerNNZs[j] = \c OuterStarts[j+1]-\c OuterStarts[j].
+Therefore, in practice a call to SparseMatrix::makeCompressed() frees this buffer.
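+
+For illustration, here is a minimal sketch (variable names are ours) of switching a matrix to the compressed mode:
+\code
+SparseMatrix<double> mat(1000,1000);
+// ... insert some non-zeros, possibly leaving free space in the inner vectors ...
+mat.makeCompressed();          // squeezes out the free space and drops the InnerNNZs buffer
+bool c = mat.isCompressed();   // now returns true
+\endcode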
+
+It is worth noting that most of our wrappers to external libraries require compressed matrices as input.
+
+%Eigen's operations always produce \b compressed sparse matrices.
+On the other hand, the insertion of a new element into a SparseMatrix converts the latter to the \b uncompressed mode.
+
+Here is the previous matrix represented in compressed mode:
+<table class="manual">
+<tr><td>Values:</td>        <td>22</td><td>7</td><td>3</td><td>5</td><td>14</td><td>1</td><td>17</td><td>8</td></tr>
+<tr><td>InnerIndices:</td>  <td> 1</td><td>2</td><td>0</td><td>2</td><td> 4</td><td>2</td><td> 1</td><td>4</td></tr>
+</table>
+<table class="manual">
+<tr><td>OuterStarts:</td><td>0</td><td>2</td><td>4</td><td>5</td><td>6</td><td>\em 8 </td></tr>
+</table>
+
+A SparseVector is a special case of a SparseMatrix where only the \c Values and \c InnerIndices arrays are stored.
+There is no notion of compressed/uncompressed mode for a SparseVector.
+
+
+\section TutorialSparseExample First example
+
+Before describing each individual class, let's start with the following typical example: solving the Laplace equation \f$ \Delta u = 0 \f$ on a regular 2D grid using a finite difference scheme and Dirichlet boundary conditions.
+Such a problem can be mathematically expressed as a linear problem of the form \f$ Ax=b \f$ where \f$ x \f$ is the vector of \c m unknowns (in our case, the values of the pixels), \f$ b \f$ is the right hand side vector resulting from the boundary conditions, and \f$ A \f$ is an \f$ m \times m \f$ matrix containing only a few non-zero elements resulting from the discretization of the Laplacian operator.
+
+<table class="manual">
+<tr><td>
+\include Tutorial_sparse_example.cpp
+</td>
+<td>
+\image html Tutorial_sparse_example.jpeg
+</td></tr></table>
+
+In this example, we start by defining a column-major sparse matrix type of double \c SparseMatrix<double>, and a triplet list of the same scalar type \c  Triplet<double>. A triplet is a simple object representing a non-zero entry as the triplet: \c row index, \c column index, \c value.
+
+In the main function, we declare a list \c coefficients of triplets (as a std vector) and the right hand side vector \f$ b \f$ which are filled by the \a buildProblem function.
+The raw and flat list of non-zero entries is then converted to a true SparseMatrix object \c A.
+Note that the elements of the list do not have to be sorted, and possible duplicate entries will be summed up.
+
+The last step consists of effectively solving the assembled problem.
+Since the resulting matrix \c A is symmetric by construction, we can perform a direct Cholesky factorization via the SimplicialLDLT class which behaves like its LDLT counterpart for dense objects.
+
+The resulting vector \c x contains the pixel values as a 1D array which is saved to a jpeg file shown on the right of the code above.
+
+Describing the \a buildProblem and \a save functions is beyond the scope of this tutorial. They are given \ref TutorialSparse_example_details "here" for the curious reader and for reproducibility.
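+
+In a condensed form (a sketch rather than the complete example file, with \c m denoting the number of unknowns), the assembly and solve steps described above amount to:
+\code
+typedef Eigen::Triplet<double> T;
+std::vector<T> coefficients;               // list of non-zero coefficients
+VectorXd b(m);                             // right hand side vector
+// ... fill coefficients and b, as done by buildProblem ...
+SparseMatrix<double> A(m,m);
+A.setFromTriplets(coefficients.begin(), coefficients.end());
+SimplicialLDLT<SparseMatrix<double> > chol(A);   // performs a Cholesky factorization of A
+VectorXd x = chol.solve(b);                // uses the factorization to solve for the given right hand side
+\endcode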
+
+
+
+
+\section TutorialSparseSparseMatrix The SparseMatrix class
+
+\b %Matrix \b and \b vector \b properties \n
+
+The SparseMatrix and SparseVector classes take three template arguments:
+ * the scalar type (e.g., double)
+ * the storage order (ColMajor or RowMajor, the default is ColMajor)
+ * the inner index type (default is \c int).
+
+As for dense Matrix objects, constructors take the size of the object.
+Here are some examples:
+
+\code
+SparseMatrix<std::complex<float> > mat(1000,2000);         // declares a 1000x2000 column-major compressed sparse matrix of complex<float>
+SparseMatrix<double,RowMajor> mat(1000,2000);              // declares a 1000x2000 row-major compressed sparse matrix of double
+SparseVector<std::complex<float> > vec(1000);              // declares a column sparse vector of complex<float> of size 1000
+SparseVector<double,RowMajor> vec(1000);                   // declares a row sparse vector of double of size 1000
+\endcode
+
+In the rest of the tutorial, \c mat and \c vec represent any sparse-matrix and sparse-vector objects, respectively.
+
+The dimensions of a matrix can be queried using the following functions:
+<table class="manual">
+<tr><td>Standard \n dimensions</td><td>\code
+mat.rows()
+mat.cols()\endcode</td>
+<td>\code
+vec.size() \endcode</td>
+</tr>
+<tr><td>Sizes along the \n inner/outer dimensions</td><td>\code
+mat.innerSize()
+mat.outerSize()\endcode</td>
+<td></td>
+</tr>
+<tr><td>Number of non \n zero coefficients</td><td>\code
+mat.nonZeros() \endcode</td>
+<td>\code
+vec.nonZeros() \endcode</td></tr>
+</table>
+
+
+\b Iterating \b over \b the \b nonzero \b coefficients \n
+
+Random access to the elements of a sparse object can be done through the \c coeffRef(i,j) function.
+However, this function involves a quite expensive binary search.
+In most cases, one only wants to iterate over the non-zero elements. This is achieved by a standard loop over the outer dimension, and then by iterating over the non-zeros of the current inner vector via an InnerIterator. Thus, the non-zero entries have to be visited in the same order as the storage order.
+Here is an example:
+<table class="manual">
+<tr><td>
+\code
+SparseMatrix<double> mat(rows,cols);
+for (int k=0; k<mat.outerSize(); ++k)
+  for (SparseMatrix<double>::InnerIterator it(mat,k); it; ++it)
+  {
+    it.value();
+    it.row();   // row index
+    it.col();   // col index (here it is equal to k)
+    it.index(); // inner index, here it is equal to it.row()
+  }
+\endcode
+</td><td>
+\code
+SparseVector<double> vec(size);
+for (SparseVector<double>::InnerIterator it(vec); it; ++it)
+{
+  it.value(); // == vec[ it.index() ]
+  it.index();
+}
+\endcode
+</td></tr>
+</table>
+For a writable expression, the referenced value can be modified using the valueRef() function.
+If the type of the sparse matrix or vector depends on a template parameter, then the \c typename keyword is
+required to indicate that \c InnerIterator denotes a type; see \ref TopicTemplateKeyword for details.
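+
+For instance, a generic function iterating over any sparse matrix type could be sketched as follows (the function name is ours):
+\code
+template<typename SparseMatrixType>
+typename SparseMatrixType::Scalar sumOfNonZeros(const SparseMatrixType& mat)
+{
+  typename SparseMatrixType::Scalar sum = 0;
+  for (int k=0; k<mat.outerSize(); ++k)
+    for (typename SparseMatrixType::InnerIterator it(mat,k); it; ++it)  // note the typename keyword
+      sum += it.value();
+  return sum;
+}
+\endcode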
+
+
+\section TutorialSparseFilling Filling a sparse matrix
+
+Because of the special storage scheme of a SparseMatrix, special care has to be taken when adding new nonzero entries.
+For instance, the cost of a single purely random insertion into a SparseMatrix is \c O(nnz), where \c nnz is the current number of non-zero coefficients.
+
+The simplest way to create a sparse matrix while guaranteeing good performance is thus to first build a list of so-called \em triplets, and then convert it to a SparseMatrix.
+
+Here is a typical usage example:
+\code
+typedef Eigen::Triplet<double> T;
+std::vector<T> tripletList;
+tripletList.reserve(estimation_of_entries);
+for(...)
+{
+  // ...
+  tripletList.push_back(T(i,j,v_ij));
+}
+SparseMatrixType mat(rows,cols);
+mat.setFromTriplets(tripletList.begin(), tripletList.end());
+// mat is ready to go!
+\endcode
+The \c std::vector of triplets might contain the elements in arbitrary order, and might even contain duplicated elements that will be summed up by setFromTriplets().
+See the SparseMatrix::setFromTriplets() function and class Triplet for more details.
+
+
+In some cases, however, slightly higher performance and lower memory consumption can be achieved by directly inserting the non-zeros into the destination matrix.
+A typical scenario of this approach is illustrated below:
+\code
+1: SparseMatrix<double> mat(rows,cols);         // default is column major
+2: mat.reserve(VectorXi::Constant(cols,6));
+3: for each i,j such that v_ij != 0
+4:   mat.insert(i,j) = v_ij;                    // alternative: mat.coeffRef(i,j) += v_ij;
+5: mat.makeCompressed();                        // optional
+\endcode
+
+- The key ingredient here is line 2, where we reserve room for 6 non-zeros per column. In many cases, the number of non-zeros per column or row can easily be known in advance. If it varies significantly for each inner vector, then it is possible to specify a reserve size for each inner vector by providing a vector object with an operator[](int j) returning the reserve size of the \c j-th inner vector (e.g., via a VectorXi or std::vector<int>); a short sketch is given after this list. If only a rough estimate of the number of nonzeros per inner vector can be obtained, it is highly recommended to overestimate it rather than the opposite. If this line is omitted, then the first insertion of a new element will reserve room for 2 elements per inner vector.
+- Line 4 performs a sorted insertion. In this example, the ideal case is when the \c j-th column is not full and contains non-zeros whose inner indices are smaller than \c i. In this case, this operation boils down to a trivial O(1) operation.
+- When calling insert(i,j), the element (i,j) must not already exist; otherwise use the coeffRef(i,j) method, which allows one to, e.g., accumulate values. This method first performs a binary search and finally calls insert(i,j) if the element does not already exist. It is more flexible than insert() but also more costly.
+- Line 5 suppresses the remaining empty space and transforms the matrix into a compressed column storage.
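+
+Here is a minimal sketch of the per-inner-vector reserve mentioned in the first item (\c nnz_per_col is an assumed, user-provided estimate):
+\code
+SparseMatrix<double> mat(rows,cols);          // column major
+VectorXi nnz_per_col(cols);
+// ... fill nnz_per_col with an estimate of the number of non-zeros of each column ...
+mat.reserve(nnz_per_col);                     // one reserve size per inner vector
+\endcode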
+
+
+\section TutorialSparseDirectSolvers Solving linear problems
+
+%Eigen currently provides a limited set of built-in solvers, as well as wrappers to external solver libraries.
+They are summarized in the following table:
+
+<table class="manual">
+<tr><th>Class</th><th>Module</th><th>Solver kind</th><th>Matrix kind</th><th>Features related to performance</th>
+    <th>Dependencies,License</th><th class="width20em"><p>Notes</p></th></tr>
+<tr><td>SimplicialLLT    </td><td>\link SparseCholesky_Module SparseCholesky \endlink</td><td>Direct LLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
+    <td>built-in, LGPL</td>
+    <td>SimplicialLDLT is often preferable</td></tr>
+<tr><td>SimplicialLDLT   </td><td>\link SparseCholesky_Module SparseCholesky \endlink</td><td>Direct LDLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
+    <td>built-in, LGPL</td>
+    <td>Recommended for very sparse and not too large problems (e.g., 2D Poisson eq.)</td></tr>
+<tr><td>ConjugateGradient</td><td>\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlink</td><td>Classic iterative CG</td><td>SPD</td><td>Preconditioning</td>
+    <td>built-in, LGPL</td>
+    <td>Recommended for large symmetric problems (e.g., 3D Poisson eq.)</td></tr>
+<tr><td>BiCGSTAB</td><td>\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlink</td><td>Iterative stabilized bi-conjugate gradient</td><td>Square</td><td>Preconditioning</td>
+    <td>built-in, LGPL</td>
+    <td>Might not always converge</td></tr>
+
+
+<tr><td>PastixLLT \n PastixLDLT \n PastixLU</td><td>\link PaStiXSupport_Module PaStiXSupport \endlink</td><td>Direct LLt, LDLt, LU factorizations</td><td>SPD \n SPD \n Square</td><td>Fill-in reducing, Leverage fast dense algebra, Multithreading</td>
+    <td>Requires the <a href="http://pastix.gforge.inria.fr">PaStiX</a> package, \b CeCILL-C </td>
+    <td>optimized for tough problems and symmetric patterns</td></tr>
+<tr><td>CholmodSupernodalLLT</td><td>\link CholmodSupport_Module CholmodSupport \endlink</td><td>Direct LLt factorization</td><td>SPD</td><td>Fill-in reducing, Leverage fast dense algebra</td>
+    <td>Requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td>
+    <td></td></tr>
+<tr><td>UmfPackLU</td><td>\link UmfPackSupport_Module UmfPackSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
+    <td>Requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td>
+    <td></td></tr>
+<tr><td>SuperLU</td><td>\link SuperLUSupport_Module SuperLUSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
+    <td>Requires the <a href="http://crd-legacy.lbl.gov/~xiaoye/SuperLU/">SuperLU</a> library, (BSD-like)</td>
+    <td></td></tr>
+</table>
+
+Here \c SPD means symmetric positive definite.
+
+All these solvers follow the same general concept.
+Here is a typical and general example:
+\code
+#include <Eigen/RequiredModuleName>
+// ...
+SparseMatrix<double> A;
+// fill A
+VectorXd b, x;
+// fill b
+// solve Ax = b
+SolverClassName<SparseMatrix<double> > solver;
+solver.compute(A);
+if(solver.info()!=Success) {
+  // decomposition failed
+  return;
+}
+x = solver.solve(b);
+if(solver.info()!=Success) {
+  // solving failed
+  return;
+}
+// solve for another right hand side:
+x1 = solver.solve(b1);
+\endcode
+
+For \c SPD solvers, a second optional template argument allows specifying which triangular part has to be used, e.g.:
+
+\code
+#include <Eigen/IterativeLinearSolvers>
+
+ConjugateGradient<SparseMatrix<double>, Eigen::Upper> solver;
+x = solver.compute(A).solve(b);
+\endcode
+In the above example, only the upper triangular part of the input matrix A is considered for solving. The opposite triangle might either be empty or contain arbitrary values.
+
+When multiple problems with the same sparsity pattern have to be solved, the "compute" step can be decomposed as follows:
+\code
+SolverClassName<SparseMatrix<double> > solver;
+solver.analyzePattern(A);   // for this step the numerical values of A are not used
+solver.factorize(A);
+x1 = solver.solve(b1);
+x2 = solver.solve(b2);
+...
+A = ...;                    // modify the values of the nonzeros of A; the nonzero pattern must stay unchanged
+solver.factorize(A);
+x1 = solver.solve(b1);
+x2 = solver.solve(b2);
+...
+\endcode
+The compute() method is equivalent to calling both analyzePattern() and factorize().
+
+Finally, each solver provides some specific features, such as the determinant, access to the factors, control of the iterations, and so on.
+More details are available in the documentation of the respective classes.
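+
+For instance, for the built-in iterative solvers, the maximal number of iterations and the tolerance can be set, and the achieved accuracy queried, as sketched below for ConjugateGradient:
+\code
+ConjugateGradient<SparseMatrix<double> > cg;
+cg.setMaxIterations(100);
+cg.setTolerance(1e-8);
+cg.compute(A);
+x = cg.solve(b);
+std::cout << "#iterations: " << cg.iterations() << ", estimated error: " << cg.error() << "\n";
+\endcode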
+
+
+\section TutorialSparseFeatureSet Supported operators and functions
+
+Because of their special storage format, sparse matrices cannot offer the same level of flexibility as dense matrices.
+In Eigen's sparse module we chose to expose only the subset of the dense matrix API which can be efficiently implemented.
+In the following \em sm denotes a sparse matrix, \em sv a sparse vector, \em dm a dense matrix, and \em dv a dense vector.
+
+\subsection TutorialSparse_BasicOps Basic operations
+
+%Sparse expressions support most of the unary and binary coefficient wise operations:
+\code
+sm1.real()   sm1.imag()   -sm1                    0.5*sm1
+sm1+sm2      sm1-sm2      sm1.cwiseProduct(sm2)
+\endcode
+However, a strong restriction is that the storage orders must match. For instance, in the following example:
+\code
+sm4 = sm1 + sm2 + sm3;
+\endcode
+sm1, sm2, and sm3 must all be row-major or all column major.
+On the other hand, there is no restriction on the target matrix sm4.
+For instance, this means that for computing \f$ A^T + A \f$, the matrix \f$ A^T \f$ must be evaluated into a temporary matrix of compatible storage order:
+\code
+SparseMatrix<double> A, B;
+B = SparseMatrix<double>(A.transpose()) + A;
+\endcode
+
+Binary coefficient wise operators can also mix sparse and dense expressions:
+\code
+sm2 = sm1.cwiseProduct(dm1);
+dm2 = sm1 + dm1;
+\endcode
+
+
+%Sparse expressions also support transposition:
+\code
+sm1 = sm2.transpose();
+sm1 = sm2.adjoint();
+\endcode
+However, there is no transposeInPlace() method.
+
+
+\subsection TutorialSparse_Products Matrix products
+
+%Eigen supports various kinds of sparse matrix products, which are summarized below:
+  - \b sparse-dense:
+    \code
+dv2 = sm1 * dv1;
+dm2 = dm1 * sm1.adjoint();
+dm2 = 2. * sm1 * dm1;
+    \endcode
+  - \b symmetric \b sparse-dense. The product of a sparse symmetric matrix with a dense matrix (or vector) can also be optimized by specifying the symmetry with selfadjointView():
+    \code
+dm2 = sm1.selfadjointView<>() * dm1;        // if all coefficients of A are stored
+dm2 = A.selfadjointView<Upper>() * dm1;     // if only the upper part of A is stored
+dm2 = A.selfadjointView<Lower>() * dm1;     // if only the lower part of A is stored
+    \endcode
+  - \b sparse-sparse. For sparse-sparse products, two different algorithms are available. The default one is conservative and preserves the explicit zeros that might appear:
+    \code
+sm3 = sm1 * sm2;
+sm3 = 4 * sm1.adjoint() * sm2;
+    \endcode
+    The second algorithm prunes on the fly the explicit zeros, or the values smaller than a given threshold. It is enabled and controlled through the prune() functions:
+    \code
+sm3 = (sm1 * sm2).prune();                  // removes numerical zeros
+sm3 = (sm1 * sm2).prune(ref);               // removes elements much smaller than ref
+sm3 = (sm1 * sm2).prune(ref,epsilon);       // removes elements smaller than ref*epsilon
+    \endcode
+
+  - \b permutations. Finally, permutations can be applied to sparse matrices too:
+    \code
+PermutationMatrix<Dynamic,Dynamic> P = ...;
+sm2 = P * sm1;
+sm2 = sm1 * P.inverse();
+sm2 = sm1.transpose() * P;
+    \endcode
+
+
+\subsection TutorialSparse_TriangularSelfadjoint Triangular and selfadjoint views
+
+Just as with dense matrices, the triangularView() function can be used to address a triangular part of the matrix, and perform triangular solves with a dense right hand side:
+\code
+dm2 = sm1.triangularView<Lower>().solve(dm1);
+dv2 = sm1.transpose().triangularView<Upper>().solve(dv1);
+\endcode
+
+The selfadjointView() function permits various operations:
+ - optimized sparse-dense matrix products:
+    \code
+dm2 = sm1.selfadjointView<>() * dm1;        // if all coefficients of A are stored
+dm2 = A.selfadjointView<Upper>() * dm1;     // if only the upper part of A is stored
+dm2 = A.selfadjointView<Lower>() * dm1;     // if only the lower part of A is stored
+    \endcode
+ - copy of triangular parts:
+    \code
+sm2 = sm1.selfadjointView<Upper>();                               // makes a full selfadjoint matrix from the upper triangular part
+sm2.selfadjointView<Lower>() = sm1.selfadjointView<Upper>();      // copies the upper triangular part to the lower triangular part
+    \endcode
+ - application of symmetric permutations:
+ \code
+PermutationMatrix<Dynamic,Dynamic> P = ...;
+sm2 = A.selfadjointView<Upper>().twistedBy(P);                                // compute P S P' from the upper triangular part of A, and make it a full matrix
+sm2.selfadjointView<Lower>() = A.selfadjointView<Lower>().twistedBy(P);       // compute P S P' from the lower triangular part of A, and then only compute the lower part
+ \endcode
+
+\subsection TutorialSparse_Submat Sub-matrices
+
+%Sparse matrices do not yet support addressing arbitrary sub-matrices. Currently, one can only reference a set of contiguous \em inner vectors, i.e., a set of contiguous rows for a row-major matrix, or a set of contiguous columns for a column-major matrix:
+\code
+  sm1.innerVector(j);       // returns an expression of the j-th column (resp. row) of the matrix if sm1 is col-major (resp. row-major)
+  sm1.innerVectors(j, nb);  // returns an expression of the nb columns (resp. rows) starting from the j-th column (resp. row)
+                            // of the matrix if sm1 is col-major (resp. row-major)
+  sm1.middleRows(j, nb);    // for row major matrices only, get a range of nb rows
+  sm1.middleCols(j, nb);    // for column major matrices only, get a range of nb columns
+\endcode
+
+\li \b Next: \ref TutorialMapClass
+
+*/
+
+}
diff --git a/resources/3rdparty/eigen/doc/C10_TutorialMapClass.dox b/resources/3rdParty/eigen/doc/C10_TutorialMapClass.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/C10_TutorialMapClass.dox
rename to resources/3rdParty/eigen/doc/C10_TutorialMapClass.dox
diff --git a/resources/3rdparty/eigen/doc/CMakeLists.txt b/resources/3rdParty/eigen/doc/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/doc/CMakeLists.txt
rename to resources/3rdParty/eigen/doc/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/doc/D01_StlContainers.dox b/resources/3rdParty/eigen/doc/D01_StlContainers.dox
new file mode 100644
index 000000000..b5dbf0698
--- /dev/null
+++ b/resources/3rdParty/eigen/doc/D01_StlContainers.dox
@@ -0,0 +1,65 @@
+namespace Eigen {
+
+/** \page TopicStlContainers Using STL Containers with Eigen
+
+\b Table \b of \b contents
+  - \ref summary
+  - \ref allocator
+  - \ref vector
+
+\section summary Executive summary
+
+Using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", or classes having members of such types, requires taking the following two steps:
+
+\li A 16-byte-aligned allocator must be used. Eigen does provide one ready for use: aligned_allocator.
+\li If you want to use the std::vector container, you need to \#include <Eigen/StdVector>.
+
+These issues arise only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member". For other Eigen types, such as Vector3f or MatrixXd, no special care is needed when using STL containers.
+
+\section allocator Using an aligned allocator
+
+STL containers take an optional template parameter, the allocator type. When using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you need to tell the container to use an allocator that will always allocate memory at 16-byte-aligned locations. Fortunately, Eigen does provide such an allocator: Eigen::aligned_allocator.
+
+For example, instead of
+\code
+std::map<int, Eigen::Vector4f>
+\endcode
+you need to use
+\code
+std::map<int, Eigen::Vector4f, std::less<int>, 
+         Eigen::aligned_allocator<std::pair<const int, Eigen::Vector4f> > >
+\endcode
+Note that the third parameter "std::less<int>" is just the default value, but we have to include it because we want to specify the fourth parameter, which is the allocator type.
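+
+To keep such declarations readable, the long type can of course be hidden behind a typedef (a sketch; the alias name \c Vector4fMap is ours):
+\code
+typedef std::map<int, Eigen::Vector4f, std::less<int>,
+                 Eigen::aligned_allocator<std::pair<const int, Eigen::Vector4f> > > Vector4fMap;
+Vector4fMap my_map;
+my_map[0] = Eigen::Vector4f::Zero();
+\endcode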
+
+\section vector The case of std::vector
+
+The situation with std::vector was even worse (explanation below) so we had to specialize it for the Eigen::aligned_allocator type. In practice you \b must use the Eigen::aligned_allocator (not another aligned allocator), \b and \#include <Eigen/StdVector>.
+
+Here is an example:
+\code
+#include<Eigen/StdVector>
+/* ... */
+std::vector<Eigen::Vector4f,Eigen::aligned_allocator<Eigen::Vector4f> >
+\endcode
+
+\subsection vector_spec An alternative - specializing std::vector for Eigen types
+
+As an alternative to the recommended approach described above, you have the option to specialize std::vector for Eigen types requiring alignment.
+The advantage is that you won't need to declare std::vector all over with Eigen::aligned_allocator. One drawback, however, is that
+the specialization needs to be defined before all code pieces in which, e.g., std::vector<Vector2d> is used. Otherwise, without knowing the specialization,
+the compiler will compile that particular instance with the default std::allocator and your program is most likely to crash.
+
+Here is an example:
+\code
+#include<Eigen/StdVector>
+/* ... */
+EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Vector2d)
+std::vector<Eigen::Vector2d>
+\endcode
+
+<span class="note">\b Explanation: The resize() method of std::vector takes a value_type argument (defaulting to value_type()). So with std::vector<Eigen::Vector4f>, some Eigen::Vector4f objects will be passed by value, which discards any alignment modifiers, so an Eigen::Vector4f can be created at an unaligned location. In order to avoid that, the only solution we saw was to specialize std::vector to make it work on a slight modification of, here, Eigen::Vector4f, which is able to deal properly with this situation.
+</span>
+
+*/
+
+}
diff --git a/resources/3rdparty/eigen/doc/D03_WrongStackAlignment.dox b/resources/3rdParty/eigen/doc/D03_WrongStackAlignment.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/D03_WrongStackAlignment.dox
rename to resources/3rdParty/eigen/doc/D03_WrongStackAlignment.dox
diff --git a/resources/3rdparty/eigen/doc/D07_PassingByValue.dox b/resources/3rdParty/eigen/doc/D07_PassingByValue.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/D07_PassingByValue.dox
rename to resources/3rdParty/eigen/doc/D07_PassingByValue.dox
diff --git a/resources/3rdparty/eigen/doc/D09_StructHavingEigenMembers.dox b/resources/3rdParty/eigen/doc/D09_StructHavingEigenMembers.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/D09_StructHavingEigenMembers.dox
rename to resources/3rdParty/eigen/doc/D09_StructHavingEigenMembers.dox
diff --git a/resources/3rdparty/eigen/doc/D11_UnalignedArrayAssert.dox b/resources/3rdParty/eigen/doc/D11_UnalignedArrayAssert.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/D11_UnalignedArrayAssert.dox
rename to resources/3rdParty/eigen/doc/D11_UnalignedArrayAssert.dox
diff --git a/resources/3rdparty/eigen/doc/Doxyfile.in b/resources/3rdParty/eigen/doc/Doxyfile.in
similarity index 100%
rename from resources/3rdparty/eigen/doc/Doxyfile.in
rename to resources/3rdParty/eigen/doc/Doxyfile.in
diff --git a/resources/3rdparty/eigen/doc/Eigen_Silly_Professor_64x64.png b/resources/3rdParty/eigen/doc/Eigen_Silly_Professor_64x64.png
similarity index 100%
rename from resources/3rdparty/eigen/doc/Eigen_Silly_Professor_64x64.png
rename to resources/3rdParty/eigen/doc/Eigen_Silly_Professor_64x64.png
diff --git a/resources/3rdparty/eigen/doc/I00_CustomizingEigen.dox b/resources/3rdParty/eigen/doc/I00_CustomizingEigen.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I00_CustomizingEigen.dox
rename to resources/3rdParty/eigen/doc/I00_CustomizingEigen.dox
diff --git a/resources/3rdparty/eigen/doc/I01_TopicLazyEvaluation.dox b/resources/3rdParty/eigen/doc/I01_TopicLazyEvaluation.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I01_TopicLazyEvaluation.dox
rename to resources/3rdParty/eigen/doc/I01_TopicLazyEvaluation.dox
diff --git a/resources/3rdParty/eigen/doc/I02_HiPerformance.dox b/resources/3rdParty/eigen/doc/I02_HiPerformance.dox
new file mode 100644
index 000000000..ac1c2ca2b
--- /dev/null
+++ b/resources/3rdParty/eigen/doc/I02_HiPerformance.dox
@@ -0,0 +1,128 @@
+
+namespace Eigen {
+
+/** \page TopicWritingEfficientProductExpression Writing efficient matrix product expressions
+
+In general, achieving good performance with Eigen does not require any special effort:
+simply write your expressions in the most high-level way. This is especially true
+for small fixed-size matrices. For large matrices, however, it might be useful to
+take some care when writing your expressions in order to minimize useless evaluations
+and optimize the performance.
+In this page we will give a brief overview of Eigen's internal mechanism to simplify
+and evaluate complex product expressions, and discuss its current limitations.
+In particular we will focus on expressions matching level 2 and 3 BLAS routines, i.e.,
+all kinds of matrix products and triangular solvers.
+
+Indeed, in Eigen we have implemented a set of highly optimized routines which are very similar
+to BLAS routines. Unlike BLAS, these routines are made available to the user via a high-level and
+natural API. Each of these routines can compute a wide variety of expressions in a single evaluation.
+Given an expression, the challenge is then to map it to a minimal set of routines.
+As explained later, this mechanism has some limitations, and knowing them will allow
+you to write faster code by making your expressions more Eigen-friendly.
+
+\section GEMM General Matrix-Matrix product (GEMM)
+
+Let's start with the most common primitive: the matrix product of general dense matrices.
+In the BLAS world this corresponds to the GEMM routine. Our equivalent primitive can
+perform the following operation:
+\f$ C.noalias() += \alpha op1(A) op2(B) \f$
+where A, B, and C are column and/or row major matrices (or sub-matrices),
+alpha is a scalar value, and op1, op2 can be transpose, adjoint, conjugate, or the identity.
+When Eigen detects a matrix product, it analyzes both sides of the product to extract a
+unique scalar factor alpha, and for each side, its effective storage order, shape, and conjugation states.
+More precisely each side is simplified by iteratively removing trivial expressions such as scalar multiple,
+negation and conjugation. Transpose and Block expressions are not evaluated and they only modify the storage order
+and shape. All other expressions are immediately evaluated.
+For instance, the following expression:
+\code m1.noalias() -= s4 * (s1 * m2.adjoint() * (-(s3*m3).conjugate()*s2))  \endcode
+is automatically simplified to:
+\code m1.noalias() += (s1*s2*conj(s3)*s4) * m2.adjoint() * m3.conjugate() \endcode
+which exactly matches our GEMM routine.
+
+\subsection GEMM_Limitations Limitations
+Unfortunately, this simplification mechanism is not perfect yet and not all expressions which could be
+handled by a single GEMM-like call are correctly detected.
+<table class="manual" style="width:100%">
+<tr>
+<th>Not optimal expression</th>
+<th>Evaluated as</th>
+<th>Optimal version (single evaluation)</th>
+<th>Comments</th>
+</tr>
+<tr>
+<td>\code
+m1 += m2 * m3; \endcode</td>
+<td>\code
+temp = m2 * m3;
+m1 += temp; \endcode</td>
+<td>\code
+m1.noalias() += m2 * m3; \endcode</td>
+<td>Use .noalias() to tell Eigen the result and right-hand-sides do not alias. 
+    Otherwise the product m2 * m3 is evaluated into a temporary.</td>
+</tr>
+<tr class="alt">
+<td></td>
+<td></td>
+<td>\code
+m1.noalias() += s1 * (m2 * m3); \endcode</td>
+<td>This is a special feature of Eigen. Here the product between a scalar
+    and a matrix product does not evaluate the matrix product but instead it
+    returns a matrix product expression tracking the scalar scaling factor. <br>
+    Without this optimization, the matrix product would be evaluated into a
+    temporary as in the next example.</td>
+</tr>
+<tr>
+<td>\code
+m1.noalias() += (m2 * m3).adjoint(); \endcode</td>
+<td>\code
+temp = m2 * m3;
+m1 += temp.adjoint(); \endcode</td>
+<td>\code
+m1.noalias() += m3.adjoint()
+              * m2.adjoint(); \endcode</td>
+<td>This is because the product expression has the EvalBeforeNesting bit which
+    enforces the evaluation of the product by the Transpose expression.</td>
+</tr>
+<tr class="alt">
+<td>\code
+m1 = m1 + m2 * m3; \endcode</td>
+<td>\code
+temp = m2 * m3;
+m1 = m1 + temp; \endcode</td>
+<td>\code m1.noalias() += m2 * m3; \endcode</td>
+<td>Here there is no way to detect at compile time that the two m1 are the same,
+    and so the matrix product will be immediately evaluated.</td>
+</tr>
+<tr>
+<td>\code
+m1.noalias() = m4 + m2 * m3; \endcode</td>
+<td>\code
+temp = m2 * m3;
+m1 = m4 + temp; \endcode</td>
+<td>\code
+m1 = m4;
+m1.noalias() += m2 * m3; \endcode</td>
+<td>First of all, here the .noalias() in the first expression is useless because
+    m2*m3 will be evaluated anyway. However, note how this expression can be rewritten
+    so that no temporary is required. (Tip: for very small fixed-size matrices
+    it is slightly better to rewrite it like this: m1.noalias() = m2 * m3; m1 += m4;)</td>
+</tr>
+<tr class="alt">
+<td>\code
+m1.noalias() += (s1*m2).block(..) * m3; \endcode</td>
+<td>\code
+temp = (s1*m2).block(..);
+m1 += temp * m3; \endcode</td>
+<td>\code
+m1.noalias() += s1 * m2.block(..) * m3; \endcode</td>
+<td>This is because our expression analyzer is currently not able to extract trivial
+    expressions nested in a Block expression. Therefore the nested scalar
+    multiple cannot be properly extracted.</td>
+</tr>
+</table>
+
+Of course all these remarks hold for all other kinds of products involving triangular or selfadjoint matrices.
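+
+For instance, the same noalias() idiom applies to those variants as well; a brief sketch:
+\code
+m1.noalias() += m2.selfadjointView<Lower>() * m3;   // SYMM-like product without a temporary
+m1.noalias() += m2.triangularView<Upper>() * m3;    // TRMM-like product without a temporary
+\endcode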
+
+*/
+
+}
diff --git a/resources/3rdparty/eigen/doc/I03_InsideEigenExample.dox b/resources/3rdParty/eigen/doc/I03_InsideEigenExample.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I03_InsideEigenExample.dox
rename to resources/3rdParty/eigen/doc/I03_InsideEigenExample.dox
diff --git a/resources/3rdparty/eigen/doc/I05_FixedSizeVectorizable.dox b/resources/3rdParty/eigen/doc/I05_FixedSizeVectorizable.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I05_FixedSizeVectorizable.dox
rename to resources/3rdParty/eigen/doc/I05_FixedSizeVectorizable.dox
diff --git a/resources/3rdparty/eigen/doc/I06_TopicEigenExpressionTemplates.dox b/resources/3rdParty/eigen/doc/I06_TopicEigenExpressionTemplates.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I06_TopicEigenExpressionTemplates.dox
rename to resources/3rdParty/eigen/doc/I06_TopicEigenExpressionTemplates.dox
diff --git a/resources/3rdparty/eigen/doc/I07_TopicScalarTypes.dox b/resources/3rdParty/eigen/doc/I07_TopicScalarTypes.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I07_TopicScalarTypes.dox
rename to resources/3rdParty/eigen/doc/I07_TopicScalarTypes.dox
diff --git a/resources/3rdparty/eigen/doc/I08_Resizing.dox b/resources/3rdParty/eigen/doc/I08_Resizing.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I08_Resizing.dox
rename to resources/3rdParty/eigen/doc/I08_Resizing.dox
diff --git a/resources/3rdparty/eigen/doc/I09_Vectorization.dox b/resources/3rdParty/eigen/doc/I09_Vectorization.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I09_Vectorization.dox
rename to resources/3rdParty/eigen/doc/I09_Vectorization.dox
diff --git a/resources/3rdParty/eigen/doc/I10_Assertions.dox b/resources/3rdParty/eigen/doc/I10_Assertions.dox
new file mode 100644
index 000000000..d5697fcee
--- /dev/null
+++ b/resources/3rdParty/eigen/doc/I10_Assertions.dox
@@ -0,0 +1,13 @@
+namespace Eigen {
+
+/** \page TopicAssertions Assertions
+
+
+TODO: write this dox page!
+
+Is linked from the tutorial on matrix arithmetic.
+
+\sa Section \ref TopicPreprocessorDirectivesAssertions on page \ref TopicPreprocessorDirectives.
+
+*/
+}
diff --git a/resources/3rdparty/eigen/doc/I11_Aliasing.dox b/resources/3rdParty/eigen/doc/I11_Aliasing.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I11_Aliasing.dox
rename to resources/3rdParty/eigen/doc/I11_Aliasing.dox
diff --git a/resources/3rdparty/eigen/doc/I12_ClassHierarchy.dox b/resources/3rdParty/eigen/doc/I12_ClassHierarchy.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I12_ClassHierarchy.dox
rename to resources/3rdParty/eigen/doc/I12_ClassHierarchy.dox
diff --git a/resources/3rdparty/eigen/doc/I13_FunctionsTakingEigenTypes.dox b/resources/3rdParty/eigen/doc/I13_FunctionsTakingEigenTypes.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I13_FunctionsTakingEigenTypes.dox
rename to resources/3rdParty/eigen/doc/I13_FunctionsTakingEigenTypes.dox
diff --git a/resources/3rdparty/eigen/doc/I14_PreprocessorDirectives.dox b/resources/3rdParty/eigen/doc/I14_PreprocessorDirectives.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I14_PreprocessorDirectives.dox
rename to resources/3rdParty/eigen/doc/I14_PreprocessorDirectives.dox
diff --git a/resources/3rdparty/eigen/doc/I15_StorageOrders.dox b/resources/3rdParty/eigen/doc/I15_StorageOrders.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I15_StorageOrders.dox
rename to resources/3rdParty/eigen/doc/I15_StorageOrders.dox
diff --git a/resources/3rdparty/eigen/doc/I16_TemplateKeyword.dox b/resources/3rdParty/eigen/doc/I16_TemplateKeyword.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/I16_TemplateKeyword.dox
rename to resources/3rdParty/eigen/doc/I16_TemplateKeyword.dox
diff --git a/resources/3rdparty/eigen/doc/Overview.dox b/resources/3rdParty/eigen/doc/Overview.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/Overview.dox
rename to resources/3rdParty/eigen/doc/Overview.dox
diff --git a/resources/3rdparty/eigen/doc/QuickReference.dox b/resources/3rdParty/eigen/doc/QuickReference.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/QuickReference.dox
rename to resources/3rdParty/eigen/doc/QuickReference.dox
diff --git a/resources/3rdparty/eigen/doc/SparseQuickReference.dox b/resources/3rdParty/eigen/doc/SparseQuickReference.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/SparseQuickReference.dox
rename to resources/3rdParty/eigen/doc/SparseQuickReference.dox
diff --git a/resources/3rdparty/eigen/doc/TopicLinearAlgebraDecompositions.dox b/resources/3rdParty/eigen/doc/TopicLinearAlgebraDecompositions.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/TopicLinearAlgebraDecompositions.dox
rename to resources/3rdParty/eigen/doc/TopicLinearAlgebraDecompositions.dox
diff --git a/resources/3rdparty/eigen/doc/TopicMultithreading.dox b/resources/3rdParty/eigen/doc/TopicMultithreading.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/TopicMultithreading.dox
rename to resources/3rdParty/eigen/doc/TopicMultithreading.dox
diff --git a/resources/3rdparty/eigen/doc/TutorialSparse_example_details.dox b/resources/3rdParty/eigen/doc/TutorialSparse_example_details.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/TutorialSparse_example_details.dox
rename to resources/3rdParty/eigen/doc/TutorialSparse_example_details.dox
diff --git a/resources/3rdparty/eigen/doc/UsingIntelMKL.dox b/resources/3rdParty/eigen/doc/UsingIntelMKL.dox
similarity index 100%
rename from resources/3rdparty/eigen/doc/UsingIntelMKL.dox
rename to resources/3rdParty/eigen/doc/UsingIntelMKL.dox
diff --git a/resources/3rdparty/eigen/doc/eigendoxy.css b/resources/3rdParty/eigen/doc/eigendoxy.css
similarity index 100%
rename from resources/3rdparty/eigen/doc/eigendoxy.css
rename to resources/3rdParty/eigen/doc/eigendoxy.css
diff --git a/resources/3rdparty/eigen/doc/eigendoxy_footer.html.in b/resources/3rdParty/eigen/doc/eigendoxy_footer.html.in
similarity index 100%
rename from resources/3rdparty/eigen/doc/eigendoxy_footer.html.in
rename to resources/3rdParty/eigen/doc/eigendoxy_footer.html.in
diff --git a/resources/3rdparty/eigen/doc/eigendoxy_header.html.in b/resources/3rdParty/eigen/doc/eigendoxy_header.html.in
similarity index 100%
rename from resources/3rdparty/eigen/doc/eigendoxy_header.html.in
rename to resources/3rdParty/eigen/doc/eigendoxy_header.html.in
diff --git a/resources/3rdparty/eigen/doc/eigendoxy_tabs.css b/resources/3rdParty/eigen/doc/eigendoxy_tabs.css
similarity index 100%
rename from resources/3rdparty/eigen/doc/eigendoxy_tabs.css
rename to resources/3rdParty/eigen/doc/eigendoxy_tabs.css
diff --git a/resources/3rdparty/eigen/doc/examples/.krazy b/resources/3rdParty/eigen/doc/examples/.krazy
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/.krazy
rename to resources/3rdParty/eigen/doc/examples/.krazy
diff --git a/resources/3rdparty/eigen/doc/examples/CMakeLists.txt b/resources/3rdParty/eigen/doc/examples/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/CMakeLists.txt
rename to resources/3rdParty/eigen/doc/examples/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/doc/examples/DenseBase_middleCols_int.cpp b/resources/3rdParty/eigen/doc/examples/DenseBase_middleCols_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/DenseBase_middleCols_int.cpp
rename to resources/3rdParty/eigen/doc/examples/DenseBase_middleCols_int.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/DenseBase_middleRows_int.cpp b/resources/3rdParty/eigen/doc/examples/DenseBase_middleRows_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/DenseBase_middleRows_int.cpp
rename to resources/3rdParty/eigen/doc/examples/DenseBase_middleRows_int.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/DenseBase_template_int_middleCols.cpp b/resources/3rdParty/eigen/doc/examples/DenseBase_template_int_middleCols.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/DenseBase_template_int_middleCols.cpp
rename to resources/3rdParty/eigen/doc/examples/DenseBase_template_int_middleCols.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/DenseBase_template_int_middleRows.cpp b/resources/3rdParty/eigen/doc/examples/DenseBase_template_int_middleRows.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/DenseBase_template_int_middleRows.cpp
rename to resources/3rdParty/eigen/doc/examples/DenseBase_template_int_middleRows.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/MatrixBase_cwise_const.cpp b/resources/3rdParty/eigen/doc/examples/MatrixBase_cwise_const.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/MatrixBase_cwise_const.cpp
rename to resources/3rdParty/eigen/doc/examples/MatrixBase_cwise_const.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/QuickStart_example.cpp b/resources/3rdParty/eigen/doc/examples/QuickStart_example.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/QuickStart_example.cpp
rename to resources/3rdParty/eigen/doc/examples/QuickStart_example.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/QuickStart_example2_dynamic.cpp b/resources/3rdParty/eigen/doc/examples/QuickStart_example2_dynamic.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/QuickStart_example2_dynamic.cpp
rename to resources/3rdParty/eigen/doc/examples/QuickStart_example2_dynamic.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/QuickStart_example2_fixed.cpp b/resources/3rdParty/eigen/doc/examples/QuickStart_example2_fixed.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/QuickStart_example2_fixed.cpp
rename to resources/3rdParty/eigen/doc/examples/QuickStart_example2_fixed.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/TemplateKeyword_flexible.cpp b/resources/3rdParty/eigen/doc/examples/TemplateKeyword_flexible.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/TemplateKeyword_flexible.cpp
rename to resources/3rdParty/eigen/doc/examples/TemplateKeyword_flexible.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/TemplateKeyword_simple.cpp b/resources/3rdParty/eigen/doc/examples/TemplateKeyword_simple.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/TemplateKeyword_simple.cpp
rename to resources/3rdParty/eigen/doc/examples/TemplateKeyword_simple.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/TutorialLinAlgComputeTwice.cpp b/resources/3rdParty/eigen/doc/examples/TutorialLinAlgComputeTwice.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/TutorialLinAlgComputeTwice.cpp
rename to resources/3rdParty/eigen/doc/examples/TutorialLinAlgComputeTwice.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/TutorialLinAlgExComputeSolveError.cpp b/resources/3rdParty/eigen/doc/examples/TutorialLinAlgExComputeSolveError.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/TutorialLinAlgExComputeSolveError.cpp
rename to resources/3rdParty/eigen/doc/examples/TutorialLinAlgExComputeSolveError.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/TutorialLinAlgExSolveColPivHouseholderQR.cpp b/resources/3rdParty/eigen/doc/examples/TutorialLinAlgExSolveColPivHouseholderQR.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/TutorialLinAlgExSolveColPivHouseholderQR.cpp
rename to resources/3rdParty/eigen/doc/examples/TutorialLinAlgExSolveColPivHouseholderQR.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/TutorialLinAlgExSolveLDLT.cpp b/resources/3rdParty/eigen/doc/examples/TutorialLinAlgExSolveLDLT.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/TutorialLinAlgExSolveLDLT.cpp
rename to resources/3rdParty/eigen/doc/examples/TutorialLinAlgExSolveLDLT.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/TutorialLinAlgInverseDeterminant.cpp b/resources/3rdParty/eigen/doc/examples/TutorialLinAlgInverseDeterminant.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/TutorialLinAlgInverseDeterminant.cpp
rename to resources/3rdParty/eigen/doc/examples/TutorialLinAlgInverseDeterminant.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/TutorialLinAlgRankRevealing.cpp b/resources/3rdParty/eigen/doc/examples/TutorialLinAlgRankRevealing.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/TutorialLinAlgRankRevealing.cpp
rename to resources/3rdParty/eigen/doc/examples/TutorialLinAlgRankRevealing.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/TutorialLinAlgSVDSolve.cpp b/resources/3rdParty/eigen/doc/examples/TutorialLinAlgSVDSolve.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/TutorialLinAlgSVDSolve.cpp
rename to resources/3rdParty/eigen/doc/examples/TutorialLinAlgSVDSolve.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/TutorialLinAlgSelfAdjointEigenSolver.cpp b/resources/3rdParty/eigen/doc/examples/TutorialLinAlgSelfAdjointEigenSolver.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/TutorialLinAlgSelfAdjointEigenSolver.cpp
rename to resources/3rdParty/eigen/doc/examples/TutorialLinAlgSelfAdjointEigenSolver.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/TutorialLinAlgSetThreshold.cpp b/resources/3rdParty/eigen/doc/examples/TutorialLinAlgSetThreshold.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/TutorialLinAlgSetThreshold.cpp
rename to resources/3rdParty/eigen/doc/examples/TutorialLinAlgSetThreshold.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_accessors.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_accessors.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_accessors.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_accessors.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_addition.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_addition.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_addition.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_addition.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_cwise_other.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_cwise_other.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_cwise_other.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_cwise_other.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_interop.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_interop.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_interop.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_interop.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_interop_matrix.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_interop_matrix.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_interop_matrix.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_interop_matrix.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_mult.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_mult.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ArrayClass_mult.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ArrayClass_mult.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_BlockOperations_block_assignment.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_BlockOperations_block_assignment.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_BlockOperations_block_assignment.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_BlockOperations_block_assignment.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_BlockOperations_colrow.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_BlockOperations_colrow.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_BlockOperations_colrow.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_BlockOperations_colrow.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_BlockOperations_corner.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_BlockOperations_corner.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_BlockOperations_corner.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_BlockOperations_corner.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_BlockOperations_print_block.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_BlockOperations_print_block.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_BlockOperations_print_block.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_BlockOperations_print_block.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_BlockOperations_vector.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_BlockOperations_vector.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_BlockOperations_vector.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_BlockOperations_vector.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_PartialLU_solve.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_PartialLU_solve.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_PartialLU_solve.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_PartialLU_solve.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_colwise.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_colwise.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_colwise.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_colwise.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_rowwise.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_rowwise.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_rowwise.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_rowwise.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_simple_example_dynamic_size.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_simple_example_dynamic_size.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_simple_example_dynamic_size.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_simple_example_dynamic_size.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/Tutorial_simple_example_fixed_size.cpp b/resources/3rdParty/eigen/doc/examples/Tutorial_simple_example_fixed_size.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/Tutorial_simple_example_fixed_size.cpp
rename to resources/3rdParty/eigen/doc/examples/Tutorial_simple_example_fixed_size.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/class_Block.cpp b/resources/3rdParty/eigen/doc/examples/class_Block.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/class_Block.cpp
rename to resources/3rdParty/eigen/doc/examples/class_Block.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/class_CwiseBinaryOp.cpp b/resources/3rdParty/eigen/doc/examples/class_CwiseBinaryOp.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/class_CwiseBinaryOp.cpp
rename to resources/3rdParty/eigen/doc/examples/class_CwiseBinaryOp.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/class_CwiseUnaryOp.cpp b/resources/3rdParty/eigen/doc/examples/class_CwiseUnaryOp.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/class_CwiseUnaryOp.cpp
rename to resources/3rdParty/eigen/doc/examples/class_CwiseUnaryOp.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/class_CwiseUnaryOp_ptrfun.cpp b/resources/3rdParty/eigen/doc/examples/class_CwiseUnaryOp_ptrfun.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/class_CwiseUnaryOp_ptrfun.cpp
rename to resources/3rdParty/eigen/doc/examples/class_CwiseUnaryOp_ptrfun.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/class_FixedBlock.cpp b/resources/3rdParty/eigen/doc/examples/class_FixedBlock.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/class_FixedBlock.cpp
rename to resources/3rdParty/eigen/doc/examples/class_FixedBlock.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/class_FixedVectorBlock.cpp b/resources/3rdParty/eigen/doc/examples/class_FixedVectorBlock.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/class_FixedVectorBlock.cpp
rename to resources/3rdParty/eigen/doc/examples/class_FixedVectorBlock.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/class_VectorBlock.cpp b/resources/3rdParty/eigen/doc/examples/class_VectorBlock.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/class_VectorBlock.cpp
rename to resources/3rdParty/eigen/doc/examples/class_VectorBlock.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/function_taking_eigenbase.cpp b/resources/3rdParty/eigen/doc/examples/function_taking_eigenbase.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/function_taking_eigenbase.cpp
rename to resources/3rdParty/eigen/doc/examples/function_taking_eigenbase.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/tut_arithmetic_add_sub.cpp b/resources/3rdParty/eigen/doc/examples/tut_arithmetic_add_sub.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/tut_arithmetic_add_sub.cpp
rename to resources/3rdParty/eigen/doc/examples/tut_arithmetic_add_sub.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/tut_arithmetic_dot_cross.cpp b/resources/3rdParty/eigen/doc/examples/tut_arithmetic_dot_cross.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/tut_arithmetic_dot_cross.cpp
rename to resources/3rdParty/eigen/doc/examples/tut_arithmetic_dot_cross.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/tut_arithmetic_matrix_mul.cpp b/resources/3rdParty/eigen/doc/examples/tut_arithmetic_matrix_mul.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/tut_arithmetic_matrix_mul.cpp
rename to resources/3rdParty/eigen/doc/examples/tut_arithmetic_matrix_mul.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/tut_arithmetic_redux_basic.cpp b/resources/3rdParty/eigen/doc/examples/tut_arithmetic_redux_basic.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/tut_arithmetic_redux_basic.cpp
rename to resources/3rdParty/eigen/doc/examples/tut_arithmetic_redux_basic.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/tut_arithmetic_scalar_mul_div.cpp b/resources/3rdParty/eigen/doc/examples/tut_arithmetic_scalar_mul_div.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/tut_arithmetic_scalar_mul_div.cpp
rename to resources/3rdParty/eigen/doc/examples/tut_arithmetic_scalar_mul_div.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/tut_matrix_coefficient_accessors.cpp b/resources/3rdParty/eigen/doc/examples/tut_matrix_coefficient_accessors.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/tut_matrix_coefficient_accessors.cpp
rename to resources/3rdParty/eigen/doc/examples/tut_matrix_coefficient_accessors.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/tut_matrix_resize.cpp b/resources/3rdParty/eigen/doc/examples/tut_matrix_resize.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/tut_matrix_resize.cpp
rename to resources/3rdParty/eigen/doc/examples/tut_matrix_resize.cpp
diff --git a/resources/3rdparty/eigen/doc/examples/tut_matrix_resize_fixed_size.cpp b/resources/3rdParty/eigen/doc/examples/tut_matrix_resize_fixed_size.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/examples/tut_matrix_resize_fixed_size.cpp
rename to resources/3rdParty/eigen/doc/examples/tut_matrix_resize_fixed_size.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/.krazy b/resources/3rdParty/eigen/doc/snippets/.krazy
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/.krazy
rename to resources/3rdParty/eigen/doc/snippets/.krazy
diff --git a/resources/3rdparty/eigen/doc/snippets/AngleAxis_mimic_euler.cpp b/resources/3rdParty/eigen/doc/snippets/AngleAxis_mimic_euler.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/AngleAxis_mimic_euler.cpp
rename to resources/3rdParty/eigen/doc/snippets/AngleAxis_mimic_euler.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/CMakeLists.txt b/resources/3rdParty/eigen/doc/snippets/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/CMakeLists.txt
rename to resources/3rdParty/eigen/doc/snippets/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/doc/snippets/ColPivHouseholderQR_solve.cpp b/resources/3rdParty/eigen/doc/snippets/ColPivHouseholderQR_solve.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/ColPivHouseholderQR_solve.cpp
rename to resources/3rdParty/eigen/doc/snippets/ColPivHouseholderQR_solve.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/ComplexEigenSolver_compute.cpp b/resources/3rdParty/eigen/doc/snippets/ComplexEigenSolver_compute.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/ComplexEigenSolver_compute.cpp
rename to resources/3rdParty/eigen/doc/snippets/ComplexEigenSolver_compute.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/ComplexEigenSolver_eigenvalues.cpp b/resources/3rdParty/eigen/doc/snippets/ComplexEigenSolver_eigenvalues.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/ComplexEigenSolver_eigenvalues.cpp
rename to resources/3rdParty/eigen/doc/snippets/ComplexEigenSolver_eigenvalues.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/ComplexEigenSolver_eigenvectors.cpp b/resources/3rdParty/eigen/doc/snippets/ComplexEigenSolver_eigenvectors.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/ComplexEigenSolver_eigenvectors.cpp
rename to resources/3rdParty/eigen/doc/snippets/ComplexEigenSolver_eigenvectors.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/ComplexSchur_compute.cpp b/resources/3rdParty/eigen/doc/snippets/ComplexSchur_compute.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/ComplexSchur_compute.cpp
rename to resources/3rdParty/eigen/doc/snippets/ComplexSchur_compute.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/ComplexSchur_matrixT.cpp b/resources/3rdParty/eigen/doc/snippets/ComplexSchur_matrixT.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/ComplexSchur_matrixT.cpp
rename to resources/3rdParty/eigen/doc/snippets/ComplexSchur_matrixT.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/ComplexSchur_matrixU.cpp b/resources/3rdParty/eigen/doc/snippets/ComplexSchur_matrixU.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/ComplexSchur_matrixU.cpp
rename to resources/3rdParty/eigen/doc/snippets/ComplexSchur_matrixU.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_abs.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_abs.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_abs.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_abs.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_abs2.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_abs2.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_abs2.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_abs2.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_acos.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_acos.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_acos.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_acos.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_boolean_and.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_boolean_and.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_boolean_and.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_boolean_and.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_boolean_or.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_boolean_or.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_boolean_or.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_boolean_or.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_cos.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_cos.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_cos.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_cos.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_cube.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_cube.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_cube.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_cube.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_equal_equal.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_equal_equal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_equal_equal.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_equal_equal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_exp.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_exp.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_exp.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_exp.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_greater.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_greater.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_greater.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_greater.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_greater_equal.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_greater_equal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_greater_equal.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_greater_equal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_inverse.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_inverse.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_inverse.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_inverse.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_less.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_less.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_less.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_less.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_less_equal.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_less_equal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_less_equal.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_less_equal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_log.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_log.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_log.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_log.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_max.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_max.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_max.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_max.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_min.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_min.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_min.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_min.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_minus.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_minus.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_minus.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_minus.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_minus_equal.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_minus_equal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_minus_equal.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_minus_equal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_not_equal.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_not_equal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_not_equal.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_not_equal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_plus.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_plus.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_plus.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_plus.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_plus_equal.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_plus_equal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_plus_equal.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_plus_equal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_pow.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_pow.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_pow.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_pow.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_product.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_product.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_product.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_product.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_quotient.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_quotient.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_quotient.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_quotient.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_sin.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_sin.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_sin.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_sin.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_slash_equal.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_slash_equal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_slash_equal.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_slash_equal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_sqrt.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_sqrt.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_sqrt.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_sqrt.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_square.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_square.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_square.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_square.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_tan.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_tan.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_tan.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_tan.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Cwise_times_equal.cpp b/resources/3rdParty/eigen/doc/snippets/Cwise_times_equal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Cwise_times_equal.cpp
rename to resources/3rdParty/eigen/doc/snippets/Cwise_times_equal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/DenseBase_LinSpaced.cpp b/resources/3rdParty/eigen/doc/snippets/DenseBase_LinSpaced.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/DenseBase_LinSpaced.cpp
rename to resources/3rdParty/eigen/doc/snippets/DenseBase_LinSpaced.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/DenseBase_LinSpaced_seq.cpp b/resources/3rdParty/eigen/doc/snippets/DenseBase_LinSpaced_seq.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/DenseBase_LinSpaced_seq.cpp
rename to resources/3rdParty/eigen/doc/snippets/DenseBase_LinSpaced_seq.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/DenseBase_setLinSpaced.cpp b/resources/3rdParty/eigen/doc/snippets/DenseBase_setLinSpaced.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/DenseBase_setLinSpaced.cpp
rename to resources/3rdParty/eigen/doc/snippets/DenseBase_setLinSpaced.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/DirectionWise_replicate.cpp b/resources/3rdParty/eigen/doc/snippets/DirectionWise_replicate.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/DirectionWise_replicate.cpp
rename to resources/3rdParty/eigen/doc/snippets/DirectionWise_replicate.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/DirectionWise_replicate_int.cpp b/resources/3rdParty/eigen/doc/snippets/DirectionWise_replicate_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/DirectionWise_replicate_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/DirectionWise_replicate_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/EigenSolver_EigenSolver_MatrixType.cpp b/resources/3rdParty/eigen/doc/snippets/EigenSolver_EigenSolver_MatrixType.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/EigenSolver_EigenSolver_MatrixType.cpp
rename to resources/3rdParty/eigen/doc/snippets/EigenSolver_EigenSolver_MatrixType.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/EigenSolver_compute.cpp b/resources/3rdParty/eigen/doc/snippets/EigenSolver_compute.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/EigenSolver_compute.cpp
rename to resources/3rdParty/eigen/doc/snippets/EigenSolver_compute.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/EigenSolver_eigenvalues.cpp b/resources/3rdParty/eigen/doc/snippets/EigenSolver_eigenvalues.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/EigenSolver_eigenvalues.cpp
rename to resources/3rdParty/eigen/doc/snippets/EigenSolver_eigenvalues.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/EigenSolver_eigenvectors.cpp b/resources/3rdParty/eigen/doc/snippets/EigenSolver_eigenvectors.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/EigenSolver_eigenvectors.cpp
rename to resources/3rdParty/eigen/doc/snippets/EigenSolver_eigenvectors.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/EigenSolver_pseudoEigenvectors.cpp b/resources/3rdParty/eigen/doc/snippets/EigenSolver_pseudoEigenvectors.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/EigenSolver_pseudoEigenvectors.cpp
rename to resources/3rdParty/eigen/doc/snippets/EigenSolver_pseudoEigenvectors.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/FullPivHouseholderQR_solve.cpp b/resources/3rdParty/eigen/doc/snippets/FullPivHouseholderQR_solve.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/FullPivHouseholderQR_solve.cpp
rename to resources/3rdParty/eigen/doc/snippets/FullPivHouseholderQR_solve.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/FullPivLU_image.cpp b/resources/3rdParty/eigen/doc/snippets/FullPivLU_image.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/FullPivLU_image.cpp
rename to resources/3rdParty/eigen/doc/snippets/FullPivLU_image.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/FullPivLU_kernel.cpp b/resources/3rdParty/eigen/doc/snippets/FullPivLU_kernel.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/FullPivLU_kernel.cpp
rename to resources/3rdParty/eigen/doc/snippets/FullPivLU_kernel.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/FullPivLU_solve.cpp b/resources/3rdParty/eigen/doc/snippets/FullPivLU_solve.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/FullPivLU_solve.cpp
rename to resources/3rdParty/eigen/doc/snippets/FullPivLU_solve.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/HessenbergDecomposition_compute.cpp b/resources/3rdParty/eigen/doc/snippets/HessenbergDecomposition_compute.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/HessenbergDecomposition_compute.cpp
rename to resources/3rdParty/eigen/doc/snippets/HessenbergDecomposition_compute.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/HessenbergDecomposition_matrixH.cpp b/resources/3rdParty/eigen/doc/snippets/HessenbergDecomposition_matrixH.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/HessenbergDecomposition_matrixH.cpp
rename to resources/3rdParty/eigen/doc/snippets/HessenbergDecomposition_matrixH.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/HessenbergDecomposition_packedMatrix.cpp b/resources/3rdParty/eigen/doc/snippets/HessenbergDecomposition_packedMatrix.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/HessenbergDecomposition_packedMatrix.cpp
rename to resources/3rdParty/eigen/doc/snippets/HessenbergDecomposition_packedMatrix.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/HouseholderQR_solve.cpp b/resources/3rdParty/eigen/doc/snippets/HouseholderQR_solve.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/HouseholderQR_solve.cpp
rename to resources/3rdParty/eigen/doc/snippets/HouseholderQR_solve.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/HouseholderSequence_HouseholderSequence.cpp b/resources/3rdParty/eigen/doc/snippets/HouseholderSequence_HouseholderSequence.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/HouseholderSequence_HouseholderSequence.cpp
rename to resources/3rdParty/eigen/doc/snippets/HouseholderSequence_HouseholderSequence.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/IOFormat.cpp b/resources/3rdParty/eigen/doc/snippets/IOFormat.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/IOFormat.cpp
rename to resources/3rdParty/eigen/doc/snippets/IOFormat.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/JacobiSVD_basic.cpp b/resources/3rdParty/eigen/doc/snippets/JacobiSVD_basic.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/JacobiSVD_basic.cpp
rename to resources/3rdParty/eigen/doc/snippets/JacobiSVD_basic.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Jacobi_makeGivens.cpp b/resources/3rdParty/eigen/doc/snippets/Jacobi_makeGivens.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Jacobi_makeGivens.cpp
rename to resources/3rdParty/eigen/doc/snippets/Jacobi_makeGivens.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Jacobi_makeJacobi.cpp b/resources/3rdParty/eigen/doc/snippets/Jacobi_makeJacobi.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Jacobi_makeJacobi.cpp
rename to resources/3rdParty/eigen/doc/snippets/Jacobi_makeJacobi.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/LLT_example.cpp b/resources/3rdParty/eigen/doc/snippets/LLT_example.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/LLT_example.cpp
rename to resources/3rdParty/eigen/doc/snippets/LLT_example.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/LLT_solve.cpp b/resources/3rdParty/eigen/doc/snippets/LLT_solve.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/LLT_solve.cpp
rename to resources/3rdParty/eigen/doc/snippets/LLT_solve.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Map_general_stride.cpp b/resources/3rdParty/eigen/doc/snippets/Map_general_stride.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Map_general_stride.cpp
rename to resources/3rdParty/eigen/doc/snippets/Map_general_stride.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Map_inner_stride.cpp b/resources/3rdParty/eigen/doc/snippets/Map_inner_stride.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Map_inner_stride.cpp
rename to resources/3rdParty/eigen/doc/snippets/Map_inner_stride.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Map_outer_stride.cpp b/resources/3rdParty/eigen/doc/snippets/Map_outer_stride.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Map_outer_stride.cpp
rename to resources/3rdParty/eigen/doc/snippets/Map_outer_stride.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Map_placement_new.cpp b/resources/3rdParty/eigen/doc/snippets/Map_placement_new.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Map_placement_new.cpp
rename to resources/3rdParty/eigen/doc/snippets/Map_placement_new.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Map_simple.cpp b/resources/3rdParty/eigen/doc/snippets/Map_simple.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Map_simple.cpp
rename to resources/3rdParty/eigen/doc/snippets/Map_simple.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_adjoint.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_adjoint.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_adjoint.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_adjoint.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_all.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_all.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_all.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_all.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_array.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_array.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_array.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_array.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_array_const.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_array_const.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_array_const.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_array_const.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_asDiagonal.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_asDiagonal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_asDiagonal.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_asDiagonal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_block_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_block_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_block_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_block_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_block_int_int_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_block_int_int_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_block_int_int_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_block_int_int_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_bottomLeftCorner_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_bottomLeftCorner_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_bottomLeftCorner_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_bottomLeftCorner_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_bottomRightCorner_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_bottomRightCorner_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_bottomRightCorner_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_bottomRightCorner_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_bottomRows_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_bottomRows_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_bottomRows_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_bottomRows_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_cast.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_cast.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_cast.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_cast.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_col.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_col.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_col.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_col.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_colwise.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_colwise.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_colwise.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_colwise.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_computeInverseAndDetWithCheck.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_computeInverseAndDetWithCheck.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_computeInverseAndDetWithCheck.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_computeInverseAndDetWithCheck.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_computeInverseWithCheck.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_computeInverseWithCheck.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_computeInverseWithCheck.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_computeInverseWithCheck.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseAbs.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseAbs.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseAbs.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseAbs.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseAbs2.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseAbs2.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseAbs2.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseAbs2.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseEqual.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseEqual.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseEqual.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseEqual.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseInverse.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseInverse.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseInverse.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseInverse.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseMax.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseMax.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseMax.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseMax.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseMin.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseMin.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseMin.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseMin.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseNotEqual.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseNotEqual.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseNotEqual.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseNotEqual.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseProduct.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseProduct.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseProduct.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseProduct.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseQuotient.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseQuotient.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseQuotient.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseQuotient.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseSqrt.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseSqrt.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_cwiseSqrt.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_cwiseSqrt.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_diagonal.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_diagonal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_diagonal.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_diagonal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_diagonal_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_diagonal_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_diagonal_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_diagonal_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_diagonal_template_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_diagonal_template_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_diagonal_template_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_diagonal_template_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_eigenvalues.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_eigenvalues.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_eigenvalues.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_eigenvalues.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_end_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_end_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_end_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_end_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_eval.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_eval.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_eval.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_eval.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_extract.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_extract.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_extract.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_extract.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_fixedBlock_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_fixedBlock_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_fixedBlock_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_fixedBlock_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_identity.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_identity.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_identity.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_identity.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_identity_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_identity_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_identity_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_identity_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_inverse.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_inverse.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_inverse.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_inverse.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_isDiagonal.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_isDiagonal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_isDiagonal.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_isDiagonal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_isIdentity.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_isIdentity.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_isIdentity.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_isIdentity.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_isOnes.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_isOnes.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_isOnes.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_isOnes.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_isOrthogonal.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_isOrthogonal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_isOrthogonal.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_isOrthogonal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_isUnitary.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_isUnitary.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_isUnitary.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_isUnitary.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_isZero.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_isZero.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_isZero.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_isZero.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_leftCols_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_leftCols_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_leftCols_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_leftCols_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_marked.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_marked.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_marked.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_marked.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_noalias.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_noalias.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_noalias.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_noalias.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_ones.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_ones.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_ones.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_ones.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_ones_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_ones_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_ones_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_ones_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_ones_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_ones_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_ones_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_ones_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_operatorNorm.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_operatorNorm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_operatorNorm.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_operatorNorm.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_part.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_part.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_part.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_part.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_prod.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_prod.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_prod.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_prod.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_random.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_random.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_random.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_random.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_random_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_random_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_random_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_random_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_random_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_random_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_random_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_random_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_replicate.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_replicate.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_replicate.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_replicate.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_replicate_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_replicate_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_replicate_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_replicate_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_reverse.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_reverse.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_reverse.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_reverse.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_rightCols_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_rightCols_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_rightCols_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_rightCols_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_row.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_row.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_row.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_row.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_rowwise.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_rowwise.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_rowwise.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_rowwise.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_segment_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_segment_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_segment_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_segment_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_select.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_select.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_select.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_select.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_set.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_set.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_set.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_set.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_setIdentity.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_setIdentity.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_setIdentity.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_setIdentity.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_setOnes.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_setOnes.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_setOnes.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_setOnes.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_setRandom.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_setRandom.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_setRandom.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_setRandom.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_setZero.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_setZero.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_setZero.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_setZero.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_start_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_start_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_start_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_start_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_bottomRows.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_bottomRows.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_bottomRows.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_bottomRows.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_end.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_end.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_end.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_end.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_int_bottomLeftCorner.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_int_bottomLeftCorner.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_int_bottomLeftCorner.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_int_bottomLeftCorner.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_int_bottomRightCorner.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_int_bottomRightCorner.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_int_bottomRightCorner.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_int_bottomRightCorner.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_int_topLeftCorner.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_int_topLeftCorner.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_int_topLeftCorner.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_int_topLeftCorner.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_int_topRightCorner.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_int_topRightCorner.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_int_topRightCorner.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_int_topRightCorner.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_leftCols.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_leftCols.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_leftCols.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_leftCols.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_rightCols.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_rightCols.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_rightCols.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_rightCols.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_segment.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_segment.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_segment.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_segment.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_start.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_start.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_start.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_start.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_topRows.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_topRows.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_template_int_topRows.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_template_int_topRows.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_topLeftCorner_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_topLeftCorner_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_topLeftCorner_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_topLeftCorner_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_topRightCorner_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_topRightCorner_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_topRightCorner_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_topRightCorner_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_topRows_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_topRows_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_topRows_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_topRows_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_transpose.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_transpose.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_transpose.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_transpose.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_zero.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_zero.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_zero.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_zero.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_zero_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_zero_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_zero_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_zero_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/MatrixBase_zero_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/MatrixBase_zero_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/MatrixBase_zero_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/MatrixBase_zero_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_resize_NoChange_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_resize_NoChange_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_resize_NoChange_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_resize_NoChange_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_resize_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_resize_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_resize_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_resize_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_resize_int_NoChange.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_resize_int_NoChange.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_resize_int_NoChange.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_resize_int_NoChange.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_resize_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_resize_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_resize_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_resize_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_setConstant_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_setConstant_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_setConstant_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_setConstant_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_setConstant_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_setConstant_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_setConstant_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_setConstant_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_setIdentity_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_setIdentity_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_setIdentity_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_setIdentity_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_setOnes_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_setOnes_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_setOnes_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_setOnes_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_setOnes_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_setOnes_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_setOnes_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_setOnes_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_setRandom_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_setRandom_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_setRandom_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_setRandom_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_setRandom_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_setRandom_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_setRandom_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_setRandom_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_setZero_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_setZero_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_setZero_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_setZero_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Matrix_setZero_int_int.cpp b/resources/3rdParty/eigen/doc/snippets/Matrix_setZero_int_int.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Matrix_setZero_int_int.cpp
rename to resources/3rdParty/eigen/doc/snippets/Matrix_setZero_int_int.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/PartialPivLU_solve.cpp b/resources/3rdParty/eigen/doc/snippets/PartialPivLU_solve.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/PartialPivLU_solve.cpp
rename to resources/3rdParty/eigen/doc/snippets/PartialPivLU_solve.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/PartialRedux_count.cpp b/resources/3rdParty/eigen/doc/snippets/PartialRedux_count.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/PartialRedux_count.cpp
rename to resources/3rdParty/eigen/doc/snippets/PartialRedux_count.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/PartialRedux_maxCoeff.cpp b/resources/3rdParty/eigen/doc/snippets/PartialRedux_maxCoeff.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/PartialRedux_maxCoeff.cpp
rename to resources/3rdParty/eigen/doc/snippets/PartialRedux_maxCoeff.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/PartialRedux_minCoeff.cpp b/resources/3rdParty/eigen/doc/snippets/PartialRedux_minCoeff.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/PartialRedux_minCoeff.cpp
rename to resources/3rdParty/eigen/doc/snippets/PartialRedux_minCoeff.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/PartialRedux_norm.cpp b/resources/3rdParty/eigen/doc/snippets/PartialRedux_norm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/PartialRedux_norm.cpp
rename to resources/3rdParty/eigen/doc/snippets/PartialRedux_norm.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/PartialRedux_prod.cpp b/resources/3rdParty/eigen/doc/snippets/PartialRedux_prod.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/PartialRedux_prod.cpp
rename to resources/3rdParty/eigen/doc/snippets/PartialRedux_prod.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/PartialRedux_squaredNorm.cpp b/resources/3rdParty/eigen/doc/snippets/PartialRedux_squaredNorm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/PartialRedux_squaredNorm.cpp
rename to resources/3rdParty/eigen/doc/snippets/PartialRedux_squaredNorm.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/PartialRedux_sum.cpp b/resources/3rdParty/eigen/doc/snippets/PartialRedux_sum.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/PartialRedux_sum.cpp
rename to resources/3rdParty/eigen/doc/snippets/PartialRedux_sum.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/RealSchur_RealSchur_MatrixType.cpp b/resources/3rdParty/eigen/doc/snippets/RealSchur_RealSchur_MatrixType.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/RealSchur_RealSchur_MatrixType.cpp
rename to resources/3rdParty/eigen/doc/snippets/RealSchur_RealSchur_MatrixType.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/RealSchur_compute.cpp b/resources/3rdParty/eigen/doc/snippets/RealSchur_compute.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/RealSchur_compute.cpp
rename to resources/3rdParty/eigen/doc/snippets/RealSchur_compute.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp b/resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
rename to resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp b/resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
rename to resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp b/resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
rename to resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType.cpp b/resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType.cpp
rename to resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType2.cpp b/resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType2.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType2.cpp
rename to resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType2.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvalues.cpp b/resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvalues.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvalues.cpp
rename to resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvalues.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp b/resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp
rename to resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_operatorInverseSqrt.cpp b/resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_operatorInverseSqrt.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_operatorInverseSqrt.cpp
rename to resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_operatorInverseSqrt.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_operatorSqrt.cpp b/resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_operatorSqrt.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/SelfAdjointEigenSolver_operatorSqrt.cpp
rename to resources/3rdParty/eigen/doc/snippets/SelfAdjointEigenSolver_operatorSqrt.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/SelfAdjointView_eigenvalues.cpp b/resources/3rdParty/eigen/doc/snippets/SelfAdjointView_eigenvalues.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/SelfAdjointView_eigenvalues.cpp
rename to resources/3rdParty/eigen/doc/snippets/SelfAdjointView_eigenvalues.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/SelfAdjointView_operatorNorm.cpp b/resources/3rdParty/eigen/doc/snippets/SelfAdjointView_operatorNorm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/SelfAdjointView_operatorNorm.cpp
rename to resources/3rdParty/eigen/doc/snippets/SelfAdjointView_operatorNorm.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/TopicAliasing_block.cpp b/resources/3rdParty/eigen/doc/snippets/TopicAliasing_block.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/TopicAliasing_block.cpp
rename to resources/3rdParty/eigen/doc/snippets/TopicAliasing_block.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/TopicAliasing_block_correct.cpp b/resources/3rdParty/eigen/doc/snippets/TopicAliasing_block_correct.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/TopicAliasing_block_correct.cpp
rename to resources/3rdParty/eigen/doc/snippets/TopicAliasing_block_correct.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/TopicAliasing_cwise.cpp b/resources/3rdParty/eigen/doc/snippets/TopicAliasing_cwise.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/TopicAliasing_cwise.cpp
rename to resources/3rdParty/eigen/doc/snippets/TopicAliasing_cwise.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/TopicAliasing_mult1.cpp b/resources/3rdParty/eigen/doc/snippets/TopicAliasing_mult1.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/TopicAliasing_mult1.cpp
rename to resources/3rdParty/eigen/doc/snippets/TopicAliasing_mult1.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/TopicAliasing_mult2.cpp b/resources/3rdParty/eigen/doc/snippets/TopicAliasing_mult2.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/TopicAliasing_mult2.cpp
rename to resources/3rdParty/eigen/doc/snippets/TopicAliasing_mult2.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/TopicAliasing_mult3.cpp b/resources/3rdParty/eigen/doc/snippets/TopicAliasing_mult3.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/TopicAliasing_mult3.cpp
rename to resources/3rdParty/eigen/doc/snippets/TopicAliasing_mult3.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/TopicStorageOrders_example.cpp b/resources/3rdParty/eigen/doc/snippets/TopicStorageOrders_example.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/TopicStorageOrders_example.cpp
rename to resources/3rdParty/eigen/doc/snippets/TopicStorageOrders_example.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tridiagonalization_Tridiagonalization_MatrixType.cpp b/resources/3rdParty/eigen/doc/snippets/Tridiagonalization_Tridiagonalization_MatrixType.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tridiagonalization_Tridiagonalization_MatrixType.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tridiagonalization_Tridiagonalization_MatrixType.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tridiagonalization_compute.cpp b/resources/3rdParty/eigen/doc/snippets/Tridiagonalization_compute.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tridiagonalization_compute.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tridiagonalization_compute.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tridiagonalization_decomposeInPlace.cpp b/resources/3rdParty/eigen/doc/snippets/Tridiagonalization_decomposeInPlace.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tridiagonalization_decomposeInPlace.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tridiagonalization_decomposeInPlace.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tridiagonalization_diagonal.cpp b/resources/3rdParty/eigen/doc/snippets/Tridiagonalization_diagonal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tridiagonalization_diagonal.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tridiagonalization_diagonal.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tridiagonalization_householderCoefficients.cpp b/resources/3rdParty/eigen/doc/snippets/Tridiagonalization_householderCoefficients.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tridiagonalization_householderCoefficients.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tridiagonalization_householderCoefficients.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tridiagonalization_packedMatrix.cpp b/resources/3rdParty/eigen/doc/snippets/Tridiagonalization_packedMatrix.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tridiagonalization_packedMatrix.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tridiagonalization_packedMatrix.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Block.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Block.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Block.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Block.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_CommaTemporary.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_CommaTemporary.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_CommaTemporary.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_CommaTemporary.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Join.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Join.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Join.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Join.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_LinSpaced.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_LinSpaced.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_LinSpaced.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_LinSpaced.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_ThreeWays.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_ThreeWays.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_ThreeWays.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_ThreeWays.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Zero.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Zero.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Zero.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_AdvancedInitialization_Zero.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_Map_rowmajor.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_Map_rowmajor.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_Map_rowmajor.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_Map_rowmajor.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_Map_using.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_Map_using.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_Map_using.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_Map_using.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_commainit_01.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_commainit_01.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_commainit_01.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_commainit_01.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_commainit_01b.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_commainit_01b.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_commainit_01b.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_commainit_01b.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_commainit_02.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_commainit_02.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_commainit_02.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_commainit_02.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_solve_matrix_inverse.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_solve_matrix_inverse.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_solve_matrix_inverse.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_solve_matrix_inverse.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_solve_multiple_rhs.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_solve_multiple_rhs.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_solve_multiple_rhs.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_solve_multiple_rhs.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_solve_reuse_decomposition.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_solve_reuse_decomposition.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_solve_reuse_decomposition.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_solve_reuse_decomposition.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_solve_singular.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_solve_singular.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_solve_singular.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_solve_singular.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_solve_triangular.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_solve_triangular.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_solve_triangular.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_solve_triangular.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Tutorial_solve_triangular_inplace.cpp b/resources/3rdParty/eigen/doc/snippets/Tutorial_solve_triangular_inplace.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Tutorial_solve_triangular_inplace.cpp
rename to resources/3rdParty/eigen/doc/snippets/Tutorial_solve_triangular_inplace.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/Vectorwise_reverse.cpp b/resources/3rdParty/eigen/doc/snippets/Vectorwise_reverse.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/Vectorwise_reverse.cpp
rename to resources/3rdParty/eigen/doc/snippets/Vectorwise_reverse.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/class_FullPivLU.cpp b/resources/3rdParty/eigen/doc/snippets/class_FullPivLU.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/class_FullPivLU.cpp
rename to resources/3rdParty/eigen/doc/snippets/class_FullPivLU.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/compile_snippet.cpp.in b/resources/3rdParty/eigen/doc/snippets/compile_snippet.cpp.in
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/compile_snippet.cpp.in
rename to resources/3rdParty/eigen/doc/snippets/compile_snippet.cpp.in
diff --git a/resources/3rdparty/eigen/doc/snippets/tut_arithmetic_redux_minmax.cpp b/resources/3rdParty/eigen/doc/snippets/tut_arithmetic_redux_minmax.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/tut_arithmetic_redux_minmax.cpp
rename to resources/3rdParty/eigen/doc/snippets/tut_arithmetic_redux_minmax.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/tut_arithmetic_transpose_aliasing.cpp b/resources/3rdParty/eigen/doc/snippets/tut_arithmetic_transpose_aliasing.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/tut_arithmetic_transpose_aliasing.cpp
rename to resources/3rdParty/eigen/doc/snippets/tut_arithmetic_transpose_aliasing.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/tut_arithmetic_transpose_conjugate.cpp b/resources/3rdParty/eigen/doc/snippets/tut_arithmetic_transpose_conjugate.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/tut_arithmetic_transpose_conjugate.cpp
rename to resources/3rdParty/eigen/doc/snippets/tut_arithmetic_transpose_conjugate.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/tut_arithmetic_transpose_inplace.cpp b/resources/3rdParty/eigen/doc/snippets/tut_arithmetic_transpose_inplace.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/tut_arithmetic_transpose_inplace.cpp
rename to resources/3rdParty/eigen/doc/snippets/tut_arithmetic_transpose_inplace.cpp
diff --git a/resources/3rdparty/eigen/doc/snippets/tut_matrix_assignment_resizing.cpp b/resources/3rdParty/eigen/doc/snippets/tut_matrix_assignment_resizing.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/snippets/tut_matrix_assignment_resizing.cpp
rename to resources/3rdParty/eigen/doc/snippets/tut_matrix_assignment_resizing.cpp
diff --git a/resources/3rdparty/eigen/doc/special_examples/CMakeLists.txt b/resources/3rdParty/eigen/doc/special_examples/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/doc/special_examples/CMakeLists.txt
rename to resources/3rdParty/eigen/doc/special_examples/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/doc/special_examples/Tutorial_sparse_example.cpp b/resources/3rdParty/eigen/doc/special_examples/Tutorial_sparse_example.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/special_examples/Tutorial_sparse_example.cpp
rename to resources/3rdParty/eigen/doc/special_examples/Tutorial_sparse_example.cpp
diff --git a/resources/3rdparty/eigen/doc/special_examples/Tutorial_sparse_example_details.cpp b/resources/3rdParty/eigen/doc/special_examples/Tutorial_sparse_example_details.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/special_examples/Tutorial_sparse_example_details.cpp
rename to resources/3rdParty/eigen/doc/special_examples/Tutorial_sparse_example_details.cpp
diff --git a/resources/3rdparty/eigen/doc/tutorial.cpp b/resources/3rdParty/eigen/doc/tutorial.cpp
similarity index 100%
rename from resources/3rdparty/eigen/doc/tutorial.cpp
rename to resources/3rdParty/eigen/doc/tutorial.cpp
diff --git a/resources/3rdparty/eigen/eigen3.pc.in b/resources/3rdParty/eigen/eigen3.pc.in
similarity index 100%
rename from resources/3rdparty/eigen/eigen3.pc.in
rename to resources/3rdParty/eigen/eigen3.pc.in
diff --git a/resources/3rdparty/eigen/failtest/CMakeLists.txt b/resources/3rdParty/eigen/failtest/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/failtest/CMakeLists.txt
rename to resources/3rdParty/eigen/failtest/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/failtest/block_nonconst_ctor_on_const_xpr_0.cpp b/resources/3rdParty/eigen/failtest/block_nonconst_ctor_on_const_xpr_0.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/block_nonconst_ctor_on_const_xpr_0.cpp
rename to resources/3rdParty/eigen/failtest/block_nonconst_ctor_on_const_xpr_0.cpp
diff --git a/resources/3rdparty/eigen/failtest/block_nonconst_ctor_on_const_xpr_1.cpp b/resources/3rdParty/eigen/failtest/block_nonconst_ctor_on_const_xpr_1.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/block_nonconst_ctor_on_const_xpr_1.cpp
rename to resources/3rdParty/eigen/failtest/block_nonconst_ctor_on_const_xpr_1.cpp
diff --git a/resources/3rdparty/eigen/failtest/block_nonconst_ctor_on_const_xpr_2.cpp b/resources/3rdParty/eigen/failtest/block_nonconst_ctor_on_const_xpr_2.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/block_nonconst_ctor_on_const_xpr_2.cpp
rename to resources/3rdParty/eigen/failtest/block_nonconst_ctor_on_const_xpr_2.cpp
diff --git a/resources/3rdparty/eigen/failtest/block_on_const_type_actually_const_0.cpp b/resources/3rdParty/eigen/failtest/block_on_const_type_actually_const_0.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/block_on_const_type_actually_const_0.cpp
rename to resources/3rdParty/eigen/failtest/block_on_const_type_actually_const_0.cpp
diff --git a/resources/3rdparty/eigen/failtest/block_on_const_type_actually_const_1.cpp b/resources/3rdParty/eigen/failtest/block_on_const_type_actually_const_1.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/block_on_const_type_actually_const_1.cpp
rename to resources/3rdParty/eigen/failtest/block_on_const_type_actually_const_1.cpp
diff --git a/resources/3rdparty/eigen/failtest/const_qualified_block_method_retval_0.cpp b/resources/3rdParty/eigen/failtest/const_qualified_block_method_retval_0.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/const_qualified_block_method_retval_0.cpp
rename to resources/3rdParty/eigen/failtest/const_qualified_block_method_retval_0.cpp
diff --git a/resources/3rdparty/eigen/failtest/const_qualified_block_method_retval_1.cpp b/resources/3rdParty/eigen/failtest/const_qualified_block_method_retval_1.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/const_qualified_block_method_retval_1.cpp
rename to resources/3rdParty/eigen/failtest/const_qualified_block_method_retval_1.cpp
diff --git a/resources/3rdparty/eigen/failtest/const_qualified_diagonal_method_retval.cpp b/resources/3rdParty/eigen/failtest/const_qualified_diagonal_method_retval.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/const_qualified_diagonal_method_retval.cpp
rename to resources/3rdParty/eigen/failtest/const_qualified_diagonal_method_retval.cpp
diff --git a/resources/3rdparty/eigen/failtest/const_qualified_transpose_method_retval.cpp b/resources/3rdParty/eigen/failtest/const_qualified_transpose_method_retval.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/const_qualified_transpose_method_retval.cpp
rename to resources/3rdParty/eigen/failtest/const_qualified_transpose_method_retval.cpp
diff --git a/resources/3rdparty/eigen/failtest/diagonal_nonconst_ctor_on_const_xpr.cpp b/resources/3rdParty/eigen/failtest/diagonal_nonconst_ctor_on_const_xpr.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/diagonal_nonconst_ctor_on_const_xpr.cpp
rename to resources/3rdParty/eigen/failtest/diagonal_nonconst_ctor_on_const_xpr.cpp
diff --git a/resources/3rdparty/eigen/failtest/diagonal_on_const_type_actually_const.cpp b/resources/3rdParty/eigen/failtest/diagonal_on_const_type_actually_const.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/diagonal_on_const_type_actually_const.cpp
rename to resources/3rdParty/eigen/failtest/diagonal_on_const_type_actually_const.cpp
diff --git a/resources/3rdparty/eigen/failtest/failtest_sanity_check.cpp b/resources/3rdParty/eigen/failtest/failtest_sanity_check.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/failtest_sanity_check.cpp
rename to resources/3rdParty/eigen/failtest/failtest_sanity_check.cpp
diff --git a/resources/3rdparty/eigen/failtest/map_nonconst_ctor_on_const_ptr_0.cpp b/resources/3rdParty/eigen/failtest/map_nonconst_ctor_on_const_ptr_0.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/map_nonconst_ctor_on_const_ptr_0.cpp
rename to resources/3rdParty/eigen/failtest/map_nonconst_ctor_on_const_ptr_0.cpp
diff --git a/resources/3rdparty/eigen/failtest/map_nonconst_ctor_on_const_ptr_1.cpp b/resources/3rdParty/eigen/failtest/map_nonconst_ctor_on_const_ptr_1.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/map_nonconst_ctor_on_const_ptr_1.cpp
rename to resources/3rdParty/eigen/failtest/map_nonconst_ctor_on_const_ptr_1.cpp
diff --git a/resources/3rdparty/eigen/failtest/map_nonconst_ctor_on_const_ptr_2.cpp b/resources/3rdParty/eigen/failtest/map_nonconst_ctor_on_const_ptr_2.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/map_nonconst_ctor_on_const_ptr_2.cpp
rename to resources/3rdParty/eigen/failtest/map_nonconst_ctor_on_const_ptr_2.cpp
diff --git a/resources/3rdparty/eigen/failtest/map_nonconst_ctor_on_const_ptr_3.cpp b/resources/3rdParty/eigen/failtest/map_nonconst_ctor_on_const_ptr_3.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/map_nonconst_ctor_on_const_ptr_3.cpp
rename to resources/3rdParty/eigen/failtest/map_nonconst_ctor_on_const_ptr_3.cpp
diff --git a/resources/3rdparty/eigen/failtest/map_nonconst_ctor_on_const_ptr_4.cpp b/resources/3rdParty/eigen/failtest/map_nonconst_ctor_on_const_ptr_4.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/map_nonconst_ctor_on_const_ptr_4.cpp
rename to resources/3rdParty/eigen/failtest/map_nonconst_ctor_on_const_ptr_4.cpp
diff --git a/resources/3rdparty/eigen/failtest/map_on_const_type_actually_const_0.cpp b/resources/3rdParty/eigen/failtest/map_on_const_type_actually_const_0.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/map_on_const_type_actually_const_0.cpp
rename to resources/3rdParty/eigen/failtest/map_on_const_type_actually_const_0.cpp
diff --git a/resources/3rdparty/eigen/failtest/map_on_const_type_actually_const_1.cpp b/resources/3rdParty/eigen/failtest/map_on_const_type_actually_const_1.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/map_on_const_type_actually_const_1.cpp
rename to resources/3rdParty/eigen/failtest/map_on_const_type_actually_const_1.cpp
diff --git a/resources/3rdparty/eigen/failtest/transpose_nonconst_ctor_on_const_xpr.cpp b/resources/3rdParty/eigen/failtest/transpose_nonconst_ctor_on_const_xpr.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/transpose_nonconst_ctor_on_const_xpr.cpp
rename to resources/3rdParty/eigen/failtest/transpose_nonconst_ctor_on_const_xpr.cpp
diff --git a/resources/3rdparty/eigen/failtest/transpose_on_const_type_actually_const.cpp b/resources/3rdParty/eigen/failtest/transpose_on_const_type_actually_const.cpp
similarity index 100%
rename from resources/3rdparty/eigen/failtest/transpose_on_const_type_actually_const.cpp
rename to resources/3rdParty/eigen/failtest/transpose_on_const_type_actually_const.cpp
diff --git a/resources/3rdparty/eigen/lapack/CMakeLists.txt b/resources/3rdParty/eigen/lapack/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/lapack/CMakeLists.txt
rename to resources/3rdParty/eigen/lapack/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/lapack/cholesky.cpp b/resources/3rdParty/eigen/lapack/cholesky.cpp
similarity index 100%
rename from resources/3rdparty/eigen/lapack/cholesky.cpp
rename to resources/3rdParty/eigen/lapack/cholesky.cpp
diff --git a/resources/3rdparty/eigen/lapack/complex_double.cpp b/resources/3rdParty/eigen/lapack/complex_double.cpp
similarity index 100%
rename from resources/3rdparty/eigen/lapack/complex_double.cpp
rename to resources/3rdParty/eigen/lapack/complex_double.cpp
diff --git a/resources/3rdparty/eigen/lapack/complex_single.cpp b/resources/3rdParty/eigen/lapack/complex_single.cpp
similarity index 100%
rename from resources/3rdparty/eigen/lapack/complex_single.cpp
rename to resources/3rdParty/eigen/lapack/complex_single.cpp
diff --git a/resources/3rdparty/eigen/lapack/double.cpp b/resources/3rdParty/eigen/lapack/double.cpp
similarity index 100%
rename from resources/3rdparty/eigen/lapack/double.cpp
rename to resources/3rdParty/eigen/lapack/double.cpp
diff --git a/resources/3rdparty/eigen/lapack/eigenvalues.cpp b/resources/3rdParty/eigen/lapack/eigenvalues.cpp
similarity index 100%
rename from resources/3rdparty/eigen/lapack/eigenvalues.cpp
rename to resources/3rdParty/eigen/lapack/eigenvalues.cpp
diff --git a/resources/3rdparty/eigen/lapack/lapack_common.h b/resources/3rdParty/eigen/lapack/lapack_common.h
similarity index 100%
rename from resources/3rdparty/eigen/lapack/lapack_common.h
rename to resources/3rdParty/eigen/lapack/lapack_common.h
diff --git a/resources/3rdparty/eigen/lapack/lu.cpp b/resources/3rdParty/eigen/lapack/lu.cpp
similarity index 100%
rename from resources/3rdparty/eigen/lapack/lu.cpp
rename to resources/3rdParty/eigen/lapack/lu.cpp
diff --git a/resources/3rdparty/eigen/lapack/single.cpp b/resources/3rdParty/eigen/lapack/single.cpp
similarity index 100%
rename from resources/3rdparty/eigen/lapack/single.cpp
rename to resources/3rdParty/eigen/lapack/single.cpp
diff --git a/resources/3rdparty/eigen/scripts/CMakeLists.txt b/resources/3rdParty/eigen/scripts/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/scripts/CMakeLists.txt
rename to resources/3rdParty/eigen/scripts/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/scripts/buildtests.in b/resources/3rdParty/eigen/scripts/buildtests.in
similarity index 100%
rename from resources/3rdparty/eigen/scripts/buildtests.in
rename to resources/3rdParty/eigen/scripts/buildtests.in
diff --git a/resources/3rdparty/eigen/scripts/check.in b/resources/3rdParty/eigen/scripts/check.in
similarity index 100%
rename from resources/3rdparty/eigen/scripts/check.in
rename to resources/3rdParty/eigen/scripts/check.in
diff --git a/resources/3rdparty/eigen/scripts/debug.in b/resources/3rdParty/eigen/scripts/debug.in
similarity index 100%
rename from resources/3rdparty/eigen/scripts/debug.in
rename to resources/3rdParty/eigen/scripts/debug.in
diff --git a/resources/3rdparty/eigen/scripts/eigen_gen_credits.cpp b/resources/3rdParty/eigen/scripts/eigen_gen_credits.cpp
similarity index 100%
rename from resources/3rdparty/eigen/scripts/eigen_gen_credits.cpp
rename to resources/3rdParty/eigen/scripts/eigen_gen_credits.cpp
diff --git a/resources/3rdParty/eigen/scripts/eigen_gen_docs b/resources/3rdParty/eigen/scripts/eigen_gen_docs
new file mode 100644
index 000000000..9c3ba28a2
--- /dev/null
+++ b/resources/3rdParty/eigen/scripts/eigen_gen_docs
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# configuration
+# Call this script with USER set as desired; otherwise a default
+# will be used
+USER=${USER:-'orzel'}
+
+#ulimit -v 1024000
+
+# step 1 : build
+mkdir build -p
+(cd build && cmake .. && make doc) || { echo "make failed"; exit 1; }
+
+#step 2 : upload
+# (the '/' at the end of the path is very important; see the rsync documentation)
+rsync -az --no-p --delete build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/dox/ || { echo "upload failed"; exit 1; }
+
+#step 3 : fix the perm
+ssh $USER@ssh.tuxfamily.org 'chmod -R g+w /home/eigen/eigen.tuxfamily.org-web/htdocs/dox-devel' || { echo "perm failed"; exit 1; }
+
+echo "Uploaded successfully"
+
diff --git a/resources/3rdparty/eigen/scripts/release.in b/resources/3rdParty/eigen/scripts/release.in
similarity index 100%
rename from resources/3rdparty/eigen/scripts/release.in
rename to resources/3rdParty/eigen/scripts/release.in
diff --git a/resources/3rdparty/eigen/scripts/relicense.py b/resources/3rdParty/eigen/scripts/relicense.py
similarity index 100%
rename from resources/3rdparty/eigen/scripts/relicense.py
rename to resources/3rdParty/eigen/scripts/relicense.py
diff --git a/resources/3rdparty/eigen/signature_of_eigen3_matrix_library b/resources/3rdParty/eigen/signature_of_eigen3_matrix_library
similarity index 100%
rename from resources/3rdparty/eigen/signature_of_eigen3_matrix_library
rename to resources/3rdParty/eigen/signature_of_eigen3_matrix_library
diff --git a/resources/3rdParty/eigen/test/CMakeLists.txt b/resources/3rdParty/eigen/test/CMakeLists.txt
new file mode 100644
index 000000000..6f8fc4ae3
--- /dev/null
+++ b/resources/3rdParty/eigen/test/CMakeLists.txt
@@ -0,0 +1,243 @@
+
+# generate split test header file
+message(STATUS ${CMAKE_CURRENT_BINARY_DIR})
+file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
+foreach(i RANGE 1 999)
+  file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h
+    "#ifdef EIGEN_TEST_PART_${i}\n"
+    "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n"
+    "#else\n"
+    "#define CALL_SUBTEST_${i}(FUNC)\n"
+    "#endif\n\n"
+    )
+endforeach()
+
+# configure blas/lapack (use Eigen's ones)
+set(BLAS_FOUND TRUE)
+set(LAPACK_FOUND TRUE)
+set(BLAS_LIBRARIES eigen_blas)
+set(LAPACK_LIBRARIES eigen_lapack)
+
+set(EIGEN_TEST_MATRIX_DIR "" CACHE STRING "Enable testing of realworld sparse matrices contained in the specified path")
+if(EIGEN_TEST_MATRIX_DIR)
+  if(NOT WIN32)
+    message(STATUS "Test realworld sparse matrices: ${EIGEN_TEST_MATRIX_DIR}")
+    add_definitions( -DTEST_REAL_CASES="${EIGEN_TEST_MATRIX_DIR}" )
+  else(NOT WIN32)
+    message(STATUS "REAL CASES CAN NOT BE CURRENTLY TESTED ON WIN32")
+  endif(NOT WIN32)
+endif(EIGEN_TEST_MATRIX_DIR)
+
+set(SPARSE_LIBS " ")
+
+find_package(Cholmod)
+if(CHOLMOD_FOUND AND BLAS_FOUND AND LAPACK_FOUND)
+  add_definitions("-DEIGEN_CHOLMOD_SUPPORT")
+  include_directories(${CHOLMOD_INCLUDES})
+  set(SPARSE_LIBS ${SPARSE_LIBS} ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES})
+  set(CHOLMOD_ALL_LIBS  ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES})
+  ei_add_property(EIGEN_TESTED_BACKENDS "Cholmod, ")
+else()
+  ei_add_property(EIGEN_MISSING_BACKENDS "Cholmod, ")
+endif()
+
+find_package(Umfpack)
+if(UMFPACK_FOUND AND BLAS_FOUND)
+  add_definitions("-DEIGEN_UMFPACK_SUPPORT")
+  include_directories(${UMFPACK_INCLUDES})
+  set(SPARSE_LIBS ${SPARSE_LIBS} ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES})
+  set(UMFPACK_ALL_LIBS ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES})
+  ei_add_property(EIGEN_TESTED_BACKENDS "UmfPack, ")
+else()
+  ei_add_property(EIGEN_MISSING_BACKENDS "UmfPack, ")
+endif()
+
+find_package(SuperLU)
+if(SUPERLU_FOUND AND BLAS_FOUND)
+  add_definitions("-DEIGEN_SUPERLU_SUPPORT")
+  include_directories(${SUPERLU_INCLUDES})
+  set(SPARSE_LIBS ${SPARSE_LIBS} ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES})
+  set(SUPERLU_ALL_LIBS ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES})
+  ei_add_property(EIGEN_TESTED_BACKENDS  "SuperLU, ")
+else()
+  ei_add_property(EIGEN_MISSING_BACKENDS  "SuperLU, ")
+endif()
+
+
+find_package(Pastix)
+find_package(Scotch)
+find_package(Metis)
+if(PASTIX_FOUND AND BLAS_FOUND)
+  add_definitions("-DEIGEN_PASTIX_SUPPORT")
+  include_directories(${PASTIX_INCLUDES})
+  if(SCOTCH_FOUND)
+    include_directories(${SCOTCH_INCLUDES})
+    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${SCOTCH_LIBRARIES})
+  elseif(METIS_FOUND)
+    include_directories(${METIS_INCLUDES})
+    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${METIS_LIBRARIES})
+  else(SCOTCH_FOUND)
+    ei_add_property(EIGEN_MISSING_BACKENDS  "PaStiX, ")
+  endif(SCOTCH_FOUND)
+  set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES} ${ORDERING_LIBRARIES} ${BLAS_LIBRARIES})
+  set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES} ${BLAS_LIBRARIES})
+  ei_add_property(EIGEN_TESTED_BACKENDS  "PaStiX, ")
+else()
+  ei_add_property(EIGEN_MISSING_BACKENDS  "PaStiX, ")
+endif()
+
+option(EIGEN_TEST_NOQT "Disable Qt support in unit tests" OFF)
+if(NOT EIGEN_TEST_NOQT)
+  find_package(Qt4)
+  if(QT4_FOUND)
+    include(${QT_USE_FILE})
+    ei_add_property(EIGEN_TESTED_BACKENDS  "Qt4 support, ")
+  else()
+    ei_add_property(EIGEN_MISSING_BACKENDS  "Qt4 support, ")
+  endif()
+endif(NOT EIGEN_TEST_NOQT)
+
+if(TEST_LIB)
+  add_definitions("-DEIGEN_EXTERN_INSTANTIATIONS=1")
+endif(TEST_LIB)
+
+ei_add_test(meta)
+ei_add_test(sizeof)
+ei_add_test(dynalloc)
+ei_add_test(nomalloc)
+ei_add_test(first_aligned)
+ei_add_test(mixingtypes)
+ei_add_test(packetmath)
+ei_add_test(unalignedassert)
+ei_add_test(vectorization_logic)
+ei_add_test(basicstuff)
+ei_add_test(linearstructure)
+ei_add_test(integer_types)
+ei_add_test(cwiseop)
+ei_add_test(unalignedcount)
+ei_add_test(exceptions)
+ei_add_test(redux)
+ei_add_test(visitor)
+ei_add_test(block)
+ei_add_test(corners)
+ei_add_test(product_small)
+ei_add_test(product_large)
+ei_add_test(product_extra)
+ei_add_test(diagonalmatrices)
+ei_add_test(adjoint)
+ei_add_test(diagonal)
+ei_add_test(miscmatrices)
+ei_add_test(commainitializer)
+ei_add_test(smallvectors)
+ei_add_test(map)
+ei_add_test(mapstride)
+ei_add_test(mapstaticmethods)
+ei_add_test(array)
+ei_add_test(array_for_matrix)
+ei_add_test(array_replicate)
+ei_add_test(array_reverse)
+ei_add_test(triangular)
+ei_add_test(selfadjoint)
+ei_add_test(product_selfadjoint)
+ei_add_test(product_symm)
+ei_add_test(product_syrk)
+ei_add_test(product_trmv)
+ei_add_test(product_trmm)
+ei_add_test(product_trsolve)
+ei_add_test(product_mmtr)
+ei_add_test(product_notemporary)
+ei_add_test(stable_norm)
+ei_add_test(bandmatrix)
+ei_add_test(cholesky)
+ei_add_test(lu)
+ei_add_test(determinant)
+ei_add_test(inverse)
+ei_add_test(qr)
+ei_add_test(qr_colpivoting)
+ei_add_test(qr_fullpivoting)
+ei_add_test(upperbidiagonalization)
+ei_add_test(hessenberg)
+ei_add_test(schur_real)
+ei_add_test(schur_complex)
+ei_add_test(eigensolver_selfadjoint)
+ei_add_test(eigensolver_generic)
+ei_add_test(eigensolver_complex)
+ei_add_test(jacobi)
+ei_add_test(jacobisvd)
+ei_add_test(geo_orthomethods)
+ei_add_test(geo_homogeneous)
+ei_add_test(geo_quaternion)
+ei_add_test(geo_transformations)
+ei_add_test(geo_eulerangles)
+ei_add_test(geo_hyperplane)
+ei_add_test(geo_parametrizedline)
+ei_add_test(geo_alignedbox)
+ei_add_test(stdvector)
+ei_add_test(stdvector_overload)
+ei_add_test(stdlist)
+ei_add_test(stddeque)
+ei_add_test(resize)
+if(QT4_FOUND)
+  ei_add_test(qtvector "" "${QT_QTCORE_LIBRARY}")
+endif(QT4_FOUND)
+ei_add_test(sparse_vector)
+ei_add_test(sparse_basic)
+ei_add_test(sparse_product)
+ei_add_test(sparse_solvers)
+ei_add_test(umeyama)
+ei_add_test(householder)
+ei_add_test(swap)
+ei_add_test(conservative_resize)
+ei_add_test(permutationmatrices)
+ei_add_test(sparse_permutations)
+ei_add_test(eigen2support)
+ei_add_test(nullary)
+ei_add_test(nesting_ops "${CMAKE_CXX_FLAGS_DEBUG}")
+ei_add_test(zerosized)
+ei_add_test(dontalign)
+ei_add_test(sizeoverflow)
+ei_add_test(prec_inverse_4x4)
+ei_add_test(vectorwiseop)
+
+ei_add_test(simplicial_cholesky)
+ei_add_test(conjugate_gradient)
+ei_add_test(bicgstab)
+
+
+if(UMFPACK_FOUND)
+  ei_add_test(umfpack_support "" "${UMFPACK_ALL_LIBS}")
+endif()
+
+if(SUPERLU_FOUND)
+  ei_add_test(superlu_support "" "${SUPERLU_ALL_LIBS}")
+endif()
+
+if(CHOLMOD_FOUND)
+  ei_add_test(cholmod_support "" "${CHOLMOD_ALL_LIBS}")
+endif()
+
+if(PARDISO_FOUND)
+  ei_add_test(pardiso_support "" "${PARDISO_ALL_LIBS}")
+endif()
+
+if(PASTIX_FOUND AND (SCOTCH_FOUND OR METIS_FOUND))
+  ei_add_test(pastix_support "" "${PASTIX_ALL_LIBS}")
+endif()
+
+string(TOLOWER "${CMAKE_CXX_COMPILER}" cmake_cxx_compiler_tolower)
+if(cmake_cxx_compiler_tolower MATCHES "qcc")
+  set(CXX_IS_QCC "ON")
+endif()
+
+ei_add_property(EIGEN_TESTING_SUMMARY "CXX:               ${CMAKE_CXX_COMPILER}\n")
+if(CMAKE_COMPILER_IS_GNUCXX AND NOT CXX_IS_QCC)
+  execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version COMMAND head -n 1 OUTPUT_VARIABLE EIGEN_CXX_VERSION_STRING OUTPUT_STRIP_TRAILING_WHITESPACE)
+  ei_add_property(EIGEN_TESTING_SUMMARY "CXX_VERSION:       ${EIGEN_CXX_VERSION_STRING}\n")
+endif()
+ei_add_property(EIGEN_TESTING_SUMMARY "CXX_FLAGS:         ${CMAKE_CXX_FLAGS}\n")
+ei_add_property(EIGEN_TESTING_SUMMARY "Sparse lib flags:  ${SPARSE_LIBS}\n")
+
+option(EIGEN_TEST_EIGEN2 "Run whole Eigen2 test suite against EIGEN2_SUPPORT" OFF)
+if(EIGEN_TEST_EIGEN2)
+  add_subdirectory(eigen2)
+endif()
diff --git a/resources/3rdparty/eigen/test/adjoint.cpp b/resources/3rdParty/eigen/test/adjoint.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/adjoint.cpp
rename to resources/3rdParty/eigen/test/adjoint.cpp
diff --git a/resources/3rdparty/eigen/test/array.cpp b/resources/3rdParty/eigen/test/array.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/array.cpp
rename to resources/3rdParty/eigen/test/array.cpp
diff --git a/resources/3rdparty/eigen/test/array_for_matrix.cpp b/resources/3rdParty/eigen/test/array_for_matrix.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/array_for_matrix.cpp
rename to resources/3rdParty/eigen/test/array_for_matrix.cpp
diff --git a/resources/3rdparty/eigen/test/array_replicate.cpp b/resources/3rdParty/eigen/test/array_replicate.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/array_replicate.cpp
rename to resources/3rdParty/eigen/test/array_replicate.cpp
diff --git a/resources/3rdparty/eigen/test/array_reverse.cpp b/resources/3rdParty/eigen/test/array_reverse.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/array_reverse.cpp
rename to resources/3rdParty/eigen/test/array_reverse.cpp
diff --git a/resources/3rdparty/eigen/test/bandmatrix.cpp b/resources/3rdParty/eigen/test/bandmatrix.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/bandmatrix.cpp
rename to resources/3rdParty/eigen/test/bandmatrix.cpp
diff --git a/resources/3rdparty/eigen/test/basicstuff.cpp b/resources/3rdParty/eigen/test/basicstuff.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/basicstuff.cpp
rename to resources/3rdParty/eigen/test/basicstuff.cpp
diff --git a/resources/3rdparty/eigen/test/bicgstab.cpp b/resources/3rdParty/eigen/test/bicgstab.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/bicgstab.cpp
rename to resources/3rdParty/eigen/test/bicgstab.cpp
diff --git a/resources/3rdparty/eigen/test/block.cpp b/resources/3rdParty/eigen/test/block.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/block.cpp
rename to resources/3rdParty/eigen/test/block.cpp
diff --git a/resources/3rdParty/eigen/test/cholesky.cpp b/resources/3rdParty/eigen/test/cholesky.cpp
new file mode 100644
index 000000000..14e01c006
--- /dev/null
+++ b/resources/3rdParty/eigen/test/cholesky.cpp
@@ -0,0 +1,310 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_NO_ASSERTION_CHECKING
+#define EIGEN_NO_ASSERTION_CHECKING
+#endif
+
+static int nb_temporaries;
+
+#define EIGEN_DENSE_STORAGE_CTOR_PLUGIN { if(size!=0) nb_temporaries++; }
+
+#include "main.h"
+#include <Eigen/Cholesky>
+#include <Eigen/QR>
+
+#define VERIFY_EVALUATION_COUNT(XPR,N) {\
+    nb_temporaries = 0; \
+    XPR; \
+    if(nb_temporaries!=N) std::cerr << "nb_temporaries == " << nb_temporaries << "\n"; \
+    VERIFY( (#XPR) && nb_temporaries==N ); \
+  }
+
+template<typename MatrixType,template <typename,int> class CholType> void test_chol_update(const MatrixType& symm)
+{
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+
+  MatrixType symmLo = symm.template triangularView<Lower>();
+  MatrixType symmUp = symm.template triangularView<Upper>();
+  MatrixType symmCpy = symm;
+
+  CholType<MatrixType,Lower> chollo(symmLo);
+  CholType<MatrixType,Upper> cholup(symmUp);
+
+  for (int k=0; k<10; ++k)
+  {
+    VectorType vec = VectorType::Random(symm.rows());
+    RealScalar sigma = internal::random<RealScalar>();
+    symmCpy += sigma * vec * vec.adjoint();
+
+    // we are doing some downdates, so it might be the case that the matrix is not SPD anymore
+    CholType<MatrixType,Lower> chol(symmCpy);
+    if(chol.info()!=Success)
+      break;
+
+    chollo.rankUpdate(vec, sigma);
+    VERIFY_IS_APPROX(symmCpy, chollo.reconstructedMatrix());
+
+    cholup.rankUpdate(vec, sigma);
+    VERIFY_IS_APPROX(symmCpy, cholup.reconstructedMatrix());
+  }
+}
+
+template<typename MatrixType> void cholesky(const MatrixType& m)
+{
+  typedef typename MatrixType::Index Index;
+  /* this test covers the following files:
+     LLT.h LDLT.h
+  */
+  Index rows = m.rows();
+  Index cols = m.cols();
+
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType;
+  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+
+  MatrixType a0 = MatrixType::Random(rows,cols);
+  VectorType vecB = VectorType::Random(rows), vecX(rows);
+  MatrixType matB = MatrixType::Random(rows,cols), matX(rows,cols);
+  SquareMatrixType symm =  a0 * a0.adjoint();
+  // let's make sure the matrix is not singular or near singular
+  for (int k=0; k<3; ++k)
+  {
+    MatrixType a1 = MatrixType::Random(rows,cols);
+    symm += a1 * a1.adjoint();
+  }
+
+  SquareMatrixType symmUp = symm.template triangularView<Upper>();
+  SquareMatrixType symmLo = symm.template triangularView<Lower>();
+
+  // to test whether Cholesky really uses only the upper triangular part, uncomment the following
+  // FIXME: currently that fails !!
+  //symm.template part<StrictlyLower>().setZero();
+
+  {
+    LLT<SquareMatrixType,Lower> chollo(symmLo);
+    VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix());
+    vecX = chollo.solve(vecB);
+    VERIFY_IS_APPROX(symm * vecX, vecB);
+    matX = chollo.solve(matB);
+    VERIFY_IS_APPROX(symm * matX, matB);
+
+    // test the upper mode
+    LLT<SquareMatrixType,Upper> cholup(symmUp);
+    VERIFY_IS_APPROX(symm, cholup.reconstructedMatrix());
+    vecX = cholup.solve(vecB);
+    VERIFY_IS_APPROX(symm * vecX, vecB);
+    matX = cholup.solve(matB);
+    VERIFY_IS_APPROX(symm * matX, matB);
+
+    MatrixType neg = -symmLo;
+    chollo.compute(neg);
+    VERIFY(chollo.info()==NumericalIssue);
+
+    VERIFY_IS_APPROX(MatrixType(chollo.matrixL().transpose().conjugate()), MatrixType(chollo.matrixU()));
+    VERIFY_IS_APPROX(MatrixType(chollo.matrixU().transpose().conjugate()), MatrixType(chollo.matrixL()));
+    VERIFY_IS_APPROX(MatrixType(cholup.matrixL().transpose().conjugate()), MatrixType(cholup.matrixU()));
+    VERIFY_IS_APPROX(MatrixType(cholup.matrixU().transpose().conjugate()), MatrixType(cholup.matrixL()));
+  }
+
+  // LDLT
+  {
+    int sign = internal::random<int>()%2 ? 1 : -1;
+
+    if(sign == -1)
+    {
+      symm = -symm; // test a negative matrix
+    }
+
+    SquareMatrixType symmUp = symm.template triangularView<Upper>();
+    SquareMatrixType symmLo = symm.template triangularView<Lower>();
+
+    LDLT<SquareMatrixType,Lower> ldltlo(symmLo);
+    VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix());
+    vecX = ldltlo.solve(vecB);
+    VERIFY_IS_APPROX(symm * vecX, vecB);
+    matX = ldltlo.solve(matB);
+    VERIFY_IS_APPROX(symm * matX, matB);
+
+    LDLT<SquareMatrixType,Upper> ldltup(symmUp);
+    VERIFY_IS_APPROX(symm, ldltup.reconstructedMatrix());
+    vecX = ldltup.solve(vecB);
+    VERIFY_IS_APPROX(symm * vecX, vecB);
+    matX = ldltup.solve(matB);
+    VERIFY_IS_APPROX(symm * matX, matB);
+
+    VERIFY_IS_APPROX(MatrixType(ldltlo.matrixL().transpose().conjugate()), MatrixType(ldltlo.matrixU()));
+    VERIFY_IS_APPROX(MatrixType(ldltlo.matrixU().transpose().conjugate()), MatrixType(ldltlo.matrixL()));
+    VERIFY_IS_APPROX(MatrixType(ldltup.matrixL().transpose().conjugate()), MatrixType(ldltup.matrixU()));
+    VERIFY_IS_APPROX(MatrixType(ldltup.matrixU().transpose().conjugate()), MatrixType(ldltup.matrixL()));
+
+    if(MatrixType::RowsAtCompileTime==Dynamic)
+    {
+      // note : each inplace permutation requires a small temporary vector (mask)
+
+      // check inplace solve
+      matX = matB;
+      VERIFY_EVALUATION_COUNT(matX = ldltlo.solve(matX), 0);
+      VERIFY_IS_APPROX(matX, ldltlo.solve(matB).eval());
+
+
+      matX = matB;
+      VERIFY_EVALUATION_COUNT(matX = ldltup.solve(matX), 0);
+      VERIFY_IS_APPROX(matX, ldltup.solve(matB).eval());
+    }
+
+    // restore
+    if(sign == -1)
+      symm = -symm;
+  }
+
+  // test some special use cases of SelfCwiseBinaryOp:
+  MatrixType m1 = MatrixType::Random(rows,cols), m2(rows,cols);
+  m2 = m1;
+  m2 += symmLo.template selfadjointView<Lower>().llt().solve(matB);
+  VERIFY_IS_APPROX(m2, m1 + symmLo.template selfadjointView<Lower>().llt().solve(matB));
+  m2 = m1;
+  m2 -= symmLo.template selfadjointView<Lower>().llt().solve(matB);
+  VERIFY_IS_APPROX(m2, m1 - symmLo.template selfadjointView<Lower>().llt().solve(matB));
+  m2 = m1;
+  m2.noalias() += symmLo.template selfadjointView<Lower>().llt().solve(matB);
+  VERIFY_IS_APPROX(m2, m1 + symmLo.template selfadjointView<Lower>().llt().solve(matB));
+  m2 = m1;
+  m2.noalias() -= symmLo.template selfadjointView<Lower>().llt().solve(matB);
+  VERIFY_IS_APPROX(m2, m1 - symmLo.template selfadjointView<Lower>().llt().solve(matB));
+
+  // update/downdate
+  CALL_SUBTEST(( test_chol_update<SquareMatrixType,LLT>(symm)  ));
+  CALL_SUBTEST(( test_chol_update<SquareMatrixType,LDLT>(symm) ));
+}
+
+template<typename MatrixType> void cholesky_cplx(const MatrixType& m)
+{
+  // classic test
+  cholesky(m);
+
+  // test mixing real and complex scalar types
+
+  typedef typename MatrixType::Index Index;
+
+  Index rows = m.rows();
+  Index cols = m.cols();
+
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> RealMatrixType;
+  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+
+  RealMatrixType a0 = RealMatrixType::Random(rows,cols);
+  VectorType vecB = VectorType::Random(rows), vecX(rows);
+  MatrixType matB = MatrixType::Random(rows,cols), matX(rows,cols);
+  RealMatrixType symm =  a0 * a0.adjoint();
+  // let's make sure the matrix is not singular or near singular
+  for (int k=0; k<3; ++k)
+  {
+    RealMatrixType a1 = RealMatrixType::Random(rows,cols);
+    symm += a1 * a1.adjoint();
+  }
+
+  {
+    RealMatrixType symmLo = symm.template triangularView<Lower>();
+
+    LLT<RealMatrixType,Lower> chollo(symmLo);
+    VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix());
+    vecX = chollo.solve(vecB);
+    VERIFY_IS_APPROX(symm * vecX, vecB);
+//     matX = chollo.solve(matB);
+//     VERIFY_IS_APPROX(symm * matX, matB);
+  }
+
+  // LDLT
+  {
+    int sign = internal::random<int>()%2 ? 1 : -1;
+
+    if(sign == -1)
+    {
+      symm = -symm; // test a negative matrix
+    }
+
+    RealMatrixType symmLo = symm.template triangularView<Lower>();
+
+    LDLT<RealMatrixType,Lower> ldltlo(symmLo);
+    VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix());
+    vecX = ldltlo.solve(vecB);
+    VERIFY_IS_APPROX(symm * vecX, vecB);
+//     matX = ldltlo.solve(matB);
+//     VERIFY_IS_APPROX(symm * matX, matB);
+  }
+}
+
+// regression test for bug 241
+template<typename MatrixType> void cholesky_bug241(const MatrixType& m)
+{
+  eigen_assert(m.rows() == 2 && m.cols() == 2);
+
+  typedef typename MatrixType::Scalar Scalar;
+  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+
+  MatrixType matA;
+  matA << 1, 1, 1, 1;
+  VectorType vecB;
+  vecB << 1, 1;
+  VectorType vecX = matA.ldlt().solve(vecB);
+  VERIFY_IS_APPROX(matA * vecX, vecB);
+}
+
+template<typename MatrixType> void cholesky_verify_assert()
+{
+  MatrixType tmp;
+
+  LLT<MatrixType> llt;
+  VERIFY_RAISES_ASSERT(llt.matrixL())
+  VERIFY_RAISES_ASSERT(llt.matrixU())
+  VERIFY_RAISES_ASSERT(llt.solve(tmp))
+  VERIFY_RAISES_ASSERT(llt.solveInPlace(&tmp))
+
+  LDLT<MatrixType> ldlt;
+  VERIFY_RAISES_ASSERT(ldlt.matrixL())
+  VERIFY_RAISES_ASSERT(ldlt.permutationP())
+  VERIFY_RAISES_ASSERT(ldlt.vectorD())
+  VERIFY_RAISES_ASSERT(ldlt.isPositive())
+  VERIFY_RAISES_ASSERT(ldlt.isNegative())
+  VERIFY_RAISES_ASSERT(ldlt.solve(tmp))
+  VERIFY_RAISES_ASSERT(ldlt.solveInPlace(&tmp))
+}
+
+void test_cholesky()
+{
+  int s;
+  for(int i = 0; i < g_repeat; i++) {
+    CALL_SUBTEST_1( cholesky(Matrix<double,1,1>()) );
+    CALL_SUBTEST_3( cholesky(Matrix2d()) );
+    CALL_SUBTEST_3( cholesky_bug241(Matrix2d()) );
+    CALL_SUBTEST_4( cholesky(Matrix3f()) );
+    CALL_SUBTEST_5( cholesky(Matrix4d()) );
+    s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);
+    CALL_SUBTEST_2( cholesky(MatrixXd(s,s)) );
+    s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2);
+    CALL_SUBTEST_6( cholesky_cplx(MatrixXcd(s,s)) );
+  }
+
+  CALL_SUBTEST_4( cholesky_verify_assert<Matrix3f>() );
+  CALL_SUBTEST_7( cholesky_verify_assert<Matrix3d>() );
+  CALL_SUBTEST_8( cholesky_verify_assert<MatrixXf>() );
+  CALL_SUBTEST_2( cholesky_verify_assert<MatrixXd>() );
+
+  // Test problem size constructors
+  CALL_SUBTEST_9( LLT<MatrixXf>(10) );
+  CALL_SUBTEST_9( LDLT<MatrixXf>(10) );
+  
+  EIGEN_UNUSED_VARIABLE(s)
+}
diff --git a/resources/3rdparty/eigen/test/cholmod_support.cpp b/resources/3rdParty/eigen/test/cholmod_support.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/cholmod_support.cpp
rename to resources/3rdParty/eigen/test/cholmod_support.cpp
diff --git a/resources/3rdparty/eigen/test/commainitializer.cpp b/resources/3rdParty/eigen/test/commainitializer.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/commainitializer.cpp
rename to resources/3rdParty/eigen/test/commainitializer.cpp
diff --git a/resources/3rdparty/eigen/test/conjugate_gradient.cpp b/resources/3rdParty/eigen/test/conjugate_gradient.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/conjugate_gradient.cpp
rename to resources/3rdParty/eigen/test/conjugate_gradient.cpp
diff --git a/resources/3rdparty/eigen/test/conservative_resize.cpp b/resources/3rdParty/eigen/test/conservative_resize.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/conservative_resize.cpp
rename to resources/3rdParty/eigen/test/conservative_resize.cpp
diff --git a/resources/3rdparty/eigen/test/corners.cpp b/resources/3rdParty/eigen/test/corners.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/corners.cpp
rename to resources/3rdParty/eigen/test/corners.cpp
diff --git a/resources/3rdparty/eigen/test/cwiseop.cpp b/resources/3rdParty/eigen/test/cwiseop.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/cwiseop.cpp
rename to resources/3rdParty/eigen/test/cwiseop.cpp
diff --git a/resources/3rdparty/eigen/test/determinant.cpp b/resources/3rdParty/eigen/test/determinant.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/determinant.cpp
rename to resources/3rdParty/eigen/test/determinant.cpp
diff --git a/resources/3rdParty/eigen/test/diagonal.cpp b/resources/3rdParty/eigen/test/diagonal.cpp
new file mode 100644
index 000000000..95cd10372
--- /dev/null
+++ b/resources/3rdParty/eigen/test/diagonal.cpp
@@ -0,0 +1,83 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+template<typename MatrixType> void diagonal(const MatrixType& m)
+{
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+  typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType;
+
+  Index rows = m.rows();
+  Index cols = m.cols();
+
+  MatrixType m1 = MatrixType::Random(rows, cols),
+             m2 = MatrixType::Random(rows, cols);
+
+  // check diagonal()
+  VERIFY_IS_APPROX(m1.diagonal(), m1.transpose().diagonal());
+  m2.diagonal() = 2 * m1.diagonal();
+  m2.diagonal()[0] *= 3;
+
+  if (rows>2)
+  {
+    enum {
+      N1 = MatrixType::RowsAtCompileTime>1 ?  1 : 0,
+      N2 = MatrixType::RowsAtCompileTime>2 ? -2 : 0
+    };
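+    // N1 and N2 are compile-time diagonal offsets (+1 and -2); they fall back to 0,
+    // i.e. the main diagonal, when the compile-time size is dynamic or too small.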
+
+    // check sub/super diagonal
+    if(m1.template diagonal<N1>().RowsAtCompileTime!=Dynamic)
+    {
+      VERIFY(m1.template diagonal<N1>().RowsAtCompileTime == m1.diagonal(N1).size());
+    }
+    if(m1.template diagonal<N2>().RowsAtCompileTime!=Dynamic)
+    {
+      VERIFY(m1.template diagonal<N2>().RowsAtCompileTime == m1.diagonal(N2).size());
+    }
+
+    m2.template diagonal<N1>() = 2 * m1.template diagonal<N1>();
+    VERIFY_IS_APPROX(m2.template diagonal<N1>(), static_cast<Scalar>(2) * m1.diagonal(N1));
+    m2.template diagonal<N1>()[0] *= 3;
+    VERIFY_IS_APPROX(m2.template diagonal<N1>()[0], static_cast<Scalar>(6) * m1.template diagonal<N1>()[0]);
+
+
+    m2.template diagonal<N2>() = 2 * m1.template diagonal<N2>();
+    m2.template diagonal<N2>()[0] *= 3;
+    VERIFY_IS_APPROX(m2.template diagonal<N2>()[0], static_cast<Scalar>(6) * m1.template diagonal<N2>()[0]);
+
+    m2.diagonal(N1) = 2 * m1.diagonal(N1);
+    VERIFY_IS_APPROX(m2.diagonal<N1>(), static_cast<Scalar>(2) * m1.diagonal(N1));
+    m2.diagonal(N1)[0] *= 3;
+    VERIFY_IS_APPROX(m2.diagonal(N1)[0], static_cast<Scalar>(6) * m1.diagonal(N1)[0]);
+
+    m2.diagonal(N2) = 2 * m1.diagonal(N2);
+    VERIFY_IS_APPROX(m2.diagonal<N2>(), static_cast<Scalar>(2) * m1.diagonal(N2));
+    m2.diagonal(N2)[0] *= 3;
+    VERIFY_IS_APPROX(m2.diagonal(N2)[0], static_cast<Scalar>(6) * m1.diagonal(N2)[0]);
+  }
+}
+
+void test_diagonal()
+{
+  for(int i = 0; i < g_repeat; i++) {
+    CALL_SUBTEST_1( diagonal(Matrix<float, 1, 1>()) );
+    CALL_SUBTEST_1( diagonal(Matrix<float, 4, 9>()) );
+    CALL_SUBTEST_1( diagonal(Matrix<float, 7, 3>()) );
+    CALL_SUBTEST_2( diagonal(Matrix4d()) );
+    CALL_SUBTEST_2( diagonal(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+    CALL_SUBTEST_2( diagonal(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+    CALL_SUBTEST_2( diagonal(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+    CALL_SUBTEST_1( diagonal(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+    CALL_SUBTEST_1( diagonal(Matrix<float,Dynamic,4>(3, 4)) );
+  }
+}
diff --git a/resources/3rdparty/eigen/test/diagonalmatrices.cpp b/resources/3rdParty/eigen/test/diagonalmatrices.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/diagonalmatrices.cpp
rename to resources/3rdParty/eigen/test/diagonalmatrices.cpp
diff --git a/resources/3rdparty/eigen/test/dontalign.cpp b/resources/3rdParty/eigen/test/dontalign.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/dontalign.cpp
rename to resources/3rdParty/eigen/test/dontalign.cpp
diff --git a/resources/3rdparty/eigen/test/dynalloc.cpp b/resources/3rdParty/eigen/test/dynalloc.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/dynalloc.cpp
rename to resources/3rdParty/eigen/test/dynalloc.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/CMakeLists.txt b/resources/3rdParty/eigen/test/eigen2/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/CMakeLists.txt
rename to resources/3rdParty/eigen/test/eigen2/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_adjoint.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_adjoint.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_adjoint.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_adjoint.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_alignedbox.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_alignedbox.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_alignedbox.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_alignedbox.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_array.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_array.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_array.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_array.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_basicstuff.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_basicstuff.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_basicstuff.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_basicstuff.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_bug_132.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_bug_132.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_bug_132.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_bug_132.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_cholesky.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_cholesky.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_cholesky.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_cholesky.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_commainitializer.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_commainitializer.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_commainitializer.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_commainitializer.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_cwiseop.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_cwiseop.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_cwiseop.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_cwiseop.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_determinant.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_determinant.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_determinant.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_determinant.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_dynalloc.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_dynalloc.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_dynalloc.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_dynalloc.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_eigensolver.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_eigensolver.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_eigensolver.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_eigensolver.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_first_aligned.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_first_aligned.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_first_aligned.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_first_aligned.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_geometry.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_geometry.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_geometry.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_geometry.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_geometry_with_eigen2_prefix.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_geometry_with_eigen2_prefix.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_geometry_with_eigen2_prefix.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_geometry_with_eigen2_prefix.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_hyperplane.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_hyperplane.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_hyperplane.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_hyperplane.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_inverse.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_inverse.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_inverse.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_inverse.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_linearstructure.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_linearstructure.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_linearstructure.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_linearstructure.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_lu.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_lu.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_lu.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_lu.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_map.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_map.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_map.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_map.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_meta.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_meta.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_meta.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_meta.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_miscmatrices.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_miscmatrices.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_miscmatrices.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_miscmatrices.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_mixingtypes.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_mixingtypes.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_mixingtypes.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_mixingtypes.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_newstdvector.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_newstdvector.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_newstdvector.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_newstdvector.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_nomalloc.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_nomalloc.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_nomalloc.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_nomalloc.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_packetmath.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_packetmath.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_packetmath.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_packetmath.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_parametrizedline.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_parametrizedline.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_parametrizedline.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_parametrizedline.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_prec_inverse_4x4.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_prec_inverse_4x4.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_prec_inverse_4x4.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_prec_inverse_4x4.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_product_large.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_product_large.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_product_large.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_product_large.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_product_small.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_product_small.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_product_small.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_product_small.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_qr.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_qr.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_qr.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_qr.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_qtvector.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_qtvector.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_qtvector.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_qtvector.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_regression.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_regression.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_regression.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_regression.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_sizeof.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_sizeof.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_sizeof.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_sizeof.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_smallvectors.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_smallvectors.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_smallvectors.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_smallvectors.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_sparse_basic.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_sparse_basic.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_sparse_basic.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_sparse_basic.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_sparse_product.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_sparse_product.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_sparse_product.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_sparse_product.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_sparse_solvers.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_sparse_solvers.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_sparse_solvers.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_sparse_solvers.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_sparse_vector.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_sparse_vector.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_sparse_vector.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_sparse_vector.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_stdvector.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_stdvector.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_stdvector.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_stdvector.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_submatrices.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_submatrices.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_submatrices.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_submatrices.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_sum.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_sum.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_sum.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_sum.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_svd.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_svd.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_svd.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_svd.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_swap.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_swap.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_swap.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_swap.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_triangular.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_triangular.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_triangular.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_triangular.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_unalignedassert.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_unalignedassert.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_unalignedassert.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_unalignedassert.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/eigen2_visitor.cpp b/resources/3rdParty/eigen/test/eigen2/eigen2_visitor.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/eigen2_visitor.cpp
rename to resources/3rdParty/eigen/test/eigen2/eigen2_visitor.cpp
diff --git a/resources/3rdparty/eigen/test/eigen2/gsl_helper.h b/resources/3rdParty/eigen/test/eigen2/gsl_helper.h
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/gsl_helper.h
rename to resources/3rdParty/eigen/test/eigen2/gsl_helper.h
diff --git a/resources/3rdparty/eigen/test/eigen2/main.h b/resources/3rdParty/eigen/test/eigen2/main.h
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/main.h
rename to resources/3rdParty/eigen/test/eigen2/main.h
diff --git a/resources/3rdparty/eigen/test/eigen2/product.h b/resources/3rdParty/eigen/test/eigen2/product.h
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/product.h
rename to resources/3rdParty/eigen/test/eigen2/product.h
diff --git a/resources/3rdparty/eigen/test/eigen2/runtest.sh b/resources/3rdParty/eigen/test/eigen2/runtest.sh
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/runtest.sh
rename to resources/3rdParty/eigen/test/eigen2/runtest.sh
diff --git a/resources/3rdparty/eigen/test/eigen2/sparse.h b/resources/3rdParty/eigen/test/eigen2/sparse.h
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/sparse.h
rename to resources/3rdParty/eigen/test/eigen2/sparse.h
diff --git a/resources/3rdparty/eigen/test/eigen2/testsuite.cmake b/resources/3rdParty/eigen/test/eigen2/testsuite.cmake
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2/testsuite.cmake
rename to resources/3rdParty/eigen/test/eigen2/testsuite.cmake
diff --git a/resources/3rdparty/eigen/test/eigen2support.cpp b/resources/3rdParty/eigen/test/eigen2support.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigen2support.cpp
rename to resources/3rdParty/eigen/test/eigen2support.cpp
diff --git a/resources/3rdParty/eigen/test/eigensolver_complex.cpp b/resources/3rdParty/eigen/test/eigensolver_complex.cpp
new file mode 100644
index 000000000..0c2059512
--- /dev/null
+++ b/resources/3rdParty/eigen/test/eigensolver_complex.cpp
@@ -0,0 +1,115 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <limits>
+#include <Eigen/Eigenvalues>
+#include <Eigen/LU>
+
+/* Check that two column vectors are approximately equal up to a permutation,
+   by checking that the k-th power sums are equal for k = 1, ..., vec1.rows() */
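+/* (Rationale: if the first n power sums of two n-element multisets agree, Newton's
+   identities force their elementary symmetric polynomials, and hence the multisets
+   themselves, to agree.) */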
+template<typename VectorType>
+void verify_is_approx_upto_permutation(const VectorType& vec1, const VectorType& vec2)
+{
+  typedef typename NumTraits<typename VectorType::Scalar>::Real RealScalar;
+
+  VERIFY(vec1.cols() == 1);
+  VERIFY(vec2.cols() == 1);
+  VERIFY(vec1.rows() == vec2.rows());
+  for (int k = 1; k <= vec1.rows(); ++k)
+  {
+    VERIFY_IS_APPROX(vec1.array().pow(RealScalar(k)).sum(), vec2.array().pow(RealScalar(k)).sum());
+  }
+}
+
+
+template<typename MatrixType> void eigensolver(const MatrixType& m)
+{
+  typedef typename MatrixType::Index Index;
+  /* this test covers the following files:
+     ComplexEigenSolver.h, and indirectly ComplexSchur.h
+  */
+  Index rows = m.rows();
+  Index cols = m.cols();
+
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+  typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealVectorType;
+  typedef typename std::complex<typename NumTraits<typename MatrixType::Scalar>::Real> Complex;
+
+  MatrixType a = MatrixType::Random(rows,cols);
+  MatrixType symmA =  a.adjoint() * a;
+
+  ComplexEigenSolver<MatrixType> ei0(symmA);
+  VERIFY_IS_EQUAL(ei0.info(), Success);
+  VERIFY_IS_APPROX(symmA * ei0.eigenvectors(), ei0.eigenvectors() * ei0.eigenvalues().asDiagonal());
+
+  ComplexEigenSolver<MatrixType> ei1(a);
+  VERIFY_IS_EQUAL(ei1.info(), Success);
+  VERIFY_IS_APPROX(a * ei1.eigenvectors(), ei1.eigenvectors() * ei1.eigenvalues().asDiagonal());
+  // Note: if MatrixType is real then a.eigenvalues() uses EigenSolver and thus
+  // another algorithm, so the results may differ slightly.
+  verify_is_approx_upto_permutation(a.eigenvalues(), ei1.eigenvalues());
+
+  ComplexEigenSolver<MatrixType> eiNoEivecs(a, false);
+  VERIFY_IS_EQUAL(eiNoEivecs.info(), Success);
+  VERIFY_IS_APPROX(ei1.eigenvalues(), eiNoEivecs.eigenvalues());
+
+  // Regression test for issue #66
+  MatrixType z = MatrixType::Zero(rows,cols);
+  ComplexEigenSolver<MatrixType> eiz(z);
+  VERIFY((eiz.eigenvalues().cwiseEqual(0)).all());
+
+  MatrixType id = MatrixType::Identity(rows, cols);
+  VERIFY_IS_APPROX(id.operatorNorm(), RealScalar(1));
+
+  if (rows > 1)
+  {
+    // Test matrix with NaN
+    a(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
+    ComplexEigenSolver<MatrixType> eiNaN(a);
+    VERIFY_IS_EQUAL(eiNaN.info(), NoConvergence);
+  }
+}
+
+template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m)
+{
+  ComplexEigenSolver<MatrixType> eig;
+  VERIFY_RAISES_ASSERT(eig.eigenvectors());
+  VERIFY_RAISES_ASSERT(eig.eigenvalues());
+
+  MatrixType a = MatrixType::Random(m.rows(),m.cols());
+  eig.compute(a, false);
+  VERIFY_RAISES_ASSERT(eig.eigenvectors());
+}
+
+void test_eigensolver_complex()
+{
+  int s;
+  for(int i = 0; i < g_repeat; i++) {
+    CALL_SUBTEST_1( eigensolver(Matrix4cf()) );
+    s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
+    CALL_SUBTEST_2( eigensolver(MatrixXcd(s,s)) );
+    CALL_SUBTEST_3( eigensolver(Matrix<std::complex<float>, 1, 1>()) );
+    CALL_SUBTEST_4( eigensolver(Matrix3f()) );
+  }
+
+  CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4cf()) );
+  s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
+  CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXcd(s,s)) );
+  CALL_SUBTEST_3( eigensolver_verify_assert(Matrix<std::complex<float>, 1, 1>()) );
+  CALL_SUBTEST_4( eigensolver_verify_assert(Matrix3f()) );
+
+  // Test problem size constructors
+  CALL_SUBTEST_5(ComplexEigenSolver<MatrixXf>(s));
+  
+  EIGEN_UNUSED_VARIABLE(s)
+}
diff --git a/resources/3rdParty/eigen/test/eigensolver_generic.cpp b/resources/3rdParty/eigen/test/eigensolver_generic.cpp
new file mode 100644
index 000000000..0b55ccd93
--- /dev/null
+++ b/resources/3rdParty/eigen/test/eigensolver_generic.cpp
@@ -0,0 +1,115 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <limits>
+#include <Eigen/Eigenvalues>
+
+template<typename MatrixType> void eigensolver(const MatrixType& m)
+{
+  typedef typename MatrixType::Index Index;
+  /* this test covers the following files:
+     EigenSolver.h
+  */
+  Index rows = m.rows();
+  Index cols = m.cols();
+
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+  typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealVectorType;
+  typedef typename std::complex<typename NumTraits<typename MatrixType::Scalar>::Real> Complex;
+
+  MatrixType a = MatrixType::Random(rows,cols);
+  MatrixType a1 = MatrixType::Random(rows,cols);
+  MatrixType symmA =  a.adjoint() * a + a1.adjoint() * a1;
+
+  EigenSolver<MatrixType> ei0(symmA);
+  VERIFY_IS_EQUAL(ei0.info(), Success);
+  VERIFY_IS_APPROX(symmA * ei0.pseudoEigenvectors(), ei0.pseudoEigenvectors() * ei0.pseudoEigenvalueMatrix());
+  VERIFY_IS_APPROX((symmA.template cast<Complex>()) * (ei0.pseudoEigenvectors().template cast<Complex>()),
+    (ei0.pseudoEigenvectors().template cast<Complex>()) * (ei0.eigenvalues().asDiagonal()));
+
+  EigenSolver<MatrixType> ei1(a);
+  VERIFY_IS_EQUAL(ei1.info(), Success);
+  VERIFY_IS_APPROX(a * ei1.pseudoEigenvectors(), ei1.pseudoEigenvectors() * ei1.pseudoEigenvalueMatrix());
+  VERIFY_IS_APPROX(a.template cast<Complex>() * ei1.eigenvectors(),
+                   ei1.eigenvectors() * ei1.eigenvalues().asDiagonal());
+  VERIFY_IS_APPROX(ei1.eigenvectors().colwise().norm(), RealVectorType::Ones(rows).transpose());
+  VERIFY_IS_APPROX(a.eigenvalues(), ei1.eigenvalues());
+
+  EigenSolver<MatrixType> eiNoEivecs(a, false);
+  VERIFY_IS_EQUAL(eiNoEivecs.info(), Success);
+  VERIFY_IS_APPROX(ei1.eigenvalues(), eiNoEivecs.eigenvalues());
+  VERIFY_IS_APPROX(ei1.pseudoEigenvalueMatrix(), eiNoEivecs.pseudoEigenvalueMatrix());
+
+  MatrixType id = MatrixType::Identity(rows, cols);
+  VERIFY_IS_APPROX(id.operatorNorm(), RealScalar(1));
+
+  if (rows > 2)
+  {
+    // Test matrix with NaN
+    a(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
+    EigenSolver<MatrixType> eiNaN(a);
+    VERIFY_IS_EQUAL(eiNaN.info(), NoConvergence);
+  }
+}
+
+template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m)
+{
+  EigenSolver<MatrixType> eig;
+  VERIFY_RAISES_ASSERT(eig.eigenvectors());
+  VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors());
+  VERIFY_RAISES_ASSERT(eig.pseudoEigenvalueMatrix());
+  VERIFY_RAISES_ASSERT(eig.eigenvalues());
+
+  MatrixType a = MatrixType::Random(m.rows(),m.cols());
+  eig.compute(a, false);
+  VERIFY_RAISES_ASSERT(eig.eigenvectors());
+  VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors());
+}
+
+void test_eigensolver_generic()
+{
+  int s;
+  for(int i = 0; i < g_repeat; i++) {
+    CALL_SUBTEST_1( eigensolver(Matrix4f()) );
+    s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
+    CALL_SUBTEST_2( eigensolver(MatrixXd(s,s)) );
+
+    // some trivial but implementation-wise tricky cases
+    CALL_SUBTEST_2( eigensolver(MatrixXd(1,1)) );
+    CALL_SUBTEST_2( eigensolver(MatrixXd(2,2)) );
+    CALL_SUBTEST_3( eigensolver(Matrix<double,1,1>()) );
+    CALL_SUBTEST_4( eigensolver(Matrix2d()) );
+  }
+
+  CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4f()) );
+  s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
+  CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXd(s,s)) );
+  CALL_SUBTEST_3( eigensolver_verify_assert(Matrix<double,1,1>()) );
+  CALL_SUBTEST_4( eigensolver_verify_assert(Matrix2d()) );
+
+  // Test problem size constructors
+  CALL_SUBTEST_5(EigenSolver<MatrixXf>(s));
+
+  // regression test for bug 410
+  CALL_SUBTEST_2(
+  {
+     MatrixXd A(1,1);
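+     // std::sqrt(-1.) produces a NaN entry on purpose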
+     A(0,0) = std::sqrt(-1.);
+     Eigen::EigenSolver<MatrixXd> solver(A);
+     MatrixXd V(1, 1);
+     V(0,0) = solver.eigenvectors()(0,0).real();
+  }
+  );
+  
+  EIGEN_UNUSED_VARIABLE(s)
+}
diff --git a/resources/3rdparty/eigen/test/eigensolver_selfadjoint.cpp b/resources/3rdParty/eigen/test/eigensolver_selfadjoint.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/eigensolver_selfadjoint.cpp
rename to resources/3rdParty/eigen/test/eigensolver_selfadjoint.cpp
diff --git a/resources/3rdparty/eigen/test/exceptions.cpp b/resources/3rdParty/eigen/test/exceptions.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/exceptions.cpp
rename to resources/3rdParty/eigen/test/exceptions.cpp
diff --git a/resources/3rdparty/eigen/test/first_aligned.cpp b/resources/3rdParty/eigen/test/first_aligned.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/first_aligned.cpp
rename to resources/3rdParty/eigen/test/first_aligned.cpp
diff --git a/resources/3rdparty/eigen/test/geo_alignedbox.cpp b/resources/3rdParty/eigen/test/geo_alignedbox.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/geo_alignedbox.cpp
rename to resources/3rdParty/eigen/test/geo_alignedbox.cpp
diff --git a/resources/3rdparty/eigen/test/geo_eulerangles.cpp b/resources/3rdParty/eigen/test/geo_eulerangles.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/geo_eulerangles.cpp
rename to resources/3rdParty/eigen/test/geo_eulerangles.cpp
diff --git a/resources/3rdparty/eigen/test/geo_homogeneous.cpp b/resources/3rdParty/eigen/test/geo_homogeneous.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/geo_homogeneous.cpp
rename to resources/3rdParty/eigen/test/geo_homogeneous.cpp
diff --git a/resources/3rdparty/eigen/test/geo_hyperplane.cpp b/resources/3rdParty/eigen/test/geo_hyperplane.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/geo_hyperplane.cpp
rename to resources/3rdParty/eigen/test/geo_hyperplane.cpp
diff --git a/resources/3rdparty/eigen/test/geo_orthomethods.cpp b/resources/3rdParty/eigen/test/geo_orthomethods.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/geo_orthomethods.cpp
rename to resources/3rdParty/eigen/test/geo_orthomethods.cpp
diff --git a/resources/3rdparty/eigen/test/geo_parametrizedline.cpp b/resources/3rdParty/eigen/test/geo_parametrizedline.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/geo_parametrizedline.cpp
rename to resources/3rdParty/eigen/test/geo_parametrizedline.cpp
diff --git a/resources/3rdparty/eigen/test/geo_quaternion.cpp b/resources/3rdParty/eigen/test/geo_quaternion.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/geo_quaternion.cpp
rename to resources/3rdParty/eigen/test/geo_quaternion.cpp
diff --git a/resources/3rdparty/eigen/test/geo_transformations.cpp b/resources/3rdParty/eigen/test/geo_transformations.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/geo_transformations.cpp
rename to resources/3rdParty/eigen/test/geo_transformations.cpp
diff --git a/resources/3rdparty/eigen/test/hessenberg.cpp b/resources/3rdParty/eigen/test/hessenberg.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/hessenberg.cpp
rename to resources/3rdParty/eigen/test/hessenberg.cpp
diff --git a/resources/3rdparty/eigen/test/householder.cpp b/resources/3rdParty/eigen/test/householder.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/householder.cpp
rename to resources/3rdParty/eigen/test/householder.cpp
diff --git a/resources/3rdparty/eigen/test/integer_types.cpp b/resources/3rdParty/eigen/test/integer_types.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/integer_types.cpp
rename to resources/3rdParty/eigen/test/integer_types.cpp
diff --git a/resources/3rdparty/eigen/test/inverse.cpp b/resources/3rdParty/eigen/test/inverse.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/inverse.cpp
rename to resources/3rdParty/eigen/test/inverse.cpp
diff --git a/resources/3rdparty/eigen/test/jacobi.cpp b/resources/3rdParty/eigen/test/jacobi.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/jacobi.cpp
rename to resources/3rdParty/eigen/test/jacobi.cpp
diff --git a/resources/3rdparty/eigen/test/jacobisvd.cpp b/resources/3rdParty/eigen/test/jacobisvd.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/jacobisvd.cpp
rename to resources/3rdParty/eigen/test/jacobisvd.cpp
diff --git a/resources/3rdparty/eigen/test/linearstructure.cpp b/resources/3rdParty/eigen/test/linearstructure.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/linearstructure.cpp
rename to resources/3rdParty/eigen/test/linearstructure.cpp
diff --git a/resources/3rdparty/eigen/test/lu.cpp b/resources/3rdParty/eigen/test/lu.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/lu.cpp
rename to resources/3rdParty/eigen/test/lu.cpp
diff --git a/resources/3rdparty/eigen/test/main.h b/resources/3rdParty/eigen/test/main.h
similarity index 100%
rename from resources/3rdparty/eigen/test/main.h
rename to resources/3rdParty/eigen/test/main.h
diff --git a/resources/3rdparty/eigen/test/map.cpp b/resources/3rdParty/eigen/test/map.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/map.cpp
rename to resources/3rdParty/eigen/test/map.cpp
diff --git a/resources/3rdparty/eigen/test/mapstaticmethods.cpp b/resources/3rdParty/eigen/test/mapstaticmethods.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/mapstaticmethods.cpp
rename to resources/3rdParty/eigen/test/mapstaticmethods.cpp
diff --git a/resources/3rdparty/eigen/test/mapstride.cpp b/resources/3rdParty/eigen/test/mapstride.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/mapstride.cpp
rename to resources/3rdParty/eigen/test/mapstride.cpp
diff --git a/resources/3rdparty/eigen/test/meta.cpp b/resources/3rdParty/eigen/test/meta.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/meta.cpp
rename to resources/3rdParty/eigen/test/meta.cpp
diff --git a/resources/3rdparty/eigen/test/miscmatrices.cpp b/resources/3rdParty/eigen/test/miscmatrices.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/miscmatrices.cpp
rename to resources/3rdParty/eigen/test/miscmatrices.cpp
diff --git a/resources/3rdparty/eigen/test/mixingtypes.cpp b/resources/3rdParty/eigen/test/mixingtypes.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/mixingtypes.cpp
rename to resources/3rdParty/eigen/test/mixingtypes.cpp
diff --git a/resources/3rdparty/eigen/test/nesting_ops.cpp b/resources/3rdParty/eigen/test/nesting_ops.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/nesting_ops.cpp
rename to resources/3rdParty/eigen/test/nesting_ops.cpp
diff --git a/resources/3rdparty/eigen/test/nomalloc.cpp b/resources/3rdParty/eigen/test/nomalloc.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/nomalloc.cpp
rename to resources/3rdParty/eigen/test/nomalloc.cpp
diff --git a/resources/3rdparty/eigen/test/nullary.cpp b/resources/3rdParty/eigen/test/nullary.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/nullary.cpp
rename to resources/3rdParty/eigen/test/nullary.cpp
diff --git a/resources/3rdparty/eigen/test/packetmath.cpp b/resources/3rdParty/eigen/test/packetmath.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/packetmath.cpp
rename to resources/3rdParty/eigen/test/packetmath.cpp
diff --git a/resources/3rdparty/eigen/test/pardiso_support.cpp b/resources/3rdParty/eigen/test/pardiso_support.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/pardiso_support.cpp
rename to resources/3rdParty/eigen/test/pardiso_support.cpp
diff --git a/resources/3rdparty/eigen/test/pastix_support.cpp b/resources/3rdParty/eigen/test/pastix_support.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/pastix_support.cpp
rename to resources/3rdParty/eigen/test/pastix_support.cpp
diff --git a/resources/3rdparty/eigen/test/permutationmatrices.cpp b/resources/3rdParty/eigen/test/permutationmatrices.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/permutationmatrices.cpp
rename to resources/3rdParty/eigen/test/permutationmatrices.cpp
diff --git a/resources/3rdparty/eigen/test/prec_inverse_4x4.cpp b/resources/3rdParty/eigen/test/prec_inverse_4x4.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/prec_inverse_4x4.cpp
rename to resources/3rdParty/eigen/test/prec_inverse_4x4.cpp
diff --git a/resources/3rdparty/eigen/test/product.h b/resources/3rdParty/eigen/test/product.h
similarity index 100%
rename from resources/3rdparty/eigen/test/product.h
rename to resources/3rdParty/eigen/test/product.h
diff --git a/resources/3rdparty/eigen/test/product_extra.cpp b/resources/3rdParty/eigen/test/product_extra.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/product_extra.cpp
rename to resources/3rdParty/eigen/test/product_extra.cpp
diff --git a/resources/3rdparty/eigen/test/product_large.cpp b/resources/3rdParty/eigen/test/product_large.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/product_large.cpp
rename to resources/3rdParty/eigen/test/product_large.cpp
diff --git a/resources/3rdparty/eigen/test/product_mmtr.cpp b/resources/3rdParty/eigen/test/product_mmtr.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/product_mmtr.cpp
rename to resources/3rdParty/eigen/test/product_mmtr.cpp
diff --git a/resources/3rdparty/eigen/test/product_notemporary.cpp b/resources/3rdParty/eigen/test/product_notemporary.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/product_notemporary.cpp
rename to resources/3rdParty/eigen/test/product_notemporary.cpp
diff --git a/resources/3rdparty/eigen/test/product_selfadjoint.cpp b/resources/3rdParty/eigen/test/product_selfadjoint.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/product_selfadjoint.cpp
rename to resources/3rdParty/eigen/test/product_selfadjoint.cpp
diff --git a/resources/3rdparty/eigen/test/product_small.cpp b/resources/3rdParty/eigen/test/product_small.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/product_small.cpp
rename to resources/3rdParty/eigen/test/product_small.cpp
diff --git a/resources/3rdparty/eigen/test/product_symm.cpp b/resources/3rdParty/eigen/test/product_symm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/product_symm.cpp
rename to resources/3rdParty/eigen/test/product_symm.cpp
diff --git a/resources/3rdparty/eigen/test/product_syrk.cpp b/resources/3rdParty/eigen/test/product_syrk.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/product_syrk.cpp
rename to resources/3rdParty/eigen/test/product_syrk.cpp
diff --git a/resources/3rdparty/eigen/test/product_trmm.cpp b/resources/3rdParty/eigen/test/product_trmm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/product_trmm.cpp
rename to resources/3rdParty/eigen/test/product_trmm.cpp
diff --git a/resources/3rdparty/eigen/test/product_trmv.cpp b/resources/3rdParty/eigen/test/product_trmv.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/product_trmv.cpp
rename to resources/3rdParty/eigen/test/product_trmv.cpp
diff --git a/resources/3rdparty/eigen/test/product_trsolve.cpp b/resources/3rdParty/eigen/test/product_trsolve.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/product_trsolve.cpp
rename to resources/3rdParty/eigen/test/product_trsolve.cpp
diff --git a/resources/3rdparty/eigen/test/qr.cpp b/resources/3rdParty/eigen/test/qr.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/qr.cpp
rename to resources/3rdParty/eigen/test/qr.cpp
diff --git a/resources/3rdparty/eigen/test/qr_colpivoting.cpp b/resources/3rdParty/eigen/test/qr_colpivoting.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/qr_colpivoting.cpp
rename to resources/3rdParty/eigen/test/qr_colpivoting.cpp
diff --git a/resources/3rdparty/eigen/test/qr_fullpivoting.cpp b/resources/3rdParty/eigen/test/qr_fullpivoting.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/qr_fullpivoting.cpp
rename to resources/3rdParty/eigen/test/qr_fullpivoting.cpp
diff --git a/resources/3rdparty/eigen/test/qtvector.cpp b/resources/3rdParty/eigen/test/qtvector.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/qtvector.cpp
rename to resources/3rdParty/eigen/test/qtvector.cpp
diff --git a/resources/3rdparty/eigen/test/redux.cpp b/resources/3rdParty/eigen/test/redux.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/redux.cpp
rename to resources/3rdParty/eigen/test/redux.cpp
diff --git a/resources/3rdparty/eigen/test/resize.cpp b/resources/3rdParty/eigen/test/resize.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/resize.cpp
rename to resources/3rdParty/eigen/test/resize.cpp
diff --git a/resources/3rdparty/eigen/test/runtest.sh b/resources/3rdParty/eigen/test/runtest.sh
similarity index 100%
rename from resources/3rdparty/eigen/test/runtest.sh
rename to resources/3rdParty/eigen/test/runtest.sh
diff --git a/resources/3rdParty/eigen/test/schur_complex.cpp b/resources/3rdParty/eigen/test/schur_complex.cpp
new file mode 100644
index 000000000..a6f66ab02
--- /dev/null
+++ b/resources/3rdParty/eigen/test/schur_complex.cpp
@@ -0,0 +1,74 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <limits>
+#include <Eigen/Eigenvalues>
+
+template<typename MatrixType> void schur(int size = MatrixType::ColsAtCompileTime)
+{
+  typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;
+  typedef typename ComplexSchur<MatrixType>::ComplexMatrixType ComplexMatrixType;
+
+  // Test basic functionality: T is triangular and A = U T U*
+  for(int counter = 0; counter < g_repeat; ++counter) {
+    MatrixType A = MatrixType::Random(size, size);
+    ComplexSchur<MatrixType> schurOfA(A);
+    VERIFY_IS_EQUAL(schurOfA.info(), Success);
+    ComplexMatrixType U = schurOfA.matrixU();
+    ComplexMatrixType T = schurOfA.matrixT();
+    for(int row = 1; row < size; ++row) {
+      for(int col = 0; col < row; ++col) {
+        VERIFY(T(row,col) == (typename MatrixType::Scalar)0);
+      }
+    }
+    VERIFY_IS_APPROX(A.template cast<ComplexScalar>(), U * T * U.adjoint());
+  }
+
+  // Test asserts when not initialized
+  ComplexSchur<MatrixType> csUninitialized;
+  VERIFY_RAISES_ASSERT(csUninitialized.matrixT());
+  VERIFY_RAISES_ASSERT(csUninitialized.matrixU());
+  VERIFY_RAISES_ASSERT(csUninitialized.info());
+  
+  // Test whether compute() and the constructor return the same result
+  MatrixType A = MatrixType::Random(size, size);
+  ComplexSchur<MatrixType> cs1;
+  cs1.compute(A);
+  ComplexSchur<MatrixType> cs2(A);
+  VERIFY_IS_EQUAL(cs1.info(), Success);
+  VERIFY_IS_EQUAL(cs2.info(), Success);
+  VERIFY_IS_EQUAL(cs1.matrixT(), cs2.matrixT());
+  VERIFY_IS_EQUAL(cs1.matrixU(), cs2.matrixU());
+
+  // Test computation of only T, not U
+  ComplexSchur<MatrixType> csOnlyT(A, false);
+  VERIFY_IS_EQUAL(csOnlyT.info(), Success);
+  VERIFY_IS_EQUAL(cs1.matrixT(), csOnlyT.matrixT());
+  VERIFY_RAISES_ASSERT(csOnlyT.matrixU());
+
+  if (size > 1)
+  {
+    // Test matrix with NaN
+    A(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
+    ComplexSchur<MatrixType> csNaN(A);
+    VERIFY_IS_EQUAL(csNaN.info(), NoConvergence);
+  }
+}
+
+void test_schur_complex()
+{
+  CALL_SUBTEST_1(( schur<Matrix4cd>() ));
+  CALL_SUBTEST_2(( schur<MatrixXcf>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4)) ));
+  CALL_SUBTEST_3(( schur<Matrix<std::complex<float>, 1, 1> >() ));
+  CALL_SUBTEST_4(( schur<Matrix<float, 3, 3, Eigen::RowMajor> >() ));
+
+  // Test problem size constructors
+  CALL_SUBTEST_5(ComplexSchur<MatrixXf>(10));
+}
diff --git a/resources/3rdParty/eigen/test/schur_real.cpp b/resources/3rdParty/eigen/test/schur_real.cpp
new file mode 100644
index 000000000..e6351d94a
--- /dev/null
+++ b/resources/3rdParty/eigen/test/schur_real.cpp
@@ -0,0 +1,93 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <limits>
+#include <Eigen/Eigenvalues>
+
+template<typename MatrixType> void verifyIsQuasiTriangular(const MatrixType& T)
+{
+  typedef typename MatrixType::Index Index;
+
+  const Index size = T.cols();
+  typedef typename MatrixType::Scalar Scalar;
+
+  // Check that T is upper Hessenberg: everything below the first subdiagonal is zero
+  for(int row = 2; row < size; ++row) {
+    for(int col = 0; col < row - 1; ++col) {
+      VERIFY(T(row,col) == Scalar(0));
+    }
+  }
+
+  // Check that any non-zero on the subdiagonal is followed by a zero and is
+  // part of a 2x2 diagonal block with a complex conjugate pair of eigenvalues.
+  for(int row = 1; row < size; ++row) {
+    if (T(row,row-1) != Scalar(0)) {
+      VERIFY(row == size-1 || T(row+1,row) == 0);
+      Scalar tr = T(row-1,row-1) + T(row,row);
+      Scalar det = T(row-1,row-1) * T(row,row) - T(row-1,row) * T(row,row-1);
+      VERIFY(4 * det > tr * tr);
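+      // tr^2 - 4*det < 0 is precisely the condition for the 2x2 block to have a
+      // complex conjugate (non-real) pair of eigenvalues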
+    }
+  }
+}
+
+template<typename MatrixType> void schur(int size = MatrixType::ColsAtCompileTime)
+{
+  // Test basic functionality: T is quasi-triangular and A = U T U*
+  for(int counter = 0; counter < g_repeat; ++counter) {
+    MatrixType A = MatrixType::Random(size, size);
+    RealSchur<MatrixType> schurOfA(A);
+    VERIFY_IS_EQUAL(schurOfA.info(), Success);
+    MatrixType U = schurOfA.matrixU();
+    MatrixType T = schurOfA.matrixT();
+    verifyIsQuasiTriangular(T);
+    VERIFY_IS_APPROX(A, U * T * U.transpose());
+  }
+
+  // Test asserts when not initialized
+  RealSchur<MatrixType> rsUninitialized;
+  VERIFY_RAISES_ASSERT(rsUninitialized.matrixT());
+  VERIFY_RAISES_ASSERT(rsUninitialized.matrixU());
+  VERIFY_RAISES_ASSERT(rsUninitialized.info());
+  
+  // Test whether compute() and the constructor return the same result
+  MatrixType A = MatrixType::Random(size, size);
+  RealSchur<MatrixType> rs1;
+  rs1.compute(A);
+  RealSchur<MatrixType> rs2(A);
+  VERIFY_IS_EQUAL(rs1.info(), Success);
+  VERIFY_IS_EQUAL(rs2.info(), Success);
+  VERIFY_IS_EQUAL(rs1.matrixT(), rs2.matrixT());
+  VERIFY_IS_EQUAL(rs1.matrixU(), rs2.matrixU());
+
+  // Test computation of only T, not U
+  RealSchur<MatrixType> rsOnlyT(A, false);
+  VERIFY_IS_EQUAL(rsOnlyT.info(), Success);
+  VERIFY_IS_EQUAL(rs1.matrixT(), rsOnlyT.matrixT());
+  VERIFY_RAISES_ASSERT(rsOnlyT.matrixU());
+
+  if (size > 2)
+  {
+    // Test matrix with NaN
+    A(0,0) = std::numeric_limits<typename MatrixType::Scalar>::quiet_NaN();
+    RealSchur<MatrixType> rsNaN(A);
+    VERIFY_IS_EQUAL(rsNaN.info(), NoConvergence);
+  }
+}
+
+void test_schur_real()
+{
+  CALL_SUBTEST_1(( schur<Matrix4f>() ));
+  CALL_SUBTEST_2(( schur<MatrixXd>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4)) ));
+  CALL_SUBTEST_3(( schur<Matrix<float, 1, 1> >() ));
+  CALL_SUBTEST_4(( schur<Matrix<double, 3, 3, Eigen::RowMajor> >() ));
+
+  // Test problem size constructors
+  CALL_SUBTEST_5(RealSchur<MatrixXf>(10));
+}
diff --git a/resources/3rdparty/eigen/test/selfadjoint.cpp b/resources/3rdParty/eigen/test/selfadjoint.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/selfadjoint.cpp
rename to resources/3rdParty/eigen/test/selfadjoint.cpp
diff --git a/resources/3rdparty/eigen/test/simplicial_cholesky.cpp b/resources/3rdParty/eigen/test/simplicial_cholesky.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/simplicial_cholesky.cpp
rename to resources/3rdParty/eigen/test/simplicial_cholesky.cpp
diff --git a/resources/3rdparty/eigen/test/sizeof.cpp b/resources/3rdParty/eigen/test/sizeof.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/sizeof.cpp
rename to resources/3rdParty/eigen/test/sizeof.cpp
diff --git a/resources/3rdparty/eigen/test/sizeoverflow.cpp b/resources/3rdParty/eigen/test/sizeoverflow.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/sizeoverflow.cpp
rename to resources/3rdParty/eigen/test/sizeoverflow.cpp
diff --git a/resources/3rdparty/eigen/test/smallvectors.cpp b/resources/3rdParty/eigen/test/smallvectors.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/smallvectors.cpp
rename to resources/3rdParty/eigen/test/smallvectors.cpp
diff --git a/resources/3rdparty/eigen/test/sparse.h b/resources/3rdParty/eigen/test/sparse.h
similarity index 100%
rename from resources/3rdparty/eigen/test/sparse.h
rename to resources/3rdParty/eigen/test/sparse.h
diff --git a/resources/3rdParty/eigen/test/sparse_basic.cpp b/resources/3rdParty/eigen/test/sparse_basic.cpp
new file mode 100644
index 000000000..efe5a7c89
--- /dev/null
+++ b/resources/3rdParty/eigen/test/sparse_basic.cpp
@@ -0,0 +1,401 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Daniel Gomez Ferro <dgomezferro@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "sparse.h"
+
+template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& ref)
+{
+  typedef typename SparseMatrixType::Index Index;
+
+  const Index rows = ref.rows();
+  const Index cols = ref.cols();
+  typedef typename SparseMatrixType::Scalar Scalar;
+  enum { Flags = SparseMatrixType::Flags };
+
+  double density = (std::max)(8./(rows*cols), 0.01);
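+  // aim for roughly 8 nonzeros in total, but never less than a 1% fill ratio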
+  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+  typedef Matrix<Scalar,Dynamic,1> DenseVector;
+  Scalar eps = 1e-6;
+
+  SparseMatrixType m(rows, cols);
+  DenseMatrix refMat = DenseMatrix::Zero(rows, cols);
+  DenseVector vec1 = DenseVector::Random(rows);
+  Scalar s1 = internal::random<Scalar>();
+
+  std::vector<Vector2i> zeroCoords;
+  std::vector<Vector2i> nonzeroCoords;
+  initSparse<Scalar>(density, refMat, m, 0, &zeroCoords, &nonzeroCoords);
+
+  if (zeroCoords.size()==0 || nonzeroCoords.size()==0)
+    return;
+
+  // test coeff and coeffRef
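+  // (a structurally-zero entry must read back as ~0, and coeffRef() on such an entry
+  //  must trigger an assertion when the type is a plain SparseMatrix)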
+  for (int i=0; i<(int)zeroCoords.size(); ++i)
+  {
+    VERIFY_IS_MUCH_SMALLER_THAN( m.coeff(zeroCoords[i].x(),zeroCoords[i].y()), eps );
+    if(internal::is_same<SparseMatrixType,SparseMatrix<Scalar,Flags> >::value)
+      VERIFY_RAISES_ASSERT( m.coeffRef(zeroCoords[0].x(),zeroCoords[0].y()) = 5 );
+  }
+  VERIFY_IS_APPROX(m, refMat);
+
+  m.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5);
+  refMat.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5);
+
+  VERIFY_IS_APPROX(m, refMat);
+  /*
+  // test InnerIterators and Block expressions
+  for (int t=0; t<10; ++t)
+  {
+    int j = internal::random<int>(0,cols-1);
+    int i = internal::random<int>(0,rows-1);
+    int w = internal::random<int>(1,cols-j-1);
+    int h = internal::random<int>(1,rows-i-1);
+
+//     VERIFY_IS_APPROX(m.block(i,j,h,w), refMat.block(i,j,h,w));
+    for(int c=0; c<w; c++)
+    {
+      VERIFY_IS_APPROX(m.block(i,j,h,w).col(c), refMat.block(i,j,h,w).col(c));
+      for(int r=0; r<h; r++)
+      {
+//         VERIFY_IS_APPROX(m.block(i,j,h,w).col(c).coeff(r), refMat.block(i,j,h,w).col(c).coeff(r));
+      }
+    }
+//     for(int r=0; r<h; r++)
+//     {
+//       VERIFY_IS_APPROX(m.block(i,j,h,w).row(r), refMat.block(i,j,h,w).row(r));
+//       for(int c=0; c<w; c++)
+//       {
+//         VERIFY_IS_APPROX(m.block(i,j,h,w).row(r).coeff(c), refMat.block(i,j,h,w).row(r).coeff(c));
+//       }
+//     }
+  }
+
+  for(int c=0; c<cols; c++)
+  {
+    VERIFY_IS_APPROX(m.col(c) + m.col(c), (m + m).col(c));
+    VERIFY_IS_APPROX(m.col(c) + m.col(c), refMat.col(c) + refMat.col(c));
+  }
+
+  for(int r=0; r<rows; r++)
+  {
+    VERIFY_IS_APPROX(m.row(r) + m.row(r), (m + m).row(r));
+    VERIFY_IS_APPROX(m.row(r) + m.row(r), refMat.row(r) + refMat.row(r));
+  }
+  */
+
+    // test insert (inner random)
+    {
+      DenseMatrix m1(rows,cols);
+      m1.setZero();
+      SparseMatrixType m2(rows,cols);
+      if(internal::random<int>()%2)
+        m2.reserve(VectorXi::Constant(m2.outerSize(), 2));
+      for (int j=0; j<cols; ++j)
+      {
+        for (int k=0; k<rows/2; ++k)
+        {
+          int i = internal::random<int>(0,rows-1);
+          if (m1.coeff(i,j)==Scalar(0))
+            m2.insert(i,j) = m1(i,j) = internal::random<Scalar>();
+        }
+      }
+      m2.finalize();
+      VERIFY_IS_APPROX(m2,m1);
+    }
+
+    // test insert (fully random)
+    {
+      DenseMatrix m1(rows,cols);
+      m1.setZero();
+      SparseMatrixType m2(rows,cols);
+      if(internal::random<int>()%2)
+        m2.reserve(VectorXi::Constant(m2.outerSize(), 2));
+      for (int k=0; k<rows*cols; ++k)
+      {
+        int i = internal::random<int>(0,rows-1);
+        int j = internal::random<int>(0,cols-1);
+        if ((m1.coeff(i,j)==Scalar(0)) && (internal::random<int>()%2))
+          m2.insert(i,j) = m1(i,j) = internal::random<Scalar>();
+        else
+        {
+          Scalar v = internal::random<Scalar>();
+          m2.coeffRef(i,j) += v;
+          m1(i,j) += v;
+        }
+      }
+      VERIFY_IS_APPROX(m2,m1);
+    }
+    
+    // test insert (un-compressed)
+    for(int mode=0;mode<4;++mode)
+    {
+      DenseMatrix m1(rows,cols);
+      m1.setZero();
+      SparseMatrixType m2(rows,cols);
+      VectorXi r(VectorXi::Constant(m2.outerSize(), ((mode%2)==0) ? m2.innerSize() : std::max<int>(1,m2.innerSize()/8)));
+      m2.reserve(r);
+      for (int k=0; k<rows*cols; ++k)
+      {
+        int i = internal::random<int>(0,rows-1);
+        int j = internal::random<int>(0,cols-1);
+        if (m1.coeff(i,j)==Scalar(0))
+          m2.insert(i,j) = m1(i,j) = internal::random<Scalar>();
+        if(mode==3)
+          m2.reserve(r);
+      }
+      if(internal::random<int>()%2)
+        m2.makeCompressed();
+      VERIFY_IS_APPROX(m2,m1);
+    }
+
+  // test basic computations
+  {
+    DenseMatrix refM1 = DenseMatrix::Zero(rows, rows);
+    DenseMatrix refM2 = DenseMatrix::Zero(rows, rows);
+    DenseMatrix refM3 = DenseMatrix::Zero(rows, rows);
+    DenseMatrix refM4 = DenseMatrix::Zero(rows, rows);
+    SparseMatrixType m1(rows, rows);
+    SparseMatrixType m2(rows, rows);
+    SparseMatrixType m3(rows, rows);
+    SparseMatrixType m4(rows, rows);
+    initSparse<Scalar>(density, refM1, m1);
+    initSparse<Scalar>(density, refM2, m2);
+    initSparse<Scalar>(density, refM3, m3);
+    initSparse<Scalar>(density, refM4, m4);
+
+    VERIFY_IS_APPROX(m1+m2, refM1+refM2);
+    VERIFY_IS_APPROX(m1+m2+m3, refM1+refM2+refM3);
+    VERIFY_IS_APPROX(m3.cwiseProduct(m1+m2), refM3.cwiseProduct(refM1+refM2));
+    VERIFY_IS_APPROX(m1*s1-m2, refM1*s1-refM2);
+
+    VERIFY_IS_APPROX(m1*=s1, refM1*=s1);
+    VERIFY_IS_APPROX(m1/=s1, refM1/=s1);
+
+    VERIFY_IS_APPROX(m1+=m2, refM1+=refM2);
+    VERIFY_IS_APPROX(m1-=m2, refM1-=refM2);
+
+    if(SparseMatrixType::IsRowMajor)
+      VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.row(0).dot(refM2.row(0)));
+    else
+      VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.col(0).dot(refM2.row(0)));
+
+    VERIFY_IS_APPROX(m1.conjugate(), refM1.conjugate());
+    VERIFY_IS_APPROX(m1.real(), refM1.real());
+
+    refM4.setRandom();
+    // sparse cwise* dense
+    VERIFY_IS_APPROX(m3.cwiseProduct(refM4), refM3.cwiseProduct(refM4));
+//     VERIFY_IS_APPROX(m3.cwise()/refM4, refM3.cwise()/refM4);
+
+    // test aliasing
+    VERIFY_IS_APPROX((m1 = -m1), (refM1 = -refM1));
+    VERIFY_IS_APPROX((m1 = m1.transpose()), (refM1 = refM1.transpose().eval()));
+    VERIFY_IS_APPROX((m1 = -m1.transpose()), (refM1 = -refM1.transpose().eval()));
+    VERIFY_IS_APPROX((m1 += -m1), (refM1 += -refM1));
+  }
+
+  // test transpose
+  {
+    DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
+    SparseMatrixType m2(rows, rows);
+    initSparse<Scalar>(density, refMat2, m2);
+    VERIFY_IS_APPROX(m2.transpose().eval(), refMat2.transpose().eval());
+    VERIFY_IS_APPROX(m2.transpose(), refMat2.transpose());
+
+    VERIFY_IS_APPROX(SparseMatrixType(m2.adjoint()), refMat2.adjoint());
+  }
+
+  // test innerVector()
+  {
+    DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
+    SparseMatrixType m2(rows, rows);
+    initSparse<Scalar>(density, refMat2, m2);
+    int j0 = internal::random<int>(0,rows-1);
+    int j1 = internal::random<int>(0,rows-1);
+    if(SparseMatrixType::IsRowMajor)
+      VERIFY_IS_APPROX(m2.innerVector(j0), refMat2.row(j0));
+    else
+      VERIFY_IS_APPROX(m2.innerVector(j0), refMat2.col(j0));
+
+    if(SparseMatrixType::IsRowMajor)
+      VERIFY_IS_APPROX(m2.innerVector(j0)+m2.innerVector(j1), refMat2.row(j0)+refMat2.row(j1));
+    else
+      VERIFY_IS_APPROX(m2.innerVector(j0)+m2.innerVector(j1), refMat2.col(j0)+refMat2.col(j1));
+
+    SparseMatrixType m3(rows,rows);
+    m3.reserve(VectorXi::Constant(rows,rows/2));
+    for(int j=0; j<rows; ++j)
+      for(int k=0; k<j; ++k)
+        m3.insertByOuterInner(j,k) = k+1;
+    for(int j=0; j<rows; ++j)
+    {
+      VERIFY(j==internal::real(m3.innerVector(j).nonZeros()));
+      if(j>0)
+        VERIFY(j==internal::real(m3.innerVector(j).lastCoeff()));
+    }
+    m3.makeCompressed();
+    for(int j=0; j<rows; ++j)
+    {
+      VERIFY(j==internal::real(m3.innerVector(j).nonZeros()));
+      if(j>0)
+        VERIFY(j==internal::real(m3.innerVector(j).lastCoeff()));
+    }
+
+    //m2.innerVector(j0) = 2*m2.innerVector(j1);
+    //refMat2.col(j0) = 2*refMat2.col(j1);
+    //VERIFY_IS_APPROX(m2, refMat2);
+  }
+
+  // test innerVectors()
+  {
+    DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
+    SparseMatrixType m2(rows, rows);
+    initSparse<Scalar>(density, refMat2, m2);
+    int j0 = internal::random<int>(0,rows-2);
+    int j1 = internal::random<int>(0,rows-2);
+    int n0 = internal::random<int>(1,rows-(std::max)(j0,j1));
+    if(SparseMatrixType::IsRowMajor)
+      VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(j0,0,n0,cols));
+    else
+      VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(0,j0,rows,n0));
+    if(SparseMatrixType::IsRowMajor)
+      VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0),
+                      refMat2.block(j0,0,n0,cols)+refMat2.block(j1,0,n0,cols));
+    else
+      VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0),
+                      refMat2.block(0,j0,rows,n0)+refMat2.block(0,j1,rows,n0));
+    //m2.innerVectors(j0,n0) = m2.innerVectors(j0,n0) + m2.innerVectors(j1,n0);
+    //refMat2.block(0,j0,rows,n0) = refMat2.block(0,j0,rows,n0) + refMat2.block(0,j1,rows,n0);
+  }
+
+  // test prune
+  {
+    SparseMatrixType m2(rows, rows);
+    DenseMatrix refM2(rows, rows);
+    refM2.setZero();
+    int countFalseNonZero = 0;
+    int countTrueNonZero = 0;
+    for (int j=0; j<m2.outerSize(); ++j)
+    {
+      m2.startVec(j);
+      for (int i=0; i<m2.innerSize(); ++i)
+      {
+        float x = internal::random<float>(0,1);
+        if (x<0.1)
+        {
+          // do nothing
+        }
+        else if (x<0.5)
+        {
+          countFalseNonZero++;
+          m2.insertBackByOuterInner(j,i) = Scalar(0);
+        }
+        else
+        {
+          countTrueNonZero++;
+          m2.insertBackByOuterInner(j,i) = Scalar(1);
+          if(SparseMatrixType::IsRowMajor)
+            refM2(j,i) = Scalar(1);
+          else
+            refM2(i,j) = Scalar(1);
+        }
+      }
+    }
+    m2.finalize();
+    VERIFY(countFalseNonZero+countTrueNonZero == m2.nonZeros());
+    VERIFY_IS_APPROX(m2, refM2);
+    m2.prune(Scalar(1));
+    VERIFY(countTrueNonZero==m2.nonZeros());
+    VERIFY_IS_APPROX(m2, refM2);
+  }
+
+  // test setFromTriplets
+  {
+    typedef Triplet<Scalar,Index> TripletType;
+    std::vector<TripletType> triplets;
+    int ntriplets = rows*cols;
+    triplets.reserve(ntriplets);
+    DenseMatrix refMat(rows,cols);
+    refMat.setZero();
+    for(int i=0;i<ntriplets;++i)
+    {
+      int r = internal::random<int>(0,rows-1);
+      int c = internal::random<int>(0,cols-1);
+      Scalar v = internal::random<Scalar>();
+      triplets.push_back(TripletType(r,c,v));
+      refMat(r,c) += v;
+    }
+    SparseMatrixType m(rows,cols);
+    m.setFromTriplets(triplets.begin(), triplets.end());
+    VERIFY_IS_APPROX(m, refMat);
+  }
+
+  // test triangularView
+  {
+    DenseMatrix refMat2(rows, rows), refMat3(rows, rows);
+    SparseMatrixType m2(rows, rows), m3(rows, rows);
+    initSparse<Scalar>(density, refMat2, m2);
+    refMat3 = refMat2.template triangularView<Lower>();
+    m3 = m2.template triangularView<Lower>();
+    VERIFY_IS_APPROX(m3, refMat3);
+
+    refMat3 = refMat2.template triangularView<Upper>();
+    m3 = m2.template triangularView<Upper>();
+    VERIFY_IS_APPROX(m3, refMat3);
+
+    refMat3 = refMat2.template triangularView<UnitUpper>();
+    m3 = m2.template triangularView<UnitUpper>();
+    VERIFY_IS_APPROX(m3, refMat3);
+
+    refMat3 = refMat2.template triangularView<UnitLower>();
+    m3 = m2.template triangularView<UnitLower>();
+    VERIFY_IS_APPROX(m3, refMat3);
+  }
+  
+  // test selfadjointView
+  if(!SparseMatrixType::IsRowMajor)
+  {
+    DenseMatrix refMat2(rows, rows), refMat3(rows, rows);
+    SparseMatrixType m2(rows, rows), m3(rows, rows);
+    initSparse<Scalar>(density, refMat2, m2);
+    refMat3 = refMat2.template selfadjointView<Lower>();
+    m3 = m2.template selfadjointView<Lower>();
+    VERIFY_IS_APPROX(m3, refMat3);
+  }
+  
+  // test sparseView
+  {
+    DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
+    SparseMatrixType m2(rows, rows);
+    initSparse<Scalar>(density, refMat2, m2);
+    VERIFY_IS_APPROX(m2.eval(), refMat2.sparseView().eval());
+  }
+
+  // test diagonal
+  {
+    DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
+    SparseMatrixType m2(rows, rows);
+    initSparse<Scalar>(density, refMat2, m2);
+    VERIFY_IS_APPROX(m2.diagonal(), refMat2.diagonal().eval());
+  }
+}
+
+void test_sparse_basic()
+{
+  for(int i = 0; i < g_repeat; i++) {
+    int s = Eigen::internal::random<int>(1,50);
+    CALL_SUBTEST_1(( sparse_basic(SparseMatrix<double>(8, 8)) ));
+    CALL_SUBTEST_2(( sparse_basic(SparseMatrix<std::complex<double>, ColMajor>(s, s)) ));
+    CALL_SUBTEST_2(( sparse_basic(SparseMatrix<std::complex<double>, RowMajor>(s, s)) ));
+    CALL_SUBTEST_1(( sparse_basic(SparseMatrix<double>(s, s)) ));
+    CALL_SUBTEST_1(( sparse_basic(SparseMatrix<double,ColMajor,long int>(s, s)) ));
+    CALL_SUBTEST_1(( sparse_basic(SparseMatrix<double,RowMajor,long int>(s, s)) ));
+  }
+}
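The test above drives SparseMatrix through random insertion and setFromTriplets(). Below is a minimal standalone sketch of the triplet-assembly pattern it exercises; the sizes and values are arbitrary assumptions, not part of the patch.

// Standalone sketch of the triplet assembly pattern exercised by sparse_basic()
// (sizes and values are arbitrary assumptions).
#include <Eigen/SparseCore>
#include <iostream>
#include <vector>

int main()
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> triplets;
  triplets.push_back(T(0, 0, 1.0));
  triplets.push_back(T(1, 2, 2.0));
  triplets.push_back(T(1, 2, 3.0));   // duplicate entries are summed

  Eigen::SparseMatrix<double> m(3, 3);
  m.setFromTriplets(triplets.begin(), triplets.end());

  std::cout << "m(1,2) = " << m.coeff(1, 2) << "\n";  // 5 = 2.0 + 3.0
  return 0;
}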
diff --git a/resources/3rdparty/eigen/test/sparse_permutations.cpp b/resources/3rdParty/eigen/test/sparse_permutations.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/sparse_permutations.cpp
rename to resources/3rdParty/eigen/test/sparse_permutations.cpp
diff --git a/resources/3rdparty/eigen/test/sparse_product.cpp b/resources/3rdParty/eigen/test/sparse_product.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/sparse_product.cpp
rename to resources/3rdParty/eigen/test/sparse_product.cpp
diff --git a/resources/3rdParty/eigen/test/sparse_solver.h b/resources/3rdParty/eigen/test/sparse_solver.h
new file mode 100644
index 000000000..75fa85082
--- /dev/null
+++ b/resources/3rdParty/eigen/test/sparse_solver.h
@@ -0,0 +1,309 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "sparse.h"
+#include <Eigen/SparseCore>
+
+template<typename Solver, typename Rhs, typename DenseMat, typename DenseRhs>
+void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A, const Rhs& b, const DenseMat& dA, const DenseRhs& db)
+{
+  typedef typename Solver::MatrixType Mat;
+  typedef typename Mat::Scalar Scalar;
+
+  DenseRhs refX = dA.lu().solve(db);
+
+  Rhs x(b.rows(), b.cols());
+  Rhs oldb = b;
+
+  solver.compute(A);
+  if (solver.info() != Success)
+  {
+    std::cerr << "sparse solver testing: factorization failed (check_sparse_solving)\n";
+    exit(0);
+    return;
+  }
+  x = solver.solve(b);
+  if (solver.info() != Success)
+  {
+    std::cerr << "sparse solver testing: solving failed\n";
+    return;
+  }
+  VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!");
+
+  VERIFY(x.isApprox(refX,test_precision<Scalar>()));
+  
+  x.setZero();
+  // test the analyze/factorize API
+  solver.analyzePattern(A);
+  solver.factorize(A);
+  if (solver.info() != Success)
+  {
+    std::cerr << "sparse solver testing: factorization failed (check_sparse_solving)\n";
+    exit(0);
+    return;
+  }
+  x = solver.solve(b);
+  if (solver.info() != Success)
+  {
+    std::cerr << "sparse solver testing: solving failed\n";
+    return;
+  }
+  VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!");
+
+  VERIFY(x.isApprox(refX,test_precision<Scalar>()));
+  
+  // test Block as the result and rhs:
+  {
+    DenseRhs x(db.rows(), db.cols());
+    DenseRhs b(db), oldb(db);
+    x.setZero();
+    x.block(0,0,x.rows(),x.cols()) = solver.solve(b.block(0,0,b.rows(),b.cols()));
+    VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!");
+    VERIFY(x.isApprox(refX,test_precision<Scalar>()));
+  }
+}
+
+template<typename Solver, typename Rhs>
+void check_sparse_solving_real_cases(Solver& solver, const typename Solver::MatrixType& A, const Rhs& b, const Rhs& refX)
+{
+  typedef typename Solver::MatrixType Mat;
+  typedef typename Mat::Scalar Scalar;
+  typedef typename Mat::RealScalar RealScalar;
+  
+  Rhs x(b.rows(), b.cols());
+  
+  solver.compute(A);
+  if (solver.info() != Success)
+  {
+    std::cerr << "sparse solver testing: factorization failed (check_sparse_solving_real_cases)\n";
+    exit(0);
+    return;
+  }
+  x = solver.solve(b);
+  if (solver.info() != Success)
+  {
+    std::cerr << "sparse solver testing: solving failed\n";
+    return;
+  }
+  
+  RealScalar res_error;
+  // Compute the norm of the relative error
+  if(refX.size() != 0)
+    res_error = (refX - x).norm()/refX.norm();
+  else
+  { 
+    // Compute the relative residual norm
+    res_error = (b - A * x).norm()/b.norm();
+  }
+  if (res_error > test_precision<Scalar>() ){
+    std::cerr << "Test " << g_test_stack.back() << " failed in "EI_PP_MAKE_STRING(__FILE__) 
+    << " (" << EI_PP_MAKE_STRING(__LINE__) << ")" << std::endl << std::endl;
+    abort();
+  }
+  
+}
+template<typename Solver, typename DenseMat>
+void check_sparse_determinant(Solver& solver, const typename Solver::MatrixType& A, const DenseMat& dA)
+{
+  typedef typename Solver::MatrixType Mat;
+  typedef typename Mat::Scalar Scalar;
+  typedef typename Mat::RealScalar RealScalar;
+  
+  solver.compute(A);
+  if (solver.info() != Success)
+  {
+    std::cerr << "sparse solver testing: factorization failed (check_sparse_determinant)\n";
+    return;
+  }
+
+  Scalar refDet = dA.determinant();
+  VERIFY_IS_APPROX(refDet,solver.determinant());
+}
+
+
+template<typename Solver, typename DenseMat>
+int generate_sparse_spd_problem(Solver& , typename Solver::MatrixType& A, typename Solver::MatrixType& halfA, DenseMat& dA, int maxSize = 300)
+{
+  typedef typename Solver::MatrixType Mat;
+  typedef typename Mat::Scalar Scalar;
+  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+
+  int size = internal::random<int>(1,maxSize);
+  double density = (std::max)(8./(size*size), 0.01);
+
+  Mat M(size, size);
+  DenseMatrix dM(size, size);
+
+  initSparse<Scalar>(density, dM, M, ForceNonZeroDiag);
+
+  A = M * M.adjoint();
+  dA = dM * dM.adjoint();
+  
+  halfA.resize(size,size);
+  halfA.template selfadjointView<Solver::UpLo>().rankUpdate(M);
+  
+  return size;
+}
+
+
+#ifdef TEST_REAL_CASES
+template<typename Scalar>
+inline std::string get_matrixfolder()
+{
+  std::string mat_folder = TEST_REAL_CASES; 
+  if( internal::is_same<Scalar, std::complex<float> >::value || internal::is_same<Scalar, std::complex<double> >::value )
+    mat_folder = mat_folder + std::string("/complex/");
+  else
+    mat_folder = mat_folder + std::string("/real/");
+  return mat_folder;
+}
+#endif
+
+template<typename Solver> void check_sparse_spd_solving(Solver& solver)
+{
+  typedef typename Solver::MatrixType Mat;
+  typedef typename Mat::Scalar Scalar;
+  typedef typename Mat::Index Index; 
+  typedef SparseMatrix<Scalar,ColMajor> SpMat;
+  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+  typedef Matrix<Scalar,Dynamic,1> DenseVector;
+
+  // generate the problem
+  Mat A, halfA;
+  DenseMatrix dA;
+  int size = generate_sparse_spd_problem(solver, A, halfA, dA);
+
+  // generate the right hand sides
+  int rhsCols = internal::random<int>(1,16);
+  double density = (std::max)(8./(size*rhsCols), 0.1);
+  SpMat B(size,rhsCols);
+  DenseVector b = DenseVector::Random(size);
+  DenseMatrix dB(size,rhsCols);
+  initSparse<Scalar>(density, dB, B, ForceNonZeroDiag);
+  
+  for (int i = 0; i < g_repeat; i++) {
+    check_sparse_solving(solver, A,     b,  dA, b);
+    check_sparse_solving(solver, halfA, b,  dA, b);
+    check_sparse_solving(solver, A,     dB, dA, dB);
+    check_sparse_solving(solver, halfA, dB, dA, dB);
+    check_sparse_solving(solver, A,     B,  dA, dB);
+    check_sparse_solving(solver, halfA, B,  dA, dB);
+  }
+
+  // First, get the folder 
+#ifdef TEST_REAL_CASES  
+  if (internal::is_same<Scalar, float>::value 
+      || internal::is_same<Scalar, std::complex<float> >::value)
+    return ;
+  
+  std::string mat_folder = get_matrixfolder<Scalar>();
+  MatrixMarketIterator<Scalar> it(mat_folder);
+  for (; it; ++it)
+  {
+    if (it.sym() == SPD){
+      Mat halfA;
+      PermutationMatrix<Dynamic, Dynamic, Index> pnull;
+      halfA.template selfadjointView<Solver::UpLo>() = it.matrix().template triangularView<Eigen::Lower>().twistedBy(pnull);
+      
+      std::cout<< " ==== SOLVING WITH MATRIX " << it.matname() << " ==== \n";
+      check_sparse_solving_real_cases(solver, it.matrix(), it.rhs(), it.refX());
+      check_sparse_solving_real_cases(solver, halfA, it.rhs(), it.refX());
+    }
+  }
+#endif
+}
+
+template<typename Solver> void check_sparse_spd_determinant(Solver& solver)
+{
+  typedef typename Solver::MatrixType Mat;
+  typedef typename Mat::Scalar Scalar;
+  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+
+  // generate the problem
+  Mat A, halfA;
+  DenseMatrix dA;
+  generate_sparse_spd_problem(solver, A, halfA, dA, 30);
+  
+  for (int i = 0; i < g_repeat; i++) {
+    check_sparse_determinant(solver, A,     dA);
+    check_sparse_determinant(solver, halfA, dA );
+  }
+}
+
+template<typename Solver, typename DenseMat>
+int generate_sparse_square_problem(Solver&, typename Solver::MatrixType& A, DenseMat& dA, int maxSize = 300)
+{
+  typedef typename Solver::MatrixType Mat;
+  typedef typename Mat::Scalar Scalar;
+  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+
+  int size = internal::random<int>(1,maxSize);
+  double density = (std::max)(8./(size*size), 0.01);
+  
+  A.resize(size,size);
+  dA.resize(size,size);
+
+  initSparse<Scalar>(density, dA, A, ForceNonZeroDiag);
+  
+  return size;
+}
+
+template<typename Solver> void check_sparse_square_solving(Solver& solver)
+{
+  typedef typename Solver::MatrixType Mat;
+  typedef typename Mat::Scalar Scalar;
+  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+  typedef Matrix<Scalar,Dynamic,1> DenseVector;
+
+  int rhsCols = internal::random<int>(1,16);
+
+  Mat A;
+  DenseMatrix dA;
+  int size = generate_sparse_square_problem(solver, A, dA);
+
+  DenseVector b = DenseVector::Random(size);
+  DenseMatrix dB = DenseMatrix::Random(size,rhsCols);
+  A.makeCompressed();
+  for (int i = 0; i < g_repeat; i++) {
+    check_sparse_solving(solver, A, b,  dA, b);
+    check_sparse_solving(solver, A, dB, dA, dB);
+  }
+   
+  // First, get the folder 
+#ifdef TEST_REAL_CASES
+  if (internal::is_same<Scalar, float>::value 
+      || internal::is_same<Scalar, std::complex<float> >::value)
+    return ;
+  
+  std::string mat_folder = get_matrixfolder<Scalar>();
+  MatrixMarketIterator<Scalar> it(mat_folder);
+  for (; it; ++it)
+  {
+    std::cout<< " ==== SOLVING WITH MATRIX " << it.matname() << " ==== \n";
+    check_sparse_solving_real_cases(solver, it.matrix(), it.rhs(), it.refX());
+  }
+#endif
+
+}
+
+template<typename Solver> void check_sparse_square_determinant(Solver& solver)
+{
+  typedef typename Solver::MatrixType Mat;
+  typedef typename Mat::Scalar Scalar;
+  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+
+  // generate the problem
+  Mat A;
+  DenseMatrix dA;
+  generate_sparse_square_problem(solver, A, dA, 30);
+  A.makeCompressed();
+  for (int i = 0; i < g_repeat; i++) {
+    check_sparse_determinant(solver, A, dA);
+  }
+}
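check_sparse_solving() above exercises both the one-shot compute()/solve() path and the split analyzePattern()/factorize() path. Below is a hedged sketch of that calling pattern using SimplicialLLT as a stand-in solver; the solver choice and the 2x2 data are assumptions, the header itself is solver-agnostic.

// Hedged sketch of the compute()/solve() and analyzePattern()/factorize() paths
// tested by check_sparse_solving(); SimplicialLLT and the 2x2 data are assumptions.
#include <Eigen/SparseCholesky>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 4.0;
  A.insert(1, 1) = 2.0;
  A.makeCompressed();

  Eigen::VectorXd b(2);
  b << 8.0, 4.0;

  Eigen::SimplicialLLT<Eigen::SparseMatrix<double> > solver;
  solver.compute(A);                              // one-shot factorization
  if (solver.info() != Eigen::Success) return 1;
  Eigen::VectorXd x = solver.solve(b);

  solver.analyzePattern(A);                       // split API, as exercised above
  solver.factorize(A);
  Eigen::VectorXd x2 = solver.solve(b);

  std::cout << x.transpose() << "  /  " << x2.transpose() << "\n";
  return 0;
}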
diff --git a/resources/3rdparty/eigen/test/sparse_solvers.cpp b/resources/3rdParty/eigen/test/sparse_solvers.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/sparse_solvers.cpp
rename to resources/3rdParty/eigen/test/sparse_solvers.cpp
diff --git a/resources/3rdparty/eigen/test/sparse_vector.cpp b/resources/3rdParty/eigen/test/sparse_vector.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/sparse_vector.cpp
rename to resources/3rdParty/eigen/test/sparse_vector.cpp
diff --git a/resources/3rdparty/eigen/test/stable_norm.cpp b/resources/3rdParty/eigen/test/stable_norm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/stable_norm.cpp
rename to resources/3rdParty/eigen/test/stable_norm.cpp
diff --git a/resources/3rdparty/eigen/test/stddeque.cpp b/resources/3rdParty/eigen/test/stddeque.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/stddeque.cpp
rename to resources/3rdParty/eigen/test/stddeque.cpp
diff --git a/resources/3rdparty/eigen/test/stdlist.cpp b/resources/3rdParty/eigen/test/stdlist.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/stdlist.cpp
rename to resources/3rdParty/eigen/test/stdlist.cpp
diff --git a/resources/3rdparty/eigen/test/stdvector.cpp b/resources/3rdParty/eigen/test/stdvector.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/stdvector.cpp
rename to resources/3rdParty/eigen/test/stdvector.cpp
diff --git a/resources/3rdparty/eigen/test/stdvector_overload.cpp b/resources/3rdParty/eigen/test/stdvector_overload.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/stdvector_overload.cpp
rename to resources/3rdParty/eigen/test/stdvector_overload.cpp
diff --git a/resources/3rdparty/eigen/test/superlu_support.cpp b/resources/3rdParty/eigen/test/superlu_support.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/superlu_support.cpp
rename to resources/3rdParty/eigen/test/superlu_support.cpp
diff --git a/resources/3rdparty/eigen/test/swap.cpp b/resources/3rdParty/eigen/test/swap.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/swap.cpp
rename to resources/3rdParty/eigen/test/swap.cpp
diff --git a/resources/3rdparty/eigen/test/testsuite.cmake b/resources/3rdParty/eigen/test/testsuite.cmake
similarity index 100%
rename from resources/3rdparty/eigen/test/testsuite.cmake
rename to resources/3rdParty/eigen/test/testsuite.cmake
diff --git a/resources/3rdparty/eigen/test/triangular.cpp b/resources/3rdParty/eigen/test/triangular.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/triangular.cpp
rename to resources/3rdParty/eigen/test/triangular.cpp
diff --git a/resources/3rdparty/eigen/test/umeyama.cpp b/resources/3rdParty/eigen/test/umeyama.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/umeyama.cpp
rename to resources/3rdParty/eigen/test/umeyama.cpp
diff --git a/resources/3rdparty/eigen/test/umfpack_support.cpp b/resources/3rdParty/eigen/test/umfpack_support.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/umfpack_support.cpp
rename to resources/3rdParty/eigen/test/umfpack_support.cpp
diff --git a/resources/3rdparty/eigen/test/unalignedassert.cpp b/resources/3rdParty/eigen/test/unalignedassert.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/unalignedassert.cpp
rename to resources/3rdParty/eigen/test/unalignedassert.cpp
diff --git a/resources/3rdparty/eigen/test/unalignedcount.cpp b/resources/3rdParty/eigen/test/unalignedcount.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/unalignedcount.cpp
rename to resources/3rdParty/eigen/test/unalignedcount.cpp
diff --git a/resources/3rdparty/eigen/test/upperbidiagonalization.cpp b/resources/3rdParty/eigen/test/upperbidiagonalization.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/upperbidiagonalization.cpp
rename to resources/3rdParty/eigen/test/upperbidiagonalization.cpp
diff --git a/resources/3rdparty/eigen/test/vectorization_logic.cpp b/resources/3rdParty/eigen/test/vectorization_logic.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/vectorization_logic.cpp
rename to resources/3rdParty/eigen/test/vectorization_logic.cpp
diff --git a/resources/3rdparty/eigen/test/vectorwiseop.cpp b/resources/3rdParty/eigen/test/vectorwiseop.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/vectorwiseop.cpp
rename to resources/3rdParty/eigen/test/vectorwiseop.cpp
diff --git a/resources/3rdparty/eigen/test/visitor.cpp b/resources/3rdParty/eigen/test/visitor.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/visitor.cpp
rename to resources/3rdParty/eigen/test/visitor.cpp
diff --git a/resources/3rdparty/eigen/test/zerosized.cpp b/resources/3rdParty/eigen/test/zerosized.cpp
similarity index 100%
rename from resources/3rdparty/eigen/test/zerosized.cpp
rename to resources/3rdParty/eigen/test/zerosized.cpp
diff --git a/resources/3rdparty/eigen/unsupported/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/AdolcForward b/resources/3rdParty/eigen/unsupported/Eigen/AdolcForward
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/AdolcForward
rename to resources/3rdParty/eigen/unsupported/Eigen/AdolcForward
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/AlignedVector3 b/resources/3rdParty/eigen/unsupported/Eigen/AlignedVector3
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/AlignedVector3
rename to resources/3rdParty/eigen/unsupported/Eigen/AlignedVector3
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/AutoDiff b/resources/3rdParty/eigen/unsupported/Eigen/AutoDiff
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/AutoDiff
rename to resources/3rdParty/eigen/unsupported/Eigen/AutoDiff
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/BVH b/resources/3rdParty/eigen/unsupported/Eigen/BVH
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/BVH
rename to resources/3rdParty/eigen/unsupported/Eigen/BVH
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/FFT b/resources/3rdParty/eigen/unsupported/Eigen/FFT
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/FFT
rename to resources/3rdParty/eigen/unsupported/Eigen/FFT
diff --git a/resources/3rdParty/eigen/unsupported/Eigen/IterativeSolvers b/resources/3rdParty/eigen/unsupported/Eigen/IterativeSolvers
new file mode 100644
index 000000000..6c6946d91
--- /dev/null
+++ b/resources/3rdParty/eigen/unsupported/Eigen/IterativeSolvers
@@ -0,0 +1,40 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ITERATIVE_SOLVERS_MODULE_H
+#define EIGEN_ITERATIVE_SOLVERS_MODULE_H
+
+#include <Eigen/Sparse>
+
+/** \ingroup Unsupported_modules
+  * \defgroup IterativeSolvers_Module Iterative solvers module
+  * This module aims to provide various iterative linear and non-linear solver algorithms.
+  * It currently provides:
+  *  - a constrained conjugate gradient
+  *  - a Householder GMRES implementation
+  * \code
+  * #include <unsupported/Eigen/IterativeSolvers>
+  * \endcode
+  */
+//@{
+
+#include "../../Eigen/src/misc/Solve.h"
+#include "../../Eigen/src/misc/SparseSolve.h"
+
+#include "src/IterativeSolvers/IterationController.h"
+#include "src/IterativeSolvers/ConstrainedConjGrad.h"
+#include "src/IterativeSolvers/IncompleteLU.h"
+#include "../../Eigen/Jacobi"
+#include "../../Eigen/Householder"
+#include "src/IterativeSolvers/GMRES.h"
+//#include "src/IterativeSolvers/SSORPreconditioner.h"
+
+//@}
+
+#endif // EIGEN_ITERATIVE_SOLVERS_MODULE_H
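The module header above lists a constrained conjugate gradient and a Householder GMRES implementation. Below is a minimal usage sketch for the GMRES solver pulled in by this header; the matrix and right-hand side are illustrative assumptions.

// Illustrative use of the GMRES solver provided by this unsupported module
// (matrix and right-hand side are arbitrary assumptions).
#include <unsupported/Eigen/IterativeSolvers>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 3.0;
  A.insert(1, 1) = 5.0;
  A.makeCompressed();

  Eigen::VectorXd b(2);
  b << 6.0, 10.0;

  Eigen::GMRES<Eigen::SparseMatrix<double> > gmres;
  gmres.compute(A);
  Eigen::VectorXd x = gmres.solve(b);

  std::cout << "#iterations: " << gmres.iterations()
            << ", estimated error: " << gmres.error() << "\n"
            << x.transpose() << "\n";
  return 0;
}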
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/KroneckerProduct b/resources/3rdParty/eigen/unsupported/Eigen/KroneckerProduct
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/KroneckerProduct
rename to resources/3rdParty/eigen/unsupported/Eigen/KroneckerProduct
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/MPRealSupport b/resources/3rdParty/eigen/unsupported/Eigen/MPRealSupport
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/MPRealSupport
rename to resources/3rdParty/eigen/unsupported/Eigen/MPRealSupport
diff --git a/resources/3rdParty/eigen/unsupported/Eigen/MatrixFunctions b/resources/3rdParty/eigen/unsupported/Eigen/MatrixFunctions
new file mode 100644
index 000000000..56ab71cd3
--- /dev/null
+++ b/resources/3rdParty/eigen/unsupported/Eigen/MatrixFunctions
@@ -0,0 +1,380 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_FUNCTIONS
+#define EIGEN_MATRIX_FUNCTIONS
+
+#include <cfloat>
+#include <list>
+#include <functional>
+#include <iterator>
+
+#include <Eigen/Core>
+#include <Eigen/LU>
+#include <Eigen/Eigenvalues>
+
+/** \ingroup Unsupported_modules
+  * \defgroup MatrixFunctions_Module Matrix functions module
+  * \brief This module aims to provide various methods for the computation of
+  * matrix functions. 
+  *
+  * To use this module, add 
+  * \code
+  * #include <unsupported/Eigen/MatrixFunctions>
+  * \endcode
+  * at the start of your source file.
+  *
+  * This module defines the following MatrixBase methods.
+  *  - \ref matrixbase_cos "MatrixBase::cos()", for computing the matrix cosine
+  *  - \ref matrixbase_cosh "MatrixBase::cosh()", for computing the matrix hyperbolic cosine
+  *  - \ref matrixbase_exp "MatrixBase::exp()", for computing the matrix exponential
+  *  - \ref matrixbase_log "MatrixBase::log()", for computing the matrix logarithm
+  *  - \ref matrixbase_matrixfunction "MatrixBase::matrixFunction()", for computing general matrix functions
+  *  - \ref matrixbase_sin "MatrixBase::sin()", for computing the matrix sine
+  *  - \ref matrixbase_sinh "MatrixBase::sinh()", for computing the matrix hyperbolic sine
+  *  - \ref matrixbase_sqrt "MatrixBase::sqrt()", for computing the matrix square root
+  *
+  * These methods are the main entry points to this module. 
+  *
+  * %Matrix functions are defined as follows.  Suppose that \f$ f \f$
+  * is an entire function (that is, a function on the complex plane
+  * that is everywhere complex differentiable).  Then its Taylor
+  * series
+  * \f[ f(0) + f'(0) x + \frac{f''(0)}{2} x^2 + \frac{f'''(0)}{3!} x^3 + \cdots \f]
+  * converges to \f$ f(x) \f$. In this case, we can define the matrix
+  * function by the same series:
+  * \f[ f(M) = f(0) + f'(0) M + \frac{f''(0)}{2} M^2 + \frac{f'''(0)}{3!} M^3 + \cdots \f]
+  *
+  */
+
+#include "src/MatrixFunctions/MatrixExponential.h"
+#include "src/MatrixFunctions/MatrixFunction.h"
+#include "src/MatrixFunctions/MatrixSquareRoot.h"
+#include "src/MatrixFunctions/MatrixLogarithm.h"
+
+
+
+/** 
+\page matrixbaseextra MatrixBase methods defined in the MatrixFunctions module
+\ingroup MatrixFunctions_Module
+
+The remainder of the page documents the following MatrixBase methods
+which are defined in the MatrixFunctions module.
+
+
+
+\section matrixbase_cos MatrixBase::cos()
+
+Compute the matrix cosine.
+
+\code
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cos() const
+\endcode
+
+\param[in]  M  a square matrix.
+\returns  expression representing \f$ \cos(M) \f$.
+
+This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos().
+
+\sa \ref matrixbase_sin "sin()" for an example.
+
+
+
+\section matrixbase_cosh MatrixBase::cosh()
+
+Compute the matrix hyperbolic cosine.
+
+\code
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cosh() const
+\endcode
+
+\param[in]  M  a square matrix.
+\returns  expression representing \f$ \cosh(M) \f$
+
+This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cosh().
+
+\sa \ref matrixbase_sinh "sinh()" for an example.
+
+
+
+\section matrixbase_exp MatrixBase::exp()
+
+Compute the matrix exponential.
+
+\code
+const MatrixExponentialReturnValue<Derived> MatrixBase<Derived>::exp() const
+\endcode
+
+\param[in]  M  matrix whose exponential is to be computed.
+\returns    expression representing the matrix exponential of \p M.
+
+The matrix exponential of \f$ M \f$ is defined by
+\f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. \f]
+The matrix exponential can be used to solve linear ordinary
+differential equations: the solution of \f$ y' = My \f$ with the
+initial condition \f$ y(0) = y_0 \f$ is given by
+\f$ y(t) = \exp(Mt) y_0 \f$.
+
+The cost of the computation is approximately \f$ 20 n^3 \f$ for
+matrices of size \f$ n \f$. The number 20 depends weakly on the
+norm of the matrix.
+
+The matrix exponential is computed using the scaling-and-squaring
+method combined with Pad&eacute; approximation. The matrix is first
+rescaled, then the exponential of the reduced matrix is computed from a
+Pad&eacute; approximant, and then the rescaling is undone by repeated
+squaring. The degree of the Pad&eacute; approximant is chosen such
+that the approximation error is less than the round-off
+error. However, errors may accumulate during the squaring phase.
+
+Details of the algorithm can be found in: Nicholas J. Higham, "The
+scaling and squaring method for the matrix exponential revisited,"
+<em>SIAM J. %Matrix Anal. Applic.</em>, <b>26</b>:1179&ndash;1193,
+2005.
+
+Example: The following program checks that
+\f[ \exp \left[ \begin{array}{ccc}
+      0 & \frac14\pi & 0 \\
+      -\frac14\pi & 0 & 0 \\
+      0 & 0 & 0
+    \end{array} \right] = \left[ \begin{array}{ccc}
+      \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+      \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+      0 & 0 & 1
+    \end{array} \right]. \f]
+This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
+the z-axis.
+
+\include MatrixExponential.cpp
+Output: \verbinclude MatrixExponential.out
+
+\note \p M has to be a matrix of \c float, \c double, \c long double,
+\c complex<float>, \c complex<double>, or \c complex<long double>.
+
+
+\section matrixbase_log MatrixBase::log()
+
+Compute the matrix logarithm.
+
+\code
+const MatrixLogarithmReturnValue<Derived> MatrixBase<Derived>::log() const
+\endcode
+
+\param[in]  M  invertible matrix whose logarithm is to be computed.
+\returns    expression representing the matrix logarithm of \p M.
+
+The matrix logarithm of \f$ M \f$ is a matrix \f$ X \f$ such that 
+\f$ \exp(X) = M \f$ where exp denotes the matrix exponential. As for
+the scalar logarithm, the equation \f$ \exp(X) = M \f$ may have
+multiple solutions; this function returns a matrix whose eigenvalues
+have imaginary part in the interval \f$ (-\pi,\pi] \f$.
+
+In the real case, the matrix \f$ M \f$ should be invertible and
+it should have no eigenvalues which are real and negative (pairs of
+complex conjugate eigenvalues are allowed). In the complex case, it
+only needs to be invertible.
+
+This function computes the matrix logarithm using the Schur-Parlett
+algorithm as implemented by MatrixBase::matrixFunction(). The
+logarithm of an atomic block is computed by MatrixLogarithmAtomic,
+which uses direct computation for 1-by-1 and 2-by-2 blocks and an
+inverse scaling-and-squaring algorithm for bigger blocks, with the
+square roots computed by MatrixBase::sqrt().
+
+Details of the algorithm can be found in Section 11.6.2 of:
+Nicholas J. Higham,
+<em>Functions of Matrices: Theory and Computation</em>,
+SIAM 2008. ISBN 978-0-898716-46-7.
+
+Example: The following program checks that
+\f[ \log \left[ \begin{array}{ccc} 
+      \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+      \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+      0 & 0 & 1
+    \end{array} \right] = \left[ \begin{array}{ccc}
+      0 & \frac14\pi & 0 \\ 
+      -\frac14\pi & 0 & 0 \\
+      0 & 0 & 0 
+    \end{array} \right]. \f]
+This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
+the z-axis. This is the inverse of the example used in the
+documentation of \ref matrixbase_exp "exp()".
+
+\include MatrixLogarithm.cpp
+Output: \verbinclude MatrixLogarithm.out
+
+\note \p M has to be a matrix of \c float, \c double, \c long double,
+\c complex<float>, \c complex<double>, or \c complex<long double>.
+
+\sa MatrixBase::exp(), MatrixBase::matrixFunction(), 
+    class MatrixLogarithmAtomic, MatrixBase::sqrt().
+
+
+\section matrixbase_matrixfunction MatrixBase::matrixFunction()
+
+Compute a matrix function.
+
+\code
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::matrixFunction(typename internal::stem_function<typename internal::traits<Derived>::Scalar>::type f) const
+\endcode
+
+\param[in]  M  argument of matrix function, should be a square matrix.
+\param[in]  f  an entire function; \c f(x,n) should compute the n-th
+derivative of f at x.
+\returns  expression representing \p f applied to \p M.
+
+Suppose that \p M is a matrix whose entries have type \c Scalar. 
+Then, the second argument, \p f, should be a function with prototype
+\code 
+ComplexScalar f(ComplexScalar, int) 
+\endcode
+where \c ComplexScalar = \c std::complex<Scalar> if \c Scalar is
+real (e.g., \c float or \c double) and \c ComplexScalar =
+\c Scalar if \c Scalar is complex. The return value of \c f(x,n)
+should be \f$ f^{(n)}(x) \f$, the n-th derivative of f at x.
+
+This routine uses the algorithm described in:
+Philip Davies and Nicholas J. Higham, 
+"A Schur-Parlett algorithm for computing matrix functions", 
+<em>SIAM J. %Matrix Anal. Applic.</em>, <b>25</b>:464&ndash;485, 2003.
+
+The actual work is done by the MatrixFunction class.
+
+Example: The following program checks that
+\f[ \exp \left[ \begin{array}{ccc} 
+      0 & \frac14\pi & 0 \\ 
+      -\frac14\pi & 0 & 0 \\
+      0 & 0 & 0 
+    \end{array} \right] = \left[ \begin{array}{ccc}
+      \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+      \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+      0 & 0 & 1
+    \end{array} \right]. \f]
+This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
+the z-axis. This is the same example as used in the documentation
+of \ref matrixbase_exp "exp()".
+
+\include MatrixFunction.cpp
+Output: \verbinclude MatrixFunction.out
+
+Note that the function \c expfn is defined for complex numbers 
+\c x, even though the matrix \c A is over the reals. Instead of
+\c expfn, we could also have used StdStemFunctions::exp:
+\code
+A.matrixFunction(StdStemFunctions<std::complex<double> >::exp, &B);
+\endcode
+
+
+
+\section matrixbase_sin MatrixBase::sin()
+
+Compute the matrix sine.
+
+\code
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sin() const
+\endcode
+
+\param[in]  M  a square matrix.
+\returns  expression representing \f$ \sin(M) \f$.
+
+This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin().
+
+Example: \include MatrixSine.cpp
+Output: \verbinclude MatrixSine.out
+
+
+
+\section matrixbase_sinh MatrixBase::sinh()
+
+Compute the matrix hyperbolic sine.
+
+\code
+MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sinh() const
+\endcode
+
+\param[in]  M  a square matrix.
+\returns  expression representing \f$ \sinh(M) \f$
+
+This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sinh().
+
+Example: \include MatrixSinh.cpp
+Output: \verbinclude MatrixSinh.out
+
+
+\section matrixbase_sqrt MatrixBase::sqrt()
+
+Compute the matrix square root.
+
+\code
+const MatrixSquareRootReturnValue<Derived> MatrixBase<Derived>::sqrt() const
+\endcode
+
+\param[in]  M  invertible matrix whose square root is to be computed.
+\returns    expression representing the matrix square root of \p M.
+
+The matrix square root of \f$ M \f$ is the matrix \f$ M^{1/2} \f$
+whose square is the original matrix; so if \f$ S = M^{1/2} \f$ then
+\f$ S^2 = M \f$. 
+
+In the <b>real case</b>, the matrix \f$ M \f$ should be invertible and
+it should have no eigenvalues which are real and negative (pairs of
+complex conjugate eigenvalues are allowed). In that case, the matrix
+has a square root which is also real, and this is the square root
+computed by this function. 
+
+The matrix square root is computed by first reducing the matrix to
+quasi-triangular form with the real Schur decomposition. The square
+root of the quasi-triangular matrix can then be computed directly. The
+cost is approximately \f$ 25 n^3 \f$ real flops for the real Schur
+decomposition and \f$ 3\frac13 n^3 \f$ real flops for the remainder
+(though the computation time in practice is likely more than this
+indicates).
+
+Details of the algorithm can be found in: Nicholas J. Higham,
+"Computing real square roots of a real matrix", <em>Linear Algebra
+Appl.</em>, 88/89:405&ndash;430, 1987.
+
+If the matrix is <b>positive-definite symmetric</b>, then the square
+root is also positive-definite symmetric. In this case, it is best to
+use SelfAdjointEigenSolver::operatorSqrt() to compute it.
+
+In the <b>complex case</b>, the matrix \f$ M \f$ should be invertible;
+this is a restriction of the algorithm. The square root computed by
+this algorithm is the one whose eigenvalues have an argument in the
+interval \f$ (-\frac12\pi, \frac12\pi] \f$. This is the usual branch
+cut.
+
+The computation is the same as in the real case, except that the
+complex Schur decomposition is used to reduce the matrix to a
+triangular matrix. The theoretical cost is the same. Details are in:
+&Aring;ke Bj&ouml;rck and Sven Hammarling, "A Schur method for the
+square root of a matrix", <em>Linear Algebra Appl.</em>,
+52/53:127&ndash;140, 1983.
+
+Example: The following program checks that the square root of
+\f[ \left[ \begin{array}{cc} 
+              \cos(\frac13\pi) & -\sin(\frac13\pi) \\
+              \sin(\frac13\pi) & \cos(\frac13\pi)
+    \end{array} \right], \f]
+corresponding to a rotation over 60 degrees, is a rotation over 30 degrees:
+\f[ \left[ \begin{array}{cc} 
+              \cos(\frac16\pi) & -\sin(\frac16\pi) \\
+              \sin(\frac16\pi) & \cos(\frac16\pi)
+    \end{array} \right]. \f]
+
+\include MatrixSquareRoot.cpp
+Output: \verbinclude MatrixSquareRoot.out
+
+\sa class RealSchur, class ComplexSchur, class MatrixSquareRoot,
+    SelfAdjointEigenSolver::operatorSqrt().
+
+*/
+
+#endif // EIGEN_MATRIX_FUNCTIONS
+
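The page above documents MatrixBase::exp() and related matrix functions. Below is a rough sketch of the pi/4 rotation example described in the exp() section; it is an illustration only, not the bundled MatrixExponential.cpp example referenced by the docs.

// Rough sketch of the pi/4 rotation example from the exp() documentation above.
#include <unsupported/Eigen/MatrixFunctions>
#include <cmath>
#include <iostream>

int main()
{
  const double pi = std::acos(-1.0);
  Eigen::MatrixXd A(3, 3);
  A <<  0.0,   pi/4, 0.0,
       -pi/4,  0.0,  0.0,
        0.0,   0.0,  0.0;
  Eigen::MatrixXd expA = A.exp();             // scaling-and-squaring + Pade, as described above
  std::cout << "exp(A) =\n" << expA << "\n";  // expected: rotation by pi/4 about the z-axis
  return 0;
}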
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/MoreVectorization b/resources/3rdParty/eigen/unsupported/Eigen/MoreVectorization
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/MoreVectorization
rename to resources/3rdParty/eigen/unsupported/Eigen/MoreVectorization
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/NonLinearOptimization b/resources/3rdParty/eigen/unsupported/Eigen/NonLinearOptimization
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/NonLinearOptimization
rename to resources/3rdParty/eigen/unsupported/Eigen/NonLinearOptimization
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/NumericalDiff b/resources/3rdParty/eigen/unsupported/Eigen/NumericalDiff
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/NumericalDiff
rename to resources/3rdParty/eigen/unsupported/Eigen/NumericalDiff
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/OpenGLSupport b/resources/3rdParty/eigen/unsupported/Eigen/OpenGLSupport
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/OpenGLSupport
rename to resources/3rdParty/eigen/unsupported/Eigen/OpenGLSupport
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/Polynomials b/resources/3rdParty/eigen/unsupported/Eigen/Polynomials
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/Polynomials
rename to resources/3rdParty/eigen/unsupported/Eigen/Polynomials
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/Skyline b/resources/3rdParty/eigen/unsupported/Eigen/Skyline
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/Skyline
rename to resources/3rdParty/eigen/unsupported/Eigen/Skyline
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/SparseExtra b/resources/3rdParty/eigen/unsupported/Eigen/SparseExtra
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/SparseExtra
rename to resources/3rdParty/eigen/unsupported/Eigen/SparseExtra
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/Splines b/resources/3rdParty/eigen/unsupported/Eigen/Splines
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/Splines
rename to resources/3rdParty/eigen/unsupported/Eigen/Splines
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h b/resources/3rdParty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h b/resources/3rdParty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h b/resources/3rdParty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/AutoDiff/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/AutoDiff/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/AutoDiff/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/AutoDiff/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h b/resources/3rdParty/eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/BVH/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/BVH/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/BVH/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/BVH/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/BVH/KdBVH.h b/resources/3rdParty/eigen/unsupported/Eigen/src/BVH/KdBVH.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/BVH/KdBVH.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/BVH/KdBVH.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/FFT/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/FFT/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/FFT/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/FFT/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/FFT/ei_fftw_impl.h b/resources/3rdParty/eigen/unsupported/Eigen/src/FFT/ei_fftw_impl.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/FFT/ei_fftw_impl.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/FFT/ei_fftw_impl.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h b/resources/3rdParty/eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h b/resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h b/resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/Scaling.h b/resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/Scaling.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/IterativeSolvers/Scaling.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/IterativeSolvers/Scaling.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/resources/3rdParty/eigen/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
new file mode 100644
index 000000000..642916764
--- /dev/null
+++ b/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -0,0 +1,454 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009, 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_EXPONENTIAL
+#define EIGEN_MATRIX_EXPONENTIAL
+
+#include "StemFunction.h"
+
+namespace Eigen { 
+
+#if defined(_MSC_VER) || defined(__FreeBSD__)
+  template <typename Scalar> Scalar log2(Scalar v) { using std::log; return log(v)/log(Scalar(2)); }
+#endif
+
+
+/** \ingroup MatrixFunctions_Module
+  * \brief Class for computing the matrix exponential.
+  * \tparam MatrixType type of the argument of the exponential,
+  * expected to be an instantiation of the Matrix class template.
+  */
+template <typename MatrixType>
+class MatrixExponential {
+
+  public:
+
+    /** \brief Constructor.
+      * 
+      * The class stores a reference to \p M, so it should not be
+      * changed (or destroyed) before compute() is called.
+      *
+      * \param[in] M  matrix whose exponential is to be computed.
+      */
+    MatrixExponential(const MatrixType &M);
+
+    /** \brief Computes the matrix exponential.
+      *
+      * \param[out] result  the matrix exponential of \p M in the constructor.
+      */
+    template <typename ResultType> 
+    void compute(ResultType &result);
+
+  private:
+
+    // Prevent copying
+    MatrixExponential(const MatrixExponential&);
+    MatrixExponential& operator=(const MatrixExponential&);
+
+    /** \brief Compute the (3,3)-Pad&eacute; approximant to the exponential.
+     *
+     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+     *
+     *  \param[in] A   Argument of matrix exponential
+     */
+    void pade3(const MatrixType &A);
+
+    /** \brief Compute the (5,5)-Pad&eacute; approximant to the exponential.
+     *
+     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+     *
+     *  \param[in] A   Argument of matrix exponential
+     */
+    void pade5(const MatrixType &A);
+
+    /** \brief Compute the (7,7)-Pad&eacute; approximant to the exponential.
+     *
+     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+     *
+     *  \param[in] A   Argument of matrix exponential
+     */
+    void pade7(const MatrixType &A);
+
+    /** \brief Compute the (9,9)-Pad&eacute; approximant to the exponential.
+     *
+     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+     *
+     *  \param[in] A   Argument of matrix exponential
+     */
+    void pade9(const MatrixType &A);
+
+    /** \brief Compute the (13,13)-Pad&eacute; approximant to the exponential.
+     *
+     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+     *
+     *  \param[in] A   Argument of matrix exponential
+     */
+    void pade13(const MatrixType &A);
+
+    /** \brief Compute the (17,17)-Pad&eacute; approximant to the exponential.
+     *
+     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+     *
+     *  This function is only used if your \c long double type is double-double or quadruple precision.
+     *
+     *  \param[in] A   Argument of matrix exponential
+     */
+    void pade17(const MatrixType &A);
+
+    /** \brief Compute Pad&eacute; approximant to the exponential.
+     *
+     * Computes \c m_U, \c m_V and \c m_squarings such that
+     * \f$ (V+U)(V-U)^{-1} \f$ is a Pad&eacute; approximant of
+     * \f$ \exp(2^{-\mbox{squarings}}M) \f$ around \f$ M = 0 \f$. The
+     * degree of the Pad&eacute; approximant and the value of
+     * squarings are chosen such that the approximation error is no
+     * more than the round-off error.
+     *
+     * The argument of this function should correspond to the (real
+     * part of the) entries of \c m_M.  It is used to select the
+     * correct implementation using overloading.
+     */
+    void computeUV(double);
+
+    /** \brief Compute Pad&eacute; approximant to the exponential.
+     *
+     *  \sa computeUV(double);
+     */
+    void computeUV(float);
+    
+    /** \brief Compute Pad&eacute; approximant to the exponential.
+     *
+     *  \sa computeUV(double);
+     */
+    void computeUV(long double);
+
+    typedef typename internal::traits<MatrixType>::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Reference to matrix whose exponential is to be computed. */
+    typename internal::nested<MatrixType>::type m_M;
+
+    /** \brief Odd-degree terms in numerator of Pad&eacute; approximant. */
+    MatrixType m_U;
+
+    /** \brief Even-degree terms in numerator of Pad&eacute; approximant. */
+    MatrixType m_V;
+
+    /** \brief Used for temporary storage. */
+    MatrixType m_tmp1;
+
+    /** \brief Used for temporary storage. */
+    MatrixType m_tmp2;
+
+    /** \brief Identity matrix of the same size as \c m_M. */
+    MatrixType m_Id;
+
+    /** \brief Number of squarings required in the last step. */
+    int m_squarings;
+
+    /** \brief L1 norm of m_M. */
+    RealScalar m_l1norm;
+};
+
+template <typename MatrixType>
+MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M) :
+  m_M(M),
+  m_U(M.rows(),M.cols()),
+  m_V(M.rows(),M.cols()),
+  m_tmp1(M.rows(),M.cols()),
+  m_tmp2(M.rows(),M.cols()),
+  m_Id(MatrixType::Identity(M.rows(), M.cols())),
+  m_squarings(0),
+  m_l1norm(M.cwiseAbs().colwise().sum().maxCoeff())
+{
+  /* empty body */
+}
+
+template <typename MatrixType>
+template <typename ResultType> 
+void MatrixExponential<MatrixType>::compute(ResultType &result)
+{
+#if LDBL_MANT_DIG > 112 // rarely happens
+  if(sizeof(RealScalar) > 14) {
+    result = m_M.matrixFunction(StdStemFunctions<ComplexScalar>::exp);
+    return;
+  }
+#endif
+  computeUV(RealScalar());
+  m_tmp1 = m_U + m_V;   // numerator of Pade approximant
+  m_tmp2 = -m_U + m_V;  // denominator of Pade approximant
+  result = m_tmp2.partialPivLu().solve(m_tmp1);
+  for (int i=0; i<m_squarings; i++)
+    result *= result;   // undo scaling by repeated squaring
+}
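+
+/* A minimal usage sketch for the class above, assuming a 2-by-2 double matrix \c A
+ * (MatrixBase::exp(), defined at the end of this file, is the usual entry point):
+ * \code
+ * #include <unsupported/Eigen/MatrixFunctions>
+ * Eigen::Matrix2d A;
+ * A << 0, 1,
+ *     -1, 0;
+ * Eigen::Matrix2d expA;
+ * Eigen::MatrixExponential<Eigen::Matrix2d> me(A);
+ * me.compute(expA);   // expA = [[cos 1, sin 1], [-sin 1, cos 1]]
+ * \endcode
+ */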
+
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade3(const MatrixType &A)
+{
+  const RealScalar b[] = {120., 60., 12., 1.};
+  m_tmp1.noalias() = A * A;
+  m_tmp2 = b[3]*m_tmp1 + b[1]*m_Id;
+  m_U.noalias() = A * m_tmp2;
+  m_V = b[2]*m_tmp1 + b[0]*m_Id;
+}
+
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade5(const MatrixType &A)
+{
+  const RealScalar b[] = {30240., 15120., 3360., 420., 30., 1.};
+  MatrixType A2 = A * A;
+  m_tmp1.noalias() = A2 * A2;
+  m_tmp2 = b[5]*m_tmp1 + b[3]*A2 + b[1]*m_Id;
+  m_U.noalias() = A * m_tmp2;
+  m_V = b[4]*m_tmp1 + b[2]*A2 + b[0]*m_Id;
+}
+
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade7(const MatrixType &A)
+{
+  const RealScalar b[] = {17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.};
+  MatrixType A2 = A * A;
+  MatrixType A4 = A2 * A2;
+  m_tmp1.noalias() = A4 * A2;
+  m_tmp2 = b[7]*m_tmp1 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
+  m_U.noalias() = A * m_tmp2;
+  m_V = b[6]*m_tmp1 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
+}
+
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade9(const MatrixType &A)
+{
+  const RealScalar b[] = {17643225600., 8821612800., 2075673600., 302702400., 30270240.,
+            2162160., 110880., 3960., 90., 1.};
+  MatrixType A2 = A * A;
+  MatrixType A4 = A2 * A2;
+  MatrixType A6 = A4 * A2;
+  m_tmp1.noalias() = A6 * A2;
+  m_tmp2 = b[9]*m_tmp1 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
+  m_U.noalias() = A * m_tmp2;
+  m_V = b[8]*m_tmp1 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
+}
+
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade13(const MatrixType &A)
+{
+  const RealScalar b[] = {64764752532480000., 32382376266240000., 7771770303897600.,
+            1187353796428800., 129060195264000., 10559470521600., 670442572800.,
+            33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.};
+  MatrixType A2 = A * A;
+  MatrixType A4 = A2 * A2;
+  m_tmp1.noalias() = A4 * A2;
+  m_V = b[13]*m_tmp1 + b[11]*A4 + b[9]*A2; // used for temporary storage
+  m_tmp2.noalias() = m_tmp1 * m_V;
+  m_tmp2 += b[7]*m_tmp1 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
+  m_U.noalias() = A * m_tmp2;
+  m_tmp2 = b[12]*m_tmp1 + b[10]*A4 + b[8]*A2;
+  m_V.noalias() = m_tmp1 * m_tmp2;
+  m_V += b[6]*m_tmp1 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
+}
+
+#if LDBL_MANT_DIG > 64
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade17(const MatrixType &A)
+{
+  const RealScalar b[] = {830034394580628357120000.L, 415017197290314178560000.L,
+            100610229646136770560000.L, 15720348382208870400000.L,
+            1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L,
+            595373117923584000.L, 27563570274240000.L, 1060137318240000.L,
+            33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L,
+            46512.L, 306.L, 1.L};
+  MatrixType A2 = A * A;
+  MatrixType A4 = A2 * A2;
+  MatrixType A6 = A4 * A2;
+  m_tmp1.noalias() = A4 * A4;
+  m_V = b[17]*m_tmp1 + b[15]*A6 + b[13]*A4 + b[11]*A2; // used for temporary storage
+  m_tmp2.noalias() = m_tmp1 * m_V;
+  m_tmp2 += b[9]*m_tmp1 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
+  m_U.noalias() = A * m_tmp2;
+  m_tmp2 = b[16]*m_tmp1 + b[14]*A6 + b[12]*A4 + b[10]*A2;
+  m_V.noalias() = m_tmp1 * m_tmp2;
+  m_V += b[8]*m_tmp1 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
+}
+#endif
+
+template <typename MatrixType>
+void MatrixExponential<MatrixType>::computeUV(float)
+{
+  using std::max;
+  using std::pow;
+  using std::ceil;
+  if (m_l1norm < 4.258730016922831e-001) {
+    pade3(m_M);
+  } else if (m_l1norm < 1.880152677804762e+000) {
+    pade5(m_M);
+  } else {
+    const float maxnorm = 3.925724783138660f;
+    m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
+    MatrixType A = m_M / pow(Scalar(2), m_squarings);
+    pade7(A);
+  }
+}
+
+template <typename MatrixType>
+void MatrixExponential<MatrixType>::computeUV(double)
+{
+  using std::max;
+  using std::pow;
+  using std::ceil;
+  if (m_l1norm < 1.495585217958292e-002) {
+    pade3(m_M);
+  } else if (m_l1norm < 2.539398330063230e-001) {
+    pade5(m_M);
+  } else if (m_l1norm < 9.504178996162932e-001) {
+    pade7(m_M);
+  } else if (m_l1norm < 2.097847961257068e+000) {
+    pade9(m_M);
+  } else {
+    const double maxnorm = 5.371920351148152;
+    m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
+    MatrixType A = m_M / pow(Scalar(2), m_squarings);
+    pade13(A);
+  }
+}
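+
+/* Worked instance of the scaling branch above, assuming a hypothetical norm
+ * \f$ \|M\|_1 = 20 \f$: with maxnorm = 5.3719..., the number of squarings is
+ * \f$ \lceil \log_2(20 / 5.3719\ldots) \rceil = \lceil 1.90 \rceil = 2 \f$, so the
+ * (13,13) approximant is evaluated at \f$ M/2^2 \f$ and the result is squared twice
+ * in compute() to recover \f$ \exp(M) \f$.
+ */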
+
+template <typename MatrixType>
+void MatrixExponential<MatrixType>::computeUV(long double)
+{
+  using std::max;
+  using std::pow;
+  using std::ceil;
+#if   LDBL_MANT_DIG == 53   // double precision
+  computeUV(double());
+#elif LDBL_MANT_DIG <= 64   // extended precision
+  if (m_l1norm < 4.1968497232266989671e-003L) {
+    pade3(m_M);
+  } else if (m_l1norm < 1.1848116734693823091e-001L) {
+    pade5(m_M);
+  } else if (m_l1norm < 5.5170388480686700274e-001L) {
+    pade7(m_M);
+  } else if (m_l1norm < 1.3759868875587845383e+000L) {
+    pade9(m_M);
+  } else {
+    const long double maxnorm = 4.0246098906697353063L;
+    m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
+    MatrixType A = m_M / pow(Scalar(2), m_squarings);
+    pade13(A);
+  }
+#elif LDBL_MANT_DIG <= 106  // double-double
+  if (m_l1norm < 3.2787892205607026992947488108213e-005L) {
+    pade3(m_M);
+  } else if (m_l1norm < 6.4467025060072760084130906076332e-003L) {
+    pade5(m_M);
+  } else if (m_l1norm < 6.8988028496595374751374122881143e-002L) {
+    pade7(m_M);
+  } else if (m_l1norm < 2.7339737518502231741495857201670e-001L) {
+    pade9(m_M);
+  } else if (m_l1norm < 1.3203382096514474905666448850278e+000L) {
+    pade13(m_M);
+  } else {
+    const long double maxnorm = 3.2579440895405400856599663723517L;
+    m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
+    MatrixType A = m_M / pow(Scalar(2), m_squarings);
+    pade17(A);
+  }
+#elif LDBL_MANT_DIG <= 112  // quadruple precision
+  if (m_l1norm < 1.639394610288918690547467954466970e-005L) {
+    pade3(m_M);
+  } else if (m_l1norm < 4.253237712165275566025884344433009e-003L) {
+    pade5(m_M);
+  } else if (m_l1norm < 5.125804063165764409885122032933142e-002L) {
+    pade7(m_M);
+  } else if (m_l1norm < 2.170000765161155195453205651889853e-001L) {
+    pade9(m_M);
+  } else if (m_l1norm < 1.125358383453143065081397882891878e+000L) {
+    pade13(m_M);
+  } else {
+    const long double maxnorm = 2.884233277829519311757165057717815L;
+    m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
+    MatrixType A = m_M / pow(Scalar(2), m_squarings);
+    pade17(A);
+  }
+#else
+  // this case should be handled in compute()
+  eigen_assert(false && "Bug in MatrixExponential"); 
+#endif  // LDBL_MANT_DIG
+}
+
+/** \ingroup MatrixFunctions_Module
+  *
+  * \brief Proxy for the matrix exponential of some matrix (expression).
+  *
+  * \tparam Derived  Type of the argument to the matrix exponential.
+  *
+  * This class holds the argument to the matrix exponential until it
+  * is assigned or evaluated for some other reason (so the argument
+  * should not be changed in the meantime). It is the return type of
+  * MatrixBase::exp() and most of the time this is the only way it is
+  * used.
+  */
+template<typename Derived> struct MatrixExponentialReturnValue
+: public ReturnByValue<MatrixExponentialReturnValue<Derived> >
+{
+    typedef typename Derived::Index Index;
+  public:
+    /** \brief Constructor.
+      *
+      * \param[in] src %Matrix (expression) forming the argument of the
+      * matrix exponential.
+      */
+    MatrixExponentialReturnValue(const Derived& src) : m_src(src) { }
+
+    /** \brief Compute the matrix exponential.
+      *
+      * \param[out] result the matrix exponential of \p src in the
+      * constructor.
+      */
+    template <typename ResultType>
+    inline void evalTo(ResultType& result) const
+    {
+      const typename Derived::PlainObject srcEvaluated = m_src.eval();
+      MatrixExponential<typename Derived::PlainObject> me(srcEvaluated);
+      me.compute(result);
+    }
+
+    Index rows() const { return m_src.rows(); }
+    Index cols() const { return m_src.cols(); }
+
+  protected:
+    const Derived& m_src;
+  private:
+    MatrixExponentialReturnValue& operator=(const MatrixExponentialReturnValue&);
+};
+
+namespace internal {
+template<typename Derived>
+struct traits<MatrixExponentialReturnValue<Derived> >
+{
+  typedef typename Derived::PlainObject ReturnType;
+};
+}
+
+template <typename Derived>
+const MatrixExponentialReturnValue<Derived> MatrixBase<Derived>::exp() const
+{
+  eigen_assert(rows() == cols());
+  return MatrixExponentialReturnValue<Derived>(derived());
+}
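+
+/* A minimal usage sketch of the entry point above, assuming a diagonal double matrix:
+ * \code
+ * #include <unsupported/Eigen/MatrixFunctions>
+ * Eigen::Matrix2d A;
+ * A << 1, 0,
+ *      0, 2;
+ * Eigen::Matrix2d B = A.exp();   // B is approximately diag(e, e^2)
+ * \endcode
+ */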
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_EXPONENTIAL
diff --git a/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
new file mode 100644
index 000000000..c57ca87ed
--- /dev/null
+++ b/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -0,0 +1,590 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_FUNCTION
+#define EIGEN_MATRIX_FUNCTION
+
+#include "StemFunction.h"
+#include "MatrixFunctionAtomic.h"
+
+
+namespace Eigen { 
+
+/** \ingroup MatrixFunctions_Module
+  * \brief Class for computing matrix functions.
+  * \tparam  MatrixType  type of the argument of the matrix function,
+  *                      expected to be an instantiation of the Matrix class template.
+  * \tparam  AtomicType  type for computing matrix function of atomic blocks.
+  * \tparam  IsComplex   used internally to select correct specialization.
+  *
+  * This class implements the Schur-Parlett algorithm for computing matrix functions. The spectrum of the
+  * matrix is divided into clusters of eigenvalues that lie close together. This class delegates the
+  * computation of the matrix function of every block corresponding to these clusters to an object of type
+  * \p AtomicType and uses these results to compute the matrix function of the whole matrix. The class
+  * \p AtomicType should have a \p compute() member function for computing the matrix function of a block.
+  *
+  * \sa class MatrixFunctionAtomic, class MatrixLogarithmAtomic
+  */
+template <typename MatrixType, 
+	  typename AtomicType,  
+          int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
+class MatrixFunction
+{  
+  public:
+
+    /** \brief Constructor. 
+      *
+      * \param[in]  A       argument of matrix function, should be a square matrix.
+      * \param[in]  atomic  class for computing matrix function of atomic blocks.
+      *
+      * The class stores references to \p A and \p atomic, so they should not be
+      * changed (or destroyed) before compute() is called.
+      */
+    MatrixFunction(const MatrixType& A, AtomicType& atomic);
+
+    /** \brief Compute the matrix function.
+      *
+      * \param[out] result  the function \p f applied to \p A, as
+      * specified in the constructor.
+      *
+      * See MatrixBase::matrixFunction() for details on how this computation
+      * is implemented.
+      */
+    template <typename ResultType> 
+    void compute(ResultType &result);    
+};
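+
+/* The \p AtomicType contract above is small; a conforming evaluator (the names below are
+ * purely illustrative, not part of this module) only needs a \c compute() member taking and
+ * returning a dense block:
+ * \code
+ * struct MyAtomic {
+ *   // applies the scalar function f to one triangular diagonal block
+ *   Eigen::MatrixXcd compute(const Eigen::MatrixXcd& block);
+ * };
+ * // given some square Eigen::MatrixXcd A and a MyAtomic instance named atomic:
+ * Eigen::MatrixFunction<Eigen::MatrixXcd, MyAtomic> mf(A, atomic);
+ * Eigen::MatrixXcd result;
+ * mf.compute(result);
+ * \endcode
+ */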
+
+
+/** \internal \ingroup MatrixFunctions_Module 
+  * \brief Partial specialization of MatrixFunction for real matrices
+  */
+template <typename MatrixType, typename AtomicType>
+class MatrixFunction<MatrixType, AtomicType, 0>
+{  
+  private:
+
+    typedef internal::traits<MatrixType> Traits;
+    typedef typename Traits::Scalar Scalar;
+    static const int Rows = Traits::RowsAtCompileTime;
+    static const int Cols = Traits::ColsAtCompileTime;
+    static const int Options = MatrixType::Options;
+    static const int MaxRows = Traits::MaxRowsAtCompileTime;
+    static const int MaxCols = Traits::MaxColsAtCompileTime;
+
+    typedef std::complex<Scalar> ComplexScalar;
+    typedef Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols> ComplexMatrix;
+
+  public:
+
+    /** \brief Constructor. 
+      *
+      * \param[in]  A       argument of matrix function, should be a square matrix.
+      * \param[in]  atomic  class for computing matrix function of atomic blocks.
+      */
+    MatrixFunction(const MatrixType& A, AtomicType& atomic) : m_A(A), m_atomic(atomic) { }
+
+    /** \brief Compute the matrix function.
+      *
+      * \param[out] result  the function \p f applied to \p A, as
+      * specified in the constructor.
+      *
+      * This function converts the real matrix \c A to a complex matrix,
+      * uses the complex specialization of MatrixFunction and then converts
+      * the result back to a real matrix.
+      */
+    template <typename ResultType>
+    void compute(ResultType& result) 
+    {
+      ComplexMatrix CA = m_A.template cast<ComplexScalar>();
+      ComplexMatrix Cresult;
+      MatrixFunction<ComplexMatrix, AtomicType> mf(CA, m_atomic);
+      mf.compute(Cresult);
+      result = Cresult.real();
+    }
+
+  private:
+    typename internal::nested<MatrixType>::type m_A; /**< \brief Reference to argument of matrix function. */
+    AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */
+
+    MatrixFunction& operator=(const MatrixFunction&);
+};
+
+      
+/** \internal \ingroup MatrixFunctions_Module 
+  * \brief Partial specialization of MatrixFunction for complex matrices
+  */
+template <typename MatrixType, typename AtomicType>
+class MatrixFunction<MatrixType, AtomicType, 1>
+{
+  private:
+
+    typedef internal::traits<MatrixType> Traits;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+    static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
+    static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
+    static const int Options = MatrixType::Options;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef Matrix<Scalar, Traits::RowsAtCompileTime, 1> VectorType;
+    typedef Matrix<Index, Traits::RowsAtCompileTime, 1> IntVectorType;
+    typedef Matrix<Index, Dynamic, 1> DynamicIntVectorType;
+    typedef std::list<Scalar> Cluster;
+    typedef std::list<Cluster> ListOfClusters;
+    typedef Matrix<Scalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+
+  public:
+
+    MatrixFunction(const MatrixType& A, AtomicType& atomic);
+    template <typename ResultType> void compute(ResultType& result);
+
+  private:
+
+    void computeSchurDecomposition();
+    void partitionEigenvalues();
+    typename ListOfClusters::iterator findCluster(Scalar key);
+    void computeClusterSize();
+    void computeBlockStart();
+    void constructPermutation();
+    void permuteSchur();
+    void swapEntriesInSchur(Index index);
+    void computeBlockAtomic();
+    Block<MatrixType> block(MatrixType& A, Index i, Index j);
+    void computeOffDiagonal();
+    DynMatrixType solveTriangularSylvester(const DynMatrixType& A, const DynMatrixType& B, const DynMatrixType& C);
+
+    typename internal::nested<MatrixType>::type m_A; /**< \brief Reference to argument of matrix function. */
+    AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */
+    MatrixType m_T; /**< \brief Triangular part of Schur decomposition */
+    MatrixType m_U; /**< \brief Unitary part of Schur decomposition */
+    MatrixType m_fT; /**< \brief %Matrix function applied to #m_T */
+    ListOfClusters m_clusters; /**< \brief Partition of eigenvalues into clusters of ei'vals "close" to each other */
+    DynamicIntVectorType m_eivalToCluster; /**< \brief m_eivalToCluster[i] = j means i-th ei'val is in j-th cluster */
+    DynamicIntVectorType m_clusterSize; /**< \brief Number of eigenvalues in each cluster */
+    DynamicIntVectorType m_blockStart; /**< \brief Row index at which block corresponding to i-th cluster starts */
+    IntVectorType m_permutation; /**< \brief Permutation which groups ei'vals in the same cluster together */
+
+    /** \brief Maximum distance allowed between eigenvalues to be considered "close".
+      *
+      * This is morally a \c static \c const \c Scalar, but only
+      * integral types can be initialized in-class as static constant
+      * members in C++. The separation constant is set to 0.1, a value
+      * taken from the paper by Davies and Higham. */
+    static const RealScalar separation() { return static_cast<RealScalar>(0.1); }
+
+    MatrixFunction& operator=(const MatrixFunction&);
+};
+
+/** \brief Constructor. 
+ *
+ * \param[in]  A       argument of matrix function, should be a square matrix.
+ * \param[in]  atomic  class for computing matrix function of atomic blocks.
+ */
+template <typename MatrixType, typename AtomicType>
+MatrixFunction<MatrixType,AtomicType,1>::MatrixFunction(const MatrixType& A, AtomicType& atomic)
+  : m_A(A), m_atomic(atomic)
+{
+  /* empty body */
+}
+
+/** \brief Compute the matrix function.
+  *
+  * \param[out] result  the function \p f applied to \p A, as
+  * specified in the constructor.
+  */
+template <typename MatrixType, typename AtomicType>
+template <typename ResultType>
+void MatrixFunction<MatrixType,AtomicType,1>::compute(ResultType& result) 
+{
+  computeSchurDecomposition();
+  partitionEigenvalues();
+  computeClusterSize();
+  computeBlockStart();
+  constructPermutation();
+  permuteSchur();
+  computeBlockAtomic();
+  computeOffDiagonal();
+  result = m_U * m_fT * m_U.adjoint();
+}
+
+/** \brief Store the Schur decomposition of #m_A in #m_T and #m_U */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::computeSchurDecomposition()
+{
+  const ComplexSchur<MatrixType> schurOfA(m_A);  
+  m_T = schurOfA.matrixT();
+  m_U = schurOfA.matrixU();
+}
+
+/** \brief Partition eigenvalues into clusters of ei'vals close to each other
+  * 
+  * This function computes #m_clusters. This is a partition of the
+  * eigenvalues of #m_T in clusters, such that
+  * # Any eigenvalue in a certain cluster is at most separation() away
+  *   from another eigenvalue in the same cluster.
+  * # The distance between two eigenvalues in different clusters is
+  *   more than separation().
+  * The implementation follows Algorithm 4.1 in the paper of Davies
+  * and Higham. 
+  */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::partitionEigenvalues()
+{
+  const Index rows = m_T.rows();
+  VectorType diag = m_T.diagonal(); // contains eigenvalues of A
+
+  for (Index i=0; i<rows; ++i) {
+    // Find set containing diag(i), adding a new set if necessary
+    typename ListOfClusters::iterator qi = findCluster(diag(i));
+    if (qi == m_clusters.end()) {
+      Cluster l;
+      l.push_back(diag(i));
+      m_clusters.push_back(l);
+      qi = m_clusters.end();
+      --qi;
+    }
+
+    // Look for other elements to add to the set
+    for (Index j=i+1; j<rows; ++j) {
+      if (internal::abs(diag(j) - diag(i)) <= separation() && std::find(qi->begin(), qi->end(), diag(j)) == qi->end()) {
+        typename ListOfClusters::iterator qj = findCluster(diag(j));
+        if (qj == m_clusters.end()) {
+          qi->push_back(diag(j));
+        } else {
+          qi->insert(qi->end(), qj->begin(), qj->end());
+          m_clusters.erase(qj);
+        }
+      }
+    }
+  }
+}
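+
+/* Small worked example of the clustering above, assuming eigenvalues {1.0, 1.05, 3.0}
+ * and the default separation() of 0.1: |1.05 - 1.0| = 0.05 <= 0.1, so 1.0 and 1.05 end
+ * up in the same cluster, while 3.0 is further than 0.1 from both and forms its own
+ * cluster, giving m_clusters = { {1.0, 1.05}, {3.0} }.
+ */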
+
+/** \brief Find cluster in #m_clusters containing some value 
+  * \param[in] key Value to find
+  * \returns Iterator to cluster containing \c key, or
+  * \c m_clusters.end() if no cluster in m_clusters contains \c key.
+  */
+template <typename MatrixType, typename AtomicType>
+typename MatrixFunction<MatrixType,AtomicType,1>::ListOfClusters::iterator MatrixFunction<MatrixType,AtomicType,1>::findCluster(Scalar key)
+{
+  typename Cluster::iterator j;
+  for (typename ListOfClusters::iterator i = m_clusters.begin(); i != m_clusters.end(); ++i) {
+    j = std::find(i->begin(), i->end(), key);
+    if (j != i->end())
+      return i;
+  }
+  return m_clusters.end();
+}
+
+/** \brief Compute #m_clusterSize and #m_eivalToCluster using #m_clusters */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::computeClusterSize()
+{
+  const Index rows = m_T.rows();
+  VectorType diag = m_T.diagonal(); 
+  const Index numClusters = static_cast<Index>(m_clusters.size());
+
+  m_clusterSize.setZero(numClusters);
+  m_eivalToCluster.resize(rows);
+  Index clusterIndex = 0;
+  for (typename ListOfClusters::const_iterator cluster = m_clusters.begin(); cluster != m_clusters.end(); ++cluster) {
+    for (Index i = 0; i < diag.rows(); ++i) {
+      if (std::find(cluster->begin(), cluster->end(), diag(i)) != cluster->end()) {
+        ++m_clusterSize[clusterIndex];
+        m_eivalToCluster[i] = clusterIndex;
+      }
+    }
+    ++clusterIndex;
+  }
+}
+
+/** \brief Compute #m_blockStart using #m_clusterSize */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::computeBlockStart()
+{
+  m_blockStart.resize(m_clusterSize.rows());
+  m_blockStart(0) = 0;
+  for (Index i = 1; i < m_clusterSize.rows(); i++) {
+    m_blockStart(i) = m_blockStart(i-1) + m_clusterSize(i-1);
+  }
+}
+
+/** \brief Compute #m_permutation using #m_eivalToCluster and #m_blockStart */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::constructPermutation()
+{
+  DynamicIntVectorType indexNextEntry = m_blockStart;
+  m_permutation.resize(m_T.rows());
+  for (Index i = 0; i < m_T.rows(); i++) {
+    Index cluster = m_eivalToCluster[i];
+    m_permutation[i] = indexNextEntry[cluster];
+    ++indexNextEntry[cluster];
+  }
+}  
+
+/** \brief Permute Schur decomposition in #m_U and #m_T according to #m_permutation */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::permuteSchur()
+{
+  IntVectorType p = m_permutation;
+  for (Index i = 0; i < p.rows() - 1; i++) {
+    Index j;
+    for (j = i; j < p.rows(); j++) {
+      if (p(j) == i) break;
+    }
+    eigen_assert(p(j) == i);
+    for (Index k = j-1; k >= i; k--) {
+      swapEntriesInSchur(k);
+      std::swap(p.coeffRef(k), p.coeffRef(k+1));
+    }
+  }
+}
+
+/** \brief Swap rows \a index and \a index+1 in Schur decomposition in #m_U and #m_T */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::swapEntriesInSchur(Index index)
+{
+  JacobiRotation<Scalar> rotation;
+  rotation.makeGivens(m_T(index, index+1), m_T(index+1, index+1) - m_T(index, index));
+  m_T.applyOnTheLeft(index, index+1, rotation.adjoint());
+  m_T.applyOnTheRight(index, index+1, rotation);
+  m_U.applyOnTheRight(index, index+1, rotation);
+}  
+
+/** \brief Compute block diagonal part of #m_fT.
+  *
+  * This routine computes the matrix function applied to the block diagonal part of #m_T, with the blocking
+  * given by #m_blockStart. The matrix function of each diagonal block is computed by #m_atomic. The
+  * off-diagonal parts of #m_fT are set to zero.
+  */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::computeBlockAtomic()
+{ 
+  m_fT.resize(m_T.rows(), m_T.cols());
+  m_fT.setZero();
+  for (Index i = 0; i < m_clusterSize.rows(); ++i) {
+    block(m_fT, i, i) = m_atomic.compute(block(m_T, i, i));
+  }
+}
+
+/** \brief Return block of matrix according to blocking given by #m_blockStart */
+template <typename MatrixType, typename AtomicType>
+Block<MatrixType> MatrixFunction<MatrixType,AtomicType,1>::block(MatrixType& A, Index i, Index j)
+{
+  return A.block(m_blockStart(i), m_blockStart(j), m_clusterSize(i), m_clusterSize(j));
+}
+
+/** \brief Compute part of #m_fT above block diagonal.
+  *
+  * This routine assumes that the block diagonal part of #m_fT (which
+  * equals the matrix function applied to #m_T) has already been computed and computes
+  * the part above the block diagonal. The part below the diagonal is
+  * zero, because #m_T is upper triangular.
+  */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::computeOffDiagonal()
+{ 
+  for (Index diagIndex = 1; diagIndex < m_clusterSize.rows(); diagIndex++) {
+    for (Index blockIndex = 0; blockIndex < m_clusterSize.rows() - diagIndex; blockIndex++) {
+      // compute (blockIndex, blockIndex+diagIndex) block
+      DynMatrixType A = block(m_T, blockIndex, blockIndex);
+      DynMatrixType B = -block(m_T, blockIndex+diagIndex, blockIndex+diagIndex);
+      DynMatrixType C = block(m_fT, blockIndex, blockIndex) * block(m_T, blockIndex, blockIndex+diagIndex);
+      C -= block(m_T, blockIndex, blockIndex+diagIndex) * block(m_fT, blockIndex+diagIndex, blockIndex+diagIndex);
+      for (Index k = blockIndex + 1; k < blockIndex + diagIndex; k++) {
+        C += block(m_fT, blockIndex, k) * block(m_T, k, blockIndex+diagIndex);
+        C -= block(m_T, blockIndex, k) * block(m_fT, k, blockIndex+diagIndex);
+      }
+      block(m_fT, blockIndex, blockIndex+diagIndex) = solveTriangularSylvester(A, B, C);
+    }
+  }
+}
+
+/** \brief Solve a triangular Sylvester equation AX + XB = C 
+  *
+  * \param[in]  A  the matrix A; should be square and upper triangular
+  * \param[in]  B  the matrix B; should be square and upper triangular
+  * \param[in]  C  the matrix C; should have correct size.
+  *
+  * \returns the solution X.
+  *
+  * If A is m-by-m and B is n-by-n, then both C and X are m-by-n. 
+  * The (i,j)-th component of the Sylvester equation is
+  * \f[ 
+  *     \sum_{k=i}^m A_{ik} X_{kj} + \sum_{k=1}^j X_{ik} B_{kj} = C_{ij}. 
+  * \f]
+  * This can be re-arranged to yield:
+  * \f[ 
+  *     X_{ij} = \frac{1}{A_{ii} + B_{jj}} \Bigl( C_{ij}
+  *     - \sum_{k=i+1}^m A_{ik} X_{kj} - \sum_{k=1}^{j-1} X_{ik} B_{kj} \Bigr).
+  * \f]
+  * It is assumed that A and B are such that the numerator is never
+  * zero (otherwise the Sylvester equation does not have a unique
+  * solution). In that case, these equations can be evaluated in the
+  * order \f$ i=m,\ldots,1 \f$ and \f$ j=1,\ldots,n \f$.
+  */
+template <typename MatrixType, typename AtomicType>
+typename MatrixFunction<MatrixType,AtomicType,1>::DynMatrixType MatrixFunction<MatrixType,AtomicType,1>::solveTriangularSylvester(
+  const DynMatrixType& A, 
+  const DynMatrixType& B, 
+  const DynMatrixType& C)
+{
+  eigen_assert(A.rows() == A.cols());
+  eigen_assert(A.isUpperTriangular());
+  eigen_assert(B.rows() == B.cols());
+  eigen_assert(B.isUpperTriangular());
+  eigen_assert(C.rows() == A.rows());
+  eigen_assert(C.cols() == B.rows());
+
+  Index m = A.rows();
+  Index n = B.rows();
+  DynMatrixType X(m, n);
+
+  for (Index i = m - 1; i >= 0; --i) {
+    for (Index j = 0; j < n; ++j) {
+
+      // Compute AX = \sum_{k=i+1}^m A_{ik} X_{kj}
+      Scalar AX;
+      if (i == m - 1) {
+        AX = 0;
+      } else {
+        Matrix<Scalar,1,1> AXmatrix = A.row(i).tail(m-1-i) * X.col(j).tail(m-1-i);
+        AX = AXmatrix(0,0);
+      }
+
+      // Compute XB = \sum_{k=1}^{j-1} X_{ik} B_{kj}
+      Scalar XB;
+      if (j == 0) {
+        XB = 0;
+      } else {
+        Matrix<Scalar,1,1> XBmatrix = X.row(i).head(j) * B.col(j).head(j);
+        XB = XBmatrix(0,0);
+      }
+
+      X(i,j) = (C(i,j) - AX - XB) / (A(i,i) + B(j,j));
+    }
+  }
+  return X;
+}
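+
+/* In the smallest case \f$ m = n = 1 \f$ both correction sums above are empty and the
+ * recurrence reduces to \f$ X_{11} = C_{11} / (A_{11} + B_{11}) \f$; larger blocks are
+ * filled from the bottom row upwards, left to right within each row, exactly as in the
+ * two loops above.
+ */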
+
+/** \ingroup MatrixFunctions_Module
+  *
+  * \brief Proxy for the matrix function of some matrix (expression).
+  *
+  * \tparam Derived  Type of the argument to the matrix function.
+  *
+  * This class holds the argument to the matrix function until it is
+  * assigned or evaluated for some other reason (so the argument
+  * should not be changed in the meantime). It is the return type of
+  * MatrixBase::matrixFunction() and related functions and most of the
+  * time this is the only way it is used.
+  */
+template<typename Derived> class MatrixFunctionReturnValue
+: public ReturnByValue<MatrixFunctionReturnValue<Derived> >
+{
+  public:
+
+    typedef typename Derived::Scalar Scalar;
+    typedef typename Derived::Index Index;
+    typedef typename internal::stem_function<Scalar>::type StemFunction;
+
+   /** \brief Constructor.
+      *
+      * \param[in] A  %Matrix (expression) forming the argument of the
+      * matrix function.
+      * \param[in] f  Stem function for matrix function under consideration.
+      */
+    MatrixFunctionReturnValue(const Derived& A, StemFunction f) : m_A(A), m_f(f) { }
+
+    /** \brief Compute the matrix function.
+      *
+      * \param[out] result \p f applied to \p A, where \p f and \p A
+      * are as in the constructor.
+      */
+    template <typename ResultType>
+    inline void evalTo(ResultType& result) const
+    {
+      typedef typename Derived::PlainObject PlainObject;
+      typedef internal::traits<PlainObject> Traits;
+      static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
+      static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
+      static const int Options = PlainObject::Options;
+      typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+      typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+      typedef MatrixFunctionAtomic<DynMatrixType> AtomicType;
+      AtomicType atomic(m_f);
+
+      const PlainObject Aevaluated = m_A.eval();
+      MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
+      mf.compute(result);
+    }
+
+    Index rows() const { return m_A.rows(); }
+    Index cols() const { return m_A.cols(); }
+
+  private:
+    typename internal::nested<Derived>::type m_A;
+    StemFunction *m_f;
+
+    MatrixFunctionReturnValue& operator=(const MatrixFunctionReturnValue&);
+};
+
+namespace internal {
+template<typename Derived>
+struct traits<MatrixFunctionReturnValue<Derived> >
+{
+  typedef typename Derived::PlainObject ReturnType;
+};
+}
+
+
+/********** MatrixBase methods **********/
+
+
+template <typename Derived>
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::matrixFunction(typename internal::stem_function<typename internal::traits<Derived>::Scalar>::type f) const
+{
+  eigen_assert(rows() == cols());
+  return MatrixFunctionReturnValue<Derived>(derived(), f);
+}
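+
+/* A minimal usage sketch for matrixFunction(), assuming the stem-function signature used
+ * by StdStemFunctions (value and derivative order; for exp every derivative is exp itself):
+ * \code
+ * #include <unsupported/Eigen/MatrixFunctions>
+ * std::complex<double> expfn(std::complex<double> x, int) { return std::exp(x); }
+ * Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);   // any square double matrix
+ * Eigen::MatrixXd B = A.matrixFunction(expfn);        // exp(A), computed via Schur-Parlett
+ * \endcode
+ */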
+
+template <typename Derived>
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sin() const
+{
+  eigen_assert(rows() == cols());
+  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
+  return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::sin);
+}
+
+template <typename Derived>
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cos() const
+{
+  eigen_assert(rows() == cols());
+  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
+  return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::cos);
+}
+
+template <typename Derived>
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sinh() const
+{
+  eigen_assert(rows() == cols());
+  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
+  return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::sinh);
+}
+
+template <typename Derived>
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cosh() const
+{
+  eigen_assert(rows() == cols());
+  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
+  return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::cosh);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_FUNCTION
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h b/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
diff --git a/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
new file mode 100644
index 000000000..892d0c9a9
--- /dev/null
+++ b/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
@@ -0,0 +1,495 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_LOGARITHM
+#define EIGEN_MATRIX_LOGARITHM
+
+#ifndef M_PI
+#define M_PI 3.141592653589793238462643383279503L
+#endif
+
+namespace Eigen { 
+
+/** \ingroup MatrixFunctions_Module
+  * \class MatrixLogarithmAtomic
+  * \brief Helper class for computing matrix logarithm of atomic matrices.
+  *
+  * \internal
+  * Here, an atomic matrix is a triangular matrix whose diagonal
+  * entries are close to each other.
+  *
+  * \sa class MatrixFunctionAtomic, MatrixBase::log()
+  */
+template <typename MatrixType>
+class MatrixLogarithmAtomic
+{
+public:
+
+  typedef typename MatrixType::Scalar Scalar;
+  // typedef typename MatrixType::Index Index;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  // typedef typename internal::stem_function<Scalar>::type StemFunction;
+  // typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+
+  /** \brief Constructor. */
+  MatrixLogarithmAtomic() { }
+
+  /** \brief Compute matrix logarithm of atomic matrix
+    * \param[in]  A  argument of matrix logarithm, should be upper triangular and atomic
+    * \returns  The logarithm of \p A.
+    */
+  MatrixType compute(const MatrixType& A);
+
+private:
+
+  void compute2x2(const MatrixType& A, MatrixType& result);
+  void computeBig(const MatrixType& A, MatrixType& result);
+  static Scalar atanh(Scalar x);
+  int getPadeDegree(float normTminusI);
+  int getPadeDegree(double normTminusI);
+  int getPadeDegree(long double normTminusI);
+  void computePade(MatrixType& result, const MatrixType& T, int degree);
+  void computePade3(MatrixType& result, const MatrixType& T);
+  void computePade4(MatrixType& result, const MatrixType& T);
+  void computePade5(MatrixType& result, const MatrixType& T);
+  void computePade6(MatrixType& result, const MatrixType& T);
+  void computePade7(MatrixType& result, const MatrixType& T);
+  void computePade8(MatrixType& result, const MatrixType& T);
+  void computePade9(MatrixType& result, const MatrixType& T);
+  void computePade10(MatrixType& result, const MatrixType& T);
+  void computePade11(MatrixType& result, const MatrixType& T);
+
+  static const int minPadeDegree = 3;
+  static const int maxPadeDegree = std::numeric_limits<RealScalar>::digits<= 24?  5:      // single precision
+                                   std::numeric_limits<RealScalar>::digits<= 53?  7:      // double precision
+                                   std::numeric_limits<RealScalar>::digits<= 64?  8:      // extended precision
+                                   std::numeric_limits<RealScalar>::digits<=106? 10: 11;  // double-double or quadruple precision
+
+  // Prevent copying
+  MatrixLogarithmAtomic(const MatrixLogarithmAtomic&);
+  MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&);
+};
+
+/** \brief Compute logarithm of triangular matrix with clustered eigenvalues. */
+template <typename MatrixType>
+MatrixType MatrixLogarithmAtomic<MatrixType>::compute(const MatrixType& A)
+{
+  using std::log;
+  MatrixType result(A.rows(), A.rows());
+  if (A.rows() == 1)
+    result(0,0) = log(A(0,0));
+  else if (A.rows() == 2)
+    compute2x2(A, result);
+  else
+    computeBig(A, result);
+  return result;
+}
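+
+/* Worked sketch for the 2-by-2 branch dispatched above, assuming equal diagonal entries:
+ * for \f$ A = \begin{pmatrix} a & b \\ 0 & a \end{pmatrix} \f$ the logarithm is
+ * \f$ \log A = \begin{pmatrix} \log a & b/a \\ 0 & \log a \end{pmatrix} \f$,
+ * which is exactly the \c A(0,0) == A(1,1) case of compute2x2() further below.
+ */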
+
+/** \brief Compute atanh (inverse hyperbolic tangent). */
+template <typename MatrixType>
+typename MatrixType::Scalar MatrixLogarithmAtomic<MatrixType>::atanh(typename MatrixType::Scalar x)
+{
+  using std::abs;
+  using std::sqrt;
+  if (abs(x) > sqrt(NumTraits<Scalar>::epsilon()))
+    return Scalar(0.5) * log((Scalar(1) + x) / (Scalar(1) - x));
+  else
+    return x + x*x*x / Scalar(3);
+}
+
+/** \brief Compute logarithm of 2x2 triangular matrix. */
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixType& result)
+{
+  using std::abs;
+  using std::ceil;
+  using std::imag;
+  using std::log;
+
+  Scalar logA00 = log(A(0,0));
+  Scalar logA11 = log(A(1,1));
+
+  result(0,0) = logA00;
+  result(1,0) = Scalar(0);
+  result(1,1) = logA11;
+
+  if (A(0,0) == A(1,1)) {
+    result(0,1) = A(0,1) / A(0,0);
+  } else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) {
+    result(0,1) = A(0,1) * (logA11 - logA00) / (A(1,1) - A(0,0));
+  } else {
+    // computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
+    int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI)));
+    Scalar z = (A(1,1) - A(0,0)) / (A(1,1) + A(0,0));
+    result(0,1) = A(0,1) * (Scalar(2) * atanh(z) + Scalar(0,2*M_PI*unwindingNumber)) / (A(1,1) - A(0,0));
+  }
+}
+
+/** \brief Compute logarithm of triangular matrices with size > 2. 
+  * \details This uses an inverse scale-and-square algorithm. */
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computeBig(const MatrixType& A, MatrixType& result)
+{
+  int numberOfSquareRoots = 0;
+  int numberOfExtraSquareRoots = 0;
+  int degree;
+  MatrixType T = A;
+  const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1:                     // single precision
+                                    maxPadeDegree<= 7? 2.6429608311114350e-1:                     // double precision
+                                    maxPadeDegree<= 8? 2.32777776523703892094e-1L:                // extended precision
+                                    maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L:    // double-double
+                                                       1.1880960220216759245467951592883642e-1L;  // quadruple precision
+
+  while (true) {
+    RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff();
+    if (normTminusI < maxNormForPade) {
+      degree = getPadeDegree(normTminusI);
+      int degree2 = getPadeDegree(normTminusI / RealScalar(2));
+      if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1))
+        break;
+      ++numberOfExtraSquareRoots;
+    }
+    MatrixType sqrtT;
+    MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
+    T = sqrtT;
+    ++numberOfSquareRoots;
+  }
+
+  computePade(result, T, degree);
+  result *= pow(RealScalar(2), numberOfSquareRoots);
+}
+
+/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */
+template <typename MatrixType>
+int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(float normTminusI)
+{
+  const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1,
+            5.3149729967117310e-1 };
+  for (int degree = 3; degree <= maxPadeDegree; ++degree) 
+    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
+      return degree;
+  assert(false); // this line should never be reached
+}
+
+/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */
+template <typename MatrixType>
+int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(double normTminusI)
+{
+  const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
+            1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
+  for (int degree = 3; degree <= maxPadeDegree; ++degree)
+    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
+      return degree;
+  assert(false); // this line should never be reached
+}
+
+/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double) */
+template <typename MatrixType>
+int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
+{
+#if   LDBL_MANT_DIG == 53         // double precision
+  const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L,
+            1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L };
+#elif LDBL_MANT_DIG <= 64         // extended precision
+  const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L,
+            5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L,
+            2.32777776523703892094e-1L };
+#elif LDBL_MANT_DIG <= 106        // double-double
+  const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */,
+            9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L,
+            1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L,
+            4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L,
+            1.05026503471351080481093652651105e-1L };
+#else                             // quadruple precision
+  const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */,
+            5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L,
+            8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L,
+            3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L,
+            8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L };
+#endif
+  for (int degree = 3; degree <= maxPadeDegree; ++degree)
+    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
+      return degree;
+  assert(false); // this line should never be reached
+}
+
+/* \brief Compute Pade approximation to matrix logarithm */
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade(MatrixType& result, const MatrixType& T, int degree)
+{
+  switch (degree) {
+    case 3:  computePade3(result, T);  break;
+    case 4:  computePade4(result, T);  break;
+    case 5:  computePade5(result, T);  break;
+    case 6:  computePade6(result, T);  break;
+    case 7:  computePade7(result, T);  break;
+    case 8:  computePade8(result, T);  break;
+    case 9:  computePade9(result, T);  break;
+    case 10: computePade10(result, T); break;
+    case 11: computePade11(result, T); break;
+    default: assert(false); // should never happen
+  }
+} 
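+
+/* All computePadeN() helpers below evaluate the same partial-fraction form of the Pad&eacute;
+ * approximant, differing only in the Gauss-Legendre nodes and weights:
+ * \f[ \log T \approx \sum_{k=1}^{N} w_k \bigl(I + x_k (T - I)\bigr)^{-1} (T - I), \f]
+ * where \f$ x_k \f$ and \f$ w_k \f$ are the \c nodes[] and \c weights[] arrays of degree N.
+ */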
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade3(MatrixType& result, const MatrixType& T)
+{
+  const int degree = 3;
+  const RealScalar nodes[]   = { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L,
+            0.8872983346207416885179265399782400L };
+  const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,
+            0.2777777777777777777777777777777778L };
+  assert(degree <= maxPadeDegree);
+  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+  result.setZero(T.rows(), T.rows());
+  for (int k = 0; k < degree; ++k)
+    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+                           .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade4(MatrixType& result, const MatrixType& T)
+{
+  const int degree = 4;
+  const RealScalar nodes[]   = { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L,
+            0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L };
+  const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,
+            0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L };
+  assert(degree <= maxPadeDegree);
+  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+  result.setZero(T.rows(), T.rows());
+  for (int k = 0; k < degree; ++k)
+    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+                           .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade5(MatrixType& result, const MatrixType& T)
+{
+  const int degree = 5;
+  const RealScalar nodes[]   = { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L,
+            0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L,
+            0.9530899229693319963988134391496965L };
+  const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,
+            0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
+            0.1184634425280945437571320203599587L };
+  assert(degree <= maxPadeDegree);
+  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+  result.setZero(T.rows(), T.rows());
+  for (int k = 0; k < degree; ++k)
+    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+                           .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade6(MatrixType& result, const MatrixType& T)
+{
+  const int degree = 6;
+  const RealScalar nodes[]   = { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L,
+            0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L,
+            0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L };
+  const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,
+            0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
+            0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
+  assert(degree <= maxPadeDegree);
+  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+  result.setZero(T.rows(), T.rows());
+  for (int k = 0; k < degree; ++k)
+    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+                           .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade7(MatrixType& result, const MatrixType& T)
+{
+  const int degree = 7;
+  const RealScalar nodes[]   = { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L,
+            0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L,
+            0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L,
+            0.9745539561713792622630948420239256L };
+  const RealScalar weights[] = { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L,
+            0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
+            0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
+            0.0647424830844348466353057163395410L };
+  assert(degree <= maxPadeDegree);
+  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+  result.setZero(T.rows(), T.rows());
+  for (int k = 0; k < degree; ++k)
+    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+                           .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade8(MatrixType& result, const MatrixType& T)
+{
+  const int degree = 8;
+  const RealScalar nodes[]   = { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L,
+            0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L,
+            0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L,
+            0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L };
+  const RealScalar weights[] = { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L,
+            0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
+            0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
+            0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L };
+  assert(degree <= maxPadeDegree);
+  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+  result.setZero(T.rows(), T.rows());
+  for (int k = 0; k < degree; ++k)
+    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+                           .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade9(MatrixType& result, const MatrixType& T)
+{
+  const int degree = 9;
+  const RealScalar nodes[]   = { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L,
+            0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L,
+            0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L,
+            0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L,
+            0.9840801197538130449177881014518364L };
+  const RealScalar weights[] = { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L,
+            0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L,
+            0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
+            0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
+            0.0406371941807872059859460790552618L };
+  assert(degree <= maxPadeDegree);
+  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+  result.setZero(T.rows(), T.rows());
+  for (int k = 0; k < degree; ++k)
+    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+                           .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade10(MatrixType& result, const MatrixType& T)
+{
+  const int degree = 10;
+  const RealScalar nodes[]   = { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L,
+            0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L,
+            0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L,
+            0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L,
+            0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L };
+  const RealScalar weights[] = { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L,
+            0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L,
+            0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
+            0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
+            0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L };
+  assert(degree <= maxPadeDegree);
+  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+  result.setZero(T.rows(), T.rows());
+  for (int k = 0; k < degree; ++k)
+    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+                           .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const MatrixType& T)
+{
+  const int degree = 11;
+  const RealScalar nodes[]   = { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L,
+            0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L,
+            0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L,
+            0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L,
+            0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L,
+            0.9891143290730284964019690005614287L };
+  const RealScalar weights[] = { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L,
+            0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L,
+            0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L,
+            0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
+            0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
+            0.0278342835580868332413768602212743L };
+  assert(degree <= maxPadeDegree);
+  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+  result.setZero(T.rows(), T.rows());
+  for (int k = 0; k < degree; ++k)
+    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+                           .template triangularView<Upper>().solve(TminusI);
+}
+
+/** \ingroup MatrixFunctions_Module
+  *
+  * \brief Proxy for the matrix logarithm of some matrix (expression).
+  *
+  * \tparam Derived  Type of the argument to the matrix function.
+  *
+  * This class holds the argument to the matrix function until it is
+  * assigned or evaluated for some other reason (so the argument
+  * should not be changed in the meantime). It is the return type of
+  * MatrixBase::log() and most of the time this is the only way it
+  * is used.
+  */
+template<typename Derived> class MatrixLogarithmReturnValue
+: public ReturnByValue<MatrixLogarithmReturnValue<Derived> >
+{
+public:
+
+  typedef typename Derived::Scalar Scalar;
+  typedef typename Derived::Index Index;
+
+  /** \brief Constructor.
+    *
+    * \param[in]  A  %Matrix (expression) forming the argument of the matrix logarithm.
+    */
+  MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }
+  
+  /** \brief Compute the matrix logarithm.
+    *
+    * \param[out]  result  Logarithm of \p A, where \p A is as specified in the constructor.
+    */
+  template <typename ResultType>
+  inline void evalTo(ResultType& result) const
+  {
+    typedef typename Derived::PlainObject PlainObject;
+    typedef internal::traits<PlainObject> Traits;
+    static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
+    static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
+    static const int Options = PlainObject::Options;
+    typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+    typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+    typedef MatrixLogarithmAtomic<DynMatrixType> AtomicType;
+    AtomicType atomic;
+    
+    const PlainObject Aevaluated = m_A.eval();
+    MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
+    mf.compute(result);
+  }
+
+  Index rows() const { return m_A.rows(); }
+  Index cols() const { return m_A.cols(); }
+  
+private:
+  typename internal::nested<Derived>::type m_A;
+  
+  MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&);
+};
+
+namespace internal {
+  template<typename Derived>
+  struct traits<MatrixLogarithmReturnValue<Derived> >
+  {
+    typedef typename Derived::PlainObject ReturnType;
+  };
+}
+
+
+/********** MatrixBase method **********/
+
+
+template <typename Derived>
+const MatrixLogarithmReturnValue<Derived> MatrixBase<Derived>::log() const
+{
+  eigen_assert(rows() == cols());
+  return MatrixLogarithmReturnValue<Derived>(derived());
+}
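+
+// Usage sketch (illustrative comment, not part of the header): with
+// <unsupported/Eigen/MatrixFunctions> included, the matrix logarithm of a square
+// matrix is requested directly via MatrixBase::log(), e.g.
+//
+//   Eigen::Matrix2d A;
+//   A << 2, 0,
+//        0, 3;
+//   Eigen::Matrix2d logA = A.log();   // exp(logA) recovers A up to rounding error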
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_LOGARITHM
diff --git a/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
new file mode 100644
index 000000000..10319fa17
--- /dev/null
+++ b/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
@@ -0,0 +1,484 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_SQUARE_ROOT
+#define EIGEN_MATRIX_SQUARE_ROOT
+
+namespace Eigen { 
+
+/** \ingroup MatrixFunctions_Module
+  * \brief Class for computing matrix square roots of upper quasi-triangular matrices.
+  * \tparam  MatrixType  type of the argument of the matrix square root,
+  *                      expected to be an instantiation of the Matrix class template.
+  *
+  * This class computes the square root of the upper quasi-triangular
+  * matrix stored in the upper Hessenberg part of the matrix passed to
+  * the constructor.
+  *
+  * \sa MatrixSquareRoot, MatrixSquareRootTriangular
+  */
+template <typename MatrixType>
+class MatrixSquareRootQuasiTriangular
+{
+  public:
+
+    /** \brief Constructor. 
+      *
+      * \param[in]  A  upper quasi-triangular matrix whose square root 
+      *                is to be computed.
+      *
+      * The class stores a reference to \p A, so it should not be
+      * changed (or destroyed) before compute() is called.
+      */
+    MatrixSquareRootQuasiTriangular(const MatrixType& A) 
+      : m_A(A) 
+    {
+      eigen_assert(A.rows() == A.cols());
+    }
+    
+    /** \brief Compute the matrix square root
+      *
+      * \param[out] result  square root of \p A, as specified in the constructor.
+      *
+      * Only the upper Hessenberg part of \p result is updated, the
+      * rest is not touched.  See MatrixBase::sqrt() for details on
+      * how this computation is implemented.
+      */
+    template <typename ResultType> void compute(ResultType &result);    
+    
+  private:
+    typedef typename MatrixType::Index Index;
+    typedef typename MatrixType::Scalar Scalar;
+    
+    void computeDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T);
+    void computeOffDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T);
+    void compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i);
+    void compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
+  				  typename MatrixType::Index i, typename MatrixType::Index j);
+    void compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
+  				  typename MatrixType::Index i, typename MatrixType::Index j);
+    void compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
+  				  typename MatrixType::Index i, typename MatrixType::Index j);
+    void compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
+  				  typename MatrixType::Index i, typename MatrixType::Index j);
+  
+    template <typename SmallMatrixType>
+    static void solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A, 
+  				     const SmallMatrixType& B, const SmallMatrixType& C);
+  
+    const MatrixType& m_A;
+};
+
+template <typename MatrixType>
+template <typename ResultType> 
+void MatrixSquareRootQuasiTriangular<MatrixType>::compute(ResultType &result)
+{
+  // Compute Schur decomposition of m_A
+  const RealSchur<MatrixType> schurOfA(m_A);  
+  const MatrixType& T = schurOfA.matrixT();
+  const MatrixType& U = schurOfA.matrixU();
+
+  // Compute square root of T
+  MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows());
+  computeDiagonalPartOfSqrt(sqrtT, T);
+  computeOffDiagonalPartOfSqrt(sqrtT, T);
+
+  // Compute square root of m_A
+  result = U * sqrtT * U.adjoint();
+}
+
+// pre:  T is quasi-upper-triangular and sqrtT is a zero matrix of the same size
+// post: the diagonal blocks of sqrtT are the square roots of the diagonal blocks of T
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>::computeDiagonalPartOfSqrt(MatrixType& sqrtT, 
+									  const MatrixType& T)
+{
+  const Index size = m_A.rows();
+  for (Index i = 0; i < size; i++) {
+    if (i == size - 1 || T.coeff(i+1, i) == 0) {
+      eigen_assert(T(i,i) > 0);
+      sqrtT.coeffRef(i,i) = internal::sqrt(T.coeff(i,i));
+    }
+    else {
+      compute2x2diagonalBlock(sqrtT, T, i);
+      ++i;
+    }
+  }
+}
+
+// pre:  T is quasi-upper-triangular and diagonal blocks of sqrtT are square root of diagonal blocks of T.
+// post: sqrtT is the square root of T.
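+// The (i,j) block S_ij of S = sqrtT is computed column by column from the
+// Sylvester-type relation S_ii S_ij + S_ij S_jj = T_ij - sum_{k=i+1..j-1} S_ik S_kj.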
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>::computeOffDiagonalPartOfSqrt(MatrixType& sqrtT, 
+									     const MatrixType& T)
+{
+  const Index size = m_A.rows();
+  for (Index j = 1; j < size; j++) {
+      if (T.coeff(j, j-1) != 0)  // if T(j-1:j, j-1:j) is a 2-by-2 block
+	continue;
+    for (Index i = j-1; i >= 0; i--) {
+      if (i > 0 && T.coeff(i, i-1) != 0)  // if T(i-1:i, i-1:i) is a 2-by-2 block
+	continue;
+      bool iBlockIs2x2 = (i < size - 1) && (T.coeff(i+1, i) != 0);
+      bool jBlockIs2x2 = (j < size - 1) && (T.coeff(j+1, j) != 0);
+      if (iBlockIs2x2 && jBlockIs2x2) 
+	compute2x2offDiagonalBlock(sqrtT, T, i, j);
+      else if (iBlockIs2x2 && !jBlockIs2x2) 
+	compute2x1offDiagonalBlock(sqrtT, T, i, j);
+      else if (!iBlockIs2x2 && jBlockIs2x2) 
+	compute1x2offDiagonalBlock(sqrtT, T, i, j);
+      else if (!iBlockIs2x2 && !jBlockIs2x2) 
+	compute1x1offDiagonalBlock(sqrtT, T, i, j);
+    }
+  }
+}
+
+// pre:  T.block(i,i,2,2) has complex conjugate eigenvalues
+// post: sqrtT.block(i,i,2,2) is square root of T.block(i,i,2,2)
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+     ::compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i)
+{
+  // TODO: This case (2-by-2 blocks with complex conjugate eigenvalues) is probably hidden somewhere
+  //       in EigenSolver. If we expose it, we could call it directly from here.
+  Matrix<Scalar,2,2> block = T.template block<2,2>(i,i);
+  EigenSolver<Matrix<Scalar,2,2> > es(block);
+  sqrtT.template block<2,2>(i,i)
+    = (es.eigenvectors() * es.eigenvalues().cwiseSqrt().asDiagonal() * es.eigenvectors().inverse()).real();
+}
+
+// pre:  block structure of T is such that (i,j) is a 1x1 block,
+//       all blocks of sqrtT to left of and below (i,j) are correct
+// post: sqrtT(i,j) has the correct value
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+     ::compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
+				  typename MatrixType::Index i, typename MatrixType::Index j)
+{
+  Scalar tmp = (sqrtT.row(i).segment(i+1,j-i-1) * sqrtT.col(j).segment(i+1,j-i-1)).value();
+  sqrtT.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (sqrtT.coeff(i,i) + sqrtT.coeff(j,j));
+}
+
+// similar to compute1x1offDiagonalBlock()
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+     ::compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
+				  typename MatrixType::Index i, typename MatrixType::Index j)
+{
+  Matrix<Scalar,1,2> rhs = T.template block<1,2>(i,j);
+  if (j-i > 1)
+    rhs -= sqrtT.block(i, i+1, 1, j-i-1) * sqrtT.block(i+1, j, j-i-1, 2);
+  Matrix<Scalar,2,2> A = sqrtT.coeff(i,i) * Matrix<Scalar,2,2>::Identity();
+  A += sqrtT.template block<2,2>(j,j).transpose();
+  sqrtT.template block<1,2>(i,j).transpose() = A.fullPivLu().solve(rhs.transpose());
+}
+
+// similar to compute1x1offDiagonalBlock()
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+     ::compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
+				  typename MatrixType::Index i, typename MatrixType::Index j)
+{
+  Matrix<Scalar,2,1> rhs = T.template block<2,1>(i,j);
+  if (j-i > 2)
+    rhs -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 1);
+  Matrix<Scalar,2,2> A = sqrtT.coeff(j,j) * Matrix<Scalar,2,2>::Identity();
+  A += sqrtT.template block<2,2>(i,i);
+  sqrtT.template block<2,1>(i,j) = A.fullPivLu().solve(rhs);
+}
+
+// similar to compute1x1offDiagonalBlock()
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+     ::compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
+				  typename MatrixType::Index i, typename MatrixType::Index j)
+{
+  Matrix<Scalar,2,2> A = sqrtT.template block<2,2>(i,i);
+  Matrix<Scalar,2,2> B = sqrtT.template block<2,2>(j,j);
+  Matrix<Scalar,2,2> C = T.template block<2,2>(i,j);
+  if (j-i > 2)
+    C -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 2);
+  Matrix<Scalar,2,2> X;
+  solveAuxiliaryEquation(X, A, B, C);
+  sqrtT.template block<2,2>(i,j) = X;
+}
+
+// solves the equation A X + X B = C where all matrices are 2-by-2
+template <typename MatrixType>
+template <typename SmallMatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+     ::solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A,
+			      const SmallMatrixType& B, const SmallMatrixType& C)
+{
+  EIGEN_STATIC_ASSERT((internal::is_same<SmallMatrixType, Matrix<Scalar,2,2> >::value),
+		      EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT);
+
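+  // Stacking X row-wise as (x00, x01, x10, x11) turns A X + X B = C into the
+  // 4-by-4 linear system built below, which is then solved by full-pivoting LU.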
+  Matrix<Scalar,4,4> coeffMatrix = Matrix<Scalar,4,4>::Zero();
+  coeffMatrix.coeffRef(0,0) = A.coeff(0,0) + B.coeff(0,0);
+  coeffMatrix.coeffRef(1,1) = A.coeff(0,0) + B.coeff(1,1);
+  coeffMatrix.coeffRef(2,2) = A.coeff(1,1) + B.coeff(0,0);
+  coeffMatrix.coeffRef(3,3) = A.coeff(1,1) + B.coeff(1,1);
+  coeffMatrix.coeffRef(0,1) = B.coeff(1,0);
+  coeffMatrix.coeffRef(0,2) = A.coeff(0,1);
+  coeffMatrix.coeffRef(1,0) = B.coeff(0,1);
+  coeffMatrix.coeffRef(1,3) = A.coeff(0,1);
+  coeffMatrix.coeffRef(2,0) = A.coeff(1,0);
+  coeffMatrix.coeffRef(2,3) = B.coeff(1,0);
+  coeffMatrix.coeffRef(3,1) = A.coeff(1,0);
+  coeffMatrix.coeffRef(3,2) = B.coeff(0,1);
+  
+  Matrix<Scalar,4,1> rhs;
+  rhs.coeffRef(0) = C.coeff(0,0);
+  rhs.coeffRef(1) = C.coeff(0,1);
+  rhs.coeffRef(2) = C.coeff(1,0);
+  rhs.coeffRef(3) = C.coeff(1,1);
+  
+  Matrix<Scalar,4,1> result;
+  result = coeffMatrix.fullPivLu().solve(rhs);
+
+  X.coeffRef(0,0) = result.coeff(0);
+  X.coeffRef(0,1) = result.coeff(1);
+  X.coeffRef(1,0) = result.coeff(2);
+  X.coeffRef(1,1) = result.coeff(3);
+}
+
+
+/** \ingroup MatrixFunctions_Module
+  * \brief Class for computing matrix square roots of upper triangular matrices.
+  * \tparam  MatrixType  type of the argument of the matrix square root,
+  *                      expected to be an instantiation of the Matrix class template.
+  *
+  * This class computes the square root of the upper triangular matrix
+  * stored in the upper triangular part (including the diagonal) of
+  * the matrix passed to the constructor.
+  *
+  * \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular
+  */
+template <typename MatrixType>
+class MatrixSquareRootTriangular
+{
+  public:
+    MatrixSquareRootTriangular(const MatrixType& A) 
+      : m_A(A) 
+    {
+      eigen_assert(A.rows() == A.cols());
+    }
+
+    /** \brief Compute the matrix square root
+      *
+      * \param[out] result  square root of \p A, as specified in the constructor.
+      *
+      * Only the upper triangular part (including the diagonal) of 
+      * \p result is updated, the rest is not touched.  See
+      * MatrixBase::sqrt() for details on how this computation is
+      * implemented.
+      */
+    template <typename ResultType> void compute(ResultType &result);    
+
+ private:
+    const MatrixType& m_A;
+};
+
+template <typename MatrixType>
+template <typename ResultType> 
+void MatrixSquareRootTriangular<MatrixType>::compute(ResultType &result)
+{
+  // Compute Schur decomposition of m_A
+  const ComplexSchur<MatrixType> schurOfA(m_A);  
+  const MatrixType& T = schurOfA.matrixT();
+  const MatrixType& U = schurOfA.matrixU();
+
+  // Compute square root of T and store it in upper triangular part of result
+  // This uses that the square root of triangular matrices can be computed directly.
+  result.resize(m_A.rows(), m_A.cols());
+  typedef typename MatrixType::Index Index;
+  for (Index i = 0; i < m_A.rows(); i++) {
+    result.coeffRef(i,i) = internal::sqrt(T.coeff(i,i));
+  }
+  for (Index j = 1; j < m_A.cols(); j++) {
+    for (Index i = j-1; i >= 0; i--) {
+      typedef typename MatrixType::Scalar Scalar;
+      // if i = j-1, then segment has length 0 so tmp = 0
+      Scalar tmp = (result.row(i).segment(i+1,j-i-1) * result.col(j).segment(i+1,j-i-1)).value();
+      // denominator may be zero if original matrix is singular
+      result.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j));
+    }
+  }
+
+  // Compute square root of m_A as U * result * U.adjoint()
+  MatrixType tmp;
+  tmp.noalias() = U * result.template triangularView<Upper>();
+  result.noalias() = tmp * U.adjoint();
+}
+
+
+/** \ingroup MatrixFunctions_Module
+  * \brief Class for computing matrix square roots of general matrices.
+  * \tparam  MatrixType  type of the argument of the matrix square root,
+  *                      expected to be an instantiation of the Matrix class template.
+  *
+  * \sa MatrixSquareRootTriangular, MatrixSquareRootQuasiTriangular, MatrixBase::sqrt()
+  */
+template <typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
+class MatrixSquareRoot
+{
+  public:
+
+    /** \brief Constructor. 
+      *
+      * \param[in]  A  matrix whose square root is to be computed.
+      *
+      * The class stores a reference to \p A, so it should not be
+      * changed (or destroyed) before compute() is called.
+      */
+    MatrixSquareRoot(const MatrixType& A); 
+    
+    /** \brief Compute the matrix square root
+      *
+      * \param[out] result  square root of \p A, as specified in the constructor.
+      *
+      * See MatrixBase::sqrt() for details on how this computation is
+      * implemented.
+      */
+    template <typename ResultType> void compute(ResultType &result);    
+};
+
+
+// ********** Partial specialization for real matrices **********
+
+template <typename MatrixType>
+class MatrixSquareRoot<MatrixType, 0>
+{
+  public:
+
+    MatrixSquareRoot(const MatrixType& A) 
+      : m_A(A) 
+    {  
+      eigen_assert(A.rows() == A.cols());
+    }
+  
+    template <typename ResultType> void compute(ResultType &result)
+    {
+      // Compute Schur decomposition of m_A
+      const RealSchur<MatrixType> schurOfA(m_A);  
+      const MatrixType& T = schurOfA.matrixT();
+      const MatrixType& U = schurOfA.matrixU();
+    
+      // Compute square root of T
+      MatrixSquareRootQuasiTriangular<MatrixType> tmp(T);
+      MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows());
+      tmp.compute(sqrtT);
+    
+      // Compute square root of m_A
+      result = U * sqrtT * U.adjoint();
+    }
+    
+  private:
+    const MatrixType& m_A;
+};
+
+
+// ********** Partial specialization for complex matrices **********
+
+template <typename MatrixType>
+class MatrixSquareRoot<MatrixType, 1>
+{
+  public:
+
+    MatrixSquareRoot(const MatrixType& A) 
+      : m_A(A) 
+    {  
+      eigen_assert(A.rows() == A.cols());
+    }
+  
+    template <typename ResultType> void compute(ResultType &result)
+    {
+      // Compute Schur decomposition of m_A
+      const ComplexSchur<MatrixType> schurOfA(m_A);  
+      const MatrixType& T = schurOfA.matrixT();
+      const MatrixType& U = schurOfA.matrixU();
+    
+      // Compute square root of T
+      MatrixSquareRootTriangular<MatrixType> tmp(T);
+      MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows());
+      tmp.compute(sqrtT);
+    
+      // Compute square root of m_A
+      result = U * sqrtT * U.adjoint();
+    }
+    
+  private:
+    const MatrixType& m_A;
+};
+
+
+/** \ingroup MatrixFunctions_Module
+  *
+  * \brief Proxy for the matrix square root of some matrix (expression).
+  *
+  * \tparam Derived  Type of the argument to the matrix square root.
+  *
+  * This class holds the argument to the matrix square root until it
+  * is assigned or evaluated for some other reason (so the argument
+  * should not be changed in the meantime). It is the return type of
+  * MatrixBase::sqrt() and most of the time this is the only way it is
+  * used.
+  */
+template<typename Derived> class MatrixSquareRootReturnValue
+: public ReturnByValue<MatrixSquareRootReturnValue<Derived> >
+{
+    typedef typename Derived::Index Index;
+  public:
+    /** \brief Constructor.
+      *
+      * \param[in]  src  %Matrix (expression) forming the argument of the
+      * matrix square root.
+      */
+    MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { }
+
+    /** \brief Compute the matrix square root.
+      *
+      * \param[out]  result  the matrix square root of \p src, as specified
+      * in the constructor.
+      */
+    template <typename ResultType>
+    inline void evalTo(ResultType& result) const
+    {
+      const typename Derived::PlainObject srcEvaluated = m_src.eval();
+      MatrixSquareRoot<typename Derived::PlainObject> me(srcEvaluated);
+      me.compute(result);
+    }
+
+    Index rows() const { return m_src.rows(); }
+    Index cols() const { return m_src.cols(); }
+
+  protected:
+    const Derived& m_src;
+  private:
+    MatrixSquareRootReturnValue& operator=(const MatrixSquareRootReturnValue&);
+};
+
+namespace internal {
+template<typename Derived>
+struct traits<MatrixSquareRootReturnValue<Derived> >
+{
+  typedef typename Derived::PlainObject ReturnType;
+};
+}
+
+template <typename Derived>
+const MatrixSquareRootReturnValue<Derived> MatrixBase<Derived>::sqrt() const
+{
+  eigen_assert(rows() == cols());
+  return MatrixSquareRootReturnValue<Derived>(derived());
+}
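+
+// Usage sketch (illustrative comment, not part of the header): with
+// <unsupported/Eigen/MatrixFunctions> included, MatrixBase::sqrt() returns a proxy
+// that is evaluated on assignment, e.g.
+//
+//   Eigen::MatrixXd A(2,2);
+//   A << 2, 1,
+//        1, 2;
+//   Eigen::MatrixXd S = A.sqrt();   // S * S reproduces A up to rounding error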
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_SQUARE_ROOT
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/StemFunction.h b/resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/StemFunction.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/StemFunction.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/MatrixFunctions/StemFunction.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/MoreVectorization/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/MoreVectorization/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/MoreVectorization/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/MoreVectorization/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/MoreVectorization/MathFunctions.h b/resources/3rdParty/eigen/unsupported/Eigen/src/MoreVectorization/MathFunctions.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/MoreVectorization/MathFunctions.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/MoreVectorization/MathFunctions.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/chkder.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/chkder.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/chkder.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/chkder.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/covar.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/covar.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/covar.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/covar.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/r1updt.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NumericalDiff/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/NumericalDiff/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NumericalDiff/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NumericalDiff/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h b/resources/3rdParty/eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Polynomials/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/Polynomials/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Polynomials/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Polynomials/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Polynomials/Companion.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Polynomials/Companion.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Polynomials/Companion.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Polynomials/Companion.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Polynomials/PolynomialSolver.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Polynomials/PolynomialUtils.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineMatrix.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineMatrix.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineMatrix.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineMatrix.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineProduct.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineProduct.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineProduct.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineProduct.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineUtil.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineUtil.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Skyline/SkylineUtil.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Skyline/SkylineUtil.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h b/resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h b/resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h b/resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h
diff --git a/resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h b/resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
new file mode 100644
index 000000000..4716b68e7
--- /dev/null
+++ b/resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
@@ -0,0 +1,221 @@
+
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BROWSE_MATRICES_H
+#define EIGEN_BROWSE_MATRICES_H
+
+namespace Eigen {
+
+enum {
+  SPD = 0x100,
+  NonSymmetric = 0x0
+}; 
+
+/** 
+ * @brief Iterator to browse matrices from a specified folder
+ * 
+ * This class is used to load all the matrices from a folder.
+ * The matrices should be in Matrix Market format.
+ * It is assumed that a matrix is named matname.mtx, or
+ * matname_SPD.mtx if it is symmetric positive definite (or Hermitian).
+ * The right hand side vectors are loaded as well, if they exist.
+ * They should be named matname_b.mtx; note that the right hand side
+ * of an SPD matrix is named matname_SPD_b.mtx.
+ * 
+ * Sometimes a reference solution is available. In this case, it should be named matname_x.mtx.
+ * 
+ * Sample code
+ * \code
+ * 
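+ *   // Minimal illustrative sketch; "path/to/matrices" is a placeholder folder.
+ *   MatrixMarketIterator<double> it("path/to/matrices");
+ *   for ( ; it; ++it)
+ *   {
+ *     std::cout << "Matrix: " << it.matname() << std::endl;
+ *     SparseMatrix<double,ColMajor>& A = it.matrix();
+ *     Matrix<double,Dynamic,1>& b = it.rhs();
+ *     // ... solve A x = b with the solver of your choice ...
+ *   }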
+ * \endcode
+ * 
+ * \tparam Scalar The scalar type 
+ */
+template <typename Scalar>
+class MatrixMarketIterator 
+{
+  public:
+    typedef Matrix<Scalar,Dynamic,1> VectorType; 
+    typedef SparseMatrix<Scalar,ColMajor> MatrixType; 
+  
+  public:
+    MatrixMarketIterator(const std::string folder):m_sym(0),m_isvalid(false),m_matIsLoaded(false),m_hasRhs(false),m_hasrefX(false),m_folder(folder)
+    {
+      m_folder_id = opendir(folder.c_str());
+      if (!m_folder_id){
+        m_isvalid = false;
+        std::cerr << "The provided Matrix folder could not be opened \n\n";
+        abort();
+      }
+      Getnextvalidmatrix();
+    }
+    
+    ~MatrixMarketIterator()
+    {
+      if (m_folder_id) closedir(m_folder_id); 
+    }
+    
+    inline MatrixMarketIterator& operator++()
+    {
+      m_matIsLoaded = false;
+      m_hasrefX = false;
+      m_hasRhs = false;
+      Getnextvalidmatrix();
+      return *this;
+    }
+    inline operator bool() const { return m_isvalid;}
+    
+    /** Return the sparse matrix corresponding to the current file */
+    inline MatrixType& matrix() 
+    { 
+      // Read the matrix
+      if (m_matIsLoaded) return m_mat;
+      
+      std::string matrix_file = m_folder + "/" + m_matname + ".mtx";
+      if ( !loadMarket(m_mat, matrix_file)) 
+      {
+        m_matIsLoaded = false;
+        return m_mat;
+      }
+      m_matIsLoaded = true; 
+      
+      if (m_sym != NonSymmetric) 
+      { // The file stores only one triangular part of a symmetric matrix; expand it to the
+        // full selfadjoint matrix so that solvers for general (nonsymmetric) matrices can use it.
+        MatrixType B; 
+        B = m_mat;
+        m_mat = B.template selfadjointView<Lower>();
+      }
+      return m_mat; 
+    }
+    
+    /** Return the right hand side corresponding to the current matrix. 
+     * If the rhs file is not provided, a random rhs is generated
+     */
+    inline VectorType& rhs() 
+    { 
+       // Get the right hand side
+      if (m_hasRhs) return m_rhs;
+      
+      std::string rhs_file;
+      rhs_file = m_folder + "/" + m_matname + "_b.mtx"; // The pattern is matname_b.mtx
+      m_hasRhs = Fileexists(rhs_file);
+      if (m_hasRhs)
+      {
+        m_rhs.resize(m_mat.cols());
+        m_hasRhs = loadMarketVector(m_rhs, rhs_file);
+      }
+      if (!m_hasRhs)
+      {
+        // Generate a random right hand side
+        if (!m_matIsLoaded) this->matrix(); 
+        m_refX.resize(m_mat.cols());
+        m_refX.setRandom();
+        m_rhs = m_mat * m_refX;
+        m_hasrefX = true;
+        m_hasRhs = true;
+      }
+      return m_rhs; 
+    }
+    
+    /** Return a reference solution
+     * If it is not provided and the right hand side is not available either,
+     * then refX is randomly generated such that A*refX = b,
+     * where A and b are the matrix and the rhs.
+     * Note that when a rhs is provided, refX is not available.
+     */
+    inline VectorType& refX() 
+    { 
+      // Check if a reference solution is provided
+      if (m_hasrefX) return m_refX;
+      
+      std::string lhs_file;
+      lhs_file = m_folder + "/" + m_matname + "_x.mtx"; 
+      m_hasrefX = Fileexists(lhs_file);
+      if (m_hasrefX)
+      {
+        m_refX.resize(m_mat.cols());
+        m_hasrefX = loadMarketVector(m_refX, lhs_file);
+      }
+      return m_refX; 
+    }
+    
+    inline std::string& matname() { return m_matname; }
+    
+    inline int sym() { return m_sym; }
+    
+    inline bool hasRhs() {return m_hasRhs; }
+    inline bool hasrefX() {return m_hasrefX; }
+    
+  protected:
+    
+    inline bool Fileexists(std::string file)
+    {
+      std::ifstream file_id(file.c_str());
+      if (!file_id.good() ) 
+      {
+        return false;
+      }
+      else 
+      {
+        file_id.close();
+        return true;
+      }
+    }
+    
+    void Getnextvalidmatrix( )
+    {
+      m_isvalid = false;
+      // Advance to the next valid matrix file in the folder
+      while ( (m_curs_id = readdir(m_folder_id)) != NULL) {
+        m_isvalid = false;
+        std::string curfile;
+        curfile = m_folder + "/" + m_curs_id->d_name;
+        // Discard if it is a folder
+        if (m_curs_id->d_type == DT_DIR) continue; //FIXME This may not be available on non BSD systems
+//         struct stat st_buf; 
+//         stat (curfile.c_str(), &st_buf);
+//         if (S_ISDIR(st_buf.st_mode)) continue;
+        
+        // Determine from the header if it is a matrix or a right hand side 
+        bool isvector,iscomplex;
+        if(!getMarketHeader(curfile,m_sym,iscomplex,isvector)) continue;
+        if(isvector) continue;
+        
+        // Get the matrix name
+        std::string filename = m_curs_id->d_name;
+        m_matname = filename.substr(0, filename.length()-4); 
+        
+        // Find if the matrix is SPD 
+        size_t found = m_matname.find("SPD");
+        if( (found!=std::string::npos) && (m_sym != NonSymmetric) )
+          m_sym = SPD;
+       
+        m_isvalid = true;
+        break; 
+      }
+    }
+    int m_sym; // Symmetry of the matrix
+    MatrixType m_mat; // Current matrix  
+    VectorType m_rhs;  // Current vector
+    VectorType m_refX; // The reference solution, if exists
+    std::string m_matname; // Matrix Name
+    bool m_isvalid; 
+    bool m_matIsLoaded; // Determine if the matrix has already been loaded from the file
+    bool m_hasRhs; // The right hand side exists
+    bool m_hasrefX; // A reference solution is provided
+    std::string m_folder;
+    DIR * m_folder_id;
+    struct dirent *m_curs_id; 
+    
+};
+
+} // end namespace Eigen
+
+#endif
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h b/resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Splines/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/Eigen/src/Splines/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Splines/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Splines/CMakeLists.txt
diff --git a/resources/3rdParty/eigen/unsupported/Eigen/src/Splines/Spline.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Splines/Spline.h
new file mode 100644
index 000000000..3680f013a
--- /dev/null
+++ b/resources/3rdParty/eigen/unsupported/Eigen/src/Splines/Spline.h
@@ -0,0 +1,464 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010-2011 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPLINE_H
+#define EIGEN_SPLINE_H
+
+#include "SplineFwd.h"
+
+namespace Eigen
+{
+    /**
+     * \ingroup Splines_Module
+     * \class Spline
+     * \brief A class representing multi-dimensional spline curves.
+     *
+     * The class represents B-splines with non-uniform knot vectors. Each control
+     * point of the B-spline is associated with a basis function
+     * \f{align*}
+     *   C(u) & = \sum_{i=0}^{n}N_{i,p}(u)P_i
+     * \f}
+     *
+     * \tparam _Scalar The underlying data type (typically float or double)
+     * \tparam _Dim The curve dimension (e.g. 2 or 3)
+     * \tparam _Degree By default set to Dynamic; can be set to the actual desired
+     *                degree for optimization purposes (would result in stack allocation
+     *                of several temporary variables).
+     **/
+  template <typename _Scalar, int _Dim, int _Degree>
+  class Spline
+  {
+  public:
+    typedef _Scalar Scalar; /*!< The spline curve's scalar type. */
+    enum { Dimension = _Dim /*!< The spline curve's dimension. */ };
+    enum { Degree = _Degree /*!< The spline curve's degree. */ };
+
+    /** \brief The point type the spline is representing. */
+    typedef typename SplineTraits<Spline>::PointType PointType;
+    
+    /** \brief The data type used to store knot vectors. */
+    typedef typename SplineTraits<Spline>::KnotVectorType KnotVectorType;
+    
+    /** \brief The data type used to store non-zero basis functions. */
+    typedef typename SplineTraits<Spline>::BasisVectorType BasisVectorType;
+    
+    /** \brief The data type representing the spline's control points. */
+    typedef typename SplineTraits<Spline>::ControlPointVectorType ControlPointVectorType;
+
+    /**
+    * \brief Creates a spline from a knot vector and control points.
+    * \param knots The spline's knot vector.
+    * \param ctrls The spline's control point vector.
+    **/
+    template <typename OtherVectorType, typename OtherArrayType>
+    Spline(const OtherVectorType& knots, const OtherArrayType& ctrls) : m_knots(knots), m_ctrls(ctrls) {}
+
+    /**
+    * \brief Copy constructor for splines.
+    * \param spline The input spline.
+    **/
+    template <int OtherDegree>
+    Spline(const Spline<Scalar, Dimension, OtherDegree>& spline) : 
+    m_knots(spline.knots()), m_ctrls(spline.ctrls()) {}
+
+    /**
+     * \brief Returns the knots of the underlying spline.
+     **/
+    const KnotVectorType& knots() const { return m_knots; }
+    
+    /**
+     * \brief Returns the control points of the underlying spline.
+     **/    
+    const ControlPointVectorType& ctrls() const { return m_ctrls; }
+
+    /**
+     * \brief Returns the spline value at a given site \f$u\f$.
+     *
+     * The function returns
+     * \f{align*}
+     *   C(u) & = \sum_{i=0}^{n}N_{i,p}(u)P_i
+     * \f}
+     *
+     * \param u Parameter \f$u \in [0;1]\f$ at which the spline is evaluated.
+     * \return The spline value at the given location \f$u\f$.
+     **/
+    PointType operator()(Scalar u) const;
+
+    /**
+     * \brief Evaluation of spline derivatives of up-to given order.
+     *
+     * The function returns
+     * \f{align*}
+     *   \frac{d^i}{du^i}C(u) & = \sum_{i=0}^{n} \frac{d^i}{du^i} N_{i,p}(u)P_i
+     * \f}
+     * for i ranging between 0 and order.
+     *
+     * \param u Parameter \f$u \in [0;1]\f$ at which the spline derivative is evaluated.
+     * \param order The order up to which the derivatives are computed.
+     **/
+    typename SplineTraits<Spline>::DerivativeType
+      derivatives(Scalar u, DenseIndex order) const;
+
+    /**
+     * \copydoc Spline::derivatives
+     * Using the template version of this function is more efficient since
+     * temporary objects are allocated on the stack whenever this is possible.
+     **/    
+    template <int DerivativeOrder>
+    typename SplineTraits<Spline,DerivativeOrder>::DerivativeType
+      derivatives(Scalar u, DenseIndex order = DerivativeOrder) const;
+
+    /**
+     * \brief Computes the non-zero basis functions at the given site.
+     *
+     * Splines have local support and a point from their image is defined
+     * by exactly \f$p+1\f$ control points \f$P_i\f$ where \f$p\f$ is the
+     * spline degree.
+     *
+     * This function computes the \f$p+1\f$ non-zero basis function values
+     * for a given parameter value \f$u\f$. It returns
+     * \f{align*}{
+     *   N_{i,p}(u), \hdots, N_{i+p+1,p}(u)
+     * \f}
+     *
+     * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis functions 
+     *          are computed.
+     **/
+    typename SplineTraits<Spline>::BasisVectorType
+      basisFunctions(Scalar u) const;
+
+    /**
+     * \brief Computes the non-zero spline basis function derivatives up to given order.
+     *
+     * The function computes
+     * \f{align*}{
+     *   \frac{d^i}{du^i} N_{i,p}(u), \hdots, \frac{d^i}{du^i} N_{i+p+1,p}(u)
+     * \f}
+     * with i ranging from 0 up to the specified order.
+     *
+     * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis function
+     *          derivatives are computed.
+     * \param order The order up to which the basis function derivatives are computed.
+     **/
+    typename SplineTraits<Spline>::BasisDerivativeType
+      basisFunctionDerivatives(Scalar u, DenseIndex order) const;
+
+    /**
+     * \copydoc Spline::basisFunctionDerivatives
+     * Using the template version of this function is more efficient since
+     * temporary objects are allocated on the stack whenever this is possible.
+     **/    
+    template <int DerivativeOrder>
+    typename SplineTraits<Spline,DerivativeOrder>::BasisDerivativeType
+      basisFunctionDerivatives(Scalar u, DenseIndex order = DerivativeOrder) const;
+
+    /**
+     * \brief Returns the spline degree.
+     **/ 
+    DenseIndex degree() const;
+
+    /** 
+     * \brief Returns the span within the knot vector in which u falls.
+     * \param u The site for which the span is determined.
+     **/
+    DenseIndex span(Scalar u) const;
+
+    /**
+     * \brief Computes the span within the provided knot vector in which u falls.
+     **/
+    static DenseIndex Span(typename SplineTraits<Spline>::Scalar u, DenseIndex degree, const typename SplineTraits<Spline>::KnotVectorType& knots);
+    
+    /**
+     * \brief Returns the spline's non-zero basis functions.
+     *
+     * The function computes and returns
+     * \f{align*}{
+     *   N_{i,p}(u), \hdots, N_{i+p+1,p}(u)
+     * \f}
+     *
+     * \param u The site at which the basis functions are computed.
+     * \param degree The degree of the underlying spline.
+     * \param knots The underlying spline's knot vector.
+     **/
+    static BasisVectorType BasisFunctions(Scalar u, DenseIndex degree, const KnotVectorType& knots);
+
+
+  private:
+    KnotVectorType m_knots; /*!< Knot vector. */
+    ControlPointVectorType  m_ctrls; /*!< Control points. */
+  };
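+
+  // Usage sketch (illustrative comment; Spline2d is assumed to be the double/2D
+  // convenience typedef provided by SplineFwd.h):
+  //
+  //   Spline2d::KnotVectorType knots(8);
+  //   knots << 0, 0, 0, 0, 1, 1, 1, 1;                  // clamped cubic knot vector
+  //   Spline2d::ControlPointVectorType ctrls(2, 4);     // 4 control points in 2D
+  //   ctrls << 0, 1, 2, 3,
+  //            0, 1, 1, 0;
+  //   Spline2d curve(knots, ctrls);
+  //   Spline2d::PointType p = curve(0.5);               // evaluate C(0.5)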
+
+  template <typename _Scalar, int _Dim, int _Degree>
+  DenseIndex Spline<_Scalar, _Dim, _Degree>::Span(
+    typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::Scalar u,
+    DenseIndex degree,
+    const typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::KnotVectorType& knots)
+  {
+    // Piegl & Tiller, "The NURBS Book", A2.1 (p. 68)
+    if (u <= knots(0)) return degree;
+    const Scalar* pos = std::upper_bound(knots.data()+degree-1, knots.data()+knots.size()-degree-1, u);
+    return static_cast<DenseIndex>( std::distance(knots.data(), pos) - 1 );
+  }
+
+  template <typename _Scalar, int _Dim, int _Degree>
+  typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType
+    Spline<_Scalar, _Dim, _Degree>::BasisFunctions(
+    typename Spline<_Scalar, _Dim, _Degree>::Scalar u,
+    DenseIndex degree,
+    const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots)
+  {
+    typedef typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType BasisVectorType;
+
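+    // Cox-de Boor recursion: the p+1 non-zero basis function values at u are built
+    // up degree by degree from N_{i,0}(u) = 1, using the 'left' and 'right' knot
+    // differences (cf. Piegl & Tiller, "The NURBS Book").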
+    const DenseIndex p = degree;
+    const DenseIndex i = Spline::Span(u, degree, knots);
+
+    const KnotVectorType& U = knots;
+
+    BasisVectorType left(p+1); left(0) = Scalar(0);
+    BasisVectorType right(p+1); right(0) = Scalar(0);        
+
+    VectorBlock<BasisVectorType,Degree>(left,1,p) = u - VectorBlock<const KnotVectorType,Degree>(U,i+1-p,p).reverse();
+    VectorBlock<BasisVectorType,Degree>(right,1,p) = VectorBlock<const KnotVectorType,Degree>(U,i+1,p) - u;
+
+    BasisVectorType N(1,p+1);
+    N(0) = Scalar(1);
+    for (DenseIndex j=1; j<=p; ++j)
+    {
+      Scalar saved = Scalar(0);
+      for (DenseIndex r=0; r<j; r++)
+      {
+        const Scalar tmp = N(r)/(right(r+1)+left(j-r));
+        N[r] = saved + right(r+1)*tmp;
+        saved = left(j-r)*tmp;
+      }
+      N(j) = saved;
+    }
+    return N;
+  }
+
+  template <typename _Scalar, int _Dim, int _Degree>
+  DenseIndex Spline<_Scalar, _Dim, _Degree>::degree() const
+  {
+    if (_Degree == Dynamic)
+      return m_knots.size() - m_ctrls.cols() - 1;
+    else
+      return _Degree;
+  }
+
+  template <typename _Scalar, int _Dim, int _Degree>
+  DenseIndex Spline<_Scalar, _Dim, _Degree>::span(Scalar u) const
+  {
+    return Spline::Span(u, degree(), knots());
+  }
+
+  template <typename _Scalar, int _Dim, int _Degree>
+  typename Spline<_Scalar, _Dim, _Degree>::PointType Spline<_Scalar, _Dim, _Degree>::operator()(Scalar u) const
+  {
+    enum { Order = SplineTraits<Spline>::OrderAtCompileTime };
+
+    const DenseIndex span = this->span(u);
+    const DenseIndex p = degree();
+    const BasisVectorType basis_funcs = basisFunctions(u);
+
+    const Replicate<BasisVectorType,Dimension,1> ctrl_weights(basis_funcs);
+    const Block<const ControlPointVectorType,Dimension,Order> ctrl_pts(ctrls(),0,span-p,Dimension,p+1);
+    return (ctrl_weights * ctrl_pts).rowwise().sum();
+  }
+
+  /* --------------------------------------------------------------------------------------------- */
+
+  template <typename SplineType, typename DerivativeType>
+  void derivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& der)
+  {    
+    enum { Dimension = SplineTraits<SplineType>::Dimension };
+    enum { Order = SplineTraits<SplineType>::OrderAtCompileTime };
+    enum { DerivativeOrder = DerivativeType::ColsAtCompileTime };
+
+    typedef typename SplineTraits<SplineType>::Scalar Scalar;
+
+    typedef typename SplineTraits<SplineType>::BasisVectorType BasisVectorType;
+    typedef typename SplineTraits<SplineType>::ControlPointVectorType ControlPointVectorType;
+
+    typedef typename SplineTraits<SplineType,DerivativeOrder>::BasisDerivativeType BasisDerivativeType;
+    typedef typename BasisDerivativeType::ConstRowXpr BasisDerivativeRowXpr;    
+
+    const DenseIndex p = spline.degree();
+    const DenseIndex span = spline.span(u);
+
+    const DenseIndex n = (std::min)(p, order);
+
+    der.resize(Dimension,n+1);
+
+    // Retrieve the basis function derivatives up to the desired order...    
+    const BasisDerivativeType basis_func_ders = spline.template basisFunctionDerivatives<DerivativeOrder>(u, n+1);
+
+    // ... and perform the linear combinations of the control points.
+    for (DenseIndex der_order=0; der_order<n+1; ++der_order)
+    {
+      const Replicate<BasisDerivativeRowXpr,Dimension,1> ctrl_weights( basis_func_ders.row(der_order) );
+      const Block<const ControlPointVectorType,Dimension,Order> ctrl_pts(spline.ctrls(),0,span-p,Dimension,p+1);
+      der.col(der_order) = (ctrl_weights * ctrl_pts).rowwise().sum();
+    }
+  }
+
+  template <typename _Scalar, int _Dim, int _Degree>
+  typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::DerivativeType
+    Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const
+  {
+    typename SplineTraits< Spline >::DerivativeType res;
+    derivativesImpl(*this, u, order, res);
+    return res;
+  }
+
+  template <typename _Scalar, int _Dim, int _Degree>
+  template <int DerivativeOrder>
+  typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::DerivativeType
+    Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const
+  {
+    typename SplineTraits< Spline, DerivativeOrder >::DerivativeType res;
+    derivativesImpl(*this, u, order, res);
+    return res;
+  }
+
+  template <typename _Scalar, int _Dim, int _Degree>
+  typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisVectorType
+    Spline<_Scalar, _Dim, _Degree>::basisFunctions(Scalar u) const
+  {
+    return Spline::BasisFunctions(u, degree(), knots());
+  }
+
+  /* --------------------------------------------------------------------------------------------- */
+
+  template <typename SplineType, typename DerivativeType>
+  void basisFunctionDerivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& N_)
+  {
+    enum { Order = SplineTraits<SplineType>::OrderAtCompileTime };
+
+    typedef typename SplineTraits<SplineType>::Scalar Scalar;
+    typedef typename SplineTraits<SplineType>::BasisVectorType BasisVectorType;
+    typedef typename SplineTraits<SplineType>::KnotVectorType KnotVectorType;
+    typedef typename SplineTraits<SplineType>::ControlPointVectorType ControlPointVectorType;
+
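+    // Derivatives of the non-zero basis functions via the standard triangular-table
+    // scheme (cf. Piegl & Tiller, "The NURBS Book"); on exit, N_(k,j) holds the k-th
+    // derivative of the j-th non-zero basis function at u.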
+    const KnotVectorType& U = spline.knots();
+
+    const DenseIndex p = spline.degree();
+    const DenseIndex span = spline.span(u);
+
+    const DenseIndex n = (std::min)(p, order);
+
+    N_.resize(n+1, p+1);
+
+    BasisVectorType left = BasisVectorType::Zero(p+1);
+    BasisVectorType right = BasisVectorType::Zero(p+1);
+
+    Matrix<Scalar,Order,Order> ndu(p+1,p+1);
+
+    double saved, temp;
+
+    ndu(0,0) = 1.0;
+
+    DenseIndex j;
+    for (j=1; j<=p; ++j)
+    {
+      left[j] = u-U[span+1-j];
+      right[j] = U[span+j]-u;
+      saved = 0.0;
+
+      for (DenseIndex r=0; r<j; ++r)
+      {
+        /* Lower triangle */
+        ndu(j,r) = right[r+1]+left[j-r];
+        temp = ndu(r,j-1)/ndu(j,r);
+        /* Upper triangle */
+        ndu(r,j) = static_cast<Scalar>(saved+right[r+1] * temp);
+        saved = left[j-r] * temp;
+      }
+
+      ndu(j,j) = static_cast<Scalar>(saved);
+    }
+
+    for (j = p; j>=0; --j) 
+      N_(0,j) = ndu(j,p);
+
+    // Compute the derivatives
+    DerivativeType a(n+1,p+1);
+    DenseIndex r=0;
+    for (; r<=p; ++r)
+    {
+      DenseIndex s1,s2;
+      s1 = 0; s2 = 1; // alternate rows in array a
+      a(0,0) = 1.0;
+
+      // Compute the k-th derivative
+      for (DenseIndex k=1; k<=static_cast<DenseIndex>(n); ++k)
+      {
+        double d = 0.0;
+        DenseIndex rk,pk,j1,j2;
+        rk = r-k; pk = p-k;
+
+        if (r>=k)
+        {
+          a(s2,0) = a(s1,0)/ndu(pk+1,rk);
+          d = a(s2,0)*ndu(rk,pk);
+        }
+
+        if (rk>=-1) j1 = 1;
+        else        j1 = -rk;
+
+        if (r-1 <= pk) j2 = k-1;
+        else           j2 = p-r;
+
+        for (j=j1; j<=j2; ++j)
+        {
+          a(s2,j) = (a(s1,j)-a(s1,j-1))/ndu(pk+1,rk+j);
+          d += a(s2,j)*ndu(rk+j,pk);
+        }
+
+        if (r<=pk)
+        {
+          a(s2,k) = -a(s1,k-1)/ndu(pk+1,r);
+          d += a(s2,k)*ndu(r,pk);
+        }
+
+        N_(k,r) = static_cast<Scalar>(d);
+        j = s1; s1 = s2; s2 = j; // Switch rows
+      }
+    }
+
+    /* Multiply through by the correct factors */
+    /* (Eq. [2.9])                             */
+    r = p;
+    for (DenseIndex k=1; k<=static_cast<DenseIndex>(n); ++k)
+    {
+      for (DenseIndex j=p; j>=0; --j) N_(k,j) *= r;
+      r *= p-k;
+    }
+  }
+
+  template <typename _Scalar, int _Dim, int _Degree>
+  typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType
+    Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
+  {
+    typename SplineTraits< Spline >::BasisDerivativeType der;
+    basisFunctionDerivativesImpl(*this, u, order, der);
+    return der;
+  }
+
+  template <typename _Scalar, int _Dim, int _Degree>
+  template <int DerivativeOrder>
+  typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType
+    Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
+  {
+    typename SplineTraits< Spline, DerivativeOrder >::BasisDerivativeType der;
+    basisFunctionDerivativesImpl(*this, u, order, der);
+    return der;
+  }
+}
+
+#endif // EIGEN_SPLINE_H
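A minimal usage sketch of the derivative evaluation implemented above (not part of the patch itself): it fits an interpolating cubic spline via the SplineFitting helper, which is renamed below, and queries the curve point together with its first two derivatives. The sample points and the evaluation parameter are illustrative only.

    #include <iostream>
    #include <unsupported/Eigen/Splines>

    int main()
    {
      typedef Eigen::Spline<double, 2> Spline2d;

      // Five 2D sample points, one per column (values chosen arbitrarily).
      Spline2d::ControlPointVectorType points(2, 5);
      points << 0.0, 1.0, 2.0, 3.0, 4.0,
                0.0, 1.0, 0.0, 1.0, 0.0;

      // Fit an interpolating spline of degree 3 through the points.
      const Spline2d spline = Eigen::SplineFitting<Spline2d>::Interpolate(points, 3);

      // Column k of 'ders' holds the k-th derivative of the curve at u.
      const double u = 0.5;
      Eigen::SplineTraits<Spline2d>::DerivativeType ders = spline.derivatives(u, 2);

      std::cout << "point:          " << ders.col(0).transpose() << "\n"
                << "1st derivative: " << ders.col(1).transpose() << "\n"
                << "2nd derivative: " << ders.col(2).transpose() << std::endl;
      return 0;
    }

Note that derivativesImpl() clamps the requested order to the spline degree, so the returned matrix has min(order, degree) + 1 columns.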
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Splines/SplineFitting.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Splines/SplineFitting.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Splines/SplineFitting.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Splines/SplineFitting.h
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Splines/SplineFwd.h b/resources/3rdParty/eigen/unsupported/Eigen/src/Splines/SplineFwd.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/Eigen/src/Splines/SplineFwd.h
rename to resources/3rdParty/eigen/unsupported/Eigen/src/Splines/SplineFwd.h
diff --git a/resources/3rdparty/eigen/unsupported/README.txt b/resources/3rdParty/eigen/unsupported/README.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/README.txt
rename to resources/3rdParty/eigen/unsupported/README.txt
diff --git a/resources/3rdparty/eigen/unsupported/doc/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/doc/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/doc/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/doc/Doxyfile.in b/resources/3rdParty/eigen/unsupported/doc/Doxyfile.in
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/Doxyfile.in
rename to resources/3rdParty/eigen/unsupported/doc/Doxyfile.in
diff --git a/resources/3rdparty/eigen/unsupported/doc/Overview.dox b/resources/3rdParty/eigen/unsupported/doc/Overview.dox
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/Overview.dox
rename to resources/3rdParty/eigen/unsupported/doc/Overview.dox
diff --git a/resources/3rdparty/eigen/unsupported/doc/examples/BVH_Example.cpp b/resources/3rdParty/eigen/unsupported/doc/examples/BVH_Example.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/examples/BVH_Example.cpp
rename to resources/3rdParty/eigen/unsupported/doc/examples/BVH_Example.cpp
diff --git a/resources/3rdparty/eigen/unsupported/doc/examples/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/doc/examples/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/examples/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/doc/examples/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/doc/examples/FFT.cpp b/resources/3rdParty/eigen/unsupported/doc/examples/FFT.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/examples/FFT.cpp
rename to resources/3rdParty/eigen/unsupported/doc/examples/FFT.cpp
diff --git a/resources/3rdparty/eigen/unsupported/doc/examples/MatrixExponential.cpp b/resources/3rdParty/eigen/unsupported/doc/examples/MatrixExponential.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/examples/MatrixExponential.cpp
rename to resources/3rdParty/eigen/unsupported/doc/examples/MatrixExponential.cpp
diff --git a/resources/3rdparty/eigen/unsupported/doc/examples/MatrixFunction.cpp b/resources/3rdParty/eigen/unsupported/doc/examples/MatrixFunction.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/examples/MatrixFunction.cpp
rename to resources/3rdParty/eigen/unsupported/doc/examples/MatrixFunction.cpp
diff --git a/resources/3rdparty/eigen/unsupported/doc/examples/MatrixLogarithm.cpp b/resources/3rdParty/eigen/unsupported/doc/examples/MatrixLogarithm.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/examples/MatrixLogarithm.cpp
rename to resources/3rdParty/eigen/unsupported/doc/examples/MatrixLogarithm.cpp
diff --git a/resources/3rdparty/eigen/unsupported/doc/examples/MatrixSine.cpp b/resources/3rdParty/eigen/unsupported/doc/examples/MatrixSine.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/examples/MatrixSine.cpp
rename to resources/3rdParty/eigen/unsupported/doc/examples/MatrixSine.cpp
diff --git a/resources/3rdparty/eigen/unsupported/doc/examples/MatrixSinh.cpp b/resources/3rdParty/eigen/unsupported/doc/examples/MatrixSinh.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/examples/MatrixSinh.cpp
rename to resources/3rdParty/eigen/unsupported/doc/examples/MatrixSinh.cpp
diff --git a/resources/3rdparty/eigen/unsupported/doc/examples/MatrixSquareRoot.cpp b/resources/3rdParty/eigen/unsupported/doc/examples/MatrixSquareRoot.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/examples/MatrixSquareRoot.cpp
rename to resources/3rdParty/eigen/unsupported/doc/examples/MatrixSquareRoot.cpp
diff --git a/resources/3rdparty/eigen/unsupported/doc/examples/PolynomialSolver1.cpp b/resources/3rdParty/eigen/unsupported/doc/examples/PolynomialSolver1.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/examples/PolynomialSolver1.cpp
rename to resources/3rdParty/eigen/unsupported/doc/examples/PolynomialSolver1.cpp
diff --git a/resources/3rdparty/eigen/unsupported/doc/examples/PolynomialUtils1.cpp b/resources/3rdParty/eigen/unsupported/doc/examples/PolynomialUtils1.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/examples/PolynomialUtils1.cpp
rename to resources/3rdParty/eigen/unsupported/doc/examples/PolynomialUtils1.cpp
diff --git a/resources/3rdparty/eigen/unsupported/doc/snippets/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/doc/snippets/CMakeLists.txt
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/doc/snippets/CMakeLists.txt
rename to resources/3rdParty/eigen/unsupported/doc/snippets/CMakeLists.txt
diff --git a/resources/3rdparty/eigen/unsupported/test/BVH.cpp b/resources/3rdParty/eigen/unsupported/test/BVH.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/BVH.cpp
rename to resources/3rdParty/eigen/unsupported/test/BVH.cpp
diff --git a/resources/3rdParty/eigen/unsupported/test/CMakeLists.txt b/resources/3rdParty/eigen/unsupported/test/CMakeLists.txt
new file mode 100644
index 000000000..b34b151b1
--- /dev/null
+++ b/resources/3rdParty/eigen/unsupported/test/CMakeLists.txt
@@ -0,0 +1,87 @@
+
+include_directories(../../test ../../unsupported ../../Eigen 
+                    ${CMAKE_CURRENT_BINARY_DIR}/../../test)
+
+find_package(GoogleHash)
+if(GOOGLEHASH_FOUND)
+  add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT")
+  include_directories(${GOOGLEHASH_INCLUDES})
+  ei_add_property(EIGEN_TESTED_BACKENDS  "GoogleHash, ")
+else(GOOGLEHASH_FOUND)
+  ei_add_property(EIGEN_MISSING_BACKENDS  "GoogleHash, ")
+endif(GOOGLEHASH_FOUND)
+
+find_package(Adolc)
+if(ADOLC_FOUND)
+  include_directories(${ADOLC_INCLUDES})
+  ei_add_property(EIGEN_TESTED_BACKENDS "Adolc, ")
+  ei_add_test(forward_adolc "" ${ADOLC_LIBRARIES})
+else(ADOLC_FOUND)
+  ei_add_property(EIGEN_MISSING_BACKENDS "Adolc, ")
+endif(ADOLC_FOUND)
+
+# This test seems never to have been successful on x87, so it is considered to contain an FP-related bug.
+# See thread: "non-linear optimization test summary"
+#ei_add_test(NonLinearOptimization)
+
+ei_add_test(NumericalDiff)
+ei_add_test(autodiff)
+
+if (NOT CMAKE_CXX_COMPILER MATCHES "clang\\+\\+$")
+  ei_add_test(BVH)
+endif()
+
+ei_add_test(matrix_exponential)
+ei_add_test(matrix_function)
+ei_add_test(matrix_square_root)
+ei_add_test(alignedvector3)
+ei_add_test(FFT)
+
+find_package(MPFR 2.3.0)
+find_package(GMP)
+if(MPFR_FOUND)
+  include_directories(${MPFR_INCLUDES} ./mpreal)
+  ei_add_property(EIGEN_TESTED_BACKENDS "MPFR C++, ")
+  set(EIGEN_MPFR_TEST_LIBRARIES ${MPFR_LIBRARIES} ${GMP_LIBRARIES})
+  ei_add_test(mpreal_support "" "${EIGEN_MPFR_TEST_LIBRARIES}" )
+else()
+  ei_add_property(EIGEN_MISSING_BACKENDS "MPFR C++, ")
+endif()
+
+ei_add_test(sparse_extra   "" "")
+
+find_package(FFTW)
+if(FFTW_FOUND)
+  ei_add_property(EIGEN_TESTED_BACKENDS "fftw, ")
+  include_directories( ${FFTW_INCLUDES} )
+  if(FFTWL_LIB)
+    ei_add_test(FFTW  "-DEIGEN_FFTW_DEFAULT -DEIGEN_HAS_FFTWL" "${FFTW_LIBRARIES}" )
+  else()
+    ei_add_test(FFTW  "-DEIGEN_FFTW_DEFAULT" "${FFTW_LIBRARIES}" )
+  endif()
+else()
+  ei_add_property(EIGEN_MISSING_BACKENDS "fftw, ")
+endif()
+
+option(EIGEN_TEST_NO_OPENGL "Disable OpenGL support in unit tests" OFF)
+if(NOT EIGEN_TEST_NO_OPENGL)
+  find_package(OpenGL)
+  find_package(GLUT)
+  find_package(GLEW)
+  if(OPENGL_FOUND AND GLUT_FOUND AND GLEW_FOUND)
+    ei_add_property(EIGEN_TESTED_BACKENDS "OpenGL, ")
+    set(EIGEN_GL_LIB ${GLUT_LIBRARIES} ${GLEW_LIBRARIES})
+    ei_add_test(openglsupport  "" "${EIGEN_GL_LIB}" )
+  else()
+    ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ")
+  endif()
+else()
+  ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ")
+endif()
+
+ei_add_test(polynomialsolver)
+ei_add_test(polynomialutils)
+ei_add_test(kronecker_product)
+ei_add_test(splines)
+ei_add_test(gmres)
+
diff --git a/resources/3rdparty/eigen/unsupported/test/FFT.cpp b/resources/3rdParty/eigen/unsupported/test/FFT.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/FFT.cpp
rename to resources/3rdParty/eigen/unsupported/test/FFT.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/FFTW.cpp b/resources/3rdParty/eigen/unsupported/test/FFTW.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/FFTW.cpp
rename to resources/3rdParty/eigen/unsupported/test/FFTW.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/NonLinearOptimization.cpp b/resources/3rdParty/eigen/unsupported/test/NonLinearOptimization.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/NonLinearOptimization.cpp
rename to resources/3rdParty/eigen/unsupported/test/NonLinearOptimization.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/NumericalDiff.cpp b/resources/3rdParty/eigen/unsupported/test/NumericalDiff.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/NumericalDiff.cpp
rename to resources/3rdParty/eigen/unsupported/test/NumericalDiff.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/alignedvector3.cpp b/resources/3rdParty/eigen/unsupported/test/alignedvector3.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/alignedvector3.cpp
rename to resources/3rdParty/eigen/unsupported/test/alignedvector3.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/autodiff.cpp b/resources/3rdParty/eigen/unsupported/test/autodiff.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/autodiff.cpp
rename to resources/3rdParty/eigen/unsupported/test/autodiff.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/forward_adolc.cpp b/resources/3rdParty/eigen/unsupported/test/forward_adolc.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/forward_adolc.cpp
rename to resources/3rdParty/eigen/unsupported/test/forward_adolc.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/gmres.cpp b/resources/3rdParty/eigen/unsupported/test/gmres.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/gmres.cpp
rename to resources/3rdParty/eigen/unsupported/test/gmres.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/kronecker_product.cpp b/resources/3rdParty/eigen/unsupported/test/kronecker_product.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/kronecker_product.cpp
rename to resources/3rdParty/eigen/unsupported/test/kronecker_product.cpp
diff --git a/resources/3rdParty/eigen/unsupported/test/matrix_exponential.cpp b/resources/3rdParty/eigen/unsupported/test/matrix_exponential.cpp
new file mode 100644
index 000000000..695472f91
--- /dev/null
+++ b/resources/3rdParty/eigen/unsupported/test/matrix_exponential.cpp
@@ -0,0 +1,149 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <unsupported/Eigen/MatrixFunctions>
+
+double binom(int n, int k)
+{
+  double res = 1;
+  for (int i=0; i<k; i++)
+    res = res * (n-k+i+1) / (i+1);
+  return res;
+}
+
+template <typename Derived, typename OtherDerived>
+double relerr(const MatrixBase<Derived>& A, const MatrixBase<OtherDerived>& B)
+{
+  return std::sqrt((A - B).cwiseAbs2().sum() / (std::min)(A.cwiseAbs2().sum(), B.cwiseAbs2().sum()));
+}
+
+template <typename T>
+T expfn(T x, int)
+{
+  return std::exp(x);
+}
+
+template <typename T>
+void test2dRotation(double tol)
+{
+  Matrix<T,2,2> A, B, C;
+  T angle;
+
+  A << 0, 1, -1, 0;
+  for (int i=0; i<=20; i++)
+  {
+    angle = static_cast<T>(pow(10, i / 5. - 2));
+    B << std::cos(angle), std::sin(angle), -std::sin(angle), std::cos(angle);
+
+    C = (angle*A).matrixFunction(expfn);
+    std::cout << "test2dRotation: i = " << i << "   error funm = " << relerr(C, B);
+    VERIFY(C.isApprox(B, static_cast<T>(tol)));
+
+    C = (angle*A).exp();
+    std::cout << "   error expm = " << relerr(C, B) << "\n";
+    VERIFY(C.isApprox(B, static_cast<T>(tol)));
+  }
+}
+
+template <typename T>
+void test2dHyperbolicRotation(double tol)
+{
+  Matrix<std::complex<T>,2,2> A, B, C;
+  std::complex<T> imagUnit(0,1);
+  T angle, ch, sh;
+
+  for (int i=0; i<=20; i++)
+  {
+    angle = static_cast<T>((i-10) / 2.0);
+    ch = std::cosh(angle);
+    sh = std::sinh(angle);
+    A << 0, angle*imagUnit, -angle*imagUnit, 0;
+    B << ch, sh*imagUnit, -sh*imagUnit, ch;
+
+    C = A.matrixFunction(expfn);
+    std::cout << "test2dHyperbolicRotation: i = " << i << "   error funm = " << relerr(C, B);
+    VERIFY(C.isApprox(B, static_cast<T>(tol)));
+
+    C = A.exp();
+    std::cout << "   error expm = " << relerr(C, B) << "\n";
+    VERIFY(C.isApprox(B, static_cast<T>(tol)));
+  }
+}
+
+template <typename T>
+void testPascal(double tol)
+{
+  for (int size=1; size<20; size++)
+  {
+    Matrix<T,Dynamic,Dynamic> A(size,size), B(size,size), C(size,size);
+    A.setZero();
+    for (int i=0; i<size-1; i++)
+      A(i+1,i) = static_cast<T>(i+1);
+    B.setZero();
+    for (int i=0; i<size; i++)
+      for (int j=0; j<=i; j++)
+        B(i,j) = static_cast<T>(binom(i,j));
+
+    C = A.matrixFunction(expfn);
+    std::cout << "testPascal: size = " << size << "   error funm = " << relerr(C, B);
+    VERIFY(C.isApprox(B, static_cast<T>(tol)));
+
+    C = A.exp();
+    std::cout << "   error expm = " << relerr(C, B) << "\n";
+    VERIFY(C.isApprox(B, static_cast<T>(tol)));
+  }
+}
+
+template<typename MatrixType>
+void randomTest(const MatrixType& m, double tol)
+{
+  /* Check that both matrixFunction(expfn) and exp() satisfy
+     expm(A) * expm(-A) == identity for random matrices A.
+  */
+  typename MatrixType::Index rows = m.rows();
+  typename MatrixType::Index cols = m.cols();
+  MatrixType m1(rows, cols), m2(rows, cols), m3(rows, cols),
+             identity = MatrixType::Identity(rows, rows);
+
+  typedef typename NumTraits<typename internal::traits<MatrixType>::Scalar>::Real RealScalar;
+
+  for(int i = 0; i < g_repeat; i++) {
+    m1 = MatrixType::Random(rows, cols);
+
+    m2 = m1.matrixFunction(expfn) * (-m1).matrixFunction(expfn);
+    std::cout << "randomTest: error funm = " << relerr(identity, m2);
+    VERIFY(identity.isApprox(m2, static_cast<RealScalar>(tol)));
+
+    m2 = m1.exp() * (-m1).exp();
+    std::cout << "   error expm = " << relerr(identity, m2) << "\n";
+    VERIFY(identity.isApprox(m2, static_cast<RealScalar>(tol)));
+  }
+}
+
+void test_matrix_exponential()
+{
+  CALL_SUBTEST_2(test2dRotation<double>(1e-13));
+  CALL_SUBTEST_1(test2dRotation<float>(2e-5));  // was 1e-5, relaxed for clang 2.8 / linux / x86-64
+  CALL_SUBTEST_8(test2dRotation<long double>(1e-13)); 
+  CALL_SUBTEST_2(test2dHyperbolicRotation<double>(1e-14));
+  CALL_SUBTEST_1(test2dHyperbolicRotation<float>(1e-5));
+  CALL_SUBTEST_8(test2dHyperbolicRotation<long double>(1e-14));
+  CALL_SUBTEST_6(testPascal<float>(1e-6));
+  CALL_SUBTEST_5(testPascal<double>(1e-15));
+  CALL_SUBTEST_2(randomTest(Matrix2d(), 1e-13));
+  CALL_SUBTEST_7(randomTest(Matrix<double,3,3,RowMajor>(), 1e-13));
+  CALL_SUBTEST_3(randomTest(Matrix4cd(), 1e-13));
+  CALL_SUBTEST_4(randomTest(MatrixXd(8,8), 1e-13));
+  CALL_SUBTEST_1(randomTest(Matrix2f(), 1e-4));
+  CALL_SUBTEST_5(randomTest(Matrix3cf(), 1e-4));
+  CALL_SUBTEST_1(randomTest(Matrix4f(), 1e-4));
+  CALL_SUBTEST_6(randomTest(MatrixXf(8,8), 1e-4));
+  CALL_SUBTEST_9(randomTest(Matrix<long double,Dynamic,Dynamic>(7,7), 1e-13));
+}
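For orientation, a standalone sketch of what this test exercises, written against the public MatrixFunctions API only; the rotation angle below is arbitrary. As in test2dRotation(), exp(theta*A) of the planar rotation generator should reproduce the closed-form rotation matrix.

    #include <cmath>
    #include <iostream>
    #include <Eigen/Core>
    #include <unsupported/Eigen/MatrixFunctions>

    int main()
    {
      // Generator of a planar rotation: exp(theta*A) rotates by theta.
      Eigen::Matrix2d A;
      A <<  0, 1,
           -1, 0;

      const double theta = 0.5;
      const Eigen::Matrix2d R = (theta * A).exp();

      Eigen::Matrix2d expected;
      expected <<  std::cos(theta), std::sin(theta),
                  -std::sin(theta), std::cos(theta);

      std::cout << "exp(theta*A) =\n" << R << "\n"
                << "max abs error: " << (R - expected).cwiseAbs().maxCoeff() << std::endl;
      return 0;
    }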
diff --git a/resources/3rdparty/eigen/unsupported/test/matrix_function.cpp b/resources/3rdParty/eigen/unsupported/test/matrix_function.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/matrix_function.cpp
rename to resources/3rdParty/eigen/unsupported/test/matrix_function.cpp
diff --git a/resources/3rdParty/eigen/unsupported/test/matrix_square_root.cpp b/resources/3rdParty/eigen/unsupported/test/matrix_square_root.cpp
new file mode 100644
index 000000000..508619a7a
--- /dev/null
+++ b/resources/3rdParty/eigen/unsupported/test/matrix_square_root.cpp
@@ -0,0 +1,62 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <unsupported/Eigen/MatrixFunctions>
+
+template <typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
+struct generateTestMatrix;
+
+// for real matrices, make sure none of the eigenvalues are negative
+template <typename MatrixType>
+struct generateTestMatrix<MatrixType,0>
+{
+  static void run(MatrixType& result, typename MatrixType::Index size)
+  {
+    MatrixType mat = MatrixType::Random(size, size);
+    EigenSolver<MatrixType> es(mat);
+    typename EigenSolver<MatrixType>::EigenvalueType eivals = es.eigenvalues();
+    for (typename MatrixType::Index i = 0; i < size; ++i) {
+      if (eivals(i).imag() == 0 && eivals(i).real() < 0)
+        eivals(i) = -eivals(i);
+    }
+    result = (es.eigenvectors() * eivals.asDiagonal() * es.eigenvectors().inverse()).real();
+  }
+};
+
+// for complex matrices, any matrix is fine
+template <typename MatrixType>
+struct generateTestMatrix<MatrixType,1>
+{
+  static void run(MatrixType& result, typename MatrixType::Index size)
+  {
+    result = MatrixType::Random(size, size);
+  }
+};
+
+template<typename MatrixType>
+void testMatrixSqrt(const MatrixType& m)
+{
+  MatrixType A;
+  generateTestMatrix<MatrixType>::run(A, m.rows());
+  MatrixType sqrtA = A.sqrt();
+  VERIFY_IS_APPROX(sqrtA * sqrtA, A);
+}
+
+void test_matrix_square_root()
+{
+  for (int i = 0; i < g_repeat; i++) {
+    CALL_SUBTEST_1(testMatrixSqrt(Matrix3cf()));
+    CALL_SUBTEST_2(testMatrixSqrt(MatrixXcd(12,12)));
+    CALL_SUBTEST_3(testMatrixSqrt(Matrix4f()));
+    CALL_SUBTEST_4(testMatrixSqrt(Matrix<double,Dynamic,Dynamic,RowMajor>(9, 9)));
+    CALL_SUBTEST_5(testMatrixSqrt(Matrix<float,1,1>()));
+    CALL_SUBTEST_5(testMatrixSqrt(Matrix<std::complex<float>,1,1>()));
+  }
+}
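A compact sketch of the property testMatrixSqrt() verifies, using only the public API; the identity shift applied to the random matrix is merely one way to keep all eigenvalues positive so that a real principal square root exists.

    #include <iostream>
    #include <Eigen/Dense>
    #include <unsupported/Eigen/MatrixFunctions>

    int main()
    {
      // Build a symmetric positive definite matrix.
      Eigen::Matrix3d M = Eigen::Matrix3d::Random();
      Eigen::Matrix3d A = M * M.transpose() + 3.0 * Eigen::Matrix3d::Identity();

      // Principal matrix square root; S*S should reproduce A.
      const Eigen::Matrix3d S = A.sqrt();
      std::cout << "||S*S - A|| = " << (S * S - A).norm() << std::endl;
      return 0;
    }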
diff --git a/resources/3rdparty/eigen/unsupported/test/mpreal/dlmalloc.c b/resources/3rdParty/eigen/unsupported/test/mpreal/dlmalloc.c
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/mpreal/dlmalloc.c
rename to resources/3rdParty/eigen/unsupported/test/mpreal/dlmalloc.c
diff --git a/resources/3rdparty/eigen/unsupported/test/mpreal/dlmalloc.h b/resources/3rdParty/eigen/unsupported/test/mpreal/dlmalloc.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/mpreal/dlmalloc.h
rename to resources/3rdParty/eigen/unsupported/test/mpreal/dlmalloc.h
diff --git a/resources/3rdparty/eigen/unsupported/test/mpreal/mpreal.cpp b/resources/3rdParty/eigen/unsupported/test/mpreal/mpreal.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/mpreal/mpreal.cpp
rename to resources/3rdParty/eigen/unsupported/test/mpreal/mpreal.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/mpreal/mpreal.h b/resources/3rdParty/eigen/unsupported/test/mpreal/mpreal.h
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/mpreal/mpreal.h
rename to resources/3rdParty/eigen/unsupported/test/mpreal/mpreal.h
diff --git a/resources/3rdparty/eigen/unsupported/test/mpreal_support.cpp b/resources/3rdParty/eigen/unsupported/test/mpreal_support.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/mpreal_support.cpp
rename to resources/3rdParty/eigen/unsupported/test/mpreal_support.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/openglsupport.cpp b/resources/3rdParty/eigen/unsupported/test/openglsupport.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/openglsupport.cpp
rename to resources/3rdParty/eigen/unsupported/test/openglsupport.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/polynomialsolver.cpp b/resources/3rdParty/eigen/unsupported/test/polynomialsolver.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/polynomialsolver.cpp
rename to resources/3rdParty/eigen/unsupported/test/polynomialsolver.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/polynomialutils.cpp b/resources/3rdParty/eigen/unsupported/test/polynomialutils.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/polynomialutils.cpp
rename to resources/3rdParty/eigen/unsupported/test/polynomialutils.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/sparse_extra.cpp b/resources/3rdParty/eigen/unsupported/test/sparse_extra.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/sparse_extra.cpp
rename to resources/3rdParty/eigen/unsupported/test/sparse_extra.cpp
diff --git a/resources/3rdparty/eigen/unsupported/test/splines.cpp b/resources/3rdParty/eigen/unsupported/test/splines.cpp
similarity index 100%
rename from resources/3rdparty/eigen/unsupported/test/splines.cpp
rename to resources/3rdParty/eigen/unsupported/test/splines.cpp
diff --git a/resources/3rdparty/eigen/.hg_archival.txt b/resources/3rdparty/eigen/.hg_archival.txt
deleted file mode 100644
index ed93eed54..000000000
--- a/resources/3rdparty/eigen/.hg_archival.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-repo: 8a21fd850624c931e448cbcfb38168cb2717c790
-node: 5945cb388ded120eb6dd3a1dfd2766b8e83237a4
-branch: default
-latesttag: 3.1.0-rc2
-latesttagdistance: 147
diff --git a/resources/3rdparty/eigen/.hgtags b/resources/3rdparty/eigen/.hgtags
deleted file mode 100644
index cbbcebdae..000000000
--- a/resources/3rdparty/eigen/.hgtags
+++ /dev/null
@@ -1,22 +0,0 @@
-2db9468678c6480c9633b6272ff0e3599d1e11a3 2.0-beta3
-375224817dce669b6fa31d920d4c895a63fabf32 2.0-beta1
-3b8120f077865e2a072e10f5be33e1d942b83a06 2.0-rc1
-19dfc0e7666bcee26f7a49eb42f39a0280a3485e 2.0-beta5
-7a7d8a9526f003ffa2430dfb0c2c535b5add3023 2.0-beta4
-7d14ad088ac23769c349518762704f0257f6a39b 2.0.1
-b9d48561579fd7d4c05b2aa42235dc9de6484bf2 2.0-beta6
-e17630a40408243cb1a51ad0fe3a99beb75b7450 before-hg-migration
-eda654d4cda2210ce80719addcf854773e6dec5a 2.0.0
-ee9a7c468a9e73fab12f38f02bac24b07f29ed71 2.0-beta2
-d49097c25d8049e730c254a2fed725a240ce4858 after-hg-migration
-655348878731bcb5d9bbe0854077b052e75e5237 actual-start-from-scratch
-12a658962d4e6dfdc9a1c350fe7b69e36e70675c 3.0-beta1
-5c4180ad827b3f869b13b1d82f5a6ce617d6fcee 3.0-beta2
-7ae24ca6f3891d5ac58ddc7db60ad413c8d6ec35 3.0-beta3
-c40708b9088d622567fecc9208ad4a426621d364 3.0-beta4
-b6456624eae74f49ae8683d8e7b2882a2ca0342a 3.0-rc1
-a810d5dbab47acfe65b3350236efdd98f67d4d8a 3.1.0-alpha1
-304c88ca3affc16dd0b008b1104873986edd77af 3.1.0-alpha2
-920fc730b5930daae0a6dbe296d60ce2e3808215 3.1.0-beta1
-8383e883ebcc6f14695ff0b5e20bb631abab43fb 3.1.0-rc1
-bf4cb8c934fa3a79f45f1e629610f0225e93e493 3.1.0-rc2
diff --git a/resources/3rdparty/eigen/Eigen/Core b/resources/3rdparty/eigen/Eigen/Core
deleted file mode 100644
index 502a4fc55..000000000
--- a/resources/3rdparty/eigen/Eigen/Core
+++ /dev/null
@@ -1,380 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_CORE_H
-#define EIGEN_CORE_H
-
-// first thing Eigen does: stop the compiler from committing suicide
-#include "src/Core/util/DisableStupidWarnings.h"
-
-// then include this file where all our macros are defined. It's really important to do it first because
-// it's where we do all the alignment settings (platform detection and honoring the user's will if he
-// defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization.
-#include "src/Core/util/Macros.h"
-
-#include <complex>
-
-// this include file manages BLAS and MKL related macros
-// and inclusion of their respective header files
-#include "src/Core/util/MKL_support.h"
-
-// if alignment is disabled, then disable vectorization. Note: EIGEN_ALIGN is the proper check, it takes into
-// account both the user's will (EIGEN_DONT_ALIGN) and our own platform checks
-#if !EIGEN_ALIGN
-  #ifndef EIGEN_DONT_VECTORIZE
-    #define EIGEN_DONT_VECTORIZE
-  #endif
-#endif
-
-#ifdef _MSC_VER
-  #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled
-  #if (_MSC_VER >= 1500) // 2008 or later
-    // Remember that usage of defined() in a #define is undefined by the standard.
-    // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
-    #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || defined(_M_X64)
-      #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
-    #endif
-  #endif
-#else
-  // Remember that usage of defined() in a #define is undefined by the standard
-  #if (defined __SSE2__) && ( (!defined __GNUC__) || EIGEN_GNUC_AT_LEAST(4,2) )
-    #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC
-  #endif
-#endif
-
-#ifndef EIGEN_DONT_VECTORIZE
-
-  #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)
-
-    // Defines symbols for compile-time detection of which instructions are
-    // used.
-    // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used
-    #define EIGEN_VECTORIZE
-    #define EIGEN_VECTORIZE_SSE
-    #define EIGEN_VECTORIZE_SSE2
-
-    // Detect sse3/ssse3/sse4:
-    // gcc and icc defines __SSE3__, ...
-    // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you
-    // want to force the use of those instructions with msvc.
-    #ifdef __SSE3__
-      #define EIGEN_VECTORIZE_SSE3
-    #endif
-    #ifdef __SSSE3__
-      #define EIGEN_VECTORIZE_SSSE3
-    #endif
-    #ifdef __SSE4_1__
-      #define EIGEN_VECTORIZE_SSE4_1
-    #endif
-    #ifdef __SSE4_2__
-      #define EIGEN_VECTORIZE_SSE4_2
-    #endif
-
-    // include files
-
-    // This extern "C" works around a MINGW-w64 compilation issue
-    // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354
-    // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do).
-    // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations
-    // with conflicting linkage.  The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know;
-    // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too.
-    // notice that since these are C headers, the extern "C" is theoretically needed anyways.
-    extern "C" {
-      // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
-      // Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus:
-      #ifdef __INTEL_COMPILER
-        #include <immintrin.h>
-      #else
-        #include <emmintrin.h>
-        #include <xmmintrin.h>
-        #ifdef  EIGEN_VECTORIZE_SSE3
-        #include <pmmintrin.h>
-        #endif
-        #ifdef EIGEN_VECTORIZE_SSSE3
-        #include <tmmintrin.h>
-        #endif
-        #ifdef EIGEN_VECTORIZE_SSE4_1
-        #include <smmintrin.h>
-        #endif
-        #ifdef EIGEN_VECTORIZE_SSE4_2
-        #include <nmmintrin.h>
-        #endif
-      #endif
-    } // end extern "C"
-  #elif defined __ALTIVEC__
-    #define EIGEN_VECTORIZE
-    #define EIGEN_VECTORIZE_ALTIVEC
-    #include <altivec.h>
-    // We need to #undef all these ugly tokens defined in <altivec.h>
-    // => use __vector instead of vector
-    #undef bool
-    #undef vector
-    #undef pixel
-  #elif defined  __ARM_NEON__
-    #define EIGEN_VECTORIZE
-    #define EIGEN_VECTORIZE_NEON
-    #include <arm_neon.h>
-  #endif
-#endif
-
-#if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE)
-  #define EIGEN_HAS_OPENMP
-#endif
-
-#ifdef EIGEN_HAS_OPENMP
-#include <omp.h>
-#endif
-
-// MSVC for windows mobile does not have the errno.h file
-#if !(defined(_MSC_VER) && defined(_WIN32_WCE)) && !defined(__ARMCC_VERSION)
-#define EIGEN_HAS_ERRNO
-#endif
-
-#ifdef EIGEN_HAS_ERRNO
-#include <cerrno>
-#endif
-#include <cstddef>
-#include <cstdlib>
-#include <cmath>
-#include <cassert>
-#include <functional>
-#include <iosfwd>
-#include <cstring>
-#include <string>
-#include <limits>
-#include <climits> // for CHAR_BIT
-// for min/max:
-#include <algorithm>
-
-// for outputting debug info
-#ifdef EIGEN_DEBUG_ASSIGN
-#include <iostream>
-#endif
-
-// required for __cpuid, needs to be included after cmath
-#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64))
-  #include <intrin.h>
-#endif
-
-#if defined(_CPPUNWIND) || defined(__EXCEPTIONS)
-  #define EIGEN_EXCEPTIONS
-#endif
-
-#ifdef EIGEN_EXCEPTIONS
-  #include <new>
-#endif
-
-/** \brief Namespace containing all symbols from the %Eigen library. */
-namespace Eigen {
-
-inline static const char *SimdInstructionSetsInUse(void) {
-#if defined(EIGEN_VECTORIZE_SSE4_2)
-  return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
-#elif defined(EIGEN_VECTORIZE_SSE4_1)
-  return "SSE, SSE2, SSE3, SSSE3, SSE4.1";
-#elif defined(EIGEN_VECTORIZE_SSSE3)
-  return "SSE, SSE2, SSE3, SSSE3";
-#elif defined(EIGEN_VECTORIZE_SSE3)
-  return "SSE, SSE2, SSE3";
-#elif defined(EIGEN_VECTORIZE_SSE2)
-  return "SSE, SSE2";
-#elif defined(EIGEN_VECTORIZE_ALTIVEC)
-  return "AltiVec";
-#elif defined(EIGEN_VECTORIZE_NEON)
-  return "ARM NEON";
-#else
-  return "None";
-#endif
-}
-
-} // end namespace Eigen
-
-#define STAGE10_FULL_EIGEN2_API             10
-#define STAGE20_RESOLVE_API_CONFLICTS       20
-#define STAGE30_FULL_EIGEN3_API             30
-#define STAGE40_FULL_EIGEN3_STRICTNESS      40
-#define STAGE99_NO_EIGEN2_SUPPORT           99
-
-#if   defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS
-  #define EIGEN2_SUPPORT
-  #define EIGEN2_SUPPORT_STAGE STAGE40_FULL_EIGEN3_STRICTNESS
-#elif defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API
-  #define EIGEN2_SUPPORT
-  #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API
-#elif defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS
-  #define EIGEN2_SUPPORT
-  #define EIGEN2_SUPPORT_STAGE STAGE20_RESOLVE_API_CONFLICTS
-#elif defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API
-  #define EIGEN2_SUPPORT
-  #define EIGEN2_SUPPORT_STAGE STAGE10_FULL_EIGEN2_API
-#elif defined EIGEN2_SUPPORT
-  // default to stage 3, that's what it's always meant
-  #define EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API
-  #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API
-#else
-  #define EIGEN2_SUPPORT_STAGE STAGE99_NO_EIGEN2_SUPPORT
-#endif
-
-#ifdef EIGEN2_SUPPORT
-#undef minor
-#endif
-
-// we use size_t frequently and we'll never remember to prepend it with std:: everytime just to
-// ensure QNX/QCC support
-using std::size_t;
-// gcc 4.6.0 wants std:: for ptrdiff_t 
-using std::ptrdiff_t;
-
-/** \defgroup Core_Module Core module
-  * This is the main module of Eigen providing dense matrix and vector support
-  * (both fixed and dynamic size) with all the features corresponding to a BLAS library
-  * and much more...
-  *
-  * \code
-  * #include <Eigen/Core>
-  * \endcode
-  */
-
-/** \defgroup Support_modules Support modules [category]
-  * Category of modules which add support for external libraries.
-  */
-
-#include "src/Core/util/Constants.h"
-#include "src/Core/util/ForwardDeclarations.h"
-#include "src/Core/util/Meta.h"
-#include "src/Core/util/XprHelper.h"
-#include "src/Core/util/StaticAssert.h"
-#include "src/Core/util/Memory.h"
-
-#include "src/Core/NumTraits.h"
-#include "src/Core/MathFunctions.h"
-#include "src/Core/GenericPacketMath.h"
-
-#if defined EIGEN_VECTORIZE_SSE
-  #include "src/Core/arch/SSE/PacketMath.h"
-  #include "src/Core/arch/SSE/MathFunctions.h"
-  #include "src/Core/arch/SSE/Complex.h"
-#elif defined EIGEN_VECTORIZE_ALTIVEC
-  #include "src/Core/arch/AltiVec/PacketMath.h"
-  #include "src/Core/arch/AltiVec/Complex.h"
-#elif defined EIGEN_VECTORIZE_NEON
-  #include "src/Core/arch/NEON/PacketMath.h"
-  #include "src/Core/arch/NEON/Complex.h"
-#endif
-
-#include "src/Core/arch/Default/Settings.h"
-
-#include "src/Core/Functors.h"
-#include "src/Core/DenseCoeffsBase.h"
-#include "src/Core/DenseBase.h"
-#include "src/Core/MatrixBase.h"
-#include "src/Core/EigenBase.h"
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874
-                                // at least confirmed with Doxygen 1.5.5 and 1.5.6
-  #include "src/Core/Assign.h"
-#endif
-
-#include "src/Core/util/BlasUtil.h"
-#include "src/Core/DenseStorage.h"
-#include "src/Core/NestByValue.h"
-#include "src/Core/ForceAlignedAccess.h"
-#include "src/Core/ReturnByValue.h"
-#include "src/Core/NoAlias.h"
-#include "src/Core/PlainObjectBase.h"
-#include "src/Core/Matrix.h"
-#include "src/Core/Array.h"
-#include "src/Core/CwiseBinaryOp.h"
-#include "src/Core/CwiseUnaryOp.h"
-#include "src/Core/CwiseNullaryOp.h"
-#include "src/Core/CwiseUnaryView.h"
-#include "src/Core/SelfCwiseBinaryOp.h"
-#include "src/Core/Dot.h"
-#include "src/Core/StableNorm.h"
-#include "src/Core/MapBase.h"
-#include "src/Core/Stride.h"
-#include "src/Core/Map.h"
-#include "src/Core/Block.h"
-#include "src/Core/VectorBlock.h"
-#include "src/Core/Ref.h"
-#include "src/Core/Transpose.h"
-#include "src/Core/DiagonalMatrix.h"
-#include "src/Core/Diagonal.h"
-#include "src/Core/DiagonalProduct.h"
-#include "src/Core/PermutationMatrix.h"
-#include "src/Core/Transpositions.h"
-#include "src/Core/Redux.h"
-#include "src/Core/Visitor.h"
-#include "src/Core/Fuzzy.h"
-#include "src/Core/IO.h"
-#include "src/Core/Swap.h"
-#include "src/Core/CommaInitializer.h"
-#include "src/Core/Flagged.h"
-#include "src/Core/ProductBase.h"
-#include "src/Core/GeneralProduct.h"
-#include "src/Core/TriangularMatrix.h"
-#include "src/Core/SelfAdjointView.h"
-#include "src/Core/products/GeneralBlockPanelKernel.h"
-#include "src/Core/products/Parallelizer.h"
-#include "src/Core/products/CoeffBasedProduct.h"
-#include "src/Core/products/GeneralMatrixVector.h"
-#include "src/Core/products/GeneralMatrixMatrix.h"
-#include "src/Core/SolveTriangular.h"
-#include "src/Core/products/GeneralMatrixMatrixTriangular.h"
-#include "src/Core/products/SelfadjointMatrixVector.h"
-#include "src/Core/products/SelfadjointMatrixMatrix.h"
-#include "src/Core/products/SelfadjointProduct.h"
-#include "src/Core/products/SelfadjointRank2Update.h"
-#include "src/Core/products/TriangularMatrixVector.h"
-#include "src/Core/products/TriangularMatrixMatrix.h"
-#include "src/Core/products/TriangularSolverMatrix.h"
-#include "src/Core/products/TriangularSolverVector.h"
-#include "src/Core/BandMatrix.h"
-
-#include "src/Core/BooleanRedux.h"
-#include "src/Core/Select.h"
-#include "src/Core/VectorwiseOp.h"
-#include "src/Core/Random.h"
-#include "src/Core/Replicate.h"
-#include "src/Core/Reverse.h"
-#include "src/Core/ArrayBase.h"
-#include "src/Core/ArrayWrapper.h"
-
-#ifdef EIGEN_ENABLE_EVALUATORS
-#include "src/Core/Product.h"
-#include "src/Core/CoreEvaluators.h"
-#include "src/Core/AssignEvaluator.h"
-#include "src/Core/ProductEvaluators.h"
-#endif
-
-#ifdef EIGEN_USE_BLAS
-#include "src/Core/products/GeneralMatrixMatrix_MKL.h"
-#include "src/Core/products/GeneralMatrixVector_MKL.h"
-#include "src/Core/products/GeneralMatrixMatrixTriangular_MKL.h"
-#include "src/Core/products/SelfadjointMatrixMatrix_MKL.h"
-#include "src/Core/products/SelfadjointMatrixVector_MKL.h"
-#include "src/Core/products/TriangularMatrixMatrix_MKL.h"
-#include "src/Core/products/TriangularMatrixVector_MKL.h"
-#include "src/Core/products/TriangularSolverMatrix_MKL.h"
-#endif // EIGEN_USE_BLAS
-
-#ifdef EIGEN_USE_MKL_VML
-#include "src/Core/Assign_MKL.h"
-#endif
-
-#include "src/Core/GlobalFunctions.h"
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#ifdef EIGEN2_SUPPORT
-#include "Eigen2Support"
-#endif
-
-#endif // EIGEN_CORE_H
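SimdInstructionSetsInUse(), declared in this header, reports at run time which instruction sets were selected at compile time; a trivial sketch of querying it (Eigen/Core assumed to be on the include path):

    #include <iostream>
    #include <Eigen/Core>

    int main()
    {
      // Prints e.g. "SSE, SSE2" or "None" when vectorization is disabled.
      std::cout << Eigen::SimdInstructionSetsInUse() << std::endl;
      return 0;
    }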
diff --git a/resources/3rdparty/eigen/Eigen/Eigenvalues b/resources/3rdparty/eigen/Eigen/Eigenvalues
deleted file mode 100644
index 53c5a73a2..000000000
--- a/resources/3rdparty/eigen/Eigen/Eigenvalues
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef EIGEN_EIGENVALUES_MODULE_H
-#define EIGEN_EIGENVALUES_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-#include "Cholesky"
-#include "Jacobi"
-#include "Householder"
-#include "LU"
-#include "Geometry"
-
-/** \defgroup Eigenvalues_Module Eigenvalues module
-  *
-  *
-  *
-  * This module mainly provides various eigenvalue solvers.
-  * This module also provides some MatrixBase methods, including:
-  *  - MatrixBase::eigenvalues(),
-  *  - MatrixBase::operatorNorm()
-  *
-  * \code
-  * #include <Eigen/Eigenvalues>
-  * \endcode
-  */
-
-#include "src/Eigenvalues/Tridiagonalization.h"
-#include "src/Eigenvalues/RealSchur.h"
-#include "src/Eigenvalues/EigenSolver.h"
-#include "src/Eigenvalues/SelfAdjointEigenSolver.h"
-#include "src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h"
-#include "src/Eigenvalues/HessenbergDecomposition.h"
-#include "src/Eigenvalues/ComplexSchur.h"
-#include "src/Eigenvalues/ComplexEigenSolver.h"
-#include "src/Eigenvalues/RealQZ.h"
-#include "src/Eigenvalues/GeneralizedEigenSolver.h"
-#include "src/Eigenvalues/MatrixBaseEigenvalues.h"
-#ifdef EIGEN_USE_LAPACKE
-#include "src/Eigenvalues/RealSchur_MKL.h"
-#include "src/Eigenvalues/ComplexSchur_MKL.h"
-#include "src/Eigenvalues/SelfAdjointEigenSolver_MKL.h"
-#endif
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_EIGENVALUES_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
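The module header removed here mainly re-exports the individual eigenvalue solvers; a short sketch of the typical entry points it documents, assuming <Eigen/Eigenvalues> is on the include path (the matrix values are arbitrary):

    #include <iostream>
    #include <Eigen/Core>
    #include <Eigen/Eigenvalues>

    int main()
    {
      Eigen::Matrix3d A;
      A << 2, 1, 0,
           1, 3, 1,
           0, 1, 4;

      // Dedicated solver for self-adjoint matrices.
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(A);
      std::cout << "eigenvalues:\n" << es.eigenvalues() << "\n";

      // Convenience method provided by MatrixBaseEigenvalues.h.
      std::cout << "operator norm: " << A.operatorNorm() << std::endl;
      return 0;
    }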
diff --git a/resources/3rdparty/eigen/Eigen/OrderingMethods b/resources/3rdparty/eigen/Eigen/OrderingMethods
deleted file mode 100644
index bb43220e8..000000000
--- a/resources/3rdparty/eigen/Eigen/OrderingMethods
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef EIGEN_ORDERINGMETHODS_MODULE_H
-#define EIGEN_ORDERINGMETHODS_MODULE_H
-
-#include "SparseCore"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-/** \ingroup Sparse_modules
-  * \defgroup OrderingMethods_Module OrderingMethods module
-  *
-  * This module is currently for internal use only.
-  *
-  *
-  * \code
-  * #include <Eigen/OrderingMethods>
-  * \endcode
-  */
-
-#include "src/OrderingMethods/Amd.h"
-#include "src/OrderingMethods/Ordering.h"
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_ORDERINGMETHODS_MODULE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Cholesky/LDLT.h b/resources/3rdparty/eigen/Eigen/src/Cholesky/LDLT.h
deleted file mode 100644
index a73a9c19f..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Cholesky/LDLT.h
+++ /dev/null
@@ -1,599 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2009 Keir Mierle <mierle@gmail.com>
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2011 Timothy E. Holy <tim.holy@gmail.com >
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_LDLT_H
-#define EIGEN_LDLT_H
-
-namespace Eigen { 
-
-namespace internal {
-template<typename MatrixType, int UpLo> struct LDLT_Traits;
-}
-
-/** \ingroup Cholesky_Module
-  *
-  * \class LDLT
-  *
-  * \brief Robust Cholesky decomposition of a matrix with pivoting
-  *
-  * \param MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition
-  * \param UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper.
-  *             The other triangular part won't be read.
-  *
-  * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite
-  * matrix \f$ A \f$ such that \f$ A =  P^TLDL^*P \f$, where P is a permutation matrix, L
-  * is lower triangular with a unit diagonal and D is a diagonal matrix.
-  *
-  * The decomposition uses pivoting to ensure stability, so that L will have
-  * zeros in the bottom right rank(A) - n submatrix. Avoiding the square root
-  * on D also stabilizes the computation.
-  *
-  * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky
-  * decomposition to determine whether a system of equations has a solution.
-  *
-  * \sa MatrixBase::ldlt(), class LLT
-  */
-template<typename _MatrixType, int _UpLo> class LDLT
-{
-  public:
-    typedef _MatrixType MatrixType;
-    enum {
-      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-      Options = MatrixType::Options & ~RowMajorBit, // these are the options for the TmpMatrixType, we need a ColMajor matrix here!
-      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
-      UpLo = _UpLo
-    };
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
-    typedef typename MatrixType::Index Index;
-    typedef Matrix<Scalar, RowsAtCompileTime, 1, Options, MaxRowsAtCompileTime, 1> TmpMatrixType;
-
-    typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
-    typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
-
-    typedef internal::LDLT_Traits<MatrixType,UpLo> Traits;
-
-    /** \brief Default Constructor.
-      *
-      * The default constructor is useful in cases in which the user intends to
-      * perform decompositions via LDLT::compute(const MatrixType&).
-      */
-    LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {}
-
-    /** \brief Default Constructor with memory preallocation
-      *
-      * Like the default constructor but with preallocation of the internal data
-      * according to the specified problem \a size.
-      * \sa LDLT()
-      */
-    LDLT(Index size)
-      : m_matrix(size, size),
-        m_transpositions(size),
-        m_temporary(size),
-        m_isInitialized(false)
-    {}
-
-    /** \brief Constructor with decomposition
-      *
-      * This calculates the decomposition for the input \a matrix.
-      * \sa LDLT(Index size)
-      */
-    LDLT(const MatrixType& matrix)
-      : m_matrix(matrix.rows(), matrix.cols()),
-        m_transpositions(matrix.rows()),
-        m_temporary(matrix.rows()),
-        m_isInitialized(false)
-    {
-      compute(matrix);
-    }
-
-    /** Clear any existing decomposition
-     * \sa rankUpdate(w,sigma)
-     */
-    void setZero()
-    {
-      m_isInitialized = false;
-    }
-
-    /** \returns a view of the upper triangular matrix U */
-    inline typename Traits::MatrixU matrixU() const
-    {
-      eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return Traits::getU(m_matrix);
-    }
-
-    /** \returns a view of the lower triangular matrix L */
-    inline typename Traits::MatrixL matrixL() const
-    {
-      eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return Traits::getL(m_matrix);
-    }
-
-    /** \returns the permutation matrix P as a transposition sequence.
-      */
-    inline const TranspositionType& transpositionsP() const
-    {
-      eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return m_transpositions;
-    }
-
-    /** \returns the coefficients of the diagonal matrix D */
-    inline Diagonal<const MatrixType> vectorD() const
-    {
-      eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return m_matrix.diagonal();
-    }
-
-    /** \returns true if the matrix is positive (semidefinite) */
-    inline bool isPositive() const
-    {
-      eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return m_sign == 1;
-    }
-    
-    #ifdef EIGEN2_SUPPORT
-    inline bool isPositiveDefinite() const
-    {
-      return isPositive();
-    }
-    #endif
-
-    /** \returns true if the matrix is negative (semidefinite) */
-    inline bool isNegative(void) const
-    {
-      eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return m_sign == -1;
-    }
-
-    /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
-      *
-      * This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> .
-      *
-      * \note_about_checking_solutions
-      *
-      * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$
-      * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, 
-      * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then
-      * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the
-      * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
-      * computes the least-square solution of \f$ A x = b \f$ is \f$ A \f$ is singular.
-      *
-      * \sa MatrixBase::ldlt()
-      */
-    template<typename Rhs>
-    inline const internal::solve_retval<LDLT, Rhs>
-    solve(const MatrixBase<Rhs>& b) const
-    {
-      eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      eigen_assert(m_matrix.rows()==b.rows()
-                && "LDLT::solve(): invalid number of rows of the right hand side matrix b");
-      return internal::solve_retval<LDLT, Rhs>(*this, b.derived());
-    }
-
-    #ifdef EIGEN2_SUPPORT
-    template<typename OtherDerived, typename ResultType>
-    bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
-    {
-      *result = this->solve(b);
-      return true;
-    }
-    #endif
-
-    template<typename Derived>
-    bool solveInPlace(MatrixBase<Derived> &bAndX) const;
-
-    LDLT& compute(const MatrixType& matrix);
-
-    template <typename Derived>
-    LDLT& rankUpdate(const MatrixBase<Derived>& w,RealScalar alpha=1);
-
-    /** \returns the internal LDLT decomposition matrix
-      *
-      * TODO: document the storage layout
-      */
-    inline const MatrixType& matrixLDLT() const
-    {
-      eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return m_matrix;
-    }
-
-    MatrixType reconstructedMatrix() const;
-
-    inline Index rows() const { return m_matrix.rows(); }
-    inline Index cols() const { return m_matrix.cols(); }
-
-    /** \brief Reports whether previous computation was successful.
-      *
-      * \returns \c Success if computation was succesful,
-      *          \c NumericalIssue if the matrix.appears to be negative.
-      */
-    ComputationInfo info() const
-    {
-      eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return Success;
-    }
-
-  protected:
-
-    /** \internal
-      * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
-      * The strict upper part is used during the decomposition, the strict lower
-      * part correspond to the coefficients of L (its diagonal is equal to 1 and
-      * is not stored), and the diagonal entries correspond to D.
-      */
-    MatrixType m_matrix;
-    TranspositionType m_transpositions;
-    TmpMatrixType m_temporary;
-    int m_sign;
-    bool m_isInitialized;
-};
-
-namespace internal {
-
-template<int UpLo> struct ldlt_inplace;
-
-template<> struct ldlt_inplace<Lower>
-{
-  template<typename MatrixType, typename TranspositionType, typename Workspace>
-  static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
-  {
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename MatrixType::RealScalar RealScalar;
-    typedef typename MatrixType::Index Index;
-    eigen_assert(mat.rows()==mat.cols());
-    const Index size = mat.rows();
-
-    if (size <= 1)
-    {
-      transpositions.setIdentity();
-      if(sign)
-        *sign = real(mat.coeff(0,0))>0 ? 1:-1;
-      return true;
-    }
-
-    RealScalar cutoff(0), biggest_in_corner;
-
-    for (Index k = 0; k < size; ++k)
-    {
-      // Find largest diagonal element
-      Index index_of_biggest_in_corner;
-      biggest_in_corner = mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
-      index_of_biggest_in_corner += k;
-
-      if(k == 0)
-      {
-        // The biggest overall is the point of reference to which further diagonals
-        // are compared; if any diagonal is negligible compared
-        // to the largest overall, the algorithm bails.
-        cutoff = abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);
-
-        if(sign)
-          *sign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1;
-      }
-      else if(sign)
-      {
-        // LDLT is not guaranteed to work for indefinite matrices, but let's try to get the sign right
-        int newSign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0;
-        if(newSign != *sign)
-          *sign = 0;
-      }
-
-      // Finish early if the matrix is not full rank.
-      if(biggest_in_corner < cutoff)
-      {
-        for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
-        break;
-      }
-
-      transpositions.coeffRef(k) = index_of_biggest_in_corner;
-      if(k != index_of_biggest_in_corner)
-      {
-        // apply the transposition while taking care to consider only
-        // the lower triangular part
-        Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element
-        mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k));
-        mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s));
-        std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner));
-        for(int i=k+1;i<index_of_biggest_in_corner;++i)
-        {
-          Scalar tmp = mat.coeffRef(i,k);
-          mat.coeffRef(i,k) = conj(mat.coeffRef(index_of_biggest_in_corner,i));
-          mat.coeffRef(index_of_biggest_in_corner,i) = conj(tmp);
-        }
-        if(NumTraits<Scalar>::IsComplex)
-          mat.coeffRef(index_of_biggest_in_corner,k) = conj(mat.coeff(index_of_biggest_in_corner,k));
-      }
-
-      // partition the matrix:
-      //       A00 |  -  |  -
-      // lu  = A10 | A11 |  -
-      //       A20 | A21 | A22
-      Index rs = size - k - 1;
-      Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1);
-      Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
-      Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);
-
-      if(k>0)
-      {
-        temp.head(k) = mat.diagonal().head(k).asDiagonal() * A10.adjoint();
-        mat.coeffRef(k,k) -= (A10 * temp.head(k)).value();
-        if(rs>0)
-          A21.noalias() -= A20 * temp.head(k);
-      }
-      if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
-        A21 /= mat.coeffRef(k,k);
-    }
-
-    return true;
-  }
-
-  // Reference for the algorithm: Davis and Hager, "Multiple Rank
-  // Modifications of a Sparse Cholesky Factorization" (Algorithm 1)
-  // Trivial rearrangements of their computations (Timothy E. Holy)
-  // allow their algorithm to work for rank-1 updates even if the
-  // original matrix is not of full rank.
-  // Here only rank-1 updates are implemented, to reduce the
-  // requirement for intermediate storage and improve accuracy
-  template<typename MatrixType, typename WDerived>
-  static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, typename MatrixType::RealScalar sigma=1)
-  {
-    using internal::isfinite;
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename MatrixType::RealScalar RealScalar;
-    typedef typename MatrixType::Index Index;
-
-    const Index size = mat.rows();
-    eigen_assert(mat.cols() == size && w.size()==size);
-
-    RealScalar alpha = 1;
-
-    // Apply the update
-    for (Index j = 0; j < size; j++)
-    {
-      // Check for termination due to an original decomposition of low-rank
-      if (!(isfinite)(alpha))
-        break;
-
-      // Update the diagonal terms
-      RealScalar dj = real(mat.coeff(j,j));
-      Scalar wj = w.coeff(j);
-      RealScalar swj2 = sigma*abs2(wj);
-      RealScalar gamma = dj*alpha + swj2;
-
-      mat.coeffRef(j,j) += swj2/alpha;
-      alpha += swj2/dj;
-
-
-      // Update the terms of L
-      Index rs = size-j-1;
-      w.tail(rs) -= wj * mat.col(j).tail(rs);
-      if(gamma != 0)
-        mat.col(j).tail(rs) += (sigma*conj(wj)/gamma)*w.tail(rs);
-    }
-    return true;
-  }
-
-  template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
-  static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, typename MatrixType::RealScalar sigma=1)
-  {
-    // Apply the permutation to the input w
-    tmp = transpositions * w;
-
-    return ldlt_inplace<Lower>::updateInPlace(mat,tmp,sigma);
-  }
-};
-
-template<> struct ldlt_inplace<Upper>
-{
-  template<typename MatrixType, typename TranspositionType, typename Workspace>
-  static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
-  {
-    Transpose<MatrixType> matt(mat);
-    return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
-  }
-
-  template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
-  static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, typename MatrixType::RealScalar sigma=1)
-  {
-    Transpose<MatrixType> matt(mat);
-    return ldlt_inplace<Lower>::update(matt, transpositions, tmp, w.conjugate(), sigma);
-  }
-};
-
-template<typename MatrixType> struct LDLT_Traits<MatrixType,Lower>
-{
-  typedef const TriangularView<const MatrixType, UnitLower> MatrixL;
-  typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitUpper> MatrixU;
-  static inline MatrixL getL(const MatrixType& m) { return m; }
-  static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
-};
-
-template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
-{
-  typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitLower> MatrixL;
-  typedef const TriangularView<const MatrixType, UnitUpper> MatrixU;
-  static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); }
-  static inline MatrixU getU(const MatrixType& m) { return m; }
-};
-
-} // end namespace internal
-
-/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix
-  */
-template<typename MatrixType, int _UpLo>
-LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
-{
-  eigen_assert(a.rows()==a.cols());
-  const Index size = a.rows();
-
-  m_matrix = a;
-
-  m_transpositions.resize(size);
-  m_isInitialized = false;
-  m_temporary.resize(size);
-
-  internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign);
-
-  m_isInitialized = true;
-  return *this;
-}
-
-/** Update the LDLT decomposition:  given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T.
- * \param w a vector to be incorporated into the decomposition.
- * \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column vectors. Optional; default value is +1.
- * \sa setZero()
-  */
-template<typename MatrixType, int _UpLo>
-template<typename Derived>
-LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w,typename NumTraits<typename MatrixType::Scalar>::Real sigma)
-{
-  const Index size = w.rows();
-  if (m_isInitialized)
-  {
-    eigen_assert(m_matrix.rows()==size);
-  }
-  else
-  {    
-    m_matrix.resize(size,size);
-    m_matrix.setZero();
-    m_transpositions.resize(size);
-    for (Index i = 0; i < size; i++)
-      m_transpositions.coeffRef(i) = i;
-    m_temporary.resize(size);
-    m_sign = sigma>=0 ? 1 : -1;
-    m_isInitialized = true;
-  }
-
-  internal::ldlt_inplace<UpLo>::update(m_matrix, m_transpositions, m_temporary, w, sigma);
-
-  return *this;
-}
-
-namespace internal {
-template<typename _MatrixType, int _UpLo, typename Rhs>
-struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
-  : solve_retval_base<LDLT<_MatrixType,_UpLo>, Rhs>
-{
-  typedef LDLT<_MatrixType,_UpLo> LDLTType;
-  EIGEN_MAKE_SOLVE_HELPERS(LDLTType,Rhs)
-
-  template<typename Dest> void evalTo(Dest& dst) const
-  {
-    eigen_assert(rhs().rows() == dec().matrixLDLT().rows());
-    // dst = P b
-    dst = dec().transpositionsP() * rhs();
-
-    // dst = L^-1 (P b)
-    dec().matrixL().solveInPlace(dst);
-
-    // dst = D^-1 (L^-1 P b)
-    // more precisely, use pseudo-inverse of D (see bug 241)
-    using std::abs;
-    using std::max;
-    typedef typename LDLTType::MatrixType MatrixType;
-    typedef typename LDLTType::Scalar Scalar;
-    typedef typename LDLTType::RealScalar RealScalar;
-    const Diagonal<const MatrixType> vectorD = dec().vectorD();
-    RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits<Scalar>::epsilon(),
-                                 RealScalar(1) / NumTraits<RealScalar>::highest()); // motivated by LAPACK's xGELSS
-    for (Index i = 0; i < vectorD.size(); ++i) {
-      if(abs(vectorD(i)) > tolerance)
-        dst.row(i) /= vectorD(i);
-      else
-        dst.row(i).setZero();
-    }
-
-    // dst = L^-T (D^-1 L^-1 P b)
-    dec().matrixU().solveInPlace(dst);
-
-    // dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b
-    dst = dec().transpositionsP().transpose() * dst;
-  }
-};
-}
-
-/** \internal use x = ldlt_object.solve(x);
-  *
-  * This is the \em in-place version of solve().
-  *
-  * \param bAndX represents both the right-hand side matrix b and result x.
-  *
-  * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD.
-  *
-  * This version avoids a copy when the right hand side matrix b is not
-  * needed anymore.
-  *
-  * \sa LDLT::solve(), MatrixBase::ldlt()
-  */
-template<typename MatrixType,int _UpLo>
-template<typename Derived>
-bool LDLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const
-{
-  eigen_assert(m_isInitialized && "LDLT is not initialized.");
-  const Index size = m_matrix.rows();
-  eigen_assert(size == bAndX.rows());
-
-  bAndX = this->solve(bAndX);
-
-  return true;
-}
-
-/** \returns the matrix represented by the decomposition,
- * i.e., it returns the product: P^T L D L^* P.
- * This function is provided for debugging purposes. */
-template<typename MatrixType, int _UpLo>
-MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const
-{
-  eigen_assert(m_isInitialized && "LDLT is not initialized.");
-  const Index size = m_matrix.rows();
-  MatrixType res(size,size);
-
-  // P
-  res.setIdentity();
-  res = transpositionsP() * res;
-  // L^* P
-  res = matrixU() * res;
-  // D(L^*P)
-  res = vectorD().asDiagonal() * res;
-  // L(DL^*P)
-  res = matrixL() * res;
-  // P^T (LDL^*P)
-  res = transpositionsP().transpose() * res;
-
-  return res;
-}
-
-/** \cholesky_module
-  * \returns the Cholesky decomposition without square root (LDLT), with full pivoting, of \c *this
-  */
-template<typename MatrixType, unsigned int UpLo>
-inline const LDLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
-SelfAdjointView<MatrixType, UpLo>::ldlt() const
-{
-  return LDLT<PlainObject,UpLo>(m_matrix);
-}
-
-/** \cholesky_module
-  * \returns the Cholesky decomposition without square root (LDLT), with full pivoting, of \c *this
-  */
-template<typename Derived>
-inline const LDLT<typename MatrixBase<Derived>::PlainObject>
-MatrixBase<Derived>::ldlt() const
-{
-  return LDLT<PlainObject>(derived());
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_LDLT_H
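For reference, a minimal usage sketch of the dense LDLT API defined in the file removed above (compute(), solve(), rankUpdate(), reconstructedMatrix()); the values of A, b, and w are illustrative only:

// Minimal sketch of the LDLT API removed above; A, b, and w are made-up values.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d A;
  A << 4, 1, 0,
       1, 3, 1,
       0, 1, 2;                                  // symmetric positive definite
  Eigen::Vector3d b(1, 2, 3);

  Eigen::LDLT<Eigen::Matrix3d> ldlt(A);          // factors A = P^T L D L^* P
  Eigen::Vector3d x = ldlt.solve(b);             // solves A x = b (using the pseudo-inverse of D)

  Eigen::Vector3d w(1, 0, 1);
  ldlt.rankUpdate(w, 1.0);                       // decomposition now represents A + w w^T

  std::cout << (ldlt.reconstructedMatrix() - (A + w * w.transpose())).norm() << "\n";
  return static_cast<int>(x.size() == 3 ? 0 : 1);
}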
diff --git a/resources/3rdparty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h b/resources/3rdparty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h
deleted file mode 100644
index b38821807..000000000
--- a/resources/3rdparty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h
+++ /dev/null
@@ -1,599 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_CHOLMODSUPPORT_H
-#define EIGEN_CHOLMODSUPPORT_H
-
-namespace Eigen { 
-
-namespace internal {
-
-template<typename Scalar, typename CholmodType>
-void cholmod_configure_matrix(CholmodType& mat)
-{
-  if (internal::is_same<Scalar,float>::value)
-  {
-    mat.xtype = CHOLMOD_REAL;
-    mat.dtype = CHOLMOD_SINGLE;
-  }
-  else if (internal::is_same<Scalar,double>::value)
-  {
-    mat.xtype = CHOLMOD_REAL;
-    mat.dtype = CHOLMOD_DOUBLE;
-  }
-  else if (internal::is_same<Scalar,std::complex<float> >::value)
-  {
-    mat.xtype = CHOLMOD_COMPLEX;
-    mat.dtype = CHOLMOD_SINGLE;
-  }
-  else if (internal::is_same<Scalar,std::complex<double> >::value)
-  {
-    mat.xtype = CHOLMOD_COMPLEX;
-    mat.dtype = CHOLMOD_DOUBLE;
-  }
-  else
-  {
-    eigen_assert(false && "Scalar type not supported by CHOLMOD");
-  }
-}
-
-} // namespace internal
-
-/** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object.
-  * Note that the data are shared.
-  */
-template<typename _Scalar, int _Options, typename _Index>
-cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
-{
-  typedef SparseMatrix<_Scalar,_Options,_Index> MatrixType;
-  cholmod_sparse res;
-  res.nzmax   = mat.nonZeros();
-  res.nrow    = mat.rows();
-  res.ncol    = mat.cols();
-  res.p       = mat.outerIndexPtr();
-  res.i       = mat.innerIndexPtr();
-  res.x       = mat.valuePtr();
-  res.sorted  = 1;
-  if(mat.isCompressed())
-  {
-    res.packed  = 1;
-  }
-  else
-  {
-    res.packed  = 0;
-    res.nz = mat.innerNonZeroPtr();
-  }
-
-  res.dtype   = 0;
-  res.stype   = -1;
-  
-  if (internal::is_same<_Index,int>::value)
-  {
-    res.itype = CHOLMOD_INT;
-  }
-  else
-  {
-    eigen_assert(false && "Index type different than int is not supported yet");
-  }
-
-  // setup res.xtype
-  internal::cholmod_configure_matrix<_Scalar>(res);
-  
-  res.stype = 0;
-  
-  return res;
-}
-
-template<typename _Scalar, int _Options, typename _Index>
-const cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat)
-{
-  cholmod_sparse res = viewAsCholmod(mat.const_cast_derived());
-  return res;
-}
-
-/** Returns a view of the Eigen sparse matrix \a mat as a Cholmod sparse matrix.
-  * The data are not copied but shared. */
-template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo>
-cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat)
-{
-  cholmod_sparse res = viewAsCholmod(mat.matrix().const_cast_derived());
-  
-  if(UpLo==Upper) res.stype =  1;
-  if(UpLo==Lower) res.stype = -1;
-
-  return res;
-}
-
-/** Returns a view of the Eigen \b dense matrix \a mat as a Cholmod dense matrix.
-  * The data are not copied but shared. */
-template<typename Derived>
-cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat)
-{
-  EIGEN_STATIC_ASSERT((internal::traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
-  typedef typename Derived::Scalar Scalar;
-
-  cholmod_dense res;
-  res.nrow   = mat.rows();
-  res.ncol   = mat.cols();
-  res.nzmax  = res.nrow * res.ncol;
-  res.d      = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride();
-  res.x      = mat.derived().data();
-  res.z      = 0;
-
-  internal::cholmod_configure_matrix<Scalar>(res);
-
-  return res;
-}
-
-/** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix.
-  * The data are not copied but shared. */
-template<typename Scalar, int Flags, typename Index>
-MappedSparseMatrix<Scalar,Flags,Index> viewAsEigen(cholmod_sparse& cm)
-{
-  return MappedSparseMatrix<Scalar,Flags,Index>
-         (cm.nrow, cm.ncol, reinterpret_cast<Index*>(cm.p)[cm.ncol],
-          reinterpret_cast<Index*>(cm.p), reinterpret_cast<Index*>(cm.i),reinterpret_cast<Scalar*>(cm.x) );
-}
-
-enum CholmodMode {
-  CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt
-};
-
-
-/** \ingroup CholmodSupport_Module
-  * \class CholmodBase
-  * \brief The base class for the direct Cholesky factorization of Cholmod
-  * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT
-  */
-template<typename _MatrixType, int _UpLo, typename Derived>
-class CholmodBase : internal::noncopyable
-{
-  public:
-    typedef _MatrixType MatrixType;
-    enum { UpLo = _UpLo };
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename MatrixType::RealScalar RealScalar;
-    typedef MatrixType CholMatrixType;
-    typedef typename MatrixType::Index Index;
-
-  public:
-
-    CholmodBase()
-      : m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
-    {
-      cholmod_start(&m_cholmod);
-    }
-
-    CholmodBase(const MatrixType& matrix)
-      : m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
-    {
-      m_shiftOffset[0] = m_shiftOffset[1] = RealScalar(0.0);
-      cholmod_start(&m_cholmod);
-      compute(matrix);
-    }
-
-    ~CholmodBase()
-    {
-      if(m_cholmodFactor)
-        cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
-      cholmod_finish(&m_cholmod);
-    }
-    
-    inline Index cols() const { return m_cholmodFactor->n; }
-    inline Index rows() const { return m_cholmodFactor->n; }
-    
-    Derived& derived() { return *static_cast<Derived*>(this); }
-    const Derived& derived() const { return *static_cast<const Derived*>(this); }
-    
-    /** \brief Reports whether previous computation was successful.
-      *
-      * \returns \c Success if computation was successful,
-      *          \c NumericalIssue if the matrix appears to be negative.
-      */
-    ComputationInfo info() const
-    {
-      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
-      return m_info;
-    }
-
-    /** Computes the sparse Cholesky decomposition of \a matrix */
-    Derived& compute(const MatrixType& matrix)
-    {
-      analyzePattern(matrix);
-      factorize(matrix);
-      return derived();
-    }
-    
-    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
-      *
-      * \sa compute()
-      */
-    template<typename Rhs>
-    inline const internal::solve_retval<CholmodBase, Rhs>
-    solve(const MatrixBase<Rhs>& b) const
-    {
-      eigen_assert(m_isInitialized && "LLT is not initialized.");
-      eigen_assert(rows()==b.rows()
-                && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
-      return internal::solve_retval<CholmodBase, Rhs>(*this, b.derived());
-    }
-    
-    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
-      *
-      * \sa compute()
-      */
-    template<typename Rhs>
-    inline const internal::sparse_solve_retval<CholmodBase, Rhs>
-    solve(const SparseMatrixBase<Rhs>& b) const
-    {
-      eigen_assert(m_isInitialized && "LLT is not initialized.");
-      eigen_assert(rows()==b.rows()
-                && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
-      return internal::sparse_solve_retval<CholmodBase, Rhs>(*this, b.derived());
-    }
-    
-    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
-      *
-      * This function is particularly useful when solving several problems having the same structure.
-      * 
-      * \sa factorize()
-      */
-    void analyzePattern(const MatrixType& matrix)
-    {
-      if(m_cholmodFactor)
-      {
-        cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
-        m_cholmodFactor = 0;
-      }
-      cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
-      m_cholmodFactor = cholmod_analyze(&A, &m_cholmod);
-      
-      this->m_isInitialized = true;
-      this->m_info = Success;
-      m_analysisIsOk = true;
-      m_factorizationIsOk = false;
-    }
-    
-    /** Performs a numeric decomposition of \a matrix.
-      *
-      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
-      *
-      * \sa analyzePattern()
-      */
-    void factorize(const MatrixType& matrix)
-    {
-      eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
-      cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
-      cholmod_factorize_p(&A, m_shiftOffset, 0, 0, m_cholmodFactor, &m_cholmod);
-      
-      // If the factorization failed, minor is the column at which it did. On success minor == n.
-      this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue);
-      m_factorizationIsOk = true;
-    }
-    
-    /** Returns a reference to Cholmod's configuration structure, giving full control over the performed operations.
-     *  See the Cholmod user guide for details. */
-    cholmod_common& cholmod() { return m_cholmod; }
-    
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** \internal */
-    template<typename Rhs,typename Dest>
-    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
-    {
-      eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
-      const Index size = m_cholmodFactor->n;
-      EIGEN_UNUSED_VARIABLE(size);
-      eigen_assert(size==b.rows());
-
-      // note: cd stands for Cholmod Dense
-      cholmod_dense b_cd = viewAsCholmod(b.const_cast_derived());
-      cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod);
-      if(!x_cd)
-      {
-        this->m_info = NumericalIssue;
-      }
-      // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
-      dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
-      cholmod_free_dense(&x_cd, &m_cholmod);
-    }
-    
-    /** \internal */
-    template<typename RhsScalar, int RhsOptions, typename RhsIndex, typename DestScalar, int DestOptions, typename DestIndex>
-    void _solve(const SparseMatrix<RhsScalar,RhsOptions,RhsIndex> &b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
-    {
-      eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
-      const Index size = m_cholmodFactor->n;
-      eigen_assert(size==b.rows());
-
-      // note: cs stands for Cholmod Sparse
-      cholmod_sparse b_cs = viewAsCholmod(b);
-      cholmod_sparse* x_cs = cholmod_spsolve(CHOLMOD_A, m_cholmodFactor, &b_cs, &m_cholmod);
-      if(!x_cs)
-      {
-        this->m_info = NumericalIssue;
-      }
-      // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
-      dest = viewAsEigen<DestScalar,DestOptions,DestIndex>(*x_cs);
-      cholmod_free_sparse(&x_cs, &m_cholmod);
-    }
-    #endif // EIGEN_PARSED_BY_DOXYGEN
-    
-    
-    /** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization.
-      *
-      * During the numerical factorization, an offset term is added to the diagonal coefficients:\n
-      * \c d_ii = \a offset + \c d_ii
-      *
-      * The default is \a offset=0.
-      *
-      * \returns a reference to \c *this.
-      */
-    Derived& setShift(const RealScalar& offset)
-    {
-      m_shiftOffset[0] = offset;
-      return derived();
-    }
-    
-    template<typename Stream>
-    void dumpMemory(Stream& s)
-    {}
-    
-  protected:
-    mutable cholmod_common m_cholmod;
-    cholmod_factor* m_cholmodFactor;
-    RealScalar m_shiftOffset[2];
-    mutable ComputationInfo m_info;
-    bool m_isInitialized;
-    int m_factorizationIsOk;
-    int m_analysisIsOk;
-};
-
-/** \ingroup CholmodSupport_Module
-  * \class CholmodSimplicialLLT
-  * \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod
-  *
-  * This class allows solving A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization
-  * using the Cholmod library.
-  * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest.
-  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
-  * X and B can be either dense or sparse.
-  *
-  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
-  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
-  *               or Upper. Default is Lower.
-  *
-  * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
-  *
-  * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLLT
-  */
-template<typename _MatrixType, int _UpLo = Lower>
-class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> >
-{
-    typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base;
-    using Base::m_cholmod;
-    
-  public:
-    
-    typedef _MatrixType MatrixType;
-    
-    CholmodSimplicialLLT() : Base() { init(); }
-
-    CholmodSimplicialLLT(const MatrixType& matrix) : Base()
-    {
-      init();
-      compute(matrix);
-    }
-
-    ~CholmodSimplicialLLT() {}
-  protected:
-    void init()
-    {
-      m_cholmod.final_asis = 0;
-      m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
-      m_cholmod.final_ll = 1;
-    }
-};
-
-
-/** \ingroup CholmodSupport_Module
-  * \class CholmodSimplicialLDLT
-  * \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod
-  *
-  * This class allows solving A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization
-  * using the Cholmod library.
-  * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical interest.
-  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
-  * X and B can be either dense or sparse.
-  *
-  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
-  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
-  *               or Upper. Default is Lower.
-  *
-  * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
-  *
-  * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLDLT
-  */
-template<typename _MatrixType, int _UpLo = Lower>
-class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> >
-{
-    typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base;
-    using Base::m_cholmod;
-    
-  public:
-    
-    typedef _MatrixType MatrixType;
-    
-    CholmodSimplicialLDLT() : Base() { init(); }
-
-    CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
-    {
-      init();
-      compute(matrix);
-    }
-
-    ~CholmodSimplicialLDLT() {}
-  protected:
-    void init()
-    {
-      m_cholmod.final_asis = 1;
-      m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
-    }
-};
-
-/** \ingroup CholmodSupport_Module
-  * \class CholmodSupernodalLLT
-  * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod
-  *
-  * This class allows solving A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization
-  * using the Cholmod library.
-  * This supernodal variant performs best on dense enough problems, e.g., 3D FEM or very high order 2D FEM.
-  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
-  * X and B can be either dense or sparse.
-  *
-  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
-  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
-  *               or Upper. Default is Lower.
-  *
-  * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
-  *
-  * \sa \ref TutorialSparseDirectSolvers
-  */
-template<typename _MatrixType, int _UpLo = Lower>
-class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> >
-{
-    typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base;
-    using Base::m_cholmod;
-    
-  public:
-    
-    typedef _MatrixType MatrixType;
-    
-    CholmodSupernodalLLT() : Base() { init(); }
-
-    CholmodSupernodalLLT(const MatrixType& matrix) : Base()
-    {
-      init();
-      compute(matrix);
-    }
-
-    ~CholmodSupernodalLLT() {}
-  protected:
-    void init()
-    {
-      m_cholmod.final_asis = 1;
-      m_cholmod.supernodal = CHOLMOD_SUPERNODAL;
-    }
-};
-
-/** \ingroup CholmodSupport_Module
-  * \class CholmodDecomposition
-  * \brief A general Cholesky factorization and solver based on Cholmod
-  *
-  * This class allows solving A.X = B sparse linear problems via an LL^T or LDL^T Cholesky factorization
-  * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
-  * X and B can be either dense or sparse.
-  *
-  * This variant allows the underlying Cholesky method to be changed at runtime.
-  * On the other hand, it does not provide access to the result of the factorization.
-  * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization.
-  *
-  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
-  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
-  *               or Upper. Default is Lower.
-  *
-  * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
-  *
-  * \sa \ref TutorialSparseDirectSolvers
-  */
-template<typename _MatrixType, int _UpLo = Lower>
-class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> >
-{
-    typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base;
-    using Base::m_cholmod;
-    
-  public:
-    
-    typedef _MatrixType MatrixType;
-    
-    CholmodDecomposition() : Base() { init(); }
-
-    CholmodDecomposition(const MatrixType& matrix) : Base()
-    {
-      init();
-      compute(matrix);
-    }
-
-    ~CholmodDecomposition() {}
-    
-    void setMode(CholmodMode mode)
-    {
-      switch(mode)
-      {
-        case CholmodAuto:
-          m_cholmod.final_asis = 1;
-          m_cholmod.supernodal = CHOLMOD_AUTO;
-          break;
-        case CholmodSimplicialLLt:
-          m_cholmod.final_asis = 0;
-          m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
-          m_cholmod.final_ll = 1;
-          break;
-        case CholmodSupernodalLLt:
-          m_cholmod.final_asis = 1;
-          m_cholmod.supernodal = CHOLMOD_SUPERNODAL;
-          break;
-        case CholmodLDLt:
-          m_cholmod.final_asis = 1;
-          m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
-          break;
-        default:
-          break;
-      }
-    }
-  protected:
-    void init()
-    {
-      m_cholmod.final_asis = 1;
-      m_cholmod.supernodal = CHOLMOD_AUTO;
-    }
-};
-
-namespace internal {
-  
-template<typename _MatrixType, int _UpLo, typename Derived, typename Rhs>
-struct solve_retval<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
-  : solve_retval_base<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
-{
-  typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec;
-  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
-  template<typename Dest> void evalTo(Dest& dst) const
-  {
-    dec()._solve(rhs(),dst);
-  }
-};
-
-template<typename _MatrixType, int _UpLo, typename Derived, typename Rhs>
-struct sparse_solve_retval<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
-  : sparse_solve_retval_base<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
-{
-  typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec;
-  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
-
-  template<typename Dest> void evalTo(Dest& dst) const
-  {
-    dec()._solve(rhs(),dst);
-  }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_CHOLMODSUPPORT_H
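For reference, a hedged sketch of how the CholmodSupport solvers declared in the file removed above are typically driven; it assumes CHOLMOD (SuiteSparse) is installed and linked, and the matrix entries are illustrative only:

// Sketch only: requires linking against CHOLMOD; the values are made up.
#include <Eigen/Sparse>
#include <Eigen/CholmodSupport>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  SpMat A(3, 3);
  A.insert(0, 0) = 4; A.insert(1, 1) = 3; A.insert(2, 2) = 2;
  A.insert(0, 1) = 1; A.insert(1, 0) = 1;        // keep A selfadjoint positive definite
  A.makeCompressed();
  Eigen::VectorXd b = Eigen::VectorXd::Ones(3);

  Eigen::CholmodSupernodalLLT<SpMat> solver;     // or CholmodSimplicialLDLT, CholmodDecomposition, ...
  solver.compute(A);                             // analyzePattern() followed by factorize()
  if (solver.info() != Eigen::Success) return 1; // NumericalIssue if the factorization failed
  Eigen::VectorXd x = solver.solve(b);
  std::cout << x.transpose() << std::endl;
  return 0;
}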
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Array.h b/resources/3rdparty/eigen/Eigen/src/Core/Array.h
deleted file mode 100644
index 539e1d22b..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Array.h
+++ /dev/null
@@ -1,308 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_ARRAY_H
-#define EIGEN_ARRAY_H
-
-namespace Eigen {
-
-/** \class Array 
-  * \ingroup Core_Module
-  *
-  * \brief General-purpose arrays with easy API for coefficient-wise operations
-  *
-  * The %Array class is very similar to the Matrix class. It provides
-  * general-purpose one- and two-dimensional arrays. The difference between the
-  * %Array and the %Matrix class is primarily in the API: the API for the
-  * %Array class provides easy access to coefficient-wise operations, while the
-  * API for the %Matrix class provides easy access to linear-algebra
-  * operations.
-  *
-  * This class can be extended with the help of the plugin mechanism described on the page
-  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN.
-  *
-  * \sa \ref TutorialArrayClass, \ref TopicClassHierarchy
-  */
-namespace internal {
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-struct traits<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > : traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
-{
-  typedef ArrayXpr XprKind;
-  typedef ArrayBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > XprBase;
-};
-}
-
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-class Array
-  : public PlainObjectBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
-{
-  public:
-
-    typedef PlainObjectBase<Array> Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Array)
-
-    enum { Options = _Options };
-    typedef typename Base::PlainObject PlainObject;
-
-  protected:
-    template <typename Derived, typename OtherDerived, bool IsVector>
-    friend struct internal::conservative_resize_like_impl;
-
-    using Base::m_storage;
-
-  public:
-
-    using Base::base;
-    using Base::coeff;
-    using Base::coeffRef;
-
-    /**
-      * The usage of
-      *   using Base::operator=;
-      * fails on MSVC. Since the code below works with both GCC and MSVC, we skip
-      * the 'using' declaration here. This should be done only for operator=.
-      */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Array& operator=(const EigenBase<OtherDerived> &other)
-    {
-      return Base::operator=(other);
-    }
-
-    /** Copies the value of the expression \a other into \c *this with automatic resizing.
-      *
-      * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
-      * it will be initialized.
-      *
-      * Note that copying a row-vector into a vector (and conversely) is allowed.
-      * The resizing, if any, is then done in the appropriate way so that row-vectors
-      * remain row-vectors and vectors remain vectors.
-      */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Array& operator=(const ArrayBase<OtherDerived>& other)
-    {
-      return Base::_set(other);
-    }
-
-    /** This is a special case of the templated operator=. Its purpose is to
-      * prevent a default operator= from hiding the templated operator=.
-      */
-    EIGEN_STRONG_INLINE Array& operator=(const Array& other)
-    {
-      return Base::_set(other);
-    }
-
-    /** Default constructor.
-      *
-      * For fixed-size matrices, does nothing.
-      *
-      * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix
-      * is called a null matrix. This constructor is the unique way to create null matrices: resizing
-      * a matrix to 0 is not supported.
-      *
-      * \sa resize(Index,Index)
-      */
-    EIGEN_STRONG_INLINE explicit Array() : Base()
-    {
-      Base::_check_template_params();
-      EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
-    }
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    // FIXME is it still needed ??
-    /** \internal */
-    Array(internal::constructor_without_unaligned_array_assert)
-      : Base(internal::constructor_without_unaligned_array_assert())
-    {
-      Base::_check_template_params();
-      EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
-    }
-#endif
-
-    /** Constructs a vector or row-vector with given dimension. \only_for_vectors
-      *
-      * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
-      * it is redundant to pass the dimension here, so it makes more sense to use the default
-      * constructor Array() instead.
-      */
-    EIGEN_STRONG_INLINE explicit Array(Index dim)
-      : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
-    {
-      Base::_check_template_params();
-      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Array)
-      eigen_assert(dim >= 0);
-      eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim);
-      EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
-    }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    template<typename T0, typename T1>
-    EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1)
-    {
-      Base::_check_template_params();
-      this->template _init2<T0,T1>(val0, val1);
-    }
-    #else
-    /** constructs an uninitialized matrix with \a rows rows and \a cols columns.
-      *
-      * This is useful for dynamic-size matrices. For fixed-size matrices,
-      * it is redundant to pass these parameters, so one should use the default constructor
-      * Array() instead. */
-    Array(Index rows, Index cols);
-    /** constructs an initialized 2D vector with given coefficients */
-    Array(const Scalar& val0, const Scalar& val1);
-    #endif
-
-    /** constructs an initialized 3D vector with given coefficients */
-    EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2)
-    {
-      Base::_check_template_params();
-      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3)
-      m_storage.data()[0] = val0;
-      m_storage.data()[1] = val1;
-      m_storage.data()[2] = val2;
-    }
-    /** constructs an initialized 4D vector with given coefficients */
-    EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3)
-    {
-      Base::_check_template_params();
-      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4)
-      m_storage.data()[0] = val0;
-      m_storage.data()[1] = val1;
-      m_storage.data()[2] = val2;
-      m_storage.data()[3] = val3;
-    }
-
-    explicit Array(const Scalar *data);
-
-    /** Constructor copying the value of the expression \a other */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Array(const ArrayBase<OtherDerived>& other)
-             : Base(other.rows() * other.cols(), other.rows(), other.cols())
-    {
-      Base::_check_template_params();
-      Base::_set_noalias(other);
-    }
-    /** Copy constructor */
-    EIGEN_STRONG_INLINE Array(const Array& other)
-            : Base(other.rows() * other.cols(), other.rows(), other.cols())
-    {
-      Base::_check_template_params();
-      Base::_set_noalias(other);
-    }
-    /** Copy constructor with in-place evaluation */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Array(const ReturnByValue<OtherDerived>& other)
-    {
-      Base::_check_template_params();
-      Base::resize(other.rows(), other.cols());
-      other.evalTo(*this);
-    }
-
-    /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Array(const EigenBase<OtherDerived> &other)
-      : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
-    {
-      Base::_check_template_params();
-      Base::resize(other.rows(), other.cols());
-      *this = other;
-    }
-
-    /** Override MatrixBase::swap() since for dynamic-sized matrices of the same type it is enough to swap the
-      * data pointers.
-      */
-    template<typename OtherDerived>
-    void swap(ArrayBase<OtherDerived> const & other)
-    { this->_swap(other.derived()); }
-
-    inline Index innerStride() const { return 1; }
-    inline Index outerStride() const { return this->innerSize(); }
-
-    #ifdef EIGEN_ARRAY_PLUGIN
-    #include EIGEN_ARRAY_PLUGIN
-    #endif
-
-  private:
-
-    template<typename MatrixType, typename OtherDerived, bool SwapPointers>
-    friend struct internal::matrix_swap_impl;
-};
-
-/** \defgroup arraytypedefs Global array typedefs
-  * \ingroup Core_Module
-  *
-  * Eigen defines several typedef shortcuts for most common 1D and 2D array types.
-  *
-  * The general patterns are the following:
-  *
-  * \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2, \c 3, \c 4 for fixed-size square arrays or \c X for dynamic size,
-  * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd
-  * for complex double.
-  *
-  * For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size array of floats.
-  *
-  * There are also \c ArraySizeType typedefs, which are self-explanatory. For example, \c Array4cf is
-  * a fixed-size 1D array of 4 complex floats.
-  *
-  * \sa class Array
-  */
-
-#define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)   \
-/** \ingroup arraytypedefs */                                    \
-typedef Array<Type, Size, Size> Array##SizeSuffix##SizeSuffix##TypeSuffix;  \
-/** \ingroup arraytypedefs */                                    \
-typedef Array<Type, Size, 1>    Array##SizeSuffix##TypeSuffix;
-
-#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size)         \
-/** \ingroup arraytypedefs */                                    \
-typedef Array<Type, Size, Dynamic> Array##Size##X##TypeSuffix;  \
-/** \ingroup arraytypedefs */                                    \
-typedef Array<Type, Dynamic, Size> Array##X##Size##TypeSuffix;
-
-#define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
-EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \
-EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \
-EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \
-EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \
-EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \
-EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \
-EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4)
-
-EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int,                  i)
-EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float,                f)
-EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double,               d)
-EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<float>,  cf)
-EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
-
-#undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES
-#undef EIGEN_MAKE_ARRAY_TYPEDEFS
-
-#undef EIGEN_MAKE_ARRAY_TYPEDEFS_LARGE
-
-#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \
-using Eigen::Matrix##SizeSuffix##TypeSuffix; \
-using Eigen::Vector##SizeSuffix##TypeSuffix; \
-using Eigen::RowVector##SizeSuffix##TypeSuffix;
-
-#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \
-EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \
-EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \
-EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \
-EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \
-
-#define EIGEN_USING_ARRAY_TYPEDEFS \
-EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \
-EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \
-EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \
-EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \
-EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd)
-
-} // end namespace Eigen
-
-#endif // EIGEN_ARRAY_H
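For reference, a short sketch of the coefficient-wise Array API and the typedefs generated by the macros in the file removed above; the values are illustrative:

// Sketch of coefficient-wise Array usage; the values are illustrative.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Array33d a;                                   // fixed-size 3x3 array of doubles (typedef above)
  a.setConstant(2.0);
  Eigen::ArrayXXf b = Eigen::ArrayXXf::Random(2, 2);   // dynamic-size array of floats

  Eigen::Array33d c = a * a + 1.0;                     // coefficient-wise product and scalar addition
  std::cout << c << "\n" << (b.abs() < 0.5f) << std::endl;
  return 0;
}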
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/ArrayWrapper.h b/resources/3rdparty/eigen/Eigen/src/Core/ArrayWrapper.h
deleted file mode 100644
index 1e021b0b9..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/ArrayWrapper.h
+++ /dev/null
@@ -1,254 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_ARRAYWRAPPER_H
-#define EIGEN_ARRAYWRAPPER_H
-
-namespace Eigen { 
-
-/** \class ArrayWrapper
-  * \ingroup Core_Module
-  *
-  * \brief Expression of a mathematical vector or matrix as an array object
-  *
-  * This class is the return type of MatrixBase::array(), and most of the time
-  * this is the only way it is used.
-  *
-  * \sa MatrixBase::array(), class MatrixWrapper
-  */
-
-namespace internal {
-template<typename ExpressionType>
-struct traits<ArrayWrapper<ExpressionType> >
-  : public traits<typename remove_all<typename ExpressionType::Nested>::type >
-{
-  typedef ArrayXpr XprKind;
-};
-}
-
-template<typename ExpressionType>
-class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
-{
-  public:
-    typedef ArrayBase<ArrayWrapper> Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper)
-    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper)
-
-    typedef typename internal::conditional<
-                       internal::is_lvalue<ExpressionType>::value,
-                       Scalar,
-                       const Scalar
-                     >::type ScalarWithConstIfNotLvalue;
-
-    typedef typename internal::nested<ExpressionType>::type NestedExpressionType;
-
-    inline ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {}
-
-    inline Index rows() const { return m_expression.rows(); }
-    inline Index cols() const { return m_expression.cols(); }
-    inline Index outerStride() const { return m_expression.outerStride(); }
-    inline Index innerStride() const { return m_expression.innerStride(); }
-
-    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
-    inline const Scalar* data() const { return m_expression.data(); }
-
-    inline CoeffReturnType coeff(Index rowId, Index colId) const
-    {
-      return m_expression.coeff(rowId, colId);
-    }
-
-    inline Scalar& coeffRef(Index rowId, Index colId)
-    {
-      return m_expression.const_cast_derived().coeffRef(rowId, colId);
-    }
-
-    inline const Scalar& coeffRef(Index rowId, Index colId) const
-    {
-      return m_expression.const_cast_derived().coeffRef(rowId, colId);
-    }
-
-    inline CoeffReturnType coeff(Index index) const
-    {
-      return m_expression.coeff(index);
-    }
-
-    inline Scalar& coeffRef(Index index)
-    {
-      return m_expression.const_cast_derived().coeffRef(index);
-    }
-
-    inline const Scalar& coeffRef(Index index) const
-    {
-      return m_expression.const_cast_derived().coeffRef(index);
-    }
-
-    template<int LoadMode>
-    inline const PacketScalar packet(Index rowId, Index colId) const
-    {
-      return m_expression.template packet<LoadMode>(rowId, colId);
-    }
-
-    template<int LoadMode>
-    inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
-    {
-      m_expression.const_cast_derived().template writePacket<LoadMode>(rowId, colId, val);
-    }
-
-    template<int LoadMode>
-    inline const PacketScalar packet(Index index) const
-    {
-      return m_expression.template packet<LoadMode>(index);
-    }
-
-    template<int LoadMode>
-    inline void writePacket(Index index, const PacketScalar& val)
-    {
-      m_expression.const_cast_derived().template writePacket<LoadMode>(index, val);
-    }
-
-    template<typename Dest>
-    inline void evalTo(Dest& dst) const { dst = m_expression; }
-
-    const typename internal::remove_all<NestedExpressionType>::type& 
-    nestedExpression() const 
-    {
-      return m_expression;
-    }
-
-    /** Forwards the resizing request to the nested expression
-      * \sa DenseBase::resize(Index)  */
-    void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); }
-    /** Forwards the resizing request to the nested expression
-      * \sa DenseBase::resize(Index,Index)*/
-    void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); }
-
-  protected:
-    NestedExpressionType m_expression;
-};
-
-/** \class MatrixWrapper
-  * \ingroup Core_Module
-  *
-  * \brief Expression of an array as a mathematical vector or matrix
-  *
-  * This class is the return type of ArrayBase::matrix(), and most of the time
-  * this is the only way it is used.
-  *
-  * \sa MatrixBase::matrix(), class ArrayWrapper
-  */
-
-namespace internal {
-template<typename ExpressionType>
-struct traits<MatrixWrapper<ExpressionType> >
- : public traits<typename remove_all<typename ExpressionType::Nested>::type >
-{
-  typedef MatrixXpr XprKind;
-};
-}
-
-template<typename ExpressionType>
-class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
-{
-  public:
-    typedef MatrixBase<MatrixWrapper<ExpressionType> > Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper)
-    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper)
-
-    typedef typename internal::conditional<
-                       internal::is_lvalue<ExpressionType>::value,
-                       Scalar,
-                       const Scalar
-                     >::type ScalarWithConstIfNotLvalue;
-
-    typedef typename internal::nested<ExpressionType>::type NestedExpressionType;
-
-    inline MatrixWrapper(ExpressionType& a_matrix) : m_expression(a_matrix) {}
-
-    inline Index rows() const { return m_expression.rows(); }
-    inline Index cols() const { return m_expression.cols(); }
-    inline Index outerStride() const { return m_expression.outerStride(); }
-    inline Index innerStride() const { return m_expression.innerStride(); }
-
-    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
-    inline const Scalar* data() const { return m_expression.data(); }
-
-    inline CoeffReturnType coeff(Index rowId, Index colId) const
-    {
-      return m_expression.coeff(rowId, colId);
-    }
-
-    inline Scalar& coeffRef(Index rowId, Index colId)
-    {
-      return m_expression.const_cast_derived().coeffRef(rowId, colId);
-    }
-
-    inline const Scalar& coeffRef(Index rowId, Index colId) const
-    {
-      return m_expression.derived().coeffRef(rowId, colId);
-    }
-
-    inline CoeffReturnType coeff(Index index) const
-    {
-      return m_expression.coeff(index);
-    }
-
-    inline Scalar& coeffRef(Index index)
-    {
-      return m_expression.const_cast_derived().coeffRef(index);
-    }
-
-    inline const Scalar& coeffRef(Index index) const
-    {
-      return m_expression.const_cast_derived().coeffRef(index);
-    }
-
-    template<int LoadMode>
-    inline const PacketScalar packet(Index rowId, Index colId) const
-    {
-      return m_expression.template packet<LoadMode>(rowId, colId);
-    }
-
-    template<int LoadMode>
-    inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
-    {
-      m_expression.const_cast_derived().template writePacket<LoadMode>(rowId, colId, val);
-    }
-
-    template<int LoadMode>
-    inline const PacketScalar packet(Index index) const
-    {
-      return m_expression.template packet<LoadMode>(index);
-    }
-
-    template<int LoadMode>
-    inline void writePacket(Index index, const PacketScalar& val)
-    {
-      m_expression.const_cast_derived().template writePacket<LoadMode>(index, val);
-    }
-
-    const typename internal::remove_all<NestedExpressionType>::type& 
-    nestedExpression() const 
-    {
-      return m_expression;
-    }
-
-    /** Forwards the resizing request to the nested expression
-      * \sa DenseBase::resize(Index)  */
-    void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); }
-    /** Forwards the resizing request to the nested expression
-      * \sa DenseBase::resize(Index,Index)*/
-    void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); }
-
-  protected:
-    NestedExpressionType m_expression;
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_ARRAYWRAPPER_H
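For reference, a sketch of how the ArrayWrapper and MatrixWrapper expressions defined in the file removed above are obtained in practice through .array() and .matrix(); the values are illustrative:

// Sketch of ArrayWrapper / MatrixWrapper as returned by .array() and .matrix().
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d m;
  m << 1, 2,
       3, 4;

  // m.array() yields an ArrayWrapper: coefficient-wise semantics, no copy of the data.
  Eigen::Matrix2d squared = (m.array() * m.array()).matrix();

  // .matrix() on an array expression yields a MatrixWrapper with linear-algebra semantics.
  Eigen::Array22d a = m.array() + 1.0;
  Eigen::Vector2d v = a.matrix() * Eigen::Vector2d(1, 1);

  std::cout << squared << "\n" << v.transpose() << std::endl;
  return 0;
}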
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Assign_MKL.h b/resources/3rdparty/eigen/Eigen/src/Core/Assign_MKL.h
deleted file mode 100644
index 7772951b9..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Assign_MKL.h
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- Copyright (c) 2011, Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without modification,
- are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
- * Neither the name of Intel Corporation nor the names of its contributors may
-   be used to endorse or promote products derived from this software without
-   specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
- ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- ********************************************************************************
- *   Content : Eigen bindings to Intel(R) MKL
- *   MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin()
- ********************************************************************************
-*/
-
-#ifndef EIGEN_ASSIGN_VML_H
-#define EIGEN_ASSIGN_VML_H
-
-namespace Eigen { 
-
-namespace internal {
-
-template<typename Op> struct vml_call
-{ enum { IsSupported = 0 }; };
-
-template<typename Dst, typename Src, typename UnaryOp>
-class vml_assign_traits
-{
-  private:
-    enum {
-      DstHasDirectAccess = Dst::Flags & DirectAccessBit,
-      SrcHasDirectAccess = Src::Flags & DirectAccessBit,
-
-      StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)),
-      InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime)
-                : int(Dst::Flags)&RowMajorBit ? int(Dst::ColsAtCompileTime)
-                : int(Dst::RowsAtCompileTime),
-      InnerMaxSize  = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime)
-                    : int(Dst::Flags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime)
-                    : int(Dst::MaxRowsAtCompileTime),
-      MaxSizeAtCompileTime = Dst::SizeAtCompileTime,
-
-      MightEnableVml =  vml_call<UnaryOp>::IsSupported && StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess
-                     && Src::InnerStrideAtCompileTime==1 && Dst::InnerStrideAtCompileTime==1,
-      MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit),
-      VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize,
-      LargeEnough = VmlSize==Dynamic || VmlSize>=EIGEN_MKL_VML_THRESHOLD,
-      MayEnableVml = MightEnableVml && LargeEnough,
-      MayLinearize = MayEnableVml && MightLinearize
-    };
-  public:
-    enum {
-      Traversal = MayLinearize ? LinearVectorizedTraversal
-                : MayEnableVml ? InnerVectorizedTraversal
-                : DefaultTraversal
-    };
-};
-
-template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling,
-         int VmlTraversal = vml_assign_traits<Derived1, Derived2, UnaryOp>::Traversal >
-struct vml_assign_impl
-  : assign_impl<Derived1, Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>
-{
-};
-
-template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling>
-struct vml_assign_impl<Derived1, Derived2, UnaryOp, Traversal, Unrolling, InnerVectorizedTraversal>
-{
-  typedef typename Derived1::Scalar Scalar;
-  typedef typename Derived1::Index Index;
-  static inline void run(Derived1& dst, const CwiseUnaryOp<UnaryOp, Derived2>& src)
-  {
-    // in case we want to (or have to) skip VML at runtime we can call:
-    // assign_impl<Derived1,Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>::run(dst,src);
-    const Index innerSize = dst.innerSize();
-    const Index outerSize = dst.outerSize();
-    for(Index outer = 0; outer < outerSize; ++outer) {
-      const Scalar *src_ptr = src.IsRowMajor ?  &(src.nestedExpression().coeffRef(outer,0)) :
-                                                &(src.nestedExpression().coeffRef(0, outer));
-      Scalar *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer));
-      vml_call<UnaryOp>::run(src.functor(), innerSize, src_ptr, dst_ptr );
-    }
-  }
-};
-
-template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling>
-struct vml_assign_impl<Derived1, Derived2, UnaryOp, Traversal, Unrolling, LinearVectorizedTraversal>
-{
-  static inline void run(Derived1& dst, const CwiseUnaryOp<UnaryOp, Derived2>& src)
-  {
-    // in case we want to (or have to) skip VML at runtime we can call:
-    // assign_impl<Derived1,Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>::run(dst,src);
-    vml_call<UnaryOp>::run(src.functor(), dst.size(), src.nestedExpression().data(), dst.data() );
-  }
-};
-
-// Macros
-
-#define EIGEN_MKL_VML_SPECIALIZE_ASSIGN(TRAVERSAL,UNROLLING) \
-  template<typename Derived1, typename Derived2, typename UnaryOp> \
-  struct assign_impl<Derived1, Eigen::CwiseUnaryOp<UnaryOp, Derived2>, TRAVERSAL, UNROLLING, Specialized>  {  \
-    static inline void run(Derived1 &dst, const Eigen::CwiseUnaryOp<UnaryOp, Derived2> &src) { \
-      vml_assign_impl<Derived1,Derived2,UnaryOp,TRAVERSAL,UNROLLING>::run(dst, src); \
-    } \
-  };
-
-EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,NoUnrolling)
-EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,CompleteUnrolling)
-EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,InnerUnrolling)
-EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,NoUnrolling)
-EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,CompleteUnrolling)
-EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,NoUnrolling)
-EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,CompleteUnrolling)
-EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,InnerUnrolling)
-EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,CompleteUnrolling)
-EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,NoUnrolling)
-EIGEN_MKL_VML_SPECIALIZE_ASSIGN(SliceVectorizedTraversal,NoUnrolling)
-
-
-#if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1)
-#define  EIGEN_MKL_VML_MODE VML_HA
-#else
-#define  EIGEN_MKL_VML_MODE VML_LA
-#endif
-
-#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE)     \
-  template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > {               \
-    enum { IsSupported = 1 };                                                    \
-    static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& /*func*/,        \
-                            int size, const EIGENTYPE* src, EIGENTYPE* dst) {    \
-      VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst);                           \
-    }                                                                            \
-  };
-
-#define EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE)  \
-  template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > {               \
-    enum { IsSupported = 1 };                                                    \
-    static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& /*func*/,        \
-                            int size, const EIGENTYPE* src, EIGENTYPE* dst) {    \
-      MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE;                                    \
-      VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst, vmlMode);                  \
-    }                                                                            \
-  };
-
-#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE)       \
-  template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > {               \
-    enum { IsSupported = 1 };                                                    \
-    static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& func,        \
-                          int size, const EIGENTYPE* src, EIGENTYPE* dst) {      \
-      EIGENTYPE exponent = func.m_exponent;                                      \
-      MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE;                                    \
-      VMLOP(&size, (const VMLTYPE*)src, (const VMLTYPE*)&exponent,               \
-                        (VMLTYPE*)dst, &vmlMode);                                \
-    }                                                                            \
-  };
-
-#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP)                   \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vs##VMLOP, float, float)             \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vd##VMLOP, double, double)
-
-#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP)                \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vc##VMLOP, scomplex, MKL_Complex8)   \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vz##VMLOP, dcomplex, MKL_Complex16)
-
-#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP)                        \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP)                         \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP)
-
-
-#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP)                \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vms##VMLOP, float, float)         \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmd##VMLOP, double, double)
-
-#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP)             \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmc##VMLOP, scomplex, MKL_Complex8)  \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmz##VMLOP, dcomplex, MKL_Complex16)
-
-#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(EIGENOP, VMLOP)                     \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP)                      \
-  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP)
-
-
-EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sin,  Sin)
-EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(asin, Asin)
-EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(cos,  Cos)
-EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(acos, Acos)
-EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(tan,  Tan)
-//EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs,  Abs)
-EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(exp,  Exp)
-EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(log,  Ln)
-EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sqrt, Sqrt)
-
-EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr)
-
-// The vm*powx functions are not available in the Windows version of MKL.
-#ifndef _WIN32
-EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmspowx_, float, float)
-EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdpowx_, double, double)
-EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcpowx_, scomplex, MKL_Complex8)
-EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzpowx_, dcomplex, MKL_Complex16)
-#endif
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_ASSIGN_VML_H
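The dispatch layer above is transparent at call sites: when Eigen is built with MKL's vector math library enabled (e.g. by defining EIGEN_USE_MKL_ALL before including Eigen), an assignment whose right-hand side is a supported unary coefficient-wise expression is forwarded to the matching VML kernel. A minimal sketch, assuming such a build; the variable names are illustrative:

    #include <Eigen/Dense>

    int main() {
      Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(1024, 0.0, 1.0);
      // x.exp() maps onto scalar_exp_op<double>, for which a vml_call
      // specialization is declared above, so this assignment can be routed
      // to the MKL vdExp/vmdExp kernel instead of the built-in path.
      Eigen::ArrayXd y = x.exp();
      return y.size() == 1024 ? 0 : 1;
    }

With a plain (non-MKL) build the same code compiles unchanged and falls back to Eigen's built-in vectorized traversal.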
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Block.h b/resources/3rdparty/eigen/Eigen/src/Core/Block.h
deleted file mode 100644
index 9c3f9acb6..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Block.h
+++ /dev/null
@@ -1,357 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_BLOCK_H
-#define EIGEN_BLOCK_H
-
-namespace Eigen { 
-
-/** \class Block
-  * \ingroup Core_Module
-  *
-  * \brief Expression of a fixed-size or dynamic-size block
-  *
-  * \param XprType the type of the expression in which we are taking a block
-  * \param BlockRows the number of rows of the block we are taking at compile time (optional)
-  * \param BlockCols the number of columns of the block we are taking at compile time (optional)
-  * \param _DirectAccessStatus \internal used for partial specialization
-  *
-  * This class represents an expression of either a fixed-size or dynamic-size block. It is the return
-  * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and
-  * most of the time this is the only way it is used.
-  *
-  * However, if you want to directly manipulate block expressions,
-  * for instance if you want to write a function returning such an expression, you
-  * will need to use this class.
-  *
-  * Here is an example illustrating the dynamic case:
-  * \include class_Block.cpp
-  * Output: \verbinclude class_Block.out
-  *
-  * \note Even though this expression has dynamic size, in the case where \a XprType
-  * has fixed size, this expression inherits a fixed maximal size which means that evaluating
-  * it does not cause a dynamic memory allocation.
-  *
-  * Here is an example illustrating the fixed-size case:
-  * \include class_FixedBlock.cpp
-  * Output: \verbinclude class_FixedBlock.out
-  *
-  * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock
-  */
-
-namespace internal {
-template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess>
-struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess> > : traits<XprType>
-{
-  typedef typename traits<XprType>::Scalar Scalar;
-  typedef typename traits<XprType>::StorageKind StorageKind;
-  typedef typename traits<XprType>::XprKind XprKind;
-  typedef typename nested<XprType>::type XprTypeNested;
-  typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
-  enum{
-    MatrixRows = traits<XprType>::RowsAtCompileTime,
-    MatrixCols = traits<XprType>::ColsAtCompileTime,
-    RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows,
-    ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols,
-    MaxRowsAtCompileTime = BlockRows==0 ? 0
-                         : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime)
-                         : int(traits<XprType>::MaxRowsAtCompileTime),
-    MaxColsAtCompileTime = BlockCols==0 ? 0
-                         : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
-                         : int(traits<XprType>::MaxColsAtCompileTime),
-    XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
-    IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
-               : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
-               : XprTypeIsRowMajor,
-    HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
-    InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
-    InnerStrideAtCompileTime = HasSameStorageOrderAsXprType
-                             ? int(inner_stride_at_compile_time<XprType>::ret)
-                             : int(outer_stride_at_compile_time<XprType>::ret),
-    OuterStrideAtCompileTime = HasSameStorageOrderAsXprType
-                             ? int(outer_stride_at_compile_time<XprType>::ret)
-                             : int(inner_stride_at_compile_time<XprType>::ret),
-    MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits<Scalar>::size) == 0)
-                       && (InnerStrideAtCompileTime == 1)
-                        ? PacketAccessBit : 0,
-    MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0,
-    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
-    FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
-    FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
-    Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
-                                        DirectAccessBit |
-                                        MaskPacketAccessBit |
-                                        MaskAlignedBit),
-    Flags = Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit
-  };
-};
-}
-
-template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess> class Block
-  : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess> >::type
-{
-  public:
-
-    typedef typename internal::dense_xpr_base<Block>::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Block)
-
-    class InnerIterator;
-
-    /** Column or Row constructor
-      */
-    inline Block(XprType& xpr, Index i)
-      : m_xpr(xpr),
-        // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
-        // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
-        // all other cases are invalid.
-        // The case of a 1x1 matrix seems ambiguous, but the result is the same anyway.
-        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
-        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
-        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
-        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
-    {
-      eigen_assert( (i>=0) && (
-          ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
-        ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols())));
-    }
-
-    /** Fixed-size constructor
-      */
-    inline Block(XprType& xpr, Index a_startRow, Index a_startCol)
-      : m_xpr(xpr), m_startRow(a_startRow), m_startCol(a_startCol),
-        m_blockRows(BlockRows), m_blockCols(BlockCols)
-    {
-      EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
-      eigen_assert(a_startRow >= 0 && BlockRows >= 1 && a_startRow + BlockRows <= xpr.rows()
-             && a_startCol >= 0 && BlockCols >= 1 && a_startCol + BlockCols <= xpr.cols());
-    }
-
-    /** Dynamic-size constructor
-      */
-    inline Block(XprType& xpr,
-          Index a_startRow, Index a_startCol,
-          Index blockRows, Index blockCols)
-      : m_xpr(xpr), m_startRow(a_startRow), m_startCol(a_startCol),
-                          m_blockRows(blockRows), m_blockCols(blockCols)
-    {
-      eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
-          && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
-      eigen_assert(a_startRow >= 0 && blockRows >= 0 && a_startRow + blockRows <= xpr.rows()
-          && a_startCol >= 0 && blockCols >= 0 && a_startCol + blockCols <= xpr.cols());
-    }
-
-    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
-
-    inline Index rows() const { return m_blockRows.value(); }
-    inline Index cols() const { return m_blockCols.value(); }
-
-    inline Scalar& coeffRef(Index rowId, Index colId)
-    {
-      EIGEN_STATIC_ASSERT_LVALUE(XprType)
-      return m_xpr.const_cast_derived()
-               .coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
-    }
-
-    inline const Scalar& coeffRef(Index rowId, Index colId) const
-    {
-      return m_xpr.derived()
-               .coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
-    }
-
-    EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const
-    {
-      return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value());
-    }
-
-    inline Scalar& coeffRef(Index index)
-    {
-      EIGEN_STATIC_ASSERT_LVALUE(XprType)
-      return m_xpr.const_cast_derived()
-             .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
-                       m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
-    }
-
-    inline const Scalar& coeffRef(Index index) const
-    {
-      return m_xpr.const_cast_derived()
-             .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
-                       m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
-    }
-
-    inline const CoeffReturnType coeff(Index index) const
-    {
-      return m_xpr
-             .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
-                    m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
-    }
-
-    template<int LoadMode>
-    inline PacketScalar packet(Index rowId, Index colId) const
-    {
-      return m_xpr.template packet<Unaligned>
-              (rowId + m_startRow.value(), colId + m_startCol.value());
-    }
-
-    template<int LoadMode>
-    inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
-    {
-      m_xpr.const_cast_derived().template writePacket<Unaligned>
-              (rowId + m_startRow.value(), colId + m_startCol.value(), val);
-    }
-
-    template<int LoadMode>
-    inline PacketScalar packet(Index index) const
-    {
-      return m_xpr.template packet<Unaligned>
-              (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
-               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
-    }
-
-    template<int LoadMode>
-    inline void writePacket(Index index, const PacketScalar& val)
-    {
-      m_xpr.const_cast_derived().template writePacket<Unaligned>
-         (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
-          m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val);
-    }
-
-    #ifdef EIGEN_PARSED_BY_DOXYGEN
-    /** \sa MapBase::data() */
-    inline const Scalar* data() const;
-    inline Index innerStride() const;
-    inline Index outerStride() const;
-    #endif
-
-    const typename internal::remove_all<typename XprType::Nested>::type& nestedExpression() const 
-    { 
-      return m_xpr; 
-    }
-      
-    Index startRow() const 
-    { 
-      return m_startRow.value(); 
-    }
-      
-    Index startCol() const 
-    { 
-      return m_startCol.value(); 
-    }
-
-  protected:
-
-    const typename XprType::Nested m_xpr;
-    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
-    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
-    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
-    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
-};
-
-/** \internal */
-template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
-class Block<XprType,BlockRows,BlockCols, InnerPanel,true>
-  : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel, true> >
-{
-  public:
-
-    typedef MapBase<Block> Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Block)
-
-    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
-
-    /** Column or Row constructor
-      */
-    inline Block(XprType& xpr, Index i)
-      : Base(internal::const_cast_ptr(&xpr.coeffRef(
-              (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0,
-              (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0)),
-             BlockRows==1 ? 1 : xpr.rows(),
-             BlockCols==1 ? 1 : xpr.cols()),
-        m_xpr(xpr)
-    {
-      eigen_assert( (i>=0) && (
-          ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
-        ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols())));
-      init();
-    }
-
-    /** Fixed-size constructor
-      */
-    inline Block(XprType& xpr, Index startRow, Index startCol)
-      : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol))), m_xpr(xpr)
-    {
-      eigen_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows()
-             && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols());
-      init();
-    }
-
-    /** Dynamic-size constructor
-      */
-    inline Block(XprType& xpr,
-          Index startRow, Index startCol,
-          Index blockRows, Index blockCols)
-      : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol)), blockRows, blockCols),
-        m_xpr(xpr)
-    {
-      eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
-             && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
-      eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows()
-             && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols());
-      init();
-    }
-
-    const typename internal::remove_all<typename XprType::Nested>::type& nestedExpression() const 
-    { 
-      return m_xpr; 
-    }
-      
-    /** \sa MapBase::innerStride() */
-    inline Index innerStride() const
-    {
-      return internal::traits<Block>::HasSameStorageOrderAsXprType
-             ? m_xpr.innerStride()
-             : m_xpr.outerStride();
-    }
-
-    /** \sa MapBase::outerStride() */
-    inline Index outerStride() const
-    {
-      return m_outerStride;
-    }
-
-  #ifndef __SUNPRO_CC
-  // FIXME sunstudio is not friendly with the above friend...
-  // META-FIXME there is no 'friend' keyword around here. Is this obsolete?
-  protected:
-  #endif
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** \internal used by allowAligned() */
-    inline Block(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
-      : Base(data, blockRows, blockCols), m_xpr(xpr)
-    {
-      init();
-    }
-    #endif
-
-  protected:
-    void init()
-    {
-      m_outerStride = internal::traits<Block>::HasSameStorageOrderAsXprType
-                    ? m_xpr.outerStride()
-                    : m_xpr.innerStride();
-    }
-
-    typename XprType::Nested m_xpr;
-    Index m_outerStride;
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_BLOCK_H
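Both Block specializations above are normally reached through DenseBase::block() rather than named directly. A short usage sketch (matrix and variable names illustrative):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 4);

      // Dynamic-size block: the returned expression is Block<MatrixXd>.
      Eigen::MatrixXd topLeft = m.block(0, 0, 2, 2);

      // Fixed-size block: Block<MatrixXd, 2, 2>; its dimensions are part of the
      // type, so evaluating it never triggers a dynamic allocation.
      m.block<2, 2>(2, 2).setZero();   // blocks are lvalues, so writing is allowed

      std::cout << topLeft << "\n\n" << m << std::endl;
      return 0;
    }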
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/CommaInitializer.h b/resources/3rdparty/eigen/Eigen/src/Core/CommaInitializer.h
deleted file mode 100644
index 4adce6414..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/CommaInitializer.h
+++ /dev/null
@@ -1,139 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_COMMAINITIALIZER_H
-#define EIGEN_COMMAINITIALIZER_H
-
-namespace Eigen { 
-
-/** \class CommaInitializer
-  * \ingroup Core_Module
-  *
-  * \brief Helper class used by the comma initializer operator
-  *
-  * This class is internally used to implement the comma initializer feature. It is
-  * the return type of MatrixBase::operator<<, and most of the time this is the only
-  * way it is used.
-  *
-  * \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
-  */
-template<typename XprType>
-struct CommaInitializer
-{
-  typedef typename XprType::Scalar Scalar;
-  typedef typename XprType::Index Index;
-
-  inline CommaInitializer(XprType& xpr, const Scalar& s)
-    : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
-  {
-    m_xpr.coeffRef(0,0) = s;
-  }
-
-  template<typename OtherDerived>
-  inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
-    : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
-  {
-    m_xpr.block(0, 0, other.rows(), other.cols()) = other;
-  }
-
-  /* inserts a scalar value in the target matrix */
-  CommaInitializer& operator,(const Scalar& s)
-  {
-    if (m_col==m_xpr.cols())
-    {
-      m_row+=m_currentBlockRows;
-      m_col = 0;
-      m_currentBlockRows = 1;
-      eigen_assert(m_row<m_xpr.rows()
-        && "Too many rows passed to comma initializer (operator<<)");
-    }
-    eigen_assert(m_col<m_xpr.cols()
-      && "Too many coefficients passed to comma initializer (operator<<)");
-    eigen_assert(m_currentBlockRows==1);
-    m_xpr.coeffRef(m_row, m_col++) = s;
-    return *this;
-  }
-
-  /* inserts a matrix expression in the target matrix */
-  template<typename OtherDerived>
-  CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
-  {
-    if (m_col==m_xpr.cols())
-    {
-      m_row+=m_currentBlockRows;
-      m_col = 0;
-      m_currentBlockRows = other.rows();
-      eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
-        && "Too many rows passed to comma initializer (operator<<)");
-    }
-    eigen_assert(m_col<m_xpr.cols()
-      && "Too many coefficients passed to comma initializer (operator<<)");
-    eigen_assert(m_currentBlockRows==other.rows());
-    if (OtherDerived::SizeAtCompileTime != Dynamic)
-      m_xpr.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
-                              OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1>
-                    (m_row, m_col) = other;
-    else
-      m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other;
-    m_col += other.cols();
-    return *this;
-  }
-
-  inline ~CommaInitializer()
-  {
-    eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows()
-         && m_col == m_xpr.cols()
-         && "Too few coefficients passed to comma initializer (operator<<)");
-  }
-
-  /** \returns the built matrix once all its coefficients have been set.
-    * Calling finished is 100% optional. Its purpose is to write expressions
-    * like this:
-    * \code
-    * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
-    * \endcode
-    */
-  inline XprType& finished() { return m_xpr; }
-
-  XprType& m_xpr;   // target expression
-  Index m_row;              // current row id
-  Index m_col;              // current col id
-  Index m_currentBlockRows; // current block height
-};
-
-/** \anchor MatrixBaseCommaInitRef
-  * Convenient operator to set the coefficients of a matrix.
-  *
-  * The coefficients must be provided in row-major order and exactly match
-  * the size of the matrix. Otherwise an assertion is raised.
-  *
-  * Example: \include MatrixBase_set.cpp
-  * Output: \verbinclude MatrixBase_set.out
-  *
-  * \sa CommaInitializer::finished(), class CommaInitializer
-  */
-template<typename Derived>
-inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
-{
-  return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
-}
-
-/** \sa operator<<(const Scalar&) */
-template<typename Derived>
-template<typename OtherDerived>
-inline CommaInitializer<Derived>
-DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
-{
-  return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_COMMAINITIALIZER_H
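A minimal sketch of the comma initializer in use; the second statement shows the finished() idiom mentioned in the class documentation above:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::Matrix3d m;
      m << 1, 2, 3,   // operator<< returns a CommaInitializer;
           4, 5, 6,   // each operator, appends the next coefficient or block;
           7, 8, 9;   // the destructor asserts that the matrix was filled completely

      // finished() returns the filled matrix so it can be used inside a larger expression.
      Eigen::Vector3d v = (Eigen::Matrix3d() << 0, 1, 0,
                                                1, 0, 0,
                                                0, 0, 1).finished() * Eigen::Vector3d(1, 2, 3);

      std::cout << m << "\n" << v.transpose() << std::endl;
      return 0;
    }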
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/CwiseBinaryOp.h b/resources/3rdparty/eigen/Eigen/src/Core/CwiseBinaryOp.h
deleted file mode 100644
index 686c2afa3..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/CwiseBinaryOp.h
+++ /dev/null
@@ -1,229 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_CWISE_BINARY_OP_H
-#define EIGEN_CWISE_BINARY_OP_H
-
-namespace Eigen {
-
-/** \class CwiseBinaryOp
-  * \ingroup Core_Module
-  *
-  * \brief Generic expression where a coefficient-wise binary operator is applied to two expressions
-  *
-  * \param BinaryOp template functor implementing the operator
-  * \param Lhs the type of the left-hand side
-  * \param Rhs the type of the right-hand side
-  *
-  * This class represents an expression  where a coefficient-wise binary operator is applied to two expressions.
-  * It is the return type of binary operators, by which we mean only those binary operators where
-  * both the left-hand side and the right-hand side are Eigen expressions.
-  * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp.
-  *
-  * Most of the time, this is the only way that it is used, so you typically don't have to name
-  * CwiseBinaryOp types explicitly.
-  *
-  * \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp
-  */
-
-namespace internal {
-template<typename BinaryOp, typename Lhs, typename Rhs>
-struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
-{
-  // we must not inherit from traits<Lhs> since it has
-  // the potential to cause problems with MSVC
-  typedef typename remove_all<Lhs>::type Ancestor;
-  typedef typename traits<Ancestor>::XprKind XprKind;
-  enum {
-    RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
-    ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
-    MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
-    MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
-  };
-
-  // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor),
-  // we still want to handle the case when the result type is different.
-  typedef typename result_of<
-                     BinaryOp(
-                       typename Lhs::Scalar,
-                       typename Rhs::Scalar
-                     )
-                   >::type Scalar;
-  typedef typename promote_storage_type<typename traits<Lhs>::StorageKind,
-                                           typename traits<Rhs>::StorageKind>::ret StorageKind;
-  typedef typename promote_index_type<typename traits<Lhs>::Index,
-                                         typename traits<Rhs>::Index>::type Index;
-  typedef typename Lhs::Nested LhsNested;
-  typedef typename Rhs::Nested RhsNested;
-  typedef typename remove_reference<LhsNested>::type _LhsNested;
-  typedef typename remove_reference<RhsNested>::type _RhsNested;
-  enum {
-    LhsCoeffReadCost = _LhsNested::CoeffReadCost,
-    RhsCoeffReadCost = _RhsNested::CoeffReadCost,
-    LhsFlags = _LhsNested::Flags,
-    RhsFlags = _RhsNested::Flags,
-    SameType = is_same<typename _LhsNested::Scalar,typename _RhsNested::Scalar>::value,
-    StorageOrdersAgree = (int(Lhs::Flags)&RowMajorBit)==(int(Rhs::Flags)&RowMajorBit),
-    Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
-        HereditaryBits
-      | (int(LhsFlags) & int(RhsFlags) &
-           ( AlignedBit
-           | (StorageOrdersAgree ? LinearAccessBit : 0)
-           | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
-           )
-        )
-     ),
-    Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
-    CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits<BinaryOp>::Cost
-  };
-};
-} // end namespace internal
-
-// we require Lhs and Rhs to have the same scalar type. Currently there is no example of a binary functor
-// that would take two operands of different types. If there were such an example, then this check should be
-// moved to the BinaryOp functors, on a per-case basis. This would however require a change in the BinaryOp functors, as
-// currently they take only one typename Scalar template parameter.
-// It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.
-// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to
-// add together a float matrix and a double matrix.
-#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
-  EIGEN_STATIC_ASSERT((internal::functor_allows_mixing_real_and_complex<BINOP>::ret \
-                        ? int(internal::is_same<typename NumTraits<LHS>::Real, typename NumTraits<RHS>::Real>::value) \
-                        : int(internal::is_same<LHS, RHS>::value)), \
-    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-
-template<typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>
-class CwiseBinaryOpImpl;
-
-template<typename BinaryOp, typename Lhs, typename Rhs>
-class CwiseBinaryOp : internal::no_assignment_operator,
-  public CwiseBinaryOpImpl<
-          BinaryOp, Lhs, Rhs,
-          typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
-                                           typename internal::traits<Rhs>::StorageKind>::ret>
-{
-  public:
-
-    typedef typename CwiseBinaryOpImpl<
-        BinaryOp, Lhs, Rhs,
-        typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
-                                         typename internal::traits<Rhs>::StorageKind>::ret>::Base Base;
-    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)
-
-    typedef typename internal::nested<Lhs>::type LhsNested;
-    typedef typename internal::nested<Rhs>::type RhsNested;
-    typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
-    typedef typename internal::remove_reference<RhsNested>::type _RhsNested;
-
-    EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp())
-      : m_lhs(aLhs), m_rhs(aRhs), m_functor(func)
-    {
-      EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar);
-      // require the sizes to match
-      EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)
-      eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols());
-    }
-
-    EIGEN_STRONG_INLINE Index rows() const {
-      // return the fixed size type if available to enable compile time optimizations
-      if (internal::traits<typename internal::remove_all<LhsNested>::type>::RowsAtCompileTime==Dynamic)
-        return m_rhs.rows();
-      else
-        return m_lhs.rows();
-    }
-    EIGEN_STRONG_INLINE Index cols() const {
-      // return the fixed size type if available to enable compile time optimizations
-      if (internal::traits<typename internal::remove_all<LhsNested>::type>::ColsAtCompileTime==Dynamic)
-        return m_rhs.cols();
-      else
-        return m_lhs.cols();
-    }
-
-    /** \returns the left hand side nested expression */
-    const _LhsNested& lhs() const { return m_lhs; }
-    /** \returns the right hand side nested expression */
-    const _RhsNested& rhs() const { return m_rhs; }
-    /** \returns the functor representing the binary operation */
-    const BinaryOp& functor() const { return m_functor; }
-
-  protected:
-    LhsNested m_lhs;
-    RhsNested m_rhs;
-    const BinaryOp m_functor;
-};
-
-template<typename BinaryOp, typename Lhs, typename Rhs>
-class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Dense>
-  : public internal::dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type
-{
-    typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
-  public:
-
-    typedef typename internal::dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
-
-    EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const
-    {
-      return derived().functor()(derived().lhs().coeff(rowId, colId),
-                                 derived().rhs().coeff(rowId, colId));
-    }
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
-    {
-      return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(rowId, colId),
-                                          derived().rhs().template packet<LoadMode>(rowId, colId));
-    }
-
-    EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
-    {
-      return derived().functor()(derived().lhs().coeff(index),
-                                 derived().rhs().coeff(index));
-    }
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
-    {
-      return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(index),
-                                          derived().rhs().template packet<LoadMode>(index));
-    }
-};
-
-/** replaces \c *this by \c *this - \a other.
-  *
-  * \returns a reference to \c *this
-  */
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
-MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
-{
-  SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, Derived, OtherDerived> tmp(derived());
-  tmp = other.derived();
-  return derived();
-}
-
-/** replaces \c *this by \c *this + \a other.
-  *
-  * \returns a reference to \c *this
-  */
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
-MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)
-{
-  SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, Derived, OtherDerived> tmp(derived());
-  tmp = other.derived();
-  return derived();
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_CWISE_BINARY_OP_H
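A short sketch of how CwiseBinaryOp appears in practice, both through the built-in operators and through binaryExpr(); AbsDiffOp is a hypothetical functor introduced only for this example:

    #include <Eigen/Dense>
    #include <cmath>
    #include <iostream>

    // Hypothetical custom binary functor for use with binaryExpr().
    struct AbsDiffOp {
      double operator()(double a, double b) const { return std::abs(a - b); }
    };

    int main() {
      Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
      Eigen::MatrixXd b = Eigen::MatrixXd::Random(3, 3);

      // a + b is a CwiseBinaryOp<scalar_sum_op<double>, ...>; nothing is
      // computed until the expression is assigned or otherwise evaluated.
      Eigen::MatrixXd sum = a + b;

      // binaryExpr() builds a CwiseBinaryOp from a user-supplied functor.
      Eigen::MatrixXd d = a.binaryExpr(b, AbsDiffOp());

      std::cout << sum << "\n\n" << d << std::endl;
      return 0;
    }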
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/CwiseNullaryOp.h b/resources/3rdparty/eigen/Eigen/src/Core/CwiseNullaryOp.h
deleted file mode 100644
index edd2bed46..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/CwiseNullaryOp.h
+++ /dev/null
@@ -1,864 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_CWISE_NULLARY_OP_H
-#define EIGEN_CWISE_NULLARY_OP_H
-
-namespace Eigen {
-
-/** \class CwiseNullaryOp
-  * \ingroup Core_Module
-  *
-  * \brief Generic expression of a matrix where all coefficients are defined by a functor
-  *
-  * \param NullaryOp template functor implementing the operator
-  * \param PlainObjectType the underlying plain matrix/array type
-  *
-  * This class represents an expression of a generic nullary operator.
-  * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods,
-  * and most of the time this is the only way it is used.
-  *
-  * However, if you want to write a function returning such an expression, you
-  * will need to use this class.
-  *
-  * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr()
-  */
-
-namespace internal {
-template<typename NullaryOp, typename PlainObjectType>
-struct traits<CwiseNullaryOp<NullaryOp, PlainObjectType> > : traits<PlainObjectType>
-{
-  enum {
-    Flags = (traits<PlainObjectType>::Flags
-      & (  HereditaryBits
-         | (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
-         | (functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0)))
-      | (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
-    CoeffReadCost = functor_traits<NullaryOp>::Cost
-  };
-};
-}
-
-template<typename NullaryOp, typename PlainObjectType>
-class CwiseNullaryOp : internal::no_assignment_operator,
-  public internal::dense_xpr_base< CwiseNullaryOp<NullaryOp, PlainObjectType> >::type
-{
-  public:
-
-    typedef typename internal::dense_xpr_base<CwiseNullaryOp>::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)
-
-    CwiseNullaryOp(Index nbRows, Index nbCols, const NullaryOp& func = NullaryOp())
-      : m_rows(nbRows), m_cols(nbCols), m_functor(func)
-    {
-      eigen_assert(nbRows >= 0
-            && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == nbRows)
-            &&  nbCols >= 0
-            && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == nbCols));
-    }
-
-    EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }
-    EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); }
-
-    EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const
-    {
-      return m_functor(rowId, colId);
-    }
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
-    {
-      return m_functor.packetOp(rowId, colId);
-    }
-
-    EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
-    {
-      return m_functor(index);
-    }
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
-    {
-      return m_functor.packetOp(index);
-    }
-
-    /** \returns the functor representing the nullary operation */
-    const NullaryOp& functor() const { return m_functor; }
-
-  protected:
-    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
-    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
-    const NullaryOp m_functor;
-};
-
-
-/** \returns an expression of a matrix defined by a custom functor \a func
-  *
-  * The parameters \a rows and \a cols are the number of rows and of columns of
-  * the returned matrix. Must be compatible with this MatrixBase type.
-  *
-  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
-  * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
-  * instead.
-  *
-  * The template parameter \a CustomNullaryOp is the type of the functor.
-  *
-  * \sa class CwiseNullaryOp
-  */
-template<typename Derived>
-template<typename CustomNullaryOp>
-EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
-DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func)
-{
-  return CwiseNullaryOp<CustomNullaryOp, Derived>(rows, cols, func);
-}
-
-/** \returns an expression of a matrix defined by a custom functor \a func
-  *
-  * The parameter \a size is the size of the returned vector.
-  * Must be compatible with this MatrixBase type.
-  *
-  * \only_for_vectors
-  *
-  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
-  * it is redundant to pass \a size as argument, so Zero() should be used
-  * instead.
-  *
-  * The template parameter \a CustomNullaryOp is the type of the functor.
-  *
-  * \sa class CwiseNullaryOp
-  */
-template<typename Derived>
-template<typename CustomNullaryOp>
-EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
-DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  if(RowsAtCompileTime == 1) return CwiseNullaryOp<CustomNullaryOp, Derived>(1, size, func);
-  else return CwiseNullaryOp<CustomNullaryOp, Derived>(size, 1, func);
-}
-
-/** \returns an expression of a matrix defined by a custom functor \a func
-  *
-  * This variant is only for fixed-size DenseBase types. For dynamic-size types, you
-  * need to use the variants taking size arguments.
-  *
-  * The template parameter \a CustomNullaryOp is the type of the functor.
-  *
-  * \sa class CwiseNullaryOp
-  */
-template<typename Derived>
-template<typename CustomNullaryOp>
-EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
-DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
-{
-  return CwiseNullaryOp<CustomNullaryOp, Derived>(RowsAtCompileTime, ColsAtCompileTime, func);
-}
-
-/** \returns an expression of a constant matrix of value \a value
-  *
-  * The parameters \a rows and \a cols are the number of rows and of columns of
-  * the returned matrix. Must be compatible with this DenseBase type.
-  *
-  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
-  * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
-  * instead.
-  *
-  * The template parameter \a CustomNullaryOp is the type of the functor.
-  *
-  * \sa class CwiseNullaryOp
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value)
-{
-  return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_constant_op<Scalar>(value));
-}
-
-/** \returns an expression of a constant matrix of value \a value
-  *
-  * The parameter \a size is the size of the returned vector.
-  * Must be compatible with this DenseBase type.
-  *
-  * \only_for_vectors
-  *
-  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
-  * it is redundant to pass \a size as argument, so Zero() should be used
-  * instead.
-  *
-  * The template parameter \a CustomNullaryOp is the type of the functor.
-  *
-  * \sa class CwiseNullaryOp
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Constant(Index size, const Scalar& value)
-{
-  return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value));
-}
-
-/** \returns an expression of a constant matrix of value \a value
-  *
-  * This variant is only for fixed-size DenseBase types. For dynamic-size types, you
-  * need to use the variants taking size arguments.
-  *
-  * The template parameter \a CustomNullaryOp is the type of the functor.
-  *
-  * \sa class CwiseNullaryOp
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Constant(const Scalar& value)
-{
-  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
-  return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op<Scalar>(value));
-}
-
-/**
-  * \brief Sets a linearly spaced vector.
-  *
-  * The function generates 'size' equally spaced values in the closed interval [low,high].
-  * This particular version of LinSpaced() uses sequential access, i.e. vector access is
-  * assumed to be a(0), a(1), ..., a(size-1). This assumption allows for better vectorization
-  * and yields faster code than the random access version.
-  *
-  * When size is set to 1, a vector of length 1 containing 'high' is returned.
-  *
-  * \only_for_vectors
-  *
-  * Example: \include DenseBase_LinSpaced_seq.cpp
-  * Output: \verbinclude DenseBase_LinSpaced_seq.out
-  *
-  * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Index,Scalar,Scalar), CwiseNullaryOp
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
-DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,false>(low,high,size));
-}
-
-/**
-  * \copydoc DenseBase::LinSpaced(Sequential_t, Index, const Scalar&, const Scalar&)
-  * Special version for fixed size types which does not require the size parameter.
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
-DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
-  return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,false>(low,high,Derived::SizeAtCompileTime));
-}
-
-/**
-  * \brief Sets a linearly spaced vector.
-  *
-  * The function generates 'size' equally spaced values in the closed interval [low,high].
-  * When size is set to 1, a vector of length 1 containing 'high' is returned.
-  *
-  * \only_for_vectors
-  *
-  * Example: \include DenseBase_LinSpaced.cpp
-  * Output: \verbinclude DenseBase_LinSpaced.out
-  *
-  * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Sequential_t,Index,const Scalar&,const Scalar&,Index), CwiseNullaryOp
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
-DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,true>(low,high,size));
-}
-
-/**
-  * \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&)
-  * Special version for fixed size types which does not require the size parameter.
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
-DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
-  return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,true>(low,high,Derived::SizeAtCompileTime));
-}
-
-/** \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
-template<typename Derived>
-bool DenseBase<Derived>::isApproxToConstant
-(const Scalar& val, const RealScalar& prec) const
-{
-  for(Index j = 0; j < cols(); ++j)
-    for(Index i = 0; i < rows(); ++i)
-      if(!internal::isApprox(this->coeff(i, j), val, prec))
-        return false;
-  return true;
-}
-
-/** This is just an alias for isApproxToConstant().
-  *
-  * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
-template<typename Derived>
-bool DenseBase<Derived>::isConstant
-(const Scalar& val, const RealScalar& prec) const
-{
-  return isApproxToConstant(val, prec);
-}
-
-/** Alias for setConstant(): sets all coefficients in this expression to \a value.
-  *
-  * \sa setConstant(), Constant(), class CwiseNullaryOp
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val)
-{
-  setConstant(val);
-}
-
-/** Sets all coefficients in this expression to \a value.
-  *
-  * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val)
-{
-  return derived() = Constant(rows(), cols(), val);
-}
-
-/** Resizes to the given \a size, and sets all coefficients in this expression to the given \a value.
-  *
-  * \only_for_vectors
-  *
-  * Example: \include Matrix_setConstant_int.cpp
-  * Output: \verbinclude Matrix_setConstant_int.out
-  *
-  * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setConstant(Index size, const Scalar& val)
-{
-  resize(size);
-  return setConstant(val);
-}
-
-/** Resizes to the given size, and sets all coefficients in this expression to the given \a value.
-  *
-  * \param rows the new number of rows
-  * \param cols the new number of columns
-  * \param value the value to which all coefficients are set
-  *
-  * Example: \include Matrix_setConstant_int_int.cpp
-  * Output: \verbinclude Matrix_setConstant_int_int.out
-  *
-  * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setConstant(Index nbRows, Index nbCols, const Scalar& val)
-{
-  resize(nbRows, nbCols);
-  return setConstant(val);
-}
-
-/**
-  * \brief Sets a linearly spaced vector.
-  *
-  * The function generates 'size' equally spaced values in the closed interval [low,high].
-  * When size is set to 1, a vector of length 1 containing 'high' is returned.
-  *
-  * \only_for_vectors
-  *
-  * Example: \include DenseBase_setLinSpaced.cpp
-  * Output: \verbinclude DenseBase_setLinSpaced.out
-  *
-  * \sa CwiseNullaryOp
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar,false>(low,high,newSize));
-}
-
-/**
-  * \brief Sets a linearly spaced vector.
-  *
-  * The function fills *this with equally spaced values in the closed interval [low,high].
-  * When size is set to 1, a vector of length 1 containing 'high' is returned.
-  *
-  * \only_for_vectors
-  *
-  * \sa setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return setLinSpaced(size(), low, high);
-}
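A minimal sketch of the LinSpaced()/setLinSpaced() variants documented above; the expected values in the comments follow from the closed-interval definition:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      // LinSpaced(size, low, high): 'size' equally spaced values in [low, high].
      Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);  // 0, 0.25, 0.5, 0.75, 1

      // setLinSpaced(n, low, high) fills *this with n equally spaced values in place.
      Eigen::VectorXd w(4);
      w.setLinSpaced(4, -1.0, 1.0);                                 // -1, -1/3, 1/3, 1

      std::cout << v.transpose() << "\n" << w.transpose() << std::endl;
      return 0;
    }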
-
-// zero:
-
-/** \returns an expression of a zero matrix.
-  *
-  * The parameters \a rows and \a cols are the number of rows and of columns of
-  * the returned matrix. Must be compatible with this MatrixBase type.
-  *
-  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
-  * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
-  * instead.
-  *
-  * Example: \include MatrixBase_zero_int_int.cpp
-  * Output: \verbinclude MatrixBase_zero_int_int.out
-  *
-  * \sa Zero(), Zero(Index)
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Zero(Index nbRows, Index nbCols)
-{
-  return Constant(nbRows, nbCols, Scalar(0));
-}
-
-/** \returns an expression of a zero vector.
-  *
-  * The parameter \a size is the size of the returned vector.
-  * Must be compatible with this MatrixBase type.
-  *
-  * \only_for_vectors
-  *
-  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
-  * it is redundant to pass \a size as argument, so Zero() should be used
-  * instead.
-  *
-  * Example: \include MatrixBase_zero_int.cpp
-  * Output: \verbinclude MatrixBase_zero_int.out
-  *
-  * \sa Zero(), Zero(Index,Index)
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Zero(Index size)
-{
-  return Constant(size, Scalar(0));
-}
-
-/** \returns an expression of a fixed-size zero matrix or vector.
-  *
-  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
-  * need to use the variants taking size arguments.
-  *
-  * Example: \include MatrixBase_zero.cpp
-  * Output: \verbinclude MatrixBase_zero.out
-  *
-  * \sa Zero(Index), Zero(Index,Index)
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Zero()
-{
-  return Constant(Scalar(0));
-}
-
-/** \returns true if *this is approximately equal to the zero matrix,
-  *          within the precision given by \a prec.
-  *
-  * Example: \include MatrixBase_isZero.cpp
-  * Output: \verbinclude MatrixBase_isZero.out
-  *
-  * \sa class CwiseNullaryOp, Zero()
-  */
-template<typename Derived>
-bool DenseBase<Derived>::isZero(const RealScalar& prec) const
-{
-  for(Index j = 0; j < cols(); ++j)
-    for(Index i = 0; i < rows(); ++i)
-      if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast<Scalar>(1), prec))
-        return false;
-  return true;
-}
-
-/** Sets all coefficients in this expression to zero.
-  *
-  * Example: \include MatrixBase_setZero.cpp
-  * Output: \verbinclude MatrixBase_setZero.out
-  *
-  * \sa class CwiseNullaryOp, Zero()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()
-{
-  return setConstant(Scalar(0));
-}
-
-/** Resizes to the given \a size, and sets all coefficients in this expression to zero.
-  *
-  * \only_for_vectors
-  *
-  * Example: \include Matrix_setZero_int.cpp
-  * Output: \verbinclude Matrix_setZero_int.out
-  *
-  * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setZero(Index newSize)
-{
-  resize(newSize);
-  return setConstant(Scalar(0));
-}
-
-/** Resizes to the given size, and sets all coefficients in this expression to zero.
-  *
-  * \param rows the new number of rows
-  * \param cols the new number of columns
-  *
-  * Example: \include Matrix_setZero_int_int.cpp
-  * Output: \verbinclude Matrix_setZero_int_int.out
-  *
-  * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setZero(Index nbRows, Index nbCols)
-{
-  resize(nbRows, nbCols);
-  return setConstant(Scalar(0));
-}
-
-// ones:
-
-/** \returns an expression of a matrix where all coefficients equal one.
-  *
-  * The parameters \a rows and \a cols are the number of rows and of columns of
-  * the returned matrix. Must be compatible with this MatrixBase type.
-  *
-  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
-  * it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used
-  * instead.
-  *
-  * Example: \include MatrixBase_ones_int_int.cpp
-  * Output: \verbinclude MatrixBase_ones_int_int.out
-  *
-  * \sa Ones(), Ones(Index), isOnes(), class Ones
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Ones(Index nbRows, Index nbCols)
-{
-  return Constant(nbRows, nbCols, Scalar(1));
-}
-
-/** \returns an expression of a vector where all coefficients equal one.
-  *
-  * The parameter \a size is the size of the returned vector.
-  * Must be compatible with this MatrixBase type.
-  *
-  * \only_for_vectors
-  *
-  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
-  * it is redundant to pass \a size as argument, so Ones() should be used
-  * instead.
-  *
-  * Example: \include MatrixBase_ones_int.cpp
-  * Output: \verbinclude MatrixBase_ones_int.out
-  *
-  * \sa Ones(), Ones(Index,Index), isOnes(), class Ones
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Ones(Index newSize)
-{
-  return Constant(newSize, Scalar(1));
-}
-
-/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one.
-  *
-  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
-  * need to use the variants taking size arguments.
-  *
-  * Example: \include MatrixBase_ones.cpp
-  * Output: \verbinclude MatrixBase_ones.out
-  *
-  * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Ones()
-{
-  return Constant(Scalar(1));
-}
-
-/** \returns true if *this is approximately equal to the matrix where all coefficients
-  *          are equal to 1, within the precision given by \a prec.
-  *
-  * Example: \include MatrixBase_isOnes.cpp
-  * Output: \verbinclude MatrixBase_isOnes.out
-  *
-  * \sa class CwiseNullaryOp, Ones()
-  */
-template<typename Derived>
-bool DenseBase<Derived>::isOnes
-(const RealScalar& prec) const
-{
-  return isApproxToConstant(Scalar(1), prec);
-}
-
-/** Sets all coefficients in this expression to one.
-  *
-  * Example: \include MatrixBase_setOnes.cpp
-  * Output: \verbinclude MatrixBase_setOnes.out
-  *
-  * \sa class CwiseNullaryOp, Ones()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
-{
-  return setConstant(Scalar(1));
-}
-
-/** Resizes to the given \a size, and sets all coefficients in this expression to one.
-  *
-  * \only_for_vectors
-  *
-  * Example: \include Matrix_setOnes_int.cpp
-  * Output: \verbinclude Matrix_setOnes_int.out
-  *
-  * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setOnes(Index newSize)
-{
-  resize(newSize);
-  return setConstant(Scalar(1));
-}
-
-/** Resizes to the given size, and sets all coefficients in this expression to one.
-  *
-  * \param rows the new number of rows
-  * \param cols the new number of columns
-  *
-  * Example: \include Matrix_setOnes_int_int.cpp
-  * Output: \verbinclude Matrix_setOnes_int_int.out
-  *
-  * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setOnes(Index nbRows, Index nbCols)
-{
-  resize(nbRows, nbCols);
-  return setConstant(Scalar(1));
-}
-
-// Identity:
-
-/** \returns an expression of the identity matrix (not necessarily square).
-  *
-  * The parameters \a rows and \a cols are the number of rows and of columns of
-  * the returned matrix. Must be compatible with this MatrixBase type.
-  *
-  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
-  * it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used
-  * instead.
-  *
-  * Example: \include MatrixBase_identity_int_int.cpp
-  * Output: \verbinclude MatrixBase_identity_int_int.out
-  *
-  * \sa Identity(), setIdentity(), isIdentity()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
-MatrixBase<Derived>::Identity(Index nbRows, Index nbCols)
-{
-  return DenseBase<Derived>::NullaryExpr(nbRows, nbCols, internal::scalar_identity_op<Scalar>());
-}
-
-/** \returns an expression of the identity matrix (not necessarily square).
-  *
-  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
-  * need to use the variant taking size arguments.
-  *
-  * Example: \include MatrixBase_identity.cpp
-  * Output: \verbinclude MatrixBase_identity.out
-  *
-  * \sa Identity(Index,Index), setIdentity(), isIdentity()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
-MatrixBase<Derived>::Identity()
-{
-  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
-  return MatrixBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op<Scalar>());
-}
-
-/** \returns true if *this is approximately equal to the identity matrix
-  *          (not necessarily square),
-  *          within the precision given by \a prec.
-  *
-  * Example: \include MatrixBase_isIdentity.cpp
-  * Output: \verbinclude MatrixBase_isIdentity.out
-  *
-  * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity()
-  */
-template<typename Derived>
-bool MatrixBase<Derived>::isIdentity
-(const RealScalar& prec) const
-{
-  for(Index j = 0; j < cols(); ++j)
-  {
-    for(Index i = 0; i < rows(); ++i)
-    {
-      if(i == j)
-      {
-        if(!internal::isApprox(this->coeff(i, j), static_cast<Scalar>(1), prec))
-          return false;
-      }
-      else
-      {
-        if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast<RealScalar>(1), prec))
-          return false;
-      }
-    }
-  }
-  return true;
-}
-
-namespace internal {
-
-template<typename Derived, bool Big = (Derived::SizeAtCompileTime>=16)>
-struct setIdentity_impl
-{
-  static EIGEN_STRONG_INLINE Derived& run(Derived& m)
-  {
-    return m = Derived::Identity(m.rows(), m.cols());
-  }
-};
-
-template<typename Derived>
-struct setIdentity_impl<Derived, true>
-{
-  typedef typename Derived::Index Index;
-  static EIGEN_STRONG_INLINE Derived& run(Derived& m)
-  {
-    m.setZero();
-    const Index size = (std::min)(m.rows(), m.cols());
-    for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
-    return m;
-  }
-};
-
-} // end namespace internal
-
-/** Writes the identity expression (not necessarily square) into *this.
-  *
-  * Example: \include MatrixBase_setIdentity.cpp
-  * Output: \verbinclude MatrixBase_setIdentity.out
-  *
-  * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
-{
-  return internal::setIdentity_impl<Derived>::run(derived());
-}
-
-/** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this.
-  *
-  * \param rows the new number of rows
-  * \param cols the new number of columns
-  *
-  * Example: \include Matrix_setIdentity_int_int.cpp
-  * Output: \verbinclude Matrix_setIdentity_int_int.out
-  *
-  * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index nbRows, Index nbCols)
-{
-  derived().resize(nbRows, nbCols);
-  return setIdentity();
-}
-
-/** \returns an expression of the i-th unit (basis) vector.
-  *
-  * \only_for_vectors
-  *
-  * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index newSize, Index i)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return BasisReturnType(SquareMatrixType::Identity(newSize,newSize), i);
-}
-
-/** \returns an expression of the i-th unit (basis) vector.
-  *
-  * \only_for_vectors
-  *
-  * This variant is for fixed-size vectors only.
-  *
-  * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return BasisReturnType(SquareMatrixType::Identity(),i);
-}
-
-/** \returns an expression of the X axis unit vector (1{,0}^*)
-  *
-  * \only_for_vectors
-  *
-  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX()
-{ return Derived::Unit(0); }
-
-/** \returns an expression of the Y axis unit vector (0,1{,0}^*)
-  *
-  * \only_for_vectors
-  *
-  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitZ(), MatrixBase::UnitW()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY()
-{ return Derived::Unit(1); }
-
-/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*)
-  *
-  * \only_for_vectors
-  *
-  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitW()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ()
-{ return Derived::Unit(2); }
-
-/** \returns an expression of the W axis unit vector (0,0,0,1)
-  *
-  * \only_for_vectors
-  *
-  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW()
-{ return Derived::Unit(3); }
-
-} // end namespace Eigen
-
-#endif // EIGEN_CWISE_NULLARY_OP_H
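For orientation, a minimal sketch of how the nullary helpers documented in the deleted file above (Ones, setOnes, setIdentity, Unit) are typically used; the sizes and values below are illustrative only:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Dynamic-size variants take the dimensions explicitly.
  Eigen::MatrixXd m = Eigen::MatrixXd::Ones(2, 3);   // 2x3 matrix of ones
  m.setOnes(3, 3);                                   // resize to 3x3, refill with ones

  // setIdentity() dispatches to internal::setIdentity_impl: when SizeAtCompileTime >= 16
  // it zeroes the matrix and writes the diagonal instead of evaluating Identity().
  Eigen::Matrix4d id;
  id.setIdentity();

  // Unit(i) is the i-th basis vector; UnitX()/UnitY()/UnitZ()/UnitW() cover i = 0..3.
  Eigen::Vector3d e1 = Eigen::Vector3d::UnitY();     // (0, 1, 0)

  std::cout << m.isOnes() << ' ' << id.isIdentity() << ' ' << e1.transpose() << '\n';
  return 0;
}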
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/CwiseUnaryOp.h b/resources/3rdparty/eigen/Eigen/src/Core/CwiseUnaryOp.h
deleted file mode 100644
index f2de749f9..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/CwiseUnaryOp.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_CWISE_UNARY_OP_H
-#define EIGEN_CWISE_UNARY_OP_H
-
-namespace Eigen { 
-
-/** \class CwiseUnaryOp
-  * \ingroup Core_Module
-  *
-  * \brief Generic expression where a coefficient-wise unary operator is applied to an expression
-  *
-  * \param UnaryOp template functor implementing the operator
-  * \param XprType the type of the expression to which we are applying the unary operator
-  *
-  * This class represents an expression where a unary operator is applied to an expression.
-  * It is the return type of all operations taking exactly 1 input expression, regardless of the
-  * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix
-  * is considered unary, because only the right-hand side is an expression, and its
-  * return type is a specialization of CwiseUnaryOp.
-  *
-  * Most of the time, this is the only way that it is used, so you typically don't have to name
-  * CwiseUnaryOp types explicitly.
-  *
-  * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp
-  */
-
-namespace internal {
-template<typename UnaryOp, typename XprType>
-struct traits<CwiseUnaryOp<UnaryOp, XprType> >
- : traits<XprType>
-{
-  typedef typename result_of<
-                     UnaryOp(typename XprType::Scalar)
-                   >::type Scalar;
-  typedef typename XprType::Nested XprTypeNested;
-  typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
-  enum {
-    Flags = _XprTypeNested::Flags & (
-      HereditaryBits | LinearAccessBit | AlignedBit
-      | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
-    CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits<UnaryOp>::Cost
-  };
-};
-}
-
-template<typename UnaryOp, typename XprType, typename StorageKind>
-class CwiseUnaryOpImpl;
-
-template<typename UnaryOp, typename XprType>
-class CwiseUnaryOp : internal::no_assignment_operator,
-  public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>
-{
-  public:
-
-    typedef typename CwiseUnaryOpImpl<UnaryOp, XprType,typename internal::traits<XprType>::StorageKind>::Base Base;
-    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)
-
-    inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
-      : m_xpr(xpr), m_functor(func) {}
-
-    EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); }
-    EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); }
-
-    /** \returns the functor representing the unary operation */
-    const UnaryOp& functor() const { return m_functor; }
-
-    /** \returns the nested expression */
-    const typename internal::remove_all<typename XprType::Nested>::type&
-    nestedExpression() const { return m_xpr; }
-
-    /** \returns the nested expression */
-    typename internal::remove_all<typename XprType::Nested>::type&
-    nestedExpression() { return m_xpr.const_cast_derived(); }
-
-  protected:
-    typename XprType::Nested m_xpr;
-    const UnaryOp m_functor;
-};
-
-// This is the generic implementation for dense storage.
-// It can be used for any expression types implementing the dense concept.
-template<typename UnaryOp, typename XprType>
-class CwiseUnaryOpImpl<UnaryOp,XprType,Dense>
-  : public internal::dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type
-{
-  public:
-
-    typedef CwiseUnaryOp<UnaryOp, XprType> Derived;
-    typedef typename internal::dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
-
-    EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const
-    {
-      return derived().functor()(derived().nestedExpression().coeff(rowId, colId));
-    }
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
-    {
-      return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(rowId, colId));
-    }
-
-    EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
-    {
-      return derived().functor()(derived().nestedExpression().coeff(index));
-    }
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
-    {
-      return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(index));
-    }
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_CWISE_UNARY_OP_H
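As the class documentation above notes, CwiseUnaryOp is rarely named explicitly; it is simply the return type of coefficient-wise unary operations. A small sketch, where the Clamp01 functor is an illustrative example rather than anything from the library:

#include <Eigen/Dense>

// Illustrative functor: its call operator defines the coefficient-wise operation.
struct Clamp01
{
  double operator()(double x) const { return x < 0.0 ? 0.0 : (x > 1.0 ? 1.0 : x); }
};

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(2, 2);      // coefficients in [-1, 1]

  // unaryExpr() returns a CwiseUnaryOp<Clamp01, ...> expression; nothing is
  // computed until the expression is assigned or otherwise evaluated.
  Eigen::MatrixXd clamped = m.unaryExpr(Clamp01());

  // Scalar multiplication is also "unary": only the right-hand side is an
  // expression, so 3.0 * m is a CwiseUnaryOp specialization as well.
  Eigen::MatrixXd scaled = 3.0 * m;
  return 0;
}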
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/DenseBase.h b/resources/3rdparty/eigen/Eigen/src/Core/DenseBase.h
deleted file mode 100644
index 8dc593174..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/DenseBase.h
+++ /dev/null
@@ -1,533 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_DENSEBASE_H
-#define EIGEN_DENSEBASE_H
-
-namespace Eigen {
-
-/** \class DenseBase
-  * \ingroup Core_Module
-  *
-  * \brief Base class for all dense matrices, vectors, and arrays
-  *
-  * This class is the base that is inherited by all dense objects (matrix, vector, arrays,
-  * and related expression types). The common Eigen API for dense objects is contained in this class.
-  *
-  * \tparam Derived is the derived type, e.g., a matrix type or an expression.
-  *
-  * This class can be extended with the help of the plugin mechanism described on the page
-  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN.
-  *
-  * \sa \ref TopicClassHierarchy
-  */
-template<typename Derived> class DenseBase
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-  : public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
-                                     typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>
-#else
-  : public DenseCoeffsBase<Derived>
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-{
-  public:
-    using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
-                typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
-
-    class InnerIterator;
-
-    typedef typename internal::traits<Derived>::StorageKind StorageKind;
-
-    /** \brief The type of indices 
-      * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
-      * \sa \ref TopicPreprocessorDirectives.
-      */
-    typedef typename internal::traits<Derived>::Index Index; 
-
-    typedef typename internal::traits<Derived>::Scalar Scalar;
-    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-
-    typedef DenseCoeffsBase<Derived> Base;
-    using Base::derived;
-    using Base::const_cast_derived;
-    using Base::rows;
-    using Base::cols;
-    using Base::size;
-    using Base::rowIndexByOuterInner;
-    using Base::colIndexByOuterInner;
-    using Base::coeff;
-    using Base::coeffByOuterInner;
-    using Base::packet;
-    using Base::packetByOuterInner;
-    using Base::writePacket;
-    using Base::writePacketByOuterInner;
-    using Base::coeffRef;
-    using Base::coeffRefByOuterInner;
-    using Base::copyCoeff;
-    using Base::copyCoeffByOuterInner;
-    using Base::copyPacket;
-    using Base::copyPacketByOuterInner;
-    using Base::operator();
-    using Base::operator[];
-    using Base::x;
-    using Base::y;
-    using Base::z;
-    using Base::w;
-    using Base::stride;
-    using Base::innerStride;
-    using Base::outerStride;
-    using Base::rowStride;
-    using Base::colStride;
-    typedef typename Base::CoeffReturnType CoeffReturnType;
-
-    enum {
-
-      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
-        /**< The number of rows at compile-time. This is just a copy of the value provided
-          * by the \a Derived type. If a value is not known at compile-time,
-          * it is set to the \a Dynamic constant.
-          * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
-
-      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
-        /**< The number of columns at compile-time. This is just a copy of the value provided
-          * by the \a Derived type. If a value is not known at compile-time,
-          * it is set to the \a Dynamic constant.
-          * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
-
-
-      SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
-                                                   internal::traits<Derived>::ColsAtCompileTime>::ret),
-        /**< This is equal to the number of coefficients, i.e. the number of
-          * rows times the number of columns, or to \a Dynamic if this is not
-          * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
-
-      MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
-        /**< This value is equal to the maximum possible number of rows that this expression
-          * might have. If this expression might have an arbitrarily high number of rows,
-          * this value is set to \a Dynamic.
-          *
-          * This value is useful to know when evaluating an expression, in order to determine
-          * whether it is possible to avoid doing a dynamic memory allocation.
-          *
-          * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime
-          */
-
-      MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
-        /**< This value is equal to the maximum possible number of columns that this expression
-          * might have. If this expression might have an arbitrarily high number of columns,
-          * this value is set to \a Dynamic.
-          *
-          * This value is useful to know when evaluating an expression, in order to determine
-          * whether it is possible to avoid doing a dynamic memory allocation.
-          *
-          * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime
-          */
-
-      MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,
-                                                      internal::traits<Derived>::MaxColsAtCompileTime>::ret),
-        /**< This value is equal to the maximum possible number of coefficients that this expression
-          * might have. If this expression might have an arbitrarily high number of coefficients,
-          * this value is set to \a Dynamic.
-          *
-          * This value is useful to know when evaluating an expression, in order to determine
-          * whether it is possible to avoid doing a dynamic memory allocation.
-          *
-          * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime
-          */
-
-      IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1
-                           || internal::traits<Derived>::MaxColsAtCompileTime == 1,
-        /**< This is set to true if either the number of rows or the number of
-          * columns is known at compile-time to be equal to 1. Indeed, in that case,
-          * we are dealing with a column-vector (if there is only one column) or with
-          * a row-vector (if there is only one row). */
-
-      Flags = internal::traits<Derived>::Flags,
-        /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
-          * constructed from this one. See the \ref flags "list of flags".
-          */
-
-      IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */
-
-      InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
-                             : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
-
-      CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
-        /**< This is a rough measure of how expensive it is to read one coefficient from
-          * this expression.
-          */
-
-      InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret,
-      OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret
-    };
-
-    enum { ThisConstantIsPrivateInPlainObjectBase };
-
-    /** \returns the number of nonzero coefficients which is in practice the number
-      * of stored coefficients. */
-    inline Index nonZeros() const { return size(); }
-    /** \returns true if either the number of rows or the number of columns is equal to 1.
-      * In other words, this function returns
-      * \code rows()==1 || cols()==1 \endcode
-      * \sa rows(), cols(), IsVectorAtCompileTime. */
-
-    /** \returns the outer size.
-      *
-      * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension
-      * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a
-      * column-major matrix, and the number of rows for a row-major matrix. */
-    Index outerSize() const
-    {
-      return IsVectorAtCompileTime ? 1
-           : int(IsRowMajor) ? this->rows() : this->cols();
-    }
-
-    /** \returns the inner size.
-      *
-      * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension
-      * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a 
-      * column-major matrix, and the number of columns for a row-major matrix. */
-    Index innerSize() const
-    {
-      return IsVectorAtCompileTime ? this->size()
-           : int(IsRowMajor) ? this->cols() : this->rows();
-    }
-
-    /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
-      * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
-      * nothing else.
-      */
-    void resize(Index newSize)
-    {
-      EIGEN_ONLY_USED_FOR_DEBUG(newSize);
-      eigen_assert(newSize == this->size()
-                && "DenseBase::resize() does not actually allow to resize.");
-    }
-    /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
-      * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
-      * nothing else.
-      */
-    void resize(Index nbRows, Index nbCols)
-    {
-      EIGEN_ONLY_USED_FOR_DEBUG(nbRows);
-      EIGEN_ONLY_USED_FOR_DEBUG(nbCols);
-      eigen_assert(nbRows == this->rows() && nbCols == this->cols()
-                && "DenseBase::resize() does not actually allow to resize.");
-    }
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-
-    /** \internal Represents a matrix with all coefficients equal to one another*/
-    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Derived> ConstantReturnType;
-    /** \internal Represents a vector with linearly spaced coefficients that allows sequential access only. */
-    typedef CwiseNullaryOp<internal::linspaced_op<Scalar,false>,Derived> SequentialLinSpacedReturnType;
-    /** \internal Represents a vector with linearly spaced coefficients that allows random access. */
-    typedef CwiseNullaryOp<internal::linspaced_op<Scalar,true>,Derived> RandomAccessLinSpacedReturnType;
-    /** \internal the return type of MatrixBase::eigenvalues() */
-    typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;
-
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
-    /** Copies \a other into *this. \returns a reference to *this. */
-    template<typename OtherDerived>
-    Derived& operator=(const DenseBase<OtherDerived>& other);
-
-    /** Special case of the template operator=, in order to prevent the compiler
-      * from generating a default operator= (issue hit with g++ 4.1)
-      */
-    Derived& operator=(const DenseBase& other);
-
-    template<typename OtherDerived>
-    Derived& operator=(const EigenBase<OtherDerived> &other);
-
-    template<typename OtherDerived>
-    Derived& operator+=(const EigenBase<OtherDerived> &other);
-
-    template<typename OtherDerived>
-    Derived& operator-=(const EigenBase<OtherDerived> &other);
-
-    template<typename OtherDerived>
-    Derived& operator=(const ReturnByValue<OtherDerived>& func);
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** Copies \a other into *this without evaluating other. \returns a reference to *this. */
-    template<typename OtherDerived>
-    Derived& lazyAssign(const DenseBase<OtherDerived>& other);
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
-    CommaInitializer<Derived> operator<< (const Scalar& s);
-
-    template<unsigned int Added,unsigned int Removed>
-    const Flagged<Derived, Added, Removed> flagged() const;
-
-    template<typename OtherDerived>
-    CommaInitializer<Derived> operator<< (const DenseBase<OtherDerived>& other);
-
-    Eigen::Transpose<Derived> transpose();
-    typedef const Transpose<const Derived> ConstTransposeReturnType;
-    ConstTransposeReturnType transpose() const;
-    void transposeInPlace();
-#ifndef EIGEN_NO_DEBUG
-  protected:
-    template<typename OtherDerived>
-    void checkTransposeAliasing(const OtherDerived& other) const;
-  public:
-#endif
-
-    typedef VectorBlock<Derived> SegmentReturnType;
-    typedef const VectorBlock<const Derived> ConstSegmentReturnType;
-    template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };
-    template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };
-    
-    // Note: The "DenseBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
-    SegmentReturnType segment(Index start, Index size);
-    typename DenseBase::ConstSegmentReturnType segment(Index start, Index size) const;
-
-    SegmentReturnType head(Index size);
-    typename DenseBase::ConstSegmentReturnType head(Index size) const;
-
-    SegmentReturnType tail(Index size);
-    typename DenseBase::ConstSegmentReturnType tail(Index size) const;
-
-    template<int Size> typename FixedSegmentReturnType<Size>::Type head();
-    template<int Size> typename ConstFixedSegmentReturnType<Size>::Type head() const;
-
-    template<int Size> typename FixedSegmentReturnType<Size>::Type tail();
-    template<int Size> typename ConstFixedSegmentReturnType<Size>::Type tail() const;
-
-    template<int Size> typename FixedSegmentReturnType<Size>::Type segment(Index start);
-    template<int Size> typename ConstFixedSegmentReturnType<Size>::Type segment(Index start) const;
-
-    static const ConstantReturnType
-    Constant(Index rows, Index cols, const Scalar& value);
-    static const ConstantReturnType
-    Constant(Index size, const Scalar& value);
-    static const ConstantReturnType
-    Constant(const Scalar& value);
-
-    static const SequentialLinSpacedReturnType
-    LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high);
-    static const RandomAccessLinSpacedReturnType
-    LinSpaced(Index size, const Scalar& low, const Scalar& high);
-    static const SequentialLinSpacedReturnType
-    LinSpaced(Sequential_t, const Scalar& low, const Scalar& high);
-    static const RandomAccessLinSpacedReturnType
-    LinSpaced(const Scalar& low, const Scalar& high);
-
-    template<typename CustomNullaryOp>
-    static const CwiseNullaryOp<CustomNullaryOp, Derived>
-    NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func);
-    template<typename CustomNullaryOp>
-    static const CwiseNullaryOp<CustomNullaryOp, Derived>
-    NullaryExpr(Index size, const CustomNullaryOp& func);
-    template<typename CustomNullaryOp>
-    static const CwiseNullaryOp<CustomNullaryOp, Derived>
-    NullaryExpr(const CustomNullaryOp& func);
-
-    static const ConstantReturnType Zero(Index rows, Index cols);
-    static const ConstantReturnType Zero(Index size);
-    static const ConstantReturnType Zero();
-    static const ConstantReturnType Ones(Index rows, Index cols);
-    static const ConstantReturnType Ones(Index size);
-    static const ConstantReturnType Ones();
-
-    void fill(const Scalar& value);
-    Derived& setConstant(const Scalar& value);
-    Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high);
-    Derived& setLinSpaced(const Scalar& low, const Scalar& high);
-    Derived& setZero();
-    Derived& setOnes();
-    Derived& setRandom();
-
-    template<typename OtherDerived>
-    bool isApprox(const DenseBase<OtherDerived>& other,
-                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-    bool isMuchSmallerThan(const RealScalar& other,
-                           const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-    template<typename OtherDerived>
-    bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,
-                           const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-
-    bool isApproxToConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-    bool isConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-    bool isZero(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-    bool isOnes(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-
-    inline Derived& operator*=(const Scalar& other);
-    inline Derived& operator/=(const Scalar& other);
-
-    typedef typename internal::add_const_on_value_type<typename internal::eval<Derived>::type>::type EvalReturnType;
-    /** \returns the matrix or vector obtained by evaluating this expression.
-      *
-      * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
-      * a const reference, in order to avoid a useless copy.
-      */
-    EIGEN_STRONG_INLINE EvalReturnType eval() const
-    {
-      // Even though MSVC does not honor strong inlining when the return type
-      // is a dynamic matrix, we desperately need strong inlining for fixed
-      // size types on MSVC.
-      return typename internal::eval<Derived>::type(derived());
-    }
-
-    /** swaps *this with the expression \a other.
-      *
-      */
-    template<typename OtherDerived>
-    void swap(const DenseBase<OtherDerived>& other,
-              int = OtherDerived::ThisConstantIsPrivateInPlainObjectBase)
-    {
-      SwapWrapper<Derived>(derived()).lazyAssign(other.derived());
-    }
-
-    /** swaps *this with the matrix or array \a other.
-      *
-      */
-    template<typename OtherDerived>
-    void swap(PlainObjectBase<OtherDerived>& other)
-    {
-      SwapWrapper<Derived>(derived()).lazyAssign(other.derived());
-    }
-
-
-    inline const NestByValue<Derived> nestByValue() const;
-    inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
-    inline ForceAlignedAccess<Derived> forceAlignedAccess();
-    template<bool Enable> inline const typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf() const;
-    template<bool Enable> inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();
-
-    Scalar sum() const;
-    Scalar mean() const;
-    Scalar trace() const;
-
-    Scalar prod() const;
-
-    typename internal::traits<Derived>::Scalar minCoeff() const;
-    typename internal::traits<Derived>::Scalar maxCoeff() const;
-
-    template<typename IndexType>
-    typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const;
-    template<typename IndexType>
-    typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const;
-    template<typename IndexType>
-    typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const;
-    template<typename IndexType>
-    typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const;
-
-    template<typename BinaryOp>
-    typename internal::result_of<BinaryOp(typename internal::traits<Derived>::Scalar)>::type
-    redux(const BinaryOp& func) const;
-
-    template<typename Visitor>
-    void visit(Visitor& func) const;
-
-    inline const WithFormat<Derived> format(const IOFormat& fmt) const;
-
-    /** \returns the unique coefficient of a 1x1 expression */
-    CoeffReturnType value() const
-    {
-      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
-      eigen_assert(this->rows() == 1 && this->cols() == 1);
-      return derived().coeff(0,0);
-    }
-
-/////////// Array module ///////////
-
-    bool all(void) const;
-    bool any(void) const;
-    Index count() const;
-
-    typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType;
-    typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType;
-    typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType;
-    typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType;
-
-    ConstRowwiseReturnType rowwise() const;
-    RowwiseReturnType rowwise();
-    ConstColwiseReturnType colwise() const;
-    ColwiseReturnType colwise();
-
-    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(Index rows, Index cols);
-    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(Index size);
-    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random();
-
-    template<typename ThenDerived,typename ElseDerived>
-    const Select<Derived,ThenDerived,ElseDerived>
-    select(const DenseBase<ThenDerived>& thenMatrix,
-           const DenseBase<ElseDerived>& elseMatrix) const;
-
-    template<typename ThenDerived>
-    inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
-    select(const DenseBase<ThenDerived>& thenMatrix, typename ThenDerived::Scalar elseScalar) const;
-
-    template<typename ElseDerived>
-    inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
-    select(typename ElseDerived::Scalar thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;
-
-    template<int p> RealScalar lpNorm() const;
-
-    template<int RowFactor, int ColFactor>
-    const Replicate<Derived,RowFactor,ColFactor> replicate() const;
-    const Replicate<Derived,Dynamic,Dynamic> replicate(Index rowFactor,Index colFactor) const;
-
-    typedef Reverse<Derived, BothDirections> ReverseReturnType;
-    typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
-    ReverseReturnType reverse();
-    ConstReverseReturnType reverse() const;
-    void reverseInPlace();
-
-#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase
-#   include "../plugins/BlockMethods.h"
-#   ifdef EIGEN_DENSEBASE_PLUGIN
-#     include EIGEN_DENSEBASE_PLUGIN
-#   endif
-#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
-
-#ifdef EIGEN2_SUPPORT
-
-    Block<Derived> corner(CornerType type, Index cRows, Index cCols);
-    const Block<Derived> corner(CornerType type, Index cRows, Index cCols) const;
-    template<int CRows, int CCols>
-    Block<Derived, CRows, CCols> corner(CornerType type);
-    template<int CRows, int CCols>
-    const Block<Derived, CRows, CCols> corner(CornerType type) const;
-
-#endif // EIGEN2_SUPPORT
-
-
-    // disable the use of evalTo for dense objects with a nice compilation error
-    template<typename Dest> inline void evalTo(Dest& ) const
-    {
-      EIGEN_STATIC_ASSERT((internal::is_same<Dest,void>::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);
-    }
-
-  protected:
-    /** Default constructor. Does nothing. */
-    DenseBase()
-    {
-      /* Just checks for self-consistency of the flags.
-       * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down
-       */
-#ifdef EIGEN_INTERNAL_DEBUGGING
-      EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor))
-                        && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))),
-                          INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION)
-#endif
-    }
-
-  private:
-    explicit DenseBase(int);
-    DenseBase(int,int);
-    template<typename OtherDerived> explicit DenseBase(const DenseBase<OtherDerived>&);
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_DENSEBASE_H
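A short sketch exercising a few of the DenseBase members declared above (comma initialization, reductions, partial reductions, and select()); the data is illustrative:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // LinSpaced is one of the nullary constructors declared above.
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);

  Eigen::MatrixXd m(2, 3);
  m << 1, 2, 3,                                   // operator<< starts a CommaInitializer
       4, 5, 6;

  double total = m.sum();                         // redux-based reduction
  Eigen::RowVectorXd colSums = m.colwise().sum(); // partial reduction per column

  // select() picks coefficients from two alternatives based on a boolean expression.
  Eigen::ArrayXXd a = m.array();
  Eigen::ArrayXXd capped = (a > 3.0).select(3.0, a);

  std::cout << total << '\n' << colSums << '\n' << capped << '\n' << v.transpose() << '\n';
  return 0;
}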
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/DenseCoeffsBase.h b/resources/3rdparty/eigen/Eigen/src/Core/DenseCoeffsBase.h
deleted file mode 100644
index 3c890f215..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/DenseCoeffsBase.h
+++ /dev/null
@@ -1,754 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_DENSECOEFFSBASE_H
-#define EIGEN_DENSECOEFFSBASE_H
-
-namespace Eigen {
-
-namespace internal {
-template<typename T> struct add_const_on_value_type_if_arithmetic
-{
-  typedef typename conditional<is_arithmetic<T>::value, T, typename add_const_on_value_type<T>::type>::type type;
-};
-}
-
-/** \brief Base class providing read-only coefficient access to matrices and arrays.
-  * \ingroup Core_Module
-  * \tparam Derived Type of the derived class
-  * \tparam #ReadOnlyAccessors Constant indicating read-only access
-  *
-  * This class defines the \c operator() \c const function and friends, which can be used to read specific
-  * entries of a matrix or array.
-  * 
-  * \sa DenseCoeffsBase<Derived, WriteAccessors>, DenseCoeffsBase<Derived, DirectAccessors>,
-  *     \ref TopicClassHierarchy
-  */
-template<typename Derived>
-class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
-{
-  public:
-    typedef typename internal::traits<Derived>::StorageKind StorageKind;
-    typedef typename internal::traits<Derived>::Index Index;
-    typedef typename internal::traits<Derived>::Scalar Scalar;
-    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
-
-    // Explanation for this CoeffReturnType typedef.
-    // - This is the return type of the coeff() method.
-    // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references
-    // to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value).
-    // - The is_arithmetic check is required since "const int", "const double", etc. will cause warnings on some systems
-    // while the declaration of "const T", where T is a non-arithmetic type, does not. Always returning "const Scalar&" is
-    // not possible, since the underlying expressions might not offer a valid address the reference could refer to.
-    typedef typename internal::conditional<bool(internal::traits<Derived>::Flags&LvalueBit),
-                         const Scalar&,
-                         typename internal::conditional<internal::is_arithmetic<Scalar>::value, Scalar, const Scalar>::type
-                     >::type CoeffReturnType;
-
-    typedef typename internal::add_const_on_value_type_if_arithmetic<
-                         typename internal::packet_traits<Scalar>::type
-                     >::type PacketReturnType;
-
-    typedef EigenBase<Derived> Base;
-    using Base::rows;
-    using Base::cols;
-    using Base::size;
-    using Base::derived;
-
-    EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const
-    {
-      return int(Derived::RowsAtCompileTime) == 1 ? 0
-          : int(Derived::ColsAtCompileTime) == 1 ? inner
-          : int(Derived::Flags)&RowMajorBit ? outer
-          : inner;
-    }
-
-    EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const
-    {
-      return int(Derived::ColsAtCompileTime) == 1 ? 0
-          : int(Derived::RowsAtCompileTime) == 1 ? inner
-          : int(Derived::Flags)&RowMajorBit ? inner
-          : outer;
-    }
-
-    /** Short version: don't use this function, use
-      * \link operator()(Index,Index) const \endlink instead.
-      *
-      * Long version: this function is similar to
-      * \link operator()(Index,Index) const \endlink, but without the assertion.
-      * Use this for limiting the performance cost of debugging code when doing
-      * repeated coefficient access. Only use this when it is guaranteed that the
-      * parameters \a row and \a col are in range.
-      *
-      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
-      * function equivalent to \link operator()(Index,Index) const \endlink.
-      *
-      * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const
-      */
-    EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
-    {
-      eigen_internal_assert(row >= 0 && row < rows()
-                        && col >= 0 && col < cols());
-      return derived().coeff(row, col);
-    }
-
-    EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
-    {
-      return coeff(rowIndexByOuterInner(outer, inner),
-                   colIndexByOuterInner(outer, inner));
-    }
-
-    /** \returns the coefficient at the given row and column.
-      *
-      * \sa operator()(Index,Index), operator[](Index)
-      */
-    EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const
-    {
-      eigen_assert(row >= 0 && row < rows()
-          && col >= 0 && col < cols());
-      return derived().coeff(row, col);
-    }
-
-    /** Short version: don't use this function, use
-      * \link operator[](Index) const \endlink instead.
-      *
-      * Long version: this function is similar to
-      * \link operator[](Index) const \endlink, but without the assertion.
-      * Use this for limiting the performance cost of debugging code when doing
-      * repeated coefficient access. Only use this when it is guaranteed that the
-      * parameter \a index is in range.
-      *
-      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
-      * function equivalent to \link operator[](Index) const \endlink.
-      *
-      * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const
-      */
-
-    EIGEN_STRONG_INLINE CoeffReturnType
-    coeff(Index index) const
-    {
-      eigen_internal_assert(index >= 0 && index < size());
-      return derived().coeff(index);
-    }
-
-
-    /** \returns the coefficient at given index.
-      *
-      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
-      *
-      * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
-      * z() const, w() const
-      */
-
-    EIGEN_STRONG_INLINE CoeffReturnType
-    operator[](Index index) const
-    {
-      #ifndef EIGEN2_SUPPORT
-      EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
-                          THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
-      #endif
-      eigen_assert(index >= 0 && index < size());
-      return derived().coeff(index);
-    }
-
-    /** \returns the coefficient at given index.
-      *
-      * This is synonymous to operator[](Index) const.
-      *
-      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
-      *
-      * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
-      * z() const, w() const
-      */
-
-    EIGEN_STRONG_INLINE CoeffReturnType
-    operator()(Index index) const
-    {
-      eigen_assert(index >= 0 && index < size());
-      return derived().coeff(index);
-    }
-
-    /** equivalent to operator[](0).  */
-
-    EIGEN_STRONG_INLINE CoeffReturnType
-    x() const { return (*this)[0]; }
-
-    /** equivalent to operator[](1).  */
-
-    EIGEN_STRONG_INLINE CoeffReturnType
-    y() const { return (*this)[1]; }
-
-    /** equivalent to operator[](2).  */
-
-    EIGEN_STRONG_INLINE CoeffReturnType
-    z() const { return (*this)[2]; }
-
-    /** equivalent to operator[](3).  */
-
-    EIGEN_STRONG_INLINE CoeffReturnType
-    w() const { return (*this)[3]; }
-
-    /** \internal
-      * \returns the packet of coefficients starting at the given row and column. It is your responsibility
-      * to ensure that a packet really starts there. This method is only available on expressions having the
-      * PacketAccessBit.
-      *
-      * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
-      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
-      * starting at an address which is a multiple of the packet size.
-      */
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const
-    {
-      eigen_internal_assert(row >= 0 && row < rows()
-                      && col >= 0 && col < cols());
-      return derived().template packet<LoadMode>(row,col);
-    }
-
-
-    /** \internal */
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const
-    {
-      return packet<LoadMode>(rowIndexByOuterInner(outer, inner),
-                              colIndexByOuterInner(outer, inner));
-    }
-
-    /** \internal
-      * \returns the packet of coefficients starting at the given index. It is your responsibility
-      * to ensure that a packet really starts there. This method is only available on expressions having the
-      * PacketAccessBit and the LinearAccessBit.
-      *
-      * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
-      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
-      * starting at an address which is a multiple of the packet size.
-      */
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
-    {
-      eigen_internal_assert(index >= 0 && index < size());
-      return derived().template packet<LoadMode>(index);
-    }
-
-  protected:
-    // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase.
-    // But some methods are only available in the DirectAccess case.
-    // So we add dummy methods here with these names, so that "using... " doesn't fail.
-    // They are not private so that the child class DenseBase can access them, and they are not public
-    // either since they are an implementation detail, so they have to be protected.
-    void coeffRef();
-    void coeffRefByOuterInner();
-    void writePacket();
-    void writePacketByOuterInner();
-    void copyCoeff();
-    void copyCoeffByOuterInner();
-    void copyPacket();
-    void copyPacketByOuterInner();
-    void stride();
-    void innerStride();
-    void outerStride();
-    void rowStride();
-    void colStride();
-};
-
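To ground the read-only access API documented above, a minimal sketch; the values are illustrative:

#include <Eigen/Dense>
#include <cassert>

int main()
{
  Eigen::Matrix2d m;
  m << 1.0, 2.0,
       3.0, 4.0;

  // operator()(row, col) is the range-checked entry point; coeff(row, col) skips
  // the check unless EIGEN_INTERNAL_DEBUGGING is defined.
  assert(m(0, 1) == 2.0);
  assert(m.coeff(1, 0) == 3.0);

  // operator[] and the x()/y()/z()/w() shorthands are restricted to vectors.
  Eigen::Vector3d v(1.0, 2.0, 3.0);
  assert(v[2] == 3.0);
  assert(v.y() == 2.0);
  return 0;
}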
-/** \brief Base class providing read/write coefficient access to matrices and arrays.
-  * \ingroup Core_Module
-  * \tparam Derived Type of the derived class
-  * \tparam #WriteAccessors Constant indicating read/write access
-  *
-  * This class defines the non-const \c operator() function and friends, which can be used to write specific
-  * entries of a matrix or array. This class inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which
-  * defines the const variant for reading specific entries.
-  * 
-  * \sa DenseCoeffsBase<Derived, DirectAccessors>, \ref TopicClassHierarchy
-  */
-template<typename Derived>
-class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors>
-{
-  public:
-
-    typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
-
-    typedef typename internal::traits<Derived>::StorageKind StorageKind;
-    typedef typename internal::traits<Derived>::Index Index;
-    typedef typename internal::traits<Derived>::Scalar Scalar;
-    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-
-    using Base::coeff;
-    using Base::rows;
-    using Base::cols;
-    using Base::size;
-    using Base::derived;
-    using Base::rowIndexByOuterInner;
-    using Base::colIndexByOuterInner;
-    using Base::operator[];
-    using Base::operator();
-    using Base::x;
-    using Base::y;
-    using Base::z;
-    using Base::w;
-
-    /** Short version: don't use this function, use
-      * \link operator()(Index,Index) \endlink instead.
-      *
-      * Long version: this function is similar to
-      * \link operator()(Index,Index) \endlink, but without the assertion.
-      * Use this for limiting the performance cost of debugging code when doing
-      * repeated coefficient access. Only use this when it is guaranteed that the
-      * parameters \a row and \a col are in range.
-      *
-      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
-      * function equivalent to \link operator()(Index,Index) \endlink.
-      *
-      * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)
-      */
-    EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
-    {
-      eigen_internal_assert(row >= 0 && row < rows()
-                        && col >= 0 && col < cols());
-      return derived().coeffRef(row, col);
-    }
-
-    EIGEN_STRONG_INLINE Scalar&
-    coeffRefByOuterInner(Index outer, Index inner)
-    {
-      return coeffRef(rowIndexByOuterInner(outer, inner),
-                      colIndexByOuterInner(outer, inner));
-    }
-
-    /** \returns a reference to the coefficient at the given row and column.
-      *
-      * \sa operator[](Index)
-      */
-
-    EIGEN_STRONG_INLINE Scalar&
-    operator()(Index row, Index col)
-    {
-      eigen_assert(row >= 0 && row < rows()
-          && col >= 0 && col < cols());
-      return derived().coeffRef(row, col);
-    }
-
-
-    /** Short version: don't use this function, use
-      * \link operator[](Index) \endlink instead.
-      *
-      * Long version: this function is similar to
-      * \link operator[](Index) \endlink, but without the assertion.
-      * Use this for limiting the performance cost of debugging code when doing
-      * repeated coefficient access. Only use this when it is guaranteed that the
-      * parameter \a index is in range.
-      *
-      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
-      * function equivalent to \link operator[](Index) \endlink.
-      *
-      * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)
-      */
-
-    EIGEN_STRONG_INLINE Scalar&
-    coeffRef(Index index)
-    {
-      eigen_internal_assert(index >= 0 && index < size());
-      return derived().coeffRef(index);
-    }
-
-    /** \returns a reference to the coefficient at given index.
-      *
-      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
-      *
-      * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
-      */
-
-    EIGEN_STRONG_INLINE Scalar&
-    operator[](Index index)
-    {
-      #ifndef EIGEN2_SUPPORT
-      EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
-                          THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
-      #endif
-      eigen_assert(index >= 0 && index < size());
-      return derived().coeffRef(index);
-    }
-
-    /** \returns a reference to the coefficient at given index.
-      *
-      * This is synonymous to operator[](Index).
-      *
-      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
-      *
-      * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
-      */
-
-    EIGEN_STRONG_INLINE Scalar&
-    operator()(Index index)
-    {
-      eigen_assert(index >= 0 && index < size());
-      return derived().coeffRef(index);
-    }
-
-    /** equivalent to operator[](0).  */
-
-    EIGEN_STRONG_INLINE Scalar&
-    x() { return (*this)[0]; }
-
-    /** equivalent to operator[](1).  */
-
-    EIGEN_STRONG_INLINE Scalar&
-    y() { return (*this)[1]; }
-
-    /** equivalent to operator[](2).  */
-
-    EIGEN_STRONG_INLINE Scalar&
-    z() { return (*this)[2]; }
-
-    /** equivalent to operator[](3).  */
-
-    EIGEN_STRONG_INLINE Scalar&
-    w() { return (*this)[3]; }
-
-    /** \internal
-      * Stores the given packet of coefficients, at the given row and column of this expression. It is your responsibility
-      * to ensure that a packet really starts there. This method is only available on expressions having the
-      * PacketAccessBit.
-      *
-      * The \a StoreMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
-      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
-      * starting at an address which is a multiple of the packet size.
-      */
-
-    template<int StoreMode>
-    EIGEN_STRONG_INLINE void writePacket
-    (Index row, Index col, const typename internal::packet_traits<Scalar>::type& val)
-    {
-      eigen_internal_assert(row >= 0 && row < rows()
-                        && col >= 0 && col < cols());
-      derived().template writePacket<StoreMode>(row,col,val);
-    }
-
-
-    /** \internal */
-    template<int StoreMode>
-    EIGEN_STRONG_INLINE void writePacketByOuterInner
-    (Index outer, Index inner, const typename internal::packet_traits<Scalar>::type& val)
-    {
-      writePacket<StoreMode>(rowIndexByOuterInner(outer, inner),
-                            colIndexByOuterInner(outer, inner),
-                            val);
-    }
-
-    /** \internal
-      * Stores the given packet of coefficients, at the given index in this expression. It is your responsibility
-      * to ensure that a packet really starts there. This method is only available on expressions having the
-      * PacketAccessBit and the LinearAccessBit.
-      *
-      * The \a StoreMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
-      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
-      * starting at an address which is a multiple of the packet size.
-      */
-    template<int StoreMode>
-    EIGEN_STRONG_INLINE void writePacket
-    (Index index, const typename internal::packet_traits<Scalar>::type& val)
-    {
-      eigen_internal_assert(index >= 0 && index < size());
-      derived().template writePacket<StoreMode>(index,val);
-    }
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-
-    /** \internal Copies the coefficient at position (row,col) of other into *this.
-      *
-      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
-      * with usual assignments.
-      *
-      * Outside of this internal usage, this method is probably not useful. It is hidden from the public API documentation.
-      */
-
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
-    {
-      eigen_internal_assert(row >= 0 && row < rows()
-                        && col >= 0 && col < cols());
-      derived().coeffRef(row, col) = other.derived().coeff(row, col);
-    }
-
-    /** \internal Copies the coefficient at the given index of other into *this.
-      *
-      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
-      * with usual assignments.
-      *
-      * Outside of this internal usage, this method is probably not useful. It is hidden from the public API documentation.
-      */
-
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
-    {
-      eigen_internal_assert(index >= 0 && index < size());
-      derived().coeffRef(index) = other.derived().coeff(index);
-    }
-
-
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE void copyCoeffByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
-    {
-      const Index row = rowIndexByOuterInner(outer,inner);
-      const Index col = colIndexByOuterInner(outer,inner);
-      // derived() is important here: copyCoeff() may be reimplemented in Derived!
-      derived().copyCoeff(row, col, other);
-    }
-
-    /** \internal Copies the packet at position (row,col) of other into *this.
-      *
-      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
-      * with usual assignments.
-      *
-      * Outside of this internal usage, this method is probably not useful. It is hidden from the public API documentation.
-      */
-
-    template<typename OtherDerived, int StoreMode, int LoadMode>
-    EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
-    {
-      eigen_internal_assert(row >= 0 && row < rows()
-                        && col >= 0 && col < cols());
-      derived().template writePacket<StoreMode>(row, col,
-        other.derived().template packet<LoadMode>(row, col));
-    }
-
-    /** \internal Copies the packet at the given index of other into *this.
-      *
-      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
-      * with usual assignments.
-      *
-      * Outside of this internal usage, this method is probably not useful. It is hidden from the public API documentation.
-      */
-
-    template<typename OtherDerived, int StoreMode, int LoadMode>
-    EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase<OtherDerived>& other)
-    {
-      eigen_internal_assert(index >= 0 && index < size());
-      derived().template writePacket<StoreMode>(index,
-        other.derived().template packet<LoadMode>(index));
-    }
-
-    /** \internal */
-    template<typename OtherDerived, int StoreMode, int LoadMode>
-    EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
-    {
-      const Index row = rowIndexByOuterInner(outer,inner);
-      const Index col = colIndexByOuterInner(outer,inner);
-      // derived() is important here: copyPacket() may be reimplemented in Derived!
-      derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other);
-    }
-#endif
-
-};
-
-/** \brief Base class providing direct read-only coefficient access to matrices and arrays.
-  * \ingroup Core_Module
-  * \tparam Derived Type of the derived class
-  * \tparam #DirectAccessors Constant indicating direct access
-  *
-  * This class defines functions to work with strides which can be used to access entries directly. This class
-  * inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using
-  * \c operator() .
-  *
-  * \sa \ref TopicClassHierarchy
-  */
-template<typename Derived>
-class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors>
-{
-  public:
-
-    typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
-    typedef typename internal::traits<Derived>::Index Index;
-    typedef typename internal::traits<Derived>::Scalar Scalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-
-    using Base::rows;
-    using Base::cols;
-    using Base::size;
-    using Base::derived;
-
-    /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
-      *
-      * \sa outerStride(), rowStride(), colStride()
-      */
-    inline Index innerStride() const
-    {
-      return derived().innerStride();
-    }
-
-    /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
-      *          in a column-major matrix).
-      *
-      * \sa innerStride(), rowStride(), colStride()
-      */
-    inline Index outerStride() const
-    {
-      return derived().outerStride();
-    }
-
-    // FIXME shall we remove it ?
-    inline Index stride() const
-    {
-      return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
-    }
-
-    /** \returns the pointer increment between two consecutive rows.
-      *
-      * \sa innerStride(), outerStride(), colStride()
-      */
-    inline Index rowStride() const
-    {
-      return Derived::IsRowMajor ? outerStride() : innerStride();
-    }
-
-    /** \returns the pointer increment between two consecutive columns.
-      *
-      * \sa innerStride(), outerStride(), rowStride()
-      */
-    inline Index colStride() const
-    {
-      return Derived::IsRowMajor ? innerStride() : outerStride();
-    }
-};
-
-/** \brief Base class providing direct read/write coefficient access to matrices and arrays.
-  * \ingroup Core_Module
-  * \tparam Derived Type of the derived class
-  * \tparam #DirectWriteAccessors Constant indicating direct access
-  *
-  * This class defines functions to work with strides which can be used to access entries directly. This class
-  * inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using
-  * \c operator().
-  *
-  * \sa \ref TopicClassHierarchy
-  */
-template<typename Derived>
-class DenseCoeffsBase<Derived, DirectWriteAccessors>
-  : public DenseCoeffsBase<Derived, WriteAccessors>
-{
-  public:
-
-    typedef DenseCoeffsBase<Derived, WriteAccessors> Base;
-    typedef typename internal::traits<Derived>::Index Index;
-    typedef typename internal::traits<Derived>::Scalar Scalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-
-    using Base::rows;
-    using Base::cols;
-    using Base::size;
-    using Base::derived;
-
-    /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
-      *
-      * \sa outerStride(), rowStride(), colStride()
-      */
-    inline Index innerStride() const
-    {
-      return derived().innerStride();
-    }
-
-    /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
-      *          in a column-major matrix).
-      *
-      * \sa innerStride(), rowStride(), colStride()
-      */
-    inline Index outerStride() const
-    {
-      return derived().outerStride();
-    }
-
-    // FIXME shall we remove it ?
-    inline Index stride() const
-    {
-      return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
-    }
-
-    /** \returns the pointer increment between two consecutive rows.
-      *
-      * \sa innerStride(), outerStride(), colStride()
-      */
-    inline Index rowStride() const
-    {
-      return Derived::IsRowMajor ? outerStride() : innerStride();
-    }
-
-    /** \returns the pointer increment between two consecutive columns.
-      *
-      * \sa innerStride(), outerStride(), rowStride()
-      */
-    inline Index colStride() const
-    {
-      return Derived::IsRowMajor ? innerStride() : outerStride();
-    }
-};
-
-namespace internal {
-
-template<typename Derived, bool JustReturnZero>
-struct first_aligned_impl
-{
-  static inline typename Derived::Index run(const Derived&)
-  { return 0; }
-};
-
-template<typename Derived>
-struct first_aligned_impl<Derived, false>
-{
-  static inline typename Derived::Index run(const Derived& m)
-  {
-    return internal::first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size());
-  }
-};
-
-/** \internal \returns the index of the first element of the array that is well aligned for vectorization.
-  *
-  * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more
-  * documentation.
-  */
-template<typename Derived>
-static inline typename Derived::Index first_aligned(const Derived& m)
-{
-  return first_aligned_impl
-          <Derived, (Derived::Flags & AlignedBit) || !(Derived::Flags & DirectAccessBit)>
-          ::run(m);
-}
-
-template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
-struct inner_stride_at_compile_time
-{
-  enum { ret = traits<Derived>::InnerStrideAtCompileTime };
-};
-
-template<typename Derived>
-struct inner_stride_at_compile_time<Derived, false>
-{
-  enum { ret = 0 };
-};
-
-template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
-struct outer_stride_at_compile_time
-{
-  enum { ret = traits<Derived>::OuterStrideAtCompileTime };
-};
-
-template<typename Derived>
-struct outer_stride_at_compile_time<Derived, false>
-{
-  enum { ret = 0 };
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_DENSECOEFFSBASE_H
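Note: the stride accessors removed above (innerStride(), outerStride(), rowStride(), colStride()) remain part of Eigen's public API and are presumably unchanged in the 3.1.2 copy added by this patch. A minimal sketch of what they report, assuming a default column-major Eigen::MatrixXd:

    #include <Eigen/Dense>
    #include <cassert>

    int main() {
      // Column-major (the default): coefficients within a column are adjacent in
      // memory, so innerStride() is 1 and outerStride() equals the number of rows.
      Eigen::MatrixXd m(3, 4);
      assert(m.innerStride() == 1);
      assert(m.outerStride() == 3);
      // rowStride()/colStride() express the same information per row/column step.
      assert(m.rowStride() == 1);
      assert(m.colStride() == 3);
      return 0;
    }
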
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/DenseStorage.h b/resources/3rdparty/eigen/Eigen/src/Core/DenseStorage.h
deleted file mode 100644
index 9d34ec934..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/DenseStorage.h
+++ /dev/null
@@ -1,320 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MATRIXSTORAGE_H
-#define EIGEN_MATRIXSTORAGE_H
-
-#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-  #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_DENSE_STORAGE_CTOR_PLUGIN;
-#else
-  #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
-#endif
-
-namespace Eigen {
-
-namespace internal {
-
-struct constructor_without_unaligned_array_assert {};
-
-/** \internal
-  * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned:
-  * to a 16-byte boundary if the total size is a multiple of 16 bytes.
-  */
-template <typename T, int Size, int MatrixOrArrayOptions,
-          int Alignment = (MatrixOrArrayOptions&DontAlign) ? 0
-                        : (((Size*sizeof(T))%16)==0) ? 16
-                        : 0 >
-struct plain_array
-{
-  T array[Size];
-
-  plain_array() 
-  { 
-    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
-  }
-
-  plain_array(constructor_without_unaligned_array_assert) 
-  { 
-    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
-  }
-};
-
-#ifdef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
-  #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
-#else
-  #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \
-    eigen_assert((reinterpret_cast<size_t>(array) & sizemask) == 0 \
-              && "this assertion is explained here: " \
-              "http://eigen.tuxfamily.org/dox-devel/TopicUnalignedArrayAssert.html" \
-              " **** READ THIS WEB PAGE !!! ****");
-#endif
-
-template <typename T, int Size, int MatrixOrArrayOptions>
-struct plain_array<T, Size, MatrixOrArrayOptions, 16>
-{
-  EIGEN_USER_ALIGN16 T array[Size];
-
-  plain_array() 
-  { 
-    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf);
-    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
-  }
-
-  plain_array(constructor_without_unaligned_array_assert) 
-  { 
-    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
-  }
-};
-
-template <typename T, int MatrixOrArrayOptions, int Alignment>
-struct plain_array<T, 0, MatrixOrArrayOptions, Alignment>
-{
-  EIGEN_USER_ALIGN16 T array[1];
-  plain_array() {}
-  plain_array(constructor_without_unaligned_array_assert) {}
-};
-
-} // end namespace internal
-
-/** \internal
-  *
-  * \class DenseStorage
-  * \ingroup Core_Module
-  *
-  * \brief Stores the data of a matrix
-  *
-  * This class stores the data of fixed-size, dynamic-size or mixed matrices
-  * as compactly as possible.
-  *
-  * \sa Matrix
-  */
-template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage;
-
-// purely fixed-size matrix
-template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage
-{
-    internal::plain_array<T,Size,_Options> m_data;
-  public:
-    inline explicit DenseStorage() {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
-      : m_data(internal::constructor_without_unaligned_array_assert()) {}
-    inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
-    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
-    static inline DenseIndex rows(void) {return _Rows;}
-    static inline DenseIndex cols(void) {return _Cols;}
-    inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
-    inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
-    inline const T *data() const { return m_data.array; }
-    inline T *data() { return m_data.array; }
-};
-
-// null matrix
-template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
-{
-  public:
-    inline explicit DenseStorage() {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
-    inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
-    inline void swap(DenseStorage& ) {}
-    static inline DenseIndex rows(void) {return _Rows;}
-    static inline DenseIndex cols(void) {return _Cols;}
-    inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
-    inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
-    inline const T *data() const { return 0; }
-    inline T *data() { return 0; }
-};
-
-// more specializations for null matrices; these are necessary to resolve ambiguities
-template<typename T, int _Options> class DenseStorage<T, 0, Dynamic, Dynamic, _Options>
-: public DenseStorage<T, 0, 0, 0, _Options> { };
-
-template<typename T, int _Rows, int _Options> class DenseStorage<T, 0, _Rows, Dynamic, _Options>
-: public DenseStorage<T, 0, 0, 0, _Options> { };
-
-template<typename T, int _Cols, int _Options> class DenseStorage<T, 0, Dynamic, _Cols, _Options>
-: public DenseStorage<T, 0, 0, 0, _Options> { };
-
-// dynamic-size matrix with fixed-size storage
-template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic, Dynamic, _Options>
-{
-    internal::plain_array<T,Size,_Options> m_data;
-    DenseIndex m_rows;
-    DenseIndex m_cols;
-  public:
-    inline explicit DenseStorage() : m_rows(0), m_cols(0) {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
-      : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
-    inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
-    inline void swap(DenseStorage& other)
-    { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
-    inline DenseIndex rows() const {return m_rows;}
-    inline DenseIndex cols() const {return m_cols;}
-    inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
-    inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
-    inline const T *data() const { return m_data.array; }
-    inline T *data() { return m_data.array; }
-};
-
-// dynamic-size matrix with fixed-size storage and fixed width
-template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Size, Dynamic, _Cols, _Options>
-{
-    internal::plain_array<T,Size,_Options> m_data;
-    DenseIndex m_rows;
-  public:
-    inline explicit DenseStorage() : m_rows(0) {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
-      : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
-    inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
-    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
-    inline DenseIndex rows(void) const {return m_rows;}
-    inline DenseIndex cols(void) const {return _Cols;}
-    inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
-    inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
-    inline const T *data() const { return m_data.array; }
-    inline T *data() { return m_data.array; }
-};
-
-// dynamic-size matrix with fixed-size storage and fixed height
-template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Size, _Rows, Dynamic, _Options>
-{
-    internal::plain_array<T,Size,_Options> m_data;
-    DenseIndex m_cols;
-  public:
-    inline explicit DenseStorage() : m_cols(0) {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
-      : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
-    inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
-    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
-    inline DenseIndex rows(void) const {return _Rows;}
-    inline DenseIndex cols(void) const {return m_cols;}
-    inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
-    inline void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
-    inline const T *data() const { return m_data.array; }
-    inline T *data() { return m_data.array; }
-};
-
-// purely dynamic matrix.
-template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynamic, _Options>
-{
-    T *m_data;
-    DenseIndex m_rows;
-    DenseIndex m_cols;
-  public:
-    inline explicit DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
-       : m_data(0), m_rows(0), m_cols(0) {}
-    inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
-      : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows), m_cols(nbCols)
-    { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
-    inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
-    inline void swap(DenseStorage& other)
-    { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
-    inline DenseIndex rows(void) const {return m_rows;}
-    inline DenseIndex cols(void) const {return m_cols;}
-    inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
-    {
-      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
-      m_rows = nbRows;
-      m_cols = nbCols;
-    }
-    void resize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
-    {
-      if(size != m_rows*m_cols)
-      {
-        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols);
-        if (size)
-          m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
-        else
-          m_data = 0;
-        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
-      }
-      m_rows = nbRows;
-      m_cols = nbCols;
-    }
-    inline const T *data() const { return m_data; }
-    inline T *data() { return m_data; }
-};
-
-// matrix with dynamic width and fixed height (so that matrix has dynamic size).
-template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Rows, Dynamic, _Options>
-{
-    T *m_data;
-    DenseIndex m_cols;
-  public:
-    inline explicit DenseStorage() : m_data(0), m_cols(0) {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
-    inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
-    { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
-    inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
-    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
-    static inline DenseIndex rows(void) {return _Rows;}
-    inline DenseIndex cols(void) const {return m_cols;}
-    inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
-    {
-      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
-      m_cols = nbCols;
-    }
-    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex nbCols)
-    {
-      if(size != _Rows*m_cols)
-      {
-        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols);
-        if (size)
-          m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
-        else
-          m_data = 0;
-        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
-      }
-      m_cols = nbCols;
-    }
-    inline const T *data() const { return m_data; }
-    inline T *data() { return m_data; }
-};
-
-// matrix with dynamic height and fixed width (so that matrix has dynamic size).
-template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dynamic, _Cols, _Options>
-{
-    T *m_data;
-    DenseIndex m_rows;
-  public:
-    inline explicit DenseStorage() : m_data(0), m_rows(0) {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
-    inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
-    { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
-    inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
-    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
-    inline DenseIndex rows(void) const {return m_rows;}
-    static inline DenseIndex cols(void) {return _Cols;}
-    inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
-    {
-      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
-      m_rows = nbRows;
-    }
-    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex nbRows, DenseIndex)
-    {
-      if(size != m_rows*_Cols)
-      {
-        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows);
-        if (size)
-          m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
-        else
-          m_data = 0;
-        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
-      }
-      m_rows = nbRows;
-    }
-    inline const T *data() const { return m_data; }
-    inline T *data() { return m_data; }
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_MATRIXSTORAGE_H
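Note: DenseStorage is the internal backend selected by Matrix/Array from their template parameters. A rough sketch of the user-visible consequences, assuming the 3.1.2 headers added by this patch:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      // Fixed-size: storage is a plain (possibly 16-byte aligned) in-object array,
      // so constructing a Matrix4f involves no heap allocation.
      Eigen::Matrix4f fixed;
      // Dynamic-size: storage is a pointer plus the run-time dimensions, and
      // resize() reallocates only when the total number of coefficients changes.
      Eigen::MatrixXf dynamic(2, 3);
      dynamic.resize(3, 2);  // same total size (6): no reallocation
      std::cout << sizeof(fixed) << " vs " << sizeof(dynamic) << "\n";
      return 0;
    }
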
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Diagonal.h b/resources/3rdparty/eigen/Eigen/src/Core/Diagonal.h
deleted file mode 100644
index 0927e9969..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Diagonal.h
+++ /dev/null
@@ -1,237 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_DIAGONAL_H
-#define EIGEN_DIAGONAL_H
-
-namespace Eigen { 
-
-/** \class Diagonal
-  * \ingroup Core_Module
-  *
-  * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix
-  *
-  * \param MatrixType the type of the object in which we are taking a sub/main/super diagonal
-  * \param DiagIndex the index of the sub/super diagonal. The default is 0, which means the main diagonal.
-  *              A positive value means a superdiagonal, a negative value means a subdiagonal.
-  *              You can also use DynamicIndex so the index can be set at runtime.
-  *
-  * The matrix is not required to be square.
-  *
-  * This class represents an expression of the main diagonal, or any sub/super diagonal
-  * of a matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index), and most of the
-  * time this is the only way it is used.
-  *
-  * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index)
-  */
-
-namespace internal {
-template<typename MatrixType, int DiagIndex>
-struct traits<Diagonal<MatrixType,DiagIndex> >
- : traits<MatrixType>
-{
-  typedef typename nested<MatrixType>::type MatrixTypeNested;
-  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
-  typedef typename MatrixType::StorageKind StorageKind;
-  enum {
-    RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
-                      : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
-                                              MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
-    ColsAtCompileTime = 1,
-    MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
-                         : DiagIndex == DynamicIndex ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime,
-                                                                              MatrixType::MaxColsAtCompileTime)
-                         : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
-                                                 MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
-    MaxColsAtCompileTime = 1,
-    MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
-    Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit,
-    CoeffReadCost = _MatrixTypeNested::CoeffReadCost,
-    MatrixTypeOuterStride = outer_stride_at_compile_time<MatrixType>::ret,
-    InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1,
-    OuterStrideAtCompileTime = 0
-  };
-};
-}
-
-template<typename MatrixType, int _DiagIndex> class Diagonal
-   : public internal::dense_xpr_base< Diagonal<MatrixType,_DiagIndex> >::type
-{
-  public:
-
-    enum { DiagIndex = _DiagIndex };
-    typedef typename internal::dense_xpr_base<Diagonal>::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
-
-    inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {}
-
-    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
-
-    inline Index rows() const
-    { return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
-
-    inline Index cols() const { return 1; }
-
-    inline Index innerStride() const
-    {
-      return m_matrix.outerStride() + 1;
-    }
-
-    inline Index outerStride() const
-    {
-      return 0;
-    }
-
-    typedef typename internal::conditional<
-                       internal::is_lvalue<MatrixType>::value,
-                       Scalar,
-                       const Scalar
-                     >::type ScalarWithConstIfNotLvalue;
-
-    inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); }
-    inline const Scalar* data() const { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); }
-
-    inline Scalar& coeffRef(Index row, Index)
-    {
-      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
-      return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset());
-    }
-
-    inline const Scalar& coeffRef(Index row, Index) const
-    {
-      return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset());
-    }
-
-    inline CoeffReturnType coeff(Index row, Index) const
-    {
-      return m_matrix.coeff(row+rowOffset(), row+colOffset());
-    }
-
-    inline Scalar& coeffRef(Index idx)
-    {
-      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
-      return m_matrix.const_cast_derived().coeffRef(idx+rowOffset(), idx+colOffset());
-    }
-
-    inline const Scalar& coeffRef(Index idx) const
-    {
-      return m_matrix.const_cast_derived().coeffRef(idx+rowOffset(), idx+colOffset());
-    }
-
-    inline CoeffReturnType coeff(Index idx) const
-    {
-      return m_matrix.coeff(idx+rowOffset(), idx+colOffset());
-    }
-
-    const typename internal::remove_all<typename MatrixType::Nested>::type& 
-    nestedExpression() const 
-    {
-      return m_matrix;
-    }
-
-    int index() const
-    {
-      return m_index.value();
-    }
-
-  protected:
-    typename MatrixType::Nested m_matrix;
-    const internal::variable_if_dynamicindex<Index, DiagIndex> m_index;
-
-  private:
-    // some compilers may fail to optimize std::max etc in case of compile-time constants...
-    EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
-    EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
-    EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
-    // trigger a compile-time error if someone tries to call packet()
-    template<int LoadMode> typename MatrixType::PacketReturnType packet(Index) const;
-    template<int LoadMode> typename MatrixType::PacketReturnType packet(Index,Index) const;
-};
-
-/** \returns an expression of the main diagonal of the matrix \c *this
-  *
-  * \c *this is not required to be square.
-  *
-  * Example: \include MatrixBase_diagonal.cpp
-  * Output: \verbinclude MatrixBase_diagonal.out
-  *
-  * \sa class Diagonal */
-template<typename Derived>
-inline typename MatrixBase<Derived>::DiagonalReturnType
-MatrixBase<Derived>::diagonal()
-{
-  return derived();
-}
-
-/** This is the const version of diagonal(). */
-template<typename Derived>
-inline const typename MatrixBase<Derived>::ConstDiagonalReturnType
-MatrixBase<Derived>::diagonal() const
-{
-  return ConstDiagonalReturnType(derived());
-}
-
-/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
-  *
-  * \c *this is not required to be square.
-  *
-  * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
-  * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
-  *
-  * Example: \include MatrixBase_diagonal_int.cpp
-  * Output: \verbinclude MatrixBase_diagonal_int.out
-  *
-  * \sa MatrixBase::diagonal(), class Diagonal */
-template<typename Derived>
-inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<DynamicIndex>::Type
-MatrixBase<Derived>::diagonal(Index index)
-{
-  return typename DiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
-}
-
-/** This is the const version of diagonal(Index). */
-template<typename Derived>
-inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<DynamicIndex>::Type
-MatrixBase<Derived>::diagonal(Index index) const
-{
-  return typename ConstDiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
-}
-
-/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
-  *
-  * \c *this is not required to be square.
-  *
-  * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
-  * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
-  *
-  * Example: \include MatrixBase_diagonal_template_int.cpp
-  * Output: \verbinclude MatrixBase_diagonal_template_int.out
-  *
-  * \sa MatrixBase::diagonal(), class Diagonal */
-template<typename Derived>
-template<int Index>
-inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index>::Type
-MatrixBase<Derived>::diagonal()
-{
-  return derived();
-}
-
-/** This is the const version of diagonal<int>(). */
-template<typename Derived>
-template<int Index>
-inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index>::Type
-MatrixBase<Derived>::diagonal() const
-{
-  return derived();
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_DIAGONAL_H
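Note: a minimal usage sketch of the Diagonal expression removed above (main, super- and subdiagonals), assuming the equivalent 3.1.2 header added by this patch:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::Matrix3d m;
      m << 1, 2, 3,
           4, 5, 6,
           7, 8, 9;
      std::cout << m.diagonal().transpose()     << "\n";  // 1 5 9  (main diagonal)
      std::cout << m.diagonal(1).transpose()    << "\n";  // 2 6    (first superdiagonal)
      std::cout << m.diagonal<-1>().transpose() << "\n";  // 4 8    (first subdiagonal)
      m.diagonal().setZero();  // the expression is writable when the matrix is an lvalue
      return 0;
    }
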
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/DiagonalMatrix.h b/resources/3rdparty/eigen/Eigen/src/Core/DiagonalMatrix.h
deleted file mode 100644
index da0264b0e..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/DiagonalMatrix.h
+++ /dev/null
@@ -1,307 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_DIAGONALMATRIX_H
-#define EIGEN_DIAGONALMATRIX_H
-
-namespace Eigen { 
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-template<typename Derived>
-class DiagonalBase : public EigenBase<Derived>
-{
-  public:
-    typedef typename internal::traits<Derived>::DiagonalVectorType DiagonalVectorType;
-    typedef typename DiagonalVectorType::Scalar Scalar;
-    typedef typename DiagonalVectorType::RealScalar RealScalar;
-    typedef typename internal::traits<Derived>::StorageKind StorageKind;
-    typedef typename internal::traits<Derived>::Index Index;
-
-    enum {
-      RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
-      ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
-      MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
-      MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
-      IsVectorAtCompileTime = 0,
-      Flags = 0
-    };
-
-    typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime> DenseMatrixType;
-    typedef DenseMatrixType DenseType;
-    typedef DiagonalMatrix<Scalar,DiagonalVectorType::SizeAtCompileTime,DiagonalVectorType::MaxSizeAtCompileTime> PlainObject;
-
-    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
-    inline Derived& derived() { return *static_cast<Derived*>(this); }
-
-    DenseMatrixType toDenseMatrix() const { return derived(); }
-    template<typename DenseDerived>
-    void evalTo(MatrixBase<DenseDerived> &other) const;
-    template<typename DenseDerived>
-    void addTo(MatrixBase<DenseDerived> &other) const
-    { other.diagonal() += diagonal(); }
-    template<typename DenseDerived>
-    void subTo(MatrixBase<DenseDerived> &other) const
-    { other.diagonal() -= diagonal(); }
-
-    inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
-    inline DiagonalVectorType& diagonal() { return derived().diagonal(); }
-
-    inline Index rows() const { return diagonal().size(); }
-    inline Index cols() const { return diagonal().size(); }
-
-    template<typename MatrixDerived>
-    const DiagonalProduct<MatrixDerived, Derived, OnTheLeft>
-    operator*(const MatrixBase<MatrixDerived> &matrix) const;
-
-    inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType> >
-    inverse() const
-    {
-      return diagonal().cwiseInverse();
-    }
-    
-    inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> >
-    operator*(const Scalar& scalar) const
-    {
-      return diagonal() * scalar;
-    }
-    friend inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> >
-    operator*(const Scalar& scalar, const DiagonalBase& other)
-    {
-      return other.diagonal() * scalar;
-    }
-    
-    #ifdef EIGEN2_SUPPORT
-    template<typename OtherDerived>
-    bool isApprox(const DiagonalBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
-    {
-      return diagonal().isApprox(other.diagonal(), precision);
-    }
-    template<typename OtherDerived>
-    bool isApprox(const MatrixBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
-    {
-      return toDenseMatrix().isApprox(other, precision);
-    }
-    #endif
-};
-
-template<typename Derived>
-template<typename DenseDerived>
-void DiagonalBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
-{
-  other.setZero();
-  other.diagonal() = diagonal();
-}
-#endif
-
-/** \class DiagonalMatrix
-  * \ingroup Core_Module
-  *
-  * \brief Represents a diagonal matrix with its storage
-  *
-  * \param _Scalar the type of coefficients
-  * \param SizeAtCompileTime the dimension of the matrix, or Dynamic
-  * \param MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. This parameter is optional and defaults
-  *        to SizeAtCompileTime. Most of the time, you do not need to specify it.
-  *
-  * \sa class DiagonalWrapper
-  */
-
-namespace internal {
-template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>
-struct traits<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >
- : traits<Matrix<_Scalar,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
-{
-  typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType;
-  typedef Dense StorageKind;
-  typedef DenseIndex Index;
-  enum {
-    Flags = LvalueBit
-  };
-};
-}
-template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>
-class DiagonalMatrix
-  : public DiagonalBase<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >
-{
-  public:
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    typedef typename internal::traits<DiagonalMatrix>::DiagonalVectorType DiagonalVectorType;
-    typedef const DiagonalMatrix& Nested;
-    typedef _Scalar Scalar;
-    typedef typename internal::traits<DiagonalMatrix>::StorageKind StorageKind;
-    typedef typename internal::traits<DiagonalMatrix>::Index Index;
-    #endif
-
-  protected:
-
-    DiagonalVectorType m_diagonal;
-
-  public:
-
-    /** const version of diagonal(). */
-    inline const DiagonalVectorType& diagonal() const { return m_diagonal; }
-    /** \returns a reference to the stored vector of diagonal coefficients. */
-    inline DiagonalVectorType& diagonal() { return m_diagonal; }
-
-    /** Default constructor without initialization */
-    inline DiagonalMatrix() {}
-
-    /** Constructs a diagonal matrix with given dimension  */
-    inline DiagonalMatrix(Index dim) : m_diagonal(dim) {}
-
-    /** 2D constructor. */
-    inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {}
-
-    /** 3D constructor. */
-    inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {}
-
-    /** Copy constructor. */
-    template<typename OtherDerived>
-    inline DiagonalMatrix(const DiagonalBase<OtherDerived>& other) : m_diagonal(other.diagonal()) {}
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** Copy constructor; prevents a default copy constructor from hiding the templated constructor above. */
-    inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {}
-    #endif
-
-    /** generic constructor from expression of the diagonal coefficients */
-    template<typename OtherDerived>
-    explicit inline DiagonalMatrix(const MatrixBase<OtherDerived>& other) : m_diagonal(other)
-    {}
-
-    /** Copy operator. */
-    template<typename OtherDerived>
-    DiagonalMatrix& operator=(const DiagonalBase<OtherDerived>& other)
-    {
-      m_diagonal = other.diagonal();
-      return *this;
-    }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** This is a special case of the templated operator=. Its purpose is to
-      * prevent a default operator= from hiding the templated operator=.
-      */
-    DiagonalMatrix& operator=(const DiagonalMatrix& other)
-    {
-      m_diagonal = other.diagonal();
-      return *this;
-    }
-    #endif
-
-    /** Resizes to given size. */
-    inline void resize(Index size) { m_diagonal.resize(size); }
-    /** Sets all coefficients to zero. */
-    inline void setZero() { m_diagonal.setZero(); }
-    /** Resizes and sets all coefficients to zero. */
-    inline void setZero(Index size) { m_diagonal.setZero(size); }
-    /** Sets this matrix to be the identity matrix of the current size. */
-    inline void setIdentity() { m_diagonal.setOnes(); }
-    /** Sets this matrix to be the identity matrix of the given size. */
-    inline void setIdentity(Index size) { m_diagonal.setOnes(size); }
-};
-
-/** \class DiagonalWrapper
-  * \ingroup Core_Module
-  *
-  * \brief Expression of a diagonal matrix
-  *
-  * \param _DiagonalVectorType the type of the vector of diagonal coefficients
-  *
-  * This class is an expression of a diagonal matrix; instead of storing its own vector of diagonal
-  * coefficients, it wraps an existing vector expression. It is the return type of MatrixBase::asDiagonal(),
-  * and most of the time this is the only way it is used.
-  *
-  * \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal()
-  */
-
-namespace internal {
-template<typename _DiagonalVectorType>
-struct traits<DiagonalWrapper<_DiagonalVectorType> >
-{
-  typedef _DiagonalVectorType DiagonalVectorType;
-  typedef typename DiagonalVectorType::Scalar Scalar;
-  typedef typename DiagonalVectorType::Index Index;
-  typedef typename DiagonalVectorType::StorageKind StorageKind;
-  enum {
-    RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
-    ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
-    MaxRowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
-    MaxColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
-    Flags =  traits<DiagonalVectorType>::Flags & LvalueBit
-  };
-};
-}
-
-template<typename _DiagonalVectorType>
-class DiagonalWrapper
-  : public DiagonalBase<DiagonalWrapper<_DiagonalVectorType> >, internal::no_assignment_operator
-{
-  public:
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    typedef _DiagonalVectorType DiagonalVectorType;
-    typedef DiagonalWrapper Nested;
-    #endif
-
-    /** Constructor from expression of diagonal coefficients to wrap. */
-    inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {}
-
-    /** \returns a const reference to the wrapped expression of diagonal coefficients. */
-    const DiagonalVectorType& diagonal() const { return m_diagonal; }
-
-  protected:
-    typename DiagonalVectorType::Nested m_diagonal;
-};
-
-/** \returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients
-  *
-  * \only_for_vectors
-  *
-  * Example: \include MatrixBase_asDiagonal.cpp
-  * Output: \verbinclude MatrixBase_asDiagonal.out
-  *
-  * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal()
-  **/
-template<typename Derived>
-inline const DiagonalWrapper<const Derived>
-MatrixBase<Derived>::asDiagonal() const
-{
-  return derived();
-}
-
-/** \returns true if *this is approximately equal to a diagonal matrix,
-  *          within the precision given by \a prec.
-  *
-  * Example: \include MatrixBase_isDiagonal.cpp
-  * Output: \verbinclude MatrixBase_isDiagonal.out
-  *
-  * \sa asDiagonal()
-  */
-template<typename Derived>
-bool MatrixBase<Derived>::isDiagonal(const RealScalar& prec) const
-{
-  if(cols() != rows()) return false;
-  RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
-  for(Index j = 0; j < cols(); ++j)
-  {
-    RealScalar absOnDiagonal = internal::abs(coeff(j,j));
-    if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
-  }
-  for(Index j = 0; j < cols(); ++j)
-    for(Index i = 0; i < j; ++i)
-    {
-      if(!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false;
-      if(!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false;
-    }
-  return true;
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_DIAGONALMATRIX_H
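Note: a short sketch contrasting DiagonalMatrix (which owns its diagonal coefficients) with the DiagonalWrapper returned by asDiagonal() (which only references a vector expression), assuming the 3.1.2 headers:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      // DiagonalMatrix stores its own vector of diagonal coefficients.
      Eigen::DiagonalMatrix<double, 3> d(1.0, 2.0, 3.0);
      d.setIdentity();  // all diagonal coefficients become 1
      std::cout << d.toDenseMatrix() << "\n\n";
      // DiagonalWrapper merely wraps an existing vector expression.
      Eigen::Vector3d v(1.0, 2.0, 3.0);
      std::cout << v.asDiagonal().toDenseMatrix() << "\n";
      std::cout << v.asDiagonal().toDenseMatrix().isDiagonal() << "\n";  // 1
      return 0;
    }
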
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/DiagonalProduct.h b/resources/3rdparty/eigen/Eigen/src/Core/DiagonalProduct.h
deleted file mode 100644
index 8c7b2d978..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/DiagonalProduct.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_DIAGONALPRODUCT_H
-#define EIGEN_DIAGONALPRODUCT_H
-
-namespace Eigen { 
-
-namespace internal {
-template<typename MatrixType, typename DiagonalType, int ProductOrder>
-struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
- : traits<MatrixType>
-{
-  typedef typename scalar_product_traits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;
-  enum {
-    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
-
-    _StorageOrder = MatrixType::Flags & RowMajorBit ? RowMajor : ColMajor,
-    _PacketOnDiag = !((int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
-                    ||(int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)),
-    _SameTypes = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value,
-    // FIXME currently we need the same scalar types, but in the future the next rule should be used instead:
-    //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::Flags)&PacketAccessBit))),
-    _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && ((!_PacketOnDiag) || (bool(int(DiagonalType::Flags)&PacketAccessBit))),
-
-    Flags = (HereditaryBits & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),
-    CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
-  };
-};
-}
-
-template<typename MatrixType, typename DiagonalType, int ProductOrder>
-class DiagonalProduct : internal::no_assignment_operator,
-                        public MatrixBase<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
-{
-  public:
-
-    typedef MatrixBase<DiagonalProduct> Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(DiagonalProduct)
-
-    inline DiagonalProduct(const MatrixType& matrix, const DiagonalType& diagonal)
-      : m_matrix(matrix), m_diagonal(diagonal)
-    {
-      eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols()));
-    }
-
-    inline Index rows() const { return m_matrix.rows(); }
-    inline Index cols() const { return m_matrix.cols(); }
-
-    const Scalar coeff(Index row, Index col) const
-    {
-      return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col);
-    }
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
-    {
-      enum {
-        StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor
-      };
-      const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col;
-
-      return packet_impl<LoadMode>(row,col,indexInDiagonalVector,typename internal::conditional<
-        ((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
-       ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type());
-    }
-
-  protected:
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::true_type) const
-    {
-      return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
-                     internal::pset1<PacketScalar>(m_diagonal.diagonal().coeff(id)));
-    }
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::false_type) const
-    {
-      enum {
-        InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
-        DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned
-      };
-      return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
-                     m_diagonal.diagonal().template packet<DiagonalVectorPacketLoadMode>(id));
-    }
-
-    typename MatrixType::Nested m_matrix;
-    typename DiagonalType::Nested m_diagonal;
-};
-
-/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal.
-  */
-template<typename Derived>
-template<typename DiagonalDerived>
-inline const DiagonalProduct<Derived, DiagonalDerived, OnTheRight>
-MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &a_diagonal) const
-{
-  return DiagonalProduct<Derived, DiagonalDerived, OnTheRight>(derived(), a_diagonal.derived());
-}
-
-/** \returns the diagonal matrix product of \c *this by the matrix \a matrix.
-  */
-template<typename DiagonalDerived>
-template<typename MatrixDerived>
-inline const DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>
-DiagonalBase<DiagonalDerived>::operator*(const MatrixBase<MatrixDerived> &matrix) const
-{
-  return DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>(matrix.derived(), derived());
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_DIAGONALPRODUCT_H
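Note: the lazy diagonal products removed above scale rows or columns depending on which side the diagonal appears, without forming a dense diagonal matrix. A small sketch, assuming the 3.1.2 headers:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::Matrix3d a = Eigen::Matrix3d::Ones();
      Eigen::Vector3d v(1.0, 2.0, 3.0);
      std::cout << v.asDiagonal() * a << "\n\n";  // scales the rows of a by 1, 2, 3
      std::cout << a * v.asDiagonal() << "\n";    // scales the columns of a by 1, 2, 3
      return 0;
    }
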
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Dot.h b/resources/3rdparty/eigen/Eigen/src/Core/Dot.h
deleted file mode 100644
index a7a18c939..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Dot.h
+++ /dev/null
@@ -1,261 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2006-2008, 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_DOT_H
-#define EIGEN_DOT_H
-
-namespace Eigen { 
-
-namespace internal {
-
-// helper function for dot(). The problem is that if we put it in the body of dot(), then upon calling dot()
-// with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE
-// looking at the static assertions. Thus this is a trick to get better compile errors.
-template<typename T, typename U,
-// the NeedToTranspose condition here is taken straight from Assign.h
-         bool NeedToTranspose = T::IsVectorAtCompileTime
-                && U::IsVectorAtCompileTime
-                && ((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1)
-                      |  // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
-                         // revert to || as soon as not needed anymore.
-                    (int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1))
->
-struct dot_nocheck
-{
-  typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar;
-  static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
-  {
-    return a.template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum();
-  }
-};
-
-template<typename T, typename U>
-struct dot_nocheck<T, U, true>
-{
-  typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar;
-  static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
-  {
-    return a.transpose().template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum();
-  }
-};
-
-} // end namespace internal
-
-/** \returns the dot product of *this with other.
-  *
-  * \only_for_vectors
-  *
-  * \note If the scalar type is complex, then this function returns the Hermitian
-  * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the
-  * second variable.
-  *
-  * \sa squaredNorm(), norm()
-  */
-template<typename Derived>
-template<typename OtherDerived>
-typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
-MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
-  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
-  typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func;
-  EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar);
-
-  eigen_assert(size() == other.size());
-
-  return internal::dot_nocheck<Derived,OtherDerived>::run(*this, other);
-}
-
-#ifdef EIGEN2_SUPPORT
-/** \returns the dot product of *this with other, with the Eigen2 convention that the dot product is linear in the first variable
-  * (conjugating the second variable). Of course this only makes a difference in the complex case.
-  *
-  * This method is only available in EIGEN2_SUPPORT mode.
-  *
-  * \only_for_vectors
-  *
-  * \sa dot()
-  */
-template<typename Derived>
-template<typename OtherDerived>
-typename internal::traits<Derived>::Scalar
-MatrixBase<Derived>::eigen2_dot(const MatrixBase<OtherDerived>& other) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
-  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
-  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
-    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-
-  eigen_assert(size() == other.size());
-
-  return internal::dot_nocheck<OtherDerived,Derived>::run(other,*this);
-}
-#endif
-
-
-//---------- implementation of L2 norm and related functions ----------
-
-/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the squared Frobenius norm.
-  * In both cases, it is the sum of the squares of all the matrix entries.
-  * For vectors, this also equals the dot product of \c *this with itself.
-  *
-  * \sa dot(), norm()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
-{
-  return internal::real((*this).cwiseAbs2().sum());
-}
-
-/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
-  * In both cases, it is the square root of the sum of the squares of all the matrix entries.
-  * For vectors, this also equals the square root of the dot product of \c *this with itself.
-  *
-  * \sa dot(), squaredNorm()
-  */
-template<typename Derived>
-inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
-{
-  return internal::sqrt(squaredNorm());
-}
-
-/** \returns an expression of the quotient of *this by its own norm.
-  *
-  * \only_for_vectors
-  *
-  * \sa norm(), normalize()
-  */
-template<typename Derived>
-inline const typename MatrixBase<Derived>::PlainObject
-MatrixBase<Derived>::normalized() const
-{
-  typedef typename internal::nested<Derived>::type Nested;
-  typedef typename internal::remove_reference<Nested>::type _Nested;
-  _Nested n(derived());
-  return n / n.norm();
-}
-
-/** Normalizes the vector, i.e. divides it by its own norm.
-  *
-  * \only_for_vectors
-  *
-  * \sa norm(), normalized()
-  */
-template<typename Derived>
-inline void MatrixBase<Derived>::normalize()
-{
-  *this /= norm();
-}
-
-//---------- implementation of other norms ----------
-
-namespace internal {
-
-template<typename Derived, int p>
-struct lpNorm_selector
-{
-  typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
-  static inline RealScalar run(const MatrixBase<Derived>& m)
-  {
-    return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p);
-  }
-};
-
-template<typename Derived>
-struct lpNorm_selector<Derived, 1>
-{
-  static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
-  {
-    return m.cwiseAbs().sum();
-  }
-};
-
-template<typename Derived>
-struct lpNorm_selector<Derived, 2>
-{
-  static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
-  {
-    return m.norm();
-  }
-};
-
-template<typename Derived>
-struct lpNorm_selector<Derived, Infinity>
-{
-  static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
-  {
-    return m.cwiseAbs().maxCoeff();
-  }
-};
-
-} // end namespace internal
-
-/** \returns the \f$ \ell^p \f$ norm of *this, that is, the p-th root of the sum of the p-th powers of the absolute values
-  *          of the coefficients of *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$
-  *          norm, that is, the maximum of the absolute values of the coefficients of *this.
-  *
-  * \sa norm()
-  */
-template<typename Derived>
-template<int p>
-inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
-MatrixBase<Derived>::lpNorm() const
-{
-  return internal::lpNorm_selector<Derived, p>::run(*this);
-}
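-// [Editor's note] Minimal sketch of lpNorm() (not part of the original Eigen sources):
-// \code
-//   Eigen::Vector3d v(-1.0, 2.0, -2.0);
-//   double l1   = v.lpNorm<1>();               // 5.0: sum of absolute values
-//   double l2   = v.lpNorm<2>();               // 3.0: same as v.norm()
-//   double linf = v.lpNorm<Eigen::Infinity>(); // 2.0: largest absolute value
-// \endcode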
-
-//---------- implementation of isOrthogonal / isUnitary ----------
-
-/** \returns true if *this is approximately orthogonal to \a other,
-  *          within the precision given by \a prec.
-  *
-  * Example: \include MatrixBase_isOrthogonal.cpp
-  * Output: \verbinclude MatrixBase_isOrthogonal.out
-  */
-template<typename Derived>
-template<typename OtherDerived>
-bool MatrixBase<Derived>::isOrthogonal
-(const MatrixBase<OtherDerived>& other, const RealScalar& prec) const
-{
-  typename internal::nested<Derived,2>::type nested(derived());
-  typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
-  return internal::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
-}
-
-/** \returns true if *this is approximately a unitary matrix,
-  *          within the precision given by \a prec. When the \a Scalar
-  *          type is real, a unitary matrix is an orthogonal matrix, whence the name.
-  *
-  * \note This can be used to check whether a family of vectors forms an orthonormal basis.
-  *       Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an
-  *       orthonormal basis.
-  *
-  * Example: \include MatrixBase_isUnitary.cpp
-  * Output: \verbinclude MatrixBase_isUnitary.out
-  */
-template<typename Derived>
-bool MatrixBase<Derived>::isUnitary(const RealScalar& prec) const
-{
-  typename Derived::Nested nested(derived());
-  for(Index i = 0; i < cols(); ++i)
-  {
-    if(!internal::isApprox(nested.col(i).squaredNorm(), static_cast<RealScalar>(1), prec))
-      return false;
-    for(Index j = 0; j < i; ++j)
-      if(!internal::isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast<Scalar>(1), prec))
-        return false;
-  }
-  return true;
-}
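-// [Editor's note] Minimal sketch of the two checks above (not part of the original
-// Eigen sources):
-// \code
-//   Eigen::Vector3d x = Eigen::Vector3d::UnitX(), y = Eigen::Vector3d::UnitY();
-//   bool o = x.isOrthogonal(y);                                // true: x.dot(y) == 0
-//   bool u = Eigen::Matrix3d::Identity().isUnitary();          // true
-//   bool n = (2.0 * Eigen::Matrix3d::Identity()).isUnitary();  // false: columns are not unit length
-// \endcode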
-
-} // end namespace Eigen
-
-#endif // EIGEN_DOT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Functors.h b/resources/3rdparty/eigen/Eigen/src/Core/Functors.h
deleted file mode 100644
index 09388972a..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Functors.h
+++ /dev/null
@@ -1,975 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_FUNCTORS_H
-#define EIGEN_FUNCTORS_H
-
-namespace Eigen {
-
-namespace internal {
-
-// associative functors:
-
-/** \internal
-  * \brief Template functor to compute the sum of two scalars
-  *
-  * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, MatrixBase::sum()
-  */
-template<typename Scalar> struct scalar_sum_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op)
-  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
-  { return internal::padd(a,b); }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
-  { return internal::predux(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_sum_op<Scalar> > {
-  enum {
-    Cost = NumTraits<Scalar>::AddCost,
-    PacketAccess = packet_traits<Scalar>::HasAdd
-  };
-};
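-// [Editor's note] The Cost/PacketAccess entries above feed Eigen's unrolling and
-// vectorization heuristics. Hedged sketch of a user-defined binary functor following
-// the same pattern (the functor name and its traits values are ours, not Eigen's);
-// it can be applied through DenseBase::binaryExpr():
-// \code
-//   template<typename Scalar> struct scalar_absdiff_op {
-//     const Scalar operator()(const Scalar& a, const Scalar& b) const
-//     { using std::abs; return abs(a - b); }
-//   };
-//   namespace Eigen { namespace internal {
-//     template<typename Scalar> struct functor_traits<scalar_absdiff_op<Scalar> >
-//     { enum { Cost = 2 * NumTraits<Scalar>::AddCost, PacketAccess = false }; };
-//   } }
-//   // usage (x and y being two ArrayXd of equal size):
-//   // Eigen::ArrayXd d = x.binaryExpr(y, scalar_absdiff_op<double>());
-// \endcode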
-
-/** \internal
-  * \brief Template functor to compute the product of two scalars
-  *
-  * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux()
-  */
-template<typename LhsScalar,typename RhsScalar> struct scalar_product_op {
-  enum {
-    // TODO vectorize mixed product
-    Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasMul && packet_traits<RhsScalar>::HasMul
-  };
-  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op)
-  EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
-  { return internal::pmul(a,b); }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
-  { return internal::predux_mul(a); }
-};
-template<typename LhsScalar,typename RhsScalar>
-struct functor_traits<scalar_product_op<LhsScalar,RhsScalar> > {
-  enum {
-    Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost)/2, // rough estimate!
-    PacketAccess = scalar_product_op<LhsScalar,RhsScalar>::Vectorizable
-  };
-};
-
-/** \internal
-  * \brief Template functor to compute the conjugate product of two scalars
-  *
-  * This is a shortcut for conj(x) * y, which is needed for optimization purposes; in Eigen2 support mode, this becomes x * conj(y)
-  */
-template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op {
-
-  enum {
-    Conj = NumTraits<LhsScalar>::IsComplex
-  };
-  
-  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
-  
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op)
-  EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const
-  { return conj_helper<LhsScalar,RhsScalar,Conj,false>().pmul(a,b); }
-  
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
-  { return conj_helper<Packet,Packet,Conj,false>().pmul(a,b); }
-};
-template<typename LhsScalar,typename RhsScalar>
-struct functor_traits<scalar_conj_product_op<LhsScalar,RhsScalar> > {
-  enum {
-    Cost = NumTraits<LhsScalar>::MulCost,
-    PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMul
-  };
-};
-
-/** \internal
-  * \brief Template functor to compute the min of two scalars
-  *
-  * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class VectorwiseOp, MatrixBase::minCoeff()
-  */
-template<typename Scalar> struct scalar_min_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)
-  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return (min)(a, b); }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
-  { return internal::pmin(a,b); }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
-  { return internal::predux_min(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_min_op<Scalar> > {
-  enum {
-    Cost = NumTraits<Scalar>::AddCost,
-    PacketAccess = packet_traits<Scalar>::HasMin
-  };
-};
-
-/** \internal
-  * \brief Template functor to compute the max of two scalars
-  *
-  * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class VectorwiseOp, MatrixBase::maxCoeff()
-  */
-template<typename Scalar> struct scalar_max_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)
-  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return (max)(a, b); }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
-  { return internal::pmax(a,b); }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
-  { return internal::predux_max(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_max_op<Scalar> > {
-  enum {
-    Cost = NumTraits<Scalar>::AddCost,
-    PacketAccess = packet_traits<Scalar>::HasMax
-  };
-};
-
-/** \internal
-  * \brief Template functor to compute the hypot of two scalars
-  *
-  * \sa MatrixBase::stableNorm(), class Redux
-  */
-template<typename Scalar> struct scalar_hypot_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op)
-//   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const
-  {
-    using std::max;
-    using std::min;
-    Scalar p = (max)(_x, _y);
-    Scalar q = (min)(_x, _y);
-    Scalar qp = q/p;
-    return p * sqrt(Scalar(1) + qp*qp);
-  }
-};
-template<typename Scalar>
-struct functor_traits<scalar_hypot_op<Scalar> > {
-  enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess=0 };
-};
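-// [Editor's note] The rescaling above is the usual overflow/underflow guard: for
-// nonnegative inputs, with p = max(x,y) and q = min(x,y), p*sqrt(1+(q/p)^2) equals
-// sqrt(x^2+y^2) mathematically but never squares a huge value (it implicitly assumes
-// p != 0). Illustrative sketch, not part of the original Eigen sources:
-// \code
-//   double x = 1e200, y = 1e200;                      // x*x would overflow
-//   double naive  = std::sqrt(x*x + y*y);             // +inf
-//   double stable = scalar_hypot_op<double>()(x, y);  // ~1.4142e200
-// \endcode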
-
-/** \internal
-  * \brief Template functor to compute the pow of two scalars
-  */
-template<typename Scalar, typename OtherScalar> struct scalar_binary_pow_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_binary_pow_op)
-  inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return internal::pow(a, b); }
-};
-template<typename Scalar, typename OtherScalar>
-struct functor_traits<scalar_binary_pow_op<Scalar,OtherScalar> > {
-  enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
-};
-
-// other binary functors:
-
-/** \internal
-  * \brief Template functor to compute the difference of two scalars
-  *
-  * \sa class CwiseBinaryOp, MatrixBase::operator-
-  */
-template<typename Scalar> struct scalar_difference_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op)
-  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a - b; }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
-  { return internal::psub(a,b); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_difference_op<Scalar> > {
-  enum {
-    Cost = NumTraits<Scalar>::AddCost,
-    PacketAccess = packet_traits<Scalar>::HasSub
-  };
-};
-
-/** \internal
-  * \brief Template functor to compute the quotient of two scalars
-  *
-  * \sa class CwiseBinaryOp, Cwise::operator/()
-  */
-template<typename LhsScalar,typename RhsScalar> struct scalar_quotient_op {
-  enum {
-    // TODO vectorize mixed product
-    Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasDiv && packet_traits<RhsScalar>::HasDiv
-  };
-  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)
-  EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
-  { return internal::pdiv(a,b); }
-};
-template<typename LhsScalar,typename RhsScalar>
-struct functor_traits<scalar_quotient_op<LhsScalar,RhsScalar> > {
-  enum {
-    Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost), // rough estimate!
-    PacketAccess = scalar_quotient_op<LhsScalar,RhsScalar>::Vectorizable
-  };
-};
-
-
-
-/** \internal
-  * \brief Template functor to compute the and of two booleans
-  *
-  * \sa class CwiseBinaryOp, ArrayBase::operator&&
-  */
-struct scalar_boolean_and_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_and_op)
-  EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a && b; }
-};
-template<> struct functor_traits<scalar_boolean_and_op> {
-  enum {
-    Cost = NumTraits<bool>::AddCost,
-    PacketAccess = false
-  };
-};
-
-/** \internal
-  * \brief Template functor to compute the or of two booleans
-  *
-  * \sa class CwiseBinaryOp, ArrayBase::operator||
-  */
-struct scalar_boolean_or_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_or_op)
-  EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a || b; }
-};
-template<> struct functor_traits<scalar_boolean_or_op> {
-  enum {
-    Cost = NumTraits<bool>::AddCost,
-    PacketAccess = false
-  };
-};
-
-// unary functors:
-
-/** \internal
-  * \brief Template functor to compute the opposite of a scalar
-  *
-  * \sa class CwiseUnaryOp, MatrixBase::operator-
-  */
-template<typename Scalar> struct scalar_opposite_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_opposite_op)
-  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
-  { return internal::pnegate(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_opposite_op<Scalar> >
-{ enum {
-    Cost = NumTraits<Scalar>::AddCost,
-    PacketAccess = packet_traits<Scalar>::HasNegate };
-};
-
-/** \internal
-  * \brief Template functor to compute the absolute value of a scalar
-  *
-  * \sa class CwiseUnaryOp, Cwise::abs
-  */
-template<typename Scalar> struct scalar_abs_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op)
-  typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs(a); }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
-  { return internal::pabs(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_abs_op<Scalar> >
-{
-  enum {
-    Cost = NumTraits<Scalar>::AddCost,
-    PacketAccess = packet_traits<Scalar>::HasAbs
-  };
-};
-
-/** \internal
-  * \brief Template functor to compute the squared absolute value of a scalar
-  *
-  * \sa class CwiseUnaryOp, Cwise::abs2
-  */
-template<typename Scalar> struct scalar_abs2_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op)
-  typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs2(a); }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
-  { return internal::pmul(a,a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_abs2_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasAbs2 }; };
-
-/** \internal
-  * \brief Template functor to compute the conjugate of a complex value
-  *
-  * \sa class CwiseUnaryOp, MatrixBase::conjugate()
-  */
-template<typename Scalar> struct scalar_conjugate_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op)
-  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return internal::conj(a); }
-  template<typename Packet>
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_conjugate_op<Scalar> >
-{
-  enum {
-    Cost = NumTraits<Scalar>::IsComplex ? NumTraits<Scalar>::AddCost : 0,
-    PacketAccess = packet_traits<Scalar>::HasConj
-  };
-};
-
-/** \internal
-  * \brief Template functor to cast a scalar to another type
-  *
-  * \sa class CwiseUnaryOp, MatrixBase::cast()
-  */
-template<typename Scalar, typename NewType>
-struct scalar_cast_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
-  typedef NewType result_type;
-  EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return cast<Scalar, NewType>(a); }
-};
-template<typename Scalar, typename NewType>
-struct functor_traits<scalar_cast_op<Scalar,NewType> >
-{ enum { Cost = is_same<Scalar, NewType>::value ? 0 : NumTraits<NewType>::AddCost, PacketAccess = false }; };
-
-/** \internal
-  * \brief Template functor to extract the real part of a complex
-  *
-  * \sa class CwiseUnaryOp, MatrixBase::real()
-  */
-template<typename Scalar>
-struct scalar_real_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op)
-  typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::real(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_real_op<Scalar> >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-/** \internal
-  * \brief Template functor to extract the imaginary part of a complex
-  *
-  * \sa class CwiseUnaryOp, MatrixBase::imag()
-  */
-template<typename Scalar>
-struct scalar_imag_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op)
-  typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::imag(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_imag_op<Scalar> >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-/** \internal
-  * \brief Template functor to extract the real part of a complex as a reference
-  *
-  * \sa class CwiseUnaryOp, MatrixBase::real()
-  */
-template<typename Scalar>
-struct scalar_real_ref_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op)
-  typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::real_ref(*const_cast<Scalar*>(&a)); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_real_ref_op<Scalar> >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-/** \internal
-  * \brief Template functor to extract the imaginary part of a complex as a reference
-  *
-  * \sa class CwiseUnaryOp, MatrixBase::imag()
-  */
-template<typename Scalar>
-struct scalar_imag_ref_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op)
-  typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::imag_ref(*const_cast<Scalar*>(&a)); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_imag_ref_op<Scalar> >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-/** \internal
-  *
-  * \brief Template functor to compute the exponential of a scalar
-  *
-  * \sa class CwiseUnaryOp, Cwise::exp()
-  */
-template<typename Scalar> struct scalar_exp_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::exp(a); }
-  typedef typename packet_traits<Scalar>::type Packet;
-  inline Packet packetOp(const Packet& a) const { return internal::pexp(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_exp_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasExp }; };
-
-/** \internal
-  *
-  * \brief Template functor to compute the logarithm of a scalar
-  *
-  * \sa class CwiseUnaryOp, Cwise::log()
-  */
-template<typename Scalar> struct scalar_log_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::log(a); }
-  typedef typename packet_traits<Scalar>::type Packet;
-  inline Packet packetOp(const Packet& a) const { return internal::plog(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_log_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasLog }; };
-
-/** \internal
-  * \brief Template functor to multiply a scalar by a fixed other one
-  *
-  * \sa class CwiseUnaryOp, MatrixBase::operator*, MatrixBase::operator/
-  */
-/* NOTE why is doing the pset1() in packetOp() an optimization?
- * Indeed it seems better to declare m_other as a Packet and do the pset1() once
- * in the constructor. However, in practice:
- *  - GCC does not like m_other as a Packet and generates a load every time it needs it
- *  - on the other hand GCC is able to move the pset1() outside the loop :)
- *  - simpler code ;)
- * (ICC and gcc 4.4 seem to perform well in both cases; the issue is visible with y = a*x + b*y)
- */
-template<typename Scalar>
-struct scalar_multiple_op {
-  typedef typename packet_traits<Scalar>::type Packet;
-  // FIXME default copy constructors seem bugged with std::complex<>
-  EIGEN_STRONG_INLINE scalar_multiple_op(const scalar_multiple_op& other) : m_other(other.m_other) { }
-  EIGEN_STRONG_INLINE scalar_multiple_op(const Scalar& other) : m_other(other) { }
-  EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; }
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
-  { return internal::pmul(a, pset1<Packet>(m_other)); }
-  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
-};
-template<typename Scalar>
-struct functor_traits<scalar_multiple_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
-
-template<typename Scalar1, typename Scalar2>
-struct scalar_multiple2_op {
-  typedef typename scalar_product_traits<Scalar1,Scalar2>::ReturnType result_type;
-  EIGEN_STRONG_INLINE scalar_multiple2_op(const scalar_multiple2_op& other) : m_other(other.m_other) { }
-  EIGEN_STRONG_INLINE scalar_multiple2_op(const Scalar2& other) : m_other(other) { }
-  EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a * m_other; }
-  typename add_const_on_value_type<typename NumTraits<Scalar2>::Nested>::type m_other;
-};
-template<typename Scalar1,typename Scalar2>
-struct functor_traits<scalar_multiple2_op<Scalar1,Scalar2> >
-{ enum { Cost = NumTraits<Scalar1>::MulCost, PacketAccess = false }; };
-
-/** \internal
-  * \brief Template functor to divide a scalar by a fixed other one
-  *
-  * This functor is used to implement the quotient of a matrix by
-  * a scalar where the scalar type is not necessarily a floating point type.
-  *
-  * \sa class CwiseUnaryOp, MatrixBase::operator/
-  */
-template<typename Scalar>
-struct scalar_quotient1_op {
-  typedef typename packet_traits<Scalar>::type Packet;
-  // FIXME default copy constructors seem bugged with std::complex<>
-  EIGEN_STRONG_INLINE scalar_quotient1_op(const scalar_quotient1_op& other) : m_other(other.m_other) { }
-  EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other) : m_other(other) {}
-  EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; }
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
-  { return internal::pdiv(a, pset1<Packet>(m_other)); }
-  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
-};
-template<typename Scalar>
-struct functor_traits<scalar_quotient1_op<Scalar> >
-{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
-
-// nullary functors
-
-template<typename Scalar>
-struct scalar_constant_op {
-  typedef typename packet_traits<Scalar>::type Packet;
-  EIGEN_STRONG_INLINE scalar_constant_op(const scalar_constant_op& other) : m_other(other.m_other) { }
-  EIGEN_STRONG_INLINE scalar_constant_op(const Scalar& other) : m_other(other) { }
-  template<typename Index>
-  EIGEN_STRONG_INLINE const Scalar operator() (Index, Index = 0) const { return m_other; }
-  template<typename Index>
-  EIGEN_STRONG_INLINE const Packet packetOp(Index, Index = 0) const { return internal::pset1<Packet>(m_other); }
-  const Scalar m_other;
-};
-template<typename Scalar>
-struct functor_traits<scalar_constant_op<Scalar> >
-// FIXME replace this packet test by a safe one
-{ enum { Cost = 1, PacketAccess = packet_traits<Scalar>::Vectorizable, IsRepeatable = true }; };
-
-template<typename Scalar> struct scalar_identity_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_identity_op)
-  template<typename Index>
-  EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const { return row==col ? Scalar(1) : Scalar(0); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_identity_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };
-
-template <typename Scalar, bool RandomAccess> struct linspaced_op_impl;
-
-// linear access for packet ops:
-// 1) initialization
-//   base = [low, ..., low] + ([step, ..., step] * [-size, ..., 0])
-// 2) each step
-//   base += [size*step, ..., size*step]
-template <typename Scalar>
-struct linspaced_op_impl<Scalar,false>
-{
-  typedef typename packet_traits<Scalar>::type Packet;
-
-  linspaced_op_impl(Scalar low, Scalar step) :
-  m_low(low), m_step(step),
-  m_packetStep(pset1<Packet>(packet_traits<Scalar>::size*step)),
-  m_base(padd(pset1<Packet>(low),pmul(pset1<Packet>(step),plset<Scalar>(-packet_traits<Scalar>::size)))) {}
-
-  template<typename Index>
-  EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
-  template<typename Index>
-  EIGEN_STRONG_INLINE const Packet packetOp(Index) const { return m_base = padd(m_base,m_packetStep); }
-
-  const Scalar m_low;
-  const Scalar m_step;
-  const Packet m_packetStep;
-  mutable Packet m_base;
-};
-
-// random access for packet ops:
-// 1) each step
-//   [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) )
-template <typename Scalar>
-struct linspaced_op_impl<Scalar,true>
-{
-  typedef typename packet_traits<Scalar>::type Packet;
-
-  linspaced_op_impl(Scalar low, Scalar step) :
-  m_low(low), m_step(step),
-  m_lowPacket(pset1<Packet>(m_low)), m_stepPacket(pset1<Packet>(m_step)), m_interPacket(plset<Scalar>(0)) {}
-
-  template<typename Index>
-  EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
-
-  template<typename Index>
-  EIGEN_STRONG_INLINE const Packet packetOp(Index i) const
-  { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(i),m_interPacket))); }
-
-  const Scalar m_low;
-  const Scalar m_step;
-  const Packet m_lowPacket;
-  const Packet m_stepPacket;
-  const Packet m_interPacket;
-};
-
-// ----- Linspace functor ----------------------------------------------------------------
-
-// Forward declaration (we default to random access, which does not really give
-// us a speed gain when using packet access, but it allows the functor to be used
-// in nested expressions).
-template <typename Scalar, bool RandomAccess = true> struct linspaced_op;
-template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_op<Scalar,RandomAccess> >
-{ enum { Cost = 1, PacketAccess = packet_traits<Scalar>::HasSetLinear, IsRepeatable = true }; };
-template <typename Scalar, bool RandomAccess> struct linspaced_op
-{
-  typedef typename packet_traits<Scalar>::type Packet;
-  linspaced_op(Scalar low, Scalar high, int num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
-
-  template<typename Index>
-  EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }
-
-  // We need this function when assigning e.g. a RowVectorXd to a MatrixXd since
-  // in that case row==0 and col is used for the actual iteration.
-  template<typename Index>
-  EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const 
-  {
-    eigen_assert(col==0 || row==0);
-    return impl(col + row);
-  }
-
-  template<typename Index>
-  EIGEN_STRONG_INLINE const Packet packetOp(Index i) const { return impl.packetOp(i); }
-
-  // We need this function when assigning e.g. a RowVectorXd to a MatrixXd since
-  // in that case row==0 and col is used for the actual iteration.
-  template<typename Index>
-  EIGEN_STRONG_INLINE const Packet packetOp(Index row, Index col) const
-  {
-    eigen_assert(col==0 || row==0);
-    return impl.packetOp(col + row);
-  }
-
-  // This proxy object handles the actual required temporaries, the different
-  // implementations (random vs. sequential access) as well as the
-  // correct piping to size 2/4 packet operations.
-  const linspaced_op_impl<Scalar,RandomAccess> impl;
-};
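-// [Editor's note] This functor is what backs DenseBase::LinSpaced(); a minimal usage
-// sketch (not part of the original Eigen sources):
-// \code
-//   // five evenly spaced values: 0, 0.25, 0.5, 0.75, 1
-//   Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);
-//   // with num_steps == 1 the constructor above degenerates to the single value 'high'
-//   Eigen::VectorXd w = Eigen::VectorXd::LinSpaced(1, 0.0, 1.0);  // w(0) == 1.0
-// \endcode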
-
-// All functors allow linear access, except scalar_identity_op. So we define here a quick meta
-// to indicate whether a functor allows linear access, always answering 'yes' except for
-// scalar_identity_op.
-// FIXME move this to functor_traits adding a functor_default
-template<typename Functor> struct functor_has_linear_access { enum { ret = 1 }; };
-template<typename Scalar> struct functor_has_linear_access<scalar_identity_op<Scalar> > { enum { ret = 0 }; };
-
-// in CwiseBinaryOp, we require the Lhs and Rhs to have the same scalar type, except for multiplication
-// where we only require them to have the same _real_ scalar type so one may multiply, say, float by complex<float>.
-// FIXME move this to functor_traits adding a functor_default
-template<typename Functor> struct functor_allows_mixing_real_and_complex { enum { ret = 0 }; };
-template<typename LhsScalar,typename RhsScalar> struct functor_allows_mixing_real_and_complex<scalar_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
-template<typename LhsScalar,typename RhsScalar> struct functor_allows_mixing_real_and_complex<scalar_conj_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
-template<typename LhsScalar,typename RhsScalar> struct functor_allows_mixing_real_and_complex<scalar_quotient_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
-
-
-/** \internal
-  * \brief Template functor to add a scalar to a fixed other one
-  * \sa class CwiseUnaryOp, Array::operator+
-  */
-/* If you wonder why doing the pset1() in packetOp() is an optimization check scalar_multiple_op */
-template<typename Scalar>
-struct scalar_add_op {
-  typedef typename packet_traits<Scalar>::type Packet;
-  // FIXME default copy constructors seem bugged with std::complex<>
-  inline scalar_add_op(const scalar_add_op& other) : m_other(other.m_other) { }
-  inline scalar_add_op(const Scalar& other) : m_other(other) { }
-  inline Scalar operator() (const Scalar& a) const { return a + m_other; }
-  inline const Packet packetOp(const Packet& a) const
-  { return internal::padd(a, pset1<Packet>(m_other)); }
-  const Scalar m_other;
-};
-template<typename Scalar>
-struct functor_traits<scalar_add_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = packet_traits<Scalar>::HasAdd }; };
-
-/** \internal
-  * \brief Template functor to compute the square root of a scalar
-  * \sa class CwiseUnaryOp, Cwise::sqrt()
-  */
-template<typename Scalar> struct scalar_sqrt_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::sqrt(a); }
-  typedef typename packet_traits<Scalar>::type Packet;
-  inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_sqrt_op<Scalar> >
-{ enum {
-    Cost = 5 * NumTraits<Scalar>::MulCost,
-    PacketAccess = packet_traits<Scalar>::HasSqrt
-  };
-};
-
-/** \internal
-  * \brief Template functor to compute the cosine of a scalar
-  * \sa class CwiseUnaryOp, ArrayBase::cos()
-  */
-template<typename Scalar> struct scalar_cos_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op)
-  inline Scalar operator() (const Scalar& a) const { return internal::cos(a); }
-  typedef typename packet_traits<Scalar>::type Packet;
-  inline Packet packetOp(const Packet& a) const { return internal::pcos(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_cos_op<Scalar> >
-{
-  enum {
-    Cost = 5 * NumTraits<Scalar>::MulCost,
-    PacketAccess = packet_traits<Scalar>::HasCos
-  };
-};
-
-/** \internal
-  * \brief Template functor to compute the sine of a scalar
-  * \sa class CwiseUnaryOp, ArrayBase::sin()
-  */
-template<typename Scalar> struct scalar_sin_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::sin(a); }
-  typedef typename packet_traits<Scalar>::type Packet;
-  inline Packet packetOp(const Packet& a) const { return internal::psin(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_sin_op<Scalar> >
-{
-  enum {
-    Cost = 5 * NumTraits<Scalar>::MulCost,
-    PacketAccess = packet_traits<Scalar>::HasSin
-  };
-};
-
-
-/** \internal
-  * \brief Template functor to compute the tan of a scalar
-  * \sa class CwiseUnaryOp, ArrayBase::tan()
-  */
-template<typename Scalar> struct scalar_tan_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::tan(a); }
-  typedef typename packet_traits<Scalar>::type Packet;
-  inline Packet packetOp(const Packet& a) const { return internal::ptan(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_tan_op<Scalar> >
-{
-  enum {
-    Cost = 5 * NumTraits<Scalar>::MulCost,
-    PacketAccess = packet_traits<Scalar>::HasTan
-  };
-};
-
-/** \internal
-  * \brief Template functor to compute the arc cosine of a scalar
-  * \sa class CwiseUnaryOp, ArrayBase::acos()
-  */
-template<typename Scalar> struct scalar_acos_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::acos(a); }
-  typedef typename packet_traits<Scalar>::type Packet;
-  inline Packet packetOp(const Packet& a) const { return internal::pacos(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_acos_op<Scalar> >
-{
-  enum {
-    Cost = 5 * NumTraits<Scalar>::MulCost,
-    PacketAccess = packet_traits<Scalar>::HasACos
-  };
-};
-
-/** \internal
-  * \brief Template functor to compute the arc sine of a scalar
-  * \sa class CwiseUnaryOp, ArrayBase::asin()
-  */
-template<typename Scalar> struct scalar_asin_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::asin(a); }
-  typedef typename packet_traits<Scalar>::type Packet;
-  inline Packet packetOp(const Packet& a) const { return internal::pasin(a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_asin_op<Scalar> >
-{
-  enum {
-    Cost = 5 * NumTraits<Scalar>::MulCost,
-    PacketAccess = packet_traits<Scalar>::HasASin
-  };
-};
-
-/** \internal
-  * \brief Template functor to raise a scalar to a power
-  * \sa class CwiseUnaryOp, Cwise::pow
-  */
-template<typename Scalar>
-struct scalar_pow_op {
-  // FIXME default copy constructors seem bugged with std::complex<>
-  inline scalar_pow_op(const scalar_pow_op& other) : m_exponent(other.m_exponent) { }
-  inline scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {}
-  inline Scalar operator() (const Scalar& a) const { return internal::pow(a, m_exponent); }
-  const Scalar m_exponent;
-};
-template<typename Scalar>
-struct functor_traits<scalar_pow_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
-
-/** \internal
-  * \brief Template functor to compute the quotient between a scalar and array entries.
-  * \sa class CwiseUnaryOp, Cwise::inverse()
-  */
-template<typename Scalar>
-struct scalar_inverse_mult_op {
-  scalar_inverse_mult_op(const Scalar& other) : m_other(other) {}
-  inline Scalar operator() (const Scalar& a) const { return m_other / a; }
-  template<typename Packet>
-  inline const Packet packetOp(const Packet& a) const
-  { return internal::pdiv(pset1<Packet>(m_other),a); }
-  Scalar m_other;
-};
-
-/** \internal
-  * \brief Template functor to compute the inverse of a scalar
-  * \sa class CwiseUnaryOp, Cwise::inverse()
-  */
-template<typename Scalar>
-struct scalar_inverse_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_inverse_op)
-  inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; }
-  template<typename Packet>
-  inline const Packet packetOp(const Packet& a) const
-  { return internal::pdiv(pset1<Packet>(Scalar(1)),a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_inverse_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
-
-/** \internal
-  * \brief Template functor to compute the square of a scalar
-  * \sa class CwiseUnaryOp, Cwise::square()
-  */
-template<typename Scalar>
-struct scalar_square_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op)
-  inline Scalar operator() (const Scalar& a) const { return a*a; }
-  template<typename Packet>
-  inline const Packet packetOp(const Packet& a) const
-  { return internal::pmul(a,a); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_square_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
-
-/** \internal
-  * \brief Template functor to compute the cube of a scalar
-  * \sa class CwiseUnaryOp, Cwise::cube()
-  */
-template<typename Scalar>
-struct scalar_cube_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op)
-  inline Scalar operator() (const Scalar& a) const { return a*a*a; }
-  template<typename Packet>
-  inline const Packet packetOp(const Packet& a) const
-  { return internal::pmul(a,pmul(a,a)); }
-};
-template<typename Scalar>
-struct functor_traits<scalar_cube_op<Scalar> >
-{ enum { Cost = 2*NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
-
-// default functor traits for STL functors:
-
-template<typename T>
-struct functor_traits<std::multiplies<T> >
-{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::divides<T> >
-{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::plus<T> >
-{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::minus<T> >
-{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::negate<T> >
-{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::logical_or<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::logical_and<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::logical_not<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::greater<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::less<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::greater_equal<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::less_equal<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::equal_to<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::not_equal_to<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::binder2nd<T> >
-{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::binder1st<T> >
-{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::unary_negate<T> >
-{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
-
-template<typename T>
-struct functor_traits<std::binary_negate<T> >
-{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
-
-#ifdef EIGEN_STDEXT_SUPPORT
-
-template<typename T0,typename T1>
-struct functor_traits<std::project1st<T0,T1> >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-template<typename T0,typename T1>
-struct functor_traits<std::project2nd<T0,T1> >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-template<typename T0,typename T1>
-struct functor_traits<std::select2nd<std::pair<T0,T1> > >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-template<typename T0,typename T1>
-struct functor_traits<std::select1st<std::pair<T0,T1> > >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-template<typename T0,typename T1>
-struct functor_traits<std::unary_compose<T0,T1> >
-{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost, PacketAccess = false }; };
-
-template<typename T0,typename T1,typename T2>
-struct functor_traits<std::binary_compose<T0,T1,T2> >
-{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost + functor_traits<T2>::Cost, PacketAccess = false }; };
-
-#endif // EIGEN_STDEXT_SUPPORT
-
-// Allow adding new functors and specializations of functor_traits from outside Eigen.
-// This macro is really needed because functor_traits must be specialized after it is declared but before it is used...
-#ifdef EIGEN_FUNCTORS_PLUGIN
-#include EIGEN_FUNCTORS_PLUGIN
-#endif
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_FUNCTORS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Fuzzy.h b/resources/3rdparty/eigen/Eigen/src/Core/Fuzzy.h
deleted file mode 100644
index 8fb9a01dd..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Fuzzy.h
+++ /dev/null
@@ -1,150 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_FUZZY_H
-#define EIGEN_FUZZY_H
-
-namespace Eigen { 
-
-namespace internal
-{
-
-template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
-struct isApprox_selector
-{
-  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)
-  {
-    using std::min;
-    typename internal::nested<Derived,2>::type nested(x);
-    typename internal::nested<OtherDerived,2>::type otherNested(y);
-    return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
-  }
-};
-
-template<typename Derived, typename OtherDerived>
-struct isApprox_selector<Derived, OtherDerived, true>
-{
-  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&)
-  {
-    return x.matrix() == y.matrix();
-  }
-};
-
-template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
-struct isMuchSmallerThan_object_selector
-{
-  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)
-  {
-    return x.cwiseAbs2().sum() <= abs2(prec) * y.cwiseAbs2().sum();
-  }
-};
-
-template<typename Derived, typename OtherDerived>
-struct isMuchSmallerThan_object_selector<Derived, OtherDerived, true>
-{
-  static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&)
-  {
-    return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
-  }
-};
-
-template<typename Derived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
-struct isMuchSmallerThan_scalar_selector
-{
-  static bool run(const Derived& x, const typename Derived::RealScalar& y, const typename Derived::RealScalar& prec)
-  {
-    return x.cwiseAbs2().sum() <= abs2(prec * y);
-  }
-};
-
-template<typename Derived>
-struct isMuchSmallerThan_scalar_selector<Derived, true>
-{
-  static bool run(const Derived& x, const typename Derived::RealScalar&, const typename Derived::RealScalar&)
-  {
-    return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
-  }
-};
-
-} // end namespace internal
-
-
-/** \returns \c true if \c *this is approximately equal to \a other, within the precision
-  * determined by \a prec.
-  *
-  * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
-  * are considered to be approximately equal within precision \f$ p \f$ if
-  * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
-  * For matrices, the comparison is done using the Hilbert-Schmidt norm (also known as the
-  * Frobenius or L2 norm).
-  *
-  * \note Because of the multiplicative nature of this comparison, one can't use this function
-  * to check whether \c *this is approximately equal to the zero matrix or vector.
-  * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix
-  * or vector. If you want to test whether \c *this is zero, use internal::isMuchSmallerThan(const
-  * RealScalar&, RealScalar) instead.
-  *
-  * \sa internal::isMuchSmallerThan(const RealScalar&, RealScalar) const
-  */
-template<typename Derived>
-template<typename OtherDerived>
-bool DenseBase<Derived>::isApprox(
-  const DenseBase<OtherDerived>& other,
-  const RealScalar& prec
-) const
-{
-  return internal::isApprox_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
-}
-
-/** \returns \c true if the norm of \c *this is much smaller than \a other,
-  * within the precision determined by \a prec.
-  *
-  * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
-  * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if
-  * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f]
-  *
-  * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason,
-  * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm
-  * of a reference matrix of the same dimensions.
-  *
-  * \sa isApprox(), isMuchSmallerThan(const DenseBase<OtherDerived>&, RealScalar) const
-  */
-template<typename Derived>
-bool DenseBase<Derived>::isMuchSmallerThan(
-  const typename NumTraits<Scalar>::Real& other,
-  const RealScalar& prec
-) const
-{
-  return internal::isMuchSmallerThan_scalar_selector<Derived>::run(derived(), other, prec);
-}
-
-/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other,
-  * within the precision determined by \a prec.
-  *
-  * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
-  * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if
-  * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f]
-  * For matrices, the comparison is done using the Hilbert-Schmidt norm.
-  *
-  * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const
-  */
-template<typename Derived>
-template<typename OtherDerived>
-bool DenseBase<Derived>::isMuchSmallerThan(
-  const DenseBase<OtherDerived>& other,
-  const RealScalar& prec
-) const
-{
-  return internal::isMuchSmallerThan_object_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
-}
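-// [Editor's note] Illustrative sketch of the fuzzy comparisons above, including the
-// zero caveat from the isApprox() documentation (not part of the original Eigen sources):
-// \code
-//   Eigen::Vector2d a(1.0, 2.0), b(1.0, 2.0 + 1e-14), z = Eigen::Vector2d::Zero();
-//   bool e1 = a.isApprox(b);                            // true: relative comparison
-//   bool e2 = Eigen::Vector2d(1e-100, 0.0).isApprox(z); // false: only exact zero matches zero
-//   bool e3 = (a - b).isMuchSmallerThan(a.norm());      // true: the recommended "is it zero?" test
-// \endcode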
-
-} // end namespace Eigen
-
-#endif // EIGEN_FUZZY_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/GeneralProduct.h b/resources/3rdparty/eigen/Eigen/src/Core/GeneralProduct.h
deleted file mode 100644
index 9abc7b286..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/GeneralProduct.h
+++ /dev/null
@@ -1,613 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_GENERAL_PRODUCT_H
-#define EIGEN_GENERAL_PRODUCT_H
-
-namespace Eigen { 
-
-/** \class GeneralProduct
-  * \ingroup Core_Module
-  *
-  * \brief Expression of the product of two general matrices or vectors
-  *
-  * \param LhsNested the type used to store the left-hand side
-  * \param RhsNested the type used to store the right-hand side
-  * \param ProductMode the type of the product
-  *
-  * This class represents an expression of the product of two general matrices.
-  * By a general matrix we mean a dense matrix with full storage. For instance,
-  * this excludes triangular, selfadjoint, and sparse matrices.
-  * It is the return type of operator* between general matrices. Its template
-  * arguments are determined automatically by ProductReturnType. Therefore,
-  * GeneralProduct should never be used directly. To determine the result type of a
-  * function which involves a matrix product, use ProductReturnType::Type.
-  *
-  * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
-  */
-template<typename Lhs, typename Rhs, int ProductType = internal::product_type<Lhs,Rhs>::value>
-class GeneralProduct;
-
-enum {
-  Large = 2,
-  Small = 3
-};
-
-namespace internal {
-
-template<int Rows, int Cols, int Depth> struct product_type_selector;
-
-template<int Size, int MaxSize> struct product_size_category
-{
-  enum { is_large = MaxSize == Dynamic ||
-                    Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD,
-         value = is_large  ? Large
-               : Size == 1 ? 1
-                           : Small
-  };
-};
-
-template<typename Lhs, typename Rhs> struct product_type
-{
-  typedef typename remove_all<Lhs>::type _Lhs;
-  typedef typename remove_all<Rhs>::type _Rhs;
-  enum {
-    MaxRows  = _Lhs::MaxRowsAtCompileTime,
-    Rows  = _Lhs::RowsAtCompileTime,
-    MaxCols  = _Rhs::MaxColsAtCompileTime,
-    Cols  = _Rhs::ColsAtCompileTime,
-    MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime,
-                                           _Rhs::MaxRowsAtCompileTime),
-    Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime,
-                                        _Rhs::RowsAtCompileTime),
-    LargeThreshold = EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
-  };
-
-  // the splitting into different lines of code here, introducing the _select enums and the typedef below,
-  // is to work around an internal compiler error with gcc 4.1 and 4.2.
-private:
-  enum {
-    rows_select = product_size_category<Rows,MaxRows>::value,
-    cols_select = product_size_category<Cols,MaxCols>::value,
-    depth_select = product_size_category<Depth,MaxDepth>::value
-  };
-  typedef product_type_selector<rows_select, cols_select, depth_select> selector;
-
-public:
-  enum {
-    value = selector::ret
-  };
-#ifdef EIGEN_DEBUG_PRODUCT
-  static void debug()
-  {
-      EIGEN_DEBUG_VAR(Rows);
-      EIGEN_DEBUG_VAR(Cols);
-      EIGEN_DEBUG_VAR(Depth);
-      EIGEN_DEBUG_VAR(rows_select);
-      EIGEN_DEBUG_VAR(cols_select);
-      EIGEN_DEBUG_VAR(depth_select);
-      EIGEN_DEBUG_VAR(value);
-  }
-#endif
-};
-
-
-/* The following allows selecting the kind of product at compile time
- * based on the three dimensions of the product.
- * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */
-// FIXME I'm not sure the current mapping is the ideal one.
-template<int M, int N>  struct product_type_selector<M,N,1>              { enum { ret = OuterProduct }; };
-template<int Depth>     struct product_type_selector<1,    1,    Depth>  { enum { ret = InnerProduct }; };
-template<>              struct product_type_selector<1,    1,    1>      { enum { ret = InnerProduct }; };
-template<>              struct product_type_selector<Small,1,    Small>  { enum { ret = CoeffBasedProductMode }; };
-template<>              struct product_type_selector<1,    Small,Small>  { enum { ret = CoeffBasedProductMode }; };
-template<>              struct product_type_selector<Small,Small,Small>  { enum { ret = CoeffBasedProductMode }; };
-template<>              struct product_type_selector<Small, Small, 1>    { enum { ret = LazyCoeffBasedProductMode }; };
-template<>              struct product_type_selector<Small, Large, 1>    { enum { ret = LazyCoeffBasedProductMode }; };
-template<>              struct product_type_selector<Large, Small, 1>    { enum { ret = LazyCoeffBasedProductMode }; };
-template<>              struct product_type_selector<1,    Large,Small>  { enum { ret = CoeffBasedProductMode }; };
-template<>              struct product_type_selector<1,    Large,Large>  { enum { ret = GemvProduct }; };
-template<>              struct product_type_selector<1,    Small,Large>  { enum { ret = CoeffBasedProductMode }; };
-template<>              struct product_type_selector<Large,1,    Small>  { enum { ret = CoeffBasedProductMode }; };
-template<>              struct product_type_selector<Large,1,    Large>  { enum { ret = GemvProduct }; };
-template<>              struct product_type_selector<Small,1,    Large>  { enum { ret = CoeffBasedProductMode }; };
-template<>              struct product_type_selector<Small,Small,Large>  { enum { ret = GemmProduct }; };
-template<>              struct product_type_selector<Large,Small,Large>  { enum { ret = GemmProduct }; };
-template<>              struct product_type_selector<Small,Large,Large>  { enum { ret = GemmProduct }; };
-template<>              struct product_type_selector<Large,Large,Large>  { enum { ret = GemmProduct }; };
-template<>              struct product_type_selector<Large,Small,Small>  { enum { ret = GemmProduct }; };
-template<>              struct product_type_selector<Small,Large,Small>  { enum { ret = GemmProduct }; };
-template<>              struct product_type_selector<Large,Large,Small>  { enum { ret = GemmProduct }; };
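-// [Editor's note] Illustrative reading of the mapping above (not part of the original
-// Eigen sources; static_assert assumes a C++11 compiler):
-// \code
-//   using namespace Eigen;
-//   // all three dimensions Large (Dynamic)        -> GemmProduct
-//   static_assert(internal::product_type<MatrixXd, MatrixXd>::value == GemmProduct, "");
-//   // Large rows, a single column, Large depth    -> GemvProduct
-//   static_assert(internal::product_type<MatrixXd, VectorXd>::value == GemvProduct, "");
-//   // 1 x 1 result                                -> InnerProduct
-//   static_assert(internal::product_type<RowVectorXd, VectorXd>::value == InnerProduct, "");
-// \endcode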
-
-} // end namespace internal
-
-/** \class ProductReturnType
-  * \ingroup Core_Module
-  *
-  * \brief Helper class to get the correct and optimized returned type of operator*
-  *
-  * \param Lhs the type of the left-hand side
-  * \param Rhs the type of the right-hand side
-  * \param ProductMode the type of the product (determined automatically by internal::product_mode)
-  *
-  * This class defines the typename Type representing the optimized product expression
-  * between two matrix expressions. In practice, using ProductReturnType<Lhs,Rhs>::Type
-  * is the recommended way to define the result type of a function returning an expression
-  * which involves a matrix product. The class Product should never be
-  * used directly.
-  *
-  * \sa class Product, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
-  */
-template<typename Lhs, typename Rhs, int ProductType>
-struct ProductReturnType
-{
-  // TODO use the nested type to reduce instantiations ????
-//   typedef typename internal::nested<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
-//   typedef typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
-
-  typedef GeneralProduct<Lhs/*Nested*/, Rhs/*Nested*/, ProductType> Type;
-};
-
-template<typename Lhs, typename Rhs>
-struct ProductReturnType<Lhs,Rhs,CoeffBasedProductMode>
-{
-  typedef typename internal::nested<Lhs, Rhs::ColsAtCompileTime, typename internal::plain_matrix_type<Lhs>::type >::type LhsNested;
-  typedef typename internal::nested<Rhs, Lhs::RowsAtCompileTime, typename internal::plain_matrix_type<Rhs>::type >::type RhsNested;
-  typedef CoeffBasedProduct<LhsNested, RhsNested, EvalBeforeAssigningBit | EvalBeforeNestingBit> Type;
-};
-
-template<typename Lhs, typename Rhs>
-struct ProductReturnType<Lhs,Rhs,LazyCoeffBasedProductMode>
-{
-  typedef typename internal::nested<Lhs, Rhs::ColsAtCompileTime, typename internal::plain_matrix_type<Lhs>::type >::type LhsNested;
-  typedef typename internal::nested<Rhs, Lhs::RowsAtCompileTime, typename internal::plain_matrix_type<Rhs>::type >::type RhsNested;
-  typedef CoeffBasedProduct<LhsNested, RhsNested, NestByRefBit> Type;
-};
-
-// this is a workaround for sun CC
-template<typename Lhs, typename Rhs>
-struct LazyProductReturnType : public ProductReturnType<Lhs,Rhs,LazyCoeffBasedProductMode>
-{};
-
-/***********************************************************************
-*  Implementation of Inner Vector Vector Product
-***********************************************************************/
-
-// FIXME : maybe the "inner product" could return a Scalar
-// instead of a 1x1 matrix ??
-// Pro: more natural for the user
-// Cons: this could be a problem if in a meta-unrolled algorithm a matrix-matrix
-// product ends up as a row-vector times col-vector product... To tackle this use
-// case, we could have a specialization for Block<MatrixType,1,1> with: operator=(Scalar x);
-
-namespace internal {
-
-template<typename Lhs, typename Rhs>
-struct traits<GeneralProduct<Lhs,Rhs,InnerProduct> >
- : traits<Matrix<typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1> >
-{};
-
-}
-
-template<typename Lhs, typename Rhs>
-class GeneralProduct<Lhs, Rhs, InnerProduct>
-  : internal::no_assignment_operator,
-    public Matrix<typename internal::scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1>
-{
-    typedef Matrix<typename internal::scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1> Base;
-  public:
-    GeneralProduct(const Lhs& lhs, const Rhs& rhs)
-    {
-      EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
-        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-
-      Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum();
-    }
-
-    /** Conversion to scalar */
-    operator const typename Base::Scalar() const {
-      return Base::coeff(0,0);
-    }
-};
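Because this specialization stores the result in a 1x1 matrix and provides a conversion operator to the scalar type, a row-vector times column-vector product can be assigned directly to a scalar. A small sketch (illustrative only, not part of the patched sources):

#include <Eigen/Dense>

int main()
{
  Eigen::Vector3d u(1.0, 2.0, 3.0), v(4.0, 5.0, 6.0);
  double d1 = u.transpose() * v;   // 1x1 inner product, converted to a scalar (32)
  double d2 = u.dot(v);            // equivalent, and usually clearer
  return (d1 == d2) ? 0 : 1;
}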
-
-/***********************************************************************
-*  Implementation of Outer Vector Vector Product
-***********************************************************************/
-
-namespace internal {
-template<int StorageOrder> struct outer_product_selector;
-
-template<typename Lhs, typename Rhs>
-struct traits<GeneralProduct<Lhs,Rhs,OuterProduct> >
- : traits<ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs> >
-{};
-
-}
-
-template<typename Lhs, typename Rhs>
-class GeneralProduct<Lhs, Rhs, OuterProduct>
-  : public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
-{
-  public:
-    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
-
-    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
-    {
-      EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
-        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-    }
-
-    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
-    {
-      internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dest, alpha);
-    }
-};
-
-namespace internal {
-
-template<> struct outer_product_selector<ColMajor> {
-  template<typename ProductType, typename Dest>
-  static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
-    typedef typename Dest::Index Index;
-    // FIXME make sure lhs is sequentially stored
-    // FIXME not very good if rhs is real and lhs complex while alpha is real too
-    const Index cols = dest.cols();
-    for (Index j=0; j<cols; ++j)
-      dest.col(j) += (alpha * prod.rhs().coeff(j)) * prod.lhs();
-  }
-};
-
-template<> struct outer_product_selector<RowMajor> {
-  template<typename ProductType, typename Dest>
-  static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
-    typedef typename Dest::Index Index;
-    // FIXME make sure rhs is sequentially stored
-    // FIXME not very good if lhs is real and rhs complex while alpha is real too
-    const Index rows = dest.rows();
-    for (Index i=0; i<rows; ++i)
-      dest.row(i) += (alpha * prod.lhs().coeff(i)) * prod.rhs();
-  }
-};
-
-} // end namespace internal
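The column-major selector above evaluates an outer product u*v^T as a sequence of scaled column updates. A hand-rolled equivalent of that loop (illustrative only, not part of the patched sources):

#include <Eigen/Dense>

int main()
{
  Eigen::Vector3d u(1, 2, 3);
  Eigen::RowVector2d v(4, 5);
  double alpha = 1.0;

  Eigen::MatrixXd dest = Eigen::MatrixXd::Zero(3, 2);
  for (int j = 0; j < dest.cols(); ++j)          // same loop as outer_product_selector<ColMajor>
    dest.col(j) += (alpha * v(j)) * u;

  return (dest - u * v).norm() < 1e-12 ? 0 : 1;  // matches the direct outer product
}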
-
-/***********************************************************************
-*  Implementation of General Matrix Vector Product
-***********************************************************************/
-
-/*  According to the shape/flags of the matrix we have to distinguish 3 different cases:
- *   1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine
- *   2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine
- *   3 - all other cases are handled using a simple loop along the outer-storage direction.
- *  Therefore we need a lower level meta selector.
- *  Furthermore, if the matrix is the rhs, then the product has to be transposed.
- */
-namespace internal {
-
-template<typename Lhs, typename Rhs>
-struct traits<GeneralProduct<Lhs,Rhs,GemvProduct> >
- : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemvProduct>, Lhs, Rhs> >
-{};
-
-template<int Side, int StorageOrder, bool BlasCompatible>
-struct gemv_selector;
-
-} // end namespace internal
-
-template<typename Lhs, typename Rhs>
-class GeneralProduct<Lhs, Rhs, GemvProduct>
-  : public ProductBase<GeneralProduct<Lhs,Rhs,GemvProduct>, Lhs, Rhs>
-{
-  public:
-    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
-
-    typedef typename Lhs::Scalar LhsScalar;
-    typedef typename Rhs::Scalar RhsScalar;
-
-    GeneralProduct(const Lhs& a_lhs, const Rhs& a_rhs) : Base(a_lhs,a_rhs)
-    {
-//       EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::Scalar, typename Rhs::Scalar>::value),
-//         YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-    }
-
-    enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight };
-    typedef typename internal::conditional<int(Side)==OnTheRight,_LhsNested,_RhsNested>::type MatrixType;
-
-    template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
-    {
-      eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols());
-      internal::gemv_selector<Side,(int(MatrixType::Flags)&RowMajorBit) ? RowMajor : ColMajor,
-                       bool(internal::blas_traits<MatrixType>::HasUsableDirectAccess)>::run(*this, dst, alpha);
-    }
-};
-
-namespace internal {
-
-// The vector is on the left => transposition
-template<int StorageOrder, bool BlasCompatible>
-struct gemv_selector<OnTheLeft,StorageOrder,BlasCompatible>
-{
-  template<typename ProductType, typename Dest>
-  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
-  {
-    Transpose<Dest> destT(dest);
-    enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
-    gemv_selector<OnTheRight,OtherStorageOrder,BlasCompatible>
-      ::run(GeneralProduct<Transpose<const typename ProductType::_RhsNested>,Transpose<const typename ProductType::_LhsNested>, GemvProduct>
-        (prod.rhs().transpose(), prod.lhs().transpose()), destT, alpha);
-  }
-};
-
-template<typename Scalar,int Size,int MaxSize,bool Cond> struct gemv_static_vector_if;
-
-template<typename Scalar,int Size,int MaxSize>
-struct gemv_static_vector_if<Scalar,Size,MaxSize,false>
-{
-  EIGEN_STRONG_INLINE  Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; }
-};
-
-template<typename Scalar,int Size>
-struct gemv_static_vector_if<Scalar,Size,Dynamic,true>
-{
-  EIGEN_STRONG_INLINE Scalar* data() { return 0; }
-};
-
-template<typename Scalar,int Size,int MaxSize>
-struct gemv_static_vector_if<Scalar,Size,MaxSize,true>
-{
-  #if EIGEN_ALIGN_STATICALLY
-  internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize),0> m_data;
-  EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; }
-  #else
-  // Some architectures cannot align on the stack,
-  // => let's manually enforce alignment by allocating more data and returning the address of the first aligned element.
-  enum {
-    ForceAlignment  = internal::packet_traits<Scalar>::Vectorizable,
-    PacketSize      = internal::packet_traits<Scalar>::size
-  };
-  internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize)+(ForceAlignment?PacketSize:0),0> m_data;
-  EIGEN_STRONG_INLINE Scalar* data() {
-    return ForceAlignment
-            ? reinterpret_cast<Scalar*>((reinterpret_cast<size_t>(m_data.array) & ~(size_t(15))) + 16)
-            : m_data.array;
-  }
-  #endif
-};
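In the fallback branch above, alignment is enforced by over-allocating one packet and bumping the address to the next 16-byte boundary with (addr & ~15) + 16. A standalone illustration of that pointer arithmetic (not part of the patched sources):

#include <cassert>
#include <cstddef>

int main()
{
  unsigned char buffer[64 + 16];   // 16 spare bytes, like the ForceAlignment over-allocation
  std::size_t addr = reinterpret_cast<std::size_t>(buffer);
  unsigned char* aligned =
      reinterpret_cast<unsigned char*>((addr & ~std::size_t(15)) + 16);

  assert(reinterpret_cast<std::size_t>(aligned) % 16 == 0);  // 16-byte aligned
  assert(aligned > buffer && aligned <= buffer + 16);        // still within the spare bytes
  return 0;
}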
-
-template<> struct gemv_selector<OnTheRight,ColMajor,true>
-{
-  template<typename ProductType, typename Dest>
-  static inline void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
-  {
-    typedef typename ProductType::Index Index;
-    typedef typename ProductType::LhsScalar   LhsScalar;
-    typedef typename ProductType::RhsScalar   RhsScalar;
-    typedef typename ProductType::Scalar      ResScalar;
-    typedef typename ProductType::RealScalar  RealScalar;
-    typedef typename ProductType::ActualLhsType ActualLhsType;
-    typedef typename ProductType::ActualRhsType ActualRhsType;
-    typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
-    typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
-    typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;
-
-    ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs());
-    ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs());
-
-    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
-                                  * RhsBlasTraits::extractScalarFactor(prod.rhs());
-
-    enum {
-      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
-      // on the other hand, it is good for the cache to pack the vector anyway...
-      EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
-      ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
-      MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
-    };
-
-    gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
-
-    bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0));
-    bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
-    
-    RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
-
-    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
-                                                  evalToDest ? dest.data() : static_dest.data());
-    
-    if(!evalToDest)
-    {
-      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-      int size = dest.size();
-      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-      #endif
-      if(!alphaIsCompatible)
-      {
-        MappedDest(actualDestPtr, dest.size()).setZero();
-        compatibleAlpha = RhsScalar(1);
-      }
-      else
-        MappedDest(actualDestPtr, dest.size()) = dest;
-    }
-
-    general_matrix_vector_product
-      <Index,LhsScalar,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
-        actualLhs.rows(), actualLhs.cols(),
-        actualLhs.data(), actualLhs.outerStride(),
-        actualRhs.data(), actualRhs.innerStride(),
-        actualDestPtr, 1,
-        compatibleAlpha);
-
-    if (!evalToDest)
-    {
-      if(!alphaIsCompatible)
-        dest += actualAlpha * MappedDest(actualDestPtr, dest.size());
-      else
-        dest = MappedDest(actualDestPtr, dest.size());
-    }
-  }
-};
-
-template<> struct gemv_selector<OnTheRight,RowMajor,true>
-{
-  template<typename ProductType, typename Dest>
-  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
-  {
-    typedef typename ProductType::LhsScalar LhsScalar;
-    typedef typename ProductType::RhsScalar RhsScalar;
-    typedef typename ProductType::Scalar    ResScalar;
-    typedef typename ProductType::Index Index;
-    typedef typename ProductType::ActualLhsType ActualLhsType;
-    typedef typename ProductType::ActualRhsType ActualRhsType;
-    typedef typename ProductType::_ActualRhsType _ActualRhsType;
-    typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
-    typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
-
-    typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
-    typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
-
-    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
-                                  * RhsBlasTraits::extractScalarFactor(prod.rhs());
-
-    enum {
-      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
-      // on the other hand, it is good for the cache to pack the vector anyway...
-      DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1
-    };
-
-    gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;
-
-    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),
-        DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());
-
-    if(!DirectlyUseRhs)
-    {
-      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-      int size = actualRhs.size();
-      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-      #endif
-      Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
-    }
-
-    general_matrix_vector_product
-      <Index,LhsScalar,RowMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
-        actualLhs.rows(), actualLhs.cols(),
-        actualLhs.data(), actualLhs.outerStride(),
-        actualRhsPtr, 1,
-        dest.data(), dest.innerStride(),
-        actualAlpha);
-  }
-};
-
-template<> struct gemv_selector<OnTheRight,ColMajor,false>
-{
-  template<typename ProductType, typename Dest>
-  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
-  {
-    typedef typename Dest::Index Index;
-    // TODO make sure dest is sequentially stored in memory, otherwise use a temp
-    const Index size = prod.rhs().rows();
-    for(Index k=0; k<size; ++k)
-      dest += (alpha*prod.rhs().coeff(k)) * prod.lhs().col(k);
-  }
-};
-
-template<> struct gemv_selector<OnTheRight,RowMajor,false>
-{
-  template<typename ProductType, typename Dest>
-  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
-  {
-    typedef typename Dest::Index Index;
-    // TODO make sure rhs is sequentially stored in memory, otherwise use a temp
-    const Index rows = prod.rows();
-    for(Index i=0; i<rows; ++i)
-      dest.coeffRef(i) += alpha * (prod.lhs().row(i).cwiseProduct(prod.rhs().transpose())).sum();
-  }
-};
-
-} // end namespace internal
-
-/***************************************************************************
-* Implementation of matrix base methods
-***************************************************************************/
-
-/** \returns the matrix product of \c *this and \a other.
-  *
-  * \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().
-  *
-  * \sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*()
-  */
-template<typename Derived>
-template<typename OtherDerived>
-inline const typename ProductReturnType<Derived, OtherDerived>::Type
-MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
-{
-  // A note regarding the function declaration: in MSVC, this function will sometimes
-  // not be inlined, since DenseStorage is an unwindable object for dynamic
-  // matrices and product types hold a member to store the result.
-  // Thus, tagging this function with EIGEN_STRONG_INLINE does not help.
-  enum {
-    ProductIsValid =  Derived::ColsAtCompileTime==Dynamic
-                   || OtherDerived::RowsAtCompileTime==Dynamic
-                   || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
-    AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
-    SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
-  };
-  // note to the lost user:
-  //    * for a dot product use: v1.dot(v2)
-  //    * for a coeff-wise product use: v1.cwiseProduct(v2)
-  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
-    INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
-  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
-    INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
-  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
-#ifdef EIGEN_DEBUG_PRODUCT
-  internal::product_type<Derived,OtherDerived>::debug();
-#endif
-  return typename ProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
-}
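The static assertions above redirect users who write v1*v2 on two column vectors to the explicit alternatives. A short sketch of the valid spellings (illustrative only, not part of the patched sources):

#include <Eigen/Dense>

int main()
{
  Eigen::Vector3d v1(1, 2, 3), v2(4, 5, 6);
  // v1 * v2;                                  // rejected: INVALID_VECTOR_VECTOR_PRODUCT__...
  double dot = v1.dot(v2);                     // inner product: 32
  Eigen::Vector3d cw = v1.cwiseProduct(v2);    // coefficient-wise product: (4, 10, 18)
  Eigen::Matrix3d outer = v1 * v2.transpose(); // a valid 3x3 matrix (outer) product
  return (dot == 32.0 && cw(2) == 18.0 && outer(0, 0) == 4.0) ? 0 : 1;
}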
-
-/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation.
-  *
-  * The returned product will behave like any other expression: the coefficients of the product will be
-  * computed one at a time, as requested. This might be useful in some extremely rare cases when only
-  * a small and non-contiguous fraction of the result's coefficients has to be computed.
-  *
-  * \warning This version of the matrix product can be much, much slower. So use it only if you know
-  * what you are doing and you have measured a true speed improvement.
-  *
-  * \sa operator*(const MatrixBase&)
-  */
-template<typename Derived>
-template<typename OtherDerived>
-const typename LazyProductReturnType<Derived,OtherDerived>::Type
-MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
-{
-  enum {
-    ProductIsValid =  Derived::ColsAtCompileTime==Dynamic
-                   || OtherDerived::RowsAtCompileTime==Dynamic
-                   || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
-    AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
-    SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
-  };
-  // note to the lost user:
-  //    * for a dot product use: v1.dot(v2)
-  //    * for a coeff-wise product use: v1.cwiseProduct(v2)
-  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
-    INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
-  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
-    INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
-  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
-
-  return typename LazyProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
-}
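Since lazyProduct() returns a coefficient-based expression, reading a single entry does not evaluate the whole product. A small sketch (illustrative only, not part of the patched sources):

#include <Eigen/Dense>
#include <cmath>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 100);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(100, 100);

  // Only row 3 of A and column 7 of B are traversed here; no full temporary is formed.
  double c37 = A.lazyProduct(B)(3, 7);

  Eigen::MatrixXd C = A * B;                   // the default operator* evaluates everything
  return std::fabs(c37 - C(3, 7)) < 1e-9 ? 0 : 1;
}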
-
-} // end namespace Eigen
-
-#endif // EIGEN_PRODUCT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Map.h b/resources/3rdparty/eigen/Eigen/src/Core/Map.h
deleted file mode 100644
index 2b0a44697..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Map.h
+++ /dev/null
@@ -1,192 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MAP_H
-#define EIGEN_MAP_H
-
-namespace Eigen { 
-
-/** \class Map
-  * \ingroup Core_Module
-  *
-  * \brief A matrix or vector expression mapping an existing array of data.
-  *
-  * \tparam PlainObjectType the equivalent matrix type of the mapped data
-  * \tparam MapOptions specifies whether the pointer is \c #Aligned, or \c #Unaligned.
-  *                The default is \c #Unaligned.
-  * \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout
-  *                   of an ordinary, contiguous array. This can be overridden by specifying strides.
-  *                   The type passed here must be a specialization of the Stride template, see examples below.
-  *
-  * This class represents a matrix or vector expression mapping an existing array of data.
-  * It can be used to let Eigen interface without any overhead with non-Eigen data structures,
-  * such as plain C arrays or structures from other libraries. By default, it assumes that the
-  * data is laid out contiguously in memory. You can however override this by explicitly specifying
-  * inner and outer strides.
-  *
-  * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix:
-  * \include Map_simple.cpp
-  * Output: \verbinclude Map_simple.out
-  *
-  * If you need to map non-contiguous arrays, you can do so by specifying strides:
-  *
-  * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer
-  * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time
-  * fixed value.
-  * \include Map_inner_stride.cpp
-  * Output: \verbinclude Map_inner_stride.out
-  *
-  * Here's an example of mapping an array while specifying an outer stride. Since we're mapping
-  * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns,
-  * and in this example it is specified as a runtime parameter. Note that \c OuterStride<> is
-  * a short version of \c OuterStride<Dynamic>, because the default template parameter of OuterStride
-  * is \c Dynamic.
-  * \include Map_outer_stride.cpp
-  * Output: \verbinclude Map_outer_stride.out
-  *
-  * For more details and for an example of specifying both an inner and an outer stride, see class Stride.
-  *
-  * \b Tip: to change the array of data mapped by a Map object, you can use the C++
-  * placement new syntax:
-  *
-  * Example: \include Map_placement_new.cpp
-  * Output: \verbinclude Map_placement_new.out
-  *
-  * This class is the return type of PlainObjectBase::Map() but can also be used directly.
-  *
-  * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
-  */
-
-namespace internal {
-template<typename PlainObjectType, int MapOptions, typename StrideType>
-struct traits<Map<PlainObjectType, MapOptions, StrideType> >
-  : public traits<PlainObjectType>
-{
-  typedef traits<PlainObjectType> TraitsBase;
-  typedef typename PlainObjectType::Index Index;
-  typedef typename PlainObjectType::Scalar Scalar;
-  enum {
-    InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
-                             ? int(PlainObjectType::InnerStrideAtCompileTime)
-                             : int(StrideType::InnerStrideAtCompileTime),
-    OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
-                             ? int(PlainObjectType::OuterStrideAtCompileTime)
-                             : int(StrideType::OuterStrideAtCompileTime),
-    HasNoInnerStride = InnerStrideAtCompileTime == 1,
-    HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
-    HasNoStride = HasNoInnerStride && HasNoOuterStride,
-    IsAligned = bool(EIGEN_ALIGN) && ((int(MapOptions)&Aligned)==Aligned),
-    IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
-    KeepsPacketAccess = bool(HasNoInnerStride)
-                        && ( bool(IsDynamicSize)
-                           || HasNoOuterStride
-                           || ( OuterStrideAtCompileTime!=Dynamic
-                           && ((static_cast<int>(sizeof(Scalar))*OuterStrideAtCompileTime)%16)==0 ) ),
-    Flags0 = TraitsBase::Flags & (~NestByRefBit),
-    Flags1 = IsAligned ? (int(Flags0) | AlignedBit) : (int(Flags0) & ~AlignedBit),
-    Flags2 = (bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime))
-           ? int(Flags1) : int(Flags1 & ~LinearAccessBit),
-    Flags3 = is_lvalue<PlainObjectType>::value ? int(Flags2) : (int(Flags2) & ~LvalueBit),
-    Flags = KeepsPacketAccess ? int(Flags3) : (int(Flags3) & ~PacketAccessBit)
-  };
-private:
-  enum { Options }; // Expressions don't have Options
-};
-}
-
-template<typename PlainObjectType, int MapOptions, typename StrideType> class Map
-  : public MapBase<Map<PlainObjectType, MapOptions, StrideType> >
-{
-  public:
-
-    typedef MapBase<Map> Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Map)
-
-    typedef typename Base::PointerType PointerType;
-#if EIGEN2_SUPPORT_STAGE <= STAGE30_FULL_EIGEN3_API
-    typedef const Scalar* PointerArgType;
-    inline PointerType cast_to_pointer_type(PointerArgType ptr) { return const_cast<PointerType>(ptr); }
-#else
-    typedef PointerType PointerArgType;
-    inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; }
-#endif
-
-    inline Index innerStride() const
-    {
-      return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
-    }
-
-    inline Index outerStride() const
-    {
-      return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
-           : IsVectorAtCompileTime ? this->size()
-           : int(Flags)&RowMajorBit ? this->cols()
-           : this->rows();
-    }
-
-    /** Constructor in the fixed-size case.
-      *
-      * \param data pointer to the array to map
-      * \param stride optional Stride object, passing the strides.
-      */
-    inline Map(PointerArgType data, const StrideType& stride = StrideType())
-      : Base(cast_to_pointer_type(data)), m_stride(stride)
-    {
-      PlainObjectType::Base::_check_template_params();
-    }
-
-    /** Constructor in the dynamic-size vector case.
-      *
-      * \param dataPtr pointer to the array to map
-      * \param a_size the size of the vector expression
-      * \param a_stride optional Stride object, passing the strides.
-      */
-    inline Map(PointerArgType dataPtr, Index a_size, const StrideType& a_stride = StrideType())
-      : Base(cast_to_pointer_type(dataPtr), a_size), m_stride(a_stride)
-    {
-      PlainObjectType::Base::_check_template_params();
-    }
-
-    /** Constructor in the dynamic-size matrix case.
-      *
-      * \param dataPtr pointer to the array to map
-      * \param nbRows the number of rows of the matrix expression
-      * \param nbCols the number of columns of the matrix expression
-      * \param a_stride optional Stride object, passing the strides.
-      */
-    inline Map(PointerArgType dataPtr, Index nbRows, Index nbCols, const StrideType& a_stride = StrideType())
-      : Base(cast_to_pointer_type(dataPtr), nbRows, nbCols), m_stride(a_stride)
-    {
-      PlainObjectType::Base::_check_template_params();
-    }
-
-    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
-
-  protected:
-    StrideType m_stride;
-};
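A usage sketch for the Map class defined above, mapping a plain C array with and without an outer stride (illustrative only, not part of the patched sources):

#include <Eigen/Dense>
#include <cstdio>

int main()
{
  double data[] = {1, 2, 3, 4, 5, 6, 7, 8};

  // Contiguous, column-major 2x4 view of the array.
  Eigen::Map<Eigen::MatrixXd> m(data, 2, 4);

  // 2x2 view with a runtime outer stride of 4: columns start four entries apart,
  // so column 0 is data[0..1] and column 1 is data[4..5].
  Eigen::Map<Eigen::MatrixXd, Eigen::Unaligned, Eigen::OuterStride<> >
      s(data, 2, 2, Eigen::OuterStride<>(4));

  std::printf("%g %g\n", m(1, 2), s(1, 1));   // both print 6
  return 0;
}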
-
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-inline Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>
-  ::Array(const Scalar *data)
-{
-  this->_set_noalias(Eigen::Map<const Array>(data));
-}
-
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-inline Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>
-  ::Matrix(const Scalar *data)
-{
-  this->_set_noalias(Eigen::Map<const Matrix>(data));
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_MAP_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/MapBase.h b/resources/3rdparty/eigen/Eigen/src/Core/MapBase.h
deleted file mode 100644
index 6876de588..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/MapBase.h
+++ /dev/null
@@ -1,242 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MAPBASE_H
-#define EIGEN_MAPBASE_H
-
-#define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \
-      EIGEN_STATIC_ASSERT((int(internal::traits<Derived>::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \
-                          YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT)
-
-namespace Eigen { 
-
-/** \class MapBase
-  * \ingroup Core_Module
-  *
-  * \brief Base class for Map and Block expressions with direct access
-  *
-  * \sa class Map, class Block
-  */
-template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
-  : public internal::dense_xpr_base<Derived>::type
-{
-  public:
-
-    typedef typename internal::dense_xpr_base<Derived>::type Base;
-    enum {
-      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
-      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
-      SizeAtCompileTime = Base::SizeAtCompileTime
-    };
-
-    typedef typename internal::traits<Derived>::StorageKind StorageKind;
-    typedef typename internal::traits<Derived>::Index Index;
-    typedef typename internal::traits<Derived>::Scalar Scalar;
-    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-    typedef typename internal::conditional<
-                         bool(internal::is_lvalue<Derived>::value),
-                         Scalar *,
-                         const Scalar *>::type
-                     PointerType;
-
-    using Base::derived;
-//    using Base::RowsAtCompileTime;
-//    using Base::ColsAtCompileTime;
-//    using Base::SizeAtCompileTime;
-    using Base::MaxRowsAtCompileTime;
-    using Base::MaxColsAtCompileTime;
-    using Base::MaxSizeAtCompileTime;
-    using Base::IsVectorAtCompileTime;
-    using Base::Flags;
-    using Base::IsRowMajor;
-
-    using Base::rows;
-    using Base::cols;
-    using Base::size;
-    using Base::coeff;
-    using Base::coeffRef;
-    using Base::lazyAssign;
-    using Base::eval;
-
-    using Base::innerStride;
-    using Base::outerStride;
-    using Base::rowStride;
-    using Base::colStride;
-
-    // bug 217 - compile error on ICC 11.1
-    using Base::operator=;
-
-    typedef typename Base::CoeffReturnType CoeffReturnType;
-
-    inline Index rows() const { return m_rows.value(); }
-    inline Index cols() const { return m_cols.value(); }
-
-    /** Returns a pointer to the first coefficient of the matrix or vector.
-      *
-      * \note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride().
-      *
-      * \sa innerStride(), outerStride()
-      */
-    inline const Scalar* data() const { return m_data; }
-
-    inline const Scalar& coeff(Index rowId, Index colId) const
-    {
-      return m_data[colId * colStride() + rowId * rowStride()];
-    }
-
-    inline const Scalar& coeff(Index index) const
-    {
-      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
-      return m_data[index * innerStride()];
-    }
-
-    inline const Scalar& coeffRef(Index rowId, Index colId) const
-    {
-      return this->m_data[colId * colStride() + rowId * rowStride()];
-    }
-
-    inline const Scalar& coeffRef(Index index) const
-    {
-      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
-      return this->m_data[index * innerStride()];
-    }
-
-    template<int LoadMode>
-    inline PacketScalar packet(Index rowId, Index colId) const
-    {
-      return internal::ploadt<PacketScalar, LoadMode>
-               (m_data + (colId * colStride() + rowId * rowStride()));
-    }
-
-    template<int LoadMode>
-    inline PacketScalar packet(Index index) const
-    {
-      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
-      return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
-    }
-
-    inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
-    {
-      EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
-      checkSanity();
-    }
-
-    inline MapBase(PointerType dataPtr, Index vecSize)
-            : m_data(dataPtr),
-              m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)),
-              m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime))
-    {
-      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-      eigen_assert(vecSize >= 0);
-      eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize);
-      checkSanity();
-    }
-
-    inline MapBase(PointerType dataPtr, Index nbRows, Index nbCols)
-            : m_data(dataPtr), m_rows(nbRows), m_cols(nbCols)
-    {
-      eigen_assert( (dataPtr == 0)
-              || (   nbRows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == nbRows)
-                  && nbCols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == nbCols)));
-      checkSanity();
-    }
-
-  protected:
-
-    void checkSanity() const
-    {
-      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(internal::traits<Derived>::Flags&PacketAccessBit,
-                                        internal::inner_stride_at_compile_time<Derived>::ret==1),
-                          PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
-      eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % 16) == 0)
-                   && "data is not aligned");
-    }
-
-    PointerType m_data;
-    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
-    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
-};
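The coeff() accessors above address the mapped memory as data[col*colStride() + row*rowStride()]; for a column-major map that is the familiar col*outerStride() + row. A small check of that addressing (illustrative only, not part of the patched sources):

#include <Eigen/Dense>
#include <cassert>

int main()
{
  double data[12];
  for (int i = 0; i < 12; ++i) data[i] = i;

  Eigen::Map<Eigen::MatrixXd, Eigen::Unaligned, Eigen::OuterStride<> >
      m(data, 3, 3, Eigen::OuterStride<>(4));  // 3x3 view, columns four entries apart

  // MapBase computes m(r,c) as data[c * outerStride() + r * innerStride()].
  assert(m(2, 1) == data[1 * m.outerStride() + 2 * m.innerStride()]);
  return 0;
}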
-
-template<typename Derived> class MapBase<Derived, WriteAccessors>
-  : public MapBase<Derived, ReadOnlyAccessors>
-{
-  public:
-
-    typedef MapBase<Derived, ReadOnlyAccessors> Base;
-
-    typedef typename Base::Scalar Scalar;
-    typedef typename Base::PacketScalar PacketScalar;
-    typedef typename Base::Index Index;
-    typedef typename Base::PointerType PointerType;
-
-    using Base::derived;
-    using Base::rows;
-    using Base::cols;
-    using Base::size;
-    using Base::coeff;
-    using Base::coeffRef;
-
-    using Base::innerStride;
-    using Base::outerStride;
-    using Base::rowStride;
-    using Base::colStride;
-
-    typedef typename internal::conditional<
-                    internal::is_lvalue<Derived>::value,
-                    Scalar,
-                    const Scalar
-                  >::type ScalarWithConstIfNotLvalue;
-
-    inline const Scalar* data() const { return this->m_data; }
-    inline ScalarWithConstIfNotLvalue* data() { return this->m_data; } // no const-cast here so non-const-correct code will give a compile error
-
-    inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col)
-    {
-      return this->m_data[col * colStride() + row * rowStride()];
-    }
-
-    inline ScalarWithConstIfNotLvalue& coeffRef(Index index)
-    {
-      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
-      return this->m_data[index * innerStride()];
-    }
-
-    template<int StoreMode>
-    inline void writePacket(Index row, Index col, const PacketScalar& val)
-    {
-      internal::pstoret<Scalar, PacketScalar, StoreMode>
-               (this->m_data + (col * colStride() + row * rowStride()), val);
-    }
-
-    template<int StoreMode>
-    inline void writePacket(Index index, const PacketScalar& val)
-    {
-      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
-      internal::pstoret<Scalar, PacketScalar, StoreMode>
-                (this->m_data + index * innerStride(), val);
-    }
-
-    explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {}
-    inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {}
-    inline MapBase(PointerType dataPtr, Index nbRows, Index nbCols) : Base(dataPtr, nbRows, nbCols) {}
-
-    Derived& operator=(const MapBase& other)
-    {
-      Base::Base::operator=(other);
-      return derived();
-    }
-
-    using Base::Base::operator=;
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_MAPBASE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/MathFunctions.h b/resources/3rdparty/eigen/Eigen/src/Core/MathFunctions.h
deleted file mode 100644
index 5b57c2ff2..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/MathFunctions.h
+++ /dev/null
@@ -1,889 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MATHFUNCTIONS_H
-#define EIGEN_MATHFUNCTIONS_H
-
-namespace Eigen {
-
-namespace internal {
-
-/** \internal \struct global_math_functions_filtering_base
-  *
-  * What it does:
-  * Defines a typedef 'type' as follows:
-  * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then
-  *   global_math_functions_filtering_base<T>::type is a typedef for it.
-  * - otherwise, global_math_functions_filtering_base<T>::type is a typedef for T.
-  *
-  * How it's used:
-  * To allow defining the global math functions (like sin...) in certain cases, like the Array expressions.
-  * When you do sin(array1+array2), the object array1+array2 has a complicated expression type, and all you want to know
-  * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase<Derived>.
-  * So we must make sure to use sin_impl<ArrayBase<Derived> > and not sin_impl<Derived>, otherwise our partial specialization
-  * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it.
-  *
-  * How it's implemented:
-  * SFINAE in the style of enable_if. Highly susceptible to breaking compilers. With GCC, it sure does work, but if you replace
-  * the typename dummy by an integer template parameter, it doesn't work anymore!
-  */
-
-template<typename T, typename dummy = void>
-struct global_math_functions_filtering_base
-{
-  typedef T type;
-};
-
-template<typename T> struct always_void { typedef void type; };
-
-template<typename T>
-struct global_math_functions_filtering_base
-  <T,
-   typename always_void<typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl>::type
-  >
-{
-  typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type;
-};
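A miniature, self-contained version of the dispatch described above: pick a "filtering base" typedef when the type provides one, otherwise the type itself. All names here (filtering_base, special_base, HasBase, NoBase, demo_always_void) are invented for the demonstration and are not Eigen API:

#include <iostream>
#include <typeinfo>

template<typename T> struct demo_always_void { typedef void type; };

template<typename T, typename dummy = void>
struct filtering_base { typedef T type; };           // default: the type itself

template<typename T>
struct filtering_base<T, typename demo_always_void<typename T::special_base>::type>
{ typedef typename T::special_base type; };          // chosen when T::special_base exists

struct Base {};
struct HasBase { typedef Base special_base; };
struct NoBase {};

int main()
{
  // Prints the (possibly mangled) names of Base and NoBase respectively.
  std::cout << typeid(filtering_base<HasBase>::type).name() << "\n"
            << typeid(filtering_base<NoBase>::type).name()  << "\n";
  return 0;
}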
-
-#define EIGEN_MATHFUNC_IMPL(func, scalar) func##_impl<typename global_math_functions_filtering_base<scalar>::type>
-#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename func##_retval<typename global_math_functions_filtering_base<scalar>::type>::type
-
-
-/****************************************************************************
-* Implementation of real                                                 *
-****************************************************************************/
-
-template<typename Scalar>
-struct real_impl
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  static inline RealScalar run(const Scalar& x)
-  {
-    return x;
-  }
-};
-
-template<typename RealScalar>
-struct real_impl<std::complex<RealScalar> >
-{
-  static inline RealScalar run(const std::complex<RealScalar>& x)
-  {
-    using std::real;
-    return real(x);
-  }
-};
-
-template<typename Scalar>
-struct real_retval
-{
-  typedef typename NumTraits<Scalar>::Real type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of imag                                                 *
-****************************************************************************/
-
-template<typename Scalar>
-struct imag_impl
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  static inline RealScalar run(const Scalar&)
-  {
-    return RealScalar(0);
-  }
-};
-
-template<typename RealScalar>
-struct imag_impl<std::complex<RealScalar> >
-{
-  static inline RealScalar run(const std::complex<RealScalar>& x)
-  {
-    using std::imag;
-    return imag(x);
-  }
-};
-
-template<typename Scalar>
-struct imag_retval
-{
-  typedef typename NumTraits<Scalar>::Real type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of real_ref                                             *
-****************************************************************************/
-
-template<typename Scalar>
-struct real_ref_impl
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  static inline RealScalar& run(Scalar& x)
-  {
-    return reinterpret_cast<RealScalar*>(&x)[0];
-  }
-  static inline const RealScalar& run(const Scalar& x)
-  {
-    return reinterpret_cast<const RealScalar*>(&x)[0];
-  }
-};
-
-template<typename Scalar>
-struct real_ref_retval
-{
-  typedef typename NumTraits<Scalar>::Real & type;
-};
-
-template<typename Scalar>
-inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x)
-{
-  return real_ref_impl<Scalar>::run(x);
-}
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of imag_ref                                             *
-****************************************************************************/
-
-template<typename Scalar, bool IsComplex>
-struct imag_ref_default_impl
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  static inline RealScalar& run(Scalar& x)
-  {
-    return reinterpret_cast<RealScalar*>(&x)[1];
-  }
-  static inline const RealScalar& run(const Scalar& x)
-  {
-    return reinterpret_cast<const RealScalar*>(&x)[1];
-  }
-};
-
-template<typename Scalar>
-struct imag_ref_default_impl<Scalar, false>
-{
-  static inline Scalar run(Scalar&)
-  {
-    return Scalar(0);
-  }
-  static inline const Scalar run(const Scalar&)
-  {
-    return Scalar(0);
-  }
-};
-
-template<typename Scalar>
-struct imag_ref_impl : imag_ref_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {};
-
-template<typename Scalar>
-struct imag_ref_retval
-{
-  typedef typename NumTraits<Scalar>::Real & type;
-};
-
-template<typename Scalar>
-inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x)
-{
-  return imag_ref_impl<Scalar>::run(x);
-}
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of conj                                                 *
-****************************************************************************/
-
-template<typename Scalar>
-struct conj_impl
-{
-  static inline Scalar run(const Scalar& x)
-  {
-    return x;
-  }
-};
-
-template<typename RealScalar>
-struct conj_impl<std::complex<RealScalar> >
-{
-  static inline std::complex<RealScalar> run(const std::complex<RealScalar>& x)
-  {
-    using std::conj;
-    return conj(x);
-  }
-};
-
-template<typename Scalar>
-struct conj_retval
-{
-  typedef Scalar type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of abs                                                  *
-****************************************************************************/
-
-template<typename Scalar>
-struct abs_impl
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  static inline RealScalar run(const Scalar& x)
-  {
-    using std::abs;
-    return abs(x);
-  }
-};
-
-template<typename Scalar>
-struct abs_retval
-{
-  typedef typename NumTraits<Scalar>::Real type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(abs, Scalar) abs(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(abs, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of abs2                                                 *
-****************************************************************************/
-
-template<typename Scalar>
-struct abs2_impl
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  static inline RealScalar run(const Scalar& x)
-  {
-    return x*x;
-  }
-};
-
-template<typename RealScalar>
-struct abs2_impl<std::complex<RealScalar> >
-{
-  static inline RealScalar run(const std::complex<RealScalar>& x)
-  {
-    return real(x)*real(x) + imag(x)*imag(x);
-  }
-};
-
-template<typename Scalar>
-struct abs2_retval
-{
-  typedef typename NumTraits<Scalar>::Real type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of norm1                                                *
-****************************************************************************/
-
-template<typename Scalar, bool IsComplex>
-struct norm1_default_impl
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  static inline RealScalar run(const Scalar& x)
-  {
-    return abs(real(x)) + abs(imag(x));
-  }
-};
-
-template<typename Scalar>
-struct norm1_default_impl<Scalar, false>
-{
-  static inline Scalar run(const Scalar& x)
-  {
-    return abs(x);
-  }
-};
-
-template<typename Scalar>
-struct norm1_impl : norm1_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {};
-
-template<typename Scalar>
-struct norm1_retval
-{
-  typedef typename NumTraits<Scalar>::Real type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of hypot                                                *
-****************************************************************************/
-
-template<typename Scalar>
-struct hypot_impl
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  static inline RealScalar run(const Scalar& x, const Scalar& y)
-  {
-    using std::max;
-    using std::min;
-    RealScalar _x = abs(x);
-    RealScalar _y = abs(y);
-    RealScalar p = (max)(_x, _y);
-    RealScalar q = (min)(_x, _y);
-    RealScalar qp = q/p;
-    return p * sqrt(RealScalar(1) + qp*qp);
-  }
-};
-
-template<typename Scalar>
-struct hypot_retval
-{
-  typedef typename NumTraits<Scalar>::Real type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y)
-{
-  return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
-}
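The scaling in hypot_impl above, p*sqrt(1 + (q/p)^2) with p = max(|x|,|y|), avoids the overflow that a naive sqrt(x*x + y*y) hits for large inputs. A small numeric illustration (not part of the patched sources):

#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
  double x = 3e200, y = 4e200;
  double naive = std::sqrt(x * x + y * y);                  // x*x overflows to +inf
  double p = std::max(std::fabs(x), std::fabs(y));
  double q = std::min(std::fabs(x), std::fabs(y));
  double scaled = p * std::sqrt(1.0 + (q / p) * (q / p));   // what hypot_impl::run computes
  std::printf("naive=%g scaled=%g\n", naive, scaled);       // inf vs 5e+200
  return 0;
}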
-
-/****************************************************************************
-* Implementation of cast                                                 *
-****************************************************************************/
-
-template<typename OldType, typename NewType>
-struct cast_impl
-{
-  static inline NewType run(const OldType& x)
-  {
-    return static_cast<NewType>(x);
-  }
-};
-
-// here, for once, we're plainly returning NewType: we don't want cast to do weird things.
-
-template<typename OldType, typename NewType>
-inline NewType cast(const OldType& x)
-{
-  return cast_impl<OldType, NewType>::run(x);
-}
-
-/****************************************************************************
-* Implementation of sqrt                                                 *
-****************************************************************************/
-
-template<typename Scalar, bool IsInteger>
-struct sqrt_default_impl
-{
-  static inline Scalar run(const Scalar& x)
-  {
-    using std::sqrt;
-    return sqrt(x);
-  }
-};
-
-template<typename Scalar>
-struct sqrt_default_impl<Scalar, true>
-{
-  static inline Scalar run(const Scalar&)
-  {
-#ifdef EIGEN2_SUPPORT
-    eigen_assert(!NumTraits<Scalar>::IsInteger);
-#else
-    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
-#endif
-    return Scalar(0);
-  }
-};
-
-template<typename Scalar>
-struct sqrt_impl : sqrt_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
-
-template<typename Scalar>
-struct sqrt_retval
-{
-  typedef Scalar type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of standard unary real functions (exp, log, sin, cos, ...)  *
-****************************************************************************/
-
-// This macro instantiates all the template machinery that is common to all unary real functions.
-#define EIGEN_MATHFUNC_STANDARD_REAL_UNARY(NAME) \
-  template<typename Scalar, bool IsInteger> struct NAME##_default_impl {            \
-    static inline Scalar run(const Scalar& x) { using std::NAME; return NAME(x); }  \
-  };                                                                                \
-  template<typename Scalar> struct NAME##_default_impl<Scalar, true> {              \
-    static inline Scalar run(const Scalar&) {                                       \
-      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)                                       \
-      return Scalar(0);                                                             \
-    }                                                                               \
-  };                                                                                \
-  template<typename Scalar> struct NAME##_impl                                      \
-    : NAME##_default_impl<Scalar, NumTraits<Scalar>::IsInteger>                     \
-  {};                                                                               \
-  template<typename Scalar> struct NAME##_retval { typedef Scalar type; };          \
-  template<typename Scalar>                                                         \
-  inline EIGEN_MATHFUNC_RETVAL(NAME, Scalar) NAME(const Scalar& x) {                \
-    return EIGEN_MATHFUNC_IMPL(NAME, Scalar)::run(x);                               \
-  }
-
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(exp)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(log)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(sin)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(cos)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(tan)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(asin)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(acos)
-
-/****************************************************************************
-* Implementation of atan2                                                *
-****************************************************************************/
-
-template<typename Scalar, bool IsInteger>
-struct atan2_default_impl
-{
-  typedef Scalar retval;
-  static inline Scalar run(const Scalar& x, const Scalar& y)
-  {
-    using std::atan2;
-    return atan2(x, y);
-  }
-};
-
-template<typename Scalar>
-struct atan2_default_impl<Scalar, true>
-{
-  static inline Scalar run(const Scalar&, const Scalar&)
-  {
-    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
-    return Scalar(0);
-  }
-};
-
-template<typename Scalar>
-struct atan2_impl : atan2_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
-
-template<typename Scalar>
-struct atan2_retval
-{
-  typedef Scalar type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(atan2, Scalar) atan2(const Scalar& x, const Scalar& y)
-{
-  return EIGEN_MATHFUNC_IMPL(atan2, Scalar)::run(x, y);
-}
-
-/****************************************************************************
-* Implementation of atanh2                                                *
-****************************************************************************/
-
-template<typename Scalar, bool IsInteger>
-struct atanh2_default_impl
-{
-  typedef Scalar retval;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  static inline Scalar run(const Scalar& x, const Scalar& y)
-  {
-    using std::abs;
-    using std::log;
-    using std::sqrt;
-    Scalar z = x / y;
-    if (abs(z) > sqrt(NumTraits<RealScalar>::epsilon()))
-      return RealScalar(0.5) * log((y + x) / (y - x));
-    else
-      return z + z*z*z / RealScalar(3);
-  }
-};
-
-template<typename Scalar>
-struct atanh2_default_impl<Scalar, true>
-{
-  static inline Scalar run(const Scalar&, const Scalar&)
-  {
-    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
-    return Scalar(0);
-  }
-};
-
-template<typename Scalar>
-struct atanh2_impl : atanh2_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
-
-template<typename Scalar>
-struct atanh2_retval
-{
-  typedef Scalar type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(atanh2, Scalar) atanh2(const Scalar& x, const Scalar& y)
-{
-  return EIGEN_MATHFUNC_IMPL(atanh2, Scalar)::run(x, y);
-}
-
-/****************************************************************************
-* Implementation of pow                                                  *
-****************************************************************************/
-
-template<typename Scalar, bool IsInteger>
-struct pow_default_impl
-{
-  typedef Scalar retval;
-  static inline Scalar run(const Scalar& x, const Scalar& y)
-  {
-    using std::pow;
-    return pow(x, y);
-  }
-};
-
-template<typename Scalar>
-struct pow_default_impl<Scalar, true>
-{
-  static inline Scalar run(Scalar x, Scalar y)
-  {
-    Scalar res(1);
-    eigen_assert(!NumTraits<Scalar>::IsSigned || y >= 0);
-    if(y & 1) res *= x;
-    y >>= 1;
-    while(y)
-    {
-      x *= x;
-      if(y&1) res *= x;
-      y >>= 1;
-    }
-    return res;
-  }
-};
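The integer specialization above is exponentiation by squaring: the exponent is consumed bit by bit while the base is repeatedly squared. For x=3, y=5 (binary 101): bit 0 set gives res=3; x becomes 9; bit 1 clear; x becomes 81; bit 2 set gives res=3*81=243. A standalone copy of the loop for checking (not part of the patched sources):

#include <cassert>

static int int_pow(int x, int y)   // same loop as pow_default_impl<Scalar,true>::run
{
  int res = 1;
  if (y & 1) res *= x;
  y >>= 1;
  while (y)
  {
    x *= x;
    if (y & 1) res *= x;
    y >>= 1;
  }
  return res;
}

int main()
{
  assert(int_pow(3, 5) == 243);
  assert(int_pow(2, 10) == 1024);
  return 0;
}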
-
-template<typename Scalar>
-struct pow_impl : pow_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
-
-template<typename Scalar>
-struct pow_retval
-{
-  typedef Scalar type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y)
-{
-  return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y);
-}
-
-/****************************************************************************
-* Implementation of random                                               *
-****************************************************************************/
-
-template<typename Scalar,
-         bool IsComplex,
-         bool IsInteger>
-struct random_default_impl {};
-
-template<typename Scalar>
-struct random_impl : random_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};
-
-template<typename Scalar>
-struct random_retval
-{
-  typedef Scalar type;
-};
-
-template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y);
-template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random();
-
-template<typename Scalar>
-struct random_default_impl<Scalar, false, false>
-{
-  static inline Scalar run(const Scalar& x, const Scalar& y)
-  {
-    return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX);
-  }
-  static inline Scalar run()
-  {
-    return run(Scalar(NumTraits<Scalar>::IsSigned ? -1 : 0), Scalar(1));
-  }
-};
-
-enum {
-  floor_log2_terminate,
-  floor_log2_move_up,
-  floor_log2_move_down,
-  floor_log2_bogus
-};
-
-template<unsigned int n, int lower, int upper> struct floor_log2_selector
-{
-  enum { middle = (lower + upper) / 2,
-         value = (upper <= lower + 1) ? int(floor_log2_terminate)
-               : (n < (1 << middle)) ? int(floor_log2_move_down)
-               : (n==0) ? int(floor_log2_bogus)
-               : int(floor_log2_move_up)
-  };
-};
-
-template<unsigned int n,
-         int lower = 0,
-         int upper = sizeof(unsigned int) * CHAR_BIT - 1,
-         int selector = floor_log2_selector<n, lower, upper>::value>
-struct floor_log2 {};
-
-template<unsigned int n, int lower, int upper>
-struct floor_log2<n, lower, upper, floor_log2_move_down>
-{
-  enum { value = floor_log2<n, lower, floor_log2_selector<n, lower, upper>::middle>::value };
-};
-
-template<unsigned int n, int lower, int upper>
-struct floor_log2<n, lower, upper, floor_log2_move_up>
-{
-  enum { value = floor_log2<n, floor_log2_selector<n, lower, upper>::middle, upper>::value };
-};
-
-template<unsigned int n, int lower, int upper>
-struct floor_log2<n, lower, upper, floor_log2_terminate>
-{
-  enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? lower+1 : lower };
-};
-
-template<unsigned int n, int lower, int upper>
-struct floor_log2<n, lower, upper, floor_log2_bogus>
-{
-  // no value, error at compile time
-};
-
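floor_log2 runs a compile-time binary search over bit positions, and the terminate case then picks between lower and lower+1; the net effect is that floor_log2<n>::value is the index of n's highest set bit. A few illustrative compile-time checks (the example_static_check helper is hypothetical and not part of Eigen; EIGEN_STATIC_ASSERT would work as well):

#include <Eigen/Core>

// Hypothetical standalone checks: floor_log2<n>::value is the integer part of log2(n).
template<bool Condition> struct example_static_check;
template<> struct example_static_check<true> { enum { ok = 1 }; };

enum {
  check_1   = example_static_check< Eigen::internal::floor_log2<1>::value   == 0 >::ok,
  check_8   = example_static_check< Eigen::internal::floor_log2<8>::value   == 3 >::ok,
  check_9   = example_static_check< Eigen::internal::floor_log2<9>::value   == 3 >::ok,
  check_255 = example_static_check< Eigen::internal::floor_log2<255>::value == 7 >::ok
};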
-template<typename Scalar>
-struct random_default_impl<Scalar, false, true>
-{
-  typedef typename NumTraits<Scalar>::NonInteger NonInteger;
-
-  static inline Scalar run(const Scalar& x, const Scalar& y)
-  {
-    return x + Scalar((NonInteger(y)-x+1) * std::rand() / (RAND_MAX + NonInteger(1)));
-  }
-
-  static inline Scalar run()
-  {
-#ifdef EIGEN_MAKING_DOCS
-    return run(Scalar(NumTraits<Scalar>::IsSigned ? -10 : 0), Scalar(10));
-#else
-    enum { rand_bits = floor_log2<(unsigned int)(RAND_MAX)+1>::value,
-           scalar_bits = sizeof(Scalar) * CHAR_BIT,
-           shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits))
-    };
-    Scalar x = Scalar(std::rand() >> shift);
-    Scalar offset = NumTraits<Scalar>::IsSigned ? Scalar(1 << (rand_bits-1)) : Scalar(0);
-    return x - offset;
-#endif
-  }
-};
-
-template<typename Scalar>
-struct random_default_impl<Scalar, true, false>
-{
-  static inline Scalar run(const Scalar& x, const Scalar& y)
-  {
-    return Scalar(random(real(x), real(y)),
-                  random(imag(x), imag(y)));
-  }
-  static inline Scalar run()
-  {
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-    return Scalar(random<RealScalar>(), random<RealScalar>());
-  }
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y)
-{
-  return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y);
-}
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
-{
-  return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
-}
-
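Taken together, these entry points draw uniformly from [x, y]; for floating-point scalars the zero-argument overload draws from [-1, 1] (or [0, 1] for unsigned types). A brief usage sketch, assuming <Eigen/Core> is included and noting that these are internal helpers (seeding via std::srand is left to the caller):

#include <cstdlib>
#include <Eigen/Core>

int main()
{
  std::srand(42);                                          // make the draws reproducible
  double u = Eigen::internal::random<double>();            // uniform in [-1, 1] for signed scalar types
  double v = Eigen::internal::random<double>(0.0, 10.0);   // uniform in [0, 10]
  int    k = Eigen::internal::random<int>(1, 6);           // uniform integer in {1, ..., 6}
  return (u >= -1.0 && u <= 1.0 && v >= 0.0 && v <= 10.0 && k >= 1 && k <= 6) ? 0 : 1;
}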
-/****************************************************************************
-* Implementation of fuzzy comparisons                                       *
-****************************************************************************/
-
-template<typename Scalar,
-         bool IsComplex,
-         bool IsInteger>
-struct scalar_fuzzy_default_impl {};
-
-template<typename Scalar>
-struct scalar_fuzzy_default_impl<Scalar, false, false>
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  template<typename OtherScalar>
-  static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
-  {
-    return abs(x) <= abs(y) * prec;
-  }
-  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
-  {
-    using std::min;
-    return abs(x - y) <= (min)(abs(x), abs(y)) * prec;
-  }
-  static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec)
-  {
-    return x <= y || isApprox(x, y, prec);
-  }
-};
-
-template<typename Scalar>
-struct scalar_fuzzy_default_impl<Scalar, false, true>
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  template<typename OtherScalar>
-  static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&)
-  {
-    return x == Scalar(0);
-  }
-  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&)
-  {
-    return x == y;
-  }
-  static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&)
-  {
-    return x <= y;
-  }
-};
-
-template<typename Scalar>
-struct scalar_fuzzy_default_impl<Scalar, true, false>
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  template<typename OtherScalar>
-  static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
-  {
-    return abs2(x) <= abs2(y) * prec * prec;
-  }
-  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
-  {
-    using std::min;
-    return abs2(x - y) <= (min)(abs2(x), abs2(y)) * prec * prec;
-  }
-};
-
-template<typename Scalar>
-struct scalar_fuzzy_impl : scalar_fuzzy_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};
-
-template<typename Scalar, typename OtherScalar>
-inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y,
-                                   typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
-{
-  return scalar_fuzzy_impl<Scalar>::template isMuchSmallerThan<OtherScalar>(x, y, precision);
-}
-
-template<typename Scalar>
-inline bool isApprox(const Scalar& x, const Scalar& y,
-                          typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
-{
-  return scalar_fuzzy_impl<Scalar>::isApprox(x, y, precision);
-}
-
-template<typename Scalar>
-inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y,
-                                    typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
-{
-  return scalar_fuzzy_impl<Scalar>::isApproxOrLessThan(x, y, precision);
-}
-
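These helpers back the user-facing fuzzy comparisons such as MatrixBase::isApprox() and isMuchSmallerThan(), whose precision argument defaults to NumTraits<Scalar>::dummy_precision(). A short self-contained sketch of the relative-tolerance semantics on dense vectors (the concrete values are for illustration only):

#include <Eigen/Core>
#include <cassert>

int main()
{
  Eigen::Vector3d a(1.0, 2.0, 3.0);
  Eigen::Vector3d b = a + Eigen::Vector3d::Constant(1e-13);

  assert(a.isApprox(b));                     // difference ~1e-13 is below dummy_precision (~1e-12 for double)
  assert(!a.isApprox(2.0 * a));              // a relative difference of 100% is far above the tolerance
  assert((1e-20 * a).isMuchSmallerThan(a));  // negligible with respect to a
  return 0;
}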
-/******************************************
-***  The special case of the  bool type ***
-******************************************/
-
-template<> struct random_impl<bool>
-{
-  static inline bool run()
-  {
-    return random<int>(0,1)==0 ? false : true;
-  }
-};
-
-template<> struct scalar_fuzzy_impl<bool>
-{
-  typedef bool RealScalar;
-  
-  template<typename OtherScalar>
-  static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&)
-  {
-    return !x;
-  }
-  
-  static inline bool isApprox(bool x, bool y, bool)
-  {
-    return x == y;
-  }
-
-  static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&)
-  {
-    return (!x) || y;
-  }
-  
-};
-
-/****************************************************************************
-* Special functions                                                          *
-****************************************************************************/
-
-// std::isfinite is non-standard, so let's define our own version,
-// even though it is not very efficient.
-template<typename T> bool (isfinite)(const T& x)
-{
-  return x<NumTraits<T>::highest() && x>NumTraits<T>::lowest();
-}
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_MATHFUNCTIONS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/MatrixBase.h b/resources/3rdparty/eigen/Eigen/src/Core/MatrixBase.h
deleted file mode 100644
index 521bba18a..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/MatrixBase.h
+++ /dev/null
@@ -1,515 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MATRIXBASE_H
-#define EIGEN_MATRIXBASE_H
-
-namespace Eigen {
-
-/** \class MatrixBase
-  * \ingroup Core_Module
-  *
-  * \brief Base class for all dense matrices, vectors, and expressions
-  *
-  * This class is the base class inherited by all matrix, vector, and related expression
-  * types. Most of the Eigen API is contained in this class and its base classes. Other important
-  * classes for the Eigen API are Matrix and VectorwiseOp.
-  *
-  * Note that some methods are defined in other modules such as the \ref LU_Module LU module
-  * for all functions related to matrix inversions.
-  *
-  * \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc.
-  *
-  * When writing a function taking Eigen objects as arguments, if you want your function
-  * to accept any matrix, vector, or expression, just let it take a
-  * MatrixBase argument. As an example, here is a function printFirstRow which, given
-  * a matrix, vector, or expression \a x, prints the first row of \a x.
-  *
-  * \code
-    template<typename Derived>
-    void printFirstRow(const Eigen::MatrixBase<Derived>& x)
-    {
-      cout << x.row(0) << endl;
-    }
-  * \endcode
-  *
-  * This class can be extended with the help of the plugin mechanism described on the page
-  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN.
-  *
-  * \sa \ref TopicClassHierarchy
-  */
-template<typename Derived> class MatrixBase
-  : public DenseBase<Derived>
-{
-  public:
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    typedef MatrixBase StorageBaseType;
-    typedef typename internal::traits<Derived>::StorageKind StorageKind;
-    typedef typename internal::traits<Derived>::Index Index;
-    typedef typename internal::traits<Derived>::Scalar Scalar;
-    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-
-    typedef DenseBase<Derived> Base;
-    using Base::RowsAtCompileTime;
-    using Base::ColsAtCompileTime;
-    using Base::SizeAtCompileTime;
-    using Base::MaxRowsAtCompileTime;
-    using Base::MaxColsAtCompileTime;
-    using Base::MaxSizeAtCompileTime;
-    using Base::IsVectorAtCompileTime;
-    using Base::Flags;
-    using Base::CoeffReadCost;
-
-    using Base::derived;
-    using Base::const_cast_derived;
-    using Base::rows;
-    using Base::cols;
-    using Base::size;
-    using Base::coeff;
-    using Base::coeffRef;
-    using Base::lazyAssign;
-    using Base::eval;
-    using Base::operator+=;
-    using Base::operator-=;
-    using Base::operator*=;
-    using Base::operator/=;
-
-    typedef typename Base::CoeffReturnType CoeffReturnType;
-    typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType;
-    typedef typename Base::RowXpr RowXpr;
-    typedef typename Base::ColXpr ColXpr;
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
-
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** type of the equivalent square matrix */
-    typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
-                          EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
-    /** \returns the size of the main diagonal, which is min(rows(),cols()).
-      * \sa rows(), cols(), SizeAtCompileTime. */
-    inline Index diagonalSize() const { return (std::min)(rows(),cols()); }
-
-    /** \brief The plain matrix type corresponding to this expression.
-      *
-      * This is not necessarily exactly the return type of eval(). In the case of plain matrices,
-      * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed
-      * that the return type of eval() is either PlainObject or const PlainObject&.
-      */
-    typedef Matrix<typename internal::traits<Derived>::Scalar,
-                internal::traits<Derived>::RowsAtCompileTime,
-                internal::traits<Derived>::ColsAtCompileTime,
-                AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),
-                internal::traits<Derived>::MaxRowsAtCompileTime,
-                internal::traits<Derived>::MaxColsAtCompileTime
-          > PlainObject;
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** \internal Represents a matrix with all coefficients equal to one another*/
-    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Derived> ConstantReturnType;
-    /** \internal the return type of MatrixBase::adjoint() */
-    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
-                        CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>,
-                        ConstTransposeReturnType
-                     >::type AdjointReturnType;
-    /** \internal Return type of eigenvalues() */
-    typedef Matrix<std::complex<RealScalar>, internal::traits<Derived>::ColsAtCompileTime, 1, ColMajor> EigenvaluesReturnType;
-    /** \internal the return type of identity */
-    typedef CwiseNullaryOp<internal::scalar_identity_op<Scalar>,Derived> IdentityReturnType;
-    /** \internal the return type of unit vectors */
-    typedef Block<const CwiseNullaryOp<internal::scalar_identity_op<Scalar>, SquareMatrixType>,
-                  internal::traits<Derived>::RowsAtCompileTime,
-                  internal::traits<Derived>::ColsAtCompileTime> BasisReturnType;
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
-#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase
-#   include "../plugins/CommonCwiseUnaryOps.h"
-#   include "../plugins/CommonCwiseBinaryOps.h"
-#   include "../plugins/MatrixCwiseUnaryOps.h"
-#   include "../plugins/MatrixCwiseBinaryOps.h"
-#   ifdef EIGEN_MATRIXBASE_PLUGIN
-#     include EIGEN_MATRIXBASE_PLUGIN
-#   endif
-#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
-
-    /** Special case of the template operator=, in order to prevent the compiler
-      * from generating a default operator= (issue hit with g++ 4.1)
-      */
-    Derived& operator=(const MatrixBase& other);
-
-    // We cannot inherit here via Base::operator= since it is causing
-    // trouble with MSVC.
-
-    template <typename OtherDerived>
-    Derived& operator=(const DenseBase<OtherDerived>& other);
-
-    template <typename OtherDerived>
-    Derived& operator=(const EigenBase<OtherDerived>& other);
-
-    template<typename OtherDerived>
-    Derived& operator=(const ReturnByValue<OtherDerived>& other);
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    template<typename ProductDerived, typename Lhs, typename Rhs>
-    Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);
-
-    template<typename ProductDerived, typename Lhs, typename Rhs>
-    Derived& lazyAssign(const MatrixPowerProductBase<ProductDerived, Lhs,Rhs>& other);
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
-    template<typename OtherDerived>
-    Derived& operator+=(const MatrixBase<OtherDerived>& other);
-    template<typename OtherDerived>
-    Derived& operator-=(const MatrixBase<OtherDerived>& other);
-
-    template<typename OtherDerived>
-    const typename ProductReturnType<Derived,OtherDerived>::Type
-    operator*(const MatrixBase<OtherDerived> &other) const;
-
-    template<typename OtherDerived>
-    const typename LazyProductReturnType<Derived,OtherDerived>::Type
-    lazyProduct(const MatrixBase<OtherDerived> &other) const;
-
-    template<typename OtherDerived>
-    Derived& operator*=(const EigenBase<OtherDerived>& other);
-
-    template<typename OtherDerived>
-    void applyOnTheLeft(const EigenBase<OtherDerived>& other);
-
-    template<typename OtherDerived>
-    void applyOnTheRight(const EigenBase<OtherDerived>& other);
-
-    template<typename DiagonalDerived>
-    const DiagonalProduct<Derived, DiagonalDerived, OnTheRight>
-    operator*(const DiagonalBase<DiagonalDerived> &diagonal) const;
-
-    template<typename OtherDerived>
-    typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
-    dot(const MatrixBase<OtherDerived>& other) const;
-
-    #ifdef EIGEN2_SUPPORT
-      template<typename OtherDerived>
-      Scalar eigen2_dot(const MatrixBase<OtherDerived>& other) const;
-    #endif
-
-    RealScalar squaredNorm() const;
-    RealScalar norm() const;
-    RealScalar stableNorm() const;
-    RealScalar blueNorm() const;
-    RealScalar hypotNorm() const;
-    const PlainObject normalized() const;
-    void normalize();
-
-    const AdjointReturnType adjoint() const;
-    void adjointInPlace();
-
-    typedef Diagonal<Derived> DiagonalReturnType;
-    DiagonalReturnType diagonal();
-    typedef const Diagonal<const Derived> ConstDiagonalReturnType;
-    const ConstDiagonalReturnType diagonal() const;
-
-    template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };
-    template<int Index> struct ConstDiagonalIndexReturnType { typedef const Diagonal<const Derived,Index> Type; };
-
-    template<int Index> typename DiagonalIndexReturnType<Index>::Type diagonal();
-    template<int Index> typename ConstDiagonalIndexReturnType<Index>::Type diagonal() const;
-
-    // Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
-    // On the other hand they confuse MSVC8...
-    #if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later
-    typename MatrixBase::template DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
-    typename MatrixBase::template ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
-    #else
-    typename DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
-    typename ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
-    #endif
-
-    #ifdef EIGEN2_SUPPORT
-    template<unsigned int Mode> typename internal::eigen2_part_return_type<Derived, Mode>::type part();
-    template<unsigned int Mode> const typename internal::eigen2_part_return_type<Derived, Mode>::type part() const;
-    
-    // Huge hack to make Eigen2's matrix.part<Diagonal>() work in Eigen3. Problem: Diagonal is now a class template instead
-    // of an integer constant. Solution: overload the part() method template with respect to its template parameter list.
-    template<template<typename T, int N> class U>
-    const DiagonalWrapper<ConstDiagonalReturnType> part() const
-    { return diagonal().asDiagonal(); }
-    #endif // EIGEN2_SUPPORT
-
-    template<unsigned int Mode> struct TriangularViewReturnType { typedef TriangularView<Derived, Mode> Type; };
-    template<unsigned int Mode> struct ConstTriangularViewReturnType { typedef const TriangularView<const Derived, Mode> Type; };
-
-    template<unsigned int Mode> typename TriangularViewReturnType<Mode>::Type triangularView();
-    template<unsigned int Mode> typename ConstTriangularViewReturnType<Mode>::Type triangularView() const;
-
-    template<unsigned int UpLo> struct SelfAdjointViewReturnType { typedef SelfAdjointView<Derived, UpLo> Type; };
-    template<unsigned int UpLo> struct ConstSelfAdjointViewReturnType { typedef const SelfAdjointView<const Derived, UpLo> Type; };
-
-    template<unsigned int UpLo> typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();
-    template<unsigned int UpLo> typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
-
-    const SparseView<Derived> sparseView(const Scalar& m_reference = Scalar(0),
-                                         const typename NumTraits<Scalar>::Real& m_epsilon = NumTraits<Scalar>::dummy_precision()) const;
-    static const IdentityReturnType Identity();
-    static const IdentityReturnType Identity(Index rows, Index cols);
-    static const BasisReturnType Unit(Index size, Index i);
-    static const BasisReturnType Unit(Index i);
-    static const BasisReturnType UnitX();
-    static const BasisReturnType UnitY();
-    static const BasisReturnType UnitZ();
-    static const BasisReturnType UnitW();
-
-    const DiagonalWrapper<const Derived> asDiagonal() const;
-    const PermutationWrapper<const Derived> asPermutation() const;
-
-    Derived& setIdentity();
-    Derived& setIdentity(Index rows, Index cols);
-
-    bool isIdentity(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-    bool isDiagonal(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-
-    bool isUpperTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-    bool isLowerTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-
-    template<typename OtherDerived>
-    bool isOrthogonal(const MatrixBase<OtherDerived>& other,
-                      const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-    bool isUnitary(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-
-    /** \returns true if all coefficients of \c *this and \a other are exactly equal.
-      * \warning When using floating point scalar values you should probably use a
-      *          fuzzy comparison such as isApprox() instead.
-      * \sa isApprox(), operator!= */
-    template<typename OtherDerived>
-    inline bool operator==(const MatrixBase<OtherDerived>& other) const
-    { return cwiseEqual(other).all(); }
-
-    /** \returns true if at least one pair of coefficients of \c *this and \a other is not exactly equal.
-      * \warning When using floating point scalar values you should probably use a
-      *          fuzzy comparison such as isApprox() instead.
-      * \sa isApprox(), operator== */
-    template<typename OtherDerived>
-    inline bool operator!=(const MatrixBase<OtherDerived>& other) const
-    { return cwiseNotEqual(other).any(); }
-
-    NoAlias<Derived,Eigen::MatrixBase > noalias();
-
-    inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
-    inline ForceAlignedAccess<Derived> forceAlignedAccess();
-    template<bool Enable> inline typename internal::add_const_on_value_type<typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type>::type forceAlignedAccessIf() const;
-    template<bool Enable> inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();
-
-    Scalar trace() const;
-
-/////////// Array module ///////////
-
-    template<int p> RealScalar lpNorm() const;
-
-    MatrixBase<Derived>& matrix() { return *this; }
-    const MatrixBase<Derived>& matrix() const { return *this; }
-
-    /** \returns an \link ArrayBase Array \endlink expression of this matrix
-      * \sa ArrayBase::matrix() */
-    ArrayWrapper<Derived> array() { return derived(); }
-    const ArrayWrapper<const Derived> array() const { return derived(); }
-
-/////////// LU module ///////////
-
-    const FullPivLU<PlainObject> fullPivLu() const;
-    const PartialPivLU<PlainObject> partialPivLu() const;
-
-    #if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
-    const LU<PlainObject> lu() const;
-    #endif
-
-    #ifdef EIGEN2_SUPPORT
-    const LU<PlainObject> eigen2_lu() const;
-    #endif
-
-    #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
-    const PartialPivLU<PlainObject> lu() const;
-    #endif
-    
-    #ifdef EIGEN2_SUPPORT
-    template<typename ResultType>
-    void computeInverse(MatrixBase<ResultType> *result) const {
-      *result = this->inverse();
-    }
-    #endif
-
-    const internal::inverse_impl<Derived> inverse() const;
-    template<typename ResultType>
-    void computeInverseAndDetWithCheck(
-      ResultType& inverse,
-      typename ResultType::Scalar& determinant,
-      bool& invertible,
-      const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
-    ) const;
-    template<typename ResultType>
-    void computeInverseWithCheck(
-      ResultType& inverse,
-      bool& invertible,
-      const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
-    ) const;
-    Scalar determinant() const;
-
-/////////// Cholesky module ///////////
-
-    const LLT<PlainObject>  llt() const;
-    const LDLT<PlainObject> ldlt() const;
-
-/////////// QR module ///////////
-
-    const HouseholderQR<PlainObject> householderQr() const;
-    const ColPivHouseholderQR<PlainObject> colPivHouseholderQr() const;
-    const FullPivHouseholderQR<PlainObject> fullPivHouseholderQr() const;
-    
-    #ifdef EIGEN2_SUPPORT
-    const QR<PlainObject> qr() const;
-    #endif
-
-    EigenvaluesReturnType eigenvalues() const;
-    RealScalar operatorNorm() const;
-
-/////////// SVD module ///////////
-
-    JacobiSVD<PlainObject> jacobiSvd(unsigned int computationOptions = 0) const;
-
-    #ifdef EIGEN2_SUPPORT
-    SVD<PlainObject> svd() const;
-    #endif
-
-/////////// Geometry module ///////////
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /// \internal helper struct to form the return type of the cross product
-    template<typename OtherDerived> struct cross_product_return_type {
-      typedef typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType Scalar;
-      typedef Matrix<Scalar,MatrixBase::RowsAtCompileTime,MatrixBase::ColsAtCompileTime> type;
-    };
-    #endif // EIGEN_PARSED_BY_DOXYGEN
-    template<typename OtherDerived>
-    typename cross_product_return_type<OtherDerived>::type
-    cross(const MatrixBase<OtherDerived>& other) const;
-    template<typename OtherDerived>
-    PlainObject cross3(const MatrixBase<OtherDerived>& other) const;
-    PlainObject unitOrthogonal(void) const;
-    Matrix<Scalar,3,1> eulerAngles(Index a0, Index a1, Index a2) const;
-    
-    #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
-    ScalarMultipleReturnType operator*(const UniformScaling<Scalar>& s) const;
-    // put this as separate enum value to work around possible GCC 4.3 bug (?)
-    enum { HomogeneousReturnTypeDirection = ColsAtCompileTime==1?Vertical:Horizontal };
-    typedef Homogeneous<Derived, HomogeneousReturnTypeDirection> HomogeneousReturnType;
-    HomogeneousReturnType homogeneous() const;
-    #endif
-    
-    enum {
-      SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1
-    };
-    typedef Block<const Derived,
-                  internal::traits<Derived>::ColsAtCompileTime==1 ? SizeMinusOne : 1,
-                  internal::traits<Derived>::ColsAtCompileTime==1 ? 1 : SizeMinusOne> ConstStartMinusOne;
-    typedef CwiseUnaryOp<internal::scalar_quotient1_op<typename internal::traits<Derived>::Scalar>,
-                const ConstStartMinusOne > HNormalizedReturnType;
-
-    const HNormalizedReturnType hnormalized() const;
-
-////////// Householder module ///////////
-
-    void makeHouseholderInPlace(Scalar& tau, RealScalar& beta);
-    template<typename EssentialPart>
-    void makeHouseholder(EssentialPart& essential,
-                         Scalar& tau, RealScalar& beta) const;
-    template<typename EssentialPart>
-    void applyHouseholderOnTheLeft(const EssentialPart& essential,
-                                   const Scalar& tau,
-                                   Scalar* workspace);
-    template<typename EssentialPart>
-    void applyHouseholderOnTheRight(const EssentialPart& essential,
-                                    const Scalar& tau,
-                                    Scalar* workspace);
-
-///////// Jacobi module /////////
-
-    template<typename OtherScalar>
-    void applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j);
-    template<typename OtherScalar>
-    void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);
-
-///////// MatrixFunctions module /////////
-
-    typedef typename internal::stem_function<Scalar>::type StemFunction;
-    const MatrixExponentialReturnValue<Derived> exp() const;
-    const MatrixFunctionReturnValue<Derived> matrixFunction(StemFunction f) const;
-    const MatrixFunctionReturnValue<Derived> cosh() const;
-    const MatrixFunctionReturnValue<Derived> sinh() const;
-    const MatrixFunctionReturnValue<Derived> cos() const;
-    const MatrixFunctionReturnValue<Derived> sin() const;
-    const MatrixSquareRootReturnValue<Derived> sqrt() const;
-    const MatrixLogarithmReturnValue<Derived> log() const;
-    const MatrixPowerReturnValue<Derived> pow(RealScalar p) const;
-
-#ifdef EIGEN2_SUPPORT
-    template<typename ProductDerived, typename Lhs, typename Rhs>
-    Derived& operator+=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
-                                      EvalBeforeAssigningBit>& other);
-
-    template<typename ProductDerived, typename Lhs, typename Rhs>
-    Derived& operator-=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
-                                      EvalBeforeAssigningBit>& other);
-
-    /** \deprecated because .lazy() is deprecated
-      * Overloaded for cache friendly product evaluation */
-    template<typename OtherDerived>
-    Derived& lazyAssign(const Flagged<OtherDerived, 0, EvalBeforeAssigningBit>& other)
-    { return lazyAssign(other._expression()); }
-
-    template<unsigned int Added>
-    const Flagged<Derived, Added, 0> marked() const;
-    const Flagged<Derived, 0, EvalBeforeAssigningBit> lazy() const;
-
-    inline const Cwise<Derived> cwise() const;
-    inline Cwise<Derived> cwise();
-
-    VectorBlock<Derived> start(Index size);
-    const VectorBlock<const Derived> start(Index size) const;
-    VectorBlock<Derived> end(Index size);
-    const VectorBlock<const Derived> end(Index size) const;
-    template<int Size> VectorBlock<Derived,Size> start();
-    template<int Size> const VectorBlock<const Derived,Size> start() const;
-    template<int Size> VectorBlock<Derived,Size> end();
-    template<int Size> const VectorBlock<const Derived,Size> end() const;
-
-    Minor<Derived> minor(Index row, Index col);
-    const Minor<Derived> minor(Index row, Index col) const;
-#endif
-
-  protected:
-    MatrixBase() : Base() {}
-
-  private:
-    explicit MatrixBase(int);
-    MatrixBase(int,int);
-    template<typename OtherDerived> explicit MatrixBase(const MatrixBase<OtherDerived>&);
-  protected:
-    // mixing arrays and matrices is not legal
-    template<typename OtherDerived> Derived& operator+=(const ArrayBase<OtherDerived>& )
-    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
-    // mixing arrays and matrices is not legal
-    template<typename OtherDerived> Derived& operator-=(const ArrayBase<OtherDerived>& )
-    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_MATRIXBASE_H
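Following the pattern recommended in the class documentation above, a generic function accepts any dense matrix, vector, or expression by taking a MatrixBase<Derived> argument. The sketch below is illustrative only (the function name is made up); it exercises a few members declared in this class, namely adjoint(), operator* and trace():

#include <Eigen/Dense>
#include <iostream>

// Accepts any dense matrix, vector, or expression and returns the trace of x^* x.
template<typename Derived>
typename Eigen::MatrixBase<Derived>::Scalar
traceOfGram(const Eigen::MatrixBase<Derived>& x)
{
  return (x.adjoint() * x).trace();  // uses adjoint(), operator* and trace() declared above
}

int main()
{
  Eigen::Matrix2d m;
  m << 1, 2,
       3, 4;
  std::cout << traceOfGram(m) << std::endl;         // 30 = 1 + 9 + 4 + 16
  std::cout << traceOfGram(m.col(0)) << std::endl;  // 10, works on expressions too
  return 0;
}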
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/NoAlias.h b/resources/3rdparty/eigen/Eigen/src/Core/NoAlias.h
deleted file mode 100644
index 0112c865b..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/NoAlias.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_NOALIAS_H
-#define EIGEN_NOALIAS_H
-
-namespace Eigen {
-
-/** \class NoAlias
-  * \ingroup Core_Module
-  *
-  * \brief Pseudo expression providing an operator = assuming no aliasing
-  *
-  * \param ExpressionType the type of the object on which to do the lazy assignment
-  *
-  * This class represents an expression with special assignment operators
-  * assuming no aliasing between the target expression and the source expression.
-  * More precisely, it allows bypassing the EvalBeforeAssigningBit flag of the source expression.
-  * It is the return type of MatrixBase::noalias()
-  * and most of the time this is the only way it is used.
-  *
-  * \sa MatrixBase::noalias()
-  */
-template<typename ExpressionType, template <typename> class StorageBase>
-class NoAlias
-{
-    typedef typename ExpressionType::Scalar Scalar;
-  public:
-    NoAlias(ExpressionType& expression) : m_expression(expression) {}
-
-    /** Behaves like MatrixBase::lazyAssign(other)
-      * \sa MatrixBase::lazyAssign() */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other)
-    { return internal::assign_selector<ExpressionType,OtherDerived,false>::run(m_expression,other.derived()); }
-
-    /** \sa MatrixBase::operator+= */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other)
-    {
-      typedef SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, ExpressionType, OtherDerived> SelfAdder;
-      SelfAdder tmp(m_expression);
-      typedef typename internal::nested<OtherDerived>::type OtherDerivedNested;
-      typedef typename internal::remove_all<OtherDerivedNested>::type _OtherDerivedNested;
-      internal::assign_selector<SelfAdder,_OtherDerivedNested,false>::run(tmp,OtherDerivedNested(other.derived()));
-      return m_expression;
-    }
-
-    /** \sa MatrixBase::operator-= */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other)
-    {
-      typedef SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, ExpressionType, OtherDerived> SelfAdder;
-      SelfAdder tmp(m_expression);
-      typedef typename internal::nested<OtherDerived>::type OtherDerivedNested;
-      typedef typename internal::remove_all<OtherDerivedNested>::type _OtherDerivedNested;
-      internal::assign_selector<SelfAdder,_OtherDerivedNested,false>::run(tmp,OtherDerivedNested(other.derived()));
-      return m_expression;
-    }
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    template<typename ProductDerived, typename Lhs, typename Rhs>
-    EIGEN_STRONG_INLINE ExpressionType& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
-    { other.derived().addTo(m_expression); return m_expression; }
-
-    template<typename ProductDerived, typename Lhs, typename Rhs>
-    EIGEN_STRONG_INLINE ExpressionType& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
-    { other.derived().subTo(m_expression); return m_expression; }
-
-    template<typename Lhs, typename Rhs, int NestingFlags>
-    EIGEN_STRONG_INLINE ExpressionType& operator+=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
-    { return m_expression.derived() += CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
-
-    template<typename Lhs, typename Rhs, int NestingFlags>
-    EIGEN_STRONG_INLINE ExpressionType& operator-=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
-    { return m_expression.derived() -= CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
-#endif
-
-    ExpressionType& expression() const
-    {
-      return m_expression;
-    }
-
-  protected:
-    ExpressionType& m_expression;
-};
-
-/** \returns a pseudo expression of \c *this with an operator= assuming
-  * no aliasing between \c *this and the source expression.
-  *
-  * More precisely, noalias() allows bypassing the EvalBeforeAssigningBit flag.
-  * Currently, even though several expressions may alias, only product
-  * expressions have this flag. Therefore, noalias() is only useful when
-  * the source expression contains a matrix product.
-  *
-  * Here are some examples where noalias is useful:
-  * \code
-  * D.noalias()  = A * B;
-  * D.noalias() += A.transpose() * B;
-  * D.noalias() -= 2 * A * B.adjoint();
-  * \endcode
-  *
-  * On the other hand the following example will lead to a \b wrong result:
-  * \code
-  * A.noalias() = A * B;
-  * \endcode
-  * because the result matrix A is also an operand of the matrix product. Therefore,
-  * there is no alternative but to evaluate A * B in a temporary, which is the default
-  * behavior when you write:
-  * \code
-  * A = A * B;
-  * \endcode
-  *
-  * \sa class NoAlias
-  */
-template<typename Derived>
-NoAlias<Derived,MatrixBase> MatrixBase<Derived>::noalias()
-{
-  return derived();
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_NOALIAS_H
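A minimal, compilable illustration of the guarantee documented above: when the destination does not alias the operands, noalias() skips the temporary that a plain assignment from a product expression would otherwise create (the sizes below are arbitrary):

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(64, 64);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(64, 64);
  Eigen::MatrixXd C(64, 64);

  C.noalias()  = A * B;   // safe: C aliases neither A nor B, no temporary is created
  C.noalias() += A * B;   // accumulation without a temporary either

  A = A * B;              // aliasing: keep the default behavior, which evaluates into a temporary
  return 0;
}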
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/PermutationMatrix.h b/resources/3rdparty/eigen/Eigen/src/Core/PermutationMatrix.h
deleted file mode 100644
index 86b63ea14..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/PermutationMatrix.h
+++ /dev/null
@@ -1,687 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2009-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_PERMUTATIONMATRIX_H
-#define EIGEN_PERMUTATIONMATRIX_H
-
-namespace Eigen { 
-
-template<int RowCol,typename IndicesType,typename MatrixType, typename StorageKind> class PermutedImpl;
-
-/** \class PermutationBase
-  * \ingroup Core_Module
-  *
-  * \brief Base class for permutations
-  *
-  * \param Derived the derived class
-  *
-  * This class is the base class for all expressions representing a permutation matrix,
-  * internally stored as a vector of integers.
-  * The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix
-  * \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have:
-  *  \f[ P_\sigma(e_i) = e_{\sigma(i)}. \f]
-  * This convention ensures that for any two permutations \f$ \sigma, \tau \f$, we have:
-  *  \f[ P_{\sigma\circ\tau} = P_\sigma P_\tau. \f]
-  *
-  * Permutation matrices are square and invertible.
-  *
-  * Notice that in addition to the member functions and operators listed here, there also are non-member
-  * operator* to multiply any kind of permutation object with any kind of matrix expression (MatrixBase)
-  * on either side.
-  *
-  * \sa class PermutationMatrix, class PermutationWrapper
-  */
-
-namespace internal {
-
-template<typename PermutationType, typename MatrixType, int Side, bool Transposed=false>
-struct permut_matrix_product_retval;
-template<typename PermutationType, typename MatrixType, int Side, bool Transposed=false>
-struct permut_sparsematrix_product_retval;
-enum PermPermProduct_t {PermPermProduct};
-
-} // end namespace internal
-
-template<typename Derived>
-class PermutationBase : public EigenBase<Derived>
-{
-    typedef internal::traits<Derived> Traits;
-    typedef EigenBase<Derived> Base;
-  public:
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    typedef typename Traits::IndicesType IndicesType;
-    enum {
-      Flags = Traits::Flags,
-      CoeffReadCost = Traits::CoeffReadCost,
-      RowsAtCompileTime = Traits::RowsAtCompileTime,
-      ColsAtCompileTime = Traits::ColsAtCompileTime,
-      MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,
-      MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
-    };
-    typedef typename Traits::Scalar Scalar;
-    typedef typename Traits::Index Index;
-    typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime,0,MaxRowsAtCompileTime,MaxColsAtCompileTime>
-            DenseMatrixType;
-    typedef PermutationMatrix<IndicesType::SizeAtCompileTime,IndicesType::MaxSizeAtCompileTime,Index>
-            PlainPermutationType;
-    using Base::derived;
-    #endif
-
-    /** Copies the other permutation into *this */
-    template<typename OtherDerived>
-    Derived& operator=(const PermutationBase<OtherDerived>& other)
-    {
-      indices() = other.indices();
-      return derived();
-    }
-
-    /** Assignment from the Transpositions \a tr */
-    template<typename OtherDerived>
-    Derived& operator=(const TranspositionsBase<OtherDerived>& tr)
-    {
-      setIdentity(tr.size());
-      for(Index k=size()-1; k>=0; --k)
-        applyTranspositionOnTheRight(k,tr.coeff(k));
-      return derived();
-    }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** This is a special case of the templated operator=. Its purpose is to
-      * prevent a default operator= from hiding the templated operator=.
-      */
-    Derived& operator=(const PermutationBase& other)
-    {
-      indices() = other.indices();
-      return derived();
-    }
-    #endif
-
-    /** \returns the number of rows */
-    inline Index rows() const { return indices().size(); }
-
-    /** \returns the number of columns */
-    inline Index cols() const { return indices().size(); }
-
-    /** \returns the size of a side of the respective square matrix, i.e., the number of indices */
-    inline Index size() const { return indices().size(); }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    template<typename DenseDerived>
-    void evalTo(MatrixBase<DenseDerived>& other) const
-    {
-      other.setZero();
-      for (int i=0; i<rows();++i)
-        other.coeffRef(indices().coeff(i),i) = typename DenseDerived::Scalar(1);
-    }
-    #endif
-
-    /** \returns a Matrix object initialized from this permutation matrix. Notice that it
-      * is inefficient to return this Matrix object by value. For efficiency, favor using
-      * the Matrix constructor taking EigenBase objects.
-      */
-    DenseMatrixType toDenseMatrix() const
-    {
-      return derived();
-    }
-
-    /** const version of indices(). */
-    const IndicesType& indices() const { return derived().indices(); }
-    /** \returns a reference to the stored array representing the permutation. */
-    IndicesType& indices() { return derived().indices(); }
-
-    /** Resizes to given size.
-      */
-    inline void resize(Index newSize)
-    {
-      indices().resize(newSize);
-    }
-
-    /** Sets *this to be the identity permutation matrix */
-    void setIdentity()
-    {
-      for(Index i = 0; i < size(); ++i)
-        indices().coeffRef(i) = i;
-    }
-
-    /** Sets *this to be the identity permutation matrix of given size.
-      */
-    void setIdentity(Index newSize)
-    {
-      resize(newSize);
-      setIdentity();
-    }
-
-    /** Multiplies *this by the transposition \f$(ij)\f$ on the left.
-      *
-      * \returns a reference to *this.
-      *
-      * \warning This is much slower than applyTranspositionOnTheRight(int,int):
-      * this has linear complexity and requires a lot of branching.
-      *
-      * \sa applyTranspositionOnTheRight(int,int)
-      */
-    Derived& applyTranspositionOnTheLeft(Index i, Index j)
-    {
-      eigen_assert(i>=0 && j>=0 && i<size() && j<size());
-      for(Index k = 0; k < size(); ++k)
-      {
-        if(indices().coeff(k) == i) indices().coeffRef(k) = j;
-        else if(indices().coeff(k) == j) indices().coeffRef(k) = i;
-      }
-      return derived();
-    }
-
-    /** Multiplies *this by the transposition \f$(ij)\f$ on the right.
-      *
-      * \returns a reference to *this.
-      *
-      * This is a fast operation: it simply swaps two indices.
-      *
-      * \sa applyTranspositionOnTheLeft(int,int)
-      */
-    Derived& applyTranspositionOnTheRight(Index i, Index j)
-    {
-      eigen_assert(i>=0 && j>=0 && i<size() && j<size());
-      std::swap(indices().coeffRef(i), indices().coeffRef(j));
-      return derived();
-    }
-
-    /** \returns the inverse permutation matrix.
-      *
-      * \note \note_try_to_help_rvo
-      */
-    inline Transpose<PermutationBase> inverse() const
-    { return derived(); }
-    /** \returns the transposed permutation matrix.
-      *
-      * \note \note_try_to_help_rvo
-      */
-    inline Transpose<PermutationBase> transpose() const
-    { return derived(); }
-
-    /**** multiplication helpers to hopefully get RVO ****/
-
-  
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-  protected:
-    template<typename OtherDerived>
-    void assignTranspose(const PermutationBase<OtherDerived>& other)
-    {
-      for (int i=0; i<rows();++i) indices().coeffRef(other.indices().coeff(i)) = i;
-    }
-    template<typename Lhs,typename Rhs>
-    void assignProduct(const Lhs& lhs, const Rhs& rhs)
-    {
-      eigen_assert(lhs.cols() == rhs.rows());
-      for (int i=0; i<rows();++i) indices().coeffRef(i) = lhs.indices().coeff(rhs.indices().coeff(i));
-    }
-#endif
-
-  public:
-
-    /** \returns the product permutation matrix.
-      *
-      * \note \note_try_to_help_rvo
-      */
-    template<typename Other>
-    inline PlainPermutationType operator*(const PermutationBase<Other>& other) const
-    { return PlainPermutationType(internal::PermPermProduct, derived(), other.derived()); }
-
-    /** \returns the product of a permutation with another inverse permutation.
-      *
-      * \note \note_try_to_help_rvo
-      */
-    template<typename Other>
-    inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other) const
-    { return PlainPermutationType(internal::PermPermProduct, *this, other.eval()); }
-
-    /** \returns the product of an inverse permutation with another permutation.
-      *
-      * \note \note_try_to_help_rvo
-      */
-    template<typename Other> friend
-    inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other, const PermutationBase& perm)
-    { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }
-
-  protected:
-
-};
-
-/** \class PermutationMatrix
-  * \ingroup Core_Module
-  *
-  * \brief Permutation matrix
-  *
-  * \param SizeAtCompileTime the number of rows/cols, or Dynamic
-  * \param MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
-  * \param IndexType the integer type of the indices
-  *
-  * This class represents a permutation matrix, internally stored as a vector of integers.
-  *
-  * \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix
-  */
-
-namespace internal {
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
-struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType> >
- : traits<Matrix<IndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
-{
-  typedef IndexType Index;
-  typedef Matrix<IndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
-};
-}
-
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
-class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType> >
-{
-    typedef PermutationBase<PermutationMatrix> Base;
-    typedef internal::traits<PermutationMatrix> Traits;
-  public:
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    typedef typename Traits::IndicesType IndicesType;
-    #endif
-
-    inline PermutationMatrix()
-    {}
-
-    /** Constructs an uninitialized permutation matrix of given size.
-      */
-    inline PermutationMatrix(int size) : m_indices(size)
-    {}
-
-    /** Copy constructor. */
-    template<typename OtherDerived>
-    inline PermutationMatrix(const PermutationBase<OtherDerived>& other)
-      : m_indices(other.indices()) {}
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** Standard copy constructor. Defined only to prevent a default copy constructor
-      * from hiding the other templated constructor */
-    inline PermutationMatrix(const PermutationMatrix& other) : m_indices(other.indices()) {}
-    #endif
-
-    /** Generic constructor from an expression of the indices. The indices
-      * array has the meaning that the permutation sends each integer i to indices[i].
-      *
-      * \warning It is your responsibility to check that the indices array that you pass actually
-      * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the
-      * array's size.
-      */
-    template<typename Other>
-    explicit inline PermutationMatrix(const MatrixBase<Other>& a_indices) : m_indices(a_indices)
-    {}
-
-    /** Convert the Transpositions \a tr to a permutation matrix */
-    template<typename Other>
-    explicit PermutationMatrix(const TranspositionsBase<Other>& tr)
-      : m_indices(tr.size())
-    {
-      *this = tr;
-    }
-
-    /** Copies the other permutation into *this */
-    template<typename Other>
-    PermutationMatrix& operator=(const PermutationBase<Other>& other)
-    {
-      m_indices = other.indices();
-      return *this;
-    }
-
-    /** Assignment from the Transpositions \a tr */
-    template<typename Other>
-    PermutationMatrix& operator=(const TranspositionsBase<Other>& tr)
-    {
-      return Base::operator=(tr.derived());
-    }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** This is a special case of the templated operator=. Its purpose is to
-      * prevent a default operator= from hiding the templated operator=.
-      */
-    PermutationMatrix& operator=(const PermutationMatrix& other)
-    {
-      m_indices = other.m_indices;
-      return *this;
-    }
-    #endif
-
-    /** const version of indices(). */
-    const IndicesType& indices() const { return m_indices; }
-    /** \returns a reference to the stored array representing the permutation. */
-    IndicesType& indices() { return m_indices; }
-
-
-    /**** multiplication helpers to hopefully get RVO ****/
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    template<typename Other>
-    PermutationMatrix(const Transpose<PermutationBase<Other> >& other)
-      : m_indices(other.nestedPermutation().size())
-    {
-      for (int i=0; i<m_indices.size();++i) m_indices.coeffRef(other.nestedPermutation().indices().coeff(i)) = i;
-    }
-    template<typename Lhs,typename Rhs>
-    PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs)
-      : m_indices(lhs.indices().size())
-    {
-      Base::assignProduct(lhs,rhs);
-    }
-#endif
-
-  protected:
-
-    IndicesType m_indices;
-};
-
-
-namespace internal {
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
-struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess> >
- : traits<Matrix<IndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
-{
-  typedef IndexType Index;
-  typedef Map<const Matrix<IndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, _PacketAccess> IndicesType;
-};
-}
-
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
-class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess>
-  : public PermutationBase<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess> >
-{
-    typedef PermutationBase<Map> Base;
-    typedef internal::traits<Map> Traits;
-  public:
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    typedef typename Traits::IndicesType IndicesType;
-    typedef typename IndicesType::Scalar Index;
-    #endif
-
-    inline Map(const Index* indicesPtr)
-      : m_indices(indicesPtr)
-    {}
-
-    inline Map(const Index* indicesPtr, Index size)
-      : m_indices(indicesPtr,size)
-    {}
-
-    /** Copies the other permutation into *this */
-    template<typename Other>
-    Map& operator=(const PermutationBase<Other>& other)
-    { return Base::operator=(other.derived()); }
-
-    /** Assignment from the Transpositions \a tr */
-    template<typename Other>
-    Map& operator=(const TranspositionsBase<Other>& tr)
-    { return Base::operator=(tr.derived()); }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** This is a special case of the templated operator=. Its purpose is to
-      * prevent a default operator= from hiding the templated operator=.
-      */
-    Map& operator=(const Map& other)
-    {
-      m_indices = other.m_indices;
-      return *this;
-    }
-    #endif
-
-    /** const version of indices(). */
-    const IndicesType& indices() const { return m_indices; }
-    /** \returns a reference to the stored array representing the permutation. */
-    IndicesType& indices() { return m_indices; }
-
-  protected:
-
-    IndicesType m_indices;
-};
-
-/** \class PermutationWrapper
-  * \ingroup Core_Module
-  *
-  * \brief Class to view a vector of integers as a permutation matrix
-  *
-  * \param _IndicesType the type of the vector of integer (can be any compatible expression)
-  *
-  * This class allows viewing any vector expression of integers as a permutation matrix.
-  *
-  * \sa class PermutationBase, class PermutationMatrix
-  */
-
-struct PermutationStorage {};
-
-template<typename _IndicesType> class TranspositionsWrapper;
-namespace internal {
-template<typename _IndicesType>
-struct traits<PermutationWrapper<_IndicesType> >
-{
-  typedef PermutationStorage StorageKind;
-  typedef typename _IndicesType::Scalar Scalar;
-  typedef typename _IndicesType::Scalar Index;
-  typedef _IndicesType IndicesType;
-  enum {
-    RowsAtCompileTime = _IndicesType::SizeAtCompileTime,
-    ColsAtCompileTime = _IndicesType::SizeAtCompileTime,
-    MaxRowsAtCompileTime = IndicesType::MaxRowsAtCompileTime,
-    MaxColsAtCompileTime = IndicesType::MaxColsAtCompileTime,
-    Flags = 0,
-    CoeffReadCost = _IndicesType::CoeffReadCost
-  };
-};
-}
-
-template<typename _IndicesType>
-class PermutationWrapper : public PermutationBase<PermutationWrapper<_IndicesType> >
-{
-    typedef PermutationBase<PermutationWrapper> Base;
-    typedef internal::traits<PermutationWrapper> Traits;
-  public:
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    typedef typename Traits::IndicesType IndicesType;
-    #endif
-
-    inline PermutationWrapper(const IndicesType& a_indices)
-      : m_indices(a_indices)
-    {}
-
-    /** const version of indices(). */
-    const typename internal::remove_all<typename IndicesType::Nested>::type&
-    indices() const { return m_indices; }
-
-  protected:
-
-    typename IndicesType::Nested m_indices;
-};
-
-/** \returns the matrix with the permutation applied to the columns.
-  */
-template<typename Derived, typename PermutationDerived>
-inline const internal::permut_matrix_product_retval<PermutationDerived, Derived, OnTheRight>
-operator*(const MatrixBase<Derived>& matrix,
-          const PermutationBase<PermutationDerived> &permutation)
-{
-  return internal::permut_matrix_product_retval
-           <PermutationDerived, Derived, OnTheRight>
-           (permutation.derived(), matrix.derived());
-}
-
-/** \returns the matrix with the permutation applied to the rows.
-  */
-template<typename Derived, typename PermutationDerived>
-inline const internal::permut_matrix_product_retval
-               <PermutationDerived, Derived, OnTheLeft>
-operator*(const PermutationBase<PermutationDerived> &permutation,
-          const MatrixBase<Derived>& matrix)
-{
-  return internal::permut_matrix_product_retval
-           <PermutationDerived, Derived, OnTheLeft>
-           (permutation.derived(), matrix.derived());
-}
-
-namespace internal {
-
-template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
-struct traits<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
-{
-  typedef typename MatrixType::PlainObject ReturnType;
-};
-
-template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
-struct permut_matrix_product_retval
- : public ReturnByValue<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
-{
-    typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
-
-    permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
-      : m_permutation(perm), m_matrix(matrix)
-    {}
-
-    inline int rows() const { return m_matrix.rows(); }
-    inline int cols() const { return m_matrix.cols(); }
-
-    template<typename Dest> inline void evalTo(Dest& dst) const
-    {
-      const int n = Side==OnTheLeft ? rows() : cols();
-
-      if(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix))
-      {
-        // apply the permutation in place
-        Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());
-        mask.fill(false);
-        int r = 0;
-        while(r < m_permutation.size())
-        {
-          // search for the next seed
-          while(r<m_permutation.size() && mask[r]) r++;
-          if(r>=m_permutation.size())
-            break;
-          // we got one, let's follow it until we are back to the seed
-          int k0 = r++;
-          int kPrev = k0;
-          mask.coeffRef(k0) = true;
-          for(int k=m_permutation.indices().coeff(k0); k!=k0; k=m_permutation.indices().coeff(k))
-          {
-                  Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>(dst, k)
-            .swap(Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>
-                       (dst,((Side==OnTheLeft) ^ Transposed) ? k0 : kPrev));
-
-            mask.coeffRef(k) = true;
-            kPrev = k;
-          }
-        }
-      }
-      else
-      {
-        for(int i = 0; i < n; ++i)
-        {
-          Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>
-               (dst, ((Side==OnTheLeft) ^ Transposed) ? m_permutation.indices().coeff(i) : i)
-
-          =
-
-          Block<const MatrixTypeNestedCleaned,Side==OnTheLeft ? 1 : MatrixType::RowsAtCompileTime,Side==OnTheRight ? 1 : MatrixType::ColsAtCompileTime>
-               (m_matrix, ((Side==OnTheRight) ^ Transposed) ? m_permutation.indices().coeff(i) : i);
-        }
-      }
-    }
-
-  protected:
-    const PermutationType& m_permutation;
-    typename MatrixType::Nested m_matrix;
-};
-
-/* Template partial specialization for transposed/inverse permutations */
-
-template<typename Derived>
-struct traits<Transpose<PermutationBase<Derived> > >
- : traits<Derived>
-{};
-
-} // end namespace internal
-
-template<typename Derived>
-class Transpose<PermutationBase<Derived> >
-  : public EigenBase<Transpose<PermutationBase<Derived> > >
-{
-    typedef Derived PermutationType;
-    typedef typename PermutationType::IndicesType IndicesType;
-    typedef typename PermutationType::PlainPermutationType PlainPermutationType;
-  public:
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    typedef internal::traits<PermutationType> Traits;
-    typedef typename Derived::DenseMatrixType DenseMatrixType;
-    enum {
-      Flags = Traits::Flags,
-      CoeffReadCost = Traits::CoeffReadCost,
-      RowsAtCompileTime = Traits::RowsAtCompileTime,
-      ColsAtCompileTime = Traits::ColsAtCompileTime,
-      MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,
-      MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
-    };
-    typedef typename Traits::Scalar Scalar;
-    #endif
-
-    Transpose(const PermutationType& p) : m_permutation(p) {}
-
-    inline int rows() const { return m_permutation.rows(); }
-    inline int cols() const { return m_permutation.cols(); }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    template<typename DenseDerived>
-    void evalTo(MatrixBase<DenseDerived>& other) const
-    {
-      other.setZero();
-      for (int i=0; i<rows();++i)
-        other.coeffRef(i, m_permutation.indices().coeff(i)) = typename DenseDerived::Scalar(1);
-    }
-    #endif
-
-    /** \return the equivalent permutation matrix */
-    PlainPermutationType eval() const { return *this; }
-
-    DenseMatrixType toDenseMatrix() const { return *this; }
-
-    /** \returns the matrix with the inverse permutation applied to the columns.
-      */
-    template<typename OtherDerived> friend
-    inline const internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheRight, true>
-    operator*(const MatrixBase<OtherDerived>& matrix, const Transpose& trPerm)
-    {
-      return internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheRight, true>(trPerm.m_permutation, matrix.derived());
-    }
-
-    /** \returns the matrix with the inverse permutation applied to the rows.
-      */
-    template<typename OtherDerived>
-    inline const internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheLeft, true>
-    operator*(const MatrixBase<OtherDerived>& matrix) const
-    {
-      return internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheLeft, true>(m_permutation, matrix.derived());
-    }
-
-    const PermutationType& nestedPermutation() const { return m_permutation; }
-
-  protected:
-    const PermutationType& m_permutation;
-};
-
-template<typename Derived>
-const PermutationWrapper<const Derived> MatrixBase<Derived>::asPermutation() const
-{
-  return derived();
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_PERMUTATIONMATRIX_H
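
As a quick illustration of the operators removed above (permutation times matrix, matrix times permutation, and the transposed/inverse variants), here is a minimal sketch against the Eigen 3.x dense API; the matrix and the index values are illustrative only.

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      // Permutation defined by its index vector.
      Eigen::PermutationMatrix<3> P;
      P.indices() << 2, 0, 1;

      Eigen::Matrix3d A;
      A << 1, 2, 3,
           4, 5, 6,
           7, 8, 9;

      Eigen::Matrix3d rowPermuted = P * A;                       // OnTheLeft overload: permutes the rows
      Eigen::Matrix3d colPermuted = A * P;                       // OnTheRight overload: permutes the columns
      Eigen::Matrix3d restored    = P.transpose() * rowPermuted; // transposed (inverse) permutation, equals A again

      std::cout << restored << std::endl;
      return 0;
    }
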
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/PlainObjectBase.h b/resources/3rdparty/eigen/Eigen/src/Core/PlainObjectBase.h
deleted file mode 100644
index bef79d3d7..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/PlainObjectBase.h
+++ /dev/null
@@ -1,776 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_DENSESTORAGEBASE_H
-#define EIGEN_DENSESTORAGEBASE_H
-
-#ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
-# define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0);
-#else
-# define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
-#endif
-
-namespace Eigen {
-
-namespace internal {
-
-template<int MaxSizeAtCompileTime> struct check_rows_cols_for_overflow {
-  template<typename Index>
-  static EIGEN_ALWAYS_INLINE void run(Index, Index)
-  {
-  }
-};
-
-template<> struct check_rows_cols_for_overflow<Dynamic> {
-  template<typename Index>
-  static EIGEN_ALWAYS_INLINE void run(Index rows, Index cols)
-  {
-    // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242
-    // we assume Index is signed
-    Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1;
-    bool error = (rows == 0 || cols == 0) ? false
-               : (rows > max_index / cols);
-    if (error)
-      throw_std_bad_alloc();
-  }
-};
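
The guard above rejects rows*cols products that would overflow a signed Index before any allocation is attempted. A small standalone sketch of the same arithmetic, assuming a 32-bit signed index purely for illustration:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    int main()
    {
      typedef std::int32_t Index;                                        // illustrative 32-bit signed index
      Index max_index = (std::size_t(1) << (8 * sizeof(Index) - 1)) - 1; // 2^31 - 1
      Index rows = 70000, cols = 70000;                                  // ~4.9e9 coefficients would overflow
      bool overflow = (rows != 0 && cols != 0) && (rows > max_index / cols);
      std::cout << "max_index = " << max_index
                << ", overflow = " << std::boolalpha << overflow << std::endl;
      return 0;
    }
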
-
-template <typename Derived, typename OtherDerived = Derived, bool IsVector = bool(Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl;
-
-template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl;
-
-} // end namespace internal
-
-/** \class PlainObjectBase
-  * \brief %Dense storage base class for matrices and arrays.
-  *
-  * This class can be extended with the help of the plugin mechanism described on the page
-  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN.
-  *
-  * \sa \ref TopicClassHierarchy
-  */
-#ifdef EIGEN_PARSED_BY_DOXYGEN
-namespace internal {
-
-// this is a workaround for doxygen not being able to understand the inheritance logic
-// when it is hidden by the dense_xpr_base helper struct.
-template<typename Derived> struct dense_xpr_base_dispatcher_for_doxygen;// : public MatrixBase<Derived> {};
-/** This class is just a workaround for Doxygen; it does not actually exist. */
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-struct dense_xpr_base_dispatcher_for_doxygen<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
-    : public MatrixBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > {};
-/** This class is just a workaround for Doxygen; it does not actually exist. */
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-struct dense_xpr_base_dispatcher_for_doxygen<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
-    : public ArrayBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > {};
-
-} // namespace internal
-
-template<typename Derived>
-class PlainObjectBase : public internal::dense_xpr_base_dispatcher_for_doxygen<Derived>
-#else
-template<typename Derived>
-class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
-#endif
-{
-  public:
-    enum { Options = internal::traits<Derived>::Options };
-    typedef typename internal::dense_xpr_base<Derived>::type Base;
-
-    typedef typename internal::traits<Derived>::StorageKind StorageKind;
-    typedef typename internal::traits<Derived>::Index Index;
-    typedef typename internal::traits<Derived>::Scalar Scalar;
-    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-    typedef Derived DenseType;
-
-    using Base::RowsAtCompileTime;
-    using Base::ColsAtCompileTime;
-    using Base::SizeAtCompileTime;
-    using Base::MaxRowsAtCompileTime;
-    using Base::MaxColsAtCompileTime;
-    using Base::MaxSizeAtCompileTime;
-    using Base::IsVectorAtCompileTime;
-    using Base::Flags;
-
-    template<typename PlainObjectType, int MapOptions, typename StrideType> friend class Eigen::Map;
-    friend  class Eigen::Map<Derived, Unaligned>;
-    typedef Eigen::Map<Derived, Unaligned>  MapType;
-    friend  class Eigen::Map<const Derived, Unaligned>;
-    typedef const Eigen::Map<const Derived, Unaligned> ConstMapType;
-    friend  class Eigen::Map<Derived, Aligned>;
-    typedef Eigen::Map<Derived, Aligned> AlignedMapType;
-    friend  class Eigen::Map<const Derived, Aligned>;
-    typedef const Eigen::Map<const Derived, Aligned> ConstAlignedMapType;
-    template<typename StrideType> struct StridedMapType { typedef Eigen::Map<Derived, Unaligned, StrideType> type; };
-    template<typename StrideType> struct StridedConstMapType { typedef Eigen::Map<const Derived, Unaligned, StrideType> type; };
-    template<typename StrideType> struct StridedAlignedMapType { typedef Eigen::Map<Derived, Aligned, StrideType> type; };
-    template<typename StrideType> struct StridedConstAlignedMapType { typedef Eigen::Map<const Derived, Aligned, StrideType> type; };
-
-  protected:
-    DenseStorage<Scalar, Base::MaxSizeAtCompileTime, Base::RowsAtCompileTime, Base::ColsAtCompileTime, Options> m_storage;
-
-  public:
-    enum { NeedsToAlign = SizeAtCompileTime != Dynamic && (internal::traits<Derived>::Flags & AlignedBit) != 0 };
-    EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
-
-    Base& base() { return *static_cast<Base*>(this); }
-    const Base& base() const { return *static_cast<const Base*>(this); }
-
-    EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); }
-    EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); }
-
-    EIGEN_STRONG_INLINE const Scalar& coeff(Index rowId, Index colId) const
-    {
-      if(Flags & RowMajorBit)
-        return m_storage.data()[colId + rowId * m_storage.cols()];
-      else // column-major
-        return m_storage.data()[rowId + colId * m_storage.rows()];
-    }
-
-    EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
-    {
-      return m_storage.data()[index];
-    }
-
-    EIGEN_STRONG_INLINE Scalar& coeffRef(Index rowId, Index colId)
-    {
-      if(Flags & RowMajorBit)
-        return m_storage.data()[colId + rowId * m_storage.cols()];
-      else // column-major
-        return m_storage.data()[rowId + colId * m_storage.rows()];
-    }
-
-    EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
-    {
-      return m_storage.data()[index];
-    }
-
-    EIGEN_STRONG_INLINE const Scalar& coeffRef(Index rowId, Index colId) const
-    {
-      if(Flags & RowMajorBit)
-        return m_storage.data()[colId + rowId * m_storage.cols()];
-      else // column-major
-        return m_storage.data()[rowId + colId * m_storage.rows()];
-    }
-
-    EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const
-    {
-      return m_storage.data()[index];
-    }
-
-    /** \internal */
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
-    {
-      return internal::ploadt<PacketScalar, LoadMode>
-               (m_storage.data() + (Flags & RowMajorBit
-                                   ? colId + rowId * m_storage.cols()
-                                   : rowId + colId * m_storage.rows()));
-    }
-
-    /** \internal */
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
-    {
-      return internal::ploadt<PacketScalar, LoadMode>(m_storage.data() + index);
-    }
-
-    /** \internal */
-    template<int StoreMode>
-    EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val)
-    {
-      internal::pstoret<Scalar, PacketScalar, StoreMode>
-              (m_storage.data() + (Flags & RowMajorBit
-                                   ? colId + rowId * m_storage.cols()
-                                   : rowId + colId * m_storage.rows()), val);
-    }
-
-    /** \internal */
-    template<int StoreMode>
-    EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val)
-    {
-      internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, val);
-    }
-
-    /** \returns a const pointer to the data array of this matrix */
-    EIGEN_STRONG_INLINE const Scalar *data() const
-    { return m_storage.data(); }
-
-    /** \returns a pointer to the data array of this matrix */
-    EIGEN_STRONG_INLINE Scalar *data()
-    { return m_storage.data(); }
-
-    /** Resizes \c *this to a \a rows x \a cols matrix.
-      *
-      * This method is intended for dynamic-size matrices, although it is legal to call it on any
-      * matrix as long as fixed dimensions are left unchanged. If you only want to change the number
-      * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t).
-      *
-      * If the current number of coefficients of \c *this exactly matches the
-      * product \a rows * \a cols, then no memory allocation is performed and
-      * the current values are left unchanged. In all other cases, including
-      * shrinking, the data is reallocated and all previous values are lost.
-      *
-      * Example: \include Matrix_resize_int_int.cpp
-      * Output: \verbinclude Matrix_resize_int_int.out
-      *
-      * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t)
-      */
-    EIGEN_STRONG_INLINE void resize(Index nbRows, Index nbCols)
-    {
-      eigen_assert(   EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,nbRows==RowsAtCompileTime)
-                   && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,nbCols==ColsAtCompileTime)
-                   && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,nbRows<=MaxRowsAtCompileTime)
-                   && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,nbCols<=MaxColsAtCompileTime)
-                   && nbRows>=0 && nbCols>=0 && "Invalid sizes when resizing a matrix or array.");
-      internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(nbRows, nbCols);
-      #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
-        Index size = nbRows*nbCols;
-        bool size_changed = size != this->size();
-        m_storage.resize(size, nbRows, nbCols);
-        if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
-      #else
-        internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(nbRows, nbCols);
-        m_storage.resize(nbRows*nbCols, nbRows, nbCols);
-      #endif
-    }
-
-    /** Resizes \c *this to a vector of length \a size
-      *
-      * \only_for_vectors. This method does not work for
-      * partially dynamic matrices when the static dimension is anything other
-      * than 1. For example it will not work with Matrix<double, 2, Dynamic>.
-      *
-      * Example: \include Matrix_resize_int.cpp
-      * Output: \verbinclude Matrix_resize_int.out
-      *
-      * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t)
-      */
-    inline void resize(Index size)
-    {
-      EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase)
-      eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime==Dynamic || size<=MaxSizeAtCompileTime)) || SizeAtCompileTime == size) && size>=0);
-      #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
-        bool size_changed = size != this->size();
-      #endif
-      if(RowsAtCompileTime == 1)
-        m_storage.resize(size, 1, size);
-      else
-        m_storage.resize(size, size, 1);
-      #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
-        if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
-      #endif
-    }
-
-    /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the special value \c NoChange
-      * as in the example below.
-      *
-      * Example: \include Matrix_resize_NoChange_int.cpp
-      * Output: \verbinclude Matrix_resize_NoChange_int.out
-      *
-      * \sa resize(Index,Index)
-      */
-    inline void resize(NoChange_t, Index nbCols)
-    {
-      resize(rows(), nbCols);
-    }
-
-    /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange
-      * as in the example below.
-      *
-      * Example: \include Matrix_resize_int_NoChange.cpp
-      * Output: \verbinclude Matrix_resize_int_NoChange.out
-      *
-      * \sa resize(Index,Index)
-      */
-    inline void resize(Index nbRows, NoChange_t)
-    {
-      resize(nbRows, cols());
-    }
-
-    /** Resizes \c *this to have the same dimensions as \a other.
-      * Takes care of doing all the checking that's needed.
-      *
-      * Note that copying a row-vector into a vector (and conversely) is allowed.
-      * The resizing, if any, is then done in the appropriate way so that row-vectors
-      * remain row-vectors and vectors remain vectors.
-      */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)
-    {
-      const OtherDerived& other = _other.derived();
-      internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.rows(), other.cols());
-      const Index othersize = other.rows()*other.cols();
-      if(RowsAtCompileTime == 1)
-      {
-        eigen_assert(other.rows() == 1 || other.cols() == 1);
-        resize(1, othersize);
-      }
-      else if(ColsAtCompileTime == 1)
-      {
-        eigen_assert(other.rows() == 1 || other.cols() == 1);
-        resize(othersize, 1);
-      }
-      else resize(other.rows(), other.cols());
-    }
-
-    /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
-      *
-      * The method is intended for matrices of dynamic size. If you only want to change the number
-      * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
-      * conservativeResize(Index, NoChange_t).
-      *
-      * Matrices are resized relative to the top-left element. If values need to be
-      * appended to the matrix, they will be uninitialized.
-      */
-    EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, Index nbCols)
-    {
-      internal::conservative_resize_like_impl<Derived>::run(*this, nbRows, nbCols);
-    }
-
-    /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
-      *
-      * As opposed to conservativeResize(Index rows, Index cols), this version leaves
-      * the number of columns unchanged.
-      *
-      * In case the matrix is growing, new rows will be uninitialized.
-      */
-    EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, NoChange_t)
-    {
-      // Note: see the comment in conservativeResize(Index,Index)
-      conservativeResize(nbRows, cols());
-    }
-
-    /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
-      *
-      * As opposed to conservativeResize(Index rows, Index cols), this version leaves
-      * the number of rows unchanged.
-      *
-      * In case the matrix is growing, new columns will be uninitialized.
-      */
-    EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index nbCols)
-    {
-      // Note: see the comment in conservativeResize(Index,Index)
-      conservativeResize(rows(), nbCols);
-    }
-
-    /** Resizes the vector to \a size while retaining old values.
-      *
-      * \only_for_vectors. This method does not work for
-      * partially dynamic matrices when the static dimension is anything other
-      * than 1. For example it will not work with Matrix<double, 2, Dynamic>.
-      *
-      * When values are appended, they will be uninitialized.
-      */
-    EIGEN_STRONG_INLINE void conservativeResize(Index size)
-    {
-      internal::conservative_resize_like_impl<Derived>::run(*this, size);
-    }
-
-    /** Resizes the matrix to match the dimensions of \c other, while leaving old values untouched.
-      *
-      * The method is intended for matrices of dynamic size. If you only want to change the number
-      * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
-      * conservativeResize(Index, NoChange_t).
-      *
-      * Matrices are resized relative to the top-left element. If values need to be
-      * appended to the matrix, they will be copied from \c other.
-      */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase<OtherDerived>& other)
-    {
-      internal::conservative_resize_like_impl<Derived,OtherDerived>::run(*this, other);
-    }
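
To make the difference between the destructive and the conservative resizing documented above concrete, a minimal sketch (names and sizes are arbitrary):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m(2, 2);
      m << 1, 2,
           3, 4;

      Eigen::MatrixXd a = m;
      a.resize(3, 3);               // reallocates; the previous values are lost
      a.setZero();

      Eigen::MatrixXd b = m;
      b.conservativeResize(3, 3);   // keeps the old 2x2 block; appended cells are uninitialized
      b.row(2).setZero();
      b.col(2).setZero();

      std::cout << b << std::endl;
      return 0;
    }
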
-
-    /** This is a special case of the templated operator=. Its purpose is to
-      * prevent a default operator= from hiding the templated operator=.
-      */
-    EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other)
-    {
-      return _set(other);
-    }
-
-    /** \sa MatrixBase::lazyAssign() */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase<OtherDerived>& other)
-    {
-      _resize_to_match(other);
-      return Base::lazyAssign(other.derived());
-    }
-
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue<OtherDerived>& func)
-    {
-      resize(func.rows(), func.cols());
-      return Base::operator=(func);
-    }
-
-    EIGEN_STRONG_INLINE explicit PlainObjectBase() : m_storage()
-    {
-//       _check_template_params();
-//       EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
-    }
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    // FIXME is it still needed ?
-    /** \internal */
-    PlainObjectBase(internal::constructor_without_unaligned_array_assert)
-      : m_storage(internal::constructor_without_unaligned_array_assert())
-    {
-//       _check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
-    }
-#endif
-
-    EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols)
-      : m_storage(a_size, nbRows, nbCols)
-    {
-//       _check_template_params();
-//       EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
-    }
-
-    /** \copydoc MatrixBase::operator=(const EigenBase<OtherDerived>&)
-      */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived> &other)
-    {
-      _resize_to_match(other);
-      Base::operator=(other.derived());
-      return this->derived();
-    }
-
-    /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase<OtherDerived> &other)
-      : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
-    {
-      _check_template_params();
-      internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.derived().rows(), other.derived().cols());
-      Base::operator=(other.derived());
-    }
-
-    /** \name Map
-      * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects,
-      * while the MapAligned() functions return aligned Map objects and thus should be called only with 16-byte-aligned
-      * \a data pointers.
-      *
-      * \see class Map
-      */
-    //@{
-    static inline ConstMapType Map(const Scalar* data)
-    { return ConstMapType(data); }
-    static inline MapType Map(Scalar* data)
-    { return MapType(data); }
-    static inline ConstMapType Map(const Scalar* data, Index size)
-    { return ConstMapType(data, size); }
-    static inline MapType Map(Scalar* data, Index size)
-    { return MapType(data, size); }
-    static inline ConstMapType Map(const Scalar* data, Index rows, Index cols)
-    { return ConstMapType(data, rows, cols); }
-    static inline MapType Map(Scalar* data, Index rows, Index cols)
-    { return MapType(data, rows, cols); }
-
-    static inline ConstAlignedMapType MapAligned(const Scalar* data)
-    { return ConstAlignedMapType(data); }
-    static inline AlignedMapType MapAligned(Scalar* data)
-    { return AlignedMapType(data); }
-    static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size)
-    { return ConstAlignedMapType(data, size); }
-    static inline AlignedMapType MapAligned(Scalar* data, Index size)
-    { return AlignedMapType(data, size); }
-    static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols)
-    { return ConstAlignedMapType(data, rows, cols); }
-    static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols)
-    { return AlignedMapType(data, rows, cols); }
-
-    template<int Outer, int Inner>
-    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, const Stride<Outer, Inner>& stride)
-    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, stride); }
-    template<int Outer, int Inner>
-    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, const Stride<Outer, Inner>& stride)
-    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, stride); }
-    template<int Outer, int Inner>
-    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index size, const Stride<Outer, Inner>& stride)
-    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, size, stride); }
-    template<int Outer, int Inner>
-    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index size, const Stride<Outer, Inner>& stride)
-    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
-    template<int Outer, int Inner>
-    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
-    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
-    template<int Outer, int Inner>
-    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
-    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
-
-    template<int Outer, int Inner>
-    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, const Stride<Outer, Inner>& stride)
-    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }
-    template<int Outer, int Inner>
-    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, const Stride<Outer, Inner>& stride)
-    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }
-    template<int Outer, int Inner>
-    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index size, const Stride<Outer, Inner>& stride)
-    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
-    template<int Outer, int Inner>
-    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index size, const Stride<Outer, Inner>& stride)
-    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
-    template<int Outer, int Inner>
-    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
-    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
-    template<int Outer, int Inner>
-    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
-    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
-    //@}
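
The Map()/MapAligned() helpers above wrap existing memory instead of allocating; a short sketch, assuming a plain column-major double buffer:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      double buf[6] = {1, 2, 3, 4, 5, 6};

      // View buf as a 2x3 column-major matrix without copying it.
      Eigen::Map<Eigen::MatrixXd> view = Eigen::MatrixXd::Map(buf, 2, 3);
      view(1, 2) = 60;              // writes through to buf[5]

      // Assigning the view to a plain matrix makes an owning copy.
      Eigen::MatrixXd owned = view;

      std::cout << owned << "\n" << buf[5] << std::endl;
      return 0;
    }
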
-
-    using Base::setConstant;
-    Derived& setConstant(Index size, const Scalar& value);
-    Derived& setConstant(Index rows, Index cols, const Scalar& value);
-
-    using Base::setZero;
-    Derived& setZero(Index size);
-    Derived& setZero(Index rows, Index cols);
-
-    using Base::setOnes;
-    Derived& setOnes(Index size);
-    Derived& setOnes(Index rows, Index cols);
-
-    using Base::setRandom;
-    Derived& setRandom(Index size);
-    Derived& setRandom(Index rows, Index cols);
-
-    #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN
-    #include EIGEN_PLAINOBJECTBASE_PLUGIN
-    #endif
-
-  protected:
-    /** \internal Resizes *this in preparation for assigning \a other to it.
-      * Takes care of doing all the checking that's needed.
-      *
-      * Note that copying a row-vector into a vector (and conversely) is allowed.
-      * The resizing, if any, is then done in the appropriate way so that row-vectors
-      * remain row-vectors and vectors remain vectors.
-      */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase<OtherDerived>& other)
-    {
-      #ifdef EIGEN_NO_AUTOMATIC_RESIZING
-      eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size())
-                 : (rows() == other.rows() && cols() == other.cols())))
-        && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
-      #else
-      resizeLike(other);
-      #endif
-    }
-
-    /**
-      * \brief Copies the value of the expression \a other into \c *this with automatic resizing.
-      *
-      * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
-      * it will be initialized.
-      *
-      * Note that copying a row-vector into a vector (and conversely) is allowed.
-      * The resizing, if any, is then done in the appropriate way so that row-vectors
-      * remain row-vectors and vectors remain vectors.
-      *
-      * \sa operator=(const MatrixBase<OtherDerived>&), _set_noalias()
-      *
-      * \internal
-      */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Derived& _set(const DenseBase<OtherDerived>& other)
-    {
-      _set_selector(other.derived(), typename internal::conditional<static_cast<bool>(int(OtherDerived::Flags) & EvalBeforeAssigningBit), internal::true_type, internal::false_type>::type());
-      return this->derived();
-    }
-
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::true_type&) { _set_noalias(other.eval()); }
-
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::false_type&) { _set_noalias(other); }
-
-    /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which
-      * is the case when creating a new matrix) so one can enforce lazy evaluation.
-      *
-      * \sa operator=(const MatrixBase<OtherDerived>&), _set()
-      */
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase<OtherDerived>& other)
-    {
-      // I don't think we need this resize call, since lazyAssign will resize anyway
-      // and lazyAssign will be called by the assign selector.
-      //_resize_to_match(other);
-      // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because
-      // it wouldn't allow copying a row-vector into a column-vector.
-      return internal::assign_selector<Derived,OtherDerived,false>::run(this->derived(), other.derived());
-    }
-
-    template<typename T0, typename T1>
-    EIGEN_STRONG_INLINE void _init2(Index nbRows, Index nbCols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
-    {
-      EIGEN_STATIC_ASSERT(bool(NumTraits<T0>::IsInteger) &&
-                          bool(NumTraits<T1>::IsInteger),
-                          FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
-      resize(nbRows,nbCols);
-    }
-    template<typename T0, typename T1>
-    EIGEN_STRONG_INLINE void _init2(const Scalar& val0, const Scalar& val1, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)
-    {
-      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
-      m_storage.data()[0] = val0;
-      m_storage.data()[1] = val1;
-    }
-
-    template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
-    friend struct internal::matrix_swap_impl;
-
-    /** \internal generic implementation of swap for dense storage; for dynamic-sized matrices of the same type it is enough to swap the
-      * data pointers.
-      */
-    template<typename OtherDerived>
-    void _swap(DenseBase<OtherDerived> const & other)
-    {
-      enum { SwapPointers = internal::is_same<Derived, OtherDerived>::value && Base::SizeAtCompileTime==Dynamic };
-      internal::matrix_swap_impl<Derived, OtherDerived, bool(SwapPointers)>::run(this->derived(), other.const_cast_derived());
-    }
-
-  public:
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    static EIGEN_STRONG_INLINE void _check_template_params()
-    {
-      EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (Options&RowMajor)==RowMajor)
-                        && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (Options&RowMajor)==0)
-                        && ((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0))
-                        && ((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0))
-                        && ((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0))
-                        && ((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0))
-                        && (MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime==Dynamic)
-                        && (MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime==Dynamic)
-                        && (Options & (DontAlign|RowMajor)) == Options),
-        INVALID_MATRIX_TEMPLATE_PARAMETERS)
-    }
-#endif
-
-private:
-    enum { ThisConstantIsPrivateInPlainObjectBase };
-};
-
-template <typename Derived, typename OtherDerived, bool IsVector>
-struct internal::conservative_resize_like_impl
-{
-  typedef typename Derived::Index Index;
-  static void run(DenseBase<Derived>& _this, Index rows, Index cols)
-  {
-    if (_this.rows() == rows && _this.cols() == cols) return;
-    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
-
-    if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows
-         (!Derived::IsRowMajor && _this.rows() == rows) )  // column-major and we change only the number of columns
-    {
-      internal::check_rows_cols_for_overflow<Derived::MaxSizeAtCompileTime>::run(rows, cols);
-      _this.derived().m_storage.conservativeResize(rows*cols,rows,cols);
-    }
-    else
-    {
-      // The storage order does not allow us to use reallocation.
-      typename Derived::PlainObject tmp(rows,cols);
-      const Index common_rows = (std::min)(rows, _this.rows());
-      const Index common_cols = (std::min)(cols, _this.cols());
-      tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
-      _this.derived().swap(tmp);
-    }
-  }
-
-  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
-  {
-    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
-
-    // Note: there is room for improvement here. Basically, for conservativeResize(Index,Index),
-    // it is not necessary that both RowsAtCompileTime and ColsAtCompileTime be Dynamic. If only one of the
-    // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or
-    // conservativeResize(NoChange_t, Index cols). For these methods, new static asserts like
-    // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be useful.
-    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
-    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)
-
-    if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows
-         (!Derived::IsRowMajor && _this.rows() == other.rows()) )  // column-major and we change only the number of columns
-    {
-      const Index new_rows = other.rows() - _this.rows();
-      const Index new_cols = other.cols() - _this.cols();
-      _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols());
-      if (new_rows>0)
-        _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows);
-      else if (new_cols>0)
-        _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols);
-    }
-    else
-    {
-      // The storage order does not allow us to use reallocation.
-      typename Derived::PlainObject tmp(other);
-      const Index common_rows = (std::min)(tmp.rows(), _this.rows());
-      const Index common_cols = (std::min)(tmp.cols(), _this.cols());
-      tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
-      _this.derived().swap(tmp);
-    }
-  }
-};
-
-namespace internal {
-
-template <typename Derived, typename OtherDerived>
-struct conservative_resize_like_impl<Derived,OtherDerived,true>
-{
-  typedef typename Derived::Index Index;
-  static void run(DenseBase<Derived>& _this, Index size)
-  {
-    const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;
-    const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1;
-    _this.derived().m_storage.conservativeResize(size,new_rows,new_cols);
-  }
-
-  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
-  {
-    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
-
-    const Index num_new_elements = other.size() - _this.size();
-
-    const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows();
-    const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1;
-    _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);
-
-    if (num_new_elements > 0)
-      _this.tail(num_new_elements) = other.tail(num_new_elements);
-  }
-};
-
-template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
-struct matrix_swap_impl
-{
-  static inline void run(MatrixTypeA& a, MatrixTypeB& b)
-  {
-    a.base().swap(b);
-  }
-};
-
-template<typename MatrixTypeA, typename MatrixTypeB>
-struct matrix_swap_impl<MatrixTypeA, MatrixTypeB, true>
-{
-  static inline void run(MatrixTypeA& a, MatrixTypeB& b)
-  {
-    static_cast<typename MatrixTypeA::Base&>(a).m_storage.swap(static_cast<typename MatrixTypeB::Base&>(b).m_storage);
-  }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_DENSESTORAGEBASE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Product.h b/resources/3rdparty/eigen/Eigen/src/Core/Product.h
deleted file mode 100644
index 314851d2e..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Product.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_PRODUCT_H
-#define EIGEN_PRODUCT_H
-
-namespace Eigen {
-
-template<typename Lhs, typename Rhs> class Product;
-template<typename Lhs, typename Rhs, typename StorageKind> class ProductImpl;
-
-/** \class Product
-  * \ingroup Core_Module
-  *
-  * \brief Expression of the product of two arbitrary matrices or vectors
-  *
-  * \param Lhs the type of the left-hand side expression
-  * \param Rhs the type of the right-hand side expression
-  *
-  * This class represents an expression of the product of two arbitrary matrices.
-  *
-  */
-
-// Use ProductReturnType to get correct traits, in particular vectorization flags
-namespace internal {
-template<typename Lhs, typename Rhs>
-struct traits<Product<Lhs, Rhs> >
-  : traits<typename ProductReturnType<Lhs, Rhs>::Type>
-{ 
-  // We want A+B*C to be of type Product<Matrix, Sum> and not Product<Matrix, Matrix>
-  // TODO: This flag should eventually go in a separate evaluator traits class
-  enum {
-    Flags = traits<typename ProductReturnType<Lhs, Rhs>::Type>::Flags & ~EvalBeforeNestingBit
-  };
-};
-} // end namespace internal
-
-
-template<typename Lhs, typename Rhs>
-class Product : public ProductImpl<Lhs,Rhs,typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
-                                                                            typename internal::traits<Rhs>::StorageKind>::ret>
-{
-  public:
-    
-    typedef typename ProductImpl<
-        Lhs, Rhs,
-        typename internal::promote_storage_type<typename Lhs::StorageKind,
-                                                typename Rhs::StorageKind>::ret>::Base Base;
-    EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
-
-    typedef typename Lhs::Nested LhsNested;
-    typedef typename Rhs::Nested RhsNested;
-    typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
-    typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;
-
-    Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs)
-    {
-      eigen_assert(lhs.cols() == rhs.rows()
-        && "invalid matrix product"
-        && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
-    }
-
-    inline Index rows() const { return m_lhs.rows(); }
-    inline Index cols() const { return m_rhs.cols(); }
-
-    const LhsNestedCleaned& lhs() const { return m_lhs; }
-    const RhsNestedCleaned& rhs() const { return m_rhs; }
-
-  protected:
-
-    const LhsNested m_lhs;
-    const RhsNested m_rhs;
-};
-
-template<typename Lhs, typename Rhs>
-class ProductImpl<Lhs,Rhs,Dense> : public internal::dense_xpr_base<Product<Lhs,Rhs> >::type
-{
-    typedef Product<Lhs, Rhs> Derived;
-  public:
-
-    typedef typename internal::dense_xpr_base<Product<Lhs, Rhs> >::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
-};
-
-/***************************************************************************
-* Implementation of matrix base methods
-***************************************************************************/
-
-
-/** \internal used to test the evaluator only
-  */
-template<typename Lhs,typename Rhs>
-const Product<Lhs,Rhs>
-prod(const Lhs& lhs, const Rhs& rhs)
-{
-  return Product<Lhs,Rhs>(lhs,rhs);
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_PRODUCT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/ProductBase.h b/resources/3rdparty/eigen/Eigen/src/Core/ProductBase.h
deleted file mode 100644
index 9748167a5..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/ProductBase.h
+++ /dev/null
@@ -1,278 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_PRODUCTBASE_H
-#define EIGEN_PRODUCTBASE_H
-
-namespace Eigen { 
-
-/** \class ProductBase
-  * \ingroup Core_Module
-  *
-  */
-
-namespace internal {
-template<typename Derived, typename _Lhs, typename _Rhs>
-struct traits<ProductBase<Derived,_Lhs,_Rhs> >
-{
-  typedef MatrixXpr XprKind;
-  typedef typename remove_all<_Lhs>::type Lhs;
-  typedef typename remove_all<_Rhs>::type Rhs;
-  typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
-  typedef typename promote_storage_type<typename traits<Lhs>::StorageKind,
-                                           typename traits<Rhs>::StorageKind>::ret StorageKind;
-  typedef typename promote_index_type<typename traits<Lhs>::Index,
-                                         typename traits<Rhs>::Index>::type Index;
-  enum {
-    RowsAtCompileTime = traits<Lhs>::RowsAtCompileTime,
-    ColsAtCompileTime = traits<Rhs>::ColsAtCompileTime,
-    MaxRowsAtCompileTime = traits<Lhs>::MaxRowsAtCompileTime,
-    MaxColsAtCompileTime = traits<Rhs>::MaxColsAtCompileTime,
-    Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0)
-          | EvalBeforeNestingBit | EvalBeforeAssigningBit | NestByRefBit,
-                  // Note that EvalBeforeNestingBit and NestByRefBit
-                  // are not used in practice because nested is overloaded for products
-    CoeffReadCost = 0 // FIXME why is it needed ?
-  };
-};
-}
-
-#define EIGEN_PRODUCT_PUBLIC_INTERFACE(Derived) \
-  typedef ProductBase<Derived, Lhs, Rhs > Base; \
-  EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
-  typedef typename Base::LhsNested LhsNested; \
-  typedef typename Base::_LhsNested _LhsNested; \
-  typedef typename Base::LhsBlasTraits LhsBlasTraits; \
-  typedef typename Base::ActualLhsType ActualLhsType; \
-  typedef typename Base::_ActualLhsType _ActualLhsType; \
-  typedef typename Base::RhsNested RhsNested; \
-  typedef typename Base::_RhsNested _RhsNested; \
-  typedef typename Base::RhsBlasTraits RhsBlasTraits; \
-  typedef typename Base::ActualRhsType ActualRhsType; \
-  typedef typename Base::_ActualRhsType _ActualRhsType; \
-  using Base::m_lhs; \
-  using Base::m_rhs;
-
-template<typename Derived, typename Lhs, typename Rhs>
-class ProductBase : public MatrixBase<Derived>
-{
-  public:
-    typedef MatrixBase<Derived> Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(ProductBase)
-    
-    typedef typename Lhs::Nested LhsNested;
-    typedef typename internal::remove_all<LhsNested>::type _LhsNested;
-    typedef internal::blas_traits<_LhsNested> LhsBlasTraits;
-    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
-    typedef typename internal::remove_all<ActualLhsType>::type _ActualLhsType;
-    typedef typename internal::traits<Lhs>::Scalar LhsScalar;
-
-    typedef typename Rhs::Nested RhsNested;
-    typedef typename internal::remove_all<RhsNested>::type _RhsNested;
-    typedef internal::blas_traits<_RhsNested> RhsBlasTraits;
-    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
-    typedef typename internal::remove_all<ActualRhsType>::type _ActualRhsType;
-    typedef typename internal::traits<Rhs>::Scalar RhsScalar;
-
-    // Diagonal of a product: no need to evaluate the arguments because they are going to be evaluated only once
-    typedef CoeffBasedProduct<LhsNested, RhsNested, 0> FullyLazyCoeffBaseProductType;
-
-  public:
-
-    typedef typename Base::PlainObject PlainObject;
-
-    ProductBase(const Lhs& a_lhs, const Rhs& a_rhs)
-      : m_lhs(a_lhs), m_rhs(a_rhs)
-    {
-      eigen_assert(a_lhs.cols() == a_rhs.rows()
-        && "invalid matrix product"
-        && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
-    }
-
-    inline Index rows() const { return m_lhs.rows(); }
-    inline Index cols() const { return m_rhs.cols(); }
-
-    template<typename Dest>
-    inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); }
-
-    template<typename Dest>
-    inline void addTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(1)); }
-
-    template<typename Dest>
-    inline void subTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(-1)); }
-
-    template<typename Dest>
-    inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { derived().scaleAndAddTo(dst,alpha); }
-
-    const _LhsNested& lhs() const { return m_lhs; }
-    const _RhsNested& rhs() const { return m_rhs; }
-
-    // Implicit conversion to the nested type (trigger the evaluation of the product)
-    operator const PlainObject& () const
-    {
-      m_result.resize(m_lhs.rows(), m_rhs.cols());
-      derived().evalTo(m_result);
-      return m_result;
-    }
-
-    const Diagonal<const FullyLazyCoeffBaseProductType,0> diagonal() const
-    { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
-
-    template<int Index>
-    const Diagonal<FullyLazyCoeffBaseProductType,Index> diagonal() const
-    { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
-
-    const Diagonal<FullyLazyCoeffBaseProductType,Dynamic> diagonal(Index index) const
-    { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); }
-
-    // restrict coeff accessors to 1x1 expressions. No need to care about mutators here since this isn't an lvalue expression
-    typename Base::CoeffReturnType coeff(Index row, Index col) const
-    {
-#ifdef EIGEN2_SUPPORT
-      return lhs().row(row).cwiseProduct(rhs().col(col).transpose()).sum();
-#else
-      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
-      eigen_assert(this->rows() == 1 && this->cols() == 1);
-      Matrix<Scalar,1,1> result = *this;
-      return result.coeff(row,col);
-#endif
-    }
-
-    typename Base::CoeffReturnType coeff(Index i) const
-    {
-      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
-      eigen_assert(this->rows() == 1 && this->cols() == 1);
-      Matrix<Scalar,1,1> result = *this;
-      return result.coeff(i);
-    }
-
-    const Scalar& coeffRef(Index row, Index col) const
-    {
-      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
-      eigen_assert(this->rows() == 1 && this->cols() == 1);
-      return derived().coeffRef(row,col);
-    }
-
-    const Scalar& coeffRef(Index i) const
-    {
-      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
-      eigen_assert(this->rows() == 1 && this->cols() == 1);
-      return derived().coeffRef(i);
-    }
-
-  protected:
-
-    LhsNested m_lhs;
-    RhsNested m_rhs;
-
-    mutable PlainObject m_result;
-};
-
-// here we need to overload the nested rule for products
-// such that the nested type is a const reference to a plain matrix
-namespace internal {
-template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
-struct nested<GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
-{
-  typedef PlainObject const& type;
-};
-}
-
-template<typename NestedProduct>
-class ScaledProduct;
-
-// Note that these operator* functions are not defined as member
-// functions of ProductBase because otherwise we would have to
-// redefine all the overloads defined in MatrixBase. Furthermore, using
-// "using Base::operator*" would not work with MSVC.
-//
-// Also note that here we accept any compatible scalar types
-template<typename Derived,typename Lhs,typename Rhs>
-const ScaledProduct<Derived>
-operator*(const ProductBase<Derived,Lhs,Rhs>& prod, typename Derived::Scalar x)
-{ return ScaledProduct<Derived>(prod.derived(), x); }
-
-template<typename Derived,typename Lhs,typename Rhs>
-typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
-                      const ScaledProduct<Derived> >::type
-operator*(const ProductBase<Derived,Lhs,Rhs>& prod, const typename Derived::RealScalar& x)
-{ return ScaledProduct<Derived>(prod.derived(), x); }
-
-
-template<typename Derived,typename Lhs,typename Rhs>
-const ScaledProduct<Derived>
-operator*(typename Derived::Scalar x,const ProductBase<Derived,Lhs,Rhs>& prod)
-{ return ScaledProduct<Derived>(prod.derived(), x); }
-
-template<typename Derived,typename Lhs,typename Rhs>
-typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
-                      const ScaledProduct<Derived> >::type
-operator*(const typename Derived::RealScalar& x,const ProductBase<Derived,Lhs,Rhs>& prod)
-{ return ScaledProduct<Derived>(prod.derived(), x); }
-
-namespace internal {
-template<typename NestedProduct>
-struct traits<ScaledProduct<NestedProduct> >
- : traits<ProductBase<ScaledProduct<NestedProduct>,
-                         typename NestedProduct::_LhsNested,
-                         typename NestedProduct::_RhsNested> >
-{
-  typedef typename traits<NestedProduct>::StorageKind StorageKind;
-};
-}
-
-template<typename NestedProduct>
-class ScaledProduct
-  : public ProductBase<ScaledProduct<NestedProduct>,
-                       typename NestedProduct::_LhsNested,
-                       typename NestedProduct::_RhsNested>
-{
-  public:
-    typedef ProductBase<ScaledProduct<NestedProduct>,
-                       typename NestedProduct::_LhsNested,
-                       typename NestedProduct::_RhsNested> Base;
-    typedef typename Base::Scalar Scalar;
-    typedef typename Base::PlainObject PlainObject;
-//     EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct)
-
-    ScaledProduct(const NestedProduct& prod, Scalar x)
-    : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {}
-
-    template<typename Dest>
-    inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst, Scalar(1)); }
-
-    template<typename Dest>
-    inline void addTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(1)); }
-
-    template<typename Dest>
-    inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); }
-
-    template<typename Dest>
-    inline void scaleAndAddTo(Dest& dst,Scalar a_alpha) const { m_prod.derived().scaleAndAddTo(dst,a_alpha * m_alpha); }
-
-    const Scalar& alpha() const { return m_alpha; }
-    
-  protected:
-    const NestedProduct& m_prod;
-    Scalar m_alpha;
-};
-
-/** \internal
-  * Overloaded to perform an efficient C = (A*B).lazy() */
-template<typename Derived>
-template<typename ProductDerived, typename Lhs, typename Rhs>
-Derived& MatrixBase<Derived>::lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other)
-{
-  other.derived().evalTo(derived());
-  return derived();
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_PRODUCTBASE_H
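
One practical consequence of the ScaledProduct machinery above is that scaling a product expression does not create an extra temporary: the scale factor is forwarded to scaleAndAddTo(). A hedged sketch of that behaviour, as implemented in the 3.1 sources removed here:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 4);

      // 2.0 * (A * B) stays a single scaled-product expression, so the result
      // is computed in one pass instead of evaluating A * B and scaling afterwards.
      Eigen::MatrixXd C = 2.0 * (A * B);

      std::cout << C.norm() << std::endl;
      return 0;
    }
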
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Random.h b/resources/3rdparty/eigen/Eigen/src/Core/Random.h
deleted file mode 100644
index bba99fc7c..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Random.h
+++ /dev/null
@@ -1,152 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_RANDOM_H
-#define EIGEN_RANDOM_H
-
-namespace Eigen { 
-
-namespace internal {
-
-template<typename Scalar> struct scalar_random_op {
-  EIGEN_EMPTY_STRUCT_CTOR(scalar_random_op)
-  template<typename Index>
-  inline const Scalar operator() (Index, Index = 0) const { return random<Scalar>(); }
-};
-
-template<typename Scalar>
-struct functor_traits<scalar_random_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false, IsRepeatable = false }; };
-
-} // end namespace internal
-
-/** \returns a random matrix expression
-  *
-  * The parameters \a rows and \a cols are the number of rows and of columns of
-  * the returned matrix. Must be compatible with this MatrixBase type.
-  *
-  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
-  * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used
-  * instead.
-  *
-  * Example: \include MatrixBase_random_int_int.cpp
-  * Output: \verbinclude MatrixBase_random_int_int.out
-  *
-  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
-  * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
-  * behavior with expressions involving random matrices.
-  *
-  * \sa MatrixBase::setRandom(), MatrixBase::Random(Index), MatrixBase::Random()
-  */
-template<typename Derived>
-inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
-DenseBase<Derived>::Random(Index rows, Index cols)
-{
-  return NullaryExpr(rows, cols, internal::scalar_random_op<Scalar>());
-}
-
-/** \returns a random vector expression
-  *
-  * The parameter \a size is the size of the returned vector.
-  * Must be compatible with this MatrixBase type.
-  *
-  * \only_for_vectors
-  *
-  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
-  * it is redundant to pass \a size as argument, so Random() should be used
-  * instead.
-  *
-  * Example: \include MatrixBase_random_int.cpp
-  * Output: \verbinclude MatrixBase_random_int.out
-  *
-  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
-  * a temporary vector whenever it is nested in a larger expression. This prevents unexpected
-  * behavior with expressions involving random matrices.
-  *
-  * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random()
-  */
-template<typename Derived>
-inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
-DenseBase<Derived>::Random(Index size)
-{
-  return NullaryExpr(size, internal::scalar_random_op<Scalar>());
-}
-
-/** \returns a fixed-size random matrix or vector expression
-  *
-  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
-  * need to use the variants taking size arguments.
-  *
-  * Example: \include MatrixBase_random.cpp
-  * Output: \verbinclude MatrixBase_random.out
-  *
-  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
-  * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
-  * behavior with expressions involving random matrices.
-  *
-  * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random(Index)
-  */
-template<typename Derived>
-inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
-DenseBase<Derived>::Random()
-{
-  return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_random_op<Scalar>());
-}
-
-/** Sets all coefficients in this expression to random values.
-  *
-  * Example: \include MatrixBase_setRandom.cpp
-  * Output: \verbinclude MatrixBase_setRandom.out
-  *
-  * \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
-  */
-template<typename Derived>
-inline Derived& DenseBase<Derived>::setRandom()
-{
-  return *this = Random(rows(), cols());
-}
-
-/** Resizes to the given \a size, and sets all coefficients in this expression to random values.
-  *
-  * \only_for_vectors
-  *
-  * Example: \include Matrix_setRandom_int.cpp
-  * Output: \verbinclude Matrix_setRandom_int.out
-  *
-  * \sa MatrixBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, MatrixBase::Random()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setRandom(Index size)
-{
-  resize(size);
-  return setRandom();
-}
-
-/** Resizes to the given size, and sets all coefficients in this expression to random values.
-  *
-  * \param rows the new number of rows
-  * \param cols the new number of columns
-  *
-  * Example: \include Matrix_setRandom_int_int.cpp
-  * Output: \verbinclude Matrix_setRandom_int_int.out
-  *
-  * \sa MatrixBase::setRandom(), setRandom(Index), class CwiseNullaryOp, MatrixBase::Random()
-  */
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setRandom(Index nbRows, Index nbCols)
-{
-  resize(nbRows, nbCols);
-  return setRandom();
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_RANDOM_H
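// Illustrative sketch of the Random()/setRandom() API documented above; the
// sizes used here are arbitrary assumptions.
#include <Eigen/Dense>

int main()
{
  // Dynamic-size variant: rows and cols are passed explicitly.
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 5);
  // Fixed-size variant: the size is encoded in the type, so no arguments.
  Eigen::Vector3f v = Eigen::Vector3f::Random();
  // In-place variants: resize if needed, then fill with random coefficients.
  Eigen::VectorXd w;
  w.setRandom(10);
  m.setRandom();
  return 0;
}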
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Replicate.h b/resources/3rdparty/eigen/Eigen/src/Core/Replicate.h
deleted file mode 100644
index dde86a834..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Replicate.h
+++ /dev/null
@@ -1,177 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_REPLICATE_H
-#define EIGEN_REPLICATE_H
-
-namespace Eigen { 
-
-/**
-  * \class Replicate
-  * \ingroup Core_Module
-  *
-  * \brief Expression of the multiple replication of a matrix or vector
-  *
-  * \param MatrixType the type of the object we are replicating
-  *
-  * This class represents an expression of the multiple replication of a matrix or vector.
-  * It is the return type of DenseBase::replicate() and most of the time
-  * this is the only way it is used.
-  *
-  * \sa DenseBase::replicate()
-  */
-
-namespace internal {
-template<typename MatrixType,int RowFactor,int ColFactor>
-struct traits<Replicate<MatrixType,RowFactor,ColFactor> >
- : traits<MatrixType>
-{
-  typedef typename MatrixType::Scalar Scalar;
-  typedef typename traits<MatrixType>::StorageKind StorageKind;
-  typedef typename traits<MatrixType>::XprKind XprKind;
-  enum {
-    Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
-  };
-  typedef typename nested<MatrixType,Factor>::type MatrixTypeNested;
-  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
-  enum {
-    RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic
-                      ? Dynamic
-                      : RowFactor * MatrixType::RowsAtCompileTime,
-    ColsAtCompileTime = ColFactor==Dynamic || int(MatrixType::ColsAtCompileTime)==Dynamic
-                      ? Dynamic
-                      : ColFactor * MatrixType::ColsAtCompileTime,
-   //FIXME we don't propagate the max sizes !!!
-    MaxRowsAtCompileTime = RowsAtCompileTime,
-    MaxColsAtCompileTime = ColsAtCompileTime,
-    IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1
-               : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0
-               : (MatrixType::Flags & RowMajorBit) ? 1 : 0,
-    Flags = (_MatrixTypeNested::Flags & HereditaryBits & ~RowMajorBit) | (IsRowMajor ? RowMajorBit : 0),
-    CoeffReadCost = _MatrixTypeNested::CoeffReadCost
-  };
-};
-}
-
-template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
-  : public internal::dense_xpr_base< Replicate<MatrixType,RowFactor,ColFactor> >::type
-{
-    typedef typename internal::traits<Replicate>::MatrixTypeNested MatrixTypeNested;
-    typedef typename internal::traits<Replicate>::_MatrixTypeNested _MatrixTypeNested;
-  public:
-
-    typedef typename internal::dense_xpr_base<Replicate>::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)
-
-    template<typename OriginalMatrixType>
-    inline explicit Replicate(const OriginalMatrixType& a_matrix)
-      : m_matrix(a_matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor)
-    {
-      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
-                          THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
-      eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic);
-    }
-
-    template<typename OriginalMatrixType>
-    inline Replicate(const OriginalMatrixType& a_matrix, Index rowFactor, Index colFactor)
-      : m_matrix(a_matrix), m_rowFactor(rowFactor), m_colFactor(colFactor)
-    {
-      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
-                          THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
-    }
-
-    inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
-    inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
-
-    inline Scalar coeff(Index rowId, Index colId) const
-    {
-      // try to avoid using modulo; this is a pure optimization strategy
-      const Index actual_row  = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
-                            : RowFactor==1 ? rowId
-                            : rowId%m_matrix.rows();
-      const Index actual_col  = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
-                            : ColFactor==1 ? colId
-                            : colId%m_matrix.cols();
-
-      return m_matrix.coeff(actual_row, actual_col);
-    }
-    template<int LoadMode>
-    inline PacketScalar packet(Index rowId, Index colId) const
-    {
-      const Index actual_row  = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
-                            : RowFactor==1 ? rowId
-                            : rowId%m_matrix.rows();
-      const Index actual_col  = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
-                            : ColFactor==1 ? colId
-                            : colId%m_matrix.cols();
-
-      return m_matrix.template packet<LoadMode>(actual_row, actual_col);
-    }
-
-    const _MatrixTypeNested& nestedExpression() const
-    { 
-      return m_matrix; 
-    }
-
-  protected:
-    MatrixTypeNested m_matrix;
-    const internal::variable_if_dynamic<Index, RowFactor> m_rowFactor;
-    const internal::variable_if_dynamic<Index, ColFactor> m_colFactor;
-};
-
-/**
-  * \return an expression of the replication of \c *this
-  *
-  * Example: \include MatrixBase_replicate.cpp
-  * Output: \verbinclude MatrixBase_replicate.out
-  *
-  * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate
-  */
-template<typename Derived>
-template<int RowFactor, int ColFactor>
-inline const Replicate<Derived,RowFactor,ColFactor>
-DenseBase<Derived>::replicate() const
-{
-  return Replicate<Derived,RowFactor,ColFactor>(derived());
-}
-
-/**
-  * \return an expression of the replication of \c *this
-  *
-  * Example: \include MatrixBase_replicate_int_int.cpp
-  * Output: \verbinclude MatrixBase_replicate_int_int.out
-  *
-  * \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
-  */
-template<typename Derived>
-inline const Replicate<Derived,Dynamic,Dynamic>
-DenseBase<Derived>::replicate(Index rowFactor,Index colFactor) const
-{
-  return Replicate<Derived,Dynamic,Dynamic>(derived(),rowFactor,colFactor);
-}
-
-/**
-  * \return an expression of the replication of each column (or row) of \c *this
-  *
-  * Example: \include DirectionWise_replicate_int.cpp
-  * Output: \verbinclude DirectionWise_replicate_int.out
-  *
-  * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate
-  */
-template<typename ExpressionType, int Direction>
-const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
-VectorwiseOp<ExpressionType,Direction>::replicate(Index factor) const
-{
-  return typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
-          (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_REPLICATE_H
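// Illustrative sketch of the replicate() overloads documented above; factors
// and sizes are arbitrary assumptions.
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(2, 2);
  // Compile-time factors: tile m 3 times vertically and 2 times horizontally.
  Eigen::MatrixXd a = m.replicate<3, 2>();
  // Run-time factors: same tiling with dynamic replication counts.
  Eigen::MatrixXd b = m.replicate(3, 2);
  // Vector-wise variant: stack 4 vertical copies of each column (8x2 result).
  Eigen::MatrixXd c = m.colwise().replicate(4);
  return 0;
}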
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Select.h b/resources/3rdparty/eigen/Eigen/src/Core/Select.h
deleted file mode 100644
index 7ee8f23ba..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Select.h
+++ /dev/null
@@ -1,162 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SELECT_H
-#define EIGEN_SELECT_H
-
-namespace Eigen { 
-
-/** \class Select
-  * \ingroup Core_Module
-  *
-  * \brief Expression of a coefficient wise version of the C++ ternary operator ?:
-  *
-  * \param ConditionMatrixType the type of the \em condition expression which must be a boolean matrix
-  * \param ThenMatrixType the type of the \em then expression
-  * \param ElseMatrixType the type of the \em else expression
-  *
-  * This class represents an expression of a coefficient wise version of the C++ ternary operator ?:.
-  * It is the return type of DenseBase::select() and most of the time this is the only way it is used.
-  *
-  * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const
-  */
-
-namespace internal {
-template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
-struct traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
- : traits<ThenMatrixType>
-{
-  typedef typename traits<ThenMatrixType>::Scalar Scalar;
-  typedef Dense StorageKind;
-  typedef typename traits<ThenMatrixType>::XprKind XprKind;
-  typedef typename ConditionMatrixType::Nested ConditionMatrixNested;
-  typedef typename ThenMatrixType::Nested ThenMatrixNested;
-  typedef typename ElseMatrixType::Nested ElseMatrixNested;
-  enum {
-    RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime,
-    ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime,
-    MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime,
-    MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime,
-    Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits,
-    CoeffReadCost = traits<typename remove_all<ConditionMatrixNested>::type>::CoeffReadCost
-                  + EIGEN_SIZE_MAX(traits<typename remove_all<ThenMatrixNested>::type>::CoeffReadCost,
-                                   traits<typename remove_all<ElseMatrixNested>::type>::CoeffReadCost)
-  };
-};
-}
-
-template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
-class Select : internal::no_assignment_operator,
-  public internal::dense_xpr_base< Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::type
-{
-  public:
-
-    typedef typename internal::dense_xpr_base<Select>::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Select)
-
-    Select(const ConditionMatrixType& a_conditionMatrix,
-           const ThenMatrixType& a_thenMatrix,
-           const ElseMatrixType& a_elseMatrix)
-      : m_condition(a_conditionMatrix), m_then(a_thenMatrix), m_else(a_elseMatrix)
-    {
-      eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());
-      eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
-    }
-
-    Index rows() const { return m_condition.rows(); }
-    Index cols() const { return m_condition.cols(); }
-
-    const Scalar coeff(Index i, Index j) const
-    {
-      if (m_condition.coeff(i,j))
-        return m_then.coeff(i,j);
-      else
-        return m_else.coeff(i,j);
-    }
-
-    const Scalar coeff(Index i) const
-    {
-      if (m_condition.coeff(i))
-        return m_then.coeff(i);
-      else
-        return m_else.coeff(i);
-    }
-
-    const ConditionMatrixType& conditionMatrix() const
-    {
-      return m_condition;
-    }
-
-    const ThenMatrixType& thenMatrix() const
-    {
-      return m_then;
-    }
-
-    const ElseMatrixType& elseMatrix() const
-    {
-      return m_else;
-    }
-
-  protected:
-    typename ConditionMatrixType::Nested m_condition;
-    typename ThenMatrixType::Nested m_then;
-    typename ElseMatrixType::Nested m_else;
-};
-
-
-/** \returns a matrix where each coefficient (i,j) is equal to \a thenMatrix(i,j)
-  * if \c *this(i,j), and \a elseMatrix(i,j) otherwise.
-  *
-  * Example: \include MatrixBase_select.cpp
-  * Output: \verbinclude MatrixBase_select.out
-  *
-  * \sa class Select
-  */
-template<typename Derived>
-template<typename ThenDerived,typename ElseDerived>
-inline const Select<Derived,ThenDerived,ElseDerived>
-DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
-                            const DenseBase<ElseDerived>& elseMatrix) const
-{
-  return Select<Derived,ThenDerived,ElseDerived>(derived(), thenMatrix.derived(), elseMatrix.derived());
-}
-
-/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
-  * the \em else expression being a scalar value.
-  *
-  * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
-  */
-template<typename Derived>
-template<typename ThenDerived>
-inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
-DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
-                            typename ThenDerived::Scalar elseScalar) const
-{
-  return Select<Derived,ThenDerived,typename ThenDerived::ConstantReturnType>(
-    derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar));
-}
-
-/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
-  * the \em then expression being a scalar value.
-  *
-  * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
-  */
-template<typename Derived>
-template<typename ElseDerived>
-inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
-DenseBase<Derived>::select(typename ElseDerived::Scalar thenScalar,
-                            const DenseBase<ElseDerived>& elseMatrix) const
-{
-  return Select<Derived,typename ElseDerived::ConstantReturnType,ElseDerived>(
-    derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived());
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_SELECT_H
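// Illustrative sketch of the coefficient-wise select() documented above; the
// 0.5 threshold is an arbitrary assumption.
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
  // Matrix/matrix form: keep m(i,j) where the condition holds, 0 elsewhere.
  Eigen::MatrixXd a = (m.array() > 0.5).select(m, Eigen::MatrixXd::Zero(3, 3));
  // Matrix/scalar form: the "else" operand may also be a plain scalar.
  Eigen::MatrixXd b = (m.array() > 0.5).select(m, 0.5);
  return 0;
}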
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Swap.h b/resources/3rdparty/eigen/Eigen/src/Core/Swap.h
deleted file mode 100644
index bf58bd599..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Swap.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SWAP_H
-#define EIGEN_SWAP_H
-
-namespace Eigen { 
-
-/** \class SwapWrapper
-  * \ingroup Core_Module
-  *
-  * \internal
-  *
-  * \brief Internal helper class for swapping two expressions
-  */
-namespace internal {
-template<typename ExpressionType>
-struct traits<SwapWrapper<ExpressionType> > : traits<ExpressionType> {};
-}
-
-template<typename ExpressionType> class SwapWrapper
-  : public internal::dense_xpr_base<SwapWrapper<ExpressionType> >::type
-{
-  public:
-
-    typedef typename internal::dense_xpr_base<SwapWrapper>::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(SwapWrapper)
-    typedef typename internal::packet_traits<Scalar>::type Packet;
-
-    inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {}
-
-    inline Index rows() const { return m_expression.rows(); }
-    inline Index cols() const { return m_expression.cols(); }
-    inline Index outerStride() const { return m_expression.outerStride(); }
-    inline Index innerStride() const { return m_expression.innerStride(); }
-    
-    typedef typename internal::conditional<
-                       internal::is_lvalue<ExpressionType>::value,
-                       Scalar,
-                       const Scalar
-                     >::type ScalarWithConstIfNotLvalue;
-                     
-    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
-    inline const Scalar* data() const { return m_expression.data(); }
-
-    inline Scalar& coeffRef(Index rowId, Index colId)
-    {
-      return m_expression.const_cast_derived().coeffRef(rowId, colId);
-    }
-
-    inline Scalar& coeffRef(Index index)
-    {
-      return m_expression.const_cast_derived().coeffRef(index);
-    }
-
-    inline Scalar& coeffRef(Index rowId, Index colId) const
-    {
-      return m_expression.coeffRef(rowId, colId);
-    }
-
-    inline Scalar& coeffRef(Index index) const
-    {
-      return m_expression.coeffRef(index);
-    }
-
-    template<typename OtherDerived>
-    void copyCoeff(Index rowId, Index colId, const DenseBase<OtherDerived>& other)
-    {
-      OtherDerived& _other = other.const_cast_derived();
-      eigen_internal_assert(rowId >= 0 && rowId < rows()
-                         && colId >= 0 && colId < cols());
-      Scalar tmp = m_expression.coeff(rowId, colId);
-      m_expression.coeffRef(rowId, colId) = _other.coeff(rowId, colId);
-      _other.coeffRef(rowId, colId) = tmp;
-    }
-
-    template<typename OtherDerived>
-    void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
-    {
-      OtherDerived& _other = other.const_cast_derived();
-      eigen_internal_assert(index >= 0 && index < m_expression.size());
-      Scalar tmp = m_expression.coeff(index);
-      m_expression.coeffRef(index) = _other.coeff(index);
-      _other.coeffRef(index) = tmp;
-    }
-
-    template<typename OtherDerived, int StoreMode, int LoadMode>
-    void copyPacket(Index rowId, Index colId, const DenseBase<OtherDerived>& other)
-    {
-      OtherDerived& _other = other.const_cast_derived();
-      eigen_internal_assert(rowId >= 0 && rowId < rows()
-                        && colId >= 0 && colId < cols());
-      Packet tmp = m_expression.template packet<StoreMode>(rowId, colId);
-      m_expression.template writePacket<StoreMode>(rowId, colId,
-        _other.template packet<LoadMode>(rowId, colId)
-      );
-      _other.template writePacket<LoadMode>(rowId, colId, tmp);
-    }
-
-    template<typename OtherDerived, int StoreMode, int LoadMode>
-    void copyPacket(Index index, const DenseBase<OtherDerived>& other)
-    {
-      OtherDerived& _other = other.const_cast_derived();
-      eigen_internal_assert(index >= 0 && index < m_expression.size());
-      Packet tmp = m_expression.template packet<StoreMode>(index);
-      m_expression.template writePacket<StoreMode>(index,
-        _other.template packet<LoadMode>(index)
-      );
-      _other.template writePacket<LoadMode>(index, tmp);
-    }
-
-    ExpressionType& expression() const { return m_expression; }
-
-  protected:
-    ExpressionType& m_expression;
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_SWAP_H
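// Illustrative sketch: SwapWrapper is an internal helper, but it is what backs
// the public swap() calls shown here. Sizes are arbitrary assumptions.
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd b = Eigen::MatrixXd::Random(3, 3);
  a.swap(b);                // coefficient-wise swap of two whole matrices
  a.row(0).swap(a.row(2));  // swap two rows of the same matrix in place
  return 0;
}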
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Transpose.h b/resources/3rdparty/eigen/Eigen/src/Core/Transpose.h
deleted file mode 100644
index 34944e055..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Transpose.h
+++ /dev/null
@@ -1,414 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_TRANSPOSE_H
-#define EIGEN_TRANSPOSE_H
-
-namespace Eigen { 
-
-/** \class Transpose
-  * \ingroup Core_Module
-  *
-  * \brief Expression of the transpose of a matrix
-  *
-  * \param MatrixType the type of the object of which we are taking the transpose
-  *
-  * This class represents an expression of the transpose of a matrix.
-  * It is the return type of MatrixBase::transpose() and MatrixBase::adjoint()
-  * and most of the time this is the only way it is used.
-  *
-  * \sa MatrixBase::transpose(), MatrixBase::adjoint()
-  */
-
-namespace internal {
-template<typename MatrixType>
-struct traits<Transpose<MatrixType> > : traits<MatrixType>
-{
-  typedef typename MatrixType::Scalar Scalar;
-  typedef typename nested<MatrixType>::type MatrixTypeNested;
-  typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedPlain;
-  typedef typename traits<MatrixType>::StorageKind StorageKind;
-  typedef typename traits<MatrixType>::XprKind XprKind;
-  enum {
-    RowsAtCompileTime = MatrixType::ColsAtCompileTime,
-    ColsAtCompileTime = MatrixType::RowsAtCompileTime,
-    MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime,
-    MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-    FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
-    Flags0 = MatrixTypeNestedPlain::Flags & ~(LvalueBit | NestByRefBit),
-    Flags1 = Flags0 | FlagsLvalueBit,
-    Flags = Flags1 ^ RowMajorBit,
-    CoeffReadCost = MatrixTypeNestedPlain::CoeffReadCost,
-    InnerStrideAtCompileTime = inner_stride_at_compile_time<MatrixType>::ret,
-    OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret
-  };
-};
-}
-
-template<typename MatrixType, typename StorageKind> class TransposeImpl;
-
-template<typename MatrixType> class Transpose
-  : public TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>
-{
-  public:
-
-    typedef typename TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;
-    EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)
-
-    inline Transpose(MatrixType& a_matrix) : m_matrix(a_matrix) {}
-
-    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
-
-    inline Index rows() const { return m_matrix.cols(); }
-    inline Index cols() const { return m_matrix.rows(); }
-
-    /** \returns the nested expression */
-    const typename internal::remove_all<typename MatrixType::Nested>::type&
-    nestedExpression() const { return m_matrix; }
-
-    /** \returns the nested expression */
-    typename internal::remove_all<typename MatrixType::Nested>::type&
-    nestedExpression() { return m_matrix.const_cast_derived(); }
-
-  protected:
-    typename MatrixType::Nested m_matrix;
-};
-
-namespace internal {
-
-template<typename MatrixType, bool HasDirectAccess = has_direct_access<MatrixType>::ret>
-struct TransposeImpl_base
-{
-  typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;
-};
-
-template<typename MatrixType>
-struct TransposeImpl_base<MatrixType, false>
-{
-  typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;
-};
-
-} // end namespace internal
-
-template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
-  : public internal::TransposeImpl_base<MatrixType>::type
-{
-  public:
-
-    typedef typename internal::TransposeImpl_base<MatrixType>::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
-
-    inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
-    inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
-
-    typedef typename internal::conditional<
-                       internal::is_lvalue<MatrixType>::value,
-                       Scalar,
-                       const Scalar
-                     >::type ScalarWithConstIfNotLvalue;
-
-    inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
-    inline const Scalar* data() const { return derived().nestedExpression().data(); }
-
-    inline ScalarWithConstIfNotLvalue& coeffRef(Index rowId, Index colId)
-    {
-      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
-      return derived().nestedExpression().const_cast_derived().coeffRef(colId, rowId);
-    }
-
-    inline ScalarWithConstIfNotLvalue& coeffRef(Index index)
-    {
-      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
-      return derived().nestedExpression().const_cast_derived().coeffRef(index);
-    }
-
-    inline const Scalar& coeffRef(Index rowId, Index colId) const
-    {
-      return derived().nestedExpression().coeffRef(colId, rowId);
-    }
-
-    inline const Scalar& coeffRef(Index index) const
-    {
-      return derived().nestedExpression().coeffRef(index);
-    }
-
-    inline CoeffReturnType coeff(Index rowId, Index colId) const
-    {
-      return derived().nestedExpression().coeff(colId, rowId);
-    }
-
-    inline CoeffReturnType coeff(Index index) const
-    {
-      return derived().nestedExpression().coeff(index);
-    }
-
-    template<int LoadMode>
-    inline const PacketScalar packet(Index rowId, Index colId) const
-    {
-      return derived().nestedExpression().template packet<LoadMode>(colId, rowId);
-    }
-
-    template<int LoadMode>
-    inline void writePacket(Index rowId, Index colId, const PacketScalar& x)
-    {
-      derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(colId, rowId, x);
-    }
-
-    template<int LoadMode>
-    inline const PacketScalar packet(Index index) const
-    {
-      return derived().nestedExpression().template packet<LoadMode>(index);
-    }
-
-    template<int LoadMode>
-    inline void writePacket(Index index, const PacketScalar& x)
-    {
-      derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(index, x);
-    }
-};
-
-/** \returns an expression of the transpose of *this.
-  *
-  * Example: \include MatrixBase_transpose.cpp
-  * Output: \verbinclude MatrixBase_transpose.out
-  *
-  * \warning If you want to replace a matrix by its own transpose, do \b NOT do this:
-  * \code
-  * m = m.transpose(); // bug!!! caused by aliasing effect
-  * \endcode
-  * Instead, use the transposeInPlace() method:
-  * \code
-  * m.transposeInPlace();
-  * \endcode
-  * which gives Eigen good opportunities for optimization, or alternatively you can also do:
-  * \code
-  * m = m.transpose().eval();
-  * \endcode
-  *
-  * \sa transposeInPlace(), adjoint() */
-template<typename Derived>
-inline Transpose<Derived>
-DenseBase<Derived>::transpose()
-{
-  return derived();
-}
-
-/** This is the const version of transpose().
-  *
-  * Make sure you read the warning for transpose() !
-  *
-  * \sa transposeInPlace(), adjoint() */
-template<typename Derived>
-inline const typename DenseBase<Derived>::ConstTransposeReturnType
-DenseBase<Derived>::transpose() const
-{
-  return ConstTransposeReturnType(derived());
-}
-
-/** \returns an expression of the adjoint (i.e. conjugate transpose) of *this.
-  *
-  * Example: \include MatrixBase_adjoint.cpp
-  * Output: \verbinclude MatrixBase_adjoint.out
-  *
-  * \warning If you want to replace a matrix by its own adjoint, do \b NOT do this:
-  * \code
-  * m = m.adjoint(); // bug!!! caused by aliasing effect
-  * \endcode
-  * Instead, use the adjointInPlace() method:
-  * \code
-  * m.adjointInPlace();
-  * \endcode
-  * which gives Eigen good opportunities for optimization, or alternatively you can also do:
-  * \code
-  * m = m.adjoint().eval();
-  * \endcode
-  *
-  * \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */
-template<typename Derived>
-inline const typename MatrixBase<Derived>::AdjointReturnType
-MatrixBase<Derived>::adjoint() const
-{
-  return this->transpose(); // in the complex case, the .conjugate() is implicit here
-                            // due to implicit conversion to return type
-}
-
-/***************************************************************************
-* "in place" transpose implementation
-***************************************************************************/
-
-namespace internal {
-
-template<typename MatrixType,
-  bool IsSquare = (MatrixType::RowsAtCompileTime == MatrixType::ColsAtCompileTime) && MatrixType::RowsAtCompileTime!=Dynamic>
-struct inplace_transpose_selector;
-
-template<typename MatrixType>
-struct inplace_transpose_selector<MatrixType,true> { // square matrix
-  static void run(MatrixType& m) {
-    m.template triangularView<StrictlyUpper>().swap(m.transpose());
-  }
-};
-
-template<typename MatrixType>
-struct inplace_transpose_selector<MatrixType,false> { // non square matrix
-  static void run(MatrixType& m) {
-    if (m.rows()==m.cols())
-      m.template triangularView<StrictlyUpper>().swap(m.transpose());
-    else
-      m = m.transpose().eval();
-  }
-};
-
-} // end namespace internal
-
-/** This is the "in place" version of transpose(): it replaces \c *this by its own transpose.
-  * Thus, doing
-  * \code
-  * m.transposeInPlace();
-  * \endcode
-  * has the same effect on m as doing
-  * \code
-  * m = m.transpose().eval();
-  * \endcode
-  * and is faster and also safer because in the latter line of code, forgetting the eval() results
-  * in a bug caused by aliasing.
-  *
-  * Notice however that this method is only useful if you want to replace a matrix by its own transpose.
-  * If you just need the transpose of a matrix, use transpose().
-  *
-  * \note if the matrix is not square, then \c *this must be a resizable matrix.
-  *
-  * \sa transpose(), adjoint(), adjointInPlace() */
-template<typename Derived>
-inline void DenseBase<Derived>::transposeInPlace()
-{
-  internal::inplace_transpose_selector<Derived>::run(derived());
-}
-
-/***************************************************************************
-* "in place" adjoint implementation
-***************************************************************************/
-
-/** This is the "in place" version of adjoint(): it replaces \c *this by its own transpose.
-  * Thus, doing
-  * \code
-  * m.adjointInPlace();
-  * \endcode
-  * has the same effect on m as doing
-  * \code
-  * m = m.adjoint().eval();
-  * \endcode
-  * and is faster and also safer because in the latter line of code, forgetting the eval() results
-  * in a bug caused by aliasing.
-  *
-  * Notice however that this method is only useful if you want to replace a matrix by its own adjoint.
-  * If you just need the adjoint of a matrix, use adjoint().
-  *
-  * \note if the matrix is not square, then \c *this must be a resizable matrix.
-  *
-  * \sa transpose(), adjoint(), transposeInPlace() */
-template<typename Derived>
-inline void MatrixBase<Derived>::adjointInPlace()
-{
-  derived() = adjoint().eval();
-}
-
-#ifndef EIGEN_NO_DEBUG
-
-// The following is to detect aliasing problems in most common cases.
-
-namespace internal {
-
-template<typename BinOp,typename NestedXpr,typename Rhs>
-struct blas_traits<SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> >
- : blas_traits<NestedXpr>
-{
-  typedef SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> XprType;
-  static inline const XprType extract(const XprType& x) { return x; }
-};
-
-template<bool DestIsTransposed, typename OtherDerived>
-struct check_transpose_aliasing_compile_time_selector
-{
-  enum { ret = bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed };
-};
-
-template<bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>
-struct check_transpose_aliasing_compile_time_selector<DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >
-{
-  enum { ret =    bool(blas_traits<DerivedA>::IsTransposed) != DestIsTransposed
-               || bool(blas_traits<DerivedB>::IsTransposed) != DestIsTransposed
-  };
-};
-
-template<typename Scalar, bool DestIsTransposed, typename OtherDerived>
-struct check_transpose_aliasing_run_time_selector
-{
-  static bool run(const Scalar* dest, const OtherDerived& src)
-  {
-    return (bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src));
-  }
-};
-
-template<typename Scalar, bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>
-struct check_transpose_aliasing_run_time_selector<Scalar,DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >
-{
-  static bool run(const Scalar* dest, const CwiseBinaryOp<BinOp,DerivedA,DerivedB>& src)
-  {
-    return ((blas_traits<DerivedA>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.lhs())))
-        || ((blas_traits<DerivedB>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.rhs())));
-  }
-};
-
-// the following selector, checkTransposeAliasing_impl, based on MightHaveTransposeAliasing,
-// is because when the condition controlling the assert is known at compile time, ICC emits a warning.
-// This is actually a good warning: in expressions that don't have any transposing, the condition is
-// known at compile time to be false, and using that, we can avoid generating the code of the assert again
-// and again for all these expressions that don't need it.
-
-template<typename Derived, typename OtherDerived,
-         bool MightHaveTransposeAliasing
-                 = check_transpose_aliasing_compile_time_selector
-                     <blas_traits<Derived>::IsTransposed,OtherDerived>::ret
-        >
-struct checkTransposeAliasing_impl
-{
-    static void run(const Derived& dst, const OtherDerived& other)
-    {
-        eigen_assert((!check_transpose_aliasing_run_time_selector
-                      <typename Derived::Scalar,blas_traits<Derived>::IsTransposed,OtherDerived>
-                      ::run(extract_data(dst), other))
-          && "aliasing detected during tranposition, use transposeInPlace() "
-             "or evaluate the rhs into a temporary using .eval()");
-
-    }
-};
-
-template<typename Derived, typename OtherDerived>
-struct checkTransposeAliasing_impl<Derived, OtherDerived, false>
-{
-    static void run(const Derived&, const OtherDerived&)
-    {
-    }
-};
-
-} // end namespace internal
-
-template<typename Derived>
-template<typename OtherDerived>
-void DenseBase<Derived>::checkTransposeAliasing(const OtherDerived& other) const
-{
-    internal::checkTransposeAliasing_impl<Derived, OtherDerived>::run(derived(), other);
-}
-#endif
-
-} // end namespace Eigen
-
-#endif // EIGEN_TRANSPOSE_H
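// Illustrative sketch of the aliasing pitfall and the in-place alternatives
// documented above.
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 4);
  // m = m.transpose();          // wrong: source and destination alias
  m.transposeInPlace();          // safe in-place transposition
  m = m.transpose().eval();      // also safe: eval() forces a temporary
  Eigen::MatrixXcd c = Eigen::MatrixXcd::Random(4, 4);
  c.adjointInPlace();            // in-place conjugate transpose
  return 0;
}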
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Transpositions.h b/resources/3rdparty/eigen/Eigen/src/Core/Transpositions.h
deleted file mode 100644
index e4ba0756f..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Transpositions.h
+++ /dev/null
@@ -1,436 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2010-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_TRANSPOSITIONS_H
-#define EIGEN_TRANSPOSITIONS_H
-
-namespace Eigen { 
-
-/** \class Transpositions
-  * \ingroup Core_Module
-  *
-  * \brief Represents a sequence of transpositions (row/column interchange)
-  *
-  * \param SizeAtCompileTime the number of transpositions, or Dynamic
-  * \param MaxSizeAtCompileTime the maximum number of transpositions, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
-  *
-  * This class represents a permutation transformation as a sequence of \em n transpositions
-  * \f$[T_{n-1} \ldots T_{i} \ldots T_{0}]\f$. It is internally stored as a vector of integers \c indices.
-  * Each transposition \f$ T_{i} \f$ applied on the left of a matrix (\f$ T_{i} M\f$) interchanges
-  * the rows \c i and \c indices[i] of the matrix \c M.
-  * A transposition applied on the right (e.g., \f$ M T_{i}\f$) yields a column interchange.
-  *
-  * Compared to the class PermutationMatrix, such a sequence of transpositions is what is
-  * computed during a decomposition with pivoting, and it is faster when applying the permutation in-place.
-  * 
-  * To apply a sequence of transpositions to a matrix, simply use the operator * as in the following example:
-  * \code
-  * Transpositions tr;
-  * MatrixXf mat;
-  * mat = tr * mat;
-  * \endcode
-  * In this example, we detect that the matrix appears on both sides, and so the transpositions
-  * are applied in-place without any temporary or extra copy.
-  *
-  * \sa class PermutationMatrix
-  */
-
-namespace internal {
-template<typename TranspositionType, typename MatrixType, int Side, bool Transposed=false> struct transposition_matrix_product_retval;
-}
-
-template<typename Derived>
-class TranspositionsBase
-{
-    typedef internal::traits<Derived> Traits;
-    
-  public:
-
-    typedef typename Traits::IndicesType IndicesType;
-    typedef typename IndicesType::Scalar Index;
-
-    Derived& derived() { return *static_cast<Derived*>(this); }
-    const Derived& derived() const { return *static_cast<const Derived*>(this); }
-
-    /** Copies the \a other transpositions into \c *this */
-    template<typename OtherDerived>
-    Derived& operator=(const TranspositionsBase<OtherDerived>& other)
-    {
-      indices() = other.indices();
-      return derived();
-    }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** This is a special case of the templated operator=. Its purpose is to
-      * prevent a default operator= from hiding the templated operator=.
-      */
-    Derived& operator=(const TranspositionsBase& other)
-    {
-      indices() = other.indices();
-      return derived();
-    }
-    #endif
-
-    /** \returns the number of transpositions */
-    inline Index size() const { return indices().size(); }
-
-    /** Direct access to the underlying index vector */
-    inline const Index& coeff(Index i) const { return indices().coeff(i); }
-    /** Direct access to the underlying index vector */
-    inline Index& coeffRef(Index i) { return indices().coeffRef(i); }
-    /** Direct access to the underlying index vector */
-    inline const Index& operator()(Index i) const { return indices()(i); }
-    /** Direct access to the underlying index vector */
-    inline Index& operator()(Index i) { return indices()(i); }
-    /** Direct access to the underlying index vector */
-    inline const Index& operator[](Index i) const { return indices()(i); }
-    /** Direct access to the underlying index vector */
-    inline Index& operator[](Index i) { return indices()(i); }
-
-    /** const version of indices(). */
-    const IndicesType& indices() const { return derived().indices(); }
-    /** \returns a reference to the stored array representing the transpositions. */
-    IndicesType& indices() { return derived().indices(); }
-
-    /** Resizes to given size. */
-    inline void resize(int newSize)
-    {
-      indices().resize(newSize);
-    }
-
-    /** Sets \c *this to represent an identity transformation */
-    void setIdentity()
-    {
-      for(int i = 0; i < indices().size(); ++i)
-        coeffRef(i) = i;
-    }
-
-    // FIXME: do we want such methods ?
-    // might be useful when the target matrix expression is complex, e.g.:
-    // object.matrix().block(..,..,..,..) = trans * object.matrix().block(..,..,..,..);
-    /*
-    template<typename MatrixType>
-    void applyForwardToRows(MatrixType& mat) const
-    {
-      for(Index k=0 ; k<size() ; ++k)
-        if(m_indices(k)!=k)
-          mat.row(k).swap(mat.row(m_indices(k)));
-    }
-
-    template<typename MatrixType>
-    void applyBackwardToRows(MatrixType& mat) const
-    {
-      for(Index k=size()-1 ; k>=0 ; --k)
-        if(m_indices(k)!=k)
-          mat.row(k).swap(mat.row(m_indices(k)));
-    }
-    */
-
-    /** \returns the inverse transformation */
-    inline Transpose<TranspositionsBase> inverse() const
-    { return Transpose<TranspositionsBase>(derived()); }
-
-    /** \returns the transpose transformation */
-    inline Transpose<TranspositionsBase> transpose() const
-    { return Transpose<TranspositionsBase>(derived()); }
-
-  protected:
-};
-
-namespace internal {
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
-struct traits<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
-{
-  typedef IndexType Index;
-  typedef Matrix<Index, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
-};
-}
-
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
-class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
-{
-    typedef internal::traits<Transpositions> Traits;
-  public:
-
-    typedef TranspositionsBase<Transpositions> Base;
-    typedef typename Traits::IndicesType IndicesType;
-    typedef typename IndicesType::Scalar Index;
-
-    inline Transpositions() {}
-
-    /** Copy constructor. */
-    template<typename OtherDerived>
-    inline Transpositions(const TranspositionsBase<OtherDerived>& other)
-      : m_indices(other.indices()) {}
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** Standard copy constructor. Defined only to prevent a default copy constructor
-      * from hiding the other templated constructor */
-    inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {}
-    #endif
-
-    /** Generic constructor from expression of the transposition indices. */
-    template<typename Other>
-    explicit inline Transpositions(const MatrixBase<Other>& a_indices) : m_indices(a_indices)
-    {}
-
-    /** Copies the \a other transpositions into \c *this */
-    template<typename OtherDerived>
-    Transpositions& operator=(const TranspositionsBase<OtherDerived>& other)
-    {
-      return Base::operator=(other);
-    }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** This is a special case of the templated operator=. Its purpose is to
-      * prevent a default operator= from hiding the templated operator=.
-      */
-    Transpositions& operator=(const Transpositions& other)
-    {
-      m_indices = other.m_indices;
-      return *this;
-    }
-    #endif
-
-    /** Constructs an uninitialized permutation matrix of given size.
-      */
-    inline Transpositions(Index size) : m_indices(size)
-    {}
-
-    /** const version of indices(). */
-    const IndicesType& indices() const { return m_indices; }
-    /** \returns a reference to the stored array representing the transpositions. */
-    IndicesType& indices() { return m_indices; }
-
-  protected:
-
-    IndicesType m_indices;
-};
-
-
-namespace internal {
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
-struct traits<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,_PacketAccess> >
-{
-  typedef IndexType Index;
-  typedef Map<const Matrix<Index,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1>, _PacketAccess> IndicesType;
-};
-}
-
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int PacketAccess>
-class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess>
- : public TranspositionsBase<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess> >
-{
-    typedef internal::traits<Map> Traits;
-  public:
-
-    typedef TranspositionsBase<Map> Base;
-    typedef typename Traits::IndicesType IndicesType;
-    typedef typename IndicesType::Scalar Index;
-
-    inline Map(const Index* indicesPtr)
-      : m_indices(indicesPtr)
-    {}
-
-    inline Map(const Index* indicesPtr, Index size)
-      : m_indices(indicesPtr,size)
-    {}
-
-    /** Copies the \a other transpositions into \c *this */
-    template<typename OtherDerived>
-    Map& operator=(const TranspositionsBase<OtherDerived>& other)
-    {
-      return Base::operator=(other);
-    }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** This is a special case of the templated operator=. Its purpose is to
-      * prevent a default operator= from hiding the templated operator=.
-      */
-    Map& operator=(const Map& other)
-    {
-      m_indices = other.m_indices;
-      return *this;
-    }
-    #endif
-
-    /** const version of indices(). */
-    const IndicesType& indices() const { return m_indices; }
-    
-    /** \returns a reference to the stored array representing the transpositions. */
-    IndicesType& indices() { return m_indices; }
-
-  protected:
-
-    IndicesType m_indices;
-};
-
-namespace internal {
-template<typename _IndicesType>
-struct traits<TranspositionsWrapper<_IndicesType> >
-{
-  typedef typename _IndicesType::Scalar Index;
-  typedef _IndicesType IndicesType;
-};
-}
-
-template<typename _IndicesType>
-class TranspositionsWrapper
- : public TranspositionsBase<TranspositionsWrapper<_IndicesType> >
-{
-    typedef internal::traits<TranspositionsWrapper> Traits;
-  public:
-
-    typedef TranspositionsBase<TranspositionsWrapper> Base;
-    typedef typename Traits::IndicesType IndicesType;
-    typedef typename IndicesType::Scalar Index;
-
-    inline TranspositionsWrapper(IndicesType& a_indices)
-      : m_indices(a_indices)
-    {}
-
-    /** Copies the \a other transpositions into \c *this */
-    template<typename OtherDerived>
-    TranspositionsWrapper& operator=(const TranspositionsBase<OtherDerived>& other)
-    {
-      return Base::operator=(other);
-    }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** This is a special case of the templated operator=. Its purpose is to
-      * prevent a default operator= from hiding the templated operator=.
-      */
-    TranspositionsWrapper& operator=(const TranspositionsWrapper& other)
-    {
-      m_indices = other.m_indices;
-      return *this;
-    }
-    #endif
-
-    /** const version of indices(). */
-    const IndicesType& indices() const { return m_indices; }
-
-    /** \returns a reference to the stored array representing the transpositions. */
-    IndicesType& indices() { return m_indices; }
-
-  protected:
-
-    const typename IndicesType::Nested m_indices;
-};
-
-/** \returns the \a matrix with the \a transpositions applied to the columns.
-  */
-template<typename Derived, typename TranspositionsDerived>
-inline const internal::transposition_matrix_product_retval<TranspositionsDerived, Derived, OnTheRight>
-operator*(const MatrixBase<Derived>& matrix,
-          const TranspositionsBase<TranspositionsDerived> &transpositions)
-{
-  return internal::transposition_matrix_product_retval
-           <TranspositionsDerived, Derived, OnTheRight>
-           (transpositions.derived(), matrix.derived());
-}
-
-/** \returns the \a matrix with the \a transpositions applied to the rows.
-  */
-template<typename Derived, typename TranspositionDerived>
-inline const internal::transposition_matrix_product_retval
-               <TranspositionDerived, Derived, OnTheLeft>
-operator*(const TranspositionsBase<TranspositionDerived> &transpositions,
-          const MatrixBase<Derived>& matrix)
-{
-  return internal::transposition_matrix_product_retval
-           <TranspositionDerived, Derived, OnTheLeft>
-           (transpositions.derived(), matrix.derived());
-}
-
-namespace internal {
-
-template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
-struct traits<transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
-{
-  typedef typename MatrixType::PlainObject ReturnType;
-};
-
-template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
-struct transposition_matrix_product_retval
- : public ReturnByValue<transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
-{
-    typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
-    typedef typename TranspositionType::Index Index;
-
-    transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix)
-      : m_transpositions(tr), m_matrix(matrix)
-    {}
-
-    inline int rows() const { return m_matrix.rows(); }
-    inline int cols() const { return m_matrix.cols(); }
-
-    template<typename Dest> inline void evalTo(Dest& dst) const
-    {
-      const int size = m_transpositions.size();
-      Index j = 0;
-
-      if(!(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix)))
-        dst = m_matrix;
-
-      for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k)
-        if((j=m_transpositions.coeff(k))!=k)
-        {
-          if(Side==OnTheLeft)
-            dst.row(k).swap(dst.row(j));
-          else if(Side==OnTheRight)
-            dst.col(k).swap(dst.col(j));
-        }
-    }
-
-  protected:
-    const TranspositionType& m_transpositions;
-    typename MatrixType::Nested m_matrix;
-};
-
-} // end namespace internal
-
-/* Template partial specialization for transposed/inverse transpositions */
-
-template<typename TranspositionsDerived>
-class Transpose<TranspositionsBase<TranspositionsDerived> >
-{
-    typedef TranspositionsDerived TranspositionType;
-    typedef typename TranspositionType::IndicesType IndicesType;
-  public:
-
-    Transpose(const TranspositionType& t) : m_transpositions(t) {}
-
-    inline int size() const { return m_transpositions.size(); }
-
-    /** \returns the \a matrix with the inverse transpositions applied to the columns.
-      */
-    template<typename Derived> friend
-    inline const internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>
-    operator*(const MatrixBase<Derived>& matrix, const Transpose& trt)
-    {
-      return internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>(trt.m_transpositions, matrix.derived());
-    }
-
-    /** \returns the \a matrix with the inverse transpositions applied to the rows.
-      */
-    template<typename Derived>
-    inline const internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>
-    operator*(const MatrixBase<Derived>& matrix) const
-    {
-      return internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>(m_transpositions, matrix.derived());
-    }
-
-  protected:
-    const TranspositionType& m_transpositions;
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_TRANSPOSITIONS_H
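// Illustrative sketch of applying a Transpositions sequence, as documented
// above; the index values are arbitrary assumptions.
#include <Eigen/Dense>

int main()
{
  Eigen::Transpositions<Eigen::Dynamic> tr(3);
  tr.setIdentity();
  tr[0] = 2;                               // first transposition swaps rows 0 and 2
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
  m = tr * m;                              // left application: row interchanges, in place
  m = m * tr;                              // right application: column interchanges
  Eigen::MatrixXd back = tr.inverse() * m; // inverse sequence applied on the left
  return 0;
}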
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/TriangularMatrix.h b/resources/3rdparty/eigen/Eigen/src/Core/TriangularMatrix.h
deleted file mode 100644
index fcd40e32f..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/TriangularMatrix.h
+++ /dev/null
@@ -1,828 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_TRIANGULARMATRIX_H
-#define EIGEN_TRIANGULARMATRIX_H
-
-namespace Eigen { 
-
-namespace internal {
-  
-template<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval;
-  
-}
-
-/** \internal
-  *
-  * \class TriangularBase
-  * \ingroup Core_Module
-  *
-  * \brief Base class for triangular part in a matrix
-  */
-template<typename Derived> class TriangularBase : public EigenBase<Derived>
-{
-  public:
-
-    enum {
-      Mode = internal::traits<Derived>::Mode,
-      CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
-      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
-      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
-      MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
-      MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime
-    };
-    typedef typename internal::traits<Derived>::Scalar Scalar;
-    typedef typename internal::traits<Derived>::StorageKind StorageKind;
-    typedef typename internal::traits<Derived>::Index Index;
-    typedef typename internal::traits<Derived>::DenseMatrixType DenseMatrixType;
-    typedef DenseMatrixType DenseType;
-
-    inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); }
-
-    inline Index rows() const { return derived().rows(); }
-    inline Index cols() const { return derived().cols(); }
-    inline Index outerStride() const { return derived().outerStride(); }
-    inline Index innerStride() const { return derived().innerStride(); }
-
-    inline Scalar coeff(Index row, Index col) const  { return derived().coeff(row,col); }
-    inline Scalar& coeffRef(Index row, Index col) { return derived().coeffRef(row,col); }
-
-    /** \see MatrixBase::copyCoeff(row,col)
-      */
-    template<typename Other>
-    EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, Other& other)
-    {
-      derived().coeffRef(row, col) = other.coeff(row, col);
-    }
-
-    inline Scalar operator()(Index row, Index col) const
-    {
-      check_coordinates(row, col);
-      return coeff(row,col);
-    }
-    inline Scalar& operator()(Index row, Index col)
-    {
-      check_coordinates(row, col);
-      return coeffRef(row,col);
-    }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
-    inline Derived& derived() { return *static_cast<Derived*>(this); }
-    #endif // not EIGEN_PARSED_BY_DOXYGEN
-
-    template<typename DenseDerived>
-    void evalTo(MatrixBase<DenseDerived> &other) const;
-    template<typename DenseDerived>
-    void evalToLazy(MatrixBase<DenseDerived> &other) const;
-
-    DenseMatrixType toDenseMatrix() const
-    {
-      DenseMatrixType res(rows(), cols());
-      evalToLazy(res);
-      return res;
-    }
-
-  protected:
-
-    void check_coordinates(Index row, Index col) const
-    {
-      EIGEN_ONLY_USED_FOR_DEBUG(row);
-      EIGEN_ONLY_USED_FOR_DEBUG(col);
-      eigen_assert(col>=0 && col<cols() && row>=0 && row<rows());
-      const int mode = int(Mode) & ~SelfAdjoint;
-      EIGEN_ONLY_USED_FOR_DEBUG(mode);
-      eigen_assert((mode==Upper && col>=row)
-                || (mode==Lower && col<=row)
-                || ((mode==StrictlyUpper || mode==UnitUpper) && col>row)
-                || ((mode==StrictlyLower || mode==UnitLower) && col<row));
-    }
-
-    #ifdef EIGEN_INTERNAL_DEBUGGING
-    void check_coordinates_internal(Index row, Index col) const
-    {
-      check_coordinates(row, col);
-    }
-    #else
-    void check_coordinates_internal(Index , Index ) const {}
-    #endif
-
-};
-
-/** \class TriangularView
-  * \ingroup Core_Module
-  *
-  * \brief Base class for triangular part in a matrix
-  *
-  * \param MatrixType the type of the object in which we are taking the triangular part
-  * \param Mode the kind of triangular matrix expression to construct. Can be #Upper,
-  *             #Lower, #UnitUpper, #UnitLower, #StrictlyUpper, or #StrictlyLower.
-  *             This is in fact a bit field; it must have either #Upper or #Lower, 
-  *             and additionnaly it may have #UnitDiag or #ZeroDiag or neither.
-  *
-  * This class represents a triangular part of a matrix, not necessarily square. Strictly speaking, for rectangular
-  * matrices one should speak of "trapezoid" parts. This class is the return type
-  * of MatrixBase::triangularView() and most of the time this is the only way it is used.
-  *
-  * \sa MatrixBase::triangularView()
-  */
-namespace internal {
-template<typename MatrixType, unsigned int _Mode>
-struct traits<TriangularView<MatrixType, _Mode> > : traits<MatrixType>
-{
-  typedef typename nested<MatrixType>::type MatrixTypeNested;
-  typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
-  typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
-  typedef MatrixType ExpressionType;
-  typedef typename MatrixType::PlainObject DenseMatrixType;
-  enum {
-    Mode = _Mode,
-    Flags = (MatrixTypeNestedCleaned::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) | Mode,
-    CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost
-  };
-};
-}
-
-template<int Mode, bool LhsIsTriangular,
-         typename Lhs, bool LhsIsVector,
-         typename Rhs, bool RhsIsVector>
-struct TriangularProduct;
-
-template<typename _MatrixType, unsigned int _Mode> class TriangularView
-  : public TriangularBase<TriangularView<_MatrixType, _Mode> >
-{
-  public:
-
-    typedef TriangularBase<TriangularView> Base;
-    typedef typename internal::traits<TriangularView>::Scalar Scalar;
-
-    typedef _MatrixType MatrixType;
-    typedef typename internal::traits<TriangularView>::DenseMatrixType DenseMatrixType;
-    typedef DenseMatrixType PlainObject;
-
-  protected:
-    typedef typename internal::traits<TriangularView>::MatrixTypeNested MatrixTypeNested;
-    typedef typename internal::traits<TriangularView>::MatrixTypeNestedNonRef MatrixTypeNestedNonRef;
-    typedef typename internal::traits<TriangularView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned;
-
-    typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
-    
-  public:
-    using Base::evalToLazy;
-  
-
-    typedef typename internal::traits<TriangularView>::StorageKind StorageKind;
-    typedef typename internal::traits<TriangularView>::Index Index;
-
-    enum {
-      Mode = _Mode,
-      TransposeMode = (Mode & Upper ? Lower : 0)
-                    | (Mode & Lower ? Upper : 0)
-                    | (Mode & (UnitDiag))
-                    | (Mode & (ZeroDiag))
-    };
-
-    inline TriangularView(const MatrixType& matrix) : m_matrix(matrix)
-    {}
-
-    inline Index rows() const { return m_matrix.rows(); }
-    inline Index cols() const { return m_matrix.cols(); }
-    inline Index outerStride() const { return m_matrix.outerStride(); }
-    inline Index innerStride() const { return m_matrix.innerStride(); }
-
-    /** \sa MatrixBase::operator+=() */
-    template<typename Other> TriangularView&  operator+=(const DenseBase<Other>& other) { return *this = m_matrix + other.derived(); }
-    /** \sa MatrixBase::operator-=() */
-    template<typename Other> TriangularView&  operator-=(const DenseBase<Other>& other) { return *this = m_matrix - other.derived(); }
-    /** \sa MatrixBase::operator*=() */
-    TriangularView&  operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = m_matrix * other; }
-    /** \sa MatrixBase::operator/=() */
-    TriangularView&  operator/=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = m_matrix / other; }
-
-    /** \sa MatrixBase::fill() */
-    void fill(const Scalar& value) { setConstant(value); }
-    /** \sa MatrixBase::setConstant() */
-    TriangularView& setConstant(const Scalar& value)
-    { return *this = MatrixType::Constant(rows(), cols(), value); }
-    /** \sa MatrixBase::setZero() */
-    TriangularView& setZero() { return setConstant(Scalar(0)); }
-    /** \sa MatrixBase::setOnes() */
-    TriangularView& setOnes() { return setConstant(Scalar(1)); }
-
-    /** \sa MatrixBase::coeff()
-      * \warning the coordinates must fit into the referenced triangular part
-      */
-    inline Scalar coeff(Index row, Index col) const
-    {
-      Base::check_coordinates_internal(row, col);
-      return m_matrix.coeff(row, col);
-    }
-
-    /** \sa MatrixBase::coeffRef()
-      * \warning the coordinates must fit into the referenced triangular part
-      */
-    inline Scalar& coeffRef(Index row, Index col)
-    {
-      Base::check_coordinates_internal(row, col);
-      return m_matrix.const_cast_derived().coeffRef(row, col);
-    }
-
-    const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
-    MatrixTypeNestedCleaned& nestedExpression() { return *const_cast<MatrixTypeNestedCleaned*>(&m_matrix); }
-
-    /** Assigns a triangular matrix to a triangular part of a dense matrix */
-    template<typename OtherDerived>
-    TriangularView& operator=(const TriangularBase<OtherDerived>& other);
-
-    template<typename OtherDerived>
-    TriangularView& operator=(const MatrixBase<OtherDerived>& other);
-
-    TriangularView& operator=(const TriangularView& other)
-    { return *this = other.nestedExpression(); }
-
-    template<typename OtherDerived>
-    void lazyAssign(const TriangularBase<OtherDerived>& other);
-
-    template<typename OtherDerived>
-    void lazyAssign(const MatrixBase<OtherDerived>& other);
-
-    /** \sa MatrixBase::conjugate() */
-    inline TriangularView<MatrixConjugateReturnType,Mode> conjugate()
-    { return m_matrix.conjugate(); }
-    /** \sa MatrixBase::conjugate() const */
-    inline const TriangularView<MatrixConjugateReturnType,Mode> conjugate() const
-    { return m_matrix.conjugate(); }
-
-    /** \sa MatrixBase::adjoint() const */
-    inline const TriangularView<const typename MatrixType::AdjointReturnType,TransposeMode> adjoint() const
-    { return m_matrix.adjoint(); }
-
-    /** \sa MatrixBase::transpose() */
-    inline TriangularView<Transpose<MatrixType>,TransposeMode> transpose()
-    {
-      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
-      return m_matrix.const_cast_derived().transpose();
-    }
-    /** \sa MatrixBase::transpose() const */
-    inline const TriangularView<Transpose<MatrixType>,TransposeMode> transpose() const
-    {
-      return m_matrix.transpose();
-    }
-
-    /** Efficient triangular matrix times vector/matrix product */
-    template<typename OtherDerived>
-    TriangularProduct<Mode,true,MatrixType,false,OtherDerived, OtherDerived::IsVectorAtCompileTime>
-    operator*(const MatrixBase<OtherDerived>& rhs) const
-    {
-      return TriangularProduct
-              <Mode,true,MatrixType,false,OtherDerived,OtherDerived::IsVectorAtCompileTime>
-              (m_matrix, rhs.derived());
-    }
-
-    /** Efficient vector/matrix times triangular matrix product */
-    template<typename OtherDerived> friend
-    TriangularProduct<Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
-    operator*(const MatrixBase<OtherDerived>& lhs, const TriangularView& rhs)
-    {
-      return TriangularProduct
-              <Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
-              (lhs.derived(),rhs.m_matrix);
-    }
-
-    #ifdef EIGEN2_SUPPORT
-    template<typename OtherDerived>
-    struct eigen2_product_return_type
-    {
-      typedef typename TriangularView<MatrixType,Mode>::DenseMatrixType DenseMatrixType;
-      typedef typename OtherDerived::PlainObject::DenseType OtherPlainObject;
-      typedef typename ProductReturnType<DenseMatrixType, OtherPlainObject>::Type ProdRetType;
-      typedef typename ProdRetType::PlainObject type;
-    };
-    template<typename OtherDerived>
-    const typename eigen2_product_return_type<OtherDerived>::type
-    operator*(const EigenBase<OtherDerived>& rhs) const
-    {
-      typename OtherDerived::PlainObject::DenseType rhsPlainObject;
-      rhs.evalTo(rhsPlainObject);
-      return this->toDenseMatrix() * rhsPlainObject;
-    }
-    template<typename OtherMatrixType>
-    bool isApprox(const TriangularView<OtherMatrixType, Mode>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
-    {
-      return this->toDenseMatrix().isApprox(other.toDenseMatrix(), precision);
-    }
-    template<typename OtherDerived>
-    bool isApprox(const MatrixBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
-    {
-      return this->toDenseMatrix().isApprox(other, precision);
-    }
-    #endif // EIGEN2_SUPPORT
-
-    template<int Side, typename Other>
-    inline const internal::triangular_solve_retval<Side,TriangularView, Other>
-    solve(const MatrixBase<Other>& other) const;
-
-    template<int Side, typename OtherDerived>
-    void solveInPlace(const MatrixBase<OtherDerived>& other) const;
-
-    template<typename Other>
-    inline const internal::triangular_solve_retval<OnTheLeft,TriangularView, Other> 
-    solve(const MatrixBase<Other>& other) const
-    { return solve<OnTheLeft>(other); }
-
-    template<typename OtherDerived>
-    void solveInPlace(const MatrixBase<OtherDerived>& other) const
-    { return solveInPlace<OnTheLeft>(other); }
-
-    const SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView() const
-    {
-      EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR);
-      return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
-    }
-    SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView()
-    {
-      EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR);
-      return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
-    }
-
-    template<typename OtherDerived>
-    void swap(TriangularBase<OtherDerived> const & other)
-    {
-      TriangularView<SwapWrapper<MatrixType>,Mode>(const_cast<MatrixType&>(m_matrix)).lazyAssign(other.derived());
-    }
-
-    template<typename OtherDerived>
-    void swap(MatrixBase<OtherDerived> const & other)
-    {
-      SwapWrapper<MatrixType> swaper(const_cast<MatrixType&>(m_matrix));
-      TriangularView<SwapWrapper<MatrixType>,Mode>(swaper).lazyAssign(other.derived());
-    }
-
-    Scalar determinant() const
-    {
-      if (Mode & UnitDiag)
-        return 1;
-      else if (Mode & ZeroDiag)
-        return 0;
-      else
-        return m_matrix.diagonal().prod();
-    }
-    
-    // TODO simplify the following:
-    template<typename ProductDerived, typename Lhs, typename Rhs>
-    EIGEN_STRONG_INLINE TriangularView& operator=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
-    {
-      setZero();
-      return assignProduct(other,1);
-    }
-    
-    template<typename ProductDerived, typename Lhs, typename Rhs>
-    EIGEN_STRONG_INLINE TriangularView& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
-    {
-      return assignProduct(other,1);
-    }
-    
-    template<typename ProductDerived, typename Lhs, typename Rhs>
-    EIGEN_STRONG_INLINE TriangularView& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
-    {
-      return assignProduct(other,-1);
-    }
-    
-    
-    template<typename ProductDerived>
-    EIGEN_STRONG_INLINE TriangularView& operator=(const ScaledProduct<ProductDerived>& other)
-    {
-      setZero();
-      return assignProduct(other,other.alpha());
-    }
-    
-    template<typename ProductDerived>
-    EIGEN_STRONG_INLINE TriangularView& operator+=(const ScaledProduct<ProductDerived>& other)
-    {
-      return assignProduct(other,other.alpha());
-    }
-    
-    template<typename ProductDerived>
-    EIGEN_STRONG_INLINE TriangularView& operator-=(const ScaledProduct<ProductDerived>& other)
-    {
-      return assignProduct(other,-other.alpha());
-    }
-    
-  protected:
-    
-    template<typename ProductDerived, typename Lhs, typename Rhs>
-    EIGEN_STRONG_INLINE TriangularView& assignProduct(const ProductBase<ProductDerived, Lhs,Rhs>& prod, const Scalar& alpha);
-
-    MatrixTypeNested m_matrix;
-};
-
-/***************************************************************************
-* Implementation of triangular evaluation/assignment
-***************************************************************************/
-
-namespace internal {
-
-template<typename Derived1, typename Derived2, unsigned int Mode, int UnrollCount, bool ClearOpposite>
-struct triangular_assignment_selector
-{
-  enum {
-    col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
-    row = (UnrollCount-1) % Derived1::RowsAtCompileTime
-  };
-  
-  typedef typename Derived1::Scalar Scalar;
-
-  static inline void run(Derived1 &dst, const Derived2 &src)
-  {
-    triangular_assignment_selector<Derived1, Derived2, Mode, UnrollCount-1, ClearOpposite>::run(dst, src);
-
-    eigen_assert( Mode == Upper || Mode == Lower
-            || Mode == StrictlyUpper || Mode == StrictlyLower
-            || Mode == UnitUpper || Mode == UnitLower);
-    if((Mode == Upper && row <= col)
-    || (Mode == Lower && row >= col)
-    || (Mode == StrictlyUpper && row < col)
-    || (Mode == StrictlyLower && row > col)
-    || (Mode == UnitUpper && row < col)
-    || (Mode == UnitLower && row > col))
-      dst.copyCoeff(row, col, src);
-    else if(ClearOpposite)
-    {
-      if (Mode&UnitDiag && row==col)
-        dst.coeffRef(row, col) = Scalar(1);
-      else
-        dst.coeffRef(row, col) = Scalar(0);
-    }
-  }
-};
-
-// prevent buggy user code from causing an infinite recursion
-template<typename Derived1, typename Derived2, unsigned int Mode, bool ClearOpposite>
-struct triangular_assignment_selector<Derived1, Derived2, Mode, 0, ClearOpposite>
-{
-  static inline void run(Derived1 &, const Derived2 &) {}
-};
-
-template<typename Derived1, typename Derived2, bool ClearOpposite>
-struct triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, ClearOpposite>
-{
-  typedef typename Derived1::Index Index;
-  typedef typename Derived1::Scalar Scalar;
-  static inline void run(Derived1 &dst, const Derived2 &src)
-  {
-    for(Index j = 0; j < dst.cols(); ++j)
-    {
-      Index maxi = (std::min)(j, dst.rows()-1);
-      for(Index i = 0; i <= maxi; ++i)
-        dst.copyCoeff(i, j, src);
-      if (ClearOpposite)
-        for(Index i = maxi+1; i < dst.rows(); ++i)
-          dst.coeffRef(i, j) = Scalar(0);
-    }
-  }
-};
-
-template<typename Derived1, typename Derived2, bool ClearOpposite>
-struct triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, ClearOpposite>
-{
-  typedef typename Derived1::Index Index;
-  static inline void run(Derived1 &dst, const Derived2 &src)
-  {
-    for(Index j = 0; j < dst.cols(); ++j)
-    {
-      for(Index i = j; i < dst.rows(); ++i)
-        dst.copyCoeff(i, j, src);
-      Index maxi = (std::min)(j, dst.rows());
-      if (ClearOpposite)
-        for(Index i = 0; i < maxi; ++i)
-          dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
-    }
-  }
-};
-
-template<typename Derived1, typename Derived2, bool ClearOpposite>
-struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic, ClearOpposite>
-{
-  typedef typename Derived1::Index Index;
-  typedef typename Derived1::Scalar Scalar;
-  static inline void run(Derived1 &dst, const Derived2 &src)
-  {
-    for(Index j = 0; j < dst.cols(); ++j)
-    {
-      Index maxi = (std::min)(j, dst.rows());
-      for(Index i = 0; i < maxi; ++i)
-        dst.copyCoeff(i, j, src);
-      if (ClearOpposite)
-        for(Index i = maxi; i < dst.rows(); ++i)
-          dst.coeffRef(i, j) = Scalar(0);
-    }
-  }
-};
-
-template<typename Derived1, typename Derived2, bool ClearOpposite>
-struct triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dynamic, ClearOpposite>
-{
-  typedef typename Derived1::Index Index;
-  static inline void run(Derived1 &dst, const Derived2 &src)
-  {
-    for(Index j = 0; j < dst.cols(); ++j)
-    {
-      for(Index i = j+1; i < dst.rows(); ++i)
-        dst.copyCoeff(i, j, src);
-      Index maxi = (std::min)(j, dst.rows()-1);
-      if (ClearOpposite)
-        for(Index i = 0; i <= maxi; ++i)
-          dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
-    }
-  }
-};
-
-template<typename Derived1, typename Derived2, bool ClearOpposite>
-struct triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic, ClearOpposite>
-{
-  typedef typename Derived1::Index Index;
-  static inline void run(Derived1 &dst, const Derived2 &src)
-  {
-    for(Index j = 0; j < dst.cols(); ++j)
-    {
-      Index maxi = (std::min)(j, dst.rows());
-      for(Index i = 0; i < maxi; ++i)
-        dst.copyCoeff(i, j, src);
-      if (ClearOpposite)
-      {
-        for(Index i = maxi+1; i < dst.rows(); ++i)
-          dst.coeffRef(i, j) = 0;
-      }
-    }
-    dst.diagonal().setOnes();
-  }
-};
-template<typename Derived1, typename Derived2, bool ClearOpposite>
-struct triangular_assignment_selector<Derived1, Derived2, UnitLower, Dynamic, ClearOpposite>
-{
-  typedef typename Derived1::Index Index;
-  static inline void run(Derived1 &dst, const Derived2 &src)
-  {
-    for(Index j = 0; j < dst.cols(); ++j)
-    {
-      Index maxi = (std::min)(j, dst.rows());
-      for(Index i = maxi+1; i < dst.rows(); ++i)
-        dst.copyCoeff(i, j, src);
-      if (ClearOpposite)
-      {
-        for(Index i = 0; i < maxi; ++i)
-          dst.coeffRef(i, j) = 0;
-      }
-    }
-    dst.diagonal().setOnes();
-  }
-};
-
-} // end namespace internal
-
-// FIXME should we keep that possibility
-template<typename MatrixType, unsigned int Mode>
-template<typename OtherDerived>
-inline TriangularView<MatrixType, Mode>&
-TriangularView<MatrixType, Mode>::operator=(const MatrixBase<OtherDerived>& other)
-{
-  if(OtherDerived::Flags & EvalBeforeAssigningBit)
-  {
-    typename internal::plain_matrix_type<OtherDerived>::type other_evaluated(other.rows(), other.cols());
-    other_evaluated.template triangularView<Mode>().lazyAssign(other.derived());
-    lazyAssign(other_evaluated);
-  }
-  else
-    lazyAssign(other.derived());
-  return *this;
-}
-
-// FIXME should we keep that possibility
-template<typename MatrixType, unsigned int Mode>
-template<typename OtherDerived>
-void TriangularView<MatrixType, Mode>::lazyAssign(const MatrixBase<OtherDerived>& other)
-{
-  enum {
-    unroll = MatrixType::SizeAtCompileTime != Dynamic
-          && internal::traits<OtherDerived>::CoeffReadCost != Dynamic
-          && MatrixType::SizeAtCompileTime*internal::traits<OtherDerived>::CoeffReadCost/2 <= EIGEN_UNROLLING_LIMIT
-  };
-  eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
-
-  internal::triangular_assignment_selector
-    <MatrixType, OtherDerived, int(Mode),
-    unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic,
-    false // do not change the opposite triangular part
-    >::run(m_matrix.const_cast_derived(), other.derived());
-}
-
-
-
-template<typename MatrixType, unsigned int Mode>
-template<typename OtherDerived>
-inline TriangularView<MatrixType, Mode>&
-TriangularView<MatrixType, Mode>::operator=(const TriangularBase<OtherDerived>& other)
-{
-  eigen_assert(Mode == int(OtherDerived::Mode));
-  if(internal::traits<OtherDerived>::Flags & EvalBeforeAssigningBit)
-  {
-    typename OtherDerived::DenseMatrixType other_evaluated(other.rows(), other.cols());
-    other_evaluated.template triangularView<Mode>().lazyAssign(other.derived().nestedExpression());
-    lazyAssign(other_evaluated);
-  }
-  else
-    lazyAssign(other.derived().nestedExpression());
-  return *this;
-}
-
-template<typename MatrixType, unsigned int Mode>
-template<typename OtherDerived>
-void TriangularView<MatrixType, Mode>::lazyAssign(const TriangularBase<OtherDerived>& other)
-{
-  enum {
-    unroll = MatrixType::SizeAtCompileTime != Dynamic
-                   && internal::traits<OtherDerived>::CoeffReadCost != Dynamic
-                   && MatrixType::SizeAtCompileTime * internal::traits<OtherDerived>::CoeffReadCost / 2
-                        <= EIGEN_UNROLLING_LIMIT
-  };
-  eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
-
-  internal::triangular_assignment_selector
-    <MatrixType, OtherDerived, int(Mode),
-    unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic,
-    false // preserve the opposite triangular part
-    >::run(m_matrix.const_cast_derived(), other.derived().nestedExpression());
-}
-
-/***************************************************************************
-* Implementation of TriangularBase methods
-***************************************************************************/
-
-/** Assigns a triangular or selfadjoint matrix to a dense matrix.
-  * If the matrix is triangular, the opposite part is set to zero. */
-template<typename Derived>
-template<typename DenseDerived>
-void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
-{
-  if(internal::traits<Derived>::Flags & EvalBeforeAssigningBit)
-  {
-    typename internal::plain_matrix_type<Derived>::type other_evaluated(rows(), cols());
-    evalToLazy(other_evaluated);
-    other.derived().swap(other_evaluated);
-  }
-  else
-    evalToLazy(other.derived());
-}
-
-/** Assigns a triangular or selfadjoint matrix to a dense matrix.
-  * If the matrix is triangular, the opposite part is set to zero. */
-template<typename Derived>
-template<typename DenseDerived>
-void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
-{
-  enum {
-    unroll = DenseDerived::SizeAtCompileTime != Dynamic
-                   && internal::traits<Derived>::CoeffReadCost != Dynamic
-                   && DenseDerived::SizeAtCompileTime * internal::traits<Derived>::CoeffReadCost / 2
-                        <= EIGEN_UNROLLING_LIMIT
-  };
-  other.derived().resize(this->rows(), this->cols());
-
-  internal::triangular_assignment_selector
-    <DenseDerived, typename internal::traits<Derived>::MatrixTypeNestedCleaned, Derived::Mode,
-    unroll ? int(DenseDerived::SizeAtCompileTime) : Dynamic,
-    true // clear the opposite triangular part
-    >::run(other.derived(), derived().nestedExpression());
-}
-
-/***************************************************************************
-* Implementation of TriangularView methods
-***************************************************************************/
-
-/***************************************************************************
-* Implementation of MatrixBase methods
-***************************************************************************/
-
-#ifdef EIGEN2_SUPPORT
-
-// implementation of part<>(), including the SelfAdjoint case.
-
-namespace internal {
-template<typename MatrixType, unsigned int Mode>
-struct eigen2_part_return_type
-{
-  typedef TriangularView<MatrixType, Mode> type;
-};
-
-template<typename MatrixType>
-struct eigen2_part_return_type<MatrixType, SelfAdjoint>
-{
-  typedef SelfAdjointView<MatrixType, Upper> type;
-};
-}
-
-/** \deprecated use MatrixBase::triangularView() */
-template<typename Derived>
-template<unsigned int Mode>
-const typename internal::eigen2_part_return_type<Derived, Mode>::type MatrixBase<Derived>::part() const
-{
-  return derived();
-}
-
-/** \deprecated use MatrixBase::triangularView() */
-template<typename Derived>
-template<unsigned int Mode>
-typename internal::eigen2_part_return_type<Derived, Mode>::type MatrixBase<Derived>::part()
-{
-  return derived();
-}
-#endif
-
-/**
-  * \returns an expression of a triangular view extracted from the current matrix
-  *
-  * The parameter \a Mode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper,
-  * \c #Lower, \c #StrictlyLower, \c #UnitLower.
-  *
-  * Example: \include MatrixBase_extract.cpp
-  * Output: \verbinclude MatrixBase_extract.out
-  *
-  * \sa class TriangularView
-  */
-template<typename Derived>
-template<unsigned int Mode>
-typename MatrixBase<Derived>::template TriangularViewReturnType<Mode>::Type
-MatrixBase<Derived>::triangularView()
-{
-  return derived();
-}
-
-/** This is the const version of MatrixBase::triangularView() */
-template<typename Derived>
-template<unsigned int Mode>
-typename MatrixBase<Derived>::template ConstTriangularViewReturnType<Mode>::Type
-MatrixBase<Derived>::triangularView() const
-{
-  return derived();
-}
-
-/** \returns true if *this is approximately equal to an upper triangular matrix,
-  *          within the precision given by \a prec.
-  *
-  * \sa isLowerTriangular()
-  */
-template<typename Derived>
-bool MatrixBase<Derived>::isUpperTriangular(const RealScalar& prec) const
-{
-  RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
-  for(Index j = 0; j < cols(); ++j)
-  {
-    Index maxi = (std::min)(j, rows()-1);
-    for(Index i = 0; i <= maxi; ++i)
-    {
-      RealScalar absValue = internal::abs(coeff(i,j));
-      if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue;
-    }
-  }
-  RealScalar threshold = maxAbsOnUpperPart * prec;
-  for(Index j = 0; j < cols(); ++j)
-    for(Index i = j+1; i < rows(); ++i)
-      if(internal::abs(coeff(i, j)) > threshold) return false;
-  return true;
-}
-
-/** \returns true if *this is approximately equal to a lower triangular matrix,
-  *          within the precision given by \a prec.
-  *
-  * \sa isUpperTriangular()
-  */
-template<typename Derived>
-bool MatrixBase<Derived>::isLowerTriangular(const RealScalar& prec) const
-{
-  RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1);
-  for(Index j = 0; j < cols(); ++j)
-    for(Index i = j; i < rows(); ++i)
-    {
-      RealScalar absValue = internal::abs(coeff(i,j));
-      if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue;
-    }
-  RealScalar threshold = maxAbsOnLowerPart * prec;
-  for(Index j = 1; j < cols(); ++j)
-  {
-    Index maxi = (std::min)(j, rows()-1);
-    for(Index i = 0; i < maxi; ++i)
-      if(internal::abs(coeff(i, j)) > threshold) return false;
-  }
-  return true;
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_TRIANGULARMATRIX_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/VectorBlock.h b/resources/3rdparty/eigen/Eigen/src/Core/VectorBlock.h
deleted file mode 100644
index d0526dc95..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/VectorBlock.h
+++ /dev/null
@@ -1,284 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_VECTORBLOCK_H
-#define EIGEN_VECTORBLOCK_H
-
-namespace Eigen { 
-
-/** \class VectorBlock
-  * \ingroup Core_Module
-  *
-  * \brief Expression of a fixed-size or dynamic-size sub-vector
-  *
-  * \param VectorType the type of the object in which we are taking a sub-vector
-  * \param Size size of the sub-vector we are taking at compile time (optional)
-  *
-  * This class represents an expression of either a fixed-size or dynamic-size sub-vector.
-  * It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and
-  * most of the time this is the only way it is used.
-  *
-  * However, if you want to directly maniputate sub-vector expressions,
-  * for instance if you want to write a function returning such an expression, you
-  * will need to use this class.
-  *
-  * Here is an example illustrating the dynamic case:
-  * \include class_VectorBlock.cpp
-  * Output: \verbinclude class_VectorBlock.out
-  *
-  * \note Even though this expression has dynamic size, in the case where \a VectorType
-  * has fixed size, this expression inherits a fixed maximal size which means that evaluating
-  * it does not cause a dynamic memory allocation.
-  *
-  * Here is an example illustrating the fixed-size case:
-  * \include class_FixedVectorBlock.cpp
-  * Output: \verbinclude class_FixedVectorBlock.out
-  *
-  * \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index)
-  */
-
-namespace internal {
-template<typename VectorType, int Size>
-struct traits<VectorBlock<VectorType, Size> >
-  : public traits<Block<VectorType,
-                     traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
-                     traits<VectorType>::Flags & RowMajorBit ? Size : 1> >
-{
-};
-}
-
-template<typename VectorType, int Size> class VectorBlock
-  : public Block<VectorType,
-                     internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
-                     internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1>
-{
-    typedef Block<VectorType,
-                     internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
-                     internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1> Base;
-    enum {
-      IsColVector = !(internal::traits<VectorType>::Flags & RowMajorBit)
-    };
-  public:
-    EIGEN_DENSE_PUBLIC_INTERFACE(VectorBlock)
-
-    using Base::operator=;
-
-    /** Dynamic-size constructor
-      */
-    inline VectorBlock(VectorType& vector, Index start, Index size)
-      : Base(vector,
-             IsColVector ? start : 0, IsColVector ? 0 : start,
-             IsColVector ? size  : 1, IsColVector ? 1 : size)
-    {
-      EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
-    }
-
-    /** Fixed-size constructor
-      */
-    inline VectorBlock(VectorType& vector, Index start)
-      : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start)
-    {
-      EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
-    }
-};
-
-
-/** \returns a dynamic-size expression of a segment (i.e. a vector block) in *this.
-  *
-  * \only_for_vectors
-  *
-  * \param start the first coefficient in the segment
-  * \param size the number of coefficients in the segment
-  *
-  * Example: \include MatrixBase_segment_int_int.cpp
-  * Output: \verbinclude MatrixBase_segment_int_int.out
-  *
-  * \note Even though the returned expression has dynamic size, in the case
-  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
-  * which means that evaluating it does not cause a dynamic memory allocation.
-  *
-  * \sa class Block, segment(Index)
-  */
-template<typename Derived>
-inline typename DenseBase<Derived>::SegmentReturnType
-DenseBase<Derived>::segment(Index start, Index vecSize)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return SegmentReturnType(derived(), start, vecSize);
-}
-
-/** This is the const version of segment(Index,Index).*/
-template<typename Derived>
-inline typename DenseBase<Derived>::ConstSegmentReturnType
-DenseBase<Derived>::segment(Index start, Index vecSize) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return ConstSegmentReturnType(derived(), start, vecSize);
-}
-
-/** \returns a dynamic-size expression of the first coefficients of *this.
-  *
-  * \only_for_vectors
-  *
-  * \param size the number of coefficients in the block
-  *
-  * Example: \include MatrixBase_start_int.cpp
-  * Output: \verbinclude MatrixBase_start_int.out
-  *
-  * \note Even though the returned expression has dynamic size, in the case
-  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
-  * which means that evaluating it does not cause a dynamic memory allocation.
-  *
-  * \sa class Block, block(Index,Index)
-  */
-template<typename Derived>
-inline typename DenseBase<Derived>::SegmentReturnType
-DenseBase<Derived>::head(Index vecsize)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return SegmentReturnType(derived(), 0, vecsize);
-}
-
-/** This is the const version of head(Index).*/
-template<typename Derived>
-inline typename DenseBase<Derived>::ConstSegmentReturnType
-DenseBase<Derived>::head(Index vecSize) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return ConstSegmentReturnType(derived(), 0, vecSize);
-}
-
-/** \returns a dynamic-size expression of the last coefficients of *this.
-  *
-  * \only_for_vectors
-  *
-  * \param size the number of coefficients in the block
-  *
-  * Example: \include MatrixBase_end_int.cpp
-  * Output: \verbinclude MatrixBase_end_int.out
-  *
-  * \note Even though the returned expression has dynamic size, in the case
-  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
-  * which means that evaluating it does not cause a dynamic memory allocation.
-  *
-  * \sa class Block, block(Index,Index)
-  */
-template<typename Derived>
-inline typename DenseBase<Derived>::SegmentReturnType
-DenseBase<Derived>::tail(Index vecSize)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return SegmentReturnType(derived(), this->size() - vecSize, vecSize);
-}
-
-/** This is the const version of tail(Index).*/
-template<typename Derived>
-inline typename DenseBase<Derived>::ConstSegmentReturnType
-DenseBase<Derived>::tail(Index vecSize) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return ConstSegmentReturnType(derived(), this->size() - vecSize, vecSize);
-}
-
-/** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this
-  *
-  * \only_for_vectors
-  *
-  * The template parameter \a Size is the number of coefficients in the block
-  *
-  * \param start the index of the first element of the sub-vector
-  *
-  * Example: \include MatrixBase_template_int_segment.cpp
-  * Output: \verbinclude MatrixBase_template_int_segment.out
-  *
-  * \sa class Block
-  */
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::segment(Index start)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename FixedSegmentReturnType<Size>::Type(derived(), start);
-}
-
-/** This is the const version of segment<int>(Index).*/
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::segment(Index start) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename ConstFixedSegmentReturnType<Size>::Type(derived(), start);
-}
-
-/** \returns a fixed-size expression of the first coefficients of *this.
-  *
-  * \only_for_vectors
-  *
-  * The template parameter \a Size is the number of coefficients in the block
-  *
-  * Example: \include MatrixBase_template_int_start.cpp
-  * Output: \verbinclude MatrixBase_template_int_start.out
-  *
-  * \sa class Block
-  */
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::head()
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename FixedSegmentReturnType<Size>::Type(derived(), 0);
-}
-
-/** This is the const version of head<int>().*/
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::head() const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename ConstFixedSegmentReturnType<Size>::Type(derived(), 0);
-}
-
-/** \returns a fixed-size expression of the last coefficients of *this.
-  *
-  * \only_for_vectors
-  *
-  * The template parameter \a Size is the number of coefficients in the block
-  *
-  * Example: \include MatrixBase_template_int_end.cpp
-  * Output: \verbinclude MatrixBase_template_int_end.out
-  *
-  * \sa class Block
-  */
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::tail()
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename FixedSegmentReturnType<Size>::Type(derived(), size() - Size);
-}
-
-/** This is the const version of tail<int>.*/
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::tail() const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename ConstFixedSegmentReturnType<Size>::Type(derived(), size() - Size);
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_VECTORBLOCK_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Visitor.h b/resources/3rdparty/eigen/Eigen/src/Core/Visitor.h
deleted file mode 100644
index abf8d8e8c..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/Visitor.h
+++ /dev/null
@@ -1,237 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_VISITOR_H
-#define EIGEN_VISITOR_H
-
-namespace Eigen { 
-
-namespace internal {
-
-template<typename Visitor, typename Derived, int UnrollCount>
-struct visitor_impl
-{
-  enum {
-    col = (UnrollCount-1) / Derived::RowsAtCompileTime,
-    row = (UnrollCount-1) % Derived::RowsAtCompileTime
-  };
-
-  static inline void run(const Derived &mat, Visitor& visitor)
-  {
-    visitor_impl<Visitor, Derived, UnrollCount-1>::run(mat, visitor);
-    visitor(mat.coeff(row, col), row, col);
-  }
-};
-
-template<typename Visitor, typename Derived>
-struct visitor_impl<Visitor, Derived, 1>
-{
-  static inline void run(const Derived &mat, Visitor& visitor)
-  {
-    return visitor.init(mat.coeff(0, 0), 0, 0);
-  }
-};
-
-template<typename Visitor, typename Derived>
-struct visitor_impl<Visitor, Derived, Dynamic>
-{
-  typedef typename Derived::Index Index;
-  static inline void run(const Derived& mat, Visitor& visitor)
-  {
-    visitor.init(mat.coeff(0,0), 0, 0);
-    for(Index i = 1; i < mat.rows(); ++i)
-      visitor(mat.coeff(i, 0), i, 0);
-    for(Index j = 1; j < mat.cols(); ++j)
-      for(Index i = 0; i < mat.rows(); ++i)
-        visitor(mat.coeff(i, j), i, j);
-  }
-};
-
-} // end namespace internal
-
-/** Applies the visitor \a visitor to the whole coefficients of the matrix or vector.
-  *
-  * The template parameter \a Visitor is the type of the visitor and provides the following interface:
-  * \code
-  * struct MyVisitor {
-  *   // called for the first coefficient
-  *   void init(const Scalar& value, Index i, Index j);
-  *   // called for all other coefficients
-  *   void operator() (const Scalar& value, Index i, Index j);
-  * };
-  * \endcode
-  *
-  * \note compared to one or two \em for \em loops, visitors offer automatic
-  * unrolling for small fixed size matrix.
-  *
-  * \sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux()
-  */
-template<typename Derived>
-template<typename Visitor>
-void DenseBase<Derived>::visit(Visitor& visitor) const
-{
-  enum { unroll = SizeAtCompileTime != Dynamic
-                   && CoeffReadCost != Dynamic
-                   && (SizeAtCompileTime == 1 || internal::functor_traits<Visitor>::Cost != Dynamic)
-                   && SizeAtCompileTime * CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits<Visitor>::Cost
-                      <= EIGEN_UNROLLING_LIMIT };
-  return internal::visitor_impl<Visitor, Derived,
-      unroll ? int(SizeAtCompileTime) : Dynamic
-    >::run(derived(), visitor);
-}
-
-namespace internal {
-
-/** \internal
-  * \brief Base class to implement min and max visitors
-  */
-template <typename Derived>
-struct coeff_visitor
-{
-  typedef typename Derived::Index Index;
-  typedef typename Derived::Scalar Scalar;
-  Index row, col;
-  Scalar res;
-  inline void init(const Scalar& value, Index i, Index j)
-  {
-    res = value;
-    row = i;
-    col = j;
-  }
-};
-
-/** \internal
-  * \brief Visitor computing the min coefficient with its value and coordinates
-  *
-  * \sa DenseBase::minCoeff(Index*, Index*)
-  */
-template <typename Derived>
-struct min_coeff_visitor : coeff_visitor<Derived>
-{
-  typedef typename Derived::Index Index;
-  typedef typename Derived::Scalar Scalar;
-  void operator() (const Scalar& value, Index i, Index j)
-  {
-    if(value < this->res)
-    {
-      this->res = value;
-      this->row = i;
-      this->col = j;
-    }
-  }
-};
-
-template<typename Scalar>
-struct functor_traits<min_coeff_visitor<Scalar> > {
-  enum {
-    Cost = NumTraits<Scalar>::AddCost
-  };
-};
-
-/** \internal
-  * \brief Visitor computing the max coefficient with its value and coordinates
-  *
-  * \sa DenseBase::maxCoeff(Index*, Index*)
-  */
-template <typename Derived>
-struct max_coeff_visitor : coeff_visitor<Derived>
-{
-  typedef typename Derived::Index Index;
-  typedef typename Derived::Scalar Scalar;
-  void operator() (const Scalar& value, Index i, Index j)
-  {
-    if(value > this->res)
-    {
-      this->res = value;
-      this->row = i;
-      this->col = j;
-    }
-  }
-};
-
-template<typename Scalar>
-struct functor_traits<max_coeff_visitor<Scalar> > {
-  enum {
-    Cost = NumTraits<Scalar>::AddCost
-  };
-};
-
-} // end namespace internal
-
-/** \returns the minimum of all coefficients of *this
-  * and puts in *row and *col its location.
-  *
-  * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff()
-  */
-template<typename Derived>
-template<typename IndexType>
-typename internal::traits<Derived>::Scalar
-DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
-{
-  internal::min_coeff_visitor<Derived> minVisitor;
-  this->visit(minVisitor);
-  *rowId = minVisitor.row;
-  if (colId) *colId = minVisitor.col;
-  return minVisitor.res;
-}
-
-/** \returns the minimum of all coefficients of *this
-  * and puts in *index its location.
-  *
-  * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::minCoeff()
-  */
-template<typename Derived>
-template<typename IndexType>
-typename internal::traits<Derived>::Scalar
-DenseBase<Derived>::minCoeff(IndexType* index) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  internal::min_coeff_visitor<Derived> minVisitor;
-  this->visit(minVisitor);
-  *index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row;
-  return minVisitor.res;
-}
-
-/** \returns the maximum of all coefficients of *this
-  * and puts in *row and *col its location.
-  *
-  * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff()
-  */
-template<typename Derived>
-template<typename IndexType>
-typename internal::traits<Derived>::Scalar
-DenseBase<Derived>::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const
-{
-  internal::max_coeff_visitor<Derived> maxVisitor;
-  this->visit(maxVisitor);
-  *rowPtr = maxVisitor.row;
-  if (colPtr) *colPtr = maxVisitor.col;
-  return maxVisitor.res;
-}
-
-/** \returns the maximum of all coefficients of *this
-  * and puts in *index its location.
-  *
-  * \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff()
-  */
-template<typename Derived>
-template<typename IndexType>
-typename internal::traits<Derived>::Scalar
-DenseBase<Derived>::maxCoeff(IndexType* index) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  internal::max_coeff_visitor<Derived> maxVisitor;
-  this->visit(maxVisitor);
-  *index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row;
-  return maxVisitor.res;
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_VISITOR_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/NEON/PacketMath.h b/resources/3rdparty/eigen/Eigen/src/Core/arch/NEON/PacketMath.h
deleted file mode 100644
index 2662e2ebf..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/arch/NEON/PacketMath.h
+++ /dev/null
@@ -1,407 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010 Konstantinos Margaritis <markos@codex.gr>
-// Heavily based on Gael's SSE version.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_PACKET_MATH_NEON_H
-#define EIGEN_PACKET_MATH_NEON_H
-
-namespace Eigen {
-
-namespace internal {
-
-#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
-#endif
-
-// FIXME NEON has 16 quad registers, but since the current register allocator
-// is so bad, it is much better to reduce it to 8
-#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
-#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8
-#endif
-
-typedef float32x4_t Packet4f;
-typedef int32x4_t   Packet4i;
-typedef uint32x4_t  Packet4ui;
-
-#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
-  const Packet4f p4f_##NAME = pset1<Packet4f>(X)
-
-#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
-  const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int>(X))
-
-#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
-  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
-
-#if defined(__llvm__) && !defined(__clang__)
-  //Special treatment for Apple's llvm-gcc, its NEON packet types are unions
-  #define EIGEN_INIT_NEON_PACKET2(X, Y)       {{X, Y}}
-  #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {{X, Y, Z, W}}
-#else
-  //Default initializer for packets
-  #define EIGEN_INIT_NEON_PACKET2(X, Y)       {X, Y}
-  #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {X, Y, Z, W}
-#endif
-    
-#ifndef __pld
-#define __pld(x) asm volatile ( "   pld [%[addr]]\n" :: [addr] "r" (x) : "cc" );
-#endif
-
-template<> struct packet_traits<float>  : default_packet_traits
-{
-  typedef Packet4f type;
-  enum {
-    Vectorizable = 1,
-    AlignedOnScalar = 1,
-    size = 4,
-   
-    HasDiv  = 1,
-    // FIXME check the Has*
-    HasSin  = 0,
-    HasCos  = 0,
-    HasLog  = 0,
-    HasExp  = 0,
-    HasSqrt = 0
-  };
-};
-template<> struct packet_traits<int>    : default_packet_traits
-{
-  typedef Packet4i type;
-  enum {
-    Vectorizable = 1,
-    AlignedOnScalar = 1,
-    size=4
-    // FIXME check the Has*
-  };
-};
-
-#if EIGEN_GNUC_AT_MOST(4,4) && !defined(__llvm__)
-// workaround gcc 4.2, 4.3 and 4.4 compilatin issue
-EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
-EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
-EIGEN_STRONG_INLINE void        vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
-EIGEN_STRONG_INLINE void        vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
-#endif
-
-template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
-template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };
-
-template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return vdupq_n_f32(from); }
-template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from)   { return vdupq_n_s32(from); }
-
-template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a)
-{
-  Packet4f countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
-  return vaddq_f32(pset1<Packet4f>(a), countdown);
-}
-template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a)
-{
-  Packet4i countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
-  return vaddq_s32(pset1<Packet4i>(a), countdown);
-}
-
-template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
-template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
-  Packet4f inv, restep, div;
-
-  // NEON does not offer a divide instruction, we have to do a reciprocal approximation
-  // However NEON in contrast to other SIMD engines (AltiVec/SSE), offers
-  // a reciprocal estimate AND a reciprocal step -which saves a few instructions
-  // vrecpeq_f32() returns an estimate to 1/b, which we will finetune with
-  // Newton-Raphson and vrecpsq_f32()
-  inv = vrecpeq_f32(b);
-
-  // This returns a differential, by which we will have to multiply inv to get a better
-  // approximation of 1/b.
-  restep = vrecpsq_f32(b, inv);
-  inv = vmulq_f32(restep, inv);
-
-  // Finally, multiply a by 1/b and get the wanted result of the division.
-  div = vmulq_f32(a, inv);
-
-  return div;
-}
-template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
-{ eigen_assert(false && "packet integer division are not supported by NEON");
-  return pset1<Packet4i>(0);
-}
-
-// for some weird raisons, it has to be overloaded for packet of integers
-template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vmlaq_f32(c,a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }
-
-// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics
-template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
-  return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
-}
-template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
-  return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
-}
-template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
-  return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
-}
-template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
-  return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
-}
-template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
-template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*   from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
-
-template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
-template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)   { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
-
-template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
-{
-  float32x2_t lo, hi;
-  lo = vdup_n_f32(*from);
-  hi = vdup_n_f32(*(from+1));
-  return vcombine_f32(lo, hi);
-}
-template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
-{
-  int32x2_t lo, hi;
-  lo = vdup_n_s32(*from);
-  hi = vdup_n_s32(*(from+1));
-  return vcombine_s32(lo, hi);
-}
-
-template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
-template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }
-
-template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
-template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*      to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
-
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { __pld(addr); }
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*     addr) { __pld(addr); }
-
-// FIXME only store the 2 first elements ?
-template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
-template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int   EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }
-
-template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
-  float32x2_t a_lo, a_hi;
-  Packet4f a_r64;
-
-  a_r64 = vrev64q_f32(a);
-  a_lo = vget_low_f32(a_r64);
-  a_hi = vget_high_f32(a_r64);
-  return vcombine_f32(a_hi, a_lo);
-}
-template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
-  int32x2_t a_lo, a_hi;
-  Packet4i a_r64;
-
-  a_r64 = vrev64q_s32(a);
-  a_lo = vget_low_s32(a_r64);
-  a_hi = vget_high_s32(a_r64);
-  return vcombine_s32(a_hi, a_lo);
-}
-template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
-template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }
-
-template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
-{
-  float32x2_t a_lo, a_hi, sum;
-
-  a_lo = vget_low_f32(a);
-  a_hi = vget_high_f32(a);
-  sum = vpadd_f32(a_lo, a_hi);
-  sum = vpadd_f32(sum, sum);
-  return vget_lane_f32(sum, 0);
-}
-
-template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
-{
-  float32x4x2_t vtrn1, vtrn2, res1, res2;
-  Packet4f sum1, sum2, sum;
-
-  // NEON zip performs interleaving of the supplied vectors.
-  // We perform two interleaves in a row to acquire the transposed vector
-  vtrn1 = vzipq_f32(vecs[0], vecs[2]);
-  vtrn2 = vzipq_f32(vecs[1], vecs[3]);
-  res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
-  res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
-
-  // Do the addition of the resulting vectors
-  sum1 = vaddq_f32(res1.val[0], res1.val[1]);
-  sum2 = vaddq_f32(res2.val[0], res2.val[1]);
-  sum = vaddq_f32(sum1, sum2);
-
-  return sum;
-}
-
-template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
-{
-  int32x2_t a_lo, a_hi, sum;
-
-  a_lo = vget_low_s32(a);
-  a_hi = vget_high_s32(a);
-  sum = vpadd_s32(a_lo, a_hi);
-  sum = vpadd_s32(sum, sum);
-  return vget_lane_s32(sum, 0);
-}
-
-template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
-{
-  int32x4x2_t vtrn1, vtrn2, res1, res2;
-  Packet4i sum1, sum2, sum;
-
-  // NEON zip performs interleaving of the supplied vectors.
-  // We perform two interleaves in a row to acquire the transposed vector
-  vtrn1 = vzipq_s32(vecs[0], vecs[2]);
-  vtrn2 = vzipq_s32(vecs[1], vecs[3]);
-  res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
-  res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);
-
-  // Do the addition of the resulting vectors
-  sum1 = vaddq_s32(res1.val[0], res1.val[1]);
-  sum2 = vaddq_s32(res2.val[0], res2.val[1]);
-  sum = vaddq_s32(sum1, sum2);
-
-  return sum;
-}
-
-// Other reduction functions:
-// mul
-template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
-{
-  float32x2_t a_lo, a_hi, prod;
-
-  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
-  a_lo = vget_low_f32(a);
-  a_hi = vget_high_f32(a);
-  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
-  prod = vmul_f32(a_lo, a_hi);
-  // Multiply prod with its swapped value |a2*a4|a1*a3|
-  prod = vmul_f32(prod, vrev64_f32(prod));
-
-  return vget_lane_f32(prod, 0);
-}
-template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
-{
-  int32x2_t a_lo, a_hi, prod;
-
-  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
-  a_lo = vget_low_s32(a);
-  a_hi = vget_high_s32(a);
-  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
-  prod = vmul_s32(a_lo, a_hi);
-  // Multiply prod with its swapped value |a2*a4|a1*a3|
-  prod = vmul_s32(prod, vrev64_s32(prod));
-
-  return vget_lane_s32(prod, 0);
-}
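A minimal scalar sketch of the lane arithmetic walked through in the comments above, assuming lanes a0..a3 and a hypothetical helper name:

// prod = |a0*a2|a1*a3|; multiplying prod by its swapped copy puts
// a0*a1*a2*a3 into lane 0, which is the value returned above.
int predux_mul_scalar_sketch(const int a[4])
{
  int p0 = a[0] * a[2];   // vmul(a_lo, a_hi), lane 0
  int p1 = a[1] * a[3];   // vmul(a_lo, a_hi), lane 1
  return p0 * p1;         // lane 0 of prod * vrev64(prod)
}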
-
-// min
-template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
-{
-  float32x2_t a_lo, a_hi, min;
-
-  a_lo = vget_low_f32(a);
-  a_hi = vget_high_f32(a);
-  min = vpmin_f32(a_lo, a_hi);
-  min = vpmin_f32(min, min);
-
-  return vget_lane_f32(min, 0);
-}
-
-template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
-{
-  int32x2_t a_lo, a_hi, min;
-
-  a_lo = vget_low_s32(a);
-  a_hi = vget_high_s32(a);
-  min = vpmin_s32(a_lo, a_hi);
-  min = vpmin_s32(min, min);
-  
-  return vget_lane_s32(min, 0);
-}
-
-// max
-template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
-{
-  float32x2_t a_lo, a_hi, max;
-
-  a_lo = vget_low_f32(a);
-  a_hi = vget_high_f32(a);
-  max = vpmax_f32(a_lo, a_hi);
-  max = vpmax_f32(max, max);
-
-  return vget_lane_f32(max, 0);
-}
-
-template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
-{
-  int32x2_t a_lo, a_hi, max;
-
-  a_lo = vget_low_s32(a);
-  a_hi = vget_high_s32(a);
-  max = vpmax_s32(a_lo, a_hi);
-
-  return vget_lane_s32(max, 0);
-}
-
-// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing spurious compilation errors,
-// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074
-#define PALIGN_NEON(Offset,Type,Command) \
-template<>\
-struct palign_impl<Offset,Type>\
-{\
-    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
-    {\
-        if (Offset!=0)\
-            first = Command(first, second, Offset);\
-    }\
-};\
-
-PALIGN_NEON(0,Packet4f,vextq_f32)
-PALIGN_NEON(1,Packet4f,vextq_f32)
-PALIGN_NEON(2,Packet4f,vextq_f32)
-PALIGN_NEON(3,Packet4f,vextq_f32)
-PALIGN_NEON(0,Packet4i,vextq_s32)
-PALIGN_NEON(1,Packet4i,vextq_s32)
-PALIGN_NEON(2,Packet4i,vextq_s32)
-PALIGN_NEON(3,Packet4i,vextq_s32)
-    
-#undef PALIGN_NEON
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_PACKET_MATH_NEON_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h b/resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h
deleted file mode 100644
index 557af8455..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h
+++ /dev/null
@@ -1,460 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2007 Julien Pommier
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-/* The sin, cos, exp, and log functions of this file come from
- * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
- */
-
-#ifndef EIGEN_MATH_FUNCTIONS_SSE_H
-#define EIGEN_MATH_FUNCTIONS_SSE_H
-
-namespace Eigen {
-
-namespace internal {
-
-template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f plog<Packet4f>(const Packet4f& _x)
-{
-  Packet4f x = _x;
-  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
-  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
-  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
-
-  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
-
-  /* the smallest non denormalized float number */
-  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos,  0x00800000);
-
-  /* natural logarithm computed for 4 simultaneous floats;
-    returns NaN for x <= 0
-  */
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
-
-
-  Packet4i emm0;
-
-  Packet4f invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps());
-
-  x = pmax(x, p4f_min_norm_pos);  /* cut off denormalized stuff */
-  emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
-
-  /* keep only the mantissa, forcing the exponent to that of 0.5 */
-  x = _mm_and_ps(x, p4f_inv_mant_mask);
-  x = _mm_or_ps(x, p4f_half);
-
-  emm0 = _mm_sub_epi32(emm0, p4i_0x7f);
-  Packet4f e = padd(_mm_cvtepi32_ps(emm0), p4f_1);
-
-  /* part2:
-     if( x < SQRTHF ) {
-       e -= 1;
-       x = x + x - 1.0;
-     } else { x = x - 1.0; }
-  */
-  Packet4f mask = _mm_cmplt_ps(x, p4f_cephes_SQRTHF);
-  Packet4f tmp = _mm_and_ps(x, mask);
-  x = psub(x, p4f_1);
-  e = psub(e, _mm_and_ps(p4f_1, mask));
-  x = padd(x, tmp);
-
-  Packet4f x2 = pmul(x,x);
-  Packet4f x3 = pmul(x2,x);
-
-  Packet4f y, y1, y2;
-  y  = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
-  y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
-  y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
-  y  = pmadd(y , x, p4f_cephes_log_p2);
-  y1 = pmadd(y1, x, p4f_cephes_log_p5);
-  y2 = pmadd(y2, x, p4f_cephes_log_p8);
-  y = pmadd(y, x3, y1);
-  y = pmadd(y, x3, y2);
-  y = pmul(y, x3);
-
-  y1 = pmul(e, p4f_cephes_log_q1);
-  tmp = pmul(x2, p4f_half);
-  y = padd(y, y1);
-  x = psub(x, tmp);
-  y2 = pmul(e, p4f_cephes_log_q2);
-  x = padd(x, y);
-  x = padd(x, y2);
-  return _mm_or_ps(x, invalid_mask); // negative arg will be NAN
-}
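A minimal scalar sketch of the decomposition used above, assuming x > 0, single precision, and a crude cubic stand-in for the degree-8 polynomial evaluated by the vector code (the helper name is hypothetical):

#include <cmath>

// log(x) with x = m * 2^e, m in [0.5, 1):  log(x) = log(m) + e * ln 2.
// The SQRTHF branch keeps the polynomial argument t = m - 1 small.
float plog_scalar_sketch(float x)
{
  int e;
  float m = std::frexp(x, &e);                     // x = m * 2^e, m in [0.5, 1)
  if (m < 0.707106781186547524f) { m += m; --e; }  // same SQRTHF branch as above
  float t = m - 1.0f;
  float p = t - 0.5f*t*t + (t*t*t)/3.0f;           // crude stand-in for cephes_log_p0..p8
  return p + static_cast<float>(e) * 0.693147180559945f;  // add e * ln 2
}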
-
-template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f pexp<Packet4f>(const Packet4f& _x)
-{
-  Packet4f x = _x;
-  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
-  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
-  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
-
-
-  _EIGEN_DECLARE_CONST_Packet4f(exp_hi,  88.3762626647950f);
-  _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
-
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
-
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
-
-  Packet4f tmp = _mm_setzero_ps(), fx;
-  Packet4i emm0;
-
-  // clamp x
-  x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
-
-  /* express exp(x) as exp(g + n*log(2)) */
-  fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
-
-#ifdef EIGEN_VECTORIZE_SSE4_1
-  fx = _mm_floor_ps(fx);
-#else
-  emm0 = _mm_cvttps_epi32(fx);
-  tmp  = _mm_cvtepi32_ps(emm0);
-  /* if greater, subtract 1 */
-  Packet4f mask = _mm_cmpgt_ps(tmp, fx);
-  mask = _mm_and_ps(mask, p4f_1);
-  fx = psub(tmp, mask);
-#endif
-
-  tmp = pmul(fx, p4f_cephes_exp_C1);
-  Packet4f z = pmul(fx, p4f_cephes_exp_C2);
-  x = psub(x, tmp);
-  x = psub(x, z);
-
-  z = pmul(x,x);
-
-  Packet4f y = p4f_cephes_exp_p0;
-  y = pmadd(y, x, p4f_cephes_exp_p1);
-  y = pmadd(y, x, p4f_cephes_exp_p2);
-  y = pmadd(y, x, p4f_cephes_exp_p3);
-  y = pmadd(y, x, p4f_cephes_exp_p4);
-  y = pmadd(y, x, p4f_cephes_exp_p5);
-  y = pmadd(y, z, x);
-  y = padd(y, p4f_1);
-
-  // build 2^n
-  emm0 = _mm_cvttps_epi32(fx);
-  emm0 = _mm_add_epi32(emm0, p4i_0x7f);
-  emm0 = _mm_slli_epi32(emm0, 23);
-  return pmul(y, _mm_castsi128_ps(emm0));
-}
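A minimal scalar sketch of the same range reduction, assuming a quadratic stand-in for the degree-5 polynomial and reusing the constants declared above (the helper name is hypothetical):

#include <cmath>

// exp(x) = 2^n * exp(g), n = round(x / ln 2), g = x - n*ln 2, with ln 2 split
// into two parts (C1 + C2) for extra precision, exactly as in the vector code.
float pexp_scalar_sketch(float x)
{
  float n = std::floor(x * 1.44269504088896341f + 0.5f);   // round(x / ln 2)
  float g = (x - n * 0.693359375f) - n * (-2.12194440e-4f);
  float eg = 1.0f + g + 0.5f * g * g;                      // stand-in for cephes_exp_p0..p5
  return std::ldexp(eg, static_cast<int>(n));              // multiply by 2^n
}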
-template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet2d pexp<Packet2d>(const Packet2d& _x)
-{
-  Packet2d x = _x;
-
-  _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
-  _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
-  _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
-
-  _EIGEN_DECLARE_CONST_Packet2d(exp_hi,  709.437);
-  _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);
-
-  _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
-
-  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
-  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
-  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
-
-  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
-  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
-  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
-  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
-
-  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
-  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
-  static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
-
-  Packet2d tmp = _mm_setzero_pd(), fx;
-  Packet4i emm0;
-
-  // clamp x
-  x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
-  /* express exp(x) as exp(g + n*log(2)) */
-  fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);
-
-#ifdef EIGEN_VECTORIZE_SSE4_1
-  fx = _mm_floor_pd(fx);
-#else
-  emm0 = _mm_cvttpd_epi32(fx);
-  tmp  = _mm_cvtepi32_pd(emm0);
-  /* if greater, subtract 1 */
-  Packet2d mask = _mm_cmpgt_pd(tmp, fx);
-  mask = _mm_and_pd(mask, p2d_1);
-  fx = psub(tmp, mask);
-#endif
-
-  tmp = pmul(fx, p2d_cephes_exp_C1);
-  Packet2d z = pmul(fx, p2d_cephes_exp_C2);
-  x = psub(x, tmp);
-  x = psub(x, z);
-
-  Packet2d x2 = pmul(x,x);
-
-  Packet2d px = p2d_cephes_exp_p0;
-  px = pmadd(px, x2, p2d_cephes_exp_p1);
-  px = pmadd(px, x2, p2d_cephes_exp_p2);
-  px = pmul (px, x);
-
-  Packet2d qx = p2d_cephes_exp_q0;
-  qx = pmadd(qx, x2, p2d_cephes_exp_q1);
-  qx = pmadd(qx, x2, p2d_cephes_exp_q2);
-  qx = pmadd(qx, x2, p2d_cephes_exp_q3);
-
-  x = pdiv(px,psub(qx,px));
-  x = pmadd(p2d_2,x,p2d_1);
-
-  // build 2^n
-  emm0 = _mm_cvttpd_epi32(fx);
-  emm0 = _mm_add_epi32(emm0, p4i_1023_0);
-  emm0 = _mm_slli_epi32(emm0, 20);
-  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
-  return pmul(x, _mm_castsi128_pd(emm0));
-}
-
-/* evaluation of 4 sines at once, using SSE2 intrinsics.
-
-   The code is the exact rewriting of the cephes sinf function.
-   Precision is excellent as long as x < 8192 (I did not bother to
-   take into account the special handling they have for greater values
-   -- it does not return garbage for arguments over 8192, though, but
-   the extra precision is missing).
-
-   Note that sinf((float)M_PI) = 8.74e-8, which is a surprising but
-   correct result.
-*/
-
-template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f psin<Packet4f>(const Packet4f& _x)
-{
-  Packet4f x = _x;
-  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
-  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
-
-  _EIGEN_DECLARE_CONST_Packet4i(1, 1);
-  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
-  _EIGEN_DECLARE_CONST_Packet4i(2, 2);
-  _EIGEN_DECLARE_CONST_Packet4i(4, 4);
-
-  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
-
-  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
-  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
-  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
-  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
-  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);
-  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
-  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);
-  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
-  _EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
-
-  Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
-
-  Packet4i emm0, emm2;
-  sign_bit = x;
-  /* take the absolute value */
-  x = pabs(x);
-
-  /* take the modulo */
-
-  /* extract the sign bit (upper one) */
-  sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask);
-
-  /* scale by 4/Pi */
-  y = pmul(x, p4f_cephes_FOPI);
-
-  /* store the integer part of y in mm0 */
-  emm2 = _mm_cvttps_epi32(y);
-  /* j=(j+1) & (~1) (see the cephes sources) */
-  emm2 = _mm_add_epi32(emm2, p4i_1);
-  emm2 = _mm_and_si128(emm2, p4i_not1);
-  y = _mm_cvtepi32_ps(emm2);
-  /* get the swap sign flag */
-  emm0 = _mm_and_si128(emm2, p4i_4);
-  emm0 = _mm_slli_epi32(emm0, 29);
-  /* get the polynomial selection mask:
-     there is one polynomial for 0 <= x <= Pi/4
-     and another one for Pi/4 < x <= Pi/2.
-
-     Both branches will be computed.
-  */
-  emm2 = _mm_and_si128(emm2, p4i_2);
-  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
-
-  Packet4f swap_sign_bit = _mm_castsi128_ps(emm0);
-  Packet4f poly_mask = _mm_castsi128_ps(emm2);
-  sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
-
-  /* The magic pass: "Extended precision modular arithmetic"
-     x = ((x - y * DP1) - y * DP2) - y * DP3; */
-  xmm1 = pmul(y, p4f_minus_cephes_DP1);
-  xmm2 = pmul(y, p4f_minus_cephes_DP2);
-  xmm3 = pmul(y, p4f_minus_cephes_DP3);
-  x = padd(x, xmm1);
-  x = padd(x, xmm2);
-  x = padd(x, xmm3);
-
-  /* Evaluate the first polynomial (0 <= x <= Pi/4) */
-  y = p4f_coscof_p0;
-  Packet4f z = _mm_mul_ps(x,x);
-
-  y = pmadd(y, z, p4f_coscof_p1);
-  y = pmadd(y, z, p4f_coscof_p2);
-  y = pmul(y, z);
-  y = pmul(y, z);
-  Packet4f tmp = pmul(z, p4f_half);
-  y = psub(y, tmp);
-  y = padd(y, p4f_1);
-
-  /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
-
-  Packet4f y2 = p4f_sincof_p0;
-  y2 = pmadd(y2, z, p4f_sincof_p1);
-  y2 = pmadd(y2, z, p4f_sincof_p2);
-  y2 = pmul(y2, z);
-  y2 = pmul(y2, x);
-  y2 = padd(y2, x);
-
-  /* select the correct result from the two polynomials */
-  y2 = _mm_and_ps(poly_mask, y2);
-  y = _mm_andnot_ps(poly_mask, y);
-  y = _mm_or_ps(y,y2);
-  /* update the sign */
-  return _mm_xor_ps(y, sign_bit);
-}
-
-/* almost the same as psin */
-template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f pcos<Packet4f>(const Packet4f& _x)
-{
-  Packet4f x = _x;
-  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
-  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
-
-  _EIGEN_DECLARE_CONST_Packet4i(1, 1);
-  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
-  _EIGEN_DECLARE_CONST_Packet4i(2, 2);
-  _EIGEN_DECLARE_CONST_Packet4i(4, 4);
-
-  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
-  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
-  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
-  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
-  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);
-  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
-  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);
-  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
-  _EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);
-  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
-
-  Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
-  Packet4i emm0, emm2;
-
-  x = pabs(x);
-
-  /* scale by 4/Pi */
-  y = pmul(x, p4f_cephes_FOPI);
-
-  /* get the integer part of y */
-  emm2 = _mm_cvttps_epi32(y);
-  /* j=(j+1) & (~1) (see the cephes sources) */
-  emm2 = _mm_add_epi32(emm2, p4i_1);
-  emm2 = _mm_and_si128(emm2, p4i_not1);
-  y = _mm_cvtepi32_ps(emm2);
-
-  emm2 = _mm_sub_epi32(emm2, p4i_2);
-
-  /* get the swap sign flag */
-  emm0 = _mm_andnot_si128(emm2, p4i_4);
-  emm0 = _mm_slli_epi32(emm0, 29);
-  /* get the polynomial selection mask */
-  emm2 = _mm_and_si128(emm2, p4i_2);
-  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
-
-  Packet4f sign_bit = _mm_castsi128_ps(emm0);
-  Packet4f poly_mask = _mm_castsi128_ps(emm2);
-
-  /* The magic pass: "Extended precision modular arithmetic"
-     x = ((x - y * DP1) - y * DP2) - y * DP3; */
-  xmm1 = pmul(y, p4f_minus_cephes_DP1);
-  xmm2 = pmul(y, p4f_minus_cephes_DP2);
-  xmm3 = pmul(y, p4f_minus_cephes_DP3);
-  x = padd(x, xmm1);
-  x = padd(x, xmm2);
-  x = padd(x, xmm3);
-
-  /* Evaluate the first polynomial (0 <= x <= Pi/4) */
-  y = p4f_coscof_p0;
-  Packet4f z = pmul(x,x);
-
-  y = pmadd(y,z,p4f_coscof_p1);
-  y = pmadd(y,z,p4f_coscof_p2);
-  y = pmul(y, z);
-  y = pmul(y, z);
-  Packet4f tmp = _mm_mul_ps(z, p4f_half);
-  y = psub(y, tmp);
-  y = padd(y, p4f_1);
-
-  /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
-  Packet4f y2 = p4f_sincof_p0;
-  y2 = pmadd(y2, z, p4f_sincof_p1);
-  y2 = pmadd(y2, z, p4f_sincof_p2);
-  y2 = pmul(y2, z);
-  y2 = pmadd(y2, x, x);
-
-  /* select the correct result from the two polynomials */
-  y2 = _mm_and_ps(poly_mask, y2);
-  y  = _mm_andnot_ps(poly_mask, y);
-  y  = _mm_or_ps(y,y2);
-
-  /* update the sign */
-  return _mm_xor_ps(y, sign_bit);
-}
-
-// This is based on Quake3's fast inverse square root.
-// For details, see: http://www.beyond3d.com/content/articles/8/
-template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f psqrt<Packet4f>(const Packet4f& _x)
-{
-  Packet4f half = pmul(_x, pset1<Packet4f>(.5f));
-
-  /* select only the inverse sqrt of non-zero inputs */
-  Packet4f non_zero_mask = _mm_cmpgt_ps(_x, pset1<Packet4f>(std::numeric_limits<float>::epsilon()));
-  Packet4f x = _mm_and_ps(non_zero_mask, _mm_rsqrt_ps(_x));
-
-  x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x))));
-  return pmul(_x,x);
-}
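A minimal scalar sketch of the refinement performed above, assuming an approximate reciprocal square root as input (standing in for what _mm_rsqrt_ps returns per lane; the helper name is hypothetical):

// x_{k+1} = x_k * (1.5 - 0.5 * a * x_k^2) refines x ~ 1/sqrt(a);
// multiplying by a then gives a * 1/sqrt(a) == sqrt(a).
float psqrt_scalar_sketch(float a, float rsqrt_estimate)
{
  float x = rsqrt_estimate;               // ~ 1/sqrt(a)
  x = x * (1.5f - 0.5f * a * x * x);      // one Newton-Raphson refinement
  return a * x;                           // sqrt(a)
}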
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_MATH_FUNCTIONS_SSE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/PacketMath.h b/resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/PacketMath.h
deleted file mode 100644
index f84e5b3ec..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/arch/SSE/PacketMath.h
+++ /dev/null
@@ -1,636 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_PACKET_MATH_SSE_H
-#define EIGEN_PACKET_MATH_SSE_H
-
-namespace Eigen {
-
-namespace internal {
-
-#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
-#endif
-
-#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
-#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
-#endif
-
-typedef __m128  Packet4f;
-typedef __m128i Packet4i;
-typedef __m128d Packet2d;
-
-template<> struct is_arithmetic<__m128>  { enum { value = true }; };
-template<> struct is_arithmetic<__m128i> { enum { value = true }; };
-template<> struct is_arithmetic<__m128d> { enum { value = true }; };
-
-#define vec4f_swizzle1(v,p,q,r,s) \
-  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))
-
-#define vec4i_swizzle1(v,p,q,r,s) \
-  (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))
-
-#define vec2d_swizzle1(v,p,q) \
-  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))
-  
-#define vec4f_swizzle2(a,b,p,q,r,s) \
-  (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))
-
-#define vec4i_swizzle2(a,b,p,q,r,s) \
-  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
-
-#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
-  const Packet4f p4f_##NAME = pset1<Packet4f>(X)
-
-#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
-  const Packet2d p2d_##NAME = pset1<Packet2d>(X)
-
-#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
-  const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))
-
-#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
-  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
-
-
-template<> struct packet_traits<float>  : default_packet_traits
-{
-  typedef Packet4f type;
-  enum {
-    Vectorizable = 1,
-    AlignedOnScalar = 1,
-    size=4,
-
-    HasDiv  = 1,
-    HasSin  = EIGEN_FAST_MATH,
-    HasCos  = EIGEN_FAST_MATH,
-    HasLog  = 1,
-    HasExp  = 1,
-    HasSqrt = 1
-  };
-};
-template<> struct packet_traits<double> : default_packet_traits
-{
-  typedef Packet2d type;
-  enum {
-    Vectorizable = 1,
-    AlignedOnScalar = 1,
-    size=2,
-
-    HasDiv  = 1,
-    HasExp  = 1
-  };
-};
-template<> struct packet_traits<int>    : default_packet_traits
-{
-  typedef Packet4i type;
-  enum {
-    // FIXME check the Has*
-    Vectorizable = 1,
-    AlignedOnScalar = 1,
-    size=4
-  };
-};
-
-template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
-template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2}; };
-template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };
-
-#if defined(_MSC_VER) && (_MSC_VER==1500)
-// Workaround MSVC 9 internal compiler error.
-// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32-bit+SSE mode
-// TODO: let's check whether a better fix exists, like adding a pset0() function (it crashed on pset1(0)).
-template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
-template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
-template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
-#else
-template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set1_ps(from); }
-template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
-template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
-#endif
-
-template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
-template<> EIGEN_STRONG_INLINE Packet2d plset<double>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
-template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
-
-template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
-{
-  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
-  return _mm_xor_ps(a,mask);
-}
-template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
-{
-  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
-  return _mm_xor_pd(a,mask);
-}
-template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
-{
-  return psub(_mm_setr_epi32(0,0,0,0), a);
-}
-
-template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
-{
-#ifdef EIGEN_VECTORIZE_SSE4_1
-  return _mm_mullo_epi32(a,b);
-#else
-  // this version is slightly faster than 4 scalar products
-  return vec4i_swizzle1(
-            vec4i_swizzle2(
-              _mm_mul_epu32(a,b),
-              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
-                            vec4i_swizzle1(b,1,0,3,2)),
-              0,2,0,2),
-            0,2,1,3);
-#endif
-}
-
-template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
-{ eigen_assert(false && "packet integer division is not supported by SSE");
-  return pset1<Packet4i>(0);
-}
-
-// for some weird reasons, it has to be overloaded for packets of integers
-template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
-{
-  // after some benchmarking, this version *is* faster than a scalar implementation
-  Packet4i mask = _mm_cmplt_epi32(a,b);
-  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
-}
-
-template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
-{
-  // after some benchmarking, this version *is* faster than a scalar implementation
-  Packet4i mask = _mm_cmpgt_epi32(a,b);
-  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
-}
-
-template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
-template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
-template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const Packet4i*>(from)); }
-
-#if defined(_MSC_VER)
-  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*  from) {
-    EIGEN_DEBUG_UNALIGNED_LOAD
-    #if (_MSC_VER==1600)
-    // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
-    // (i.e., they do not generate an unaligned load!!)
-    // TODO On most architectures this version should also be faster than a single _mm_loadu_ps,
-    // so we could also enable it for MSVC08, but first we have to make sure the latter does not generate bad code when doing so...
-    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
-    res = _mm_loadh_pi(res, (const __m64*)(from+2));
-    return res;
-    #else
-    return _mm_loadu_ps(from);
-    #endif
-  }
-  template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
-  template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from)); }
-#else
-// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
-// require pointer casting to incompatible pointer types and would lead to invalid code
-// because of the strict aliasing rule. The "dummy" stuff is required to enforce
-// a correct instruction dependency.
-// TODO: do the same for MSVC (ICC is compatible)
-// NOTE: with the code below, MSVC's compiler crashes!
-
-#if defined(__GNUC__) && defined(__i386__)
-  // bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
-  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
-#elif defined(__clang__)
-  // bug 201: Segfaults in __mm_loadh_pd with clang 2.8
-  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
-#else
-  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0
-#endif
-
-template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
-{
-  EIGEN_DEBUG_UNALIGNED_LOAD
-#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
-  return _mm_loadu_ps(from);
-#else
-  __m128d res;
-  res =  _mm_load_sd((const double*)(from)) ;
-  res =  _mm_loadh_pd(res, (const double*)(from+2)) ;
-  return _mm_castpd_ps(res);
-#endif
-}
-template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
-{
-  EIGEN_DEBUG_UNALIGNED_LOAD
-#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
-  return _mm_loadu_pd(from);
-#else
-  __m128d res;
-  res = _mm_load_sd(from) ;
-  res = _mm_loadh_pd(res,from+1);
-  return res;
-#endif
-}
-template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
-{
-  EIGEN_DEBUG_UNALIGNED_LOAD
-#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
-  return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from));
-#else
-  __m128d res;
-  res =  _mm_load_sd((const double*)(from)) ;
-  res =  _mm_loadh_pd(res, (const double*)(from+2)) ;
-  return _mm_castpd_si128(res);
-#endif
-}
-#endif
-
-template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
-{
-  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
-}
-template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double*  from)
-{ return pset1<Packet2d>(from[0]); }
-template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
-{
-  Packet4i tmp;
-  tmp = _mm_loadl_epi64(reinterpret_cast<const Packet4i*>(from));
-  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
-}
-
-template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
-template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
-template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<Packet4i*>(to), from); }
-
-template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
-  EIGEN_DEBUG_UNALIGNED_STORE
-  _mm_storel_pd((to), from);
-  _mm_storeh_pd((to+1), from);
-}
-template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castps_pd(from)); }
-template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*      to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castsi128_pd(from)); }
-
-// some compilers might be tempted to perform multiple moves instead of using a vector path.
-template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
-{
-  Packet4f pa = _mm_set_ss(a);
-  pstore(to, vec4f_swizzle1(pa,0,0,0,0));
-}
-// some compilers might be tempted to perform multiple moves instead of using a vector path.
-template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
-{
-  Packet2d pa = _mm_set_sd(a);
-  pstore(to, vec2d_swizzle1(pa,0,0));
-}
-
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-
-#if defined(_MSC_VER) && defined(_WIN64) && !defined(__INTEL_COMPILER)
-// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
-// Direct access to the struct members fixed bug #62.
-template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
-template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
-template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
-#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
-// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
-template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
-template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
-template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
-#else
-template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
-template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
-template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
-#endif
-
-template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
-{ return _mm_shuffle_ps(a,a,0x1B); }
-template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
-{ return _mm_shuffle_pd(a,a,0x1); }
-template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
-{ return _mm_shuffle_epi32(a,0x1B); }
-
-
-template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
-{
-  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
-  return _mm_and_ps(a,mask);
-}
-template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
-{
-  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
-  return _mm_and_pd(a,mask);
-}
-template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
-{
-  #ifdef EIGEN_VECTORIZE_SSSE3
-  return _mm_abs_epi32(a);
-  #else
-  Packet4i aux = _mm_srai_epi32(a,31);
-  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
-  #endif
-}
-
-EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
-{
-  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
-  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
-  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
-  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
-}
-
-#ifdef EIGEN_VECTORIZE_SSE3
-// TODO implement SSE2 versions as well as integer versions
-template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
-{
-  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
-}
-template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
-{
-  return _mm_hadd_pd(vecs[0], vecs[1]);
-}
-// SSSE3 version:
-// EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs)
-// {
-//   return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
-// }
-
-template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
-{
-  Packet4f tmp0 = _mm_hadd_ps(a,a);
-  return pfirst(_mm_hadd_ps(tmp0, tmp0));
-}
-
-template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return pfirst(_mm_hadd_pd(a, a)); }
-
-// SSSE3 version:
-// EIGEN_STRONG_INLINE float predux(const Packet4i& a)
-// {
-//   Packet4i tmp0 = _mm_hadd_epi32(a,a);
-//   return pfirst(_mm_hadd_epi32(tmp0, tmp0));
-// }
-#else
-// SSE2 versions
-template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
-{
-  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
-  return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
-}
-template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
-{
-  return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
-}
-
-template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
-{
-  Packet4f tmp0, tmp1, tmp2;
-  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
-  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
-  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
-  tmp0 = _mm_add_ps(tmp0, tmp1);
-  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
-  tmp1 = _mm_add_ps(tmp1, tmp2);
-  tmp2 = _mm_movehl_ps(tmp1, tmp0);
-  tmp0 = _mm_movelh_ps(tmp0, tmp1);
-  return _mm_add_ps(tmp0, tmp2);
-}
-
-template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
-{
-  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
-}
-#endif  // SSE3
-
-template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
-{
-  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
-  return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1));
-}
-
-template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
-{
-  Packet4i tmp0, tmp1, tmp2;
-  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
-  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
-  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
-  tmp0 = _mm_add_epi32(tmp0, tmp1);
-  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
-  tmp1 = _mm_add_epi32(tmp1, tmp2);
-  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
-  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
-  return _mm_add_epi32(tmp0, tmp2);
-}
-
-// Other reduction functions:
-
-// mul
-template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
-{
-  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
-  return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
-}
-template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
-{
-  return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
-}
-template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
-{
-  // after some experiments, it seems this is the fastest way to implement it
-  // for GCC (e.g., reusing pmul is very slow!)
-  // TODO try to call _mm_mul_epu32 directly
-  EIGEN_ALIGN16 int aux[4];
-  pstore(aux, a);
-  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
-}
-
-// min
-template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
-{
-  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
-  return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
-}
-template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
-{
-  return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
-}
-template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
-{
-  // after some experiments, it seems this is the fastest way to implement it
-  // for GCC (e.g., it does not like using std::min after the pstore!!)
-  EIGEN_ALIGN16 int aux[4];
-  pstore(aux, a);
-  register int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
-  register int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
-  return aux0<aux2 ? aux0 : aux2;
-}
-
-// max
-template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
-{
-  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
-  return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
-}
-template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
-{
-  return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
-}
-template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
-{
-  // after some experiments, it seems this is the fastest way to implement it
-  // for GCC (e.g., it does not like using std::min after the pstore!!)
-  EIGEN_ALIGN16 int aux[4];
-  pstore(aux, a);
-  register int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
-  register int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
-  return aux0>aux2 ? aux0 : aux2;
-}
-
-#if (defined __GNUC__)
-// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f&  a, const Packet4f&  b, const Packet4f&  c)
-// {
-//   Packet4f res = b;
-//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
-//   return res;
-// }
-// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i&  a, const Packet4i&  b, const int i)
-// {
-//   Packet4i res = a;
-//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
-//   return res;
-// }
-#endif
-
-#ifdef EIGEN_VECTORIZE_SSSE3
-// SSSE3 versions
-template<int Offset>
-struct palign_impl<Offset,Packet4f>
-{
-  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
-  {
-    if (Offset!=0)
-      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
-  }
-};
-
-template<int Offset>
-struct palign_impl<Offset,Packet4i>
-{
-  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
-  {
-    if (Offset!=0)
-      first = _mm_alignr_epi8(second,first, Offset*4);
-  }
-};
-
-template<int Offset>
-struct palign_impl<Offset,Packet2d>
-{
-  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
-  {
-    if (Offset==1)
-      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
-  }
-};
-#else
-// SSE2 versions
-template<int Offset>
-struct palign_impl<Offset,Packet4f>
-{
-  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
-  {
-    if (Offset==1)
-    {
-      first = _mm_move_ss(first,second);
-      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
-    }
-    else if (Offset==2)
-    {
-      first = _mm_movehl_ps(first,first);
-      first = _mm_movelh_ps(first,second);
-    }
-    else if (Offset==3)
-    {
-      first = _mm_move_ss(first,second);
-      first = _mm_shuffle_ps(first,second,0x93);
-    }
-  }
-};
-
-template<int Offset>
-struct palign_impl<Offset,Packet4i>
-{
-  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
-  {
-    if (Offset==1)
-    {
-      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
-      first = _mm_shuffle_epi32(first,0x39);
-    }
-    else if (Offset==2)
-    {
-      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
-      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
-    }
-    else if (Offset==3)
-    {
-      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
-      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
-    }
-  }
-};
-
-template<int Offset>
-struct palign_impl<Offset,Packet2d>
-{
-  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
-  {
-    if (Offset==1)
-    {
-      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
-      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
-    }
-  }
-};
-#endif
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_PACKET_MATH_SSE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/resources/3rdparty/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h
deleted file mode 100644
index 09912fafb..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ /dev/null
@@ -1,1319 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
-#define EIGEN_GENERAL_BLOCK_PANEL_H
-
-namespace Eigen { 
-  
-namespace internal {
-
-template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
-class gebp_traits;
-
-
-/** \internal \returns b if a<=0, and returns a otherwise. */
-inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
-{
-  return a<=0 ? b : a;
-}
-
-/** \internal */
-inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdiff_t* l2=0)
-{
-  static std::ptrdiff_t m_l1CacheSize = 0;
-  static std::ptrdiff_t m_l2CacheSize = 0;
-  if(m_l2CacheSize==0)
-  {
-    m_l1CacheSize = manage_caching_sizes_helper(queryL1CacheSize(),8 * 1024);
-    m_l2CacheSize = manage_caching_sizes_helper(queryTopLevelCacheSize(),1*1024*1024);
-  }
-  
-  if(action==SetAction)
-  {
-    // set the cpu cache sizes, in bytes, from which all block sizes are derived
-    eigen_internal_assert(l1!=0 && l2!=0);
-    m_l1CacheSize = *l1;
-    m_l2CacheSize = *l2;
-  }
-  else if(action==GetAction)
-  {
-    eigen_internal_assert(l1!=0 && l2!=0);
-    *l1 = m_l1CacheSize;
-    *l2 = m_l2CacheSize;
-  }
-  else
-  {
-    eigen_internal_assert(false);
-  }
-}
-
-/** \brief Computes the blocking parameters for a m x k times k x n matrix product
-  *
-  * \param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension.
-  * \param[in,out] m Input: the number of rows of the left hand side. Output: the blocking size along the same dimension.
-  * \param[in,out] n Input: the number of columns of the right hand side. Output: the blocking size along the same dimension.
-  *
-  * Given a m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
-  * this function computes the blocking size parameters along the respective dimensions
-  * for matrix products and related algorithms. The blocking sizes depend on various
-  * parameters:
-  * - the L1 and L2 cache sizes,
-  * - the register level blocking sizes defined by gebp_traits,
-  * - the number of scalars that fit into a packet (when vectorization is enabled).
-  *
-  * \sa setCpuCacheSizes */
-template<typename LhsScalar, typename RhsScalar, int KcFactor>
-void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrdiff_t& n)
-{
-  EIGEN_UNUSED_VARIABLE(n);
-  // Explanations:
-  // Let's recall that the product algorithm forms kc x nc horizontal panels B' on the rhs and
-  // mc x kc blocks A' on the lhs. A' has to fit into the L2 cache. Moreover, B' is processed
-  // in kc x nr vertical small panels, where nr is the blocking size along the n dimension
-  // at the register level. For vectorization purposes, these small vertical panels are unpacked,
-  // i.e., each coefficient is replicated to fit a packet. These small vertical panels have to
-  // stay in the L1 cache.
-  std::ptrdiff_t l1, l2;
-
-  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
-  enum {
-    kdiv = KcFactor * 2 * Traits::nr
-         * Traits::RhsProgress * sizeof(RhsScalar),
-    mr = gebp_traits<LhsScalar,RhsScalar>::mr,
-    mr_mask = (0xffffffff/mr)*mr
-  };
-
-  manage_caching_sizes(GetAction, &l1, &l2);
-  k = std::min<std::ptrdiff_t>(k, l1/kdiv);
-  std::ptrdiff_t _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0;
-  if(_m<m) m = _m & mr_mask;
-}
-
-template<typename LhsScalar, typename RhsScalar>
-inline void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrdiff_t& n)
-{
-  computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n);
-}
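A minimal usage sketch, assuming this header is available; the dimensions below are hypothetical values for an m x k times k x n float product, and the sizes are shrunk in place so the corresponding panels fit the caches described in the comments above.

#include <cstddef>

void blocking_sizes_example()
{
  std::ptrdiff_t k = 512, m = 1024, n = 2048;   // an m x k times k x n float product
  Eigen::internal::computeProductBlockingSizes<float, float>(k, m, n);
  // k and m now hold cache-friendly block sizes; n is left untouched by this overload.
}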
-
-#ifdef EIGEN_HAS_FUSE_CJMADD
-  #define MADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
-#else
-
-  // FIXME (a bit overkill maybe ?)
-
-  template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
-    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
-    {
-      c = cj.pmadd(a,b,c);
-    }
-  };
-
-  template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
-    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
-    {
-      t = b; t = cj.pmul(a,t); c = padd(c,t);
-    }
-  };
-
-  template<typename CJ, typename A, typename B, typename C, typename T>
-  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
-  {
-    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
-  }
-
-  #define MADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
-//   #define MADD(CJ,A,B,C,T)  T = B; T = CJ.pmul(A,T); C = padd(C,T);
-#endif
-
-/* Vectorization logic
- *  real*real: unpack rhs to constant packets, ...
- * 
- *  cd*cd : unpack rhs to (b_r,b_r), (b_i,b_i), mul to get (a_r b_r,a_i b_r) (a_r b_i,a_i b_i),
- *          storing each res packet into two packets (2x2),
- *          at the end combine them: swap the second and addsub them 
- *  cf*cf : same but with 2x4 blocks
- *  cplx*real : unpack rhs to constant packets, ...
- *  real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
- */
-template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
-class gebp_traits
-{
-public:
-  typedef _LhsScalar LhsScalar;
-  typedef _RhsScalar RhsScalar;
-  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
-
-  enum {
-    ConjLhs = _ConjLhs,
-    ConjRhs = _ConjRhs,
-    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
-    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
-    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
-    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
-    
-    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
-
-    // register block size along the N direction (must be either 2 or 4)
-    nr = NumberOfRegisters/4,
-
-    // register block size along the M direction (currently, this one cannot be modified)
-    mr = 2 * LhsPacketSize,
-    
-    WorkSpaceFactor = nr * RhsPacketSize,
-
-    LhsProgress = LhsPacketSize,
-    RhsProgress = RhsPacketSize
-  };
-
-  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
-  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
-  typedef typename packet_traits<ResScalar>::type  _ResPacket;
-
-  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
-  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
-  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
-
-  typedef ResPacket AccPacket;
-  
-  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
-  {
-    p = pset1<ResPacket>(ResScalar(0));
-  }
-
-  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
-  {
-    for(DenseIndex k=0; k<n; k++)
-      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
-  }
-
-  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
-  {
-    dest = pload<RhsPacket>(b);
-  }
-
-  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
-  {
-    dest = pload<LhsPacket>(a);
-  }
-
-  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, AccPacket& tmp) const
-  {
-    tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);
-  }
-
-  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
-  {
-    r = pmadd(c,alpha,r);
-  }
-
-protected:
-//   conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
-//   conj_helper<LhsPacket,RhsPacket,ConjLhs,ConjRhs> pcj;
-};
-
-template<typename RealScalar, bool _ConjLhs>
-class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
-{
-public:
-  typedef std::complex<RealScalar> LhsScalar;
-  typedef RealScalar RhsScalar;
-  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
-
-  enum {
-    ConjLhs = _ConjLhs,
-    ConjRhs = false,
-    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
-    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
-    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
-    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
-    
-    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
-    nr = NumberOfRegisters/4,
-    mr = 2 * LhsPacketSize,
-    WorkSpaceFactor = nr*RhsPacketSize,
-
-    LhsProgress = LhsPacketSize,
-    RhsProgress = RhsPacketSize
-  };
-
-  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
-  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
-  typedef typename packet_traits<ResScalar>::type  _ResPacket;
-
-  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
-  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
-  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
-
-  typedef ResPacket AccPacket;
-
-  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
-  {
-    p = pset1<ResPacket>(ResScalar(0));
-  }
-
-  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
-  {
-    for(DenseIndex k=0; k<n; k++)
-      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
-  }
-
-  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
-  {
-    dest = pload<RhsPacket>(b);
-  }
-
-  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
-  {
-    dest = pload<LhsPacket>(a);
-  }
-
-  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
-  {
-    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
-  }
-
-  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
-  {
-    tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
-  }
-
-  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
-  {
-    c += a * b;
-  }
-
-  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
-  {
-    r = cj.pmadd(c,alpha,r);
-  }
-
-protected:
-  conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
-};
-
-template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
-class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
-{
-public:
-  typedef std::complex<RealScalar>  Scalar;
-  typedef std::complex<RealScalar>  LhsScalar;
-  typedef std::complex<RealScalar>  RhsScalar;
-  typedef std::complex<RealScalar>  ResScalar;
-  
-  enum {
-    ConjLhs = _ConjLhs,
-    ConjRhs = _ConjRhs,
-    Vectorizable = packet_traits<RealScalar>::Vectorizable
-                && packet_traits<Scalar>::Vectorizable,
-    RealPacketSize  = Vectorizable ? packet_traits<RealScalar>::size : 1,
-    ResPacketSize   = Vectorizable ? packet_traits<ResScalar>::size : 1,
-    
-    nr = 2,
-    mr = 2 * ResPacketSize,
-    WorkSpaceFactor = Vectorizable ? 2*nr*RealPacketSize : nr,
-
-    LhsProgress = ResPacketSize,
-    RhsProgress = Vectorizable ? 2*ResPacketSize : 1
-  };
-  
-  typedef typename packet_traits<RealScalar>::type RealPacket;
-  typedef typename packet_traits<Scalar>::type     ScalarPacket;
-  struct DoublePacket
-  {
-    RealPacket first;
-    RealPacket second;
-  };
-
-  typedef typename conditional<Vectorizable,RealPacket,  Scalar>::type LhsPacket;
-  typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type RhsPacket;
-  typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
-  typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type AccPacket;
-  
-  EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }
-
-  EIGEN_STRONG_INLINE void initAcc(DoublePacket& p)
-  {
-    p.first   = pset1<RealPacket>(RealScalar(0));
-    p.second  = pset1<RealPacket>(RealScalar(0));
-  }
-
-  /* Unpack the rhs coefficients such that each complex coefficient is spread into
-   * two packets containing respectively the real and the imaginary coefficient
-   * duplicated as many times as needed: (x+iy) => [x, ..., x] [y, ..., y]
-   */
-  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const Scalar* rhs, Scalar* b)
-  {
-    for(DenseIndex k=0; k<n; k++)
-    {
-      if(Vectorizable)
-      {
-        pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+0],             real(rhs[k]));
-        pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+ResPacketSize], imag(rhs[k]));
-      }
-      else
-        b[k] = rhs[k];
-    }
-  }
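
// Illustrative sketch (not part of the patched sources): the memory layout that an
// unpackRhs of the complex path above would produce, modelled in scalar code and
// assuming a hypothetical 4-wide real packet (e.g. 4 floats per SSE register).
#include <complex>
#include <cstdio>

static const int kRealPacketSize = 4; // assumption: 4 reals per packet

// Scalar model: each rhs coefficient x+iy becomes [x x x x][y y y y],
// i.e. 2*kRealPacketSize reals per complex coefficient.
void unpack_rhs_model(int n, const std::complex<float>* rhs, float* b)
{
  for (int k = 0; k < n; ++k)
  {
    for (int p = 0; p < kRealPacketSize; ++p) b[2*k*kRealPacketSize + p]                   = rhs[k].real();
    for (int p = 0; p < kRealPacketSize; ++p) b[2*k*kRealPacketSize + kRealPacketSize + p] = rhs[k].imag();
  }
}

int main()
{
  std::complex<float> rhs[2] = { {1.f, 2.f}, {3.f, 4.f} };
  float b[2 * 2 * kRealPacketSize];
  unpack_rhs_model(2, rhs, b);
  for (float v : b) std::printf("%g ", v); // 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4
  std::printf("\n");
}
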
-
-  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const { dest = *b; }
-
-  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacket& dest) const
-  {
-    dest.first  = pload<RealPacket>((const RealScalar*)b);
-    dest.second = pload<RealPacket>((const RealScalar*)(b+ResPacketSize));
-  }
-
-  // nothing special here
-  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
-  {
-    dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
-  }
-
-  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacket& c, RhsPacket& /*tmp*/) const
-  {
-    c.first   = padd(pmul(a,b.first), c.first);
-    c.second  = padd(pmul(a,b.second),c.second);
-  }
-
-  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
-  {
-    c = cj.pmadd(a,b,c);
-  }
-  
-  EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }
-  
-  EIGEN_STRONG_INLINE void acc(const DoublePacket& c, const ResPacket& alpha, ResPacket& r) const
-  {
-    // assemble c
-    ResPacket tmp;
-    if((!ConjLhs)&&(!ConjRhs))
-    {
-      tmp = pcplxflip(pconj(ResPacket(c.second)));
-      tmp = padd(ResPacket(c.first),tmp);
-    }
-    else if((!ConjLhs)&&(ConjRhs))
-    {
-      tmp = pconj(pcplxflip(ResPacket(c.second)));
-      tmp = padd(ResPacket(c.first),tmp);
-    }
-    else if((ConjLhs)&&(!ConjRhs))
-    {
-      tmp = pcplxflip(ResPacket(c.second));
-      tmp = padd(pconj(ResPacket(c.first)),tmp);
-    }
-    else if((ConjLhs)&&(ConjRhs))
-    {
-      tmp = pcplxflip(ResPacket(c.second));
-      tmp = psub(pconj(ResPacket(c.first)),tmp);
-    }
-    
-    r = pmadd(tmp,alpha,r);
-  }
-
-protected:
-  conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
-};
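
// Hedged scalar model of the acc() recombination above: the DoublePacket accumulator
// keeps first = sum(a*real(b)) and second = sum(a*imag(b)); the four conjugation cases
// are then recovered exactly by the pcplxflip/pconj combinations of the kernel. The
// flip/assemble helpers below are made-up names used only for this illustration.
#include <cassert>
#include <complex>

typedef std::complex<double> C;

// flip swaps real and imaginary parts, like pcplxflip does lane-wise on a packet.
static C flip(const C& z) { return C(z.imag(), z.real()); }

static C assemble(const C& first, const C& second, bool conjLhs, bool conjRhs)
{
  if (!conjLhs && !conjRhs) return first + flip(std::conj(second));          //       a  *       b
  if (!conjLhs &&  conjRhs) return first + std::conj(flip(second));          //       a  *  conj(b)
  if ( conjLhs && !conjRhs) return std::conj(first) + flip(second);          //  conj(a) *       b
  return std::conj(first) - flip(second);                                    //  conj(a) *  conj(b)
}

int main()
{
  const C a(1.5, -2.0), b(0.5, 3.0);
  const C first = a * b.real(), second = a * b.imag(); // what madd() accumulates
  assert(std::abs(assemble(first, second, false, false) - a * b)                       < 1e-12);
  assert(std::abs(assemble(first, second, false, true ) - a * std::conj(b))            < 1e-12);
  assert(std::abs(assemble(first, second, true,  false) - std::conj(a) * b)            < 1e-12);
  assert(std::abs(assemble(first, second, true,  true ) - std::conj(a) * std::conj(b)) < 1e-12);
  return 0;
}
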
-
-template<typename RealScalar, bool _ConjRhs>
-class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
-{
-public:
-  typedef std::complex<RealScalar>  Scalar;
-  typedef RealScalar  LhsScalar;
-  typedef Scalar      RhsScalar;
-  typedef Scalar      ResScalar;
-
-  enum {
-    ConjLhs = false,
-    ConjRhs = _ConjRhs,
-    Vectorizable = packet_traits<RealScalar>::Vectorizable
-                && packet_traits<Scalar>::Vectorizable,
-    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
-    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
-    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
-    
-    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
-    nr = 4,
-    mr = 2*ResPacketSize,
-    WorkSpaceFactor = nr*RhsPacketSize,
-
-    LhsProgress = ResPacketSize,
-    RhsProgress = ResPacketSize
-  };
-
-  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
-  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
-  typedef typename packet_traits<ResScalar>::type  _ResPacket;
-
-  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
-  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
-  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
-
-  typedef ResPacket AccPacket;
-
-  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
-  {
-    p = pset1<ResPacket>(ResScalar(0));
-  }
-
-  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
-  {
-    for(DenseIndex k=0; k<n; k++)
-      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
-  }
-
-  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
-  {
-    dest = pload<RhsPacket>(b);
-  }
-
-  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
-  {
-    dest = ploaddup<LhsPacket>(a);
-  }
-
-  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
-  {
-    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
-  }
-
-  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
-  {
-    tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
-  }
-
-  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
-  {
-    c += a * b;
-  }
-
-  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
-  {
-    r = cj.pmadd(alpha,c,r);
-  }
-
-protected:
-  conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
-};
-
-/* optimized GEneral packed Block * packed Panel product kernel
- *
- * Mixing type logic: C += A * B
- *  |  A  |  B  | comments
- *  |real |cplx | no vectorization yet, would require packing A with duplication
- *  |cplx |real | easy vectorization
- */
-template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
-struct gebp_kernel
-{
-  typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
-  typedef typename Traits::ResScalar ResScalar;
-  typedef typename Traits::LhsPacket LhsPacket;
-  typedef typename Traits::RhsPacket RhsPacket;
-  typedef typename Traits::ResPacket ResPacket;
-  typedef typename Traits::AccPacket AccPacket;
-
-  enum {
-    Vectorizable  = Traits::Vectorizable,
-    LhsProgress   = Traits::LhsProgress,
-    RhsProgress   = Traits::RhsProgress,
-    ResPacketSize = Traits::ResPacketSize
-  };
-
-  EIGEN_DONT_INLINE
-  void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha,
-                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB = 0)
-  {
-    Traits traits;
-    
-    if(strideA==-1) strideA = depth;
-    if(strideB==-1) strideB = depth;
-    conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
-//     conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
-    Index packet_cols = (cols/nr) * nr;
-    const Index peeled_mc = (rows/mr)*mr;
-    // FIXME:
-    const Index peeled_mc2 = peeled_mc + (rows-peeled_mc >= LhsProgress ? LhsProgress : 0);
-    const Index peeled_kc = (depth/4)*4;
-
-    if(unpackedB==0)
-      unpackedB = const_cast<RhsScalar*>(blockB - strideB * nr * RhsProgress);
-
-    // loops on each micro vertical panel of rhs (depth x nr)
-    for(Index j2=0; j2<packet_cols; j2+=nr)
-    {
-      traits.unpackRhs(depth*nr,&blockB[j2*strideB+offsetB*nr],unpackedB); 
-
-      // loops on each largest micro horizontal panel of lhs (mr x depth)
-      // => we select an mr x nr micro block of res which is entirely
-      //    stored into mr/packet_size x nr registers.
-      for(Index i=0; i<peeled_mc; i+=mr)
-      {
-        const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
-        prefetch(&blA[0]);
-
-        // gets res block as register
-        AccPacket C0, C1, C2, C3, C4, C5, C6, C7;
-                  traits.initAcc(C0);
-                  traits.initAcc(C1);
-        if(nr==4) traits.initAcc(C2);
-        if(nr==4) traits.initAcc(C3);
-                  traits.initAcc(C4);
-                  traits.initAcc(C5);
-        if(nr==4) traits.initAcc(C6);
-        if(nr==4) traits.initAcc(C7);
-
-        ResScalar* r0 = &res[(j2+0)*resStride + i];
-        ResScalar* r1 = r0 + resStride;
-        ResScalar* r2 = r1 + resStride;
-        ResScalar* r3 = r2 + resStride;
-
-        prefetch(r0+16);
-        prefetch(r1+16);
-        prefetch(r2+16);
-        prefetch(r3+16);
-
-        // performs "inner" product
-        // TODO let's check whether the following peeled loop could not be
-        //      optimized via optimal prefetching from one loop to the other
-        const RhsScalar* blB = unpackedB;
-        for(Index k=0; k<peeled_kc; k+=4)
-        {
-          if(nr==2)
-          {
-            LhsPacket A0, A1;
-            RhsPacket B_0;
-            RhsPacket T0;
-            
-EIGEN_ASM_COMMENT("mybegin2");
-            traits.loadLhs(&blA[0*LhsProgress], A0);
-            traits.loadLhs(&blA[1*LhsProgress], A1);
-            traits.loadRhs(&blB[0*RhsProgress], B_0);
-            traits.madd(A0,B_0,C0,T0);
-            traits.madd(A1,B_0,C4,B_0);
-            traits.loadRhs(&blB[1*RhsProgress], B_0);
-            traits.madd(A0,B_0,C1,T0);
-            traits.madd(A1,B_0,C5,B_0);
-
-            traits.loadLhs(&blA[2*LhsProgress], A0);
-            traits.loadLhs(&blA[3*LhsProgress], A1);
-            traits.loadRhs(&blB[2*RhsProgress], B_0);
-            traits.madd(A0,B_0,C0,T0);
-            traits.madd(A1,B_0,C4,B_0);
-            traits.loadRhs(&blB[3*RhsProgress], B_0);
-            traits.madd(A0,B_0,C1,T0);
-            traits.madd(A1,B_0,C5,B_0);
-
-            traits.loadLhs(&blA[4*LhsProgress], A0);
-            traits.loadLhs(&blA[5*LhsProgress], A1);
-            traits.loadRhs(&blB[4*RhsProgress], B_0);
-            traits.madd(A0,B_0,C0,T0);
-            traits.madd(A1,B_0,C4,B_0);
-            traits.loadRhs(&blB[5*RhsProgress], B_0);
-            traits.madd(A0,B_0,C1,T0);
-            traits.madd(A1,B_0,C5,B_0);
-
-            traits.loadLhs(&blA[6*LhsProgress], A0);
-            traits.loadLhs(&blA[7*LhsProgress], A1);
-            traits.loadRhs(&blB[6*RhsProgress], B_0);
-            traits.madd(A0,B_0,C0,T0);
-            traits.madd(A1,B_0,C4,B_0);
-            traits.loadRhs(&blB[7*RhsProgress], B_0);
-            traits.madd(A0,B_0,C1,T0);
-            traits.madd(A1,B_0,C5,B_0);
-EIGEN_ASM_COMMENT("myend");
-          }
-          else
-          {
-EIGEN_ASM_COMMENT("mybegin4");
-            LhsPacket A0, A1;
-            RhsPacket B_0, B1, B2, B3;
-            RhsPacket T0;
-            
-            traits.loadLhs(&blA[0*LhsProgress], A0);
-            traits.loadLhs(&blA[1*LhsProgress], A1);
-            traits.loadRhs(&blB[0*RhsProgress], B_0);
-            traits.loadRhs(&blB[1*RhsProgress], B1);
-
-            traits.madd(A0,B_0,C0,T0);
-            traits.loadRhs(&blB[2*RhsProgress], B2);
-            traits.madd(A1,B_0,C4,B_0);
-            traits.loadRhs(&blB[3*RhsProgress], B3);
-            traits.loadRhs(&blB[4*RhsProgress], B_0);
-            traits.madd(A0,B1,C1,T0);
-            traits.madd(A1,B1,C5,B1);
-            traits.loadRhs(&blB[5*RhsProgress], B1);
-            traits.madd(A0,B2,C2,T0);
-            traits.madd(A1,B2,C6,B2);
-            traits.loadRhs(&blB[6*RhsProgress], B2);
-            traits.madd(A0,B3,C3,T0);
-            traits.loadLhs(&blA[2*LhsProgress], A0);
-            traits.madd(A1,B3,C7,B3);
-            traits.loadLhs(&blA[3*LhsProgress], A1);
-            traits.loadRhs(&blB[7*RhsProgress], B3);
-            traits.madd(A0,B_0,C0,T0);
-            traits.madd(A1,B_0,C4,B_0);
-            traits.loadRhs(&blB[8*RhsProgress], B_0);
-            traits.madd(A0,B1,C1,T0);
-            traits.madd(A1,B1,C5,B1);
-            traits.loadRhs(&blB[9*RhsProgress], B1);
-            traits.madd(A0,B2,C2,T0);
-            traits.madd(A1,B2,C6,B2);
-            traits.loadRhs(&blB[10*RhsProgress], B2);
-            traits.madd(A0,B3,C3,T0);
-            traits.loadLhs(&blA[4*LhsProgress], A0);
-            traits.madd(A1,B3,C7,B3);
-            traits.loadLhs(&blA[5*LhsProgress], A1);
-            traits.loadRhs(&blB[11*RhsProgress], B3);
-
-            traits.madd(A0,B_0,C0,T0);
-            traits.madd(A1,B_0,C4,B_0);
-            traits.loadRhs(&blB[12*RhsProgress], B_0);
-            traits.madd(A0,B1,C1,T0);
-            traits.madd(A1,B1,C5,B1);
-            traits.loadRhs(&blB[13*RhsProgress], B1);
-            traits.madd(A0,B2,C2,T0);
-            traits.madd(A1,B2,C6,B2);
-            traits.loadRhs(&blB[14*RhsProgress], B2);
-            traits.madd(A0,B3,C3,T0);
-            traits.loadLhs(&blA[6*LhsProgress], A0);
-            traits.madd(A1,B3,C7,B3);
-            traits.loadLhs(&blA[7*LhsProgress], A1);
-            traits.loadRhs(&blB[15*RhsProgress], B3);
-            traits.madd(A0,B_0,C0,T0);
-            traits.madd(A1,B_0,C4,B_0);
-            traits.madd(A0,B1,C1,T0);
-            traits.madd(A1,B1,C5,B1);
-            traits.madd(A0,B2,C2,T0);
-            traits.madd(A1,B2,C6,B2);
-            traits.madd(A0,B3,C3,T0);
-            traits.madd(A1,B3,C7,B3);
-          }
-
-          blB += 4*nr*RhsProgress;
-          blA += 4*mr;
-        }
-        // process remaining peeled loop
-        for(Index k=peeled_kc; k<depth; k++)
-        {
-          if(nr==2)
-          {
-            LhsPacket A0, A1;
-            RhsPacket B_0;
-            RhsPacket T0;
-
-            traits.loadLhs(&blA[0*LhsProgress], A0);
-            traits.loadLhs(&blA[1*LhsProgress], A1);
-            traits.loadRhs(&blB[0*RhsProgress], B_0);
-            traits.madd(A0,B_0,C0,T0);
-            traits.madd(A1,B_0,C4,B_0);
-            traits.loadRhs(&blB[1*RhsProgress], B_0);
-            traits.madd(A0,B_0,C1,T0);
-            traits.madd(A1,B_0,C5,B_0);
-          }
-          else
-          {
-            LhsPacket A0, A1;
-            RhsPacket B_0, B1, B2, B3;
-            RhsPacket T0;
-
-            traits.loadLhs(&blA[0*LhsProgress], A0);
-            traits.loadLhs(&blA[1*LhsProgress], A1);
-            traits.loadRhs(&blB[0*RhsProgress], B_0);
-            traits.loadRhs(&blB[1*RhsProgress], B1);
-
-            traits.madd(A0,B_0,C0,T0);
-            traits.loadRhs(&blB[2*RhsProgress], B2);
-            traits.madd(A1,B_0,C4,B_0);
-            traits.loadRhs(&blB[3*RhsProgress], B3);
-            traits.madd(A0,B1,C1,T0);
-            traits.madd(A1,B1,C5,B1);
-            traits.madd(A0,B2,C2,T0);
-            traits.madd(A1,B2,C6,B2);
-            traits.madd(A0,B3,C3,T0);
-            traits.madd(A1,B3,C7,B3);
-          }
-
-          blB += nr*RhsProgress;
-          blA += mr;
-        }
-
-        if(nr==4)
-        {
-          ResPacket R0, R1, R2, R3, R4, R5, R6;
-          ResPacket alphav = pset1<ResPacket>(alpha);
-
-          R0 = ploadu<ResPacket>(r0);
-          R1 = ploadu<ResPacket>(r1);
-          R2 = ploadu<ResPacket>(r2);
-          R3 = ploadu<ResPacket>(r3);
-          R4 = ploadu<ResPacket>(r0 + ResPacketSize);
-          R5 = ploadu<ResPacket>(r1 + ResPacketSize);
-          R6 = ploadu<ResPacket>(r2 + ResPacketSize);
-          traits.acc(C0, alphav, R0);
-          pstoreu(r0, R0);
-          R0 = ploadu<ResPacket>(r3 + ResPacketSize);
-
-          traits.acc(C1, alphav, R1);
-          traits.acc(C2, alphav, R2);
-          traits.acc(C3, alphav, R3);
-          traits.acc(C4, alphav, R4);
-          traits.acc(C5, alphav, R5);
-          traits.acc(C6, alphav, R6);
-          traits.acc(C7, alphav, R0);
-          
-          pstoreu(r1, R1);
-          pstoreu(r2, R2);
-          pstoreu(r3, R3);
-          pstoreu(r0 + ResPacketSize, R4);
-          pstoreu(r1 + ResPacketSize, R5);
-          pstoreu(r2 + ResPacketSize, R6);
-          pstoreu(r3 + ResPacketSize, R0);
-        }
-        else
-        {
-          ResPacket R0, R1, R4;
-          ResPacket alphav = pset1<ResPacket>(alpha);
-
-          R0 = ploadu<ResPacket>(r0);
-          R1 = ploadu<ResPacket>(r1);
-          R4 = ploadu<ResPacket>(r0 + ResPacketSize);
-          traits.acc(C0, alphav, R0);
-          pstoreu(r0, R0);
-          R0 = ploadu<ResPacket>(r1 + ResPacketSize);
-          traits.acc(C1, alphav, R1);
-          traits.acc(C4, alphav, R4);
-          traits.acc(C5, alphav, R0);
-          pstoreu(r1, R1);
-          pstoreu(r0 + ResPacketSize, R4);
-          pstoreu(r1 + ResPacketSize, R0);
-        }
-        
-      }
-      
-      if(rows-peeled_mc>=LhsProgress)
-      {
-        Index i = peeled_mc;
-        const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];
-        prefetch(&blA[0]);
-
-        // gets res block as register
-        AccPacket C0, C1, C2, C3;
-                  traits.initAcc(C0);
-                  traits.initAcc(C1);
-        if(nr==4) traits.initAcc(C2);
-        if(nr==4) traits.initAcc(C3);
-
-        // performs "inner" product
-        const RhsScalar* blB = unpackedB;
-        for(Index k=0; k<peeled_kc; k+=4)
-        {
-          if(nr==2)
-          {
-            LhsPacket A0;
-            RhsPacket B_0, B1;
-
-            traits.loadLhs(&blA[0*LhsProgress], A0);
-            traits.loadRhs(&blB[0*RhsProgress], B_0);
-            traits.loadRhs(&blB[1*RhsProgress], B1);
-            traits.madd(A0,B_0,C0,B_0);
-            traits.loadRhs(&blB[2*RhsProgress], B_0);
-            traits.madd(A0,B1,C1,B1);
-            traits.loadLhs(&blA[1*LhsProgress], A0);
-            traits.loadRhs(&blB[3*RhsProgress], B1);
-            traits.madd(A0,B_0,C0,B_0);
-            traits.loadRhs(&blB[4*RhsProgress], B_0);
-            traits.madd(A0,B1,C1,B1);
-            traits.loadLhs(&blA[2*LhsProgress], A0);
-            traits.loadRhs(&blB[5*RhsProgress], B1);
-            traits.madd(A0,B_0,C0,B_0);
-            traits.loadRhs(&blB[6*RhsProgress], B_0);
-            traits.madd(A0,B1,C1,B1);
-            traits.loadLhs(&blA[3*LhsProgress], A0);
-            traits.loadRhs(&blB[7*RhsProgress], B1);
-            traits.madd(A0,B_0,C0,B_0);
-            traits.madd(A0,B1,C1,B1);
-          }
-          else
-          {
-            LhsPacket A0;
-            RhsPacket B_0, B1, B2, B3;
-
-            traits.loadLhs(&blA[0*LhsProgress], A0);
-            traits.loadRhs(&blB[0*RhsProgress], B_0);
-            traits.loadRhs(&blB[1*RhsProgress], B1);
-
-            traits.madd(A0,B_0,C0,B_0);
-            traits.loadRhs(&blB[2*RhsProgress], B2);
-            traits.loadRhs(&blB[3*RhsProgress], B3);
-            traits.loadRhs(&blB[4*RhsProgress], B_0);
-            traits.madd(A0,B1,C1,B1);
-            traits.loadRhs(&blB[5*RhsProgress], B1);
-            traits.madd(A0,B2,C2,B2);
-            traits.loadRhs(&blB[6*RhsProgress], B2);
-            traits.madd(A0,B3,C3,B3);
-            traits.loadLhs(&blA[1*LhsProgress], A0);
-            traits.loadRhs(&blB[7*RhsProgress], B3);
-            traits.madd(A0,B_0,C0,B_0);
-            traits.loadRhs(&blB[8*RhsProgress], B_0);
-            traits.madd(A0,B1,C1,B1);
-            traits.loadRhs(&blB[9*RhsProgress], B1);
-            traits.madd(A0,B2,C2,B2);
-            traits.loadRhs(&blB[10*RhsProgress], B2);
-            traits.madd(A0,B3,C3,B3);
-            traits.loadLhs(&blA[2*LhsProgress], A0);
-            traits.loadRhs(&blB[11*RhsProgress], B3);
-
-            traits.madd(A0,B_0,C0,B_0);
-            traits.loadRhs(&blB[12*RhsProgress], B_0);
-            traits.madd(A0,B1,C1,B1);
-            traits.loadRhs(&blB[13*RhsProgress], B1);
-            traits.madd(A0,B2,C2,B2);
-            traits.loadRhs(&blB[14*RhsProgress], B2);
-            traits.madd(A0,B3,C3,B3);
-
-            traits.loadLhs(&blA[3*LhsProgress], A0);
-            traits.loadRhs(&blB[15*RhsProgress], B3);
-            traits.madd(A0,B_0,C0,B_0);
-            traits.madd(A0,B1,C1,B1);
-            traits.madd(A0,B2,C2,B2);
-            traits.madd(A0,B3,C3,B3);
-          }
-
-          blB += nr*4*RhsProgress;
-          blA += 4*LhsProgress;
-        }
-        // process remaining peeled loop
-        for(Index k=peeled_kc; k<depth; k++)
-        {
-          if(nr==2)
-          {
-            LhsPacket A0;
-            RhsPacket B_0, B1;
-
-            traits.loadLhs(&blA[0*LhsProgress], A0);
-            traits.loadRhs(&blB[0*RhsProgress], B_0);
-            traits.loadRhs(&blB[1*RhsProgress], B1);
-            traits.madd(A0,B_0,C0,B_0);
-            traits.madd(A0,B1,C1,B1);
-          }
-          else
-          {
-            LhsPacket A0;
-            RhsPacket B_0, B1, B2, B3;
-
-            traits.loadLhs(&blA[0*LhsProgress], A0);
-            traits.loadRhs(&blB[0*RhsProgress], B_0);
-            traits.loadRhs(&blB[1*RhsProgress], B1);
-            traits.loadRhs(&blB[2*RhsProgress], B2);
-            traits.loadRhs(&blB[3*RhsProgress], B3);
-
-            traits.madd(A0,B_0,C0,B_0);
-            traits.madd(A0,B1,C1,B1);
-            traits.madd(A0,B2,C2,B2);
-            traits.madd(A0,B3,C3,B3);
-          }
-
-          blB += nr*RhsProgress;
-          blA += LhsProgress;
-        }
-
-        ResPacket R0, R1, R2, R3;
-        ResPacket alphav = pset1<ResPacket>(alpha);
-
-        ResScalar* r0 = &res[(j2+0)*resStride + i];
-        ResScalar* r1 = r0 + resStride;
-        ResScalar* r2 = r1 + resStride;
-        ResScalar* r3 = r2 + resStride;
-
-                  R0 = ploadu<ResPacket>(r0);
-                  R1 = ploadu<ResPacket>(r1);
-        if(nr==4) R2 = ploadu<ResPacket>(r2);
-        if(nr==4) R3 = ploadu<ResPacket>(r3);
-
-                  traits.acc(C0, alphav, R0);
-                  traits.acc(C1, alphav, R1);
-        if(nr==4) traits.acc(C2, alphav, R2);
-        if(nr==4) traits.acc(C3, alphav, R3);
-
-                  pstoreu(r0, R0);
-                  pstoreu(r1, R1);
-        if(nr==4) pstoreu(r2, R2);
-        if(nr==4) pstoreu(r3, R3);
-      }
-      for(Index i=peeled_mc2; i<rows; i++)
-      {
-        const LhsScalar* blA = &blockA[i*strideA+offsetA];
-        prefetch(&blA[0]);
-
-        // gets a 1 x nr res block as registers
-        ResScalar C0(0), C1(0), C2(0), C3(0);
-        // TODO directly use blockB ???
-        const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
-        for(Index k=0; k<depth; k++)
-        {
-          if(nr==2)
-          {
-            LhsScalar A0;
-            RhsScalar B_0, B1;
-
-            A0 = blA[k];
-            B_0 = blB[0];
-            B1 = blB[1];
-            MADD(cj,A0,B_0,C0,B_0);
-            MADD(cj,A0,B1,C1,B1);
-          }
-          else
-          {
-            LhsScalar A0;
-            RhsScalar B_0, B1, B2, B3;
-
-            A0 = blA[k];
-            B_0 = blB[0];
-            B1 = blB[1];
-            B2 = blB[2];
-            B3 = blB[3];
-
-            MADD(cj,A0,B_0,C0,B_0);
-            MADD(cj,A0,B1,C1,B1);
-            MADD(cj,A0,B2,C2,B2);
-            MADD(cj,A0,B3,C3,B3);
-          }
-
-          blB += nr;
-        }
-                  res[(j2+0)*resStride + i] += alpha*C0;
-                  res[(j2+1)*resStride + i] += alpha*C1;
-        if(nr==4) res[(j2+2)*resStride + i] += alpha*C2;
-        if(nr==4) res[(j2+3)*resStride + i] += alpha*C3;
-      }
-    }
-    // process remaining rhs/res columns one at a time
-    // => do the same but with nr==1
-    for(Index j2=packet_cols; j2<cols; j2++)
-    {
-      // unpack B
-      traits.unpackRhs(depth, &blockB[j2*strideB+offsetB], unpackedB);
-
-      for(Index i=0; i<peeled_mc; i+=mr)
-      {
-        const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
-        prefetch(&blA[0]);
-
-        // TODO move the res loads to the stores
-
-        // get res block as registers
-        AccPacket C0, C4;
-        traits.initAcc(C0);
-        traits.initAcc(C4);
-
-        const RhsScalar* blB = unpackedB;
-        for(Index k=0; k<depth; k++)
-        {
-          LhsPacket A0, A1;
-          RhsPacket B_0;
-          RhsPacket T0;
-
-          traits.loadLhs(&blA[0*LhsProgress], A0);
-          traits.loadLhs(&blA[1*LhsProgress], A1);
-          traits.loadRhs(&blB[0*RhsProgress], B_0);
-          traits.madd(A0,B_0,C0,T0);
-          traits.madd(A1,B_0,C4,B_0);
-
-          blB += RhsProgress;
-          blA += 2*LhsProgress;
-        }
-        ResPacket R0, R4;
-        ResPacket alphav = pset1<ResPacket>(alpha);
-
-        ResScalar* r0 = &res[(j2+0)*resStride + i];
-
-        R0 = ploadu<ResPacket>(r0);
-        R4 = ploadu<ResPacket>(r0+ResPacketSize);
-
-        traits.acc(C0, alphav, R0);
-        traits.acc(C4, alphav, R4);
-
-        pstoreu(r0,               R0);
-        pstoreu(r0+ResPacketSize, R4);
-      }
-      if(rows-peeled_mc>=LhsProgress)
-      {
-        Index i = peeled_mc;
-        const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];
-        prefetch(&blA[0]);
-
-        AccPacket C0;
-        traits.initAcc(C0);
-
-        const RhsScalar* blB = unpackedB;
-        for(Index k=0; k<depth; k++)
-        {
-          LhsPacket A0;
-          RhsPacket B_0;
-          traits.loadLhs(blA, A0);
-          traits.loadRhs(blB, B_0);
-          traits.madd(A0, B_0, C0, B_0);
-          blB += RhsProgress;
-          blA += LhsProgress;
-        }
-
-        ResPacket alphav = pset1<ResPacket>(alpha);
-        ResPacket R0 = ploadu<ResPacket>(&res[(j2+0)*resStride + i]);
-        traits.acc(C0, alphav, R0);
-        pstoreu(&res[(j2+0)*resStride + i], R0);
-      }
-      for(Index i=peeled_mc2; i<rows; i++)
-      {
-        const LhsScalar* blA = &blockA[i*strideA+offsetA];
-        prefetch(&blA[0]);
-
-        // gets a 1 x 1 res block as registers
-        ResScalar C0(0);
-        // FIXME directly use blockB ??
-        const RhsScalar* blB = &blockB[j2*strideB+offsetB];
-        for(Index k=0; k<depth; k++)
-        {
-          LhsScalar A0 = blA[k];
-          RhsScalar B_0 = blB[k];
-          MADD(cj, A0, B_0, C0, B_0);
-        }
-        res[(j2+0)*resStride + i] += alpha*C0;
-      }
-    }
-  }
-};
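
// Hedged scalar reference of the register-blocking idea implemented by the gebp_kernel
// above: for each mr x nr tile of the result, keep the partial sums in local accumulators
// (registers in the real kernel) while streaming through the A and B panels, then scale
// by alpha and add into C. Packing, peeling and vectorization are omitted; the mr/nr
// values and names below are illustrative only.
#include <vector>
#include <cstdio>

static const int mr = 2, nr = 2; // tiny tile, for illustration only

// C (rows x cols, column-major, leading dim ldc) += alpha * A (rows x depth) * B (depth x cols)
void gebp_reference(double* C, int ldc, const double* A, const double* B,
                    int rows, int depth, int cols, double alpha)
{
  for (int j = 0; j < cols; j += nr)
    for (int i = 0; i < rows; i += mr)
    {
      double acc[mr][nr] = {};                       // the "AccPacket" registers
      for (int k = 0; k < depth; ++k)
        for (int jj = 0; jj < nr; ++jj)
          for (int ii = 0; ii < mr; ++ii)
            acc[ii][jj] += A[(i + ii) + k * rows] * B[k + (j + jj) * depth];
      for (int jj = 0; jj < nr; ++jj)
        for (int ii = 0; ii < mr; ++ii)
          C[(i + ii) + (j + jj) * ldc] += alpha * acc[ii][jj];
    }
}

int main()
{
  const int n = 4;
  std::vector<double> A(n * n, 1.0), B(n * n, 2.0), C(n * n, 0.0);
  gebp_reference(C.data(), n, A.data(), B.data(), n, n, n, 0.5);
  std::printf("%g\n", C[0]); // 0.5 * sum_k 1*2 = 4
}
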
-
-#undef CJMADD
-
-// pack a block of the lhs
-// The traversal is as follows (mr==4):
-//   0  4  8 12 ...
-//   1  5  9 13 ...
-//   2  6 10 14 ...
-//   3  7 11 15 ...
-//
-//  16 20 24 28 ...
-//  17 21 25 29 ...
-//  18 22 26 30 ...
-//  19 23 27 31 ...
-//
-//  32 33 34 35 ...
-//  36 37 38 39 ...
-template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
-struct gemm_pack_lhs
-{
-  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows,
-                  Index stride=0, Index offset=0)
-  {
-    typedef typename packet_traits<Scalar>::type Packet;
-    enum { PacketSize = packet_traits<Scalar>::size };
-
-    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
-    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
-    eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) );
-    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
-    const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
-    Index count = 0;
-    Index peeled_mc = (rows/Pack1)*Pack1;
-    for(Index i=0; i<peeled_mc; i+=Pack1)
-    {
-      if(PanelMode) count += Pack1 * offset;
-
-      if(StorageOrder==ColMajor)
-      {
-        for(Index k=0; k<depth; k++)
-        {
-          Packet A, B, C, D;
-          if(Pack1>=1*PacketSize) A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
-          if(Pack1>=2*PacketSize) B = ploadu<Packet>(&lhs(i+1*PacketSize, k));
-          if(Pack1>=3*PacketSize) C = ploadu<Packet>(&lhs(i+2*PacketSize, k));
-          if(Pack1>=4*PacketSize) D = ploadu<Packet>(&lhs(i+3*PacketSize, k));
-          if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; }
-          if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; }
-          if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; }
-          if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; }
-        }
-      }
-      else
-      {
-        for(Index k=0; k<depth; k++)
-        {
-          // TODO add a vectorized transpose here
-          Index w=0;
-          for(; w<Pack1-3; w+=4)
-          {
-            Scalar a(cj(lhs(i+w+0, k))),
-                   b(cj(lhs(i+w+1, k))),
-                   c(cj(lhs(i+w+2, k))),
-                   d(cj(lhs(i+w+3, k)));
-            blockA[count++] = a;
-            blockA[count++] = b;
-            blockA[count++] = c;
-            blockA[count++] = d;
-          }
-          if(Pack1%4)
-            for(;w<Pack1;++w)
-              blockA[count++] = cj(lhs(i+w, k));
-        }
-      }
-      if(PanelMode) count += Pack1 * (stride-offset-depth);
-    }
-    if(rows-peeled_mc>=Pack2)
-    {
-      if(PanelMode) count += Pack2*offset;
-      for(Index k=0; k<depth; k++)
-        for(Index w=0; w<Pack2; w++)
-          blockA[count++] = cj(lhs(peeled_mc+w, k));
-      if(PanelMode) count += Pack2 * (stride-offset-depth);
-      peeled_mc += Pack2;
-    }
-    for(Index i=peeled_mc; i<rows; i++)
-    {
-      if(PanelMode) count += offset;
-      for(Index k=0; k<depth; k++)
-        blockA[count++] = cj(lhs(i, k));
-      if(PanelMode) count += (stride-offset-depth);
-    }
-  }
-};
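
// Hedged model of the packing order documented above gemm_pack_lhs (Pack1 == 4,
// column-major lhs, no panel mode): rows are grouped in blocks of 4, and inside a block
// the 4 entries of each successive column are stored contiguously, which is exactly the
// 0 4 8 12 / 1 5 9 13 / ... numbering in the comment. Sizes are illustrative only.
#include <cstdio>

int main()
{
  const int rows = 8, depth = 3, Pack1 = 4;
  int count = 0;
  for (int i = 0; i < rows; i += Pack1)        // one 4-row block after another
    for (int k = 0; k < depth; ++k)            // columns of the current block
      for (int w = 0; w < Pack1; ++w)          // the 4 rows inside the block
        std::printf("blockA[%2d] = lhs(%d,%d)\n", count++, i + w, k);
  return 0;
}
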
-
-// copy a complete panel of the rhs
-// this version is optimized for column major matrices
-// The traversal order is as follows (nr==4):
-//  0  1  2  3   12 13 14 15   24 27
-//  4  5  6  7   16 17 18 19   25 28
-//  8  9 10 11   20 21 22 23   26 29
-//  .  .  .  .    .  .  .  .    .  .
-template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
-struct gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode>
-{
-  typedef typename packet_traits<Scalar>::type Packet;
-  enum { PacketSize = packet_traits<Scalar>::size };
-  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols,
-                  Index stride=0, Index offset=0)
-  {
-    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
-    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
-    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
-    Index packet_cols = (cols/nr) * nr;
-    Index count = 0;
-    for(Index j2=0; j2<packet_cols; j2+=nr)
-    {
-      // skip what we have before
-      if(PanelMode) count += nr * offset;
-      const Scalar* b0 = &rhs[(j2+0)*rhsStride];
-      const Scalar* b1 = &rhs[(j2+1)*rhsStride];
-      const Scalar* b2 = &rhs[(j2+2)*rhsStride];
-      const Scalar* b3 = &rhs[(j2+3)*rhsStride];
-      for(Index k=0; k<depth; k++)
-      {
-                  blockB[count+0] = cj(b0[k]);
-                  blockB[count+1] = cj(b1[k]);
-        if(nr==4) blockB[count+2] = cj(b2[k]);
-        if(nr==4) blockB[count+3] = cj(b3[k]);
-        count += nr;
-      }
-      // skip what we have after
-      if(PanelMode) count += nr * (stride-offset-depth);
-    }
-
-    // copy the remaining columns one at a time (nr==1)
-    for(Index j2=packet_cols; j2<cols; ++j2)
-    {
-      if(PanelMode) count += offset;
-      const Scalar* b0 = &rhs[(j2+0)*rhsStride];
-      for(Index k=0; k<depth; k++)
-      {
-        blockB[count] = cj(b0[k]);
-        count += 1;
-      }
-      if(PanelMode) count += (stride-offset-depth);
-    }
-  }
-};
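
// Hedged model of the column-major rhs packing above (nr == 4, no panel mode): groups of
// 4 columns are interleaved depth-wise, so blockB holds b0[0] b1[0] b2[0] b3[0] b0[1] ...
// which matches the 0..11 grid in the comment. Sizes are illustrative only.
#include <cstdio>

int main()
{
  const int depth = 3, cols = 4, nr = 4;
  int count = 0;
  for (int j2 = 0; j2 < cols; j2 += nr)
    for (int k = 0; k < depth; ++k)
      for (int j = 0; j < nr; ++j)
        std::printf("blockB[%2d] = rhs(%d,%d)\n", count++, k, j2 + j);
  return 0;
}
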
-
-// this version is optimized for row major matrices
-template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
-struct gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode>
-{
-  enum { PacketSize = packet_traits<Scalar>::size };
-  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols,
-                  Index stride=0, Index offset=0)
-  {
-    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
-    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
-    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
-    Index packet_cols = (cols/nr) * nr;
-    Index count = 0;
-    for(Index j2=0; j2<packet_cols; j2+=nr)
-    {
-      // skip what we have before
-      if(PanelMode) count += nr * offset;
-      for(Index k=0; k<depth; k++)
-      {
-        const Scalar* b0 = &rhs[k*rhsStride + j2];
-                  blockB[count+0] = cj(b0[0]);
-                  blockB[count+1] = cj(b0[1]);
-        if(nr==4) blockB[count+2] = cj(b0[2]);
-        if(nr==4) blockB[count+3] = cj(b0[3]);
-        count += nr;
-      }
-      // skip what we have after
-      if(PanelMode) count += nr * (stride-offset-depth);
-    }
-    // copy the remaining columns one at a time (nr==1)
-    for(Index j2=packet_cols; j2<cols; ++j2)
-    {
-      if(PanelMode) count += offset;
-      const Scalar* b0 = &rhs[j2];
-      for(Index k=0; k<depth; k++)
-      {
-        blockB[count] = cj(b0[k*rhsStride]);
-        count += 1;
-      }
-      if(PanelMode) count += stride-offset-depth;
-    }
-  }
-};
-
-} // end namespace internal
-
-/** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
-  * \sa setCpuCacheSize */
-inline std::ptrdiff_t l1CacheSize()
-{
-  std::ptrdiff_t l1, l2;
-  internal::manage_caching_sizes(GetAction, &l1, &l2);
-  return l1;
-}
-
-/** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
-  * \sa setCpuCacheSize */
-inline std::ptrdiff_t l2CacheSize()
-{
-  std::ptrdiff_t l1, l2;
-  internal::manage_caching_sizes(GetAction, &l1, &l2);
-  return l2;
-}
-
-/** Set the cpu L1 and L2 cache sizes (in bytes).
-  * These values are use to adjust the size of the blocks
-  * for the algorithms working per blocks.
-  *
-  * \sa computeProductBlockingSizes */
-inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2)
-{
-  internal::manage_caching_sizes(SetAction, &l1, &l2);
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_GENERAL_BLOCK_PANEL_H
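
// Hedged usage sketch for the cache-size helpers defined above; the blocking heuristics
// of the block-panel kernels are derived from these values. The 32 KB / 256 KB numbers
// below are example figures, not recommendations.
#include <Eigen/Core>
#include <cstdio>

int main()
{
  std::printf("L1: %td bytes, L2: %td bytes\n", Eigen::l1CacheSize(), Eigen::l2CacheSize());
  // Override the detected sizes, e.g. when tuning for a known target CPU.
  Eigen::setCpuCacheSizes(32 * 1024, 256 * 1024);
  return 0;
}
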
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixVector.h b/resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixVector.h
deleted file mode 100644
index 8895d3ab2..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/products/GeneralMatrixVector.h
+++ /dev/null
@@ -1,554 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_GENERAL_MATRIX_VECTOR_H
-#define EIGEN_GENERAL_MATRIX_VECTOR_H
-
-namespace Eigen { 
-
-namespace internal {
-
-/* Optimized col-major matrix * vector product:
- * This algorithm processes 4 columns at once, which allows both reducing
- * the number of loads/stores of the result by a factor of 4 and reducing
- * the instruction dependency. Moreover, we know that all bands have the
- * same alignment pattern.
- *
- * Mixing type logic: C += alpha * A * B
- *  |  A  |  B  |alpha| comments
- *  |real |cplx |cplx | no vectorization
- *  |real |cplx |real | alpha is converted to a cplx when calling the run function, no vectorization
- *  |cplx |real |cplx | invalid, the caller has to do tmp = A * B; C += alpha*tmp
- *  |cplx |real |real | optimal case, vectorization possible via real-cplx mul
- */
-template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
-struct general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>
-{
-typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
-
-enum {
-  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
-              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
-  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
-  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
-  ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
-};
-
-typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
-typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
-typedef typename packet_traits<ResScalar>::type  _ResPacket;
-
-typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
-typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
-typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
-
-EIGEN_DONT_INLINE static void run(
-  Index rows, Index cols,
-  const LhsScalar* lhs, Index lhsStride,
-  const RhsScalar* rhs, Index rhsIncr,
-  ResScalar* res, Index
-  #ifdef EIGEN_INTERNAL_DEBUGGING
-    resIncr
-  #endif
-  , RhsScalar alpha)
-{
-  eigen_internal_assert(resIncr==1);
-  #ifdef _EIGEN_ACCUMULATE_PACKETS
-  #error _EIGEN_ACCUMULATE_PACKETS has already been defined
-  #endif
-  #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) \
-    pstore(&res[j], \
-      padd(pload<ResPacket>(&res[j]), \
-        padd( \
-          padd(pcj.pmul(EIGEN_CAT(ploa , A0)<LhsPacket>(&lhs0[j]),    ptmp0), \
-                  pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs1[j]),   ptmp1)), \
-          padd(pcj.pmul(EIGEN_CAT(ploa , A2)<LhsPacket>(&lhs2[j]),    ptmp2), \
-                  pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs3[j]),   ptmp3)) )))
-
-  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
-  conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
-  if(ConjugateRhs)
-    alpha = conj(alpha);
-
-  enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
-  const Index columnsAtOnce = 4;
-  const Index peels = 2;
-  const Index LhsPacketAlignedMask = LhsPacketSize-1;
-  const Index ResPacketAlignedMask = ResPacketSize-1;
-//  const Index PeelAlignedMask = ResPacketSize*peels-1;
-  const Index size = rows;
-  
-  // How many coeffs of the result do we have to skip to be aligned.
-  // Here we assume data are at least aligned on the base scalar type.
-  Index alignedStart = internal::first_aligned(res,size);
-  Index alignedSize = ResPacketSize>1 ? alignedStart + ((size-alignedStart) & ~ResPacketAlignedMask) : 0;
-  const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
-
-  const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
-  Index alignmentPattern = alignmentStep==0 ? AllAligned
-                       : alignmentStep==(LhsPacketSize/2) ? EvenAligned
-                       : FirstAligned;
-
-  // we cannot assume the first element is aligned because of sub-matrices
-  const Index lhsAlignmentOffset = internal::first_aligned(lhs,size);
-
-  // find how many columns we have to skip to be aligned with the result (if possible)
-  Index skipColumns = 0;
-  // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
-  if( (size_t(lhs)%sizeof(LhsScalar)) || (size_t(res)%sizeof(ResScalar)) )
-  {
-    alignedSize = 0;
-    alignedStart = 0;
-  }
-  else if (LhsPacketSize>1)
-  {
-    eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || size<LhsPacketSize);
-
-    while (skipColumns<LhsPacketSize &&
-          alignedStart != ((lhsAlignmentOffset + alignmentStep*skipColumns)%LhsPacketSize))
-      ++skipColumns;
-    if (skipColumns==LhsPacketSize)
-    {
-      // nothing can be aligned, no need to skip any column
-      alignmentPattern = NoneAligned;
-      skipColumns = 0;
-    }
-    else
-    {
-      skipColumns = (std::min)(skipColumns,cols);
-      // note that the skipped columns are processed later.
-    }
-
-    eigen_internal_assert(  (alignmentPattern==NoneAligned)
-                      || (skipColumns + columnsAtOnce >= cols)
-                      || LhsPacketSize > size
-                      || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(LhsPacket))==0);
-  }
-  else if(Vectorizable)
-  {
-    alignedStart = 0;
-    alignedSize = size;
-    alignmentPattern = AllAligned;
-  }
-
-  Index offset1 = (FirstAligned && alignmentStep==1?3:1);
-  Index offset3 = (FirstAligned && alignmentStep==1?1:3);
-
-  Index columnBound = ((cols-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
-  for (Index i=skipColumns; i<columnBound; i+=columnsAtOnce)
-  {
-    RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[i*rhsIncr]),
-              ptmp1 = pset1<RhsPacket>(alpha*rhs[(i+offset1)*rhsIncr]),
-              ptmp2 = pset1<RhsPacket>(alpha*rhs[(i+2)*rhsIncr]),
-              ptmp3 = pset1<RhsPacket>(alpha*rhs[(i+offset3)*rhsIncr]);
-
-    // this helps a lot in generating better binary code
-    const LhsScalar *lhs0 = lhs + i*lhsStride,     *lhs1 = lhs + (i+offset1)*lhsStride,
-                    *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
-
-    if (Vectorizable)
-    {
-      /* explicit vectorization */
-      // process initial unaligned coeffs
-      for (Index j=0; j<alignedStart; ++j)
-      {
-        res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
-        res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
-        res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
-        res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
-      }
-
-      if (alignedSize>alignedStart)
-      {
-        switch(alignmentPattern)
-        {
-          case AllAligned:
-            for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
-              _EIGEN_ACCUMULATE_PACKETS(d,d,d);
-            break;
-          case EvenAligned:
-            for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
-              _EIGEN_ACCUMULATE_PACKETS(d,du,d);
-            break;
-          case FirstAligned:
-          {
-            Index j = alignedStart;
-            if(peels>1)
-            {
-              LhsPacket A00, A01, A02, A03, A10, A11, A12, A13;
-              ResPacket T0, T1;
-
-              A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
-              A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
-              A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
-
-              for (; j<peeledSize; j+=peels*ResPacketSize)
-              {
-                A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);
-                A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]);  palign<2>(A02,A12);
-                A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]);  palign<3>(A03,A13);
-
-                A00 = pload<LhsPacket>(&lhs0[j]);
-                A10 = pload<LhsPacket>(&lhs0[j+LhsPacketSize]);
-                T0  = pcj.pmadd(A00, ptmp0, pload<ResPacket>(&res[j]));
-                T1  = pcj.pmadd(A10, ptmp0, pload<ResPacket>(&res[j+ResPacketSize]));
-
-                T0  = pcj.pmadd(A01, ptmp1, T0);
-                A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]);  palign<1>(A11,A01);
-                T0  = pcj.pmadd(A02, ptmp2, T0);
-                A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]);  palign<2>(A12,A02);
-                T0  = pcj.pmadd(A03, ptmp3, T0);
-                pstore(&res[j],T0);
-                A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]);  palign<3>(A13,A03);
-                T1  = pcj.pmadd(A11, ptmp1, T1);
-                T1  = pcj.pmadd(A12, ptmp2, T1);
-                T1  = pcj.pmadd(A13, ptmp3, T1);
-                pstore(&res[j+ResPacketSize],T1);
-              }
-            }
-            for (; j<alignedSize; j+=ResPacketSize)
-              _EIGEN_ACCUMULATE_PACKETS(d,du,du);
-            break;
-          }
-          default:
-            for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
-              _EIGEN_ACCUMULATE_PACKETS(du,du,du);
-            break;
-        }
-      }
-    } // end explicit vectorization
-
-    /* process remaining coeffs (or all if there is no explicit vectorization) */
-    for (Index j=alignedSize; j<size; ++j)
-    {
-      res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
-      res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
-      res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
-      res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
-    }
-  }
-
-  // process remaining first and last columns (at most columnsAtOnce-1)
-  Index end = cols;
-  Index start = columnBound;
-  do
-  {
-    for (Index k=start; k<end; ++k)
-    {
-      RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[k*rhsIncr]);
-      const LhsScalar* lhs0 = lhs + k*lhsStride;
-
-      if (Vectorizable)
-      {
-        /* explicit vectorization */
-        // process first unaligned result's coeffs
-        for (Index j=0; j<alignedStart; ++j)
-          res[j] += cj.pmul(lhs0[j], pfirst(ptmp0));
-        // process aligned result's coeffs
-        if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
-          for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
-            pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
-        else
-          for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
-            pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
-      }
-
-      // process remaining scalars (or all if no explicit vectorization)
-      for (Index i=alignedSize; i<size; ++i)
-        res[i] += cj.pmul(lhs0[i], pfirst(ptmp0));
-    }
-    if (skipColumns)
-    {
-      start = 0;
-      end = skipColumns;
-      skipColumns = 0;
-    }
-    else
-      break;
-  } while(Vectorizable);
-  #undef _EIGEN_ACCUMULATE_PACKETS
-}
-};
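
// Hedged scalar reference of the col-major matrix*vector strategy above: process 4
// columns per pass so each coefficient of the result is loaded/stored once per 4
// multiply-adds instead of once per column. Alignment handling, peeling and packets are
// omitted; the lhs is column-major with leading dimension lhsStride.
#include <vector>
#include <cstdio>

void gemv_colmajor_ref(int rows, int cols, const double* lhs, int lhsStride,
                       const double* rhs, double* res, double alpha)
{
  const int columnsAtOnce = 4;
  const int columnBound = (cols / columnsAtOnce) * columnsAtOnce;
  for (int i = 0; i < columnBound; i += columnsAtOnce)
  {
    const double c0 = alpha * rhs[i + 0], c1 = alpha * rhs[i + 1],
                 c2 = alpha * rhs[i + 2], c3 = alpha * rhs[i + 3];
    const double *l0 = lhs + (i + 0) * lhsStride, *l1 = lhs + (i + 1) * lhsStride,
                 *l2 = lhs + (i + 2) * lhsStride, *l3 = lhs + (i + 3) * lhsStride;
    for (int j = 0; j < rows; ++j)                       // res[j] touched once per pass
      res[j] += l0[j] * c0 + l1[j] * c1 + l2[j] * c2 + l3[j] * c3;
  }
  for (int i = columnBound; i < cols; ++i)               // leftover columns
    for (int j = 0; j < rows; ++j)
      res[j] += lhs[i * lhsStride + j] * (alpha * rhs[i]);
}

int main()
{
  const int n = 5;
  std::vector<double> A(n * n, 1.0), x(n, 1.0), y(n, 0.0);
  gemv_colmajor_ref(n, n, A.data(), n, x.data(), y.data(), 1.0);
  std::printf("%g\n", y[0]); // 5
}
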
-
-/* Optimized row-major matrix * vector product:
- * This algorithm processes 4 rows at once, which allows both reducing
- * the number of loads/stores of the result by a factor of 4 and reducing
- * the instruction dependency. Moreover, we know that all bands have the
- * same alignment pattern.
- *
- * Mixing type logic:
- *  - alpha is always a complex (or converted to a complex)
- *  - no vectorization
- */
-template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
-struct general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>
-{
-typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
-
-enum {
-  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
-              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
-  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
-  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
-  ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
-};
-
-typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
-typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
-typedef typename packet_traits<ResScalar>::type  _ResPacket;
-
-typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
-typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
-typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
-  
-EIGEN_DONT_INLINE static void run(
-  Index rows, Index cols,
-  const LhsScalar* lhs, Index lhsStride,
-  const RhsScalar* rhs, Index rhsIncr,
-  ResScalar* res, Index resIncr,
-  ResScalar alpha)
-{
-  EIGEN_UNUSED_VARIABLE(rhsIncr);
-  eigen_internal_assert(rhsIncr==1);
-  #ifdef _EIGEN_ACCUMULATE_PACKETS
-  #error _EIGEN_ACCUMULATE_PACKETS has already been defined
-  #endif
-
-  #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) {\
-    RhsPacket b = pload<RhsPacket>(&rhs[j]); \
-    ptmp0 = pcj.pmadd(EIGEN_CAT(ploa,A0) <LhsPacket>(&lhs0[j]), b, ptmp0); \
-    ptmp1 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs1[j]), b, ptmp1); \
-    ptmp2 = pcj.pmadd(EIGEN_CAT(ploa,A2) <LhsPacket>(&lhs2[j]), b, ptmp2); \
-    ptmp3 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs3[j]), b, ptmp3); }
-
-  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
-  conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
-
-  enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 };
-  const Index rowsAtOnce = 4;
-  const Index peels = 2;
-  const Index RhsPacketAlignedMask = RhsPacketSize-1;
-  const Index LhsPacketAlignedMask = LhsPacketSize-1;
-//   const Index PeelAlignedMask = RhsPacketSize*peels-1;
-  const Index depth = cols;
-
-  // How many coeffs of the result do we have to skip to be aligned.
-  // Here we assume data are at least aligned on the base scalar type
-  // if that's not the case then vectorization is discarded, see below.
-  Index alignedStart = internal::first_aligned(rhs, depth);
-  Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0;
-  const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
-
-  const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
-  Index alignmentPattern = alignmentStep==0 ? AllAligned
-                         : alignmentStep==(LhsPacketSize/2) ? EvenAligned
-                         : FirstAligned;
-
-  // we cannot assume the first element is aligned because of sub-matrices
-  const Index lhsAlignmentOffset = internal::first_aligned(lhs,depth);
-
-  // find how many rows we have to skip to be aligned with rhs (if possible)
-  Index skipRows = 0;
-  // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
-  if( (sizeof(LhsScalar)!=sizeof(RhsScalar)) || (size_t(lhs)%sizeof(LhsScalar)) || (size_t(rhs)%sizeof(RhsScalar)) )
-  {
-    alignedSize = 0;
-    alignedStart = 0;
-  }
-  else if (LhsPacketSize>1)
-  {
-    eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0  || depth<LhsPacketSize);
-
-    while (skipRows<LhsPacketSize &&
-           alignedStart != ((lhsAlignmentOffset + alignmentStep*skipRows)%LhsPacketSize))
-      ++skipRows;
-    if (skipRows==LhsPacketSize)
-    {
-      // nothing can be aligned, no need to skip any row
-      alignmentPattern = NoneAligned;
-      skipRows = 0;
-    }
-    else
-    {
-      skipRows = (std::min)(skipRows,Index(rows));
-      // note that the skipped rows are processed later.
-    }
-    eigen_internal_assert(  alignmentPattern==NoneAligned
-                      || LhsPacketSize==1
-                      || (skipRows + rowsAtOnce >= rows)
-                      || LhsPacketSize > depth
-                      || (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(LhsPacket))==0);
-  }
-  else if(Vectorizable)
-  {
-    alignedStart = 0;
-    alignedSize = depth;
-    alignmentPattern = AllAligned;
-  }
-
-  Index offset1 = (FirstAligned && alignmentStep==1?3:1);
-  Index offset3 = (FirstAligned && alignmentStep==1?1:3);
-
-  Index rowBound = ((rows-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
-  for (Index i=skipRows; i<rowBound; i+=rowsAtOnce)
-  {
-    EIGEN_ALIGN16 ResScalar tmp0 = ResScalar(0);
-    ResScalar tmp1 = ResScalar(0), tmp2 = ResScalar(0), tmp3 = ResScalar(0);
-
-    // this helps the compiler generate good binary code
-    const LhsScalar *lhs0 = lhs + i*lhsStride,     *lhs1 = lhs + (i+offset1)*lhsStride,
-                    *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
-
-    if (Vectorizable)
-    {
-      /* explicit vectorization */
-      ResPacket ptmp0 = pset1<ResPacket>(ResScalar(0)), ptmp1 = pset1<ResPacket>(ResScalar(0)),
-                ptmp2 = pset1<ResPacket>(ResScalar(0)), ptmp3 = pset1<ResPacket>(ResScalar(0));
-
-      // process initial unaligned coeffs
-      // FIXME this loop gets vectorized by the compiler!
-      for (Index j=0; j<alignedStart; ++j)
-      {
-        RhsScalar b = rhs[j];
-        tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
-        tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
-      }
-
-      if (alignedSize>alignedStart)
-      {
-        switch(alignmentPattern)
-        {
-          case AllAligned:
-            for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
-              _EIGEN_ACCUMULATE_PACKETS(d,d,d);
-            break;
-          case EvenAligned:
-            for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
-              _EIGEN_ACCUMULATE_PACKETS(d,du,d);
-            break;
-          case FirstAligned:
-          {
-            Index j = alignedStart;
-            if (peels>1)
-            {
-              /* Here we process 4 rows with two peeled iterations to hide
-               * the overhead of unaligned loads. Moreover, unaligned loads are handled
-               * using special shift/move operations between the two aligned packets
-               * overlapping the desired unaligned packet. This is *much* more efficient
-               * than basic unaligned loads.
-               */
-              LhsPacket A01, A02, A03, A11, A12, A13;
-              A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
-              A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
-              A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
-
-              for (; j<peeledSize; j+=peels*RhsPacketSize)
-              {
-                RhsPacket b = pload<RhsPacket>(&rhs[j]);
-                A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);
-                A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]);  palign<2>(A02,A12);
-                A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]);  palign<3>(A03,A13);
-
-                ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), b, ptmp0);
-                ptmp1 = pcj.pmadd(A01, b, ptmp1);
-                A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]);  palign<1>(A11,A01);
-                ptmp2 = pcj.pmadd(A02, b, ptmp2);
-                A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]);  palign<2>(A12,A02);
-                ptmp3 = pcj.pmadd(A03, b, ptmp3);
-                A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]);  palign<3>(A13,A03);
-
-                b = pload<RhsPacket>(&rhs[j+RhsPacketSize]);
-                ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j+LhsPacketSize]), b, ptmp0);
-                ptmp1 = pcj.pmadd(A11, b, ptmp1);
-                ptmp2 = pcj.pmadd(A12, b, ptmp2);
-                ptmp3 = pcj.pmadd(A13, b, ptmp3);
-              }
-            }
-            for (; j<alignedSize; j+=RhsPacketSize)
-              _EIGEN_ACCUMULATE_PACKETS(d,du,du);
-            break;
-          }
-          default:
-            for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
-              _EIGEN_ACCUMULATE_PACKETS(du,du,du);
-            break;
-        }
-        tmp0 += predux(ptmp0);
-        tmp1 += predux(ptmp1);
-        tmp2 += predux(ptmp2);
-        tmp3 += predux(ptmp3);
-      }
-    } // end explicit vectorization
-
-    // process remaining coeffs (or all if no explicit vectorization)
-    // FIXME this loop gets vectorized by the compiler!
-    for (Index j=alignedSize; j<depth; ++j)
-    {
-      RhsScalar b = rhs[j];
-      tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
-      tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
-    }
-    res[i*resIncr]            += alpha*tmp0;
-    res[(i+offset1)*resIncr]  += alpha*tmp1;
-    res[(i+2)*resIncr]        += alpha*tmp2;
-    res[(i+offset3)*resIncr]  += alpha*tmp3;
-  }
-
-  // process remaining first and last rows (at most rowsAtOnce-1)
-  Index end = rows;
-  Index start = rowBound;
-  do
-  {
-    for (Index i=start; i<end; ++i)
-    {
-      EIGEN_ALIGN16 ResScalar tmp0 = ResScalar(0);
-      ResPacket ptmp0 = pset1<ResPacket>(tmp0);
-      const LhsScalar* lhs0 = lhs + i*lhsStride;
-      // process first unaligned result's coeffs
-      // FIXME this loop gets vectorized by the compiler!
-      for (Index j=0; j<alignedStart; ++j)
-        tmp0 += cj.pmul(lhs0[j], rhs[j]);
-
-      if (alignedSize>alignedStart)
-      {
-        // process aligned rhs coeffs
-        if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
-          for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
-            ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
-        else
-          for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
-            ptmp0 = pcj.pmadd(ploadu<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
-        tmp0 += predux(ptmp0);
-      }
-
-      // process remaining scalars
-      // FIXME this loop gets vectorized by the compiler!
-      for (Index j=alignedSize; j<depth; ++j)
-        tmp0 += cj.pmul(lhs0[j], rhs[j]);
-      res[i*resIncr] += alpha*tmp0;
-    }
-    if (skipRows)
-    {
-      start = 0;
-      end = skipRows;
-      skipRows = 0;
-    }
-    else
-      break;
-  } while(Vectorizable);
-
-  #undef _EIGEN_ACCUMULATE_PACKETS
-}
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_GENERAL_MATRIX_VECTOR_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/Constants.h b/resources/3rdparty/eigen/Eigen/src/Core/util/Constants.h
deleted file mode 100644
index 1732c1d80..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/util/Constants.h
+++ /dev/null
@@ -1,438 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_CONSTANTS_H
-#define EIGEN_CONSTANTS_H
-
-namespace Eigen {
-
-/** This value means that a positive quantity (e.g., a size) is not known at compile-time, and that instead the value is
-  * stored in some runtime variable.
-  *
-  * Changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix.
-  */
-const int Dynamic = -1;
-
-/** This value means that a signed quantity (e.g., a signed index) is not known at compile-time, and that instead its value
-  * has to be specified at runtime.
-  */
-const int DynamicIndex = 0xffffff;
-
-/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm<int>().
-  * The value Infinity there means the L-infinity norm.
-  */
-const int Infinity = -1;
-
-/** \defgroup flags Flags
-  * \ingroup Core_Module
-  *
-  * These are the possible bits which can be OR'ed to constitute the flags of a matrix or
-  * expression.
-  *
-  * It is important to note that these flags are a purely compile-time notion. They are a compile-time property of
-  * an expression type, implemented as enums. They are not stored in memory at runtime, and they do not incur any
-  * runtime overhead.
-  *
-  * \sa MatrixBase::Flags
-  */
-
-/** \ingroup flags
-  *
-  * For a matrix, this means that the storage order is row-major.
-  * If this bit is not set, the storage order is column-major.
-  * For an expression, this determines the storage order of
-  * the matrix created by evaluation of that expression. 
-  * \sa \ref TopicStorageOrders */
-const unsigned int RowMajorBit = 0x1;
-
-/** \ingroup flags
-  *
-  * means the expression should be evaluated by the calling expression */
-const unsigned int EvalBeforeNestingBit = 0x2;
-
-/** \ingroup flags
-  *
-  * means the expression should be evaluated before any assignment */
-const unsigned int EvalBeforeAssigningBit = 0x4;
-
-/** \ingroup flags
-  *
-  * Short version: means the expression might be vectorized
-  *
-  * Long version: means that the coefficients can be handled by packets
-  * and start at a memory location whose alignment meets the requirements
-  * of the present CPU architecture for optimized packet access. In the fixed-size
-  * case, there is the additional condition that it be possible to access all the
-  * coefficients by packets (this implies the requirement that the size be a multiple of 16 bytes,
-  * and that any nontrivial strides don't break the alignment). In the dynamic-size case,
-  * there is no such condition on the total size and strides, so it might not be possible to access
-  * all coeffs by packets.
-  *
-  * \note This bit can be set regardless of whether vectorization is actually enabled.
-  *       To check for actual vectorizability, see \a ActualPacketAccessBit.
-  */
-const unsigned int PacketAccessBit = 0x8;
-
-#ifdef EIGEN_VECTORIZE
-/** \ingroup flags
-  *
-  * If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant
-  * is set to the value \a PacketAccessBit.
-  *
-  * If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant
-  * is set to the value 0.
-  */
-const unsigned int ActualPacketAccessBit = PacketAccessBit;
-#else
-const unsigned int ActualPacketAccessBit = 0x0;
-#endif
-
-/** \ingroup flags
-  *
-  * Short version: means the expression can be seen as 1D vector.
-  *
-  * Long version: means that one can access the coefficients
-  * of this expression by coeff(int), and coeffRef(int) in the case of an lvalue expression. These
-  * index-based access methods are guaranteed
-  * to not have to do any runtime computation of a (row, col)-pair from the index, so that it
-  * is guaranteed that whenever it is available, index-based access is at least as fast as
-  * (row,col)-based access. Expressions for which that isn't possible don't have the LinearAccessBit.
-  *
-  * If both PacketAccessBit and LinearAccessBit are set, then the
-  * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of an
-  * lvalue expression.
-  *
-  * Typically, all vector expressions have the LinearAccessBit, but there is one exception:
-  * Product expressions don't have it, because it would be troublesome for vectorization, even when the
-  * Product is a vector expression. Thus, vector Product expressions allow index-based coefficient access but
-  * not index-based packet access, so they don't have the LinearAccessBit.
-  */
-const unsigned int LinearAccessBit = 0x10;
-
-/** \ingroup flags
-  *
-  * Means the expression has a coeffRef() method, i.e. is writable as its individual coefficients are directly addressable.
-  * This rules out read-only expressions.
-  *
-  * Note that DirectAccessBit and LvalueBit are mutually orthogonal, as there are examples of expressions having one but not
-  * the other:
-  *   \li writable expressions that don't have a simple strided-array memory layout have LvalueBit but not DirectAccessBit
-  *   \li Map-to-const expressions, for example Map<const Matrix>, have DirectAccessBit but not LvalueBit
-  *
-  * Expressions having LvalueBit also have their coeff() method returning a const reference instead of returning a new value.
-  */
-const unsigned int LvalueBit = 0x20;
-
-/** \ingroup flags
-  *
-  * Means that the underlying array of coefficients can be directly accessed as a plain strided array. The memory layout
-  * of the array of coefficients must be exactly the natural one suggested by rows(), cols(),
-  * outerStride(), innerStride(), and the RowMajorBit. This rules out expressions such as Diagonal, whose coefficients,
-  * though referenceable, do not have such a regular memory layout.
-  *
-  * See the comment on LvalueBit for an explanation of how LvalueBit and DirectAccessBit are mutually orthogonal.
-  */
-const unsigned int DirectAccessBit = 0x40;
-
-/** \ingroup flags
-  *
-  * means the first coefficient packet is guaranteed to be aligned */
-const unsigned int AlignedBit = 0x80;
-
-const unsigned int NestByRefBit = 0x100;
-
-// list of flags that are inherited by default
-const unsigned int HereditaryBits = RowMajorBit
-                                  | EvalBeforeNestingBit
-                                  | EvalBeforeAssigningBit;
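As a quick illustration of the compile-time nature of these flags, here is a minimal sketch (not part of the patch itself) that checks RowMajorBit on two matrix types; it only assumes the public Flags enum and the bit constants defined above.

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      // Flags are a property of the type, resolved entirely at compile time.
      std::cout << bool(Eigen::MatrixXf::Flags & Eigen::RowMajorBit) << "\n";   // 0: column-major default
      typedef Eigen::Matrix<float, 4, 4, Eigen::RowMajor> RowMajorMat4f;
      std::cout << bool(RowMajorMat4f::Flags & Eigen::RowMajorBit) << "\n";     // 1: row-major storage
      return 0;
    }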
-
-/** \defgroup enums Enumerations
-  * \ingroup Core_Module
-  *
-  * Various enumerations used in %Eigen. Many of these are used as template parameters.
-  */
-
-/** \ingroup enums
-  * Enum containing possible values for the \p Mode parameter of 
-  * MatrixBase::selfadjointView() and MatrixBase::triangularView(). */
-enum {
-  /** View matrix as a lower triangular matrix. */
-  Lower=0x1,                      
-  /** View matrix as an upper triangular matrix. */
-  Upper=0x2,                      
-  /** %Matrix has ones on the diagonal; to be used in combination with #Lower or #Upper. */
-  UnitDiag=0x4, 
-  /** %Matrix has zeros on the diagonal; to be used in combination with #Lower or #Upper. */
-  ZeroDiag=0x8,
-  /** View matrix as a lower triangular matrix with ones on the diagonal. */
-  UnitLower=UnitDiag|Lower, 
-  /** View matrix as an upper triangular matrix with ones on the diagonal. */
-  UnitUpper=UnitDiag|Upper,
-  /** View matrix as a lower triangular matrix with zeros on the diagonal. */
-  StrictlyLower=ZeroDiag|Lower, 
-  /** View matrix as an upper triangular matrix with zeros on the diagonal. */
-  StrictlyUpper=ZeroDiag|Upper,
-  /** Used in BandMatrix and SelfAdjointView to indicate that the matrix is self-adjoint. */
-  SelfAdjoint=0x10,
-  /** Used to support symmetric, non-selfadjoint, complex matrices. */
-  Symmetric=0x20
-};
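A minimal usage sketch of these Mode values through the public view API (illustrative only, not part of the patch):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::Matrix3d m = Eigen::Matrix3d::Random();
      Eigen::Vector3d b(1.0, 2.0, 3.0);

      // Product with the strictly lower triangular part of m.
      Eigen::Vector3d p = m.triangularView<Eigen::StrictlyLower>() * b;
      // Triangular solve using only the upper triangular part of m.
      Eigen::Vector3d x = m.triangularView<Eigen::Upper>().solve(b);
      // Treat m as self-adjoint, reading only its lower half.
      Eigen::Vector3d y = m.selfadjointView<Eigen::Lower>() * b;
      static_cast<void>(p); static_cast<void>(x); static_cast<void>(y);
      return 0;
    }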
-
-/** \ingroup enums
-  * Enum for indicating whether an object is aligned or not. */
-enum { 
-  /** Object is not correctly aligned for vectorization. */
-  Unaligned=0, 
-  /** Object is aligned for vectorization. */
-  Aligned=1 
-};
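A small sketch of how these values are typically passed to Map (illustrative only; EIGEN_ALIGN16 is the alignment macro defined in Macros.h further below):

    #include <Eigen/Core>

    int main()
    {
      // Aligned may only be used when the pointer is known to satisfy the
      // 16-byte packet alignment; Unaligned is the safe default.
      EIGEN_ALIGN16 float data[4] = {1.f, 2.f, 3.f, 4.f};
      Eigen::Map<Eigen::Vector4f, Eigen::Aligned> va(data);
      Eigen::Map<Eigen::Vector4f> vu(data);   // MapOptions defaults to Unaligned
      float s = va.sum() + vu.sum();
      static_cast<void>(s);
      return 0;
    }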
-
-/** \ingroup enums
- * Enum used by DenseBase::corner() in Eigen2 compatibility mode. */
-// FIXME after the corner() API change, this was not needed anymore, except by AlignedBox
-// TODO: find out what to do with that. Adapt the AlignedBox API ?
-enum CornerType { TopLeft, TopRight, BottomLeft, BottomRight };
-
-/** \ingroup enums
-  * Enum containing possible values for the \p Direction parameter of
-  * Reverse, PartialReduxExpr and VectorwiseOp. */
-enum DirectionType { 
-  /** For Reverse, all columns are reversed; 
-    * for PartialReduxExpr and VectorwiseOp, act on columns. */
-  Vertical, 
-  /** For Reverse, all rows are reversed; 
-    * for PartialReduxExpr and VectorwiseOp, act on rows. */
-  Horizontal, 
-  /** For Reverse, both rows and columns are reversed; 
-    * not used for PartialReduxExpr and VectorwiseOp. */
-  BothDirections 
-};
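These directions surface in the public API mainly through colwise()/rowwise() and reverse(); a minimal sketch (illustrative, not part of the patch):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3f m;
      m << 1, 2, 3,
           4, 5, 6,
           7, 8, 9;
      std::cout << m.colwise().sum()      << "\n\n";  // acts on columns (Vertical)
      std::cout << m.rowwise().maxCoeff() << "\n\n";  // acts on rows (Horizontal)
      std::cout << m.reverse()            << "\n";    // rows and columns reversed (BothDirections)
      return 0;
    }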
-
-/** \internal \ingroup enums
-  * Enum to specify how to traverse the entries of a matrix. */
-enum {
-  /** \internal Default traversal, no vectorization, no index-based access */
-  DefaultTraversal,
-  /** \internal No vectorization, use index-based access to have only one for loop instead of 2 nested loops */
-  LinearTraversal,
-  /** \internal Equivalent to a slice vectorization for fixed-size matrices having good alignment
-    * and good size */
-  InnerVectorizedTraversal,
-  /** \internal Vectorization path using a single loop plus scalar loops for the
-    * unaligned boundaries */
-  LinearVectorizedTraversal,
-  /** \internal Generic vectorization path using one vectorized loop per row/column with some
-    * scalar loops to handle the unaligned boundaries */
-  SliceVectorizedTraversal,
-  /** \internal Special case to properly handle incompatible scalar types or other defective cases */
-  InvalidTraversal,
-  /** \internal Evaluate all entries at once */
-  AllAtOnceTraversal
-};
-
-/** \internal \ingroup enums
-  * Enum to specify whether to unroll loops when traversing over the entries of a matrix. */
-enum {
-  /** \internal Do not unroll loops. */
-  NoUnrolling,
-  /** \internal Unroll only the inner loop, but not the outer loop. */
-  InnerUnrolling,
-  /** \internal Unroll both the inner and the outer loop. If there is only one loop, 
-    * because linear traversal is used, then unroll that loop. */
-  CompleteUnrolling
-};
-
-/** \internal \ingroup enums
-  * Enum to specify whether to use the default (built-in) implementation or the specialization. */
-enum {
-  Specialized,
-  BuiltIn
-};
-
-/** \ingroup enums
-  * Enum containing possible values for the \p _Options template parameter of
-  * Matrix, Array and BandMatrix. */
-enum {
-  /** Storage order is column major (see \ref TopicStorageOrders). */
-  ColMajor = 0,
-  /** Storage order is row major (see \ref TopicStorageOrders). */
-  RowMajor = 0x1,  // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that
-  /** \internal Align the matrix itself if it is vectorizable fixed-size */
-  AutoAlign = 0,
-  /** \internal Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be requested to be aligned) */ // FIXME --- clarify the situation
-  DontAlign = 0x2
-};
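For reference, a minimal sketch of how these _Options are passed to Matrix (illustrative only):

    #include <Eigen/Core>

    // Column-major is the default storage order; RowMajor switches it.
    typedef Eigen::Matrix<double, 3, 4> ColMajorMat;
    typedef Eigen::Matrix<double, 3, 4, Eigen::RowMajor> RowMajorMat;
    // DontAlign is useful for fixed-size objects living in unaligned buffers.
    typedef Eigen::Matrix<float, 4, 1, Eigen::DontAlign> UnalignedVec4f;

    int main()
    {
      ColMajorMat a = ColMajorMat::Zero();
      RowMajorMat b = a;                    // assignment works across storage orders
      UnalignedVec4f v(1.f, 2.f, 3.f, 4.f);
      static_cast<void>(b); static_cast<void>(v);
      return 0;
    }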
-
-/** \ingroup enums
-  * Enum for specifying whether to apply or solve on the left or right. */
-enum {
-  /** Apply transformation on the left. */
-  OnTheLeft = 1,  
-  /** Apply transformation on the right. */
-  OnTheRight = 2  
-};
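A minimal sketch of the corresponding side-aware API (illustrative; applyOnTheLeft/applyOnTheRight are the public entry points, while HouseholderSequence uses the Side template parameter internally):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::Matrix2d a = Eigen::Matrix2d::Random();
      Eigen::Matrix2d r;
      r << 0, -1,
           1,  0;
      a.applyOnTheLeft(r);    // a = r * a
      a.applyOnTheRight(r);   // a = a * r
      return 0;
    }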
-
-/* the following used to be written as:
- *
- *   struct NoChange_t {};
- *   namespace {
- *     EIGEN_UNUSED NoChange_t NoChange;
- *   }
- *
- * on the grounds that it feels dangerous to disambiguate overloaded functions on enum/integer types.
- * However, this leads to "variable declared but never referenced" warnings on Intel Composer XE,
- * and we do not know how to get rid of them (bug 450).
- */
-
-enum NoChange_t   { NoChange };
-enum Sequential_t { Sequential };
-enum Default_t    { Default };
-
-/** \internal \ingroup enums
-  * Used in AmbiVector. */
-enum {
-  IsDense         = 0,
-  IsSparse
-};
-
-/** \ingroup enums
-  * Used as template parameter in DenseCoeffBase and MapBase to indicate 
-  * which accessors should be provided. */
-enum AccessorLevels {
-  /** Read-only access via a member function. */
-  ReadOnlyAccessors, 
-  /** Read/write access via member functions. */
-  WriteAccessors, 
-  /** Direct read-only access to the coefficients. */
-  DirectAccessors, 
-  /** Direct read/write access to the coefficients. */
-  DirectWriteAccessors
-};
-
-/** \ingroup enums
-  * Enum with options to give to various decompositions. */
-enum DecompositionOptions {
-  /** \internal Not used (meant for LDLT?). */
-  Pivoting            = 0x01, 
-  /** \internal Not used (meant for LDLT?). */
-  NoPivoting          = 0x02, 
-  /** Used in JacobiSVD to indicate that the square matrix U is to be computed. */
-  ComputeFullU        = 0x04,
-  /** Used in JacobiSVD to indicate that the thin matrix U is to be computed. */
-  ComputeThinU        = 0x08,
-  /** Used in JacobiSVD to indicate that the square matrix V is to be computed. */
-  ComputeFullV        = 0x10,
-  /** Used in JacobiSVD to indicate that the thin matrix V is to be computed. */
-  ComputeThinV        = 0x20,
-  /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
-    * that only the eigenvalues are to be computed and not the eigenvectors. */
-  EigenvaluesOnly     = 0x40,
-  /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
-    * that both the eigenvalues and the eigenvectors are to be computed. */
-  ComputeEigenvectors = 0x80,
-  /** \internal */
-  EigVecMask = EigenvaluesOnly | ComputeEigenvectors,
-  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
-    * solve the generalized eigenproblem \f$ Ax = \lambda B x \f$. */
-  Ax_lBx              = 0x100,
-  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
-    * solve the generalized eigenproblem \f$ ABx = \lambda x \f$. */
-  ABx_lx              = 0x200,
-  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
-    * solve the generalized eigenproblem \f$ BAx = \lambda x \f$. */
-  BAx_lx              = 0x400,
-  /** \internal */
-  GenEigMask = Ax_lBx | ABx_lx | BAx_lx
-};
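A short usage sketch of these options with JacobiSVD and SelfAdjointEigenSolver (illustrative, not part of the patch):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(5, 3);
      // Request the thin U and V factors only.
      Eigen::JacobiSVD<Eigen::MatrixXd> svd(m, Eigen::ComputeThinU | Eigen::ComputeThinV);
      Eigen::MatrixXd u = svd.matrixU();     // 5 x 3 instead of 5 x 5

      Eigen::Matrix2d s;
      s << 2, 1,
           1, 3;
      // Compute eigenvalues only, skipping the eigenvectors.
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> es(s, Eigen::EigenvaluesOnly);
      Eigen::Vector2d ev = es.eigenvalues();
      static_cast<void>(u); static_cast<void>(ev);
      return 0;
    }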
-
-/** \ingroup enums
-  * Possible values for the \p QRPreconditioner template parameter of JacobiSVD. */
-enum QRPreconditioners {
-  /** Do not specify what is to be done if the SVD of a non-square matrix is asked for. */
-  NoQRPreconditioner,
-  /** Use a QR decomposition without pivoting as the first step. */
-  HouseholderQRPreconditioner,
-  /** Use a QR decomposition with column pivoting as the first step. */
-  ColPivHouseholderQRPreconditioner,
-  /** Use a QR decomposition with full pivoting as the first step. */
-  FullPivHouseholderQRPreconditioner
-};
-
-#ifdef Success
-#error The preprocessor symbol 'Success' is defined, possibly by the X11 header file X.h
-#endif
-
-/** \ingroup enums
-  * Enum for reporting the status of a computation. */
-enum ComputationInfo {
-  /** Computation was successful. */
-  Success = 0,        
-  /** The provided data did not satisfy the prerequisites. */
-  NumericalIssue = 1, 
-  /** Iterative procedure did not converge. */
-  NoConvergence = 2,
-  /** The inputs are invalid, or the algorithm has been improperly called.
-    * When assertions are enabled, such errors trigger an assert. */
-  InvalidInput = 3
-};
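Typical use of ComputationInfo through a decomposition's info() method, as a minimal sketch:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix2d a;
      a << 4, 1,
           1, 3;
      Eigen::LLT<Eigen::Matrix2d> llt(a);
      if (llt.info() == Eigen::Success)
        std::cout << "Cholesky factorization succeeded\n";
      else if (llt.info() == Eigen::NumericalIssue)
        std::cout << "matrix does not appear to be positive definite\n";
      return 0;
    }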
-
-/** \ingroup enums
-  * Enum used to specify how a particular transformation is stored in a matrix.
-  * \sa Transform, Hyperplane::transform(). */
-enum TransformTraits {
-  /** Transformation is an isometry. */
-  Isometry      = 0x1,
-  /** Transformation is an affine transformation stored as a (Dim+1)^2 matrix whose last row is 
-    * assumed to be [0 ... 0 1]. */
-  Affine        = 0x2,
-  /** Transformation is an affine transformation stored as a (Dim) x (Dim+1) matrix. */
-  AffineCompact = 0x10 | Affine,
-  /** Transformation is a general projective transformation stored as a (Dim+1)^2 matrix. */
-  Projective    = 0x20
-};
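A minimal sketch of how these traits select the storage of a Transform (illustrative only):

    #include <Eigen/Geometry>

    int main()
    {
      // Affine stores the full (Dim+1)x(Dim+1) matrix with last row [0 0 0 1];
      // AffineCompact would store only the top Dim x (Dim+1) block.
      Eigen::Transform<float, 3, Eigen::Affine> t =
          Eigen::Transform<float, 3, Eigen::Affine>::Identity();
      t.translate(Eigen::Vector3f(1.f, 2.f, 3.f));
      t.rotate(Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitZ()));
      Eigen::Vector3f p = t * Eigen::Vector3f::UnitX();   // apply to a point
      static_cast<void>(p);
      return 0;
    }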
-
-/** \internal \ingroup enums
-  * Enum used to choose between implementation depending on the computer architecture. */
-namespace Architecture
-{
-  enum Type {
-    Generic = 0x0,
-    SSE = 0x1,
-    AltiVec = 0x2,
-#if defined EIGEN_VECTORIZE_SSE
-    Target = SSE
-#elif defined EIGEN_VECTORIZE_ALTIVEC
-    Target = AltiVec
-#else
-    Target = Generic
-#endif
-  };
-}
-
-/** \internal \ingroup enums
-  * Enum used as template parameter in GeneralProduct. */
-enum { CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };
-
-/** \internal \ingroup enums
-  * Enum used in experimental parallel implementation. */
-enum Action {GetAction, SetAction};
-
-/** The type used to identify a dense storage. */
-struct Dense {};
-
-/** The type used to identify a matrix expression */
-struct MatrixXpr {};
-
-/** The type used to identify an array expression */
-struct ArrayXpr {};
-
-} // end namespace Eigen
-
-#endif // EIGEN_CONSTANTS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/ForwardDeclarations.h b/resources/3rdparty/eigen/Eigen/src/Core/util/ForwardDeclarations.h
deleted file mode 100644
index 58e1d87dc..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/util/ForwardDeclarations.h
+++ /dev/null
@@ -1,300 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_FORWARDDECLARATIONS_H
-#define EIGEN_FORWARDDECLARATIONS_H
-
-namespace Eigen {
-namespace internal {
-
-template<typename T> struct traits;
-
-// here we say once and for all that traits<const T> == traits<T>
-// When constness must affect traits, it has to be constness on template parameters on which T itself depends.
-// For example, traits<Map<const T> > != traits<Map<T> >, but
-//              traits<const Map<T> > == traits<Map<T> >
-template<typename T> struct traits<const T> : traits<T> {};
-
-template<typename Derived> struct has_direct_access
-{
-  enum { ret = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0 };
-};
-
-template<typename Derived> struct accessors_level
-{
-  enum { has_direct_access = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0,
-         has_write_access = (traits<Derived>::Flags & LvalueBit) ? 1 : 0,
-         value = has_direct_access ? (has_write_access ? DirectWriteAccessors : DirectAccessors)
-                                   : (has_write_access ? WriteAccessors       : ReadOnlyAccessors)
-  };
-};
-
-} // end namespace internal
-
-template<typename T> struct NumTraits;
-
-template<typename Derived> struct EigenBase;
-template<typename Derived> class DenseBase;
-template<typename Derived> class PlainObjectBase;
-
-
-template<typename Derived,
-         int Level = internal::accessors_level<Derived>::value >
-class DenseCoeffsBase;
-
-template<typename _Scalar, int _Rows, int _Cols,
-         int _Options = AutoAlign |
-#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4
-    // workaround a bug in at least gcc 3.4.6
-    // the innermost ?: ternary operator is misparsed. We write it slightly
-    // differently and this makes gcc 3.4.6 happy, but it's ugly.
-    // The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined
-    // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
-                          ( (_Rows==1 && _Cols!=1) ? RowMajor
-                          : !(_Cols==1 && _Rows!=1) ?  EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
-                          : ColMajor ),
-#else
-                          ( (_Rows==1 && _Cols!=1) ? RowMajor
-                          : (_Cols==1 && _Rows!=1) ? ColMajor
-                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
-#endif
-         int _MaxRows = _Rows,
-         int _MaxCols = _Cols
-> class Matrix;
-
-template<typename Derived> class MatrixBase;
-template<typename Derived> class ArrayBase;
-
-template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged;
-template<typename ExpressionType, template <typename> class StorageBase > class NoAlias;
-template<typename ExpressionType> class NestByValue;
-template<typename ExpressionType> class ForceAlignedAccess;
-template<typename ExpressionType> class SwapWrapper;
-
-template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false,
-         bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class Block;
-
-template<typename MatrixType, int Size=Dynamic> class VectorBlock;
-template<typename MatrixType> class Transpose;
-template<typename MatrixType> class Conjugate;
-template<typename NullaryOp, typename MatrixType>         class CwiseNullaryOp;
-template<typename UnaryOp,   typename MatrixType>         class CwiseUnaryOp;
-template<typename ViewOp,    typename MatrixType>         class CwiseUnaryView;
-template<typename BinaryOp,  typename Lhs, typename Rhs>  class CwiseBinaryOp;
-template<typename BinOp,     typename Lhs, typename Rhs>  class SelfCwiseBinaryOp;
-template<typename Derived,   typename Lhs, typename Rhs>  class ProductBase;
-template<typename Lhs, typename Rhs, int Mode>            class GeneralProduct;
-template<typename Lhs, typename Rhs, int NestingFlags>    class CoeffBasedProduct;
-
-template<typename Derived> class DiagonalBase;
-template<typename _DiagonalVectorType> class DiagonalWrapper;
-template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime=SizeAtCompileTime> class DiagonalMatrix;
-template<typename MatrixType, typename DiagonalType, int ProductOrder> class DiagonalProduct;
-template<typename MatrixType, int Index = 0> class Diagonal;
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class PermutationMatrix;
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class Transpositions;
-template<typename Derived> class PermutationBase;
-template<typename Derived> class TranspositionsBase;
-template<typename _IndicesType> class PermutationWrapper;
-template<typename _IndicesType> class TranspositionsWrapper;
-
-template<typename Derived,
-         int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
-> class MapBase;
-template<int InnerStrideAtCompileTime, int OuterStrideAtCompileTime> class Stride;
-template<typename MatrixType, int MapOptions=Unaligned, typename StrideType = Stride<0,0> > class Map;
-
-template<typename Derived> class TriangularBase;
-template<typename MatrixType, unsigned int Mode> class TriangularView;
-template<typename MatrixType, unsigned int Mode> class SelfAdjointView;
-template<typename MatrixType> class SparseView;
-template<typename ExpressionType> class WithFormat;
-template<typename MatrixType> struct CommaInitializer;
-template<typename Derived> class ReturnByValue;
-template<typename ExpressionType> class ArrayWrapper;
-template<typename ExpressionType> class MatrixWrapper;
-
-namespace internal {
-template<typename DecompositionType, typename Rhs> struct solve_retval_base;
-template<typename DecompositionType, typename Rhs> struct solve_retval;
-template<typename DecompositionType> struct kernel_retval_base;
-template<typename DecompositionType> struct kernel_retval;
-template<typename DecompositionType> struct image_retval_base;
-template<typename DecompositionType> struct image_retval;
-} // end namespace internal
-
-namespace internal {
-template<typename _Scalar, int Rows=Dynamic, int Cols=Dynamic, int Supers=Dynamic, int Subs=Dynamic, int Options=0> class BandMatrix;
-}
-
-namespace internal {
-template<typename Lhs, typename Rhs> struct product_type;
-}
-
-template<typename Lhs, typename Rhs,
-         int ProductType = internal::product_type<Lhs,Rhs>::value>
-struct ProductReturnType;
-
-// this is a workaround for sun CC
-template<typename Lhs, typename Rhs> struct LazyProductReturnType;
-
-namespace internal {
-
-// Provides scalar/packet-wise product and product with accumulation
-// with optional conjugation of the arguments.
-template<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRhs=false> struct conj_helper;
-
-template<typename Scalar> struct scalar_sum_op;
-template<typename Scalar> struct scalar_difference_op;
-template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op;
-template<typename Scalar> struct scalar_opposite_op;
-template<typename Scalar> struct scalar_conjugate_op;
-template<typename Scalar> struct scalar_real_op;
-template<typename Scalar> struct scalar_imag_op;
-template<typename Scalar> struct scalar_abs_op;
-template<typename Scalar> struct scalar_abs2_op;
-template<typename Scalar> struct scalar_sqrt_op;
-template<typename Scalar> struct scalar_exp_op;
-template<typename Scalar> struct scalar_log_op;
-template<typename Scalar> struct scalar_cos_op;
-template<typename Scalar> struct scalar_sin_op;
-template<typename Scalar> struct scalar_acos_op;
-template<typename Scalar> struct scalar_asin_op;
-template<typename Scalar> struct scalar_tan_op;
-template<typename Scalar> struct scalar_pow_op;
-template<typename Scalar> struct scalar_inverse_op;
-template<typename Scalar> struct scalar_square_op;
-template<typename Scalar> struct scalar_cube_op;
-template<typename Scalar, typename NewType> struct scalar_cast_op;
-template<typename Scalar> struct scalar_multiple_op;
-template<typename Scalar> struct scalar_quotient1_op;
-template<typename Scalar> struct scalar_min_op;
-template<typename Scalar> struct scalar_max_op;
-template<typename Scalar> struct scalar_random_op;
-template<typename Scalar> struct scalar_add_op;
-template<typename Scalar> struct scalar_constant_op;
-template<typename Scalar> struct scalar_identity_op;
-
-template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_product_op;
-template<typename LhsScalar,typename RhsScalar> struct scalar_multiple2_op;
-template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_quotient_op;
-
-} // end namespace internal
-
-struct IOFormat;
-
-// Array module
-template<typename _Scalar, int _Rows, int _Cols,
-         int _Options = AutoAlign |
-#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4
-    // workaround a bug in at least gcc 3.4.6
-    // the innermost ?: ternary operator is misparsed. We write it slightly
-    // differently and this makes gcc 3.4.6 happy, but it's ugly.
-    // The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined
-    // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
-                          ( (_Rows==1 && _Cols!=1) ? RowMajor
-                          : !(_Cols==1 && _Rows!=1) ?  EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
-                          : ColMajor ),
-#else
-                          ( (_Rows==1 && _Cols!=1) ? RowMajor
-                          : (_Cols==1 && _Rows!=1) ? ColMajor
-                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
-#endif
-         int _MaxRows = _Rows, int _MaxCols = _Cols> class Array;
-template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> class Select;
-template<typename MatrixType, typename BinaryOp, int Direction> class PartialReduxExpr;
-template<typename ExpressionType, int Direction> class VectorwiseOp;
-template<typename MatrixType,int RowFactor,int ColFactor> class Replicate;
-template<typename MatrixType, int Direction = BothDirections> class Reverse;
-
-template<typename MatrixType> class FullPivLU;
-template<typename MatrixType> class PartialPivLU;
-namespace internal {
-template<typename MatrixType> struct inverse_impl;
-}
-template<typename MatrixType> class HouseholderQR;
-template<typename MatrixType> class ColPivHouseholderQR;
-template<typename MatrixType> class FullPivHouseholderQR;
-template<typename MatrixType, int QRPreconditioner = ColPivHouseholderQRPreconditioner> class JacobiSVD;
-template<typename MatrixType, int UpLo = Lower> class LLT;
-template<typename MatrixType, int UpLo = Lower> class LDLT;
-template<typename VectorsType, typename CoeffsType, int Side=OnTheLeft> class HouseholderSequence;
-template<typename Scalar>     class JacobiRotation;
-
-// Geometry module:
-template<typename Derived, int _Dim> class RotationBase;
-template<typename Lhs, typename Rhs> class Cross;
-template<typename Derived> class QuaternionBase;
-template<typename Scalar> class Rotation2D;
-template<typename Scalar> class AngleAxis;
-template<typename Scalar,int Dim> class Translation;
-
-#ifdef EIGEN2_SUPPORT
-template<typename Derived, int _Dim> class eigen2_RotationBase;
-template<typename Lhs, typename Rhs> class eigen2_Cross;
-template<typename Scalar> class eigen2_Quaternion;
-template<typename Scalar> class eigen2_Rotation2D;
-template<typename Scalar> class eigen2_AngleAxis;
-template<typename Scalar,int Dim> class eigen2_Transform;
-template <typename _Scalar, int _AmbientDim> class eigen2_ParametrizedLine;
-template <typename _Scalar, int _AmbientDim> class eigen2_Hyperplane;
-template<typename Scalar,int Dim> class eigen2_Translation;
-template<typename Scalar,int Dim> class eigen2_Scaling;
-#endif
-
-#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
-template<typename Scalar> class Quaternion;
-template<typename Scalar,int Dim> class Transform;
-template <typename _Scalar, int _AmbientDim> class ParametrizedLine;
-template <typename _Scalar, int _AmbientDim> class Hyperplane;
-template<typename Scalar,int Dim> class Scaling;
-#endif
-
-#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
-template<typename Scalar, int Options = AutoAlign> class Quaternion;
-template<typename Scalar,int Dim,int Mode,int _Options=AutoAlign> class Transform;
-template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class ParametrizedLine;
-template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class Hyperplane;
-template<typename Scalar> class UniformScaling;
-template<typename MatrixType,int Direction> class Homogeneous;
-#endif
-
-// MatrixFunctions module
-template<typename Derived> struct MatrixExponentialReturnValue;
-template<typename Derived> class MatrixFunctionReturnValue;
-template<typename Derived> class MatrixSquareRootReturnValue;
-template<typename Derived> class MatrixLogarithmReturnValue;
-template<typename Derived> class MatrixPowerReturnValue;
-template<typename Derived, typename Lhs, typename Rhs> class MatrixPowerProductBase;
-
-namespace internal {
-template <typename Scalar>
-struct stem_function
-{
-  typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
-  typedef ComplexScalar type(ComplexScalar, int);
-};
-}
-
-
-#ifdef EIGEN2_SUPPORT
-template<typename ExpressionType> class Cwise;
-template<typename MatrixType> class Minor;
-template<typename MatrixType> class LU;
-template<typename MatrixType> class QR;
-template<typename MatrixType> class SVD;
-namespace internal {
-template<typename MatrixType, unsigned int Mode> struct eigen2_part_return_type;
-}
-#endif
-
-} // end namespace Eigen
-
-#endif // EIGEN_FORWARDDECLARATIONS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/Macros.h b/resources/3rdparty/eigen/Eigen/src/Core/util/Macros.h
deleted file mode 100644
index 1bbd24b8b..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/util/Macros.h
+++ /dev/null
@@ -1,410 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MACROS_H
-#define EIGEN_MACROS_H
-
-#define EIGEN_WORLD_VERSION 3
-#define EIGEN_MAJOR_VERSION 1
-#define EIGEN_MINOR_VERSION 90
-
-#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
-                                      (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
-                                                                 EIGEN_MINOR_VERSION>=z))))
-#ifdef __GNUC__
-  #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)
-#else
-  #define EIGEN_GNUC_AT_LEAST(x,y) 0
-#endif
- 
-#ifdef __GNUC__
-  #define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)
-#else
-  #define EIGEN_GNUC_AT_MOST(x,y) 0
-#endif
-
-#if EIGEN_GNUC_AT_MOST(4,3) && !defined(__clang__)
-  // see bug 89
-  #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
-#else
-  #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
-#endif
-
-#if defined(__GNUC__) && (__GNUC__ <= 3)
-#define EIGEN_GCC3_OR_OLDER 1
-#else
-#define EIGEN_GCC3_OR_OLDER 0
-#endif
-
-// 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
-// 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
-// enable alignment, but it can be a cause of problems on some platforms, so we just disable it in
-// certain common platforms (compiler+architecture combinations) to avoid these problems.
-// Only static alignment is really problematic (it relies on nonstandard compiler extensions that don't
-// work everywhere, for example on GCC/ARM), so we try to keep heap alignment even
-// when we have to disable static alignment.
-#if defined(__GNUC__) && !(defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || defined(__ppc__) || defined(__ia64__))
-#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
-#else
-#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
-#endif
-
-// static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
-#if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
- && !EIGEN_GCC3_OR_OLDER \
- && !defined(__SUNPRO_CC) \
- && !defined(__QNXNTO__)
-  #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
-#else
-  #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
-#endif
-
-#ifdef EIGEN_DONT_ALIGN
-  #ifndef EIGEN_DONT_ALIGN_STATICALLY
-    #define EIGEN_DONT_ALIGN_STATICALLY
-  #endif
-  #define EIGEN_ALIGN 0
-#else
-  #define EIGEN_ALIGN 1
-#endif
-
-// EIGEN_ALIGN_STATICALLY is the true test whether we want to align arrays on the stack or not. It takes into account both the user choice to explicitly disable
-// alignment (EIGEN_DONT_ALIGN_STATICALLY) and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT). Henceforth, only EIGEN_ALIGN_STATICALLY should be used.
-#if EIGEN_ARCH_WANTS_STACK_ALIGNMENT && !defined(EIGEN_DONT_ALIGN_STATICALLY)
-  #define EIGEN_ALIGN_STATICALLY 1
-#else
-  #define EIGEN_ALIGN_STATICALLY 0
-  #ifndef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
-    #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
-  #endif
-#endif
-
-#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
-#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION RowMajor
-#else
-#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ColMajor
-#endif
-
-#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
-#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
-#endif
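These configuration macros are meant to be defined by the user before the first Eigen include (typically via compiler -D flags); a minimal sketch, assuming the documented EIGEN_DEFAULT_TO_ROW_MAJOR switch:

    // Must come before any Eigen header.
    #define EIGEN_DEFAULT_TO_ROW_MAJOR
    #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
    #include <Eigen/Core>

    int main()
    {
      Eigen::MatrixXd m(2, 3);               // now row-major by default
      Eigen::MatrixXd::Index i = m.rows();   // Index is now a plain int
      static_cast<void>(i);
      return 0;
    }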
-
-/** Allows disabling some optimizations which might affect the accuracy of the result.
-  * Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them.
-  * They currently include:
-  *   - single precision Cwise::sin() and Cwise::cos() when SSE vectorization is enabled.
-  */
-#ifndef EIGEN_FAST_MATH
-#define EIGEN_FAST_MATH 1
-#endif
-
-#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl;
-
-// concatenate two tokens
-#define EIGEN_CAT2(a,b) a ## b
-#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b)
-
-// convert a token to a string
-#define EIGEN_MAKESTRING2(a) #a
-#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
-
-// EIGEN_STRONG_INLINE is a stronger version of the inline, using __forceinline on MSVC,
-// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline
-// but GCC is still doing fine with just inline.
-#if (defined _MSC_VER) || (defined __INTEL_COMPILER)
-#define EIGEN_STRONG_INLINE __forceinline
-#else
-#define EIGEN_STRONG_INLINE inline
-#endif
-
-// EIGEN_ALWAYS_INLINE is the strongest; it has the effect of making the function inline and adding every possible
-// attribute to maximize inlining. This should only be used when really necessary: in particular,
-// it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times.
-// FIXME with the always_inline attribute,
-// gcc 3.4.x reports the following compilation error:
-//   Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
-//    : function body not available
-#if EIGEN_GNUC_AT_LEAST(4,0)
-#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
-#else
-#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE
-#endif
-
-#if (defined __GNUC__)
-#define EIGEN_DONT_INLINE __attribute__((noinline))
-#elif (defined _MSC_VER)
-#define EIGEN_DONT_INLINE __declspec(noinline)
-#else
-#define EIGEN_DONT_INLINE
-#endif
-
-// this macro allows getting rid of linking errors about multiply defined functions.
-//  - static is not very good because it prevents definitions from different object files from being merged.
-//           So static causes the resulting linked executable to be bloated with multiple copies of the same function.
-//  - inline is not perfect either, as it needlessly hints the compiler toward inlining the function.
-#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS inline
-
-#ifdef NDEBUG
-# ifndef EIGEN_NO_DEBUG
-#  define EIGEN_NO_DEBUG
-# endif
-#endif
-
-// eigen_plain_assert is where we implement the workaround for the assert() bug in GCC <= 4.3, see bug 89
-#ifdef EIGEN_NO_DEBUG
-  #define eigen_plain_assert(x)
-#else
-  #if EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO
-    namespace Eigen {
-    namespace internal {
-    inline bool copy_bool(bool b) { return b; }
-    }
-    }
-    #define eigen_plain_assert(x) assert(x)
-  #else
-    // work around bug 89
-    #include <cstdlib>   // for abort
-    #include <iostream>  // for std::cerr
-
-    namespace Eigen {
-    namespace internal {
-    // trivial function copying a bool. Must be EIGEN_DONT_INLINE, so we implement it after including Eigen headers.
-    // see bug 89.
-    namespace {
-    EIGEN_DONT_INLINE bool copy_bool(bool b) { return b; }
-    }
-    inline void assert_fail(const char *condition, const char *function, const char *file, int line)
-    {
-      std::cerr << "assertion failed: " << condition << " in function " << function << " at " << file << ":" << line << std::endl;
-      abort();
-    }
-    }
-    }
-    #define eigen_plain_assert(x) \
-      do { \
-        if(!Eigen::internal::copy_bool(x)) \
-          Eigen::internal::assert_fail(EIGEN_MAKESTRING(x), __PRETTY_FUNCTION__, __FILE__, __LINE__); \
-      } while(false)
-  #endif
-#endif
-
-// eigen_assert can be overridden
-#ifndef eigen_assert
-#define eigen_assert(x) eigen_plain_assert(x)
-#endif
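Because eigen_assert is only defined when the user has not already provided one, assertions can be rerouted. A minimal sketch that turns failed assertions into exceptions (it assumes assertions have not been compiled out by other means):

    // The override must appear before the first Eigen include.
    #include <stdexcept>
    #define eigen_assert(x) \
      do { if (!(x)) throw std::runtime_error("Eigen assertion failed: " #x); } while (false)
    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd a(2, 2), b(3, 3);
      try {
        a += b;   // size mismatch triggers eigen_assert
      } catch (const std::runtime_error&) {
        // recover instead of aborting
      }
      return 0;
    }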
-
-#ifdef EIGEN_INTERNAL_DEBUGGING
-#define eigen_internal_assert(x) eigen_assert(x)
-#else
-#define eigen_internal_assert(x)
-#endif
-
-#ifdef EIGEN_NO_DEBUG
-#define EIGEN_ONLY_USED_FOR_DEBUG(x) (void)x
-#else
-#define EIGEN_ONLY_USED_FOR_DEBUG(x)
-#endif
-
-#ifndef EIGEN_NO_DEPRECATED_WARNING
-  #if (defined __GNUC__)
-    #define EIGEN_DEPRECATED __attribute__((deprecated))
-  #elif (defined _MSC_VER)
-    #define EIGEN_DEPRECATED __declspec(deprecated)
-  #else
-    #define EIGEN_DEPRECATED
-  #endif
-#else
-  #define EIGEN_DEPRECATED
-#endif
-
-#if (defined __GNUC__)
-#define EIGEN_UNUSED __attribute__((unused))
-#else
-#define EIGEN_UNUSED
-#endif
-
-// Suppresses 'unused variable' warnings.
-#define EIGEN_UNUSED_VARIABLE(var) (void)var;
-
-#if !defined(EIGEN_ASM_COMMENT) && (defined __GNUC__)
-#define EIGEN_ASM_COMMENT(X)  asm("#" X)
-#else
-#define EIGEN_ASM_COMMENT(X)
-#endif
-
-/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
- * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
- * so that vectorization doesn't affect binary compatibility.
- *
- * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
- * vectorized and non-vectorized code.
- */
-#if (defined __GNUC__) || (defined __PGI) || (defined __IBMCPP__) || (defined __ARMCC_VERSION)
-  #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
-#elif (defined _MSC_VER)
-  #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
-#elif (defined __SUNPRO_CC)
-  // FIXME not sure about this one:
-  #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
-#else
-  #error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
-#endif
-
-#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
-
-#if EIGEN_ALIGN_STATICALLY
-#define EIGEN_USER_ALIGN_TO_BOUNDARY(n) EIGEN_ALIGN_TO_BOUNDARY(n)
-#define EIGEN_USER_ALIGN16 EIGEN_ALIGN16
-#else
-#define EIGEN_USER_ALIGN_TO_BOUNDARY(n)
-#define EIGEN_USER_ALIGN16
-#endif
-
-#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD
-  #define EIGEN_RESTRICT
-#endif
-#ifndef EIGEN_RESTRICT
-  #define EIGEN_RESTRICT __restrict
-#endif
-
-#ifndef EIGEN_STACK_ALLOCATION_LIMIT
-#define EIGEN_STACK_ALLOCATION_LIMIT 20000
-#endif
-
-#ifndef EIGEN_DEFAULT_IO_FORMAT
-#ifdef EIGEN_MAKING_DOCS
-// format used in Eigen's documentation
-// needed to define it here as escaping characters in CMake add_definition's argument seems very problematic.
-#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(3, 0, " ", "\n", "", "")
-#else
-#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat()
-#endif
-#endif
-
-// just an empty macro !
-#define EIGEN_EMPTY
-
-#if defined(_MSC_VER) && (!defined(__INTEL_COMPILER))
-#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
-  using Base::operator =;
-#elif defined(__clang__) // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
-#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
-  using Base::operator =; \
-  EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \
-  template <typename OtherDerived> \
-  EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other) { Base::operator=(other.derived()); return *this; }
-#else
-#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
-  using Base::operator =; \
-  EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \
-  { \
-    Base::operator=(other); \
-    return *this; \
-  }
-#endif
-
-#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
-  EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
-
-/**
-* Just a side note: commenting within defines works only by documenting
-* behind the object (via '!<'). Comments cannot be multi-line, hence
-* these extra long lines. What confuses doxygen here is
-* that we use '\' and basically have a bunch of typedefs with their
-* documentation in a single line.
-**/
-
-#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
-  typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
-  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would correspond to RealScalar. */ \
-  typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
-  typedef typename Eigen::internal::nested<Derived>::type Nested; \
-  typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
-  typedef typename Eigen::internal::traits<Derived>::Index Index; \
-  enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
-        ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
-        Flags = Eigen::internal::traits<Derived>::Flags, \
-        CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
-        SizeAtCompileTime = Base::SizeAtCompileTime, \
-        MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
-        IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
-
-
-#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
-  typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
-  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would correspond to RealScalar. */ \
-  typedef typename Base::PacketScalar PacketScalar; \
-  typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
-  typedef typename Eigen::internal::nested<Derived>::type Nested; \
-  typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
-  typedef typename Eigen::internal::traits<Derived>::Index Index; \
-  enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
-        ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
-        MaxRowsAtCompileTime = Eigen::internal::traits<Derived>::MaxRowsAtCompileTime, \
-        MaxColsAtCompileTime = Eigen::internal::traits<Derived>::MaxColsAtCompileTime, \
-        Flags = Eigen::internal::traits<Derived>::Flags, \
-        CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
-        SizeAtCompileTime = Base::SizeAtCompileTime, \
-        MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
-        IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
-  using Base::derived; \
-  using Base::const_cast_derived;
-
-
-#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
-#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
-
-// EIGEN_SIZE_MIN_PREFER_DYNAMIC gives the min between compile-time sizes. 0 has absolute priority, followed by 1,
-// followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over
-// finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3.
-#define EIGEN_SIZE_MIN_PREFER_DYNAMIC(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
-                           : ((int)a == 1 || (int)b == 1) ? 1 \
-                           : ((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
-                           : ((int)a <= (int)b) ? (int)a : (int)b)
-
-// EIGEN_SIZE_MIN_PREFER_FIXED is a variant of EIGEN_SIZE_MIN_PREFER_DYNAMIC comparing MaxSizes. The difference is that finite values
-// now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is
-// (between 0 and 3), it is not more than 3.
-#define EIGEN_SIZE_MIN_PREFER_FIXED(a,b)  (((int)a == 0 || (int)b == 0) ? 0 \
-                           : ((int)a == 1 || (int)b == 1) ? 1 \
-                           : ((int)a == Dynamic && (int)b == Dynamic) ? Dynamic \
-                           : ((int)a == Dynamic) ? (int)b \
-                           : ((int)b == Dynamic) ? (int)a \
-                           : ((int)a <= (int)b) ? (int)a : (int)b)
-
-// see EIGEN_SIZE_MIN_PREFER_DYNAMIC. No need for a separate variant for MaxSizes here.
-#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
-                           : ((int)a >= (int)b) ? (int)a : (int)b)
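Once Eigen/Core is included these helper macros are visible, so the priority rules can be checked directly (illustrative only; the macros are internal and not meant for user code):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      std::cout << EIGEN_SIZE_MIN_PREFER_DYNAMIC(3, Eigen::Dynamic) << "\n"; // -1: Dynamic wins
      std::cout << EIGEN_SIZE_MIN_PREFER_FIXED(3, Eigen::Dynamic)   << "\n"; //  3: finite bound wins
      std::cout << EIGEN_SIZE_MIN_PREFER_DYNAMIC(0, Eigen::Dynamic) << "\n"; //  0: zero has priority
      std::cout << EIGEN_SIZE_MAX(3, Eigen::Dynamic)                << "\n"; // -1: Dynamic
      return 0;
    }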
-
-#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))
-
-#define EIGEN_IMPLIES(a,b) (!(a) || (b))
-
-#define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR) \
-  template<typename OtherDerived> \
-  EIGEN_STRONG_INLINE const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> \
-  (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
-  { \
-    return CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); \
-  }
-
-// the expression type of a cwise product
-#define EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS) \
-    CwiseBinaryOp< \
-      internal::scalar_product_op< \
-          typename internal::traits<LHS>::Scalar, \
-          typename internal::traits<RHS>::Scalar \
-      >, \
-      const LHS, \
-      const RHS \
-    >
-
-#endif // EIGEN_MACROS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/Memory.h b/resources/3rdparty/eigen/Eigen/src/Core/util/Memory.h
deleted file mode 100644
index 6e06ace44..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/util/Memory.h
+++ /dev/null
@@ -1,952 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2008-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2009 Kenneth Riddile <kfriddile@yahoo.com>
-// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
-// Copyright (C) 2010 Thomas Capricelli <orzel@freehackers.org>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-
-/*****************************************************************************
-*** Platform checks for aligned malloc functions                           ***
-*****************************************************************************/
-
-#ifndef EIGEN_MEMORY_H
-#define EIGEN_MEMORY_H
-
-// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:
-//   http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html
-// This is true at least since glibc 2.8.
-// This leaves the question how to detect 64-bit. According to this document,
-//   http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf
-// page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed
-// quite safe, at least within the context of glibc, to equate 64-bit with LP64.
-#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \
- && defined(__LP64__)
-  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
-#else
-  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
-#endif
-
-// FreeBSD 6 seems to have 16-byte aligned malloc
-//   See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup
-// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures
-//   See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup
-#if defined(__FreeBSD__) && !defined(__arm__) && !defined(__mips__)
-  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1
-#else
-  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0
-#endif
-
-#if defined(__APPLE__) \
- || defined(_WIN64) \
- || EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED \
- || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED
-  #define EIGEN_MALLOC_ALREADY_ALIGNED 1
-#else
-  #define EIGEN_MALLOC_ALREADY_ALIGNED 0
-#endif
-
-#if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) \
- && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
-  #define EIGEN_HAS_POSIX_MEMALIGN 1
-#else
-  #define EIGEN_HAS_POSIX_MEMALIGN 0
-#endif
-
-#ifdef EIGEN_VECTORIZE_SSE
-  #define EIGEN_HAS_MM_MALLOC 1
-#else
-  #define EIGEN_HAS_MM_MALLOC 0
-#endif
-
-namespace Eigen {
-
-namespace internal {
-
-inline void throw_std_bad_alloc()
-{
-  #ifdef EIGEN_EXCEPTIONS
-    throw std::bad_alloc();
-  #else
-    std::size_t huge = -1;
-    new int[huge];
-  #endif
-}
-
-/*****************************************************************************
-*** Implementation of handmade aligned functions                           ***
-*****************************************************************************/
-
-/* ----- Hand made implementations of aligned malloc/free and realloc ----- */
-
-/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned.
-  * Fast, but wastes 16 additional bytes of memory. Does not throw any exception.
-  */
-inline void* handmade_aligned_malloc(size_t size)
-{
-  void *original = std::malloc(size+16);
-  if (original == 0) return 0;
-  void *aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(original) & ~(size_t(15))) + 16);
-  *(reinterpret_cast<void**>(aligned) - 1) = original;
-  return aligned;
-}
-
-/** \internal Frees memory allocated with handmade_aligned_malloc */
-inline void handmade_aligned_free(void *ptr)
-{
-  if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
-}
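The bookkeeping above can be summarized in a standalone sketch: over-allocate by 16 bytes, round the pointer up to the next 16-byte boundary, and stash the original pointer just below the aligned address so the matching free can recover it (illustrative only):

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>

    int main()
    {
      const std::size_t size = 100;
      void* original = std::malloc(size + 16);
      assert(original != 0);
      // Round up to the next multiple of 16 (always moves forward by 1..16 bytes).
      void* aligned = reinterpret_cast<void*>(
          (reinterpret_cast<std::size_t>(original) & ~std::size_t(15)) + 16);
      // Remember where the block really starts.
      *(reinterpret_cast<void**>(aligned) - 1) = original;

      assert(reinterpret_cast<std::size_t>(aligned) % 16 == 0);
      // ... 'size' usable bytes start at 'aligned' ...
      std::free(*(reinterpret_cast<void**>(aligned) - 1));
      return 0;
    }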
-
-/** \internal
-  * \brief Reallocates aligned memory.
-  * Since we know that our handmade version is based on std::realloc
-  * we can use std::realloc to implement efficient reallocation.
-  */
-inline void* handmade_aligned_realloc(void* ptr, size_t size, size_t = 0)
-{
-  if (ptr == 0) return handmade_aligned_malloc(size);
-  void *original = *(reinterpret_cast<void**>(ptr) - 1);
-  original = std::realloc(original,size+16);
-  if (original == 0) return 0;
-  void *aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(original) & ~(size_t(15))) + 16);
-  *(reinterpret_cast<void**>(aligned) - 1) = original;
-  return aligned;
-}
-
-/*****************************************************************************
-*** Implementation of generic aligned realloc (when no realloc can be used)***
-*****************************************************************************/
-
-void* aligned_malloc(size_t size);
-void  aligned_free(void *ptr);
-
-/** \internal
-  * \brief Reallocates aligned memory.
-  * Allows reallocation with aligned ptr types. This implementation will
-  * always create a new memory chunk and copy the old data.
-  */
-inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size)
-{
-  if (ptr==0)
-    return aligned_malloc(size);
-
-  if (size==0)
-  {
-    aligned_free(ptr);
-    return 0;
-  }
-
-  void* newptr = aligned_malloc(size);
-  if (newptr == 0)
-  {
-    #ifdef EIGEN_HAS_ERRNO
-    errno = ENOMEM; // according to the standard
-    #endif
-    return 0;
-  }
-
-  if (ptr != 0)
-  {
-    std::memcpy(newptr, ptr, (std::min)(size,old_size));
-    aligned_free(ptr);
-  }
-
-  return newptr;
-}
-
-/*****************************************************************************
-*** Implementation of portable aligned versions of malloc/free/realloc     ***
-*****************************************************************************/
-
-#ifdef EIGEN_NO_MALLOC
-inline void check_that_malloc_is_allowed()
-{
-  eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
-}
-#elif defined EIGEN_RUNTIME_NO_MALLOC
-inline bool is_malloc_allowed_impl(bool update, bool new_value = false)
-{
-  static bool value = true;
-  if (update == 1)
-    value = new_value;
-  return value;
-}
-inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }
-inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }
-inline void check_that_malloc_is_allowed()
-{
-  eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)");
-}
-#else 
-inline void check_that_malloc_is_allowed()
-{}
-#endif
-
-/** \internal Allocates \a size bytes. The returned pointer is guaranteed to be 16-byte aligned.
-  * On allocation error, a std::bad_alloc exception is thrown.
-  */
-inline void* aligned_malloc(size_t size)
-{
-  check_that_malloc_is_allowed();
-
-  void *result;
-  #if !EIGEN_ALIGN
-    result = std::malloc(size);
-  #elif EIGEN_MALLOC_ALREADY_ALIGNED
-    result = std::malloc(size);
-  #elif EIGEN_HAS_POSIX_MEMALIGN
-    if(posix_memalign(&result, 16, size)) result = 0;
-  #elif EIGEN_HAS_MM_MALLOC
-    result = _mm_malloc(size, 16);
-  #elif (defined _MSC_VER)
-    result = _aligned_malloc(size, 16);
-  #else
-    result = handmade_aligned_malloc(size);
-  #endif
-
-  if(!result && size)
-    throw_std_bad_alloc();
-
-  return result;
-}
-
-/** \internal Frees memory allocated with aligned_malloc. */
-inline void aligned_free(void *ptr)
-{
-  #if !EIGEN_ALIGN
-    std::free(ptr);
-  #elif EIGEN_MALLOC_ALREADY_ALIGNED
-    std::free(ptr);
-  #elif EIGEN_HAS_POSIX_MEMALIGN
-    std::free(ptr);
-  #elif EIGEN_HAS_MM_MALLOC
-    _mm_free(ptr);
-  #elif defined(_MSC_VER)
-    _aligned_free(ptr);
-  #else
-    handmade_aligned_free(ptr);
-  #endif
-}
-
-/**
-* \internal
-* \brief Reallocates an aligned block of memory.
-* \throws std::bad_alloc on allocation failure
-**/
-inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
-{
-  EIGEN_UNUSED_VARIABLE(old_size);
-
-  void *result;
-#if !EIGEN_ALIGN
-  result = std::realloc(ptr,new_size);
-#elif EIGEN_MALLOC_ALREADY_ALIGNED
-  result = std::realloc(ptr,new_size);
-#elif EIGEN_HAS_POSIX_MEMALIGN
-  result = generic_aligned_realloc(ptr,new_size,old_size);
-#elif EIGEN_HAS_MM_MALLOC
-  // The defined(_mm_free) is just here to verify that this MSVC version
-  // implements _mm_malloc/_mm_free based on the corresponding _aligned_
-  // functions. This may not always be the case and we just try to be safe.
-  #if defined(_MSC_VER) && defined(_mm_free)
-    result = _aligned_realloc(ptr,new_size,16);
-  #else
-    result = generic_aligned_realloc(ptr,new_size,old_size);
-  #endif
-#elif defined(_MSC_VER)
-  result = _aligned_realloc(ptr,new_size,16);
-#else
-  result = handmade_aligned_realloc(ptr,new_size,old_size);
-#endif
-
-  if (!result && new_size)
-    throw_std_bad_alloc();
-
-  return result;
-}
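-/* A minimal sketch of the portable front-end above, assuming <Eigen/Core> is included
- * (error handling omitted; on failure these functions report via throw_std_bad_alloc()):
- *
- *   void* p = Eigen::internal::aligned_malloc(64);       // 16-byte aligned, or throws
- *   p = Eigen::internal::aligned_realloc(p, 128, 64);    // new size, old size
- *   Eigen::internal::aligned_free(p);
- */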
-
-/*****************************************************************************
-*** Implementation of conditionally aligned functions                      ***
-*****************************************************************************/
-
-/** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned.
-  * On allocation error, a std::bad_alloc exception is thrown.
-  */
-template<bool Align> inline void* conditional_aligned_malloc(size_t size)
-{
-  return aligned_malloc(size);
-}
-
-template<> inline void* conditional_aligned_malloc<false>(size_t size)
-{
-  check_that_malloc_is_allowed();
-
-  void *result = std::malloc(size);
-  if(!result && size)
-    throw_std_bad_alloc();
-  return result;
-}
-
-/** \internal Frees memory allocated with conditional_aligned_malloc */
-template<bool Align> inline void conditional_aligned_free(void *ptr)
-{
-  aligned_free(ptr);
-}
-
-template<> inline void conditional_aligned_free<false>(void *ptr)
-{
-  std::free(ptr);
-}
-
-template<bool Align> inline void* conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size)
-{
-  return aligned_realloc(ptr, new_size, old_size);
-}
-
-template<> inline void* conditional_aligned_realloc<false>(void* ptr, size_t new_size, size_t)
-{
-  return std::realloc(ptr, new_size);
-}
-
-/*****************************************************************************
-*** Construction/destruction of array elements                             ***
-*****************************************************************************/
-
-/** \internal Constructs the elements of an array.
-  * The \a size parameter tells on how many objects to call the constructor of T.
-  */
-template<typename T> inline T* construct_elements_of_array(T *ptr, size_t size)
-{
-  for (size_t i=0; i < size; ++i) ::new (ptr + i) T;
-  return ptr;
-}
-
-/** \internal Destructs the elements of an array.
-  * The \a size parameter tells on how many objects to call the destructor of T.
-  */
-template<typename T> inline void destruct_elements_of_array(T *ptr, size_t size)
-{
-  // always destruct an array starting from the end.
-  if(ptr)
-    while(size) ptr[--size].~T();
-}
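-/* A minimal sketch, assuming <Eigen/Core> and <string>: these two helpers only run the
- * constructors/destructors via placement new, they never allocate or release the storage
- * themselves; pairing them with aligned_malloc/aligned_free gives a bare-bones aligned_new.
- *
- *   void* raw = Eigen::internal::aligned_malloc(4 * sizeof(std::string));
- *   std::string* s = Eigen::internal::construct_elements_of_array(static_cast<std::string*>(raw), 4);
- *   s[0] = "hello";
- *   Eigen::internal::destruct_elements_of_array(s, 4);   // destructs s[3] down to s[0]
- *   Eigen::internal::aligned_free(raw);
- */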
-
-/*****************************************************************************
-*** Implementation of aligned new/delete-like functions                    ***
-*****************************************************************************/
-
-template<typename T>
-EIGEN_ALWAYS_INLINE void check_size_for_overflow(size_t size)
-{
-  if(size > size_t(-1) / sizeof(T))
-    throw_std_bad_alloc();
-}
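-/* A worked example of the guard above: with a 32-bit size_t, size_t(-1) is 4294967295, so for
- * T = double (sizeof(T) == 8) any size greater than 4294967295/8 == 536870911 would make
- * sizeof(T)*size wrap around; check_size_for_overflow<double>(536870912) therefore reports
- * std::bad_alloc instead of letting a too-small buffer be allocated silently.
- */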
-
-/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to be 16-byte aligned.
-  * On allocation error, a std::bad_alloc exception is thrown.
-  * The default constructor of T is called.
-  */
-template<typename T> inline T* aligned_new(size_t size)
-{
-  check_size_for_overflow<T>(size);
-  T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
-  return construct_elements_of_array(result, size);
-}
-
-template<typename T, bool Align> inline T* conditional_aligned_new(size_t size)
-{
-  check_size_for_overflow<T>(size);
-  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
-  return construct_elements_of_array(result, size);
-}
-
-/** \internal Deletes objects constructed with aligned_new
-  * The \a size parameter tells on how many objects to call the destructor of T.
-  */
-template<typename T> inline void aligned_delete(T *ptr, size_t size)
-{
-  destruct_elements_of_array<T>(ptr, size);
-  aligned_free(ptr);
-}
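-/* A minimal sketch, assuming <Eigen/Core>: aligned_new combines the aligned allocation and the
- * element construction above, and aligned_delete undoes both.
- *
- *   double* buf = Eigen::internal::aligned_new<double>(16);  // 16 default-constructed doubles
- *   buf[0] = 1.0;
- *   Eigen::internal::aligned_delete(buf, 16);
- */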
-
-/** \internal Deletes objects constructed with conditional_aligned_new
-  * The \a size parameter tells on how many objects to call the destructor of T.
-  */
-template<typename T, bool Align> inline void conditional_aligned_delete(T *ptr, size_t size)
-{
-  destruct_elements_of_array<T>(ptr, size);
-  conditional_aligned_free<Align>(ptr);
-}
-
-template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pts, size_t new_size, size_t old_size)
-{
-  check_size_for_overflow<T>(new_size);
-  check_size_for_overflow<T>(old_size);
-  if(new_size < old_size)
-    destruct_elements_of_array(pts+new_size, old_size-new_size);
-  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
-  if(new_size > old_size)
-    construct_elements_of_array(result+old_size, new_size-old_size);
-  return result;
-}
-
-
-template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t size)
-{
-  check_size_for_overflow<T>(size);
-  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
-  if(NumTraits<T>::RequireInitialization)
-    construct_elements_of_array(result, size);
-  return result;
-}
-
-template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, size_t new_size, size_t old_size)
-{
-  check_size_for_overflow<T>(new_size);
-  check_size_for_overflow<T>(old_size);
-  if(NumTraits<T>::RequireInitialization && (new_size < old_size))
-    destruct_elements_of_array(pts+new_size, old_size-new_size);
-  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
-  if(NumTraits<T>::RequireInitialization && (new_size > old_size))
-    construct_elements_of_array(result+old_size, new_size-old_size);
-  return result;
-}
-
-template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *ptr, size_t size)
-{
-  if(NumTraits<T>::RequireInitialization)
-    destruct_elements_of_array<T>(ptr, size);
-  conditional_aligned_free<Align>(ptr);
-}
-
-/****************************************************************************/
-
-/** \internal Returns the index of the first element of the array that is well aligned for vectorization.
-  *
-  * \param array the address of the start of the array
-  * \param size the size of the array
-  *
-  * \note If no element of the array is well aligned, the size of the array is returned. Typically,
-  * for example with SSE, "well aligned" means 16-byte-aligned. If vectorization is disabled or if the
-  * packet size for the given scalar type is 1, then everything is considered well-aligned.
-  *
-  * \note If the scalar type is vectorizable, we rely on the following assumptions: sizeof(Scalar) is a
-  * power of 2, the packet size in bytes is also a power of 2, and is a multiple of sizeof(Scalar). On the
-  * other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
-  * example with Scalar=double on certain 32-bit platforms, see bug #79.
-  *
-  * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
-  */
-template<typename Scalar, typename Index>
-static inline Index first_aligned(const Scalar* array, Index size)
-{
-  typedef typename packet_traits<Scalar>::type Packet;
-  enum { PacketSize = packet_traits<Scalar>::size,
-         PacketAlignedMask = PacketSize-1
-  };
-
-  if(PacketSize==1)
-  {
-    // Either there is no vectorization, or a packet consists of exactly 1 scalar so that all elements
-    // of the array have the same alignment.
-    return 0;
-  }
-  else if(size_t(array) & (sizeof(Scalar)-1))
-  {
-    // There is vectorization for this scalar type, but the array is not aligned to the size of a single scalar.
-    // Consequently, no element of the array is well aligned.
-    return size;
-  }
-  else
-  {
-    return std::min<Index>( (PacketSize - (Index((size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
-                           & PacketAlignedMask, size);
-  }
-}
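-/* A worked example of first_aligned, assuming SSE (PacketSize == 4 for float) and a
- * hypothetical array starting at address 0x1004: the address is a multiple of sizeof(float)
- * but not of 16, and (0x1004/4) & 3 == 1, so the function returns (4 - 1) & 3 == 3 -- indeed
- * &array[3] sits at 0x1010, the first 16-byte boundary inside the array.
- */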
-
-
-// std::copy is much slower than memcpy, so let's introduce a smart_copy which
-// uses memcpy on trivial types, i.e., on types that do not require an initialization ctor.
-template<typename T, bool UseMemcpy> struct smart_copy_helper;
-
-template<typename T> void smart_copy(const T* start, const T* end, T* target)
-{
-  smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
-}
-
-template<typename T> struct smart_copy_helper<T,true> {
-  static inline void run(const T* start, const T* end, T* target)
-  { memcpy(target, start, std::ptrdiff_t(end)-std::ptrdiff_t(start)); }
-};
-
-template<typename T> struct smart_copy_helper<T,false> {
-  static inline void run(const T* start, const T* end, T* target)
-  { std::copy(start, end, target); }
-};
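-/* A minimal sketch, assuming <Eigen/Core>: for float, NumTraits<float>::RequireInitialization
- * is false, so the call below dispatches to the memcpy specialization.
- *
- *   float src[3] = {1.f, 2.f, 3.f};
- *   float dst[3];
- *   Eigen::internal::smart_copy(src, src + 3, dst);
- */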
-
-
-/*****************************************************************************
-*** Implementation of runtime stack allocation (falling back to malloc)    ***
-*****************************************************************************/
-
-// you can override Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
-// to the appropriate stack allocation function
-#ifndef EIGEN_ALLOCA
-  #if (defined __linux__)
-    #define EIGEN_ALLOCA alloca
-  #elif defined(_MSC_VER)
-    #define EIGEN_ALLOCA _alloca
-  #endif
-#endif
-
-// This helper class constructs the allocated memory, and takes care of destructing and freeing the handled data
-// at destruction time. In practice this helper class is mainly useful to avoid memory leaks in case of exceptions.
-template<typename T> class aligned_stack_memory_handler
-{
-  public:
-    /* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size.
-     * Note that \a ptr can be 0 regardless of the other parameters.
-     * This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits<T>::RequireInitialization).
-     * In this case, the buffer elements will also be destructed when this handler is destructed.
-     * Finally, if \a dealloc is true, then the pointer \a ptr is freed.
-     **/
-    aligned_stack_memory_handler(T* ptr, size_t size, bool dealloc)
-      : m_ptr(ptr), m_size(size), m_deallocate(dealloc)
-    {
-      if(NumTraits<T>::RequireInitialization && m_ptr)
-        Eigen::internal::construct_elements_of_array(m_ptr, size);
-    }
-    ~aligned_stack_memory_handler()
-    {
-      if(NumTraits<T>::RequireInitialization && m_ptr)
-        Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);
-      if(m_deallocate)
-        Eigen::internal::aligned_free(m_ptr);
-    }
-  protected:
-    T* m_ptr;
-    size_t m_size;
-    bool m_deallocate;
-};
-
-} // end namespace internal
-
-/** \internal
-  * Declares, allocates and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack
-  * if SIZE times sizeof(TYPE) is no larger than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
-  * (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap.
-  * The allocated buffer is automatically deleted when exiting the scope of this declaration.
-  * If BUFFER is non null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.
-  * Here is an example:
-  * \code
-  * {
-  *   ei_declare_aligned_stack_constructed_variable(float,data,size,0);
-  *   // use data[0] to data[size-1]
-  * }
-  * \endcode
-  * The underlying stack allocation function can be controlled with the EIGEN_ALLOCA preprocessor token.
-  */
-#ifdef EIGEN_ALLOCA
-
-  #ifdef __arm__
-    #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+16)) & ~(size_t(15))) + 16)
-  #else
-    #define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
-  #endif
-
-  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
-    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
-    TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \
-               : reinterpret_cast<TYPE*>( \
-                      (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \
-                    : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) );  \
-    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)
-
-#else
-
-  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
-    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
-    TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE));    \
-    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)
-    
-#endif
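-/* A minimal sketch of the BUFFER branch of the macro above, assuming <Eigen/Core> (the names
- * below are ours): when a non-null buffer is passed, NAME simply aliases it and the handler
- * neither constructs nor frees anything.
- *
- *   float backing[32];
- *   ei_declare_aligned_stack_constructed_variable(float, data, 32, backing);
- *   data[0] = 1.f;   // writes into 'backing'
- */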
-
-
-/*****************************************************************************
-*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF]                ***
-*****************************************************************************/
-
-#if EIGEN_ALIGN
-  #ifdef EIGEN_EXCEPTIONS
-    #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
-      void* operator new(size_t size, const std::nothrow_t&) throw() { \
-        try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
-        catch (...) { return 0; } \
-        return 0; \
-      }
-  #else
-    #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
-      void* operator new(size_t size, const std::nothrow_t&) throw() { \
-        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
-      }
-  #endif
-
-  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
-      void *operator new(size_t size) { \
-        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
-      } \
-      void *operator new[](size_t size) { \
-        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
-      } \
-      void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
-      void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
-      /* in-place new and delete. since (at least afaik) there is no actual   */ \
-      /* memory allocated we can safely let the default implementation handle */ \
-      /* this particular case. */ \
-      static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \
-      void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \
-      /* nothrow-new (returns zero instead of std::bad_alloc) */ \
-      EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
-      void operator delete(void *ptr, const std::nothrow_t&) throw() { \
-        Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \
-      } \
-      typedef void eigen_aligned_operator_new_marker_type;
-#else
-  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
-#endif
-
-#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
-#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0)))
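-/* A minimal sketch, assuming <Eigen/Core>: a class holding a fixed-size vectorizable member
- * uses the macro so that heap-allocated instances go through the aligned operator new above.
- *
- *   struct Frame {
- *     Eigen::Matrix4f pose;
- *     EIGEN_MAKE_ALIGNED_OPERATOR_NEW
- *   };
- *   Frame* f = new Frame;   // routed through conditional_aligned_malloc<true>
- *   delete f;
- */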
-
-/****************************************************************************/
-
-/** \class aligned_allocator
-* \ingroup Core_Module
-*
-* \brief STL compatible allocator to use with 16 byte aligned types
-*
-* Example:
-* \code
-* // Matrix4f requires 16 bytes alignment:
-* std::map< int, Matrix4f, std::less<int>, 
-*           aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
-* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator:
-* std::map< int, Vector3f > my_map_vec3;
-* \endcode
-*
-* \sa \ref TopicStlContainers.
-*/
-template<class T>
-class aligned_allocator
-{
-public:
-    typedef size_t    size_type;
-    typedef std::ptrdiff_t difference_type;
-    typedef T*        pointer;
-    typedef const T*  const_pointer;
-    typedef T&        reference;
-    typedef const T&  const_reference;
-    typedef T         value_type;
-
-    template<class U>
-    struct rebind
-    {
-        typedef aligned_allocator<U> other;
-    };
-
-    pointer address( reference value ) const
-    {
-        return &value;
-    }
-
-    const_pointer address( const_reference value ) const
-    {
-        return &value;
-    }
-
-    aligned_allocator()
-    {
-    }
-
-    aligned_allocator( const aligned_allocator& )
-    {
-    }
-
-    template<class U>
-    aligned_allocator( const aligned_allocator<U>& )
-    {
-    }
-
-    ~aligned_allocator()
-    {
-    }
-
-    size_type max_size() const
-    {
-        return (std::numeric_limits<size_type>::max)();
-    }
-
-    pointer allocate( size_type num, const void* hint = 0 )
-    {
-        EIGEN_UNUSED_VARIABLE(hint);
-        internal::check_size_for_overflow<T>(num);
-        return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) );
-    }
-
-    void construct( pointer p, const T& value )
-    {
-        ::new( p ) T( value );
-    }
-
-    // Support for c++11
-#if (__cplusplus >= 201103L)
-    template<typename... Args>
-    void  construct(pointer p, Args&&... args)
-    {
-      ::new(p) T(std::forward<Args>(args)...);
-    }
-#endif
-
-    void destroy( pointer p )
-    {
-        p->~T();
-    }
-
-    void deallocate( pointer p, size_type /*num*/ )
-    {
-        internal::aligned_free( p );
-    }
-
-    bool operator!=(const aligned_allocator<T>& ) const
-    { return false; }
-
-    bool operator==(const aligned_allocator<T>& ) const
-    { return true; }
-};
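-/* A further usage sketch, assuming <Eigen/StdVector> is included (as Eigen recommends when
- * storing fixed-size vectorizable types in std::vector) -- see also the std::map example above:
- *
- *   std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > pts(8);
- *   pts[0] = Eigen::Vector4f::Ones();
- */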
-
-//---------- Cache sizes ----------
-
-#if !defined(EIGEN_NO_CPUID)
-#  if defined(__GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
-#    if defined(__PIC__) && defined(__i386__)
-       // Case for x86 with PIC
-#      define EIGEN_CPUID(abcd,func,id) \
-         __asm__ __volatile__ ("xchgl %%ebx, %%esi;cpuid; xchgl %%ebx,%%esi": "=a" (abcd[0]), "=S" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
-#    else
-       // Case for x86_64 or x86 w/o PIC
-#      define EIGEN_CPUID(abcd,func,id) \
-         __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id) );
-#    endif
-#  elif defined(_MSC_VER)
-#    if (_MSC_VER > 1500)
-#      define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
-#    endif
-#  endif
-#endif
-
-namespace internal {
-
-#ifdef EIGEN_CPUID
-
-inline bool cpuid_is_vendor(int abcd[4], const char* vendor)
-{
-  return abcd[1]==(reinterpret_cast<const int*>(vendor))[0] && abcd[3]==(reinterpret_cast<const int*>(vendor))[1] && abcd[2]==(reinterpret_cast<const int*>(vendor))[2];
-}
-
-inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
-{
-  int abcd[4];
-  l1 = l2 = l3 = 0;
-  int cache_id = 0;
-  int cache_type = 0;
-  do {
-    abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
-    EIGEN_CPUID(abcd,0x4,cache_id);
-    cache_type  = (abcd[0] & 0x0F) >> 0;
-    if(cache_type==1||cache_type==3) // data or unified cache
-    {
-      int cache_level = (abcd[0] & 0xE0) >> 5;  // A[7:5]
-      int ways        = (abcd[1] & 0xFFC00000) >> 22; // B[31:22]
-      int partitions  = (abcd[1] & 0x003FF000) >> 12; // B[21:12]
-      int line_size   = (abcd[1] & 0x00000FFF) >>  0; // B[11:0]
-      int sets        = (abcd[2]);                    // C[31:0]
-
-      int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1);
-
-      switch(cache_level)
-      {
-        case 1: l1 = cache_size; break;
-        case 2: l2 = cache_size; break;
-        case 3: l3 = cache_size; break;
-        default: break;
-      }
-    }
-    cache_id++;
-  } while(cache_type>0 && cache_id<16);
-}
-
-inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3)
-{
-  int abcd[4];
-  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
-  l1 = l2 = l3 = 0;
-  EIGEN_CPUID(abcd,0x00000002,0);
-  unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2;
-  bool check_for_p2_core2 = false;
-  for(int i=0; i<14; ++i)
-  {
-    switch(bytes[i])
-    {
-      case 0x0A: l1 = 8; break;   // 0Ah   data L1 cache, 8 KB, 2 ways, 32 byte lines
-      case 0x0C: l1 = 16; break;  // 0Ch   data L1 cache, 16 KB, 4 ways, 32 byte lines
-      case 0x0E: l1 = 24; break;  // 0Eh   data L1 cache, 24 KB, 6 ways, 64 byte lines
-      case 0x10: l1 = 16; break;  // 10h   data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
-      case 0x15: l1 = 16; break;  // 15h   code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
-      case 0x2C: l1 = 32; break;  // 2Ch   data L1 cache, 32 KB, 8 ways, 64 byte lines
-      case 0x30: l1 = 32; break;  // 30h   code L1 cache, 32 KB, 8 ways, 64 byte lines
-      case 0x60: l1 = 16; break;  // 60h   data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored
-      case 0x66: l1 = 8; break;   // 66h   data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored
-      case 0x67: l1 = 16; break;  // 67h   data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored
-      case 0x68: l1 = 32; break;  // 68h   data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored
-      case 0x1A: l2 = 96; break;   // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64)
-      case 0x22: l3 = 512; break;   // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored
-      case 0x23: l3 = 1024; break;   // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
-      case 0x25: l3 = 2048; break;   // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored
-      case 0x29: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored
-      case 0x39: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored
-      case 0x3A: l2 = 192; break;   // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored
-      case 0x3B: l2 = 128; break;   // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored
-      case 0x3C: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored
-      case 0x3D: l2 = 384; break;   // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored
-      case 0x3E: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored
-      case 0x40: l2 = 0; break;   // no integrated L2 cache (P6 core) or L3 cache (P4 core)
-      case 0x41: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 32 byte lines
-      case 0x42: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 32 byte lines
-      case 0x43: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 32 byte lines
-      case 0x44: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines
-      case 0x45: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines
-      case 0x46: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines
-      case 0x47: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines
-      case 0x48: l2 = 3072; break;   // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines
-      case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break;// code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2
-      case 0x4A: l3 = 6144; break;   // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines
-      case 0x4B: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines
-      case 0x4C: l3 = 12288; break;   // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines
-      case 0x4D: l3 = 16384; break;   // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines
-      case 0x4E: l2 = 6144; break;   // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines
-      case 0x78: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines
-      case 0x79: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored
-      case 0x7A: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored
-      case 0x7B: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored
-      case 0x7C: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
-      case 0x7D: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines
-      case 0x7E: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64)
-      case 0x7F: l2 = 512; break;   // code and data L2 cache, 512 KB, 2 ways, 64 byte lines
-      case 0x80: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines
-      case 0x81: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 32 byte lines
-      case 0x82: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 32 byte lines
-      case 0x83: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 32 byte lines
-      case 0x84: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines
-      case 0x85: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines
-      case 0x86: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines
-      case 0x87: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines
-      case 0x88: l3 = 2048; break;   // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64)
-      case 0x89: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64)
-      case 0x8A: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64)
-      case 0x8D: l3 = 3072; break;   // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64)
-
-      default: break;
-    }
-  }
-  if(check_for_p2_core2 && l2 == l3)
-    l3 = 0;
-  l1 *= 1024;
-  l2 *= 1024;
-  l3 *= 1024;
-}
-
-inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)
-{
-  if(max_std_funcs>=4)
-    queryCacheSizes_intel_direct(l1,l2,l3);
-  else
-    queryCacheSizes_intel_codes(l1,l2,l3);
-}
-
-inline void queryCacheSizes_amd(int& l1, int& l2, int& l3)
-{
-  int abcd[4];
-  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
-  EIGEN_CPUID(abcd,0x80000005,0);
-  l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
-  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
-  EIGEN_CPUID(abcd,0x80000006,0);
-  l2 = (abcd[2] >> 16) * 1024; // C[31:16] = l2 cache size in KB
-  l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31:18] = l3 cache size in 512KB units
-}
-#endif
-
-/** \internal
- * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */
-inline void queryCacheSizes(int& l1, int& l2, int& l3)
-{
-  #ifdef EIGEN_CPUID
-  int abcd[4];
-
-  // identify the CPU vendor
-  EIGEN_CPUID(abcd,0x0,0);
-  int max_std_funcs = abcd[1];
-  if(cpuid_is_vendor(abcd,"GenuineIntel"))
-    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
-  else if(cpuid_is_vendor(abcd,"AuthenticAMD") || cpuid_is_vendor(abcd,"AMDisbetter!"))
-    queryCacheSizes_amd(l1,l2,l3);
-  else
-    // by default let's use Intel's API
-    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
-
-  // here is the list of other vendors:
-//   ||cpuid_is_vendor(abcd,"VIA VIA VIA ")
-//   ||cpuid_is_vendor(abcd,"CyrixInstead")
-//   ||cpuid_is_vendor(abcd,"CentaurHauls")
-//   ||cpuid_is_vendor(abcd,"GenuineTMx86")
-//   ||cpuid_is_vendor(abcd,"TransmetaCPU")
-//   ||cpuid_is_vendor(abcd,"RiseRiseRise")
-//   ||cpuid_is_vendor(abcd,"Geode by NSC")
-//   ||cpuid_is_vendor(abcd,"SiS SiS SiS ")
-//   ||cpuid_is_vendor(abcd,"UMC UMC UMC ")
-//   ||cpuid_is_vendor(abcd,"NexGenDriven")
-  #else
-  l1 = l2 = l3 = -1;
-  #endif
-}
-
-/** \internal
- * \returns the size in Bytes of the L1 data cache */
-inline int queryL1CacheSize()
-{
-  int l1(-1), l2, l3;
-  queryCacheSizes(l1,l2,l3);
-  return l1;
-}
-
-/** \internal
- * \returns the size in Bytes of the L2 cache, or of the L3 cache if the latter is present */
-inline int queryTopLevelCacheSize()
-{
-  int l1, l2(-1), l3(-1);
-  queryCacheSizes(l1,l2,l3);
-  return (std::max)(l2,l3);
-}
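-/* A minimal sketch, assuming <Eigen/Core> and <cstdio>: the three queries report sizes in
- * bytes, or -1 when EIGEN_CPUID is not available on the target.
- *
- *   int l1, l2, l3;
- *   Eigen::internal::queryCacheSizes(l1, l2, l3);
- *   std::printf("L1=%d L2=%d L3=%d bytes\n", l1, l2, l3);
- */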
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_MEMORY_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/StaticAssert.h b/resources/3rdparty/eigen/Eigen/src/Core/util/StaticAssert.h
deleted file mode 100644
index 8872c5b64..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/util/StaticAssert.h
+++ /dev/null
@@ -1,206 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_STATIC_ASSERT_H
-#define EIGEN_STATIC_ASSERT_H
-
-/* Some notes on Eigen's static assertion mechanism:
- *
- *  - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean
- *    expression, and MSG an enum listed in struct internal::static_assertion<true>
- *
- *  - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time)
- *    in that case, the static assertion is converted to the following runtime assert:
- *      eigen_assert(CONDITION && "MSG")
- *
- *  - currently EIGEN_STATIC_ASSERT can only be used in function scope
- *
- */
-
-#ifndef EIGEN_NO_STATIC_ASSERT
-
-  #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(_MSC_VER) && (_MSC_VER >= 1600))
-
-    // if native static_assert is enabled, let's use it
-    #define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG);
-
-  #else // not CXX0X
-
-    namespace Eigen {
-
-    namespace internal {
-
-    template<bool condition>
-    struct static_assertion {};
-
-    template<>
-    struct static_assertion<true>
-    {
-      enum {
-        YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX,
-        YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES,
-        YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES,
-        THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE,
-        THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE,
-        THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE,
-        YOU_MADE_A_PROGRAMMING_MISTAKE,
-        EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT,
-        EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE,
-        YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR,
-        YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR,
-        UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC,
-        THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES,
-        FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED,
-        NUMERIC_TYPE_MUST_BE_REAL,
-        COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED,
-        WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED,
-        THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE,
-        INVALID_MATRIX_PRODUCT,
-        INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS,
-        INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION,
-        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY,
-        THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES,
-        THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES,
-        INVALID_MATRIX_TEMPLATE_PARAMETERS,
-        INVALID_MATRIXBASE_TEMPLATE_PARAMETERS,
-        BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER,
-        THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX,
-        THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE,
-        THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES,
-        YOU_ALREADY_SPECIFIED_THIS_STRIDE,
-        INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION,
-        THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD,
-        PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1,
-        THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS,
-        YOU_CANNOT_MIX_ARRAYS_AND_MATRICES,
-        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION,
-        THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY,
-        YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT,
-        THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS,
-        THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL,
-        THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES,
-        YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED,
-        YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,
-        THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,
-        THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH,
-        OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG
-      };
-    };
-
-    } // end namespace internal
-
-    } // end namespace Eigen
-
-    // Specialized implementation for MSVC to avoid "conditional
-    // expression is constant" warnings.  This implementation doesn't
-    // appear to work under GCC, hence the multiple implementations.
-    #ifdef _MSC_VER
-
-      #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
-        {Eigen::internal::static_assertion<bool(CONDITION)>::MSG;}
-
-    #else
-
-      #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
-        if (Eigen::internal::static_assertion<bool(CONDITION)>::MSG) {}
-
-    #endif
-
-  #endif // not CXX0X
-
-#else // EIGEN_NO_STATIC_ASSERT
-
-  #define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);
-
-#endif // EIGEN_NO_STATIC_ASSERT
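-/* A minimal usage sketch, assuming <Eigen/Core>: inside a function template, a failing
- * condition surfaces the enum name in the compiler error (or as the static_assert message in
- * C++11 mode). The function below is only an illustration.
- *
- *   template<typename Derived>
- *   void normalizeInPlace(Eigen::MatrixBase<Derived>& v)
- *   {
- *     EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
- *                         YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX)
- *     v /= v.norm();
- *   }
- */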
-
-
-// static assertion failing if the type \a TYPE is not a vector type
-#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \
-  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime, \
-                      YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX)
-
-// static assertion failing if the type \a TYPE is not fixed-size
-#define EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) \
-  EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime!=Eigen::Dynamic, \
-                      YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR)
-
-// static assertion failing if the type \a TYPE is not dynamic-size
-#define EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) \
-  EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime==Eigen::Dynamic, \
-                      YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR)
-
-// static assertion failing if the type \a TYPE is not a vector type of the given size
-#define EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE) \
-  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime && TYPE::SizeAtCompileTime==SIZE, \
-                      THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE)
-
-// static assertion failing if the type \a TYPE does not have the given compile-time dimensions
-#define EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS) \
-  EIGEN_STATIC_ASSERT(TYPE::RowsAtCompileTime==ROWS && TYPE::ColsAtCompileTime==COLS, \
-                      THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE)
-
-// static assertion failing if the two vector expression types are not compatible (same fixed-size or dynamic size)
-#define EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) \
-  EIGEN_STATIC_ASSERT( \
-      (int(TYPE0::SizeAtCompileTime)==Eigen::Dynamic \
-    || int(TYPE1::SizeAtCompileTime)==Eigen::Dynamic \
-    || int(TYPE0::SizeAtCompileTime)==int(TYPE1::SizeAtCompileTime)),\
-    YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES)
-
-#define EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
-     ( \
-        (int(TYPE0::SizeAtCompileTime)==0 && int(TYPE1::SizeAtCompileTime)==0) \
-    || (\
-          (int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \
-        || int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \
-        || int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \
-      &&  (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \
-        || int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \
-        || int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime))\
-       ) \
-     )
-
-#ifdef EIGEN2_SUPPORT
-  #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
-    eigen_assert(!NumTraits<Scalar>::IsInteger);
-#else
-  #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
-    EIGEN_STATIC_ASSERT(!NumTraits<TYPE>::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
-#endif
-
-
-// static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes
-#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
-  EIGEN_STATIC_ASSERT( \
-     EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1),\
-    YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)
-
-#define EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) \
-      EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Dynamic) && \
-                          (TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Dynamic), \
-                          THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS)
-
-#define EIGEN_STATIC_ASSERT_LVALUE(Derived) \
-      EIGEN_STATIC_ASSERT(internal::is_lvalue<Derived>::value, \
-                          THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY)
-
-#define EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) \
-      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Derived>::XprKind, ArrayXpr>::value), \
-                          THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES)
-
-#define EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2) \
-      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Derived1>::XprKind, \
-                                             typename internal::traits<Derived2>::XprKind \
-                                            >::value), \
-                          YOU_CANNOT_MIX_ARRAYS_AND_MATRICES)
-
-
-#endif // EIGEN_STATIC_ASSERT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/util/XprHelper.h b/resources/3rdparty/eigen/Eigen/src/Core/util/XprHelper.h
deleted file mode 100644
index 3d1290cd2..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Core/util/XprHelper.h
+++ /dev/null
@@ -1,468 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_XPRHELPER_H
-#define EIGEN_XPRHELPER_H
-
-// just a workaround because GCC seems to not really like empty structs
-// FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
-// so currently we simply disable this optimization for gcc 4.3
-#if (defined __GNUG__) && !((__GNUC__==4) && (__GNUC_MINOR__==3))
-  #define EIGEN_EMPTY_STRUCT_CTOR(X) \
-    EIGEN_STRONG_INLINE X() {} \
-    EIGEN_STRONG_INLINE X(const X& ) {}
-#else
-  #define EIGEN_EMPTY_STRUCT_CTOR(X)
-#endif
-
-namespace Eigen {
-
-typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex;
-
-namespace internal {
-
-//classes inheriting no_assignment_operator don't generate a default operator=.
-class no_assignment_operator
-{
-  private:
-    no_assignment_operator& operator=(const no_assignment_operator&);
-};
-
-/** \internal return the index type with the largest number of bits */
-template<typename I1, typename I2>
-struct promote_index_type
-{
-  typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
-};
-
-/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
-  * can be accessed using value() and setValue().
-  * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
-  */
-template<typename T, int Value> class variable_if_dynamic
-{
-  public:
-    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic)
-    explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); assert(v == T(Value)); }
-    static T value() { return T(Value); }
-    void setValue(T) {}
-};
-
-template<typename T> class variable_if_dynamic<T, Dynamic>
-{
-    T m_value;
-    variable_if_dynamic() { assert(false); }
-  public:
-    explicit variable_if_dynamic(T value) : m_value(value) {}
-    T value() const { return m_value; }
-    void setValue(T value) { m_value = value; }
-};
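-/* A minimal sketch, assuming <Eigen/Core>: with a compile-time value the wrapper stores no
- * data, with Dynamic it holds the runtime value; both expose the same value()/setValue()
- * interface, which is what lets fixed- and dynamic-sized code share implementations.
- *
- *   Eigen::internal::variable_if_dynamic<int, 3> fixed(3);              // empty, value() == 3
- *   Eigen::internal::variable_if_dynamic<int, Eigen::Dynamic> dyn(7);   // stores 7
- *   int n = fixed.value() + dyn.value();                                // 10
- */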
-
-/** \internal like variable_if_dynamic but for DynamicIndex
-  */
-template<typename T, int Value> class variable_if_dynamicindex
-{
-  public:
-    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
-    explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); assert(v == T(Value)); }
-    static T value() { return T(Value); }
-    void setValue(T) {}
-};
-
-template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
-{
-    T m_value;
-    variable_if_dynamicindex() { assert(false); }
-  public:
-    explicit variable_if_dynamicindex(T value) : m_value(value) {}
-    T value() const { return m_value; }
-    void setValue(T value) { m_value = value; }
-};
-
-template<typename T> struct functor_traits
-{
-  enum
-  {
-    Cost = 10,
-    PacketAccess = false
-  };
-};
-
-template<typename T> struct packet_traits;
-
-template<typename T> struct unpacket_traits
-{
-  typedef T type;
-  enum {size=1};
-};
-
-template<typename _Scalar, int _Rows, int _Cols,
-         int _Options = AutoAlign |
-                          ( (_Rows==1 && _Cols!=1) ? RowMajor
-                          : (_Cols==1 && _Rows!=1) ? ColMajor
-                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
-         int _MaxRows = _Rows,
-         int _MaxCols = _Cols
-> class make_proper_matrix_type
-{
-    enum {
-      IsColVector = _Cols==1 && _Rows!=1,
-      IsRowVector = _Rows==1 && _Cols!=1,
-      Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
-              : IsRowVector ? (_Options | RowMajor) & ~ColMajor
-              : _Options
-    };
-  public:
-    typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
-};
-
-template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
-class compute_matrix_flags
-{
-    enum {
-      row_major_bit = Options&RowMajor ? RowMajorBit : 0,
-      is_dynamic_size_storage = MaxRows==Dynamic || MaxCols==Dynamic,
-
-      aligned_bit =
-      (
-            ((Options&DontAlign)==0)
-        && (
-#if EIGEN_ALIGN_STATICALLY
-             ((!is_dynamic_size_storage) && (((MaxCols*MaxRows*int(sizeof(Scalar))) % 16) == 0))
-#else
-             0
-#endif
-
-          ||
-
-#if EIGEN_ALIGN
-             is_dynamic_size_storage
-#else
-             0
-#endif
-
-          )
-      ) ? AlignedBit : 0,
-      packet_access_bit = packet_traits<Scalar>::Vectorizable && aligned_bit ? PacketAccessBit : 0
-    };
-
-  public:
-    enum { ret = LinearAccessBit | LvalueBit | DirectAccessBit | NestByRefBit | packet_access_bit | row_major_bit | aligned_bit };
-};
-
-template<int _Rows, int _Cols> struct size_at_compile_time
-{
-  enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
-};
-
-/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
- * whereas eval is a const reference in the case of a matrix
- */
-
-template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
-template<typename T, typename BaseClassType> struct plain_matrix_type_dense;
-template<typename T> struct plain_matrix_type<T,Dense>
-{
-  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind>::type type;
-};
-
-template<typename T> struct plain_matrix_type_dense<T,MatrixXpr>
-{
-  typedef Matrix<typename traits<T>::Scalar,
-                traits<T>::RowsAtCompileTime,
-                traits<T>::ColsAtCompileTime,
-                AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
-                traits<T>::MaxRowsAtCompileTime,
-                traits<T>::MaxColsAtCompileTime
-          > type;
-};
-
-template<typename T> struct plain_matrix_type_dense<T,ArrayXpr>
-{
-  typedef Array<typename traits<T>::Scalar,
-                traits<T>::RowsAtCompileTime,
-                traits<T>::ColsAtCompileTime,
-                AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
-                traits<T>::MaxRowsAtCompileTime,
-                traits<T>::MaxColsAtCompileTime
-          > type;
-};
-
-/* eval : the return type of eval(). For matrices, this is just a const reference
- * in order to avoid a useless copy
- */
-
-template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;
-
-template<typename T> struct eval<T,Dense>
-{
-  typedef typename plain_matrix_type<T>::type type;
-//   typedef typename T::PlainObject type;
-//   typedef T::Matrix<typename traits<T>::Scalar,
-//                 traits<T>::RowsAtCompileTime,
-//                 traits<T>::ColsAtCompileTime,
-//                 AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
-//                 traits<T>::MaxRowsAtCompileTime,
-//                 traits<T>::MaxColsAtCompileTime
-//           > type;
-};
-
-// for matrices, no need to evaluate, just use a const reference to avoid a useless copy
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
-{
-  typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
-};
-
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
-{
-  typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
-};
-
-
-
-/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
- */
-template<typename T> struct plain_matrix_type_column_major
-{
-  enum { Rows = traits<T>::RowsAtCompileTime,
-         Cols = traits<T>::ColsAtCompileTime,
-         MaxRows = traits<T>::MaxRowsAtCompileTime,
-         MaxCols = traits<T>::MaxColsAtCompileTime
-  };
-  typedef Matrix<typename traits<T>::Scalar,
-                Rows,
-                Cols,
-                (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
-                MaxRows,
-                MaxCols
-          > type;
-};
-
-/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
- */
-template<typename T> struct plain_matrix_type_row_major
-{
-  enum { Rows = traits<T>::RowsAtCompileTime,
-         Cols = traits<T>::ColsAtCompileTime,
-         MaxRows = traits<T>::MaxRowsAtCompileTime,
-         MaxCols = traits<T>::MaxColsAtCompileTime
-  };
-  typedef Matrix<typename traits<T>::Scalar,
-                Rows,
-                Cols,
-                (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
-                MaxRows,
-                MaxCols
-          > type;
-};
-
-// we should be able to get rid of this one too
-template<typename T> struct must_nest_by_value { enum { ret = false }; };
-
-/** \internal The reference selector for template expressions. The idea is that we don't
-  * need to use references for expressions since they are lightweight proxy
-  * objects which should generate no copying overhead. */
-template <typename T>
-struct ref_selector
-{
-  typedef typename conditional<
-    bool(traits<T>::Flags & NestByRefBit),
-    T const&,
-    const T
-  >::type type;
-};
-
-/** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */
-template<typename T1, typename T2>
-struct transfer_constness
-{
-  typedef typename conditional<
-    bool(internal::is_const<T1>::value),
-    typename internal::add_const_on_value_type<T2>::type,
-    T2
-  >::type type;
-};
-
-/** \internal Determines how a given expression should be nested into another one.
-  * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
-  * nested into the bigger product expression. The choice is between nesting the expression b+c as-is, or
-  * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is
-  * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
-  * many coefficient accesses in the nested expressions -- as is the case with the matrix product.
-  *
-  * \param T the type of the expression being nested
-  * \param n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
-  *
-  * Note that if no evaluation occurs, then the constness of T is preserved.
-  *
-  * Example. Suppose that a, b, and c are of type Matrix3d. The user forms the expression a*(b+c).
-  * b+c is an expression "sum of matrices", which we will denote by S. In order to determine how to nest it,
-  * the Product expression uses: nested<S, 3>::ret, which turns out to be Matrix3d because the internal logic of
-  * nested determined that in this case it was better to evaluate the expression b+c into a temporary. On the other hand,
-  * since a is of type Matrix3d, the Product expression nests it as nested<Matrix3d, 3>::ret, which turns out to be
-  * const Matrix3d&, because the internal logic of nested determined that since a was already a matrix, there was no point
-  * in copying it into another matrix.
-  */
-template<typename T, int n=1, typename PlainObject = typename eval<T>::type> struct nested
-{
-  enum {
-    // for the purpose of this test, to keep it reasonably simple, we arbitrarily choose a finite value to stand in for Dynamic.
-    // the choice of 10000 makes it larger than any practical fixed value and even most dynamic values.
-    // in extreme cases where these assumptions would be wrong, we would still at worst suffer performance issues
-    // (poor choice of temporaries).
-    // it's important that this value can still be squared without integer overflowing.
-    DynamicAsInteger = 10000,
-    ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
-    ScalarReadCostAsInteger = ScalarReadCost == Dynamic ? int(DynamicAsInteger) : int(ScalarReadCost),
-    CoeffReadCost = traits<T>::CoeffReadCost,
-    CoeffReadCostAsInteger = CoeffReadCost == Dynamic ? int(DynamicAsInteger) : int(CoeffReadCost),
-    NAsInteger = n == Dynamic ? int(DynamicAsInteger) : n,
-    CostEvalAsInteger   = (NAsInteger+1) * ScalarReadCostAsInteger + CoeffReadCostAsInteger,
-    CostNoEvalAsInteger = NAsInteger * CoeffReadCostAsInteger
-  };
-
-  typedef typename conditional<
-      ( (int(traits<T>::Flags) & EvalBeforeNestingBit) ||
-        int(CostEvalAsInteger) < int(CostNoEvalAsInteger)
-      ),
-      PlainObject,
-      typename ref_selector<T>::type
-  >::type type;
-};
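-/* A worked instance of the cost comparison above, roughly, with the default NumTraits<double>
- * costs (ReadCost == AddCost == 1). For S = "Matrix3d + Matrix3d" nested with n == 3:
- * CoeffReadCost(S) == 3, so CostEval == (3+1)*1 + 3 == 7 and CostNoEval == 3*3 == 9, hence S is
- * evaluated into a Matrix3d temporary. For a plain Matrix3d operand, CostEval == 5 versus
- * CostNoEval == 3, so it is nested as const Matrix3d& -- matching the example in the comment
- * above.
- */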
-
-template<typename T>
-T* const_cast_ptr(const T* ptr)
-{
-  return const_cast<T*>(ptr);
-}
-
-template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
-struct dense_xpr_base
-{
-  /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
-};
-
-template<typename Derived>
-struct dense_xpr_base<Derived, MatrixXpr>
-{
-  typedef MatrixBase<Derived> type;
-};
-
-template<typename Derived>
-struct dense_xpr_base<Derived, ArrayXpr>
-{
-  typedef ArrayBase<Derived> type;
-};
-
-/** \internal Helper base class to add scalar multiple operator
-  * overloads for complex types */
-template<typename Derived,typename Scalar,typename OtherScalar,
-         bool EnableIt = !is_same<Scalar,OtherScalar>::value >
-struct special_scalar_op_base : public DenseCoeffsBase<Derived>
-{
-  // dummy operator* so that the
-  // "using special_scalar_op_base::operator*" compiles
-  void operator*() const;
-};
-
-template<typename Derived,typename Scalar,typename OtherScalar>
-struct special_scalar_op_base<Derived,Scalar,OtherScalar,true>  : public DenseCoeffsBase<Derived>
-{
-  const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
-  operator*(const OtherScalar& scalar) const
-  {
-    return CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
-      (*static_cast<const Derived*>(this), scalar_multiple2_op<Scalar,OtherScalar>(scalar));
-  }
-
-  inline friend const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
-  operator*(const OtherScalar& scalar, const Derived& matrix)
-  { return static_cast<const special_scalar_op_base&>(matrix).operator*(scalar); }
-};
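For illustration of what this helper enables at the user level, a complex-valued matrix can be multiplied by a real scalar directly, in either order. A minimal sketch, assuming the public Eigen 3 API:

#include <Eigen/Dense>
#include <complex>
#include <iostream>

int main()
{
  // A complex-valued matrix times a *real* scalar: the mixed-scalar
  // operator* provided by helpers like special_scalar_op_base makes this
  // compile without first promoting 2.0 to std::complex<double>.
  Eigen::Matrix2cd m;
  m << std::complex<double>(1, 2), std::complex<double>(0, 1),
       std::complex<double>(3, 0), std::complex<double>(1, 1);

  Eigen::Matrix2cd a = 2.0 * m;   // real * complex
  Eigen::Matrix2cd b = m * 2.0;   // complex * real

  std::cout << a << "\n\n" << b << "\n";
  return 0;
}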
-
-template<typename XprType, typename CastType> struct cast_return_type
-{
-  typedef typename XprType::Scalar CurrentScalarType;
-  typedef typename remove_all<CastType>::type _CastType;
-  typedef typename _CastType::Scalar NewScalarType;
-  typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
-                              const XprType&,CastType>::type type;
-};
-
-template <typename A, typename B> struct promote_storage_type;
-
-template <typename A> struct promote_storage_type<A,A>
-{
-  typedef A ret;
-};
-
-/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
-  * \param Scalar optional parameter allowing one to pass a scalar type different from that of the MatrixType.
-  */
-template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
-struct plain_row_type
-{
-  typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
-                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
-  typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
-                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;
-
-  typedef typename conditional<
-    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
-    MatrixRowType,
-    ArrayRowType 
-  >::type type;
-};
-
-template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
-struct plain_col_type
-{
-  typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
-                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
-  typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
-                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;
-
-  typedef typename conditional<
-    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
-    MatrixColType,
-    ArrayColType 
-  >::type type;
-};
-
-template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
-struct plain_diag_type
-{
-  enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
-         max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
-  };
-  typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
-  typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;
-
-  typedef typename conditional<
-    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
-    MatrixDiagType,
-    ArrayDiagType 
-  >::type type;
-};
-
-template<typename ExpressionType>
-struct is_lvalue
-{
-  enum { value = !bool(is_const<ExpressionType>::value) &&
-                 bool(traits<ExpressionType>::Flags & LvalueBit) };
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_XPRHELPER_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
deleted file mode 100644
index 7b2b865eb..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
+++ /dev/null
@@ -1,159 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  * \nonstableyet
-  *
-  * \class AlignedBox
-  *
-  * \brief An axis aligned box
-  *
-  * \param _Scalar the type of the scalar coefficients
-  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
-  *
-  * This class represents an axis aligned box as a pair of the minimal and maximal corners.
-  */
-template <typename _Scalar, int _AmbientDim>
-class AlignedBox
-{
-public:
-EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
-  enum { AmbientDimAtCompileTime = _AmbientDim };
-  typedef _Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
-
-  /** Default constructor initializing a null box. */
-  inline explicit AlignedBox()
-  { if (AmbientDimAtCompileTime!=Dynamic) setNull(); }
-
-  /** Constructs a null box with \a _dim the dimension of the ambient space. */
-  inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim)
-  { setNull(); }
-
-  /** Constructs a box with extremities \a _min and \a _max. */
-  inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_min(_min), m_max(_max) {}
-
-  /** Constructs a box containing a single point \a p. */
-  inline explicit AlignedBox(const VectorType& p) : m_min(p), m_max(p) {}
-
-  ~AlignedBox() {}
-
-  /** \returns the dimension of the space in which the box lives */
-  inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; }
-
-  /** \returns true if the box is null, i.e., empty. */
-  inline bool isNull() const { return (m_min.cwise() > m_max).any(); }
-
-  /** Makes \c *this a null/empty box. */
-  inline void setNull()
-  {
-    m_min.setConstant( (std::numeric_limits<Scalar>::max)());
-    m_max.setConstant(-(std::numeric_limits<Scalar>::max)());
-  }
-
-  /** \returns the minimal corner */
-  inline const VectorType& (min)() const { return m_min; }
-  /** \returns a non const reference to the minimal corner */
-  inline VectorType& (min)() { return m_min; }
-  /** \returns the maximal corner */
-  inline const VectorType& (max)() const { return m_max; }
-  /** \returns a non const reference to the maximal corner */
-  inline VectorType& (max)() { return m_max; }
-
-  /** \returns true if the point \a p is inside the box \c *this. */
-  inline bool contains(const VectorType& p) const
-  { return (m_min.cwise()<=p).all() && (p.cwise()<=m_max).all(); }
-
-  /** \returns true if the box \a b is entirely inside the box \c *this. */
-  inline bool contains(const AlignedBox& b) const
-  { return (m_min.cwise()<=(b.min)()).all() && ((b.max)().cwise()<=m_max).all(); }
-
-  /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
-  inline AlignedBox& extend(const VectorType& p)
-  { m_min = (m_min.cwise().min)(p); m_max = (m_max.cwise().max)(p); return *this; }
-
-  /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
-  inline AlignedBox& extend(const AlignedBox& b)
-  { m_min = (m_min.cwise().min)(b.m_min); m_max = (m_max.cwise().max)(b.m_max); return *this; }
-
-  /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
-  inline AlignedBox& clamp(const AlignedBox& b)
-  { m_min = (m_min.cwise().max)(b.m_min); m_max = (m_max.cwise().min)(b.m_max); return *this; }
-
-  /** Translate \c *this by the vector \a t and returns a reference to \c *this. */
-  inline AlignedBox& translate(const VectorType& t)
-  { m_min += t; m_max += t; return *this; }
-
-  /** \returns the squared distance between the point \a p and the box \c *this,
-    * and zero if \a p is inside the box.
-    * \sa exteriorDistance()
-    */
-  inline Scalar squaredExteriorDistance(const VectorType& p) const;
-
-  /** \returns the distance between the point \a p and the box \c *this,
-    * and zero if \a p is inside the box.
-    * \sa squaredExteriorDistance()
-    */
-  inline Scalar exteriorDistance(const VectorType& p) const
-  { return ei_sqrt(squaredExteriorDistance(p)); }
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<AlignedBox,
-           AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
-  {
-    return typename internal::cast_return_type<AlignedBox,
-                    AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
-  }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
-  {
-    m_min = (other.min)().template cast<Scalar>();
-    m_max = (other.max)().template cast<Scalar>();
-  }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const AlignedBox& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
-  { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }
-
-protected:
-
-  VectorType m_min, m_max;
-};
-
-template<typename Scalar,int AmbiantDim>
-inline Scalar AlignedBox<Scalar,AmbiantDim>::squaredExteriorDistance(const VectorType& p) const
-{
-  Scalar dist2(0);
-  Scalar aux;
-  for (int k=0; k<dim(); ++k)
-  {
-    if ((aux = (p[k]-m_min[k]))<Scalar(0))
-      dist2 += aux*aux;
-    else if ( (aux = (m_max[k]-p[k]))<Scalar(0))
-      dist2 += aux*aux;
-  }
-  return dist2;
-}
-
-} // end namespace Eigen
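A minimal usage sketch of the interface documented above (extend, contains, exterior distance), written against the public Eigen 3 geometry module (assuming the Eigen::AlignedBox3d typedef) rather than this removed Eigen2-support copy:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using Eigen::Vector3d;

  // Grow an (initially empty) box so that it contains two points.
  Eigen::AlignedBox3d box;
  box.extend(Vector3d(0, 0, 0));
  box.extend(Vector3d(1, 2, 3));

  std::cout << "min corner: " << box.min().transpose() << "\n";
  std::cout << "max corner: " << box.max().transpose() << "\n";

  // A point inside the box has zero exterior distance.
  Vector3d inside(0.5, 1.0, 1.5);
  Vector3d outside(2.0, 2.0, 3.0);
  std::cout << box.contains(inside) << " "
            << box.squaredExteriorDistance(inside) << "\n";   // 1 0
  std::cout << box.contains(outside) << " "
            << box.squaredExteriorDistance(outside) << "\n";  // 0 1
  return 0;
}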
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/AngleAxis.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
deleted file mode 100644
index af598a403..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
+++ /dev/null
@@ -1,214 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class AngleAxis
-  *
-  * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients.
-  *
-  * The following two typedefs are provided for convenience:
-  * \li \c AngleAxisf for \c float
-  * \li \c AngleAxisd for \c double
-  *
-  * \addexample AngleAxisForEuler \label How to define a rotation from Euler-angles
-  *
-  * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily
-  * mimic Euler-angles. Here is an example:
-  * \include AngleAxis_mimic_euler.cpp
-  * Output: \verbinclude AngleAxis_mimic_euler.out
-  *
-  * \note This class is not meant to be used to store a rotation transformation,
-  * but rather to ease the creation of other rotation (Quaternion, rotation Matrix)
-  * and transformation objects.
-  *
-  * \sa class Quaternion, class Transform, MatrixBase::UnitX()
-  */
-
-template<typename _Scalar> struct ei_traits<AngleAxis<_Scalar> >
-{
-  typedef _Scalar Scalar;
-};
-
-template<typename _Scalar>
-class AngleAxis : public RotationBase<AngleAxis<_Scalar>,3>
-{
-  typedef RotationBase<AngleAxis<_Scalar>,3> Base;
-
-public:
-
-  using Base::operator*;
-
-  enum { Dim = 3 };
-  /** the scalar type of the coefficients */
-  typedef _Scalar Scalar;
-  typedef Matrix<Scalar,3,3> Matrix3;
-  typedef Matrix<Scalar,3,1> Vector3;
-  typedef Quaternion<Scalar> QuaternionType;
-
-protected:
-
-  Vector3 m_axis;
-  Scalar m_angle;
-
-public:
-
-  /** Default constructor without initialization. */
-  AngleAxis() {}
-  /** Constructs and initializes the angle-axis rotation from an \a angle in radians
-    * and an \a axis which must be normalized. */
-  template<typename Derived>
-  inline AngleAxis(Scalar angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
-  /** Constructs and initializes the angle-axis rotation from a quaternion \a q. */
-  inline AngleAxis(const QuaternionType& q) { *this = q; }
-  /** Constructs and initializes the angle-axis rotation from a 3x3 rotation matrix. */
-  template<typename Derived>
-  inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }
-
-  Scalar angle() const { return m_angle; }
-  Scalar& angle() { return m_angle; }
-
-  const Vector3& axis() const { return m_axis; }
-  Vector3& axis() { return m_axis; }
-
-  /** Concatenates two rotations */
-  inline QuaternionType operator* (const AngleAxis& other) const
-  { return QuaternionType(*this) * QuaternionType(other); }
-
-  /** Concatenates two rotations */
-  inline QuaternionType operator* (const QuaternionType& other) const
-  { return QuaternionType(*this) * other; }
-
-  /** Concatenates two rotations */
-  friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b)
-  { return a * QuaternionType(b); }
-
-  /** Concatenates two rotations */
-  inline Matrix3 operator* (const Matrix3& other) const
-  { return toRotationMatrix() * other; }
-
-  /** Concatenates two rotations */
-  inline friend Matrix3 operator* (const Matrix3& a, const AngleAxis& b)
-  { return a * b.toRotationMatrix(); }
-
-  /** Applies rotation to vector */
-  inline Vector3 operator* (const Vector3& other) const
-  { return toRotationMatrix() * other; }
-
-  /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */
-  AngleAxis inverse() const
-  { return AngleAxis(-m_angle, m_axis); }
-
-  AngleAxis& operator=(const QuaternionType& q);
-  template<typename Derived>
-  AngleAxis& operator=(const MatrixBase<Derived>& m);
-
-  template<typename Derived>
-  AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m);
-  Matrix3 toRotationMatrix(void) const;
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const
-  { return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit AngleAxis(const AngleAxis<OtherScalarType>& other)
-  {
-    m_axis = other.axis().template cast<Scalar>();
-    m_angle = Scalar(other.angle());
-  }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const AngleAxis& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
-  { return m_axis.isApprox(other.m_axis, prec) && ei_isApprox(m_angle,other.m_angle, prec); }
-};
-
-/** \ingroup Geometry_Module
-  * single precision angle-axis type */
-typedef AngleAxis<float> AngleAxisf;
-/** \ingroup Geometry_Module
-  * double precision angle-axis type */
-typedef AngleAxis<double> AngleAxisd;
-
-/** Set \c *this from a quaternion.
-  * The axis is normalized.
-  */
-template<typename Scalar>
-AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionType& q)
-{
-  Scalar n2 = q.vec().squaredNorm();
-  if (n2 < precision<Scalar>()*precision<Scalar>())
-  {
-    m_angle = 0;
-    m_axis << 1, 0, 0;
-  }
-  else
-  {
-    m_angle = 2*std::acos(q.w());
-    m_axis = q.vec() / ei_sqrt(n2);
-  }
-  return *this;
-}
-
-/** Set \c *this from a 3x3 rotation matrix \a mat.
-  */
-template<typename Scalar>
-template<typename Derived>
-AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)
-{
-  // Since a direct conversion would not be really faster,
-  // let's use the robust Quaternion implementation:
-  return *this = QuaternionType(mat);
-}
-
-/** Constructs and \returns an equivalent 3x3 rotation matrix.
-  */
-template<typename Scalar>
-typename AngleAxis<Scalar>::Matrix3
-AngleAxis<Scalar>::toRotationMatrix(void) const
-{
-  Matrix3 res;
-  Vector3 sin_axis  = ei_sin(m_angle) * m_axis;
-  Scalar c = ei_cos(m_angle);
-  Vector3 cos1_axis = (Scalar(1)-c) * m_axis;
-
-  Scalar tmp;
-  tmp = cos1_axis.x() * m_axis.y();
-  res.coeffRef(0,1) = tmp - sin_axis.z();
-  res.coeffRef(1,0) = tmp + sin_axis.z();
-
-  tmp = cos1_axis.x() * m_axis.z();
-  res.coeffRef(0,2) = tmp + sin_axis.y();
-  res.coeffRef(2,0) = tmp - sin_axis.y();
-
-  tmp = cos1_axis.y() * m_axis.z();
-  res.coeffRef(1,2) = tmp - sin_axis.x();
-  res.coeffRef(2,1) = tmp + sin_axis.x();
-
-  res.diagonal() = (cos1_axis.cwise() * m_axis).cwise() + c;
-
-  return res;
-}
-
-} // end namespace Eigen
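The class documentation above mentions mimicking Euler angles by combining AngleAxis with MatrixBase::Unit{X,Y,Z}. A minimal sketch of that pattern, assuming the public Eigen 3 API (AngleAxisd, Quaterniond):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  double roll = 0.1, pitch = 0.2, yaw = 0.3;  // radians

  // Compose three elementary rotations; AngleAxis products yield a quaternion.
  Quaterniond q = AngleAxisd(yaw,   Vector3d::UnitZ())
                * AngleAxisd(pitch, Vector3d::UnitY())
                * AngleAxisd(roll,  Vector3d::UnitX());

  Matrix3d R = q.toRotationMatrix();
  std::cout << "R =\n" << R << "\n";
  std::cout << "q = " << q.coeffs().transpose() << "\n";  // stored as x y z w

  // Going back: recover an angle-axis representation from the quaternion.
  AngleAxisd aa(q);
  std::cout << "angle = " << aa.angle()
            << ", axis = " << aa.axis().transpose() << "\n";
  return 0;
}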
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
deleted file mode 100644
index 49e37392d..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
+++ /dev/null
@@ -1,254 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class Hyperplane
-  *
-  * \brief A hyperplane
-  *
-  * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n.
-  * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane.
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients
-  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
-  *             Notice that the dimension of the hyperplane is _AmbientDim-1.
-  *
-  * This class represents a hyperplane as the zero set of the implicit equation
-  * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part)
-  * and \f$ d \f$ is the distance (offset) to the origin.
-  */
-template <typename _Scalar, int _AmbientDim>
-class Hyperplane
-{
-public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
-  enum { AmbientDimAtCompileTime = _AmbientDim };
-  typedef _Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
-  typedef Matrix<Scalar,int(AmbientDimAtCompileTime)==Dynamic
-                        ? Dynamic
-                        : int(AmbientDimAtCompileTime)+1,1> Coefficients;
-  typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
-
-  /** Default constructor without initialization */
-  inline explicit Hyperplane() {}
-
-  /** Constructs a dynamic-size hyperplane with \a _dim the dimension
-    * of the ambient space */
-  inline explicit Hyperplane(int _dim) : m_coeffs(_dim+1) {}
-
-  /** Construct a plane from its normal \a n and a point \a e onto the plane.
-    * \warning the vector normal is assumed to be normalized.
-    */
-  inline Hyperplane(const VectorType& n, const VectorType& e)
-    : m_coeffs(n.size()+1)
-  {
-    normal() = n;
-    offset() = -e.eigen2_dot(n);
-  }
-
-  /** Constructs a plane from its normal \a n and distance to the origin \a d
-    * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$.
-    * \warning the vector normal is assumed to be normalized.
-    */
-  inline Hyperplane(const VectorType& n, Scalar d)
-    : m_coeffs(n.size()+1)
-  {
-    normal() = n;
-    offset() = d;
-  }
-
-  /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space
-    * is greater than 2, the hyperplane is not uniquely determined, so an arbitrary choice is made.
-    */
-  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1)
-  {
-    Hyperplane result(p0.size());
-    result.normal() = (p1 - p0).unitOrthogonal();
-    result.offset() = -result.normal().eigen2_dot(p0);
-    return result;
-  }
-
-  /** Constructs a hyperplane passing through the three points. The dimension of the ambient space
-    * is required to be exactly 3.
-    */
-  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2)
-  {
-    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
-    Hyperplane result(p0.size());
-    result.normal() = (p2 - p0).cross(p1 - p0).normalized();
-    result.offset() = -result.normal().eigen2_dot(p0);
-    return result;
-  }
-
-  /** Constructs a hyperplane passing through the parametrized line \a parametrized.
-    * If the dimension of the ambient space is greater than 2, then there isn't uniqueness,
-    * so an arbitrary choice is made.
-    */
-  // FIXME to be consistent with the rest this could be implemented as a static Through function ??
-  explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
-  {
-    normal() = parametrized.direction().unitOrthogonal();
-    offset() = -normal().eigen2_dot(parametrized.origin());
-  }
-
-  ~Hyperplane() {}
-
-  /** \returns the dimension of the space in which the plane lives */
-  inline int dim() const { return int(AmbientDimAtCompileTime)==Dynamic ? m_coeffs.size()-1 : int(AmbientDimAtCompileTime); }
-
-  /** normalizes \c *this */
-  void normalize(void)
-  {
-    m_coeffs /= normal().norm();
-  }
-
-  /** \returns the signed distance between the plane \c *this and a point \a p.
-    * \sa absDistance()
-    */
-  inline Scalar signedDistance(const VectorType& p) const { return p.eigen2_dot(normal()) + offset(); }
-
-  /** \returns the absolute distance between the plane \c *this and a point \a p.
-    * \sa signedDistance()
-    */
-  inline Scalar absDistance(const VectorType& p) const { return ei_abs(signedDistance(p)); }
-
-  /** \returns the projection of a point \a p onto the plane \c *this.
-    */
-  inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); }
-
-  /** \returns a constant reference to the unit normal vector of the plane, which corresponds
-    * to the linear part of the implicit equation.
-    */
-  inline const NormalReturnType normal() const { return NormalReturnType(*const_cast<Coefficients*>(&m_coeffs),0,0,dim(),1); }
-
-  /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds
-    * to the linear part of the implicit equation.
-    */
-  inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); }
-
-  /** \returns the distance to the origin, which is also the "constant term" of the implicit equation
-    * \warning the vector normal is assumed to be normalized.
-    */
-  inline const Scalar& offset() const { return m_coeffs.coeff(dim()); }
-
-  /** \returns a non-constant reference to the distance to the origin, which is also the constant part
-    * of the implicit equation */
-  inline Scalar& offset() { return m_coeffs(dim()); }
-
-  /** \returns a constant reference to the coefficients c_i of the plane equation:
-    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
-    */
-  inline const Coefficients& coeffs() const { return m_coeffs; }
-
-  /** \returns a non-constant reference to the coefficients c_i of the plane equation:
-    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
-    */
-  inline Coefficients& coeffs() { return m_coeffs; }
-
-  /** \returns the intersection of *this with \a other.
-    *
-    * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines.
-    *
-    * \note If \a other is approximately parallel to *this, this method will return any point on *this.
-    */
-  VectorType intersection(const Hyperplane& other)
-  {
-    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
-    Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
-    // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests
-    // whether the two lines are approximately parallel.
-    if(ei_isMuchSmallerThan(det, Scalar(1)))
-    {   // special case where the two lines are approximately parallel. Pick any point on the first line.
-        if(ei_abs(coeffs().coeff(1))>ei_abs(coeffs().coeff(0)))
-            return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));
-        else
-            return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));
-    }
-    else
-    {   // general case
-        Scalar invdet = Scalar(1) / det;
-        return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)),
-                          invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2)));
-    }
-  }
-
-  /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this.
-    *
-    * \param mat the Dim x Dim transformation matrix
-    * \param traits specifies whether the matrix \a mat represents an Isometry
-    *               or a more generic Affine transformation. The default is Affine.
-    */
-  template<typename XprType>
-  inline Hyperplane& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine)
-  {
-    if (traits==Affine)
-      normal() = mat.inverse().transpose() * normal();
-    else if (traits==Isometry)
-      normal() = mat * normal();
-    else
-    {
-      ei_assert("invalid traits value in Hyperplane::transform()");
-    }
-    return *this;
-  }
-
-  /** Applies the transformation \a t to \c *this and returns a reference to \c *this.
-    *
-    * \param t the transformation of dimension Dim
-    * \param traits specifies whether the transformation \a t represents an Isometry
-    *               or a more generic Affine transformation. The default is Affine.
-    *               Other kind of transformations are not supported.
-    */
-  inline Hyperplane& transform(const Transform<Scalar,AmbientDimAtCompileTime>& t,
-                                TransformTraits traits = Affine)
-  {
-    transform(t.linear(), traits);
-    offset() -= t.translation().eigen2_dot(normal());
-    return *this;
-  }
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<Hyperplane,
-           Hyperplane<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
-  {
-    return typename internal::cast_return_type<Hyperplane,
-                    Hyperplane<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
-  }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit Hyperplane(const Hyperplane<OtherScalarType,AmbientDimAtCompileTime>& other)
-  { m_coeffs = other.coeffs().template cast<Scalar>(); }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const Hyperplane& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
-  { return m_coeffs.isApprox(other.m_coeffs, prec); }
-
-protected:
-
-  Coefficients m_coeffs;
-};
-
-} // end namespace Eigen
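A small sketch of the hyperplane interface described above, treating 2D hyperplanes as lines; it assumes the public Eigen 3 geometry API (Hyperplane<double,2> with Through, signedDistance, projection, coeffs and intersection):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using Eigen::Vector2d;
  typedef Eigen::Hyperplane<double, 2> Line2;   // a hyperplane in 2D is a line

  // The line y = x, and the line y = 1.
  Line2 l1 = Line2::Through(Vector2d(0, 0), Vector2d(1, 1));
  Line2 l2 = Line2::Through(Vector2d(0, 1), Vector2d(1, 1));

  Vector2d p(3, 0);
  std::cout << "signed distance of p to l2: " << l2.signedDistance(p) << "\n";
  std::cout << "projection of p onto l1:    " << l1.projection(p).transpose() << "\n";

  // The n . x + d = 0 coefficients, and the intersection of the two lines.
  std::cout << "l1 coeffs: " << l1.coeffs().transpose() << "\n";
  std::cout << "l1 ^ l2:   " << l1.intersection(l2).transpose() << "\n";  // (1, 1)
  return 0;
}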
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
deleted file mode 100644
index 3523611ee..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class ParametrizedLine
-  *
-  * \brief A parametrized line
-  *
-  * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit
-  * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to
-  * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$.
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients
-  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
-  */
-template <typename _Scalar, int _AmbientDim>
-class ParametrizedLine
-{
-public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
-  enum { AmbientDimAtCompileTime = _AmbientDim };
-  typedef _Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
-
-  /** Default constructor without initialization */
-  inline explicit ParametrizedLine() {}
-
-  /** Constructs a dynamic-size line with \a _dim the dimension
-    * of the ambient space */
-  inline explicit ParametrizedLine(int _dim) : m_origin(_dim), m_direction(_dim) {}
-
-  /** Initializes a parametrized line of direction \a direction and origin \a origin.
-    * \warning the vector direction is assumed to be normalized.
-    */
-  ParametrizedLine(const VectorType& origin, const VectorType& direction)
-    : m_origin(origin), m_direction(direction) {}
-
-  explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane);
-
-  /** Constructs a parametrized line going from \a p0 to \a p1. */
-  static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1)
-  { return ParametrizedLine(p0, (p1-p0).normalized()); }
-
-  ~ParametrizedLine() {}
-
-  /** \returns the dimension of the space in which the line lives */
-  inline int dim() const { return m_direction.size(); }
-
-  const VectorType& origin() const { return m_origin; }
-  VectorType& origin() { return m_origin; }
-
-  const VectorType& direction() const { return m_direction; }
-  VectorType& direction() { return m_direction; }
-
-  /** \returns the squared distance of a point \a p to its projection onto the line \c *this.
-    * \sa distance()
-    */
-  RealScalar squaredDistance(const VectorType& p) const
-  {
-    VectorType diff = p-origin();
-    return (diff - diff.eigen2_dot(direction())* direction()).squaredNorm();
-  }
-  /** \returns the distance of a point \a p to its projection onto the line \c *this.
-    * \sa squaredDistance()
-    */
-  RealScalar distance(const VectorType& p) const { return ei_sqrt(squaredDistance(p)); }
-
-  /** \returns the projection of a point \a p onto the line \c *this. */
-  VectorType projection(const VectorType& p) const
-  { return origin() + (p-origin()).eigen2_dot(direction()) * direction(); }
-
-  Scalar intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane);
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<ParametrizedLine,
-           ParametrizedLine<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
-  {
-    return typename internal::cast_return_type<ParametrizedLine,
-                    ParametrizedLine<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
-  }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit ParametrizedLine(const ParametrizedLine<OtherScalarType,AmbientDimAtCompileTime>& other)
-  {
-    m_origin = other.origin().template cast<Scalar>();
-    m_direction = other.direction().template cast<Scalar>();
-  }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const ParametrizedLine& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
-  { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }
-
-protected:
-
-  VectorType m_origin, m_direction;
-};
-
-/** Constructs a parametrized line from a 2D hyperplane
-  *
-  * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line
-  */
-template <typename _Scalar, int _AmbientDim>
-inline ParametrizedLine<_Scalar, _AmbientDim>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
-  direction() = hyperplane.normal().unitOrthogonal();
-  origin() = -hyperplane.normal()*hyperplane.offset();
-}
-
-/** \returns the parameter value of the intersection between \c *this and the given hyperplane
-  */
-template <typename _Scalar, int _AmbientDim>
-inline _Scalar ParametrizedLine<_Scalar, _AmbientDim>::intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane)
-{
-  return -(hyperplane.offset()+origin().eigen2_dot(hyperplane.normal()))
-          /(direction().eigen2_dot(hyperplane.normal()));
-}
-
-} // end namespace Eigen
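A quick check of the intersection formula implemented above, t = -(offset + origin.n) / (direction.n), assuming the public Eigen 3 API (ParametrizedLine, Hyperplane, and dot() in place of eigen2_dot()):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using Eigen::Vector2d;
  typedef Eigen::ParametrizedLine<double, 2> Line;
  typedef Eigen::Hyperplane<double, 2> HPlane;

  // Line l(t) = o + t*d starting at the origin, pointing along +x.
  Line line(Vector2d(0, 0), Vector2d(1, 0));

  // The vertical line x = 2, written as a hyperplane n.x + c = 0.
  HPlane plane(Vector2d(1, 0), -2.0);

  // Intersection parameter as computed by the library ...
  double t = line.intersection(plane);

  // ... and the same value from the formula t = -(c + o.n) / (d.n).
  double t_manual = -(plane.offset() + line.origin().dot(plane.normal()))
                    / line.direction().dot(plane.normal());

  std::cout << t << " == " << t_manual << "\n";                             // both 2
  std::cout << (line.origin() + t * line.direction()).transpose() << "\n";  // (2, 0)
  return 0;
}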
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h
deleted file mode 100644
index 4b6390cf1..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h
+++ /dev/null
@@ -1,495 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
-
-namespace Eigen { 
-
-template<typename Other,
-         int OtherRows=Other::RowsAtCompileTime,
-         int OtherCols=Other::ColsAtCompileTime>
-struct ei_quaternion_assign_impl;
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class Quaternion
-  *
-  * \brief The quaternion class used to represent 3D orientations and rotations
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients
-  *
-  * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
-  * orientations and rotations of objects in three dimensions. Compared to other representations
-  * like Euler angles or 3x3 matrices, quaternions offer the following advantages:
-  * \li \b compact storage (4 scalars)
-  * \li \b efficient to compose (28 flops),
-  * \li \b stable spherical interpolation
-  *
-  * The following two typedefs are provided for convenience:
-  * \li \c Quaternionf for \c float
-  * \li \c Quaterniond for \c double
-  *
-  * \sa  class AngleAxis, class Transform
-  */
-
-template<typename _Scalar> struct ei_traits<Quaternion<_Scalar> >
-{
-  typedef _Scalar Scalar;
-};
-
-template<typename _Scalar>
-class Quaternion : public RotationBase<Quaternion<_Scalar>,3>
-{
-  typedef RotationBase<Quaternion<_Scalar>,3> Base;
-
-public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,4)
-
-  using Base::operator*;
-
-  /** the scalar type of the coefficients */
-  typedef _Scalar Scalar;
-
-  /** the type of the Coefficients 4-vector */
-  typedef Matrix<Scalar, 4, 1> Coefficients;
-  /** the type of a 3D vector */
-  typedef Matrix<Scalar,3,1> Vector3;
-  /** the equivalent rotation matrix type */
-  typedef Matrix<Scalar,3,3> Matrix3;
-  /** the equivalent angle-axis type */
-  typedef AngleAxis<Scalar> AngleAxisType;
-
-  /** \returns the \c x coefficient */
-  inline Scalar x() const { return m_coeffs.coeff(0); }
-  /** \returns the \c y coefficient */
-  inline Scalar y() const { return m_coeffs.coeff(1); }
-  /** \returns the \c z coefficient */
-  inline Scalar z() const { return m_coeffs.coeff(2); }
-  /** \returns the \c w coefficient */
-  inline Scalar w() const { return m_coeffs.coeff(3); }
-
-  /** \returns a reference to the \c x coefficient */
-  inline Scalar& x() { return m_coeffs.coeffRef(0); }
-  /** \returns a reference to the \c y coefficient */
-  inline Scalar& y() { return m_coeffs.coeffRef(1); }
-  /** \returns a reference to the \c z coefficient */
-  inline Scalar& z() { return m_coeffs.coeffRef(2); }
-  /** \returns a reference to the \c w coefficient */
-  inline Scalar& w() { return m_coeffs.coeffRef(3); }
-
-  /** \returns a read-only vector expression of the imaginary part (x,y,z) */
-  inline const Block<const Coefficients,3,1> vec() const { return m_coeffs.template start<3>(); }
-
-  /** \returns a vector expression of the imaginary part (x,y,z) */
-  inline Block<Coefficients,3,1> vec() { return m_coeffs.template start<3>(); }
-
-  /** \returns a read-only vector expression of the coefficients (x,y,z,w) */
-  inline const Coefficients& coeffs() const { return m_coeffs; }
-
-  /** \returns a vector expression of the coefficients (x,y,z,w) */
-  inline Coefficients& coeffs() { return m_coeffs; }
-
-  /** Default constructor leaving the quaternion uninitialized. */
-  inline Quaternion() {}
-
-  /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from
-    * its four coefficients \a w, \a x, \a y and \a z.
-    *
-    * \warning Note the order of the arguments: the real \a w coefficient first,
-    * while internally the coefficients are stored in the following order:
-    * [\c x, \c y, \c z, \c w]
-    */
-  inline Quaternion(Scalar w, Scalar x, Scalar y, Scalar z)
-  { m_coeffs << x, y, z, w; }
-
-  /** Copy constructor */
-  inline Quaternion(const Quaternion& other) { m_coeffs = other.m_coeffs; }
-
-  /** Constructs and initializes a quaternion from the angle-axis \a aa */
-  explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; }
-
-  /** Constructs and initializes a quaternion from either:
-    *  - a rotation matrix expression,
-    *  - a 4D vector expression representing quaternion coefficients.
-    * \sa operator=(MatrixBase<Derived>)
-    */
-  template<typename Derived>
-  explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; }
-
-  Quaternion& operator=(const Quaternion& other);
-  Quaternion& operator=(const AngleAxisType& aa);
-  template<typename Derived>
-  Quaternion& operator=(const MatrixBase<Derived>& m);
-
-  /** \returns a quaternion representing an identity rotation
-    * \sa MatrixBase::Identity()
-    */
-  static inline Quaternion Identity() { return Quaternion(1, 0, 0, 0); }
-
-  /** \sa Quaternion::Identity(), MatrixBase::setIdentity()
-    */
-  inline Quaternion& setIdentity() { m_coeffs << 0, 0, 0, 1; return *this; }
-
-  /** \returns the squared norm of the quaternion's coefficients
-    * \sa Quaternion::norm(), MatrixBase::squaredNorm()
-    */
-  inline Scalar squaredNorm() const { return m_coeffs.squaredNorm(); }
-
-  /** \returns the norm of the quaternion's coefficients
-    * \sa Quaternion::squaredNorm(), MatrixBase::norm()
-    */
-  inline Scalar norm() const { return m_coeffs.norm(); }
-
-  /** Normalizes the quaternion \c *this
-    * \sa normalized(), MatrixBase::normalize() */
-  inline void normalize() { m_coeffs.normalize(); }
-  /** \returns a normalized version of \c *this
-    * \sa normalize(), MatrixBase::normalized() */
-  inline Quaternion normalized() const { return Quaternion(m_coeffs.normalized()); }
-
-  /** \returns the dot product of \c *this and \a other
-    * Geometrically speaking, the dot product of two unit quaternions
-    * corresponds to the cosine of half the angle between the two rotations.
-    * \sa angularDistance()
-    */
-  inline Scalar eigen2_dot(const Quaternion& other) const { return m_coeffs.eigen2_dot(other.m_coeffs); }
-
-  inline Scalar angularDistance(const Quaternion& other) const;
-
-  Matrix3 toRotationMatrix(void) const;
-
-  template<typename Derived1, typename Derived2>
-  Quaternion& setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
-
-  inline Quaternion operator* (const Quaternion& q) const;
-  inline Quaternion& operator*= (const Quaternion& q);
-
-  Quaternion inverse(void) const;
-  Quaternion conjugate(void) const;
-
-  Quaternion slerp(Scalar t, const Quaternion& other) const;
-
-  template<typename Derived>
-  Vector3 operator* (const MatrixBase<Derived>& vec) const;
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<Quaternion,Quaternion<NewScalarType> >::type cast() const
-  { return typename internal::cast_return_type<Quaternion,Quaternion<NewScalarType> >::type(*this); }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit Quaternion(const Quaternion<OtherScalarType>& other)
-  { m_coeffs = other.coeffs().template cast<Scalar>(); }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const Quaternion& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
-  { return m_coeffs.isApprox(other.m_coeffs, prec); }
-
-protected:
-  Coefficients m_coeffs;
-};
-
-/** \ingroup Geometry_Module
-  * single precision quaternion type */
-typedef Quaternion<float> Quaternionf;
-/** \ingroup Geometry_Module
-  * double precision quaternion type */
-typedef Quaternion<double> Quaterniond;
-
-// Generic Quaternion * Quaternion product
-template<typename Scalar> inline Quaternion<Scalar>
-ei_quaternion_product(const Quaternion<Scalar>& a, const Quaternion<Scalar>& b)
-{
-  return Quaternion<Scalar>
-  (
-    a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),
-    a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),
-    a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),
-    a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x()
-  );
-}
-
-/** \returns the concatenation of two rotations as a quaternion-quaternion product */
-template <typename Scalar>
-inline Quaternion<Scalar> Quaternion<Scalar>::operator* (const Quaternion& other) const
-{
-  return ei_quaternion_product(*this,other);
-}
-
-/** \sa operator*(Quaternion) */
-template <typename Scalar>
-inline Quaternion<Scalar>& Quaternion<Scalar>::operator*= (const Quaternion& other)
-{
-  return (*this = *this * other);
-}
-
-/** Rotation of a vector by a quaternion.
-  * \remarks If the quaternion is used to rotate several points (>1)
-  * then it is much more efficient to first convert it to a 3x3 Matrix.
-  * Comparison of the operation cost for n transformations:
-  *   - Quaternion:    30n
-  *   - Via a Matrix3: 24 + 15n
-  */
-template <typename Scalar>
-template<typename Derived>
-inline typename Quaternion<Scalar>::Vector3
-Quaternion<Scalar>::operator* (const MatrixBase<Derived>& v) const
-{
-    // Note that this algorithm comes from the optimization by hand
-    // of the conversion to a Matrix followed by a Matrix/Vector product.
-    // It appears to be much faster than the common algorithm found
-    // in the literature (30 versus 39 flops). It also requires two
-    // Vector3 as temporaries.
-    Vector3 uv;
-    uv = 2 * this->vec().cross(v);
-    return v + this->w() * uv + this->vec().cross(uv);
-}
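The comment above describes the hand-expanded rotation v + w*uv + vec x uv with uv = 2*vec x v. A small sketch comparing that formula against the rotation-matrix path, assuming the public Eigen 3 Quaterniond API:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  Quaterniond q(AngleAxisd(0.7, Vector3d(1, 2, 3).normalized()));
  Vector3d v(0.5, -1.0, 2.0);

  // Hand-expanded form used by operator*: v + w*uv + vec x uv, uv = 2*vec x v.
  Vector3d uv = 2.0 * q.vec().cross(v);
  Vector3d by_formula = v + q.w() * uv + q.vec().cross(uv);

  // Reference: convert to a 3x3 matrix and multiply.
  Vector3d by_matrix = q.toRotationMatrix() * v;

  std::cout << (by_formula - by_matrix).norm() << "\n";  // ~0 (round-off only)
  std::cout << (q * v - by_formula).norm() << "\n";      // ~0 as well
  return 0;
}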
-
-template<typename Scalar>
-inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const Quaternion& other)
-{
-  m_coeffs = other.m_coeffs;
-  return *this;
-}
-
-/** Set \c *this from an angle-axis \a aa and returns a reference to \c *this
-  */
-template<typename Scalar>
-inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const AngleAxisType& aa)
-{
-  Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings
-  this->w() = ei_cos(ha);
-  this->vec() = ei_sin(ha) * aa.axis();
-  return *this;
-}
-
-/** Set \c *this from the expression \a xpr:
-  *   - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion
-  *   - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be a rotation matrix
-  *     and \a xpr is converted to a quaternion
-  */
-template<typename Scalar>
-template<typename Derived>
-inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const MatrixBase<Derived>& xpr)
-{
-  ei_quaternion_assign_impl<Derived>::run(*this, xpr.derived());
-  return *this;
-}
-
-/** Convert the quaternion to a 3x3 rotation matrix */
-template<typename Scalar>
-inline typename Quaternion<Scalar>::Matrix3
-Quaternion<Scalar>::toRotationMatrix(void) const
-{
-  // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!)
-  // if not inlined then the cost of the return by value is huge ~ +35%,
-  // however, not inlining this function is an order of magnitude slower, so
-  // it has to be inlined, and so the return by value is not an issue
-  Matrix3 res;
-
-  const Scalar tx  = Scalar(2)*this->x();
-  const Scalar ty  = Scalar(2)*this->y();
-  const Scalar tz  = Scalar(2)*this->z();
-  const Scalar twx = tx*this->w();
-  const Scalar twy = ty*this->w();
-  const Scalar twz = tz*this->w();
-  const Scalar txx = tx*this->x();
-  const Scalar txy = ty*this->x();
-  const Scalar txz = tz*this->x();
-  const Scalar tyy = ty*this->y();
-  const Scalar tyz = tz*this->y();
-  const Scalar tzz = tz*this->z();
-
-  res.coeffRef(0,0) = Scalar(1)-(tyy+tzz);
-  res.coeffRef(0,1) = txy-twz;
-  res.coeffRef(0,2) = txz+twy;
-  res.coeffRef(1,0) = txy+twz;
-  res.coeffRef(1,1) = Scalar(1)-(txx+tzz);
-  res.coeffRef(1,2) = tyz-twx;
-  res.coeffRef(2,0) = txz-twy;
-  res.coeffRef(2,1) = tyz+twx;
-  res.coeffRef(2,2) = Scalar(1)-(txx+tyy);
-
-  return res;
-}
-
-/** Sets *this to be a quaternion representing a rotation sending the vector \a a to the vector \a b.
-  *
-  * \returns a reference to *this.
-  *
-  * Note that the two input vectors do \b not have to be normalized.
-  */
-template<typename Scalar>
-template<typename Derived1, typename Derived2>
-inline Quaternion<Scalar>& Quaternion<Scalar>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
-{
-  Vector3 v0 = a.normalized();
-  Vector3 v1 = b.normalized();
-  Scalar c = v0.eigen2_dot(v1);
-
-  // if dot == 1, vectors are the same
-  if (ei_isApprox(c,Scalar(1)))
-  {
-    // set to identity
-    this->w() = 1; this->vec().setZero();
-    return *this;
-  }
-  // if dot == -1, vectors are opposites
-  if (ei_isApprox(c,Scalar(-1)))
-  {
-    this->vec() = v0.unitOrthogonal();
-    this->w() = 0;
-    return *this;
-  }
-
-  Vector3 axis = v0.cross(v1);
-  Scalar s = ei_sqrt((Scalar(1)+c)*Scalar(2));
-  Scalar invs = Scalar(1)/s;
-  this->vec() = axis * invs;
-  this->w() = s * Scalar(0.5);
-
-  return *this;
-}
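A short sketch of the behaviour documented above (the inputs need not be normalized, and antiparallel inputs hit the 180-degree special case), assuming the public Eigen 3 setFromTwoVectors() member:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  Vector3d a(1, 0, 0), b(0, 2, 0);   // deliberately not normalized

  Quaterniond q;
  q.setFromTwoVectors(a, b);
  std::cout << (q * a.normalized()).transpose() << "\n";  // ~ (0, 1, 0)

  // Antiparallel vectors: a 180-degree rotation about some axis orthogonal to a.
  Quaterniond r;
  r.setFromTwoVectors(a, -a);
  std::cout << (r * a).transpose() << "\n";               // ~ (-1, 0, 0)
  return 0;
}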
-
-/** \returns the multiplicative inverse of \c *this
-  * Note that in most cases, i.e., if you simply want the opposite rotation,
-  * and/or the quaternion is normalized, then it is enough to use the conjugate.
-  *
-  * \sa Quaternion::conjugate()
-  */
-template <typename Scalar>
-inline Quaternion<Scalar> Quaternion<Scalar>::inverse() const
-{
-  // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite()  ??
-  Scalar n2 = this->squaredNorm();
-  if (n2 > 0)
-    return Quaternion(conjugate().coeffs() / n2);
-  else
-  {
-    // return an invalid result to flag the error
-    return Quaternion(Coefficients::Zero());
-  }
-}
-
-/** \returns the conjugate of \c *this, which is equal to the multiplicative inverse
-  * if the quaternion is normalized.
-  * The conjugate of a quaternion represents the opposite rotation.
-  *
-  * \sa Quaternion::inverse()
-  */
-template <typename Scalar>
-inline Quaternion<Scalar> Quaternion<Scalar>::conjugate() const
-{
-  return Quaternion(this->w(),-this->x(),-this->y(),-this->z());
-}
-
-/** \returns the angle (in radians) between two rotations
-  * \sa eigen2_dot()
-  */
-template <typename Scalar>
-inline Scalar Quaternion<Scalar>::angularDistance(const Quaternion& other) const
-{
-  double d = ei_abs(this->eigen2_dot(other));
-  if (d>=1.0)
-    return 0;
-  return Scalar(2) * std::acos(d);
-}
-
-/** \returns the spherical linear interpolation between the two quaternions
-  * \c *this and \a other at the parameter \a t
-  */
-template <typename Scalar>
-Quaternion<Scalar> Quaternion<Scalar>::slerp(Scalar t, const Quaternion& other) const
-{
-  static const Scalar one = Scalar(1) - machine_epsilon<Scalar>();
-  Scalar d = this->eigen2_dot(other);
-  Scalar absD = ei_abs(d);
-
-  Scalar scale0;
-  Scalar scale1;
-
-  if (absD>=one)
-  {
-    scale0 = Scalar(1) - t;
-    scale1 = t;
-  }
-  else
-  {
-    // theta is the angle between the 2 quaternions
-    Scalar theta = std::acos(absD);
-    Scalar sinTheta = ei_sin(theta);
-
-    scale0 = ei_sin( ( Scalar(1) - t ) * theta) / sinTheta;
-    scale1 = ei_sin( ( t * theta) ) / sinTheta;
-    if (d<0)
-      scale1 = -scale1;
-  }
-
-  return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
-}
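A small sketch of slerp as documented above: for two rotations about the same axis the interpolated angle grows linearly in t. It assumes the public Eigen 3 slerp() member:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  Quaterniond qa(AngleAxisd(0.0, Vector3d::UnitZ()));
  Quaterniond qb(AngleAxisd(1.0, Vector3d::UnitZ()));   // 1 rad about z

  for (double t = 0.0; t <= 1.0; t += 0.25)
  {
    Quaterniond qi = qa.slerp(t, qb);
    // For rotations about a fixed axis, slerp interpolates the angle linearly.
    std::cout << "t = " << t << "  angle = " << AngleAxisd(qi).angle() << "\n";
  }
  return 0;
}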
-
-// set from a rotation matrix
-template<typename Other>
-struct ei_quaternion_assign_impl<Other,3,3>
-{
-  typedef typename Other::Scalar Scalar;
-  static inline void run(Quaternion<Scalar>& q, const Other& mat)
-  {
-    // This algorithm comes from  "Quaternion Calculus and Fast Animation",
-    // Ken Shoemake, 1987 SIGGRAPH course notes
-    Scalar t = mat.trace();
-    if (t > 0)
-    {
-      t = ei_sqrt(t + Scalar(1.0));
-      q.w() = Scalar(0.5)*t;
-      t = Scalar(0.5)/t;
-      q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t;
-      q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t;
-      q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t;
-    }
-    else
-    {
-      int i = 0;
-      if (mat.coeff(1,1) > mat.coeff(0,0))
-        i = 1;
-      if (mat.coeff(2,2) > mat.coeff(i,i))
-        i = 2;
-      int j = (i+1)%3;
-      int k = (j+1)%3;
-
-      t = ei_sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
-      q.coeffs().coeffRef(i) = Scalar(0.5) * t;
-      t = Scalar(0.5)/t;
-      q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t;
-      q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t;
-      q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t;
-    }
-  }
-};
-
-// set from a vector of coefficients assumed to be a quaternion
-template<typename Other>
-struct ei_quaternion_assign_impl<Other,4,1>
-{
-  typedef typename Other::Scalar Scalar;
-  static inline void run(Quaternion<Scalar>& q, const Other& vec)
-  {
-    q.coeffs() = vec;
-  }
-};
-
-} // end namespace Eigen
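The 3x3 specialization above implements Shoemake's matrix-to-quaternion conversion. Through the public Eigen 3 API this is just constructing a Quaterniond from a rotation matrix, and the round trip should reproduce the matrix up to round-off; a minimal sketch:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  // Some rotation matrix.
  Matrix3d R = AngleAxisd(2.5, Vector3d(1, -1, 2).normalized()).toRotationMatrix();

  // Matrix -> quaternion (Shoemake's algorithm under the hood) -> matrix.
  Quaterniond q(R);
  Matrix3d R2 = q.toRotationMatrix();

  std::cout << "round-trip error: " << (R - R2).norm() << "\n";  // ~1e-16
  std::cout << "|q| = " << q.norm() << "\n";                     // 1
  return 0;
}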
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
deleted file mode 100644
index 19b8582a1..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
+++ /dev/null
@@ -1,145 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class Rotation2D
-  *
-  * \brief Represents a rotation/orientation in a 2 dimensional space.
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients
-  *
-  * This class is equivalent to a single scalar representing a counter-clockwise rotation
-  * as a single angle in radians. It provides some additional features such as the automatic
-  * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar
-  * interface to Quaternion in order to facilitate the writing of generic algorithms
-  * dealing with rotations.
-  *
-  * \sa class Quaternion, class Transform
-  */
-template<typename _Scalar> struct ei_traits<Rotation2D<_Scalar> >
-{
-  typedef _Scalar Scalar;
-};
-
-template<typename _Scalar>
-class Rotation2D : public RotationBase<Rotation2D<_Scalar>,2>
-{
-  typedef RotationBase<Rotation2D<_Scalar>,2> Base;
-
-public:
-
-  using Base::operator*;
-
-  enum { Dim = 2 };
-  /** the scalar type of the coefficients */
-  typedef _Scalar Scalar;
-  typedef Matrix<Scalar,2,1> Vector2;
-  typedef Matrix<Scalar,2,2> Matrix2;
-
-protected:
-
-  Scalar m_angle;
-
-public:
-
-  /** Constructs a 2D counter-clockwise rotation from the angle \a a in radians. */
-  inline Rotation2D(Scalar a) : m_angle(a) {}
-
-  /** \returns the rotation angle */
-  inline Scalar angle() const { return m_angle; }
-
-  /** \returns a read-write reference to the rotation angle */
-  inline Scalar& angle() { return m_angle; }
-
-  /** \returns the inverse rotation */
-  inline Rotation2D inverse() const { return -m_angle; }
-
-  /** Concatenates two rotations */
-  inline Rotation2D operator*(const Rotation2D& other) const
-  { return m_angle + other.m_angle; }
-
-  /** Concatenates two rotations */
-  inline Rotation2D& operator*=(const Rotation2D& other)
-  { m_angle += other.m_angle; return *this; }
-
-  /** Applies the rotation to a 2D vector */
-  Vector2 operator* (const Vector2& vec) const
-  { return toRotationMatrix() * vec; }
-
-  template<typename Derived>
-  Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
-  Matrix2 toRotationMatrix(void) const;
-
-  /** \returns the spherical interpolation between \c *this and \a other using
-    * parameter \a t. It is in fact equivalent to a linear interpolation.
-    */
-  inline Rotation2D slerp(Scalar t, const Rotation2D& other) const
-  { return m_angle * (1-t) + other.angle() * t; }
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const
-  { return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit Rotation2D(const Rotation2D<OtherScalarType>& other)
-  {
-    m_angle = Scalar(other.angle());
-  }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const Rotation2D& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
-  { return ei_isApprox(m_angle,other.m_angle, prec); }
-};
-
-/** \ingroup Geometry_Module
-  * single precision 2D rotation type */
-typedef Rotation2D<float> Rotation2Df;
-/** \ingroup Geometry_Module
-  * double precision 2D rotation type */
-typedef Rotation2D<double> Rotation2Dd;
-
-/** Set \c *this from a 2x2 rotation matrix \a mat.
-  * In other words, this function extracts the rotation angle
-  * from the rotation matrix.
-  */
-template<typename Scalar>
-template<typename Derived>
-Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
-{
-  EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
-  m_angle = ei_atan2(mat.coeff(1,0), mat.coeff(0,0));
-  return *this;
-}
-
-/** Constructs and \returns an equivalent 2x2 rotation matrix.
-  */
-template<typename Scalar>
-typename Rotation2D<Scalar>::Matrix2
-Rotation2D<Scalar>::toRotationMatrix(void) const
-{
-  Scalar sinA = ei_sin(m_angle);
-  Scalar cosA = ei_cos(m_angle);
-  return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
-}
-
-} // end namespace Eigen
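// Illustrative sketch of the Rotation2D interface declared above; not part of the
// removed file. Assumptions: the Eigen2-compatible geometry API is reachable via
// <Eigen/Geometry>, and the function name below is purely hypothetical.
#include <Eigen/Geometry>

void rotation2d_sketch()
{
  Eigen::Rotation2Df r(0.5f);                            // counter-clockwise angle in radians
  Eigen::Vector2f v  = r * Eigen::Vector2f(1.f, 0.f);    // apply the rotation to a 2D vector
  Eigen::Matrix2f R  = r.toRotationMatrix();             // equivalent 2x2 rotation matrix
  Eigen::Rotation2Df mid = r.slerp(0.5f, r.inverse());   // "slerp" here is linear angle interpolation
  (void)v; (void)R; (void)mid;
}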
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h
deleted file mode 100644
index b1c8f38da..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
-
-namespace Eigen { 
-
-// this file aims to contain the various representations of rotation/orientation
-// in 2D and 3D space except Matrix and Quaternion.
-
-/** \class RotationBase
-  *
-  * \brief Common base class for compact rotation representations
-  *
-  * \param Derived is the derived type, i.e., a rotation type
-  * \param _Dim the dimension of the space
-  */
-template<typename Derived, int _Dim>
-class RotationBase
-{
-  public:
-    enum { Dim = _Dim };
-    /** the scalar type of the coefficients */
-    typedef typename ei_traits<Derived>::Scalar Scalar;
-    
-    /** corresponding linear transformation matrix type */
-    typedef Matrix<Scalar,Dim,Dim> RotationMatrixType;
-
-    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
-    inline Derived& derived() { return *static_cast<Derived*>(this); }
-
-    /** \returns an equivalent rotation matrix */
-    inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); }
-
-    /** \returns the inverse rotation */
-    inline Derived inverse() const { return derived().inverse(); }
-
-    /** \returns the concatenation of the rotation \c *this with a translation \a t */
-    inline Transform<Scalar,Dim> operator*(const Translation<Scalar,Dim>& t) const
-    { return toRotationMatrix() * t; }
-
-    /** \returns the concatenation of the rotation \c *this with a scaling \a s */
-    inline RotationMatrixType operator*(const Scaling<Scalar,Dim>& s) const
-    { return toRotationMatrix() * s; }
-
-    /** \returns the concatenation of the rotation \c *this with an affine transformation \a t */
-    inline Transform<Scalar,Dim> operator*(const Transform<Scalar,Dim>& t) const
-    { return toRotationMatrix() * t; }
-};
-
-/** \geometry_module
-  *
-  * Constructs a Dim x Dim rotation matrix from the rotation \a r
-  */
-template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
-template<typename OtherDerived>
-Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
-::Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
-{
-  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
-  *this = r.toRotationMatrix();
-}
-
-/** \geometry_module
-  *
-  * Set a Dim x Dim rotation matrix from the rotation \a r
-  */
-template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
-template<typename OtherDerived>
-Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>&
-Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
-::operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
-{
-  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
-  return *this = r.toRotationMatrix();
-}
-
-/** \internal
-  *
-  * Helper function to convert an arbitrary rotation object to a rotation matrix.
-  *
-  * \param Scalar the numeric type of the matrix coefficients
-  * \param Dim the dimension of the current space
-  *
-  * It returns a Dim x Dim fixed size matrix.
-  *
-  * Default specializations are provided for:
-  *   - any scalar type (2D),
-  *   - any matrix expression,
-  *   - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D)
-  *
-  * Currently ei_toRotationMatrix is only used by Transform.
-  *
-  * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis
-  */
-template<typename Scalar, int Dim>
-static inline Matrix<Scalar,2,2> ei_toRotationMatrix(const Scalar& s)
-{
-  EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
-  return Rotation2D<Scalar>(s).toRotationMatrix();
-}
-
-template<typename Scalar, int Dim, typename OtherDerived>
-static inline Matrix<Scalar,Dim,Dim> ei_toRotationMatrix(const RotationBase<OtherDerived,Dim>& r)
-{
-  return r.toRotationMatrix();
-}
-
-template<typename Scalar, int Dim, typename OtherDerived>
-static inline const MatrixBase<OtherDerived>& ei_toRotationMatrix(const MatrixBase<OtherDerived>& mat)
-{
-  EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim,
-    YOU_MADE_A_PROGRAMMING_MISTAKE)
-  return mat;
-}
-
-} // end namespace Eigen
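// Illustrative sketch of the Matrix/RotationBase interoperability declared above;
// not part of the removed file. Assumptions: AngleAxisf comes from <Eigen/Geometry>
// and the function name is hypothetical.
#include <Eigen/Geometry>

void rotationbase_sketch()
{
  Eigen::AngleAxisf aa(0.25f, Eigen::Vector3f::UnitZ()); // a RotationBase-derived rotation
  Eigen::Matrix3f R(aa);                                 // Matrix constructor from a RotationBase
  R = aa.inverse();                                      // Matrix assignment from a RotationBase
  Eigen::Matrix3f R2 = aa.toRotationMatrix();            // generic RotationBase conversion
  (void)R; (void)R2;
}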
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h
deleted file mode 100644
index b8fa6cd3f..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h
+++ /dev/null
@@ -1,167 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class Scaling
-  *
-  * \brief Represents a possibly non-uniform scaling transformation
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients.
-  * \param _Dim the  dimension of the space, can be a compile time value or Dynamic
-  *
-  * \note This class is not aimed at storing a scaling transformation,
-  * but rather at easing the construction and update of Transform objects.
-  *
-  * \sa class Translation, class Transform
-  */
-template<typename _Scalar, int _Dim>
-class Scaling
-{
-public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)
-  /** dimension of the space */
-  enum { Dim = _Dim };
-  /** the scalar type of the coefficients */
-  typedef _Scalar Scalar;
-  /** corresponding vector type */
-  typedef Matrix<Scalar,Dim,1> VectorType;
-  /** corresponding linear transformation matrix type */
-  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
-  /** corresponding translation type */
-  typedef Translation<Scalar,Dim> TranslationType;
-  /** corresponding affine transformation type */
-  typedef Transform<Scalar,Dim> TransformType;
-
-protected:
-
-  VectorType m_coeffs;
-
-public:
-
-  /** Default constructor without initialization. */
-  Scaling() {}
-  /** Constructs and initializes a uniform scaling transformation */
-  explicit inline Scaling(const Scalar& s) { m_coeffs.setConstant(s); }
-  /** 2D only */
-  inline Scaling(const Scalar& sx, const Scalar& sy)
-  {
-    ei_assert(Dim==2);
-    m_coeffs.x() = sx;
-    m_coeffs.y() = sy;
-  }
-  /** 3D only */
-  inline Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz)
-  {
-    ei_assert(Dim==3);
-    m_coeffs.x() = sx;
-    m_coeffs.y() = sy;
-    m_coeffs.z() = sz;
-  }
-  /** Constructs and initializes the scaling transformation from a vector of scaling coefficients */
-  explicit inline Scaling(const VectorType& coeffs) : m_coeffs(coeffs) {}
-
-  const VectorType& coeffs() const { return m_coeffs; }
-  VectorType& coeffs() { return m_coeffs; }
-
-  /** Concatenates two scalings */
-  inline Scaling operator* (const Scaling& other) const
-  { return Scaling(coeffs().cwise() * other.coeffs()); }
-
-  /** Concatenates a scaling and a translation */
-  inline TransformType operator* (const TranslationType& t) const;
-
-  /** Concatenates a scaling and an affine transformation */
-  inline TransformType operator* (const TransformType& t) const;
-
-  /** Concatenates a scaling and a linear transformation matrix */
-  // TODO returns an expression
-  inline LinearMatrixType operator* (const LinearMatrixType& other) const
-  { return coeffs().asDiagonal() * other; }
-
-  /** Concatenates a linear transformation matrix and a scaling */
-  // TODO returns an expression
-  friend inline LinearMatrixType operator* (const LinearMatrixType& other, const Scaling& s)
-  { return other * s.coeffs().asDiagonal(); }
-
-  template<typename Derived>
-  inline LinearMatrixType operator*(const RotationBase<Derived,Dim>& r) const
-  { return *this * r.toRotationMatrix(); }
-
-  /** Applies scaling to vector */
-  inline VectorType operator* (const VectorType& other) const
-  { return coeffs().asDiagonal() * other; }
-
-  /** \returns the inverse scaling */
-  inline Scaling inverse() const
-  { return Scaling(coeffs().cwise().inverse()); }
-
-  inline Scaling& operator=(const Scaling& other)
-  {
-    m_coeffs = other.m_coeffs;
-    return *this;
-  }
-
-  /** \returns \c *this with scalar type cast to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<Scaling,Scaling<NewScalarType,Dim> >::type cast() const
-  { return typename internal::cast_return_type<Scaling,Scaling<NewScalarType,Dim> >::type(*this); }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit Scaling(const Scaling<OtherScalarType,Dim>& other)
-  { m_coeffs = other.coeffs().template cast<Scalar>(); }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const Scaling& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
-  { return m_coeffs.isApprox(other.m_coeffs, prec); }
-
-};
-
-/** \addtogroup Geometry_Module */
-//@{
-typedef Scaling<float, 2> Scaling2f;
-typedef Scaling<double,2> Scaling2d;
-typedef Scaling<float, 3> Scaling3f;
-typedef Scaling<double,3> Scaling3d;
-//@}
-
-template<typename Scalar, int Dim>
-inline typename Scaling<Scalar,Dim>::TransformType
-Scaling<Scalar,Dim>::operator* (const TranslationType& t) const
-{
-  TransformType res;
-  res.matrix().setZero();
-  res.linear().diagonal() = coeffs();
-  res.translation() = m_coeffs.cwise() * t.vector();
-  res(Dim,Dim) = Scalar(1);
-  return res;
-}
-
-template<typename Scalar, int Dim>
-inline typename Scaling<Scalar,Dim>::TransformType
-Scaling<Scalar,Dim>::operator* (const TransformType& t) const
-{
-  TransformType res = t;
-  res.prescale(m_coeffs);
-  return res;
-}
-
-} // end namespace Eigen
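// Illustrative sketch of the (Eigen2-style) Scaling class above; not part of the
// removed file. Assumption: the Eigen2 compatibility layer is active, so Scaling2f,
// Translation2f and Transform2f refer to the classes in this directory; the function
// name is hypothetical.
#include <Eigen/Geometry>

void scaling_sketch()
{
  Eigen::Scaling2f s(2.f, 0.5f);                              // non-uniform 2D scaling
  Eigen::Vector2f v = s * Eigen::Vector2f(1.f, 1.f);          // -> (2, 0.5)
  Eigen::Scaling2f sInv = s.inverse();                        // coefficient-wise inverse
  Eigen::Transform2f T = s * Eigen::Translation2f(1.f, 0.f);  // yields a full Transform
  (void)v; (void)sInv; (void)T;
}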
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Transform.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Transform.h
deleted file mode 100644
index fab60b251..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Transform.h
+++ /dev/null
@@ -1,786 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
-
-namespace Eigen { 
-
-// Note that we have to pass Dim and HDim because it is not allowed to use a template
-// parameter to define a template specialization. To be more precise, in the following
-// specializations, it is not allowed to use Dim+1 instead of HDim.
-template< typename Other,
-          int Dim,
-          int HDim,
-          int OtherRows=Other::RowsAtCompileTime,
-          int OtherCols=Other::ColsAtCompileTime>
-struct ei_transform_product_impl;
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class Transform
-  *
-  * \brief Represents a homogeneous transformation in an N-dimensional space
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients
-  * \param _Dim the dimension of the space
-  *
-  * The homography is internally represented and stored as a (Dim+1)^2 matrix which
-  * is available through the matrix() method.
-  *
-  * Conversion methods from/to Qt's QMatrix and QTransform are available if the
-  * preprocessor token EIGEN_QT_SUPPORT is defined.
-  *
-  * \sa class Matrix, class Quaternion
-  */
-template<typename _Scalar, int _Dim>
-class Transform
-{
-public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))
-  enum {
-    Dim = _Dim,     ///< space dimension in which the transformation holds
-    HDim = _Dim+1   ///< size of a respective homogeneous vector
-  };
-  /** the scalar type of the coefficients */
-  typedef _Scalar Scalar;
-  /** type of the matrix used to represent the transformation */
-  typedef Matrix<Scalar,HDim,HDim> MatrixType;
-  /** type of the matrix used to represent the linear part of the transformation */
-  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
-  /** type of read/write reference to the linear part of the transformation */
-  typedef Block<MatrixType,Dim,Dim> LinearPart;
-  /** type of read-only reference to the linear part of the transformation */
-  typedef const Block<const MatrixType,Dim,Dim> ConstLinearPart;
-  /** type of a vector */
-  typedef Matrix<Scalar,Dim,1> VectorType;
-  /** type of a read/write reference to the translation part of the transformation */
-  typedef Block<MatrixType,Dim,1> TranslationPart;
-  /** type of a read-only reference to the translation part of the transformation */
-  typedef const Block<const MatrixType,Dim,1> ConstTranslationPart;
-  /** corresponding translation type */
-  typedef Translation<Scalar,Dim> TranslationType;
-  /** corresponding scaling transformation type */
-  typedef Scaling<Scalar,Dim> ScalingType;
-
-protected:
-
-  MatrixType m_matrix;
-
-public:
-
-  /** Default constructor without initialization of the coefficients. */
-  inline Transform() { }
-
-  inline Transform(const Transform& other)
-  {
-    m_matrix = other.m_matrix;
-  }
-
-  inline explicit Transform(const TranslationType& t) { *this = t; }
-  inline explicit Transform(const ScalingType& s) { *this = s; }
-  template<typename Derived>
-  inline explicit Transform(const RotationBase<Derived, Dim>& r) { *this = r; }
-
-  inline Transform& operator=(const Transform& other)
-  { m_matrix = other.m_matrix; return *this; }
-
-  template<typename OtherDerived, bool BigMatrix> // MSVC 2005 will commit suicide if BigMatrix has a default value
-  struct construct_from_matrix
-  {
-    static inline void run(Transform *transform, const MatrixBase<OtherDerived>& other)
-    {
-      transform->matrix() = other;
-    }
-  };
-
-  template<typename OtherDerived> struct construct_from_matrix<OtherDerived, true>
-  {
-    static inline void run(Transform *transform, const MatrixBase<OtherDerived>& other)
-    {
-      transform->linear() = other;
-      transform->translation().setZero();
-      transform->matrix()(Dim,Dim) = Scalar(1);
-      transform->matrix().template block<1,Dim>(Dim,0).setZero();
-    }
-  };
-
-  /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */
-  template<typename OtherDerived>
-  inline explicit Transform(const MatrixBase<OtherDerived>& other)
-  {
-    construct_from_matrix<OtherDerived, int(OtherDerived::RowsAtCompileTime) == Dim>::run(this, other);
-  }
-
-  /** Set \c *this from a (Dim+1)^2 matrix. */
-  template<typename OtherDerived>
-  inline Transform& operator=(const MatrixBase<OtherDerived>& other)
-  { m_matrix = other; return *this; }
-
-  #ifdef EIGEN_QT_SUPPORT
-  inline Transform(const QMatrix& other);
-  inline Transform& operator=(const QMatrix& other);
-  inline QMatrix toQMatrix(void) const;
-  inline Transform(const QTransform& other);
-  inline Transform& operator=(const QTransform& other);
-  inline QTransform toQTransform(void) const;
-  #endif
-
-  /** shortcut for m_matrix(row,col);
-    * \sa MatrixBase::operator()(int,int) const */
-  inline Scalar operator() (int row, int col) const { return m_matrix(row,col); }
-  /** shortcut for m_matrix(row,col);
-    * \sa MatrixBase::operator()(int,int) */
-  inline Scalar& operator() (int row, int col) { return m_matrix(row,col); }
-
-  /** \returns a read-only expression of the transformation matrix */
-  inline const MatrixType& matrix() const { return m_matrix; }
-  /** \returns a writable expression of the transformation matrix */
-  inline MatrixType& matrix() { return m_matrix; }
-
-  /** \returns a read-only expression of the linear part of the transformation */
-  inline ConstLinearPart linear() const { return m_matrix.template block<Dim,Dim>(0,0); }
-  /** \returns a writable expression of the linear part of the transformation */
-  inline LinearPart linear() { return m_matrix.template block<Dim,Dim>(0,0); }
-
-  /** \returns a read-only expression of the translation vector of the transformation */
-  inline ConstTranslationPart translation() const { return m_matrix.template block<Dim,1>(0,Dim); }
-  /** \returns a writable expression of the translation vector of the transformation */
-  inline TranslationPart translation() { return m_matrix.template block<Dim,1>(0,Dim); }
-
-  /** \returns an expression of the product between the transform \c *this and a matrix expression \a other
-  *
-  * The right hand side \a other might be either:
-  * \li a vector of size Dim,
-  * \li an homogeneous vector of size Dim+1,
-  * \li a transformation matrix of size Dim+1 x Dim+1.
-  */
-  // note: this function is defined here because some compilers cannot find the respective declaration
-  template<typename OtherDerived>
-  inline const typename ei_transform_product_impl<OtherDerived,_Dim,_Dim+1>::ResultType
-  operator * (const MatrixBase<OtherDerived> &other) const
-  { return ei_transform_product_impl<OtherDerived,Dim,HDim>::run(*this,other.derived()); }
-
-  /** \returns the product expression of a transformation matrix \a a times a transform \a b
-    * The transformation matrix \a a must have size Dim+1 x Dim+1. */
-  template<typename OtherDerived>
-  friend inline const typename ProductReturnType<OtherDerived,MatrixType>::Type
-  operator * (const MatrixBase<OtherDerived> &a, const Transform &b)
-  { return a.derived() * b.matrix(); }
-
-  /** Concatenates two transformations */
-  inline const Transform
-  operator * (const Transform& other) const
-  { return Transform(m_matrix * other.matrix()); }
-
-  /** \sa MatrixBase::setIdentity() */
-  void setIdentity() { m_matrix.setIdentity(); }
-  static const typename MatrixType::IdentityReturnType Identity()
-  {
-    return MatrixType::Identity();
-  }
-
-  template<typename OtherDerived>
-  inline Transform& scale(const MatrixBase<OtherDerived> &other);
-
-  template<typename OtherDerived>
-  inline Transform& prescale(const MatrixBase<OtherDerived> &other);
-
-  inline Transform& scale(Scalar s);
-  inline Transform& prescale(Scalar s);
-
-  template<typename OtherDerived>
-  inline Transform& translate(const MatrixBase<OtherDerived> &other);
-
-  template<typename OtherDerived>
-  inline Transform& pretranslate(const MatrixBase<OtherDerived> &other);
-
-  template<typename RotationType>
-  inline Transform& rotate(const RotationType& rotation);
-
-  template<typename RotationType>
-  inline Transform& prerotate(const RotationType& rotation);
-
-  Transform& shear(Scalar sx, Scalar sy);
-  Transform& preshear(Scalar sx, Scalar sy);
-
-  inline Transform& operator=(const TranslationType& t);
-  inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }
-  inline Transform operator*(const TranslationType& t) const;
-
-  inline Transform& operator=(const ScalingType& t);
-  inline Transform& operator*=(const ScalingType& s) { return scale(s.coeffs()); }
-  inline Transform operator*(const ScalingType& s) const;
-  friend inline Transform operator*(const LinearMatrixType& mat, const Transform& t)
-  {
-    Transform res = t;
-    res.matrix().row(Dim) = t.matrix().row(Dim);
-    res.matrix().template block<Dim,HDim>(0,0) = (mat * t.matrix().template block<Dim,HDim>(0,0)).lazy();
-    return res;
-  }
-
-  template<typename Derived>
-  inline Transform& operator=(const RotationBase<Derived,Dim>& r);
-  template<typename Derived>
-  inline Transform& operator*=(const RotationBase<Derived,Dim>& r) { return rotate(r.toRotationMatrix()); }
-  template<typename Derived>
-  inline Transform operator*(const RotationBase<Derived,Dim>& r) const;
-
-  LinearMatrixType rotation() const;
-  template<typename RotationMatrixType, typename ScalingMatrixType>
-  void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const;
-  template<typename ScalingMatrixType, typename RotationMatrixType>
-  void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const;
-
-  template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
-  Transform& fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
-    const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale);
-
-  inline const MatrixType inverse(TransformTraits traits = Affine) const;
-
-  /** \returns a const pointer to the column major internal matrix */
-  const Scalar* data() const { return m_matrix.data(); }
-  /** \returns a non-const pointer to the column major internal matrix */
-  Scalar* data() { return m_matrix.data(); }
-
-  /** \returns \c *this with scalar type cast to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim> >::type cast() const
-  { return typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim> >::type(*this); }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit Transform(const Transform<OtherScalarType,Dim>& other)
-  { m_matrix = other.matrix().template cast<Scalar>(); }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const Transform& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
-  { return m_matrix.isApprox(other.m_matrix, prec); }
-
-  #ifdef EIGEN_TRANSFORM_PLUGIN
-  #include EIGEN_TRANSFORM_PLUGIN
-  #endif
-
-protected:
-
-};
-
-/** \ingroup Geometry_Module */
-typedef Transform<float,2> Transform2f;
-/** \ingroup Geometry_Module */
-typedef Transform<float,3> Transform3f;
-/** \ingroup Geometry_Module */
-typedef Transform<double,2> Transform2d;
-/** \ingroup Geometry_Module */
-typedef Transform<double,3> Transform3d;
-
-/**************************
-*** Optional QT support ***
-**************************/
-
-#ifdef EIGEN_QT_SUPPORT
-/** Initialises \c *this from a QMatrix assuming the dimension is 2.
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim>
-Transform<Scalar,Dim>::Transform(const QMatrix& other)
-{
-  *this = other;
-}
-
-/** Set \c *this from a QMatrix assuming the dimension is 2.
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim>
-Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const QMatrix& other)
-{
-  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  m_matrix << other.m11(), other.m21(), other.dx(),
-              other.m12(), other.m22(), other.dy(),
-              0, 0, 1;
-   return *this;
-}
-
-/** \returns a QMatrix from \c *this assuming the dimension is 2.
-  *
-  * \warning this conversion might lose data if \c *this is not affine
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim>
-QMatrix Transform<Scalar,Dim>::toQMatrix(void) const
-{
-  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
-                 m_matrix.coeff(0,1), m_matrix.coeff(1,1),
-                 m_matrix.coeff(0,2), m_matrix.coeff(1,2));
-}
-
-/** Initialises \c *this from a QTransform assuming the dimension is 2.
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim>
-Transform<Scalar,Dim>::Transform(const QTransform& other)
-{
-  *this = other;
-}
-
-/** Set \c *this from a QTransform assuming the dimension is 2.
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim>
-Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const QTransform& other)
-{
-  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  m_matrix << other.m11(), other.m21(), other.dx(),
-              other.m12(), other.m22(), other.dy(),
-              other.m13(), other.m23(), other.m33();
-   return *this;
-}
-
-/** \returns a QTransform from \c *this assuming the dimension is 2.
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim>
-QTransform Transform<Scalar,Dim>::toQTransform(void) const
-{
-  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0),
-                    m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1),
-                    m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2));
-}
-#endif
-
-/*********************
-*** Procedural API ***
-*********************/
-
-/** Applies on the right the non-uniform scale transformation represented
-  * by the vector \a other to \c *this and returns a reference to \c *this.
-  * \sa prescale()
-  */
-template<typename Scalar, int Dim>
-template<typename OtherDerived>
-Transform<Scalar,Dim>&
-Transform<Scalar,Dim>::scale(const MatrixBase<OtherDerived> &other)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
-  linear() = (linear() * other.asDiagonal()).lazy();
-  return *this;
-}
-
-/** Applies on the right a uniform scale of a factor \a s to \c *this
-  * and returns a reference to \c *this.
-  * \sa prescale(Scalar)
-  */
-template<typename Scalar, int Dim>
-inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::scale(Scalar s)
-{
-  linear() *= s;
-  return *this;
-}
-
-/** Applies on the left the non-uniform scale transformation represented
-  * by the vector \a other to \c *this and returns a reference to \c *this.
-  * \sa scale()
-  */
-template<typename Scalar, int Dim>
-template<typename OtherDerived>
-Transform<Scalar,Dim>&
-Transform<Scalar,Dim>::prescale(const MatrixBase<OtherDerived> &other)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
-  m_matrix.template block<Dim,HDim>(0,0) = (other.asDiagonal() * m_matrix.template block<Dim,HDim>(0,0)).lazy();
-  return *this;
-}
-
-/** Applies on the left a uniform scale of a factor \a s to \c *this
-  * and returns a reference to \c *this.
-  * \sa scale(Scalar)
-  */
-template<typename Scalar, int Dim>
-inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::prescale(Scalar s)
-{
-  m_matrix.template corner<Dim,HDim>(TopLeft) *= s;
-  return *this;
-}
-
-/** Applies on the right the translation matrix represented by the vector \a other
-  * to \c *this and returns a reference to \c *this.
-  * \sa pretranslate()
-  */
-template<typename Scalar, int Dim>
-template<typename OtherDerived>
-Transform<Scalar,Dim>&
-Transform<Scalar,Dim>::translate(const MatrixBase<OtherDerived> &other)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
-  translation() += linear() * other;
-  return *this;
-}
-
-/** Applies on the left the translation matrix represented by the vector \a other
-  * to \c *this and returns a reference to \c *this.
-  * \sa translate()
-  */
-template<typename Scalar, int Dim>
-template<typename OtherDerived>
-Transform<Scalar,Dim>&
-Transform<Scalar,Dim>::pretranslate(const MatrixBase<OtherDerived> &other)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
-  translation() += other;
-  return *this;
-}
-
-/** Applies on the right the rotation represented by the rotation \a rotation
-  * to \c *this and returns a reference to \c *this.
-  *
-  * The template parameter \a RotationType is the type of the rotation which
-  * must be known by ei_toRotationMatrix<>.
-  *
-  * Natively supported types include:
-  *   - any scalar (2D),
-  *   - a Dim x Dim matrix expression,
-  *   - a Quaternion (3D),
-  *   - an AngleAxis (3D)
-  *
-  * This mechanism is easily extendable to support user types such as Euler angles,
-  * or a pair of Quaternion for 4D rotations.
-  *
-  * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType)
-  */
-template<typename Scalar, int Dim>
-template<typename RotationType>
-Transform<Scalar,Dim>&
-Transform<Scalar,Dim>::rotate(const RotationType& rotation)
-{
-  linear() *= ei_toRotationMatrix<Scalar,Dim>(rotation);
-  return *this;
-}
-
-/** Applies on the left the rotation represented by the rotation \a rotation
-  * to \c *this and returns a reference to \c *this.
-  *
-  * See rotate() for further details.
-  *
-  * \sa rotate()
-  */
-template<typename Scalar, int Dim>
-template<typename RotationType>
-Transform<Scalar,Dim>&
-Transform<Scalar,Dim>::prerotate(const RotationType& rotation)
-{
-  m_matrix.template block<Dim,HDim>(0,0) = ei_toRotationMatrix<Scalar,Dim>(rotation)
-                                         * m_matrix.template block<Dim,HDim>(0,0);
-  return *this;
-}
-
-/** Applies on the right the shear transformation represented
-  * by the shear factors \a sx and \a sy to \c *this and returns a reference to \c *this.
-  * \warning 2D only.
-  * \sa preshear()
-  */
-template<typename Scalar, int Dim>
-Transform<Scalar,Dim>&
-Transform<Scalar,Dim>::shear(Scalar sx, Scalar sy)
-{
-  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  VectorType tmp = linear().col(0)*sy + linear().col(1);
-  linear() << linear().col(0) + linear().col(1)*sx, tmp;
-  return *this;
-}
-
-/** Applies on the left the shear transformation represented
-  * by the shear factors \a sx and \a sy to \c *this and returns a reference to \c *this.
-  * \warning 2D only.
-  * \sa shear()
-  */
-template<typename Scalar, int Dim>
-Transform<Scalar,Dim>&
-Transform<Scalar,Dim>::preshear(Scalar sx, Scalar sy)
-{
-  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  m_matrix.template block<Dim,HDim>(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block<Dim,HDim>(0,0);
-  return *this;
-}
-
-/******************************************************
-*** Scaling, Translation and Rotation compatibility ***
-******************************************************/
-
-template<typename Scalar, int Dim>
-inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const TranslationType& t)
-{
-  linear().setIdentity();
-  translation() = t.vector();
-  m_matrix.template block<1,Dim>(Dim,0).setZero();
-  m_matrix(Dim,Dim) = Scalar(1);
-  return *this;
-}
-
-template<typename Scalar, int Dim>
-inline Transform<Scalar,Dim> Transform<Scalar,Dim>::operator*(const TranslationType& t) const
-{
-  Transform res = *this;
-  res.translate(t.vector());
-  return res;
-}
-
-template<typename Scalar, int Dim>
-inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const ScalingType& s)
-{
-  m_matrix.setZero();
-  linear().diagonal() = s.coeffs();
-  m_matrix.coeffRef(Dim,Dim) = Scalar(1);
-  return *this;
-}
-
-template<typename Scalar, int Dim>
-inline Transform<Scalar,Dim> Transform<Scalar,Dim>::operator*(const ScalingType& s) const
-{
-  Transform res = *this;
-  res.scale(s.coeffs());
-  return res;
-}
-
-template<typename Scalar, int Dim>
-template<typename Derived>
-inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const RotationBase<Derived,Dim>& r)
-{
-  linear() = ei_toRotationMatrix<Scalar,Dim>(r);
-  translation().setZero();
-  m_matrix.template block<1,Dim>(Dim,0).setZero();
-  m_matrix.coeffRef(Dim,Dim) = Scalar(1);
-  return *this;
-}
-
-template<typename Scalar, int Dim>
-template<typename Derived>
-inline Transform<Scalar,Dim> Transform<Scalar,Dim>::operator*(const RotationBase<Derived,Dim>& r) const
-{
-  Transform res = *this;
-  res.rotate(r.derived());
-  return res;
-}
-
-/************************
-*** Special functions ***
-************************/
-
-/** \returns the rotation part of the transformation
-  * \nonstableyet
-  *
-  * \svd_module
-  *
-  * \sa computeRotationScaling(), computeScalingRotation(), class SVD
-  */
-template<typename Scalar, int Dim>
-typename Transform<Scalar,Dim>::LinearMatrixType
-Transform<Scalar,Dim>::rotation() const
-{
-  LinearMatrixType result;
-  computeRotationScaling(&result, (LinearMatrixType*)0);
-  return result;
-}
-
-
-/** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being
-  * not necessarily positive.
-  *
-  * If either pointer is zero, the corresponding computation is skipped.
-  *
-  * \nonstableyet
-  *
-  * \svd_module
-  *
-  * \sa computeScalingRotation(), rotation(), class SVD
-  */
-template<typename Scalar, int Dim>
-template<typename RotationMatrixType, typename ScalingMatrixType>
-void Transform<Scalar,Dim>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const
-{
-  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU|ComputeFullV);
-  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
-  Matrix<Scalar, Dim, 1> sv(svd.singularValues());
-  sv.coeffRef(0) *= x;
-  if(scaling)
-  {
-    scaling->noalias() = svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint();
-  }
-  if(rotation)
-  {
-    LinearMatrixType m(svd.matrixU());
-    m.col(0) /= x;
-    rotation->noalias() = m * svd.matrixV().adjoint();
-  }
-}
-
-/** decomposes the linear part of the transformation as a product scaling x rotation, the scaling being
-  * not necessarily positive.
-  *
-  * If either pointer is zero, the corresponding computation is skipped.
-  *
-  * \nonstableyet
-  *
-  * \svd_module
-  *
-  * \sa computeRotationScaling(), rotation(), class SVD
-  */
-template<typename Scalar, int Dim>
-template<typename ScalingMatrixType, typename RotationMatrixType>
-void Transform<Scalar,Dim>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const
-{
-  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU|ComputeFullV);
-  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
-  Matrix<Scalar, Dim, 1> sv(svd.singularValues());
-  sv.coeffRef(0) *= x;
-  if(scaling)
-  {
-    scaling->noalias() = svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint();
-  }
-  if(rotation)
-  {
-    LinearMatrixType m(svd.matrixU());
-    m.col(0) /= x;
-    rotation->noalias() = m * svd.matrixV().adjoint();
-  }
-}
-
-/** Convenient method to set \c *this from a position, orientation and scale
-  * of a 3D object.
-  */
-template<typename Scalar, int Dim>
-template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
-Transform<Scalar,Dim>&
-Transform<Scalar,Dim>::fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
-  const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale)
-{
-  linear() = ei_toRotationMatrix<Scalar,Dim>(orientation);
-  linear() *= scale.asDiagonal();
-  translation() = position;
-  m_matrix.template block<1,Dim>(Dim,0).setZero();
-  m_matrix(Dim,Dim) = Scalar(1);
-  return *this;
-}
-
-/** \nonstableyet
-  *
-  * \returns the inverse transformation matrix according to some given knowledge
-  * on \c *this.
-  *
-  * \param traits allows optimizing the inversion process when the transformation
-  * is known not to be a general transformation. The possible values are:
-  *  - Projective if the transformation is not necessarily affine, i.e., if the
-  *    last row is not guaranteed to be [0 ... 0 1]
-  *  - Affine is the default, the last row is assumed to be [0 ... 0 1]
-  *  - Isometry if the transformation is only a concatenation of translations
-  *    and rotations.
-  *
-  * \warning unless \a traits is set to Isometry, this function
-  * requires the generic inverse method of MatrixBase defined in the LU module. If
-  * you forget to include this module, then you will get hard to debug linking errors.
-  *
-  * \sa MatrixBase::inverse()
-  */
-template<typename Scalar, int Dim>
-inline const typename Transform<Scalar,Dim>::MatrixType
-Transform<Scalar,Dim>::inverse(TransformTraits traits) const
-{
-  if (traits == Projective)
-  {
-    return m_matrix.inverse();
-  }
-  else
-  {
-    MatrixType res;
-    if (traits == Affine)
-    {
-      res.template corner<Dim,Dim>(TopLeft) = linear().inverse();
-    }
-    else if (traits == Isometry)
-    {
-      res.template corner<Dim,Dim>(TopLeft) = linear().transpose();
-    }
-    else
-    {
-      ei_assert("invalid traits value in Transform::inverse()");
-    }
-    // translation and remaining parts
-    res.template corner<Dim,1>(TopRight) = - res.template corner<Dim,Dim>(TopLeft) * translation();
-    res.template corner<1,Dim>(BottomLeft).setZero();
-    res.coeffRef(Dim,Dim) = Scalar(1);
-    return res;
-  }
-}
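// Illustrative sketch of the traits-based inverse() documented above; not part of the
// removed file. Assumptions: the Eigen2-compatible Transform of this header (whose
// inverse() returns a plain (Dim+1)x(Dim+1) matrix); the function name is hypothetical.
#include <Eigen/LU>        // generic matrix inverse used by the Affine/Projective paths
#include <Eigen/Geometry>

void transform_inverse_sketch()
{
  Eigen::Transform3f T;
  T.setIdentity();
  T.translate(Eigen::Vector3f(1.f, 2.f, 3.f))
   .rotate(Eigen::AngleAxisf(0.3f, Eigen::Vector3f::UnitY()));
  Eigen::Matrix4f invIso = T.inverse(Eigen::Isometry);   // rotation + translation only: transpose path
  Eigen::Matrix4f invAff = T.inverse(Eigen::Affine);     // general affine: needs linear().inverse()
  (void)invIso; (void)invAff;
}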
-
-/*****************************************************
-*** Specializations of operator* with a MatrixBase ***
-*****************************************************/
-
-template<typename Other, int Dim, int HDim>
-struct ei_transform_product_impl<Other,Dim,HDim, HDim,HDim>
-{
-  typedef Transform<typename Other::Scalar,Dim> TransformType;
-  typedef typename TransformType::MatrixType MatrixType;
-  typedef typename ProductReturnType<MatrixType,Other>::Type ResultType;
-  static ResultType run(const TransformType& tr, const Other& other)
-  { return tr.matrix() * other; }
-};
-
-template<typename Other, int Dim, int HDim>
-struct ei_transform_product_impl<Other,Dim,HDim, Dim,Dim>
-{
-  typedef Transform<typename Other::Scalar,Dim> TransformType;
-  typedef typename TransformType::MatrixType MatrixType;
-  typedef TransformType ResultType;
-  static ResultType run(const TransformType& tr, const Other& other)
-  {
-    TransformType res;
-    res.translation() = tr.translation();
-    res.matrix().row(Dim) = tr.matrix().row(Dim);
-    res.linear() = (tr.linear() * other).lazy();
-    return res;
-  }
-};
-
-template<typename Other, int Dim, int HDim>
-struct ei_transform_product_impl<Other,Dim,HDim, HDim,1>
-{
-  typedef Transform<typename Other::Scalar,Dim> TransformType;
-  typedef typename TransformType::MatrixType MatrixType;
-  typedef typename ProductReturnType<MatrixType,Other>::Type ResultType;
-  static ResultType run(const TransformType& tr, const Other& other)
-  { return tr.matrix() * other; }
-};
-
-template<typename Other, int Dim, int HDim>
-struct ei_transform_product_impl<Other,Dim,HDim, Dim,1>
-{
-  typedef typename Other::Scalar Scalar;
-  typedef Transform<Scalar,Dim> TransformType;
-  typedef Matrix<Scalar,Dim,1> ResultType;
-  static ResultType run(const TransformType& tr, const Other& other)
-  { return ((tr.linear() * other) + tr.translation())
-          * (Scalar(1) / ( (tr.matrix().template block<1,Dim>(Dim,0) * other).coeff(0) + tr.matrix().coeff(Dim,Dim))); }
-};
-
-} // end namespace Eigen
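// Illustrative sketch tying together the procedural API of the Transform class
// removed above (translate/rotate/scale and the operator* overloads); not part of
// the removed file. Assumption: the Eigen2 compatibility typedefs; the function
// name is hypothetical.
#include <Eigen/Geometry>

void transform_compose_sketch()
{
  Eigen::Transform2f T;
  T.setIdentity();
  T.translate(Eigen::Vector2f(1.f, 0.f))                 // right-multiplied translation
   .rotate(0.5f)                                         // 2D: a plain scalar angle is accepted
   .scale(2.f);                                          // uniform scaling
  Eigen::Vector2f p = T * Eigen::Vector2f(1.f, 1.f);     // Dim-sized vector: affine action
  Eigen::Matrix3f H = T.matrix();                        // the underlying (Dim+1)x(Dim+1) matrix
  (void)p; (void)H;
}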
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Translation.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Translation.h
deleted file mode 100644
index 2b9859f6f..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/Geometry/Translation.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class Translation
-  *
-  * \brief Represents a translation transformation
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients.
-  * \param _Dim the  dimension of the space, can be a compile time value or Dynamic
-  *
-  * \note This class is not aimed at storing a translation transformation,
-  * but rather at easing the construction and update of Transform objects.
-  *
-  * \sa class Scaling, class Transform
-  */
-template<typename _Scalar, int _Dim>
-class Translation
-{
-public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)
-  /** dimension of the space */
-  enum { Dim = _Dim };
-  /** the scalar type of the coefficients */
-  typedef _Scalar Scalar;
-  /** corresponding vector type */
-  typedef Matrix<Scalar,Dim,1> VectorType;
-  /** corresponding linear transformation matrix type */
-  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
-  /** corresponding scaling transformation type */
-  typedef Scaling<Scalar,Dim> ScalingType;
-  /** corresponding affine transformation type */
-  typedef Transform<Scalar,Dim> TransformType;
-
-protected:
-
-  VectorType m_coeffs;
-
-public:
-
-  /** Default constructor without initialization. */
-  Translation() {}
-  /** 2D only */
-  inline Translation(const Scalar& sx, const Scalar& sy)
-  {
-    ei_assert(Dim==2);
-    m_coeffs.x() = sx;
-    m_coeffs.y() = sy;
-  }
-  /** 3D only */
-  inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz)
-  {
-    ei_assert(Dim==3);
-    m_coeffs.x() = sx;
-    m_coeffs.y() = sy;
-    m_coeffs.z() = sz;
-  }
-  /** Constructs and initializes the translation from a vector of translation coefficients */
-  explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {}
-
-  const VectorType& vector() const { return m_coeffs; }
-  VectorType& vector() { return m_coeffs; }
-
-  /** Concatenates two translations */
-  inline Translation operator* (const Translation& other) const
-  { return Translation(m_coeffs + other.m_coeffs); }
-
-  /** Concatenates a translation and a scaling */
-  inline TransformType operator* (const ScalingType& other) const;
-
-  /** Concatenates a translation and a linear transformation */
-  inline TransformType operator* (const LinearMatrixType& linear) const;
-
-  template<typename Derived>
-  inline TransformType operator*(const RotationBase<Derived,Dim>& r) const
-  { return *this * r.toRotationMatrix(); }
-
-  /** Concatenates a linear transformation and a translation */
-  // it's a nightmare to define a templated friend function outside its declaration
-  friend inline TransformType operator* (const LinearMatrixType& linear, const Translation& t)
-  {
-    TransformType res;
-    res.matrix().setZero();
-    res.linear() = linear;
-    res.translation() = linear * t.m_coeffs;
-    res.matrix().row(Dim).setZero();
-    res(Dim,Dim) = Scalar(1);
-    return res;
-  }
-
-  /** Concatenates a translation and an affine transformation */
-  inline TransformType operator* (const TransformType& t) const;
-
-  /** Applies translation to vector */
-  inline VectorType operator* (const VectorType& other) const
-  { return m_coeffs + other; }
-
-  /** \returns the inverse translation (opposite) */
-  Translation inverse() const { return Translation(-m_coeffs); }
-
-  Translation& operator=(const Translation& other)
-  {
-    m_coeffs = other.m_coeffs;
-    return *this;
-  }
-
-  /** \returns \c *this with scalar type cast to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const
-  { return typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit Translation(const Translation<OtherScalarType,Dim>& other)
-  { m_coeffs = other.vector().template cast<Scalar>(); }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const Translation& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
-  { return m_coeffs.isApprox(other.m_coeffs, prec); }
-
-};
-
-/** \addtogroup Geometry_Module */
-//@{
-typedef Translation<float, 2> Translation2f;
-typedef Translation<double,2> Translation2d;
-typedef Translation<float, 3> Translation3f;
-typedef Translation<double,3> Translation3d;
-//@}
-
-
-template<typename Scalar, int Dim>
-inline typename Translation<Scalar,Dim>::TransformType
-Translation<Scalar,Dim>::operator* (const ScalingType& other) const
-{
-  TransformType res;
-  res.matrix().setZero();
-  res.linear().diagonal() = other.coeffs();
-  res.translation() = m_coeffs;
-  res(Dim,Dim) = Scalar(1);
-  return res;
-}
-
-template<typename Scalar, int Dim>
-inline typename Translation<Scalar,Dim>::TransformType
-Translation<Scalar,Dim>::operator* (const LinearMatrixType& linear) const
-{
-  TransformType res;
-  res.matrix().setZero();
-  res.linear() = linear;
-  res.translation() = m_coeffs;
-  res.matrix().row(Dim).setZero();
-  res(Dim,Dim) = Scalar(1);
-  return res;
-}
-
-template<typename Scalar, int Dim>
-inline typename Translation<Scalar,Dim>::TransformType
-Translation<Scalar,Dim>::operator* (const TransformType& t) const
-{
-  TransformType res = t;
-  res.pretranslate(m_coeffs);
-  return res;
-}
-
-} // end namespace Eigen
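// Illustrative sketch of the Translation class above and how it combines with
// rotations into a Transform; not part of the removed file. Assumption: Eigen2
// compatibility typedefs; the function name is hypothetical.
#include <Eigen/Geometry>

void translation_sketch()
{
  Eigen::Translation3f t(1.f, 2.f, 3.f);
  Eigen::Vector3f q = t * Eigen::Vector3f::Zero();       // -> (1, 2, 3)
  Eigen::Translation3f back = t.inverse();               // the opposite translation
  Eigen::Transform3f T = t * Eigen::AngleAxisf(0.1f, Eigen::Vector3f::UnitX()); // Translation * rotation
  (void)q; (void)back; (void)T;
}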
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/LeastSquares.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/LeastSquares.h
deleted file mode 100644
index 0e6fdb488..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/LeastSquares.h
+++ /dev/null
@@ -1,170 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN2_LEASTSQUARES_H
-#define EIGEN2_LEASTSQUARES_H
-
-namespace Eigen { 
-
-/** \ingroup LeastSquares_Module
-  *
-  * \leastsquares_module
-  *
-  * For a set of points, this function tries to express
-  * one of the coords as a linear (affine) function of the other coords.
-  *
-  * This is best explained by an example. This function works in full
-  * generality, for points in a space of arbitrary dimension, and also over
-  * the complex numbers, but for this example we will work in dimension 3
-  * over the real numbers (doubles).
-  *
-  * So let us work with the following set of 5 points given by their
-  * \f$(x,y,z)\f$ coordinates:
-  * @code
-    Vector3d points[5];
-    points[0] = Vector3d( 3.02, 6.89, -4.32 );
-    points[1] = Vector3d( 2.01, 5.39, -3.79 );
-    points[2] = Vector3d( 2.41, 6.01, -4.01 );
-    points[3] = Vector3d( 2.09, 5.55, -3.86 );
-    points[4] = Vector3d( 2.58, 6.32, -4.10 );
-  * @endcode
-  * Suppose that we want to express the second coordinate (\f$y\f$) as a linear
-  * expression in \f$x\f$ and \f$z\f$, that is,
-  * \f[ y=ax+bz+c \f]
-  * for some constants \f$a,b,c\f$. Thus, we want to find the best possible
-  * constants \f$a,b,c\f$ so that the plane of equation \f$y=ax+bz+c\f$ fits
-  * best the five above points. To do that, call this function as follows:
-  * @code
-    Vector3d coeffs; // will store the coefficients a, b, c
-    Vector3d* pointPtrs[5] = { &points[0], &points[1], &points[2], &points[3], &points[4] };
-    linearRegression(
-      5,
-      pointPtrs, // array of pointers to the points, as expected by the function
-      &coeffs,
-      1 // the coord to express as a function of
-        // the other ones. 0 means x, 1 means y, 2 means z.
-    );
-  * @endcode
-  * Now the vector \a coeffs is approximately
-  * \f$( 0.495 ,  -1.927 ,  -2.906 )\f$.
-  * Thus, we get \f$a=0.495, b = -1.927, c = -2.906\f$. Let us check for
-  * instance how near points[0] is from the plane of equation \f$y=ax+bz+c\f$.
-  * Looking at the coords of points[0], we see that:
-  * \f[ax+bz+c = 0.495 * 3.02 + (-1.927) * (-4.32) + (-2.906) = 6.91.\f]
-  * On the other hand, we have \f$y=6.89\f$. We see that the values
-  * \f$6.91\f$ and \f$6.89\f$
-  * are near, so points[0] is very near the plane of equation \f$y=ax+bz+c\f$.
-  *
-  * Let's now describe precisely the parameters:
-  * @param numPoints the number of points
-  * @param points the array of pointers to the points on which to perform the linear regression
-  * @param result pointer to the vector in which to store the result.
-                  This vector must be of the same type and size as the
-                  data points. The meaning of its coords is as follows.
-                  For brevity, let \f$n=Size\f$,
-                  \f$r_i=result[i]\f$,
-                  and \f$f=funcOfOthers\f$. Denote by
-                  \f$x_0,\ldots,x_{n-1}\f$
-                  the n coordinates in the n-dimensional space.
-                  Then the resulting equation is:
-                  \f[ x_f = r_0 x_0 + \cdots + r_{f-1}x_{f-1}
-                   + r_{f+1}x_{f+1} + \cdots + r_{n-1}x_{n-1} + r_n. \f]
-  * @param funcOfOthers Determines which coord to express as a function of the
-                        others. Coords are numbered starting from 0, so that a
-                        value of 0 means \f$x\f$, 1 means \f$y\f$,
-                        2 means \f$z\f$, ...
-  *
-  * \sa fitHyperplane()
-  */
-template<typename VectorType>
-void linearRegression(int numPoints,
-                      VectorType **points,
-                      VectorType *result,
-                      int funcOfOthers )
-{
-  typedef typename VectorType::Scalar Scalar;
-  typedef Hyperplane<Scalar, VectorType::SizeAtCompileTime> HyperplaneType;
-  const int size = points[0]->size();
-  result->resize(size);
-  HyperplaneType h(size);
-  fitHyperplane(numPoints, points, &h);
-  for(int i = 0; i < funcOfOthers; i++)
-    result->coeffRef(i) = - h.coeffs()[i] / h.coeffs()[funcOfOthers];
-  for(int i = funcOfOthers; i < size; i++)
-    result->coeffRef(i) = - h.coeffs()[i+1] / h.coeffs()[funcOfOthers];
-}
-
-/** \ingroup LeastSquares_Module
-  *
-  * \leastsquares_module
-  *
-  * This function is quite similar to linearRegression(), so we refer to the
-  * documentation of this function and only list here the differences.
-  *
-  * The main difference from linearRegression() is that this function doesn't
-  * take a \a funcOfOthers argument. Instead, it finds a general equation
-  * of the form
-  * \f[ r_0 x_0 + \cdots + r_{n-1}x_{n-1} + r_n = 0, \f]
-  * where \f$n=Size\f$, \f$r_i=retCoefficients[i]\f$, and we denote by
-  * \f$x_0,\ldots,x_{n-1}\f$ the n coordinates in the n-dimensional space.
-  *
-  * Thus, the vector \a retCoefficients has size \f$n+1\f$, which is another
-  * difference from linearRegression().
-  *
-  * In practice, this function performs a hyperplane fit in a total least-squares sense
-  * via the following steps:
-  *  1 - center the data to the mean
-  *  2 - compute the covariance matrix
-  *  3 - pick the eigenvector corresponding to the smallest eigenvalue of the covariance matrix
-  * The ratio of the smallest eigenvalue and the second one gives us a hint about the relevance
-  * of the solution. This value is optionally returned in \a soundness.
-  *
-  * \sa linearRegression()
-  */
-template<typename VectorType, typename HyperplaneType>
-void fitHyperplane(int numPoints,
-                   VectorType **points,
-                   HyperplaneType *result,
-                   typename NumTraits<typename VectorType::Scalar>::Real* soundness = 0)
-{
-  typedef typename VectorType::Scalar Scalar;
-  typedef Matrix<Scalar,VectorType::SizeAtCompileTime,VectorType::SizeAtCompileTime> CovMatrixType;
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType)
-  ei_assert(numPoints >= 1);
-  int size = points[0]->size();
-  ei_assert(size+1 == result->coeffs().size());
-
-  // compute the mean of the data
-  VectorType mean = VectorType::Zero(size);
-  for(int i = 0; i < numPoints; ++i)
-    mean += *(points[i]);
-  mean /= numPoints;
-
-  // compute the covariance matrix
-  CovMatrixType covMat = CovMatrixType::Zero(size, size);
-  VectorType remean = VectorType::Zero(size);
-  for(int i = 0; i < numPoints; ++i)
-  {
-    VectorType diff = (*(points[i]) - mean).conjugate();
-    covMat += diff * diff.adjoint();
-  }
-
-  // now we just have to pick the eigenvector with the smallest eigenvalue
-  SelfAdjointEigenSolver<CovMatrixType> eig(covMat);
-  result->normal() = eig.eigenvectors().col(0);
-  if (soundness)
-    *soundness = eig.eigenvalues().coeff(0)/eig.eigenvalues().coeff(1);
-
-  // let's compute the constant coefficient such that the
-  // plane passes through the mean point:
-  result->offset() = - (result->normal().cwise()* mean).sum();
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN2_LEASTSQUARES_H
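// Illustrative sketch of fitHyperplane() from the module above; not part of the
// removed file. Assumptions: the deprecated LeastSquares module is still available
// (EIGEN2_SUPPORT), Hyperplane comes from <Eigen/Geometry>, and the function name
// is hypothetical.
#include <Eigen/Geometry>
#include <Eigen/LeastSquares>

void fit_plane_sketch()
{
  Eigen::Vector3d pts[3] = { Eigen::Vector3d(0, 0, 1),
                             Eigen::Vector3d(1, 0, 1),
                             Eigen::Vector3d(0, 1, 1) };          // three points on the plane z = 1
  Eigen::Vector3d* ptrs[3] = { &pts[0], &pts[1], &pts[2] };       // array of pointers, as expected
  Eigen::Hyperplane<double, 3> plane(3);                          // n+1 = 4 coefficients
  double soundness = 0;
  Eigen::fitHyperplane(3, ptrs, &plane, &soundness);              // total least-squares plane fit
  // plane.normal() is now (0, 0, +/-1) and plane.offset() the matching -/+1.
  (void)soundness;
}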
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/SVD.h b/resources/3rdparty/eigen/Eigen/src/Eigen2Support/SVD.h
deleted file mode 100644
index a08b695a4..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigen2Support/SVD.h
+++ /dev/null
@@ -1,638 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN2_SVD_H
-#define EIGEN2_SVD_H
-
-namespace Eigen {
-
-/** \ingroup SVD_Module
-  * \nonstableyet
-  *
-  * \class SVD
-  *
-  * \brief Standard SVD decomposition of a matrix and associated features
-  *
-  * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
-  *
-  * This class performs a standard SVD decomposition of a real matrix A of size \c M x \c N
-  * with \c M \>= \c N.
-  *
-  *
-  * \sa MatrixBase::SVD()
-  */
-template<typename MatrixType> class SVD
-{
-  private:
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
-
-    enum {
-      PacketSize = internal::packet_traits<Scalar>::size,
-      AlignmentMask = int(PacketSize)-1,
-      MinSize = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime)
-    };
-
-    typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVector;
-    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> RowVector;
-
-    typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MinSize> MatrixUType;
-    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> MatrixVType;
-    typedef Matrix<Scalar, MinSize, 1> SingularValuesType;
-
-  public:
-
-    SVD() {} // a user who relied on the compiler-generated default constructor reported problems with MSVC in 2.0.7
-    
-    SVD(const MatrixType& matrix)
-      : m_matU(matrix.rows(), (std::min)(matrix.rows(), matrix.cols())),
-        m_matV(matrix.cols(),matrix.cols()),
-        m_sigma((std::min)(matrix.rows(),matrix.cols()))
-    {
-      compute(matrix);
-    }
-
-    template<typename OtherDerived, typename ResultType>
-    bool solve(const MatrixBase<OtherDerived> &b, ResultType* result) const;
-
-    const MatrixUType& matrixU() const { return m_matU; }
-    const SingularValuesType& singularValues() const { return m_sigma; }
-    const MatrixVType& matrixV() const { return m_matV; }
-
-    void compute(const MatrixType& matrix);
-    SVD& sort();
-
-    template<typename UnitaryType, typename PositiveType>
-    void computeUnitaryPositive(UnitaryType *unitary, PositiveType *positive) const;
-    template<typename PositiveType, typename UnitaryType>
-    void computePositiveUnitary(PositiveType *positive, UnitaryType *unitary) const;
-    template<typename RotationType, typename ScalingType>
-    void computeRotationScaling(RotationType *rotation, ScalingType *scaling) const;
-    template<typename ScalingType, typename RotationType>
-    void computeScalingRotation(ScalingType *scaling, RotationType *rotation) const;
-
-  protected:
-    /** \internal */
-    MatrixUType m_matU;
-    /** \internal */
-    MatrixVType m_matV;
-    /** \internal */
-    SingularValuesType m_sigma;
-};
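-
-/* Illustrative sketch of the basic workflow (remember the M >= N restriction
-   stated above); since U is M x N, S is N x 1 and V is N x N here, the factors
-   reconstruct the input up to numerical error:
-   \code
-   MatrixXd A = MatrixXd::Random(5,3);
-   SVD<MatrixXd> svd(A);
-   MatrixXd reconstructed = svd.matrixU()
-                          * svd.singularValues().asDiagonal()
-                          * svd.matrixV().transpose();
-   // (A - reconstructed).norm() is tiny
-   \endcode
-*/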
-
-/** Computes / recomputes the SVD decomposition A = U S V^* of \a matrix
-  *
-  * \note this code has been adapted from JAMA (public domain)
-  */
-template<typename MatrixType>
-void SVD<MatrixType>::compute(const MatrixType& matrix)
-{
-  const int m = matrix.rows();
-  const int n = matrix.cols();
-  const int nu = (std::min)(m,n);
-  ei_assert(m>=n && "In Eigen 2.0, SVD only works for MxN matrices with M>=N. Sorry!");
-  ei_assert(m>1 && "In Eigen 2.0, SVD doesn't work on 1x1 matrices");
-
-  m_matU.resize(m, nu);
-  m_matU.setZero();
-  m_sigma.resize((std::min)(m,n));
-  m_matV.resize(n,n);
-
-  RowVector e(n);
-  ColVector work(m);
-  MatrixType matA(matrix);
-  const bool wantu = true;
-  const bool wantv = true;
-  int i=0, j=0, k=0;
-
-  // Reduce A to bidiagonal form, storing the diagonal elements
-  // in s and the super-diagonal elements in e.
-  int nct = (std::min)(m-1,n);
-  int nrt = (std::max)(0,(std::min)(n-2,m));
-  for (k = 0; k < (std::max)(nct,nrt); ++k)
-  {
-    if (k < nct)
-    {
-      // Compute the transformation for the k-th column and
-      // place the k-th diagonal in m_sigma[k].
-      m_sigma[k] = matA.col(k).end(m-k).norm();
-      if (m_sigma[k] != 0.0) // FIXME
-      {
-        if (matA(k,k) < 0.0)
-          m_sigma[k] = -m_sigma[k];
-        matA.col(k).end(m-k) /= m_sigma[k];
-        matA(k,k) += 1.0;
-      }
-      m_sigma[k] = -m_sigma[k];
-    }
-
-    for (j = k+1; j < n; ++j)
-    {
-      if ((k < nct) && (m_sigma[k] != 0.0))
-      {
-        // Apply the transformation.
-        Scalar t = matA.col(k).end(m-k).eigen2_dot(matA.col(j).end(m-k)); // FIXME dot product or cwise prod + .sum() ??
-        t = -t/matA(k,k);
-        matA.col(j).end(m-k) += t * matA.col(k).end(m-k);
-      }
-
-      // Place the k-th row of A into e for the
-      // subsequent calculation of the row transformation.
-      e[j] = matA(k,j);
-    }
-
-    // Place the transformation in U for subsequent back multiplication.
-    if (wantu & (k < nct))
-      m_matU.col(k).end(m-k) = matA.col(k).end(m-k);
-
-    if (k < nrt)
-    {
-      // Compute the k-th row transformation and place the
-      // k-th super-diagonal in e[k].
-      e[k] = e.end(n-k-1).norm();
-      if (e[k] != 0.0)
-      {
-          if (e[k+1] < 0.0)
-            e[k] = -e[k];
-          e.end(n-k-1) /= e[k];
-          e[k+1] += 1.0;
-      }
-      e[k] = -e[k];
-      if ((k+1 < m) & (e[k] != 0.0))
-      {
-        // Apply the transformation.
-        work.end(m-k-1) = matA.corner(BottomRight,m-k-1,n-k-1) * e.end(n-k-1);
-        for (j = k+1; j < n; ++j)
-          matA.col(j).end(m-k-1) += (-e[j]/e[k+1]) * work.end(m-k-1);
-      }
-
-      // Place the transformation in V for subsequent back multiplication.
-      if (wantv)
-        m_matV.col(k).end(n-k-1) = e.end(n-k-1);
-    }
-  }
-
-
-  // Set up the final bidiagonal matrix or order p.
-  int p = (std::min)(n,m+1);
-  if (nct < n)
-    m_sigma[nct] = matA(nct,nct);
-  if (m < p)
-    m_sigma[p-1] = 0.0;
-  if (nrt+1 < p)
-    e[nrt] = matA(nrt,p-1);
-  e[p-1] = 0.0;
-
-  // If required, generate U.
-  if (wantu)
-  {
-    for (j = nct; j < nu; ++j)
-    {
-      m_matU.col(j).setZero();
-      m_matU(j,j) = 1.0;
-    }
-    for (k = nct-1; k >= 0; k--)
-    {
-      if (m_sigma[k] != 0.0)
-      {
-        for (j = k+1; j < nu; ++j)
-        {
-          Scalar t = m_matU.col(k).end(m-k).eigen2_dot(m_matU.col(j).end(m-k)); // FIXME is it really a dot product we want ?
-          t = -t/m_matU(k,k);
-          m_matU.col(j).end(m-k) += t * m_matU.col(k).end(m-k);
-        }
-        m_matU.col(k).end(m-k) = - m_matU.col(k).end(m-k);
-        m_matU(k,k) = Scalar(1) + m_matU(k,k);
-        if (k-1>0)
-          m_matU.col(k).start(k-1).setZero();
-      }
-      else
-      {
-        m_matU.col(k).setZero();
-        m_matU(k,k) = 1.0;
-      }
-    }
-  }
-
-  // If required, generate V.
-  if (wantv)
-  {
-    for (k = n-1; k >= 0; k--)
-    {
-      if ((k < nrt) & (e[k] != 0.0))
-      {
-        for (j = k+1; j < nu; ++j)
-        {
-          Scalar t = m_matV.col(k).end(n-k-1).eigen2_dot(m_matV.col(j).end(n-k-1)); // FIXME is it really a dot product we want ?
-          t = -t/m_matV(k+1,k);
-          m_matV.col(j).end(n-k-1) += t * m_matV.col(k).end(n-k-1);
-        }
-      }
-      m_matV.col(k).setZero();
-      m_matV(k,k) = 1.0;
-    }
-  }
-
-  // Main iteration loop for the singular values.
-  int pp = p-1;
-  int iter = 0;
-  Scalar eps = ei_pow(Scalar(2),ei_is_same_type<Scalar,float>::ret ? Scalar(-23) : Scalar(-52));
-  while (p > 0)
-  {
-    int k=0;
-    int kase=0;
-
-    // Here is where a test for too many iterations would go.
-
-    // This section of the program inspects for
-    // negligible elements in the s and e arrays.  On
-    // completion the variables kase and k are set as follows.
-
-    // kase = 1     if s(p) and e[k-1] are negligible and k<p
-    // kase = 2     if s(k) is negligible and k<p
-    // kase = 3     if e[k-1] is negligible, k<p, and
-    //              s(k), ..., s(p) are not negligible (qr step).
-    // kase = 4     if e(p-1) is negligible (convergence).
-
-    for (k = p-2; k >= -1; --k)
-    {
-      if (k == -1)
-          break;
-      if (ei_abs(e[k]) <= eps*(ei_abs(m_sigma[k]) + ei_abs(m_sigma[k+1])))
-      {
-          e[k] = 0.0;
-          break;
-      }
-    }
-    if (k == p-2)
-    {
-      kase = 4;
-    }
-    else
-    {
-      int ks;
-      for (ks = p-1; ks >= k; --ks)
-      {
-        if (ks == k)
-          break;
-        Scalar t = (ks != p ? ei_abs(e[ks]) : Scalar(0)) + (ks != k+1 ? ei_abs(e[ks-1]) : Scalar(0));
-        if (ei_abs(m_sigma[ks]) <= eps*t)
-        {
-          m_sigma[ks] = 0.0;
-          break;
-        }
-      }
-      if (ks == k)
-      {
-        kase = 3;
-      }
-      else if (ks == p-1)
-      {
-        kase = 1;
-      }
-      else
-      {
-        kase = 2;
-        k = ks;
-      }
-    }
-    ++k;
-
-    // Perform the task indicated by kase.
-    switch (kase)
-    {
-
-      // Deflate negligible s(p).
-      case 1:
-      {
-        Scalar f(e[p-2]);
-        e[p-2] = 0.0;
-        for (j = p-2; j >= k; --j)
-        {
-          Scalar t(internal::hypot(m_sigma[j],f));
-          Scalar cs(m_sigma[j]/t);
-          Scalar sn(f/t);
-          m_sigma[j] = t;
-          if (j != k)
-          {
-            f = -sn*e[j-1];
-            e[j-1] = cs*e[j-1];
-          }
-          if (wantv)
-          {
-            for (i = 0; i < n; ++i)
-            {
-              t = cs*m_matV(i,j) + sn*m_matV(i,p-1);
-              m_matV(i,p-1) = -sn*m_matV(i,j) + cs*m_matV(i,p-1);
-              m_matV(i,j) = t;
-            }
-          }
-        }
-      }
-      break;
-
-      // Split at negligible s(k).
-      case 2:
-      {
-        Scalar f(e[k-1]);
-        e[k-1] = 0.0;
-        for (j = k; j < p; ++j)
-        {
-          Scalar t(internal::hypot(m_sigma[j],f));
-          Scalar cs( m_sigma[j]/t);
-          Scalar sn(f/t);
-          m_sigma[j] = t;
-          f = -sn*e[j];
-          e[j] = cs*e[j];
-          if (wantu)
-          {
-            for (i = 0; i < m; ++i)
-            {
-              t = cs*m_matU(i,j) + sn*m_matU(i,k-1);
-              m_matU(i,k-1) = -sn*m_matU(i,j) + cs*m_matU(i,k-1);
-              m_matU(i,j) = t;
-            }
-          }
-        }
-      }
-      break;
-
-      // Perform one qr step.
-      case 3:
-      {
-        // Calculate the shift.
-        Scalar scale = (std::max)((std::max)((std::max)((std::max)(
-                        ei_abs(m_sigma[p-1]),ei_abs(m_sigma[p-2])),ei_abs(e[p-2])),
-                        ei_abs(m_sigma[k])),ei_abs(e[k]));
-        Scalar sp = m_sigma[p-1]/scale;
-        Scalar spm1 = m_sigma[p-2]/scale;
-        Scalar epm1 = e[p-2]/scale;
-        Scalar sk = m_sigma[k]/scale;
-        Scalar ek = e[k]/scale;
-        Scalar b = ((spm1 + sp)*(spm1 - sp) + epm1*epm1)/Scalar(2);
-        Scalar c = (sp*epm1)*(sp*epm1);
-        Scalar shift(0);
-        if ((b != 0.0) || (c != 0.0))
-        {
-          shift = ei_sqrt(b*b + c);
-          if (b < 0.0)
-            shift = -shift;
-          shift = c/(b + shift);
-        }
-        Scalar f = (sk + sp)*(sk - sp) + shift;
-        Scalar g = sk*ek;
-
-        // Chase zeros.
-
-        for (j = k; j < p-1; ++j)
-        {
-          Scalar t = internal::hypot(f,g);
-          Scalar cs = f/t;
-          Scalar sn = g/t;
-          if (j != k)
-            e[j-1] = t;
-          f = cs*m_sigma[j] + sn*e[j];
-          e[j] = cs*e[j] - sn*m_sigma[j];
-          g = sn*m_sigma[j+1];
-          m_sigma[j+1] = cs*m_sigma[j+1];
-          if (wantv)
-          {
-            for (i = 0; i < n; ++i)
-            {
-              t = cs*m_matV(i,j) + sn*m_matV(i,j+1);
-              m_matV(i,j+1) = -sn*m_matV(i,j) + cs*m_matV(i,j+1);
-              m_matV(i,j) = t;
-            }
-          }
-          t = internal::hypot(f,g);
-          cs = f/t;
-          sn = g/t;
-          m_sigma[j] = t;
-          f = cs*e[j] + sn*m_sigma[j+1];
-          m_sigma[j+1] = -sn*e[j] + cs*m_sigma[j+1];
-          g = sn*e[j+1];
-          e[j+1] = cs*e[j+1];
-          if (wantu && (j < m-1))
-          {
-            for (i = 0; i < m; ++i)
-            {
-              t = cs*m_matU(i,j) + sn*m_matU(i,j+1);
-              m_matU(i,j+1) = -sn*m_matU(i,j) + cs*m_matU(i,j+1);
-              m_matU(i,j) = t;
-            }
-          }
-        }
-        e[p-2] = f;
-        iter = iter + 1;
-      }
-      break;
-
-      // Convergence.
-      case 4:
-      {
-        // Make the singular values positive.
-        if (m_sigma[k] <= 0.0)
-        {
-          m_sigma[k] = m_sigma[k] < Scalar(0) ? -m_sigma[k] : Scalar(0);
-          if (wantv)
-            m_matV.col(k).start(pp+1) = -m_matV.col(k).start(pp+1);
-        }
-
-        // Order the singular values.
-        while (k < pp)
-        {
-          if (m_sigma[k] >= m_sigma[k+1])
-            break;
-          Scalar t = m_sigma[k];
-          m_sigma[k] = m_sigma[k+1];
-          m_sigma[k+1] = t;
-          if (wantv && (k < n-1))
-            m_matV.col(k).swap(m_matV.col(k+1));
-          if (wantu && (k < m-1))
-            m_matU.col(k).swap(m_matU.col(k+1));
-          ++k;
-        }
-        iter = 0;
-        p--;
-      }
-      break;
-    } // end big switch
-  } // end iterations
-}
-
-template<typename MatrixType>
-SVD<MatrixType>& SVD<MatrixType>::sort()
-{
-  int mu = m_matU.rows();
-  int mv = m_matV.rows();
-  int n  = m_matU.cols();
-
-  for (int i=0; i<n; ++i)
-  {
-    int  k = i;
-    Scalar p = m_sigma.coeff(i);
-
-    for (int j=i+1; j<n; ++j)
-    {
-      if (m_sigma.coeff(j) > p)
-      {
-        k = j;
-        p = m_sigma.coeff(j);
-      }
-    }
-    if (k != i)
-    {
-      m_sigma.coeffRef(k) = m_sigma.coeff(i);  // i.e.
-      m_sigma.coeffRef(i) = p;                 // swaps the i-th and the k-th elements
-
-      int j = mu;
-      for(int s=0; j!=0; ++s, --j)
-        std::swap(m_matU.coeffRef(s,i), m_matU.coeffRef(s,k));
-
-      j = mv;
-      for (int s=0; j!=0; ++s, --j)
-        std::swap(m_matV.coeffRef(s,i), m_matV.coeffRef(s,k));
-    }
-  }
-  return *this;
-}
-
-/** \returns the solution of \f$ A x = b \f$ using the current SVD decomposition of A.
-  * The parts of the solution corresponding to zero singular values are ignored.
-  *
-  * \sa MatrixBase::svd(), LU::solve(), LLT::solve()
-  */
-template<typename MatrixType>
-template<typename OtherDerived, typename ResultType>
-bool SVD<MatrixType>::solve(const MatrixBase<OtherDerived> &b, ResultType* result) const
-{
-  const int rows = m_matU.rows();
-  ei_assert(b.rows() == rows);
-
-  Scalar maxVal = m_sigma.cwise().abs().maxCoeff();
-  for (int j=0; j<b.cols(); ++j)
-  {
-    Matrix<Scalar,MatrixUType::RowsAtCompileTime,1> aux = m_matU.transpose() * b.col(j);
-
-    for (int i = 0; i <m_matU.cols(); ++i)
-    {
-      Scalar si = m_sigma.coeff(i);
-      if (ei_isMuchSmallerThan(ei_abs(si),maxVal))
-        aux.coeffRef(i) = 0;
-      else
-        aux.coeffRef(i) /= si;
-    }
-
-    result->col(j) = m_matV * aux;
-  }
-  return true;
-}
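-
-/* Sketch of a least-squares style solve with the decomposition above; as
-   documented, components belonging to (near-)zero singular values are
-   simply ignored:
-   \code
-   MatrixXd A = MatrixXd::Random(6,3);
-   VectorXd b = VectorXd::Random(6);
-   VectorXd x(3);
-   SVD<MatrixXd> svd(A);
-   svd.solve(b, &x);   // x now approximates the minimizer of |A x - b|
-   \endcode
-*/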
-
-/** Computes the polar decomposition of the matrix, as a product unitary x positive.
-  *
-  * If either pointer is zero, the corresponding computation is skipped.
-  *
-  * Only for square matrices.
-  *
-  * \sa computePositiveUnitary(), computeRotationScaling()
-  */
-template<typename MatrixType>
-template<typename UnitaryType, typename PositiveType>
-void SVD<MatrixType>::computeUnitaryPositive(UnitaryType *unitary,
-                                             PositiveType *positive) const
-{
-  ei_assert(m_matU.cols() == m_matV.cols() && "Polar decomposition is only for square matrices");
-  if(unitary) *unitary = m_matU * m_matV.adjoint();
-  if(positive) *positive = m_matV * m_sigma.asDiagonal() * m_matV.adjoint();
-}
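-
-/* Illustrative sketch of the polar decomposition described above,
-   A = unitary * positive, for a square matrix:
-   \code
-   Matrix3d A = Matrix3d::Random();
-   SVD<Matrix3d> svd(A);
-   Matrix3d unitary, positive;
-   svd.computeUnitaryPositive(&unitary, &positive);
-   // A is approximately unitary * positive, and 'positive' is
-   // symmetric positive semi-definite
-   \endcode
-*/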
-
-/** Computes the polar decomposition of the matrix, as a product positive x unitary.
-  *
-  * If either pointer is zero, the corresponding computation is skipped.
-  *
-  * Only for square matrices.
-  *
-  * \sa computeUnitaryPositive(), computeRotationScaling()
-  */
-template<typename MatrixType>
-template<typename PositiveType, typename UnitaryType>
-void SVD<MatrixType>::computePositiveUnitary(PositiveType *positive,
-                                             UnitaryType *unitary) const
-{
-  ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices");
-  if(unitary) *unitary = m_matU * m_matV.adjoint();
-  if(positive) *positive = m_matU * m_sigma.asDiagonal() * m_matU.adjoint();
-}
-
-/** decomposes the matrix as a product rotation x scaling, the scaling being
-  * not necessarily positive.
-  *
-  * If either pointer is zero, the corresponding computation is skipped.
-  *
-  * This method requires the Geometry module.
-  *
-  * \sa computeScalingRotation(), computeUnitaryPositive()
-  */
-template<typename MatrixType>
-template<typename RotationType, typename ScalingType>
-void SVD<MatrixType>::computeRotationScaling(RotationType *rotation, ScalingType *scaling) const
-{
-  ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices");
-  Scalar x = (m_matU * m_matV.adjoint()).determinant(); // so x has absolute value 1
-  Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> sv(m_sigma);
-  sv.coeffRef(0) *= x;
-  if(scaling) scaling->lazyAssign(m_matV * sv.asDiagonal() * m_matV.adjoint());
-  if(rotation)
-  {
-    MatrixType m(m_matU);
-    m.col(0) /= x;
-    rotation->lazyAssign(m * m_matV.adjoint());
-  }
-}
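-
-/* Sketch of the rotation * scaling variant (this requires the Geometry
-   module, as noted above). Unlike computeUnitaryPositive(), the rotation is
-   forced to have determinant +1, so the scaling may absorb a sign flip:
-   \code
-   Matrix3d A = Matrix3d::Random();
-   Matrix3d rotation, scaling;
-   A.svd().computeRotationScaling(&rotation, &scaling);
-   // A is approximately rotation * scaling, with rotation.determinant() close to +1
-   \endcode
-*/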
-
-/** decomposes the matrix as a product scaling x rotation, the scaling being
-  * not necessarily positive.
-  *
-  * If either pointer is zero, the corresponding computation is skipped.
-  *
-  * This method requires the Geometry module.
-  *
-  * \sa computeRotationScaling(), computeUnitaryPositive()
-  */
-template<typename MatrixType>
-template<typename ScalingType, typename RotationType>
-void SVD<MatrixType>::computeScalingRotation(ScalingType *scaling, RotationType *rotation) const
-{
-  ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices");
-  Scalar x = (m_matU * m_matV.adjoint()).determinant(); // so x has absolute value 1
-  Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> sv(m_sigma);
-  sv.coeffRef(0) *= x;
-  if(scaling) scaling->lazyAssign(m_matU * sv.asDiagonal() * m_matU.adjoint());
-  if(rotation)
-  {
-    MatrixType m(m_matU);
-    m.col(0) /= x;
-    rotation->lazyAssign(m * m_matV.adjoint());
-  }
-}
-
-
-/** \svd_module
-  * \returns the SVD decomposition of \c *this
-  */
-template<typename Derived>
-inline SVD<typename MatrixBase<Derived>::PlainObject>
-MatrixBase<Derived>::svd() const
-{
-  return SVD<PlainObject>(derived());
-}
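-
-/* Sketch: the accessor above is simply a convenience wrapper, equivalent to
-   constructing the decomposition explicitly:
-   \code
-   MatrixXf m = MatrixXf::Random(4,3);
-   VectorXf sv = m.svd().singularValues();   // same as SVD<MatrixXf>(m).singularValues()
-   \endcode
-*/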
-
-} // end namespace Eigen
-
-#endif // EIGEN2_SVD_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/resources/3rdparty/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h
deleted file mode 100644
index 95c70aecb..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h
+++ /dev/null
@@ -1,333 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Claire Maurice
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_COMPLEX_EIGEN_SOLVER_H
-#define EIGEN_COMPLEX_EIGEN_SOLVER_H
-
-#include "./ComplexSchur.h"
-
-namespace Eigen { 
-
-/** \eigenvalues_module \ingroup Eigenvalues_Module
-  *
-  *
-  * \class ComplexEigenSolver
-  *
-  * \brief Computes eigenvalues and eigenvectors of general complex matrices
-  *
-  * \tparam _MatrixType the type of the matrix of which we are
-  * computing the eigendecomposition; this is expected to be an
-  * instantiation of the Matrix class template.
-  *
-  * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
-  * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v
-  * \f$.  If \f$ D \f$ is a diagonal matrix with the eigenvalues on
-  * the diagonal, and \f$ V \f$ is a matrix with the eigenvectors as
-  * its columns, then \f$ A V = V D \f$. The matrix \f$ V \f$ is
-  * almost always invertible, in which case we have \f$ A = V D V^{-1}
-  * \f$. This is called the eigendecomposition.
-  *
-  * The main function in this class is compute(), which computes the
-  * eigenvalues and eigenvectors of a given matrix. The
-  * documentation for that function contains an example showing the
-  * main features of the class.
-  *
-  * \sa class EigenSolver, class SelfAdjointEigenSolver
-  */
-template<typename _MatrixType> class ComplexEigenSolver
-{
-  public:
-
-    /** \brief Synonym for the template parameter \p _MatrixType. */
-    typedef _MatrixType MatrixType;
-
-    enum {
-      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-      Options = MatrixType::Options,
-      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
-    };
-
-    /** \brief Scalar type for matrices of type #MatrixType. */
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-    typedef typename MatrixType::Index Index;
-
-    /** \brief Complex scalar type for #MatrixType.
-      *
-      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
-      * \c float or \c double) and just \c Scalar if #Scalar is
-      * complex.
-      */
-    typedef std::complex<RealScalar> ComplexScalar;
-
-    /** \brief Type for vector of eigenvalues as returned by eigenvalues().
-      *
-      * This is a column vector with entries of type #ComplexScalar.
-      * The length of the vector is the size of #MatrixType.
-      */
-    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options&(~RowMajor), MaxColsAtCompileTime, 1> EigenvalueType;
-
-    /** \brief Type for matrix of eigenvectors as returned by eigenvectors().
-      *
-      * This is a square matrix with entries of type #ComplexScalar.
-      * The size is the same as the size of #MatrixType.
-      */
-    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorType;
-
-    /** \brief Default constructor.
-      *
-      * The default constructor is useful in cases in which the user intends to
-      * perform decompositions via compute().
-      */
-    ComplexEigenSolver()
-            : m_eivec(),
-              m_eivalues(),
-              m_schur(),
-              m_isInitialized(false),
-              m_eigenvectorsOk(false),
-              m_matX()
-    {}
-
-    /** \brief Default constructor with memory preallocation
-      *
-      * Like the default constructor but with preallocation of the internal data
-      * according to the specified problem \a size.
-      * \sa ComplexEigenSolver()
-      */
-    ComplexEigenSolver(Index size)
-            : m_eivec(size, size),
-              m_eivalues(size),
-              m_schur(size),
-              m_isInitialized(false),
-              m_eigenvectorsOk(false),
-              m_matX(size, size)
-    {}
-
-    /** \brief Constructor; computes eigendecomposition of given matrix.
-      *
-      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
-      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
-      *    eigenvalues are computed; if false, only the eigenvalues are
-      *    computed.
-      *
-      * This constructor calls compute() to compute the eigendecomposition.
-      */
-      ComplexEigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
-            : m_eivec(matrix.rows(),matrix.cols()),
-              m_eivalues(matrix.cols()),
-              m_schur(matrix.rows()),
-              m_isInitialized(false),
-              m_eigenvectorsOk(false),
-              m_matX(matrix.rows(),matrix.cols())
-    {
-      compute(matrix, computeEigenvectors);
-    }
-
-    /** \brief Returns the eigenvectors of given matrix.
-      *
-      * \returns  A const reference to the matrix whose columns are the eigenvectors.
-      *
-      * \pre Either the constructor
-      * ComplexEigenSolver(const MatrixType& matrix, bool) or the member
-      * function compute(const MatrixType& matrix, bool) has been called before
-      * to compute the eigendecomposition of a matrix, and
-      * \p computeEigenvectors was set to true (the default).
-      *
-      * This function returns a matrix whose columns are the eigenvectors. Column
-      * \f$ k \f$ is an eigenvector corresponding to eigenvalue number \f$ k
-      * \f$ as returned by eigenvalues().  The eigenvectors are normalized to
-      * have (Euclidean) norm equal to one. The matrix returned by this
-      * function is the matrix \f$ V \f$ in the eigendecomposition \f$ A = V D
-      * V^{-1} \f$, if it exists.
-      *
-      * Example: \include ComplexEigenSolver_eigenvectors.cpp
-      * Output: \verbinclude ComplexEigenSolver_eigenvectors.out
-      */
-    const EigenvectorType& eigenvectors() const
-    {
-      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
-      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
-      return m_eivec;
-    }
-
-    /** \brief Returns the eigenvalues of given matrix.
-      *
-      * \returns A const reference to the column vector containing the eigenvalues.
-      *
-      * \pre Either the constructor
-      * ComplexEigenSolver(const MatrixType& matrix, bool) or the member
-      * function compute(const MatrixType& matrix, bool) has been called before
-      * to compute the eigendecomposition of a matrix.
-      *
-      * This function returns a column vector containing the
-      * eigenvalues. Eigenvalues are repeated according to their
-      * algebraic multiplicity, so there are as many eigenvalues as
-      * rows in the matrix. The eigenvalues are not sorted in any particular
-      * order.
-      *
-      * Example: \include ComplexEigenSolver_eigenvalues.cpp
-      * Output: \verbinclude ComplexEigenSolver_eigenvalues.out
-      */
-    const EigenvalueType& eigenvalues() const
-    {
-      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
-      return m_eivalues;
-    }
-
-    /** \brief Computes eigendecomposition of given matrix.
-      *
-      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
-      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
-      *    eigenvalues are computed; if false, only the eigenvalues are
-      *    computed.
-      * \returns    Reference to \c *this
-      *
-      * This function computes the eigenvalues of the complex matrix \p matrix.
-      * The eigenvalues() function can be used to retrieve them.  If
-      * \p computeEigenvectors is true, then the eigenvectors are also computed
-      * and can be retrieved by calling eigenvectors().
-      *
-      * The matrix is first reduced to Schur form using the
-      * ComplexSchur class. The Schur decomposition is then used to
-      * compute the eigenvalues and eigenvectors.
-      *
-      * The cost of the computation is dominated by the cost of the
-      * Schur decomposition, which is \f$ O(n^3) \f$ where \f$ n \f$
-      * is the size of the matrix.
-      *
-      * Example: \include ComplexEigenSolver_compute.cpp
-      * Output: \verbinclude ComplexEigenSolver_compute.out
-      */
-    ComplexEigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
-
-    /** \brief Reports whether previous computation was successful.
-      *
-      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
-      */
-    ComputationInfo info() const
-    {
-      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
-      return m_schur.info();
-    }
-
-    /** \brief Sets the maximum number of iterations allowed. */
-    ComplexEigenSolver& setMaxIterations(Index maxIters)
-    {
-      m_schur.setMaxIterations(maxIters);
-      return *this;
-    }
-
-    /** \brief Returns the maximum number of iterations. */
-    Index getMaxIterations()
-    {
-      return m_schur.getMaxIterations();
-    }
-
-  protected:
-    EigenvectorType m_eivec;
-    EigenvalueType m_eivalues;
-    ComplexSchur<MatrixType> m_schur;
-    bool m_isInitialized;
-    bool m_eigenvectorsOk;
-    EigenvectorType m_matX;
-
-  private:
-    void doComputeEigenvectors(RealScalar matrixnorm);
-    void sortEigenvalues(bool computeEigenvectors);
-};
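-
-/* Minimal usage sketch for the solver declared above:
-   \code
-   MatrixXcd A = MatrixXcd::Random(4,4);
-   ComplexEigenSolver<MatrixXcd> ces(A);
-   if (ces.info() == Success)
-   {
-     // with V = ces.eigenvectors() and D = ces.eigenvalues().asDiagonal(),
-     // A * V is approximately V * D
-   }
-   \endcode
-*/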
-
-
-template<typename MatrixType>
-ComplexEigenSolver<MatrixType>& 
-ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
-{
-  // this code is inspired from Jampack
-  assert(matrix.cols() == matrix.rows());
-
-  // Do a complex Schur decomposition, A = U T U^*
-  // The eigenvalues are on the diagonal of T.
-  m_schur.compute(matrix, computeEigenvectors);
-
-  if(m_schur.info() == Success)
-  {
-    m_eivalues = m_schur.matrixT().diagonal();
-    if(computeEigenvectors)
-      doComputeEigenvectors(matrix.norm());
-    sortEigenvalues(computeEigenvectors);
-  }
-
-  m_isInitialized = true;
-  m_eigenvectorsOk = computeEigenvectors;
-  return *this;
-}
-
-
-template<typename MatrixType>
-void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(RealScalar matrixnorm)
-{
-  const Index n = m_eivalues.size();
-
-  // Compute X such that T = X D X^(-1), where D is the diagonal of T.
-  // The matrix X is unit triangular.
-  m_matX = EigenvectorType::Zero(n, n);
-  for(Index k=n-1 ; k>=0 ; k--)
-  {
-    m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0);
-    // Compute X(i,k) using the (i,k) entry of the equation T X = X D
-    for(Index i=k-1 ; i>=0 ; i--)
-    {
-      m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k);
-      if(k-i-1>0)
-        m_matX.coeffRef(i,k) -= (m_schur.matrixT().row(i).segment(i+1,k-i-1) * m_matX.col(k).segment(i+1,k-i-1)).value();
-      ComplexScalar z = m_schur.matrixT().coeff(i,i) - m_schur.matrixT().coeff(k,k);
-      if(z==ComplexScalar(0))
-      {
-        // If the i-th and k-th eigenvalue are equal, then z equals 0.
-        // Use a small value instead, to prevent division by zero.
-        internal::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;
-      }
-      m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z;
-    }
-  }
-
-  // Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)
-  m_eivec.noalias() = m_schur.matrixU() * m_matX;
-  // .. and normalize the eigenvectors
-  for(Index k=0 ; k<n ; k++)
-  {
-    m_eivec.col(k).normalize();
-  }
-}
-
-
-template<typename MatrixType>
-void ComplexEigenSolver<MatrixType>::sortEigenvalues(bool computeEigenvectors)
-{
-  const Index n =  m_eivalues.size();
-  for (Index i=0; i<n; i++)
-  {
-    Index k;
-    m_eivalues.cwiseAbs().tail(n-i).minCoeff(&k);
-    if (k != 0)
-    {
-      k += i;
-      std::swap(m_eivalues[k],m_eivalues[i]);
-      if(computeEigenvectors)
-        m_eivec.col(i).swap(m_eivec.col(k));
-    }
-  }
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_COMPLEX_EIGEN_SOLVER_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/ComplexSchur.h b/resources/3rdparty/eigen/Eigen/src/Eigenvalues/ComplexSchur.h
deleted file mode 100644
index 62cbbb14f..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/ComplexSchur.h
+++ /dev/null
@@ -1,426 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Claire Maurice
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_COMPLEX_SCHUR_H
-#define EIGEN_COMPLEX_SCHUR_H
-
-#include "./HessenbergDecomposition.h"
-
-namespace Eigen { 
-
-namespace internal {
-template<typename MatrixType, bool IsComplex> struct complex_schur_reduce_to_hessenberg;
-}
-
-/** \eigenvalues_module \ingroup Eigenvalues_Module
-  *
-  *
-  * \class ComplexSchur
-  *
-  * \brief Performs a complex Schur decomposition of a real or complex square matrix
-  *
-  * \tparam _MatrixType the type of the matrix of which we are
-  * computing the Schur decomposition; this is expected to be an
-  * instantiation of the Matrix class template.
-  *
-  * Given a real or complex square matrix A, this class computes the
-  * Schur decomposition: \f$ A = U T U^*\f$ where U is a unitary
-  * complex matrix, and T is a complex upper triangular matrix.  The
-  * diagonal of the matrix T corresponds to the eigenvalues of the
-  * matrix A.
-  *
-  * Call the function compute() to compute the Schur decomposition of
-  * a given matrix. Alternatively, you can use the 
-  * ComplexSchur(const MatrixType&, bool) constructor which computes
-  * the Schur decomposition at construction time. Once the
-  * decomposition is computed, you can use the matrixU() and matrixT()
-  * functions to retrieve the matrices U and T in the decomposition.
-  *
-  * \note This code is inspired from Jampack
-  *
-  * \sa class RealSchur, class EigenSolver, class ComplexEigenSolver
-  */
-template<typename _MatrixType> class ComplexSchur
-{
-  public:
-    typedef _MatrixType MatrixType;
-    enum {
-      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-      Options = MatrixType::Options,
-      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
-    };
-
-    /** \brief Scalar type for matrices of type \p _MatrixType. */
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-    typedef typename MatrixType::Index Index;
-
-    /** \brief Complex scalar type for \p _MatrixType. 
-      *
-      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
-      * \c float or \c double) and just \c Scalar if #Scalar is
-      * complex.
-      */
-    typedef std::complex<RealScalar> ComplexScalar;
-
-    /** \brief Type for the matrices in the Schur decomposition.
-      *
-      * This is a square matrix with entries of type #ComplexScalar. 
-      * The size is the same as the size of \p _MatrixType.
-      */
-    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrixType;
-
-    /** \brief Default constructor.
-      *
-      * \param [in] size  Positive integer, size of the matrix whose Schur decomposition will be computed.
-      *
-      * The default constructor is useful in cases in which the user
-      * intends to perform decompositions via compute().  The \p size
-      * parameter is only used as a hint. It is not an error to give a
-      * wrong \p size, but it may impair performance.
-      *
-      * \sa compute() for an example.
-      */
-    ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
-      : m_matT(size,size),
-        m_matU(size,size),
-        m_hess(size),
-        m_isInitialized(false),
-        m_matUisUptodate(false),
-        m_maxIters(-1)
-    {}
-
-    /** \brief Constructor; computes Schur decomposition of given matrix. 
-      * 
-      * \param[in]  matrix    Square matrix whose Schur decomposition is to be computed.
-      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
-      *
-      * This constructor calls compute() to compute the Schur decomposition.
-      *
-      * \sa matrixT() and matrixU() for examples.
-      */
-    ComplexSchur(const MatrixType& matrix, bool computeU = true)
-      : m_matT(matrix.rows(),matrix.cols()),
-        m_matU(matrix.rows(),matrix.cols()),
-        m_hess(matrix.rows()),
-        m_isInitialized(false),
-        m_matUisUptodate(false),
-        m_maxIters(-1)
-    {
-      compute(matrix, computeU);
-    }
-
-    /** \brief Returns the unitary matrix in the Schur decomposition. 
-      *
-      * \returns A const reference to the matrix U.
-      *
-      * It is assumed that either the constructor
-      * ComplexSchur(const MatrixType& matrix, bool computeU) or the
-      * member function compute(const MatrixType& matrix, bool computeU)
-      * has been called before to compute the Schur decomposition of a
-      * matrix, and that \p computeU was set to true (the default
-      * value).
-      *
-      * Example: \include ComplexSchur_matrixU.cpp
-      * Output: \verbinclude ComplexSchur_matrixU.out
-      */
-    const ComplexMatrixType& matrixU() const
-    {
-      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
-      eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the ComplexSchur decomposition.");
-      return m_matU;
-    }
-
-    /** \brief Returns the triangular matrix in the Schur decomposition. 
-      *
-      * \returns A const reference to the matrix T.
-      *
-      * It is assumed that either the constructor
-      * ComplexSchur(const MatrixType& matrix, bool computeU) or the
-      * member function compute(const MatrixType& matrix, bool computeU)
-      * has been called before to compute the Schur decomposition of a
-      * matrix.
-      *
-      * Note that this function returns a plain square matrix. If you want to reference
-      * only the upper triangular part, use:
-      * \code schur.matrixT().triangularView<Upper>() \endcode 
-      *
-      * Example: \include ComplexSchur_matrixT.cpp
-      * Output: \verbinclude ComplexSchur_matrixT.out
-      */
-    const ComplexMatrixType& matrixT() const
-    {
-      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
-      return m_matT;
-    }
-
-    /** \brief Computes Schur decomposition of given matrix. 
-      * 
-      * \param[in]  matrix  Square matrix whose Schur decomposition is to be computed.
-      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
-      *
-      * \returns    Reference to \c *this
-      *
-      * The Schur decomposition is computed by first reducing the
-      * matrix to Hessenberg form using the class
-      * HessenbergDecomposition. The Hessenberg matrix is then reduced
-      * to triangular form by performing QR iterations with a single
-      * shift. The cost of computing the Schur decomposition depends
-      * on the number of iterations; as a rough guide, it may be taken
-      * to be \f$25n^3\f$ complex flops, or \f$10n^3\f$ complex flops
-      * if \a computeU is false.
-      *
-      * Example: \include ComplexSchur_compute.cpp
-      * Output: \verbinclude ComplexSchur_compute.out
-      *
-      * \sa compute(const MatrixType&, bool, Index)
-      */
-    ComplexSchur& compute(const MatrixType& matrix, bool computeU = true);
-
-    /** \brief Reports whether previous computation was successful.
-      *
-      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
-      */
-    ComputationInfo info() const
-    {
-      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
-      return m_info;
-    }
-
-    /** \brief Sets the maximum number of iterations allowed. 
-      *
-      * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
-      * of the matrix.
-      */
-    ComplexSchur& setMaxIterations(Index maxIters)
-    {
-      m_maxIters = maxIters;
-      return *this;
-    }
-
-    /** \brief Returns the maximum number of iterations. */
-    Index getMaxIterations()
-    {
-      return m_maxIters;
-    }
-
-    /** \brief Maximum number of iterations per row.
-      *
-      * If not otherwise specified, the maximum number of iterations is this number times the size of the
-      * matrix. It is currently set to 30.
-      */
-    static const int m_maxIterationsPerRow = 30;
-
-  protected:
-    ComplexMatrixType m_matT, m_matU;
-    HessenbergDecomposition<MatrixType> m_hess;
-    ComputationInfo m_info;
-    bool m_isInitialized;
-    bool m_matUisUptodate;
-    Index m_maxIters;
-
-  private:  
-    bool subdiagonalEntryIsNeglegible(Index i);
-    ComplexScalar computeShift(Index iu, Index iter);
-    void reduceToTriangularForm(bool computeU);
-    friend struct internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>;
-};
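-
-/* Minimal usage sketch for the class declared above; when info() returns
-   Success, the factors satisfy A = U T U^* up to numerical error:
-   \code
-   MatrixXcf A = MatrixXcf::Random(4,4);
-   ComplexSchur<MatrixXcf> schur(A);
-   MatrixXcf residual = A - schur.matrixU() * schur.matrixT()
-                          * schur.matrixU().adjoint();
-   // residual.norm() is tiny
-   \endcode
-*/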
-
-/** If m_matT(i+1,i) is negligible in floating point arithmetic
-  * compared to m_matT(i,i) and m_matT(i+1,i+1), then set it to zero and
-  * return true, else return false. */
-template<typename MatrixType>
-inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
-{
-  RealScalar d = internal::norm1(m_matT.coeff(i,i)) + internal::norm1(m_matT.coeff(i+1,i+1));
-  RealScalar sd = internal::norm1(m_matT.coeff(i+1,i));
-  if (internal::isMuchSmallerThan(sd, d, NumTraits<RealScalar>::epsilon()))
-  {
-    m_matT.coeffRef(i+1,i) = ComplexScalar(0);
-    return true;
-  }
-  return false;
-}
-
-
-/** Compute the shift in the current QR iteration. */
-template<typename MatrixType>
-typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter)
-{
-  if (iter == 10 || iter == 20) 
-  {
-    // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f
-    return internal::abs(internal::real(m_matT.coeff(iu,iu-1))) + internal::abs(internal::real(m_matT.coeff(iu-1,iu-2)));
-  }
-
-  // compute the shift as one of the eigenvalues of t, the 2x2
-  // diagonal block on the bottom of the active submatrix
-  Matrix<ComplexScalar,2,2> t = m_matT.template block<2,2>(iu-1,iu-1);
-  RealScalar normt = t.cwiseAbs().sum();
-  t /= normt;     // the normalization by normt is to avoid under/overflow
-
-  ComplexScalar b = t.coeff(0,1) * t.coeff(1,0);
-  ComplexScalar c = t.coeff(0,0) - t.coeff(1,1);
-  ComplexScalar disc = sqrt(c*c + RealScalar(4)*b);
-  ComplexScalar det = t.coeff(0,0) * t.coeff(1,1) - b;
-  ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1);
-  ComplexScalar eival1 = (trace + disc) / RealScalar(2);
-  ComplexScalar eival2 = (trace - disc) / RealScalar(2);
-
-  if(internal::norm1(eival1) > internal::norm1(eival2))
-    eival2 = det / eival1;
-  else
-    eival1 = det / eival2;
-
-  // choose the eigenvalue closest to the bottom entry of the diagonal
-  if(internal::norm1(eival1-t.coeff(1,1)) < internal::norm1(eival2-t.coeff(1,1)))
-    return normt * eival1;
-  else
-    return normt * eival2;
-}
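-
-/* For reference, the block above computes the eigenvalues of the scaled 2x2
-   matrix \f$ \begin{bmatrix} a & b \\ c & d \end{bmatrix} \f$ as
-   \f[ \lambda_{1,2} = \tfrac{1}{2}\left( (a+d) \pm \sqrt{(a-d)^2 + 4bc} \right), \f]
-   which is the usual trace/determinant formula since
-   \f$ (a+d)^2 - 4(ad-bc) = (a-d)^2 + 4bc \f$; the smaller of the two roots is
-   then recomputed from \f$ \lambda_1 \lambda_2 = \det t \f$ for better accuracy,
-   and the root closest to the bottom-right entry \f$ d \f$ is returned. */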
-
-
-template<typename MatrixType>
-ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
-{
-  m_matUisUptodate = false;
-  eigen_assert(matrix.cols() == matrix.rows());
-
-  if(matrix.cols() == 1)
-  {
-    m_matT = matrix.template cast<ComplexScalar>();
-    if(computeU)  m_matU = ComplexMatrixType::Identity(1,1);
-    m_info = Success;
-    m_isInitialized = true;
-    m_matUisUptodate = computeU;
-    return *this;
-  }
-
-  internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>::run(*this, matrix, computeU);
-  reduceToTriangularForm(computeU);
-  return *this;
-}
-
-namespace internal {
-
-/* Reduce given matrix to Hessenberg form */
-template<typename MatrixType, bool IsComplex>
-struct complex_schur_reduce_to_hessenberg
-{
-  // this is the implementation for the case IsComplex = true
-  static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
-  {
-    _this.m_hess.compute(matrix);
-    _this.m_matT = _this.m_hess.matrixH();
-    if(computeU)  _this.m_matU = _this.m_hess.matrixQ();
-  }
-};
-
-template<typename MatrixType>
-struct complex_schur_reduce_to_hessenberg<MatrixType, false>
-{
-  static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
-  {
-    typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;
-    typedef typename ComplexSchur<MatrixType>::ComplexMatrixType ComplexMatrixType;
-
-    // Note: m_hess is over RealScalar; m_matT and m_matU are over ComplexScalar
-    _this.m_hess.compute(matrix);
-    _this.m_matT = _this.m_hess.matrixH().template cast<ComplexScalar>();
-    if(computeU)  
-    {
-      // This may cause an allocation which seems to be avoidable
-      MatrixType Q = _this.m_hess.matrixQ(); 
-      _this.m_matU = Q.template cast<ComplexScalar>();
-    }
-  }
-};
-
-} // end namespace internal
-
-// Reduce the Hessenberg matrix m_matT to triangular form by QR iteration.
-template<typename MatrixType>
-void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
-{  
-  Index maxIters = m_maxIters;
-  if (maxIters == -1)
-    maxIters = m_maxIterationsPerRow * m_matT.rows();
-
-  // The matrix m_matT is divided into three parts.
-  // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
-  // Rows il,...,iu are the part we are working on (the active submatrix).
-  // Rows iu+1,...,end have already been brought into triangular form.
-  Index iu = m_matT.cols() - 1;
-  Index il;
-  Index iter = 0; // number of iterations we are working on the (iu,iu) element
-  Index totalIter = 0; // number of iterations for whole matrix
-
-  while(true)
-  {
-    // find iu, the bottom row of the active submatrix
-    while(iu > 0)
-    {
-      if(!subdiagonalEntryIsNeglegible(iu-1)) break;
-      iter = 0;
-      --iu;
-    }
-
-    // if iu is zero then we are done; the whole matrix is triangularized
-    if(iu==0) break;
-
-    // if we spent too many iterations, we give up
-    iter++;
-    totalIter++;
-    if(totalIter > maxIters) break;
-
-    // find il, the top row of the active submatrix
-    il = iu-1;
-    while(il > 0 && !subdiagonalEntryIsNeglegible(il-1))
-    {
-      --il;
-    }
-
-    /* perform the QR step using Givens rotations. The first rotation
-       creates a bulge; the (il+2,il) element becomes nonzero. This
-       bulge is chased down to the bottom of the active submatrix. */
-
-    ComplexScalar shift = computeShift(iu, iter);
-    JacobiRotation<ComplexScalar> rot;
-    rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il));
-    m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint());
-    m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
-    if(computeU) m_matU.applyOnTheRight(il, il+1, rot);
-
-    for(Index i=il+1 ; i<iu ; i++)
-    {
-      rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));
-      m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
-      m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint());
-      m_matT.topRows((std::min)(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
-      if(computeU) m_matU.applyOnTheRight(i, i+1, rot);
-    }
-  }
-
-  if(totalIter <= maxIters)
-    m_info = Success;
-  else
-    m_info = NoConvergence;
-
-  m_isInitialized = true;
-  m_matUisUptodate = computeU;
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_COMPLEX_SCHUR_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/EigenSolver.h b/resources/3rdparty/eigen/Eigen/src/Eigenvalues/EigenSolver.h
deleted file mode 100644
index 9c3bba1e5..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/EigenSolver.h
+++ /dev/null
@@ -1,594 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_EIGENSOLVER_H
-#define EIGEN_EIGENSOLVER_H
-
-#include "./RealSchur.h"
-
-namespace Eigen { 
-
-/** \eigenvalues_module \ingroup Eigenvalues_Module
-  *
-  *
-  * \class EigenSolver
-  *
-  * \brief Computes eigenvalues and eigenvectors of general matrices
-  *
-  * \tparam _MatrixType the type of the matrix of which we are computing the
-  * eigendecomposition; this is expected to be an instantiation of the Matrix
-  * class template. Currently, only real matrices are supported.
-  *
-  * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
-  * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v \f$.  If
-  * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
-  * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
-  * V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
-  * have \f$ A = V D V^{-1} \f$. This is called the eigendecomposition.
-  *
-  * The eigenvalues and eigenvectors of a matrix may be complex, even when the
-  * matrix is real. However, we can choose real matrices \f$ V \f$ and \f$ D
-  * \f$ satisfying \f$ A V = V D \f$, just like the eigendecomposition, provided the
-  * matrix \f$ D \f$ is not required to be diagonal but is allowed to
-  * have blocks of the form
-  * \f[ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f]
-  * (where \f$ u \f$ and \f$ v \f$ are real numbers) on the diagonal.  These
-  * blocks correspond to complex eigenvalue pairs \f$ u \pm iv \f$. We call
-  * this variant of the eigendecomposition the pseudo-eigendecomposition.
-  *
-  * Call the function compute() to compute the eigenvalues and eigenvectors of
-  * a given matrix. Alternatively, you can use the 
-  * EigenSolver(const MatrixType&, bool) constructor which computes the
-  * eigenvalues and eigenvectors at construction time. Once the eigenvalue and
-  * eigenvectors are computed, they can be retrieved with the eigenvalues() and
-  * eigenvectors() functions. The pseudoEigenvalueMatrix() and
-  * pseudoEigenvectors() methods allow the construction of the
-  * pseudo-eigendecomposition.
-  *
-  * The documentation for EigenSolver(const MatrixType&, bool) contains an
-  * example of the typical use of this class.
-  *
-  * \note The implementation is adapted from
-  * <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
-  * Their code is based on EISPACK.
-  *
-  * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
-  */
-template<typename _MatrixType> class EigenSolver
-{
-  public:
-
-    /** \brief Synonym for the template parameter \p _MatrixType. */
-    typedef _MatrixType MatrixType;
-
-    enum {
-      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-      Options = MatrixType::Options,
-      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
-    };
-
-    /** \brief Scalar type for matrices of type #MatrixType. */
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-    typedef typename MatrixType::Index Index;
-
-    /** \brief Complex scalar type for #MatrixType. 
-      *
-      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
-      * \c float or \c double) and just \c Scalar if #Scalar is
-      * complex.
-      */
-    typedef std::complex<RealScalar> ComplexScalar;
-
-    /** \brief Type for vector of eigenvalues as returned by eigenvalues(). 
-      *
-      * This is a column vector with entries of type #ComplexScalar.
-      * The length of the vector is the size of #MatrixType.
-      */
-    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
-
-    /** \brief Type for matrix of eigenvectors as returned by eigenvectors(). 
-      *
-      * This is a square matrix with entries of type #ComplexScalar. 
-      * The size is the same as the size of #MatrixType.
-      */
-    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
-
-    /** \brief Default constructor.
-      *
-      * The default constructor is useful in cases in which the user intends to
-      * perform decompositions via EigenSolver::compute(const MatrixType&, bool).
-      *
-      * \sa compute() for an example.
-      */
-    EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_realSchur(), m_matT(), m_tmp() {}
-
-    /** \brief Default constructor with memory preallocation
-      *
-      * Like the default constructor but with preallocation of the internal data
-      * according to the specified problem \a size.
-      * \sa EigenSolver()
-      */
-    EigenSolver(Index size)
-      : m_eivec(size, size),
-        m_eivalues(size),
-        m_isInitialized(false),
-        m_eigenvectorsOk(false),
-        m_realSchur(size),
-        m_matT(size, size), 
-        m_tmp(size)
-    {}
-
-    /** \brief Constructor; computes eigendecomposition of given matrix. 
-      * 
-      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
-      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
-      *    eigenvalues are computed; if false, only the eigenvalues are
-      *    computed. 
-      *
-      * This constructor calls compute() to compute the eigenvalues
-      * and eigenvectors.
-      *
-      * Example: \include EigenSolver_EigenSolver_MatrixType.cpp
-      * Output: \verbinclude EigenSolver_EigenSolver_MatrixType.out
-      *
-      * \sa compute()
-      */
-    EigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
-      : m_eivec(matrix.rows(), matrix.cols()),
-        m_eivalues(matrix.cols()),
-        m_isInitialized(false),
-        m_eigenvectorsOk(false),
-        m_realSchur(matrix.cols()),
-        m_matT(matrix.rows(), matrix.cols()), 
-        m_tmp(matrix.cols())
-    {
-      compute(matrix, computeEigenvectors);
-    }
-
-    /** \brief Returns the eigenvectors of given matrix. 
-      *
-      * \returns  %Matrix whose columns are the (possibly complex) eigenvectors.
-      *
-      * \pre Either the constructor 
-      * EigenSolver(const MatrixType&,bool) or the member function
-      * compute(const MatrixType&, bool) has been called before, and
-      * \p computeEigenvectors was set to true (the default).
-      *
-      * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
-      * to eigenvalue number \f$ k \f$ as returned by eigenvalues().  The
-      * eigenvectors are normalized to have (Euclidean) norm equal to one. The
-      * matrix returned by this function is the matrix \f$ V \f$ in the
-      * eigendecomposition \f$ A = V D V^{-1} \f$, if it exists.
-      *
-      * Example: \include EigenSolver_eigenvectors.cpp
-      * Output: \verbinclude EigenSolver_eigenvectors.out
-      *
-      * \sa eigenvalues(), pseudoEigenvectors()
-      */
-    EigenvectorsType eigenvectors() const;
-
-    /** \brief Returns the pseudo-eigenvectors of given matrix. 
-      *
-      * \returns  Const reference to matrix whose columns are the pseudo-eigenvectors.
-      *
-      * \pre Either the constructor 
-      * EigenSolver(const MatrixType&,bool) or the member function
-      * compute(const MatrixType&, bool) has been called before, and
-      * \p computeEigenvectors was set to true (the default).
-      *
-      * The real matrix \f$ V \f$ returned by this function and the
-      * block-diagonal matrix \f$ D \f$ returned by pseudoEigenvalueMatrix()
-      * satisfy \f$ AV = VD \f$.
-      *
-      * Example: \include EigenSolver_pseudoEigenvectors.cpp
-      * Output: \verbinclude EigenSolver_pseudoEigenvectors.out
-      *
-      * \sa pseudoEigenvalueMatrix(), eigenvectors()
-      */
-    const MatrixType& pseudoEigenvectors() const
-    {
-      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
-      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
-      return m_eivec;
-    }
-
-    /** \brief Returns the block-diagonal matrix in the pseudo-eigendecomposition.
-      *
-      * \returns  A block-diagonal matrix.
-      *
-      * \pre Either the constructor 
-      * EigenSolver(const MatrixType&,bool) or the member function
-      * compute(const MatrixType&, bool) has been called before.
-      *
-      * The matrix \f$ D \f$ returned by this function is real and
-      * block-diagonal. The blocks on the diagonal are either 1-by-1 or 2-by-2
-      * blocks of the form
-      * \f$ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$.
-      * These blocks are not sorted in any particular order.
-      * The matrix \f$ D \f$ and the matrix \f$ V \f$ returned by
-      * pseudoEigenvectors() satisfy \f$ AV = VD \f$.
-      *
-      * \sa pseudoEigenvectors() for an example, eigenvalues()
-      */
-    MatrixType pseudoEigenvalueMatrix() const;
-
-    /** \brief Returns the eigenvalues of given matrix. 
-      *
-      * \returns A const reference to the column vector containing the eigenvalues.
-      *
-      * \pre Either the constructor 
-      * EigenSolver(const MatrixType&,bool) or the member function
-      * compute(const MatrixType&, bool) has been called before.
-      *
-      * The eigenvalues are repeated according to their algebraic multiplicity,
-      * so there are as many eigenvalues as rows in the matrix. The eigenvalues 
-      * are not sorted in any particular order.
-      *
-      * Example: \include EigenSolver_eigenvalues.cpp
-      * Output: \verbinclude EigenSolver_eigenvalues.out
-      *
-      * \sa eigenvectors(), pseudoEigenvalueMatrix(),
-      *     MatrixBase::eigenvalues()
-      */
-    const EigenvalueType& eigenvalues() const
-    {
-      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
-      return m_eivalues;
-    }
-
-    /** \brief Computes the eigendecomposition of the given matrix.
-      * 
-      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
-      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
-      *    eigenvalues are computed; if false, only the eigenvalues are
-      *    computed. 
-      * \returns    Reference to \c *this
-      *
-      * This function computes the eigenvalues of the real matrix \p matrix.
-      * The eigenvalues() function can be used to retrieve them.  If 
-      * \p computeEigenvectors is true, then the eigenvectors are also computed
-      * and can be retrieved by calling eigenvectors().
-      *
-      * The matrix is first reduced to real Schur form using the RealSchur
-      * class. The Schur decomposition is then used to compute the eigenvalues
-      * and eigenvectors.
-      *
-      * The cost of the computation is dominated by the cost of the
-      * Schur decomposition, which is very approximately \f$ 25n^3 \f$
-      * (where \f$ n \f$ is the size of the matrix) if \p computeEigenvectors 
-      * is true, and \f$ 10n^3 \f$ if \p computeEigenvectors is false.
-      *
-      * This method reuses the data already allocated in the EigenSolver object.
-      *
-      * Example: \include EigenSolver_compute.cpp
-      * Output: \verbinclude EigenSolver_compute.out
-      */
-    EigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
-
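-    /** \brief Reports whether the previous computation was successful.
-      *
-      * \returns \c Success if the underlying real Schur decomposition converged,
-      * \c NoConvergence otherwise.
-      */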
-    ComputationInfo info() const
-    {
-      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
-      return m_realSchur.info();
-    }
-
-    /** \brief Sets the maximum number of iterations allowed. */
-    EigenSolver& setMaxIterations(Index maxIters)
-    {
-      m_realSchur.setMaxIterations(maxIters);
-      return *this;
-    }
-
-    /** \brief Returns the maximum number of iterations. */
-    Index getMaxIterations()
-    {
-      return m_realSchur.getMaxIterations();
-    }
-
-  private:
-    void doComputeEigenvectors();
-
-  protected:
-    MatrixType m_eivec;
-    EigenvalueType m_eivalues;
-    bool m_isInitialized;
-    bool m_eigenvectorsOk;
-    RealSchur<MatrixType> m_realSchur;
-    MatrixType m_matT;
-
-    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
-    ColumnVectorType m_tmp;
-};
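-
-// Illustrative usage sketch (arbitrary 3x3 matrix): construct the solver, check
-// info(), query eigenvalues() and eigenvectors(), and verify the real
-// pseudo-eigendecomposition A*V = V*D described above.
-//
-//   #include <Eigen/Dense>
-//   #include <iostream>
-//   int main()
-//   {
-//     Eigen::Matrix3d A;
-//     A << 1, 2, 3,
-//          4, 5, 6,
-//          7, 8, 10;
-//     Eigen::EigenSolver<Eigen::Matrix3d> es(A);        // eigenvalues + eigenvectors
-//     if (es.info() != Eigen::Success) return 1;
-//     std::cout << "eigenvalues:\n"  << es.eigenvalues()  << std::endl;
-//     std::cout << "eigenvectors:\n" << es.eigenvectors() << std::endl;
-//     Eigen::Matrix3d V = es.pseudoEigenvectors();
-//     Eigen::Matrix3d D = es.pseudoEigenvalueMatrix();
-//     std::cout << "||A*V - V*D|| = " << (A*V - V*D).norm() << std::endl;
-//     return 0;
-//   }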
-
-template<typename MatrixType>
-MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
-{
-  eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
-  Index n = m_eivalues.rows();
-  MatrixType matD = MatrixType::Zero(n,n);
-  for (Index i=0; i<n; ++i)
-  {
-    if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i))))
-      matD.coeffRef(i,i) = internal::real(m_eivalues.coeff(i));
-    else
-    {
-      matD.template block<2,2>(i,i) <<  internal::real(m_eivalues.coeff(i)), internal::imag(m_eivalues.coeff(i)),
-                                       -internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i));
-      ++i;
-    }
-  }
-  return matD;
-}
-
-template<typename MatrixType>
-typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const
-{
-  eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
-  eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
-  Index n = m_eivec.cols();
-  EigenvectorsType matV(n,n);
-  for (Index j=0; j<n; ++j)
-  {
-    if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(j)), internal::real(m_eivalues.coeff(j))) || j+1==n)
-    {
-      // we have a real eigenvalue
-      matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
-      matV.col(j).normalize();
-    }
-    else
-    {
-      // we have a pair of complex-conjugate eigenvalues
-      for (Index i=0; i<n; ++i)
-      {
-        matV.coeffRef(i,j)   = ComplexScalar(m_eivec.coeff(i,j),  m_eivec.coeff(i,j+1));
-        matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
-      }
-      matV.col(j).normalize();
-      matV.col(j+1).normalize();
-      ++j;
-    }
-  }
-  return matV;
-}
-
-template<typename MatrixType>
-EigenSolver<MatrixType>& 
-EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
-{
-  assert(matrix.cols() == matrix.rows());
-
-  // Reduce to real Schur form.
-  m_realSchur.compute(matrix, computeEigenvectors);
-
-  if (m_realSchur.info() == Success)
-  {
-    m_matT = m_realSchur.matrixT();
-    if (computeEigenvectors)
-      m_eivec = m_realSchur.matrixU();
-  
-    // Compute eigenvalues from matT
-    m_eivalues.resize(matrix.cols());
-    Index i = 0;
-    while (i < matrix.cols()) 
-    {
-      if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0)) 
-      {
-        m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
-        ++i;
-      }
-      else
-      {
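-        // 2x2 diagonal block: its eigenvalues are the complex conjugate pair
-        // (T(i,i) + T(i+1,i+1))/2 +/- i*z, where z = sqrt(|p^2 + T(i+1,i)*T(i,i+1)|)
-        // and p is half the difference of the two diagonal entries.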
-        Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
-        Scalar z = internal::sqrt(internal::abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
-        m_eivalues.coeffRef(i)   = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
-        m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
-        i += 2;
-      }
-    }
-    
-    // Compute eigenvectors.
-    if (computeEigenvectors)
-      doComputeEigenvectors();
-  }
-
-  m_isInitialized = true;
-  m_eigenvectorsOk = computeEigenvectors;
-
-  return *this;
-}
-
-// Complex scalar division, (xr + i*xi) / (yr + i*yi), using Smith's algorithm:
-// scale by the larger of |yr| and |yi| to avoid overflow and underflow.
-template<typename Scalar>
-std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
-{
-  Scalar r,d;
-  if (internal::abs(yr) > internal::abs(yi))
-  {
-      r = yi/yr;
-      d = yr + r*yi;
-      return std::complex<Scalar>((xr + r*xi)/d, (xi - r*xr)/d);
-  }
-  else
-  {
-      r = yr/yi;
-      d = yi + r*yr;
-      return std::complex<Scalar>((r*xr + xi)/d, (r*xi - xr)/d);
-  }
-}
-
-
-template<typename MatrixType>
-void EigenSolver<MatrixType>::doComputeEigenvectors()
-{
-  const Index size = m_eivec.cols();
-  const Scalar eps = NumTraits<Scalar>::epsilon();
-
-  // inefficient! this is already computed in RealSchur
-  Scalar norm(0);
-  for (Index j = 0; j < size; ++j)
-  {
-    norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
-  }
-  
-  // Backsubstitute to find vectors of upper triangular form
-  if (norm == 0.0)
-  {
-    return;
-  }
-
-  for (Index n = size-1; n >= 0; n--)
-  {
-    Scalar p = m_eivalues.coeff(n).real();
-    Scalar q = m_eivalues.coeff(n).imag();
-
-    // Real eigenvalue: back substitute for the corresponding real eigenvector
-    if (q == Scalar(0))
-    {
-      Scalar lastr(0), lastw(0);
-      Index l = n;
-
-      m_matT.coeffRef(n,n) = 1.0;
-      for (Index i = n-1; i >= 0; i--)
-      {
-        Scalar w = m_matT.coeff(i,i) - p;
-        Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
-
-        if (m_eivalues.coeff(i).imag() < 0.0)
-        {
-          lastw = w;
-          lastr = r;
-        }
-        else
-        {
-          l = i;
-          if (m_eivalues.coeff(i).imag() == 0.0)
-          {
-            if (w != 0.0)
-              m_matT.coeffRef(i,n) = -r / w;
-            else
-              m_matT.coeffRef(i,n) = -r / (eps * norm);
-          }
-          else // Solve real equations
-          {
-            Scalar x = m_matT.coeff(i,i+1);
-            Scalar y = m_matT.coeff(i+1,i);
-            Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
-            Scalar t = (x * lastr - lastw * r) / denom;
-            m_matT.coeffRef(i,n) = t;
-            if (internal::abs(x) > internal::abs(lastw))
-              m_matT.coeffRef(i+1,n) = (-r - w * t) / x;
-            else
-              m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw;
-          }
-
-          // Overflow control
-          Scalar t = internal::abs(m_matT.coeff(i,n));
-          if ((eps * t) * t > Scalar(1))
-            m_matT.col(n).tail(size-i) /= t;
-        }
-      }
-    }
-    else if (q < Scalar(0) && n > 0) // Complex conjugate pair of eigenvalues: compute a complex eigenvector
-    {
-      Scalar lastra(0), lastsa(0), lastw(0);
-      Index l = n-1;
-
-      // Last vector component imaginary so matrix is triangular
-      if (internal::abs(m_matT.coeff(n,n-1)) > internal::abs(m_matT.coeff(n-1,n)))
-      {
-        m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1);
-        m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1);
-      }
-      else
-      {
-        std::complex<Scalar> cc = cdiv<Scalar>(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q);
-        m_matT.coeffRef(n-1,n-1) = internal::real(cc);
-        m_matT.coeffRef(n-1,n) = internal::imag(cc);
-      }
-      m_matT.coeffRef(n,n-1) = 0.0;
-      m_matT.coeffRef(n,n) = 1.0;
-      for (Index i = n-2; i >= 0; i--)
-      {
-        Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1));
-        Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
-        Scalar w = m_matT.coeff(i,i) - p;
-
-        if (m_eivalues.coeff(i).imag() < 0.0)
-        {
-          lastw = w;
-          lastra = ra;
-          lastsa = sa;
-        }
-        else
-        {
-          l = i;
-          if (m_eivalues.coeff(i).imag() == RealScalar(0))
-          {
-            std::complex<Scalar> cc = cdiv(-ra,-sa,w,q);
-            m_matT.coeffRef(i,n-1) = internal::real(cc);
-            m_matT.coeffRef(i,n) = internal::imag(cc);
-          }
-          else
-          {
-            // Solve complex equations
-            Scalar x = m_matT.coeff(i,i+1);
-            Scalar y = m_matT.coeff(i+1,i);
-            Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
-            Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
-            if ((vr == 0.0) && (vi == 0.0))
-              vr = eps * norm * (internal::abs(w) + internal::abs(q) + internal::abs(x) + internal::abs(y) + internal::abs(lastw));
-
-            std::complex<Scalar> cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi);
-            m_matT.coeffRef(i,n-1) = internal::real(cc);
-            m_matT.coeffRef(i,n) = internal::imag(cc);
-            if (internal::abs(x) > (internal::abs(lastw) + internal::abs(q)))
-            {
-              m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x;
-              m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x;
-            }
-            else
-            {
-              cc = cdiv(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n),lastw,q);
-              m_matT.coeffRef(i+1,n-1) = internal::real(cc);
-              m_matT.coeffRef(i+1,n) = internal::imag(cc);
-            }
-          }
-
-          // Overflow control
-          using std::max;
-          Scalar t = (max)(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n)));
-          if ((eps * t) * t > Scalar(1))
-            m_matT.block(i, n-1, size-i, 2) /= t;
-
-        }
-      }
-      
-      // We handled a pair of complex conjugate eigenvalues, so need to skip them both
-      n--;
-    }
-    else
-    {
-      eigen_assert(0 && "Internal bug in EigenSolver"); // this should not happen
-    }
-  }
-
-  // Back transformation to get eigenvectors of original matrix
-  for (Index j = size-1; j >= 0; j--)
-  {
-    m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1);
-    m_eivec.col(j) = m_tmp;
-  }
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_EIGENSOLVER_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/RealSchur.h b/resources/3rdparty/eigen/Eigen/src/Eigenvalues/RealSchur.h
deleted file mode 100644
index da069bc55..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/RealSchur.h
+++ /dev/null
@@ -1,492 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_REAL_SCHUR_H
-#define EIGEN_REAL_SCHUR_H
-
-#include "./HessenbergDecomposition.h"
-
-namespace Eigen { 
-
-/** \eigenvalues_module \ingroup Eigenvalues_Module
-  *
-  *
-  * \class RealSchur
-  *
-  * \brief Performs a real Schur decomposition of a square matrix
-  *
-  * \tparam _MatrixType the type of the matrix of which we are computing the
-  * real Schur decomposition; this is expected to be an instantiation of the
-  * Matrix class template.
-  *
-  * Given a real square matrix A, this class computes the real Schur
-  * decomposition: \f$ A = U T U^T \f$ where U is a real orthogonal matrix and
-  * T is a real quasi-triangular matrix. An orthogonal matrix is a matrix whose
-  * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
-  * matrix is a block-triangular matrix whose diagonal consists of 1-by-1
-  * blocks and 2-by-2 blocks with complex eigenvalues. The eigenvalues of the
-  * blocks on the diagonal of T are the same as the eigenvalues of the matrix
-  * A, and thus the real Schur decomposition is used in EigenSolver to compute
-  * the eigendecomposition of a matrix.
-  *
-  * Call the function compute() to compute the real Schur decomposition of a
-  * given matrix. Alternatively, you can use the RealSchur(const MatrixType&, bool)
-  * constructor which computes the real Schur decomposition at construction
-  * time. Once the decomposition is computed, you can use the matrixU() and
-  * matrixT() functions to retrieve the matrices U and T in the decomposition.
-  *
-  * The documentation of RealSchur(const MatrixType&, bool) contains an example
-  * of the typical use of this class.
-  *
-  * \note The implementation is adapted from
-  * <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
-  * Their code is based on EISPACK.
-  *
-  * \sa class ComplexSchur, class EigenSolver, class ComplexEigenSolver
-  */
-template<typename _MatrixType> class RealSchur
-{
-  public:
-    typedef _MatrixType MatrixType;
-    enum {
-      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-      Options = MatrixType::Options,
-      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
-    };
-    typedef typename MatrixType::Scalar Scalar;
-    typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
-    typedef typename MatrixType::Index Index;
-
-    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
-    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
-
-    /** \brief Default constructor.
-      *
-      * \param [in] size  Positive integer, size of the matrix whose Schur decomposition will be computed.
-      *
-      * The default constructor is useful in cases in which the user intends to
-      * perform decompositions via compute().  The \p size parameter is only
-      * used as a hint. It is not an error to give a wrong \p size, but it may
-      * impair performance.
-      *
-      * \sa compute() for an example.
-      */
-    RealSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
-            : m_matT(size, size),
-              m_matU(size, size),
-              m_workspaceVector(size),
-              m_hess(size),
-              m_isInitialized(false),
-              m_matUisUptodate(false),
-              m_maxIters(-1)
-    { }
-
-    /** \brief Constructor; computes the real Schur decomposition of the given matrix.
-      * 
-      * \param[in]  matrix    Square matrix whose Schur decomposition is to be computed.
-      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
-      *
-      * This constructor calls compute() to compute the Schur decomposition.
-      *
-      * Example: \include RealSchur_RealSchur_MatrixType.cpp
-      * Output: \verbinclude RealSchur_RealSchur_MatrixType.out
-      */
-    RealSchur(const MatrixType& matrix, bool computeU = true)
-            : m_matT(matrix.rows(),matrix.cols()),
-              m_matU(matrix.rows(),matrix.cols()),
-              m_workspaceVector(matrix.rows()),
-              m_hess(matrix.rows()),
-              m_isInitialized(false),
-              m_matUisUptodate(false),
-              m_maxIters(-1)
-    {
-      compute(matrix, computeU);
-    }
-
-    /** \brief Returns the orthogonal matrix in the Schur decomposition. 
-      *
-      * \returns A const reference to the matrix U.
-      *
-      * \pre Either the constructor RealSchur(const MatrixType&, bool) or the
-      * member function compute(const MatrixType&, bool) has been called before
-      * to compute the Schur decomposition of a matrix, and \p computeU was set
-      * to true (the default value).
-      *
-      * \sa RealSchur(const MatrixType&, bool) for an example
-      */
-    const MatrixType& matrixU() const
-    {
-      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
-      eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the RealSchur decomposition.");
-      return m_matU;
-    }
-
-    /** \brief Returns the quasi-triangular matrix in the Schur decomposition. 
-      *
-      * \returns A const reference to the matrix T.
-      *
-      * \pre Either the constructor RealSchur(const MatrixType&, bool) or the
-      * member function compute(const MatrixType&, bool) has been called before
-      * to compute the Schur decomposition of a matrix.
-      *
-      * \sa RealSchur(const MatrixType&, bool) for an example
-      */
-    const MatrixType& matrixT() const
-    {
-      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
-      return m_matT;
-    }
-  
-    /** \brief Computes the real Schur decomposition of the given matrix.
-      * 
-      * \param[in]  matrix    Square matrix whose Schur decomposition is to be computed.
-      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
-      * \returns    Reference to \c *this
-      *
-      * The Schur decomposition is computed by first reducing the matrix to
-      * Hessenberg form using the class HessenbergDecomposition. The Hessenberg
-      * matrix is then reduced to triangular form by performing Francis QR
-      * iterations with implicit double shift. The cost of computing the Schur
-      * decomposition depends on the number of iterations; as a rough guide, it
-      * may be taken to be \f$25n^3\f$ flops if \a computeU is true and
-      * \f$10n^3\f$ flops if \a computeU is false.
-      *
-      * Example: \include RealSchur_compute.cpp
-      * Output: \verbinclude RealSchur_compute.out
-      *
-      * \sa setMaxIterations(Index), getMaxIterations()
-      */
-    RealSchur& compute(const MatrixType& matrix, bool computeU = true);
-
-    /** \brief Reports whether previous computation was successful.
-      *
-      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
-      */
-    ComputationInfo info() const
-    {
-      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
-      return m_info;
-    }
-
-    /** \brief Sets the maximum number of iterations allowed. 
-      *
-      * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
-      * of the matrix.
-      */
-    RealSchur& setMaxIterations(Index maxIters)
-    {
-      m_maxIters = maxIters;
-      return *this;
-    }
-
-    /** \brief Returns the maximum number of iterations. */
-    Index getMaxIterations()
-    {
-      return m_maxIters;
-    }
-
-    /** \brief Maximum number of iterations per row.
-      *
-      * If not otherwise specified, the maximum number of iterations is this number times the size of the
-      * matrix. It is currently set to 40.
-      */
-    static const int m_maxIterationsPerRow = 40;
-
-  private:
-    
-    MatrixType m_matT;
-    MatrixType m_matU;
-    ColumnVectorType m_workspaceVector;
-    HessenbergDecomposition<MatrixType> m_hess;
-    ComputationInfo m_info;
-    bool m_isInitialized;
-    bool m_matUisUptodate;
-    Index m_maxIters;
-
-    typedef Matrix<Scalar,3,1> Vector3s;
-
-    Scalar computeNormOfT();
-    Index findSmallSubdiagEntry(Index iu, Scalar norm);
-    void splitOffTwoRows(Index iu, bool computeU, Scalar exshift);
-    void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
-    void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
-    void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);
-};
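-
-// Illustrative usage sketch (arbitrary 4x4 random matrix): compute the
-// decomposition, check info(), then retrieve U and T and check A = U*T*U^T.
-//
-//   #include <Eigen/Dense>
-//   #include <iostream>
-//   int main()
-//   {
-//     Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
-//     Eigen::RealSchur<Eigen::MatrixXd> schur(A);       // computes U and T
-//     if (schur.info() != Eigen::Success) return 1;
-//     const Eigen::MatrixXd& U = schur.matrixU();
-//     const Eigen::MatrixXd& T = schur.matrixT();
-//     std::cout << "||A - U*T*U^T|| = " << (A - U*T*U.transpose()).norm() << std::endl;
-//     return 0;
-//   }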
-
-
-template<typename MatrixType>
-RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
-{
-  assert(matrix.cols() == matrix.rows());
-  Index maxIters = m_maxIters;
-  if (maxIters == -1)
-    maxIters = m_maxIterationsPerRow * matrix.rows();
-
-  // Step 1. Reduce to Hessenberg form
-  m_hess.compute(matrix);
-  m_matT = m_hess.matrixH();
-  if (computeU)
-    m_matU = m_hess.matrixQ();
-
-  // Step 2. Reduce to real Schur form  
-  m_workspaceVector.resize(m_matT.cols());
-  Scalar* workspace = &m_workspaceVector.coeffRef(0);
-
-  // The matrix m_matT is divided into three parts.
-  // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
-  // Rows il,...,iu form the part we are working on (the active window).
-  // Rows iu+1,...,end have already been brought into triangular form.
-  Index iu = m_matT.cols() - 1;
-  Index iter = 0;      // iteration count for current eigenvalue
-  Index totalIter = 0; // iteration count for whole matrix
-  Scalar exshift(0);   // sum of exceptional shifts
-  Scalar norm = computeNormOfT();
-
-  if(norm!=0)
-  {
-    while (iu >= 0)
-    {
-      Index il = findSmallSubdiagEntry(iu, norm);
-
-      // Check for convergence
-      if (il == iu) // One root found
-      {
-        m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift;
-        if (iu > 0)
-          m_matT.coeffRef(iu, iu-1) = Scalar(0);
-        iu--;
-        iter = 0;
-      }
-      else if (il == iu-1) // Two roots found
-      {
-        splitOffTwoRows(iu, computeU, exshift);
-        iu -= 2;
-        iter = 0;
-      }
-      else // No convergence yet
-      {
-        // The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG )
-        Vector3s firstHouseholderVector(0,0,0), shiftInfo;
-        computeShift(iu, iter, exshift, shiftInfo);
-        iter = iter + 1;
-        totalIter = totalIter + 1;
-        if (totalIter > maxIters) break;
-        Index im;
-        initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
-        performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace);
-      }
-    }
-  }
-  if(totalIter <= maxIters)
-    m_info = Success;
-  else
-    m_info = NoConvergence;
-
-  m_isInitialized = true;
-  m_matUisUptodate = computeU;
-  return *this;
-}
-
-/** \internal Computes and returns vector L1 norm of T */
-template<typename MatrixType>
-inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
-{
-  const Index size = m_matT.cols();
-  // FIXME to be efficient the following would require a triangular reduction code
-  // Scalar norm = m_matT.upper().cwiseAbs().sum() 
-  //               + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
-  Scalar norm(0);
-  for (Index j = 0; j < size; ++j)
-    norm += m_matT.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
-  return norm;
-}
-
-/** \internal Looks for a single small sub-diagonal element and returns its index */
-template<typename MatrixType>
-inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, Scalar norm)
-{
-  Index res = iu;
-  while (res > 0)
-  {
-    Scalar s = internal::abs(m_matT.coeff(res-1,res-1)) + internal::abs(m_matT.coeff(res,res));
-    if (s == 0.0)
-      s = norm;
-    if (internal::abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
-      break;
-    res--;
-  }
-  return res;
-}
-
-/** \internal Update T given that rows iu-1 and iu decouple from the rest. */
-template<typename MatrixType>
-inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scalar exshift)
-{
-  const Index size = m_matT.cols();
-
-  // The eigenvalues of the 2x2 matrix [a b; c d] are
-  // trace/2 +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc
-  Scalar p = Scalar(0.5) * (m_matT.coeff(iu-1,iu-1) - m_matT.coeff(iu,iu));
-  Scalar q = p * p + m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);   // q = tr^2 / 4 - det = discr/4
-  m_matT.coeffRef(iu,iu) += exshift;
-  m_matT.coeffRef(iu-1,iu-1) += exshift;
-
-  if (q >= Scalar(0)) // Two real eigenvalues
-  {
-    Scalar z = internal::sqrt(internal::abs(q));
-    JacobiRotation<Scalar> rot;
-    if (p >= Scalar(0))
-      rot.makeGivens(p + z, m_matT.coeff(iu, iu-1));
-    else
-      rot.makeGivens(p - z, m_matT.coeff(iu, iu-1));
-
-    m_matT.rightCols(size-iu+1).applyOnTheLeft(iu-1, iu, rot.adjoint());
-    m_matT.topRows(iu+1).applyOnTheRight(iu-1, iu, rot);
-    m_matT.coeffRef(iu, iu-1) = Scalar(0); 
-    if (computeU)
-      m_matU.applyOnTheRight(iu-1, iu, rot);
-  }
-
-  if (iu > 1) 
-    m_matT.coeffRef(iu-1, iu-2) = Scalar(0);
-}
-
-/** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. */
-template<typename MatrixType>
-inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo)
-{
-  shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu);
-  shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1);
-  shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);
-
-  // Wilkinson's original ad hoc shift
-  if (iter == 10)
-  {
-    exshift += shiftInfo.coeff(0);
-    for (Index i = 0; i <= iu; ++i)
-      m_matT.coeffRef(i,i) -= shiftInfo.coeff(0);
-    Scalar s = internal::abs(m_matT.coeff(iu,iu-1)) + internal::abs(m_matT.coeff(iu-1,iu-2));
-    shiftInfo.coeffRef(0) = Scalar(0.75) * s;
-    shiftInfo.coeffRef(1) = Scalar(0.75) * s;
-    shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s;
-  }
-
-  // MATLAB's new ad hoc shift
-  if (iter == 30)
-  {
-    Scalar s = (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
-    s = s * s + shiftInfo.coeff(2);
-    if (s > Scalar(0))
-    {
-      s = internal::sqrt(s);
-      if (shiftInfo.coeff(1) < shiftInfo.coeff(0))
-        s = -s;
-      s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
-      s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s;
-      exshift += s;
-      for (Index i = 0; i <= iu; ++i)
-        m_matT.coeffRef(i,i) -= s;
-      shiftInfo.setConstant(Scalar(0.964));
-    }
-  }
-}
-
-/** \internal Compute index im at which Francis QR step starts and the first Householder vector. */
-template<typename MatrixType>
-inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector)
-{
-  Vector3s& v = firstHouseholderVector; // alias to save typing
-
-  for (im = iu-2; im >= il; --im)
-  {
-    const Scalar Tmm = m_matT.coeff(im,im);
-    const Scalar r = shiftInfo.coeff(0) - Tmm;
-    const Scalar s = shiftInfo.coeff(1) - Tmm;
-    v.coeffRef(0) = (r * s - shiftInfo.coeff(2)) / m_matT.coeff(im+1,im) + m_matT.coeff(im,im+1);
-    v.coeffRef(1) = m_matT.coeff(im+1,im+1) - Tmm - r - s;
-    v.coeffRef(2) = m_matT.coeff(im+2,im+1);
-    if (im == il) {
-      break;
-    }
-    const Scalar lhs = m_matT.coeff(im,im-1) * (internal::abs(v.coeff(1)) + internal::abs(v.coeff(2)));
-    const Scalar rhs = v.coeff(0) * (internal::abs(m_matT.coeff(im-1,im-1)) + internal::abs(Tmm) + internal::abs(m_matT.coeff(im+1,im+1)));
-    if (internal::abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
-    {
-      break;
-    }
-  }
-}
-
-/** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */
-template<typename MatrixType>
-inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)
-{
-  assert(im >= il);
-  assert(im <= iu-2);
-
-  const Index size = m_matT.cols();
-
-  for (Index k = im; k <= iu-2; ++k)
-  {
-    bool firstIteration = (k == im);
-
-    Vector3s v;
-    if (firstIteration)
-      v = firstHouseholderVector;
-    else
-      v = m_matT.template block<3,1>(k,k-1);
-
-    Scalar tau, beta;
-    Matrix<Scalar, 2, 1> ess;
-    v.makeHouseholder(ess, tau, beta);
-    
-    if (beta != Scalar(0)) // if v is not zero
-    {
-      if (firstIteration && k > il)
-        m_matT.coeffRef(k,k-1) = -m_matT.coeff(k,k-1);
-      else if (!firstIteration)
-        m_matT.coeffRef(k,k-1) = beta;
-
-      // These Householder transformations form the O(n^3) part of the algorithm
-      m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace);
-      m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
-      if (computeU)
-        m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);
-    }
-  }
-
-  Matrix<Scalar, 2, 1> v = m_matT.template block<2,1>(iu-1, iu-2);
-  Scalar tau, beta;
-  Matrix<Scalar, 1, 1> ess;
-  v.makeHouseholder(ess, tau, beta);
-
-  if (beta != Scalar(0)) // if v is not zero
-  {
-    m_matT.coeffRef(iu-1, iu-2) = beta;
-    m_matT.block(iu-1, iu-1, 2, size-iu+1).applyHouseholderOnTheLeft(ess, tau, workspace);
-    m_matT.block(0, iu-1, iu+1, 2).applyHouseholderOnTheRight(ess, tau, workspace);
-    if (computeU)
-      m_matU.block(0, iu-1, size, 2).applyHouseholderOnTheRight(ess, tau, workspace);
-  }
-
-  // clean up pollution due to round-off errors
-  for (Index i = im+2; i <= iu; ++i)
-  {
-    m_matT.coeffRef(i,i-2) = Scalar(0);
-    if (i > im+2)
-      m_matT.coeffRef(i,i-3) = Scalar(0);
-  }
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_REAL_SCHUR_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/AlignedBox.h b/resources/3rdparty/eigen/Eigen/src/Geometry/AlignedBox.h
deleted file mode 100644
index 5830fcd35..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Geometry/AlignedBox.h
+++ /dev/null
@@ -1,375 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_ALIGNEDBOX_H
-#define EIGEN_ALIGNEDBOX_H
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  *
-  * \class AlignedBox
-  *
-  * \brief An axis aligned box
-  *
-  * \param _Scalar the type of the scalar coefficients
-  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
-  *
-  * This class represents an axis aligned box as a pair of the minimal and maximal corners.
-  */
-template <typename _Scalar, int _AmbientDim>
-class AlignedBox
-{
-public:
-EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
-  enum { AmbientDimAtCompileTime = _AmbientDim };
-  typedef _Scalar                                   Scalar;
-  typedef NumTraits<Scalar>                         ScalarTraits;
-  typedef DenseIndex                                Index;
-  typedef typename ScalarTraits::Real               RealScalar;
-  typedef typename ScalarTraits::NonInteger         NonInteger;
-  typedef Matrix<Scalar,AmbientDimAtCompileTime,1>  VectorType;
-
-  /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
-  enum CornerType
-  {
-    /** 1D names */
-    Min=0, Max=1,
-
-    /** Added names for 2D */
-    BottomLeft=0, BottomRight=1,
-    TopLeft=2, TopRight=3,
-
-    /** Added names for 3D */
-    BottomLeftFloor=0, BottomRightFloor=1,
-    TopLeftFloor=2, TopRightFloor=3,
-    BottomLeftCeil=4, BottomRightCeil=5,
-    TopLeftCeil=6, TopRightCeil=7
-  };
-
-
-  /** Default constructor initializing a null box. */
-  inline explicit AlignedBox()
-  { if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); }
-
-  /** Constructs a null box with \a _dim the dimension of the ambient space. */
-  inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
-  { setEmpty(); }
-
-  /** Constructs a box with extremities \a _min and \a _max. */
-  template<typename OtherVectorType1, typename OtherVectorType2>
-  inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
-
-  /** Constructs a box containing a single point \a p. */
-  template<typename Derived>
-  inline explicit AlignedBox(const MatrixBase<Derived>& a_p)
-  {
-    const typename internal::nested<Derived,2>::type p(a_p.derived());
-    m_min = p;
-    m_max = p;
-  }
-
-  ~AlignedBox() {}
-
-  /** \returns the dimension of the ambient space in which the box lives */
-  inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : Index(AmbientDimAtCompileTime); }
-
-  /** \deprecated use isEmpty */
-  inline bool isNull() const { return isEmpty(); }
-
-  /** \deprecated use setEmpty */
-  inline void setNull() { setEmpty(); }
-
-  /** \returns true if the box is empty. */
-  inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
-
-  /** Makes \c *this an empty box. */
-  inline void setEmpty()
-  {
-    m_min.setConstant( ScalarTraits::highest() );
-    m_max.setConstant( ScalarTraits::lowest() );
-  }
-
-  /** \returns the minimal corner */
-  inline const VectorType& (min)() const { return m_min; }
-  /** \returns a non const reference to the minimal corner */
-  inline VectorType& (min)() { return m_min; }
-  /** \returns the maximal corner */
-  inline const VectorType& (max)() const { return m_max; }
-  /** \returns a non const reference to the maximal corner */
-  inline VectorType& (max)() { return m_max; }
-
-  /** \returns the center of the box */
-  inline const CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>,
-                            const CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const VectorType, const VectorType> >
-  center() const
-  { return (m_min+m_max)/2; }
-
-  /** \returns the lengths of the sides of the bounding box.
-    * Note that this function does not return the same result
-    * for integer and floating-point scalar types.
-    */
-  inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> sizes() const
-  { return m_max - m_min; }
-
-  /** \returns the volume of the bounding box */
-  inline Scalar volume() const
-  { return sizes().prod(); }
-
-  /** \returns an expression for the bounding box diagonal vector
-    * if the length of the diagonal is needed: diagonal().norm()
-    * will provide it.
-    */
-  inline CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> diagonal() const
-  { return sizes(); }
-
-  /** \returns the vertex of the bounding box at the corner defined by
-    * the corner id \a corner. It works only for a 1D, 2D or 3D bounding box.
-    * For 1D bounding boxes, the two corners are named by the enum constants
-    * Min and Max.
-    * For 2D bounding boxes, corners are named by 4 enum constants:
-    * BottomLeft, BottomRight, TopLeft, TopRight.
-    * For 3D bounding boxes, the 2D names gain a Floor suffix and 4 more
-    * constants are added: BottomLeftCeil, BottomRightCeil, TopLeftCeil, TopRightCeil.
-    */
-  inline VectorType corner(CornerType corner) const
-  {
-    EIGEN_STATIC_ASSERT(_AmbientDim <= 3, THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE);
-
-    VectorType res;
-
-    Index mult = 1;
-    for(Index d=0; d<dim(); ++d)
-    {
-      if( mult & corner ) res[d] = m_max[d];
-      else                res[d] = m_min[d];
-      mult *= 2;
-    }
-    return res;
-  }
-
-  /** \returns a random point inside the bounding box sampled with
-   * a uniform distribution */
-  inline VectorType sample() const
-  {
-    VectorType r;
-    for(Index d=0; d<dim(); ++d)
-    {
-      if(!ScalarTraits::IsInteger)
-      {
-        r[d] = m_min[d] + (m_max[d]-m_min[d])
-             * internal::random<Scalar>(Scalar(0), Scalar(1));
-      }
-      else
-        r[d] = internal::random(m_min[d], m_max[d]);
-    }
-    return r;
-  }
-
-  /** \returns true if the point \a p is inside the box \c *this. */
-  template<typename Derived>
-  inline bool contains(const MatrixBase<Derived>& a_p) const
-  {
-    typename internal::nested<Derived,2>::type p(a_p.derived());
-    return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all();
-  }
-
-  /** \returns true if the box \a b is entirely inside the box \c *this. */
-  inline bool contains(const AlignedBox& b) const
-  { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
-
-  /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
-  template<typename Derived>
-  inline AlignedBox& extend(const MatrixBase<Derived>& a_p)
-  {
-    typename internal::nested<Derived,2>::type p(a_p.derived());
-    m_min = m_min.cwiseMin(p);
-    m_max = m_max.cwiseMax(p);
-    return *this;
-  }
-
-  /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
-  inline AlignedBox& extend(const AlignedBox& b)
-  {
-    m_min = m_min.cwiseMin(b.m_min);
-    m_max = m_max.cwiseMax(b.m_max);
-    return *this;
-  }
-
-  /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
-  inline AlignedBox& clamp(const AlignedBox& b)
-  {
-    m_min = m_min.cwiseMax(b.m_min);
-    m_max = m_max.cwiseMin(b.m_max);
-    return *this;
-  }
-
-  /** Returns an AlignedBox that is the intersection of \a b and \c *this */
-  inline AlignedBox intersection(const AlignedBox& b) const
-  {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
-
-  /** Returns an AlignedBox that is the union of \a b and \c *this */
-  inline AlignedBox merged(const AlignedBox& b) const
-  { return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
-
-  /** Translates \c *this by the vector \a t and returns a reference to \c *this. */
-  template<typename Derived>
-  inline AlignedBox& translate(const MatrixBase<Derived>& a_t)
-  {
-    const typename internal::nested<Derived,2>::type t(a_t.derived());
-    m_min += t;
-    m_max += t;
-    return *this;
-  }
-
-  /** \returns the squared distance between the point \a p and the box \c *this,
-    * and zero if \a p is inside the box.
-    * \sa exteriorDistance()
-    */
-  template<typename Derived>
-  inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& a_p) const;
-
-  /** \returns the squared distance between the boxes \a b and \c *this,
-    * and zero if the boxes intersect.
-    * \sa exteriorDistance()
-    */
-  inline Scalar squaredExteriorDistance(const AlignedBox& b) const;
-
-  /** \returns the distance between the point \a p and the box \c *this,
-    * and zero if \a p is inside the box.
-    * \sa squaredExteriorDistance()
-    */
-  template<typename Derived>
-  inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
-  { return internal::sqrt(NonInteger(squaredExteriorDistance(p))); }
-
-  /** \returns the distance between the boxes \a b and \c *this,
-    * and zero if the boxes intersect.
-    * \sa squaredExteriorDistance()
-    */
-  inline NonInteger exteriorDistance(const AlignedBox& b) const
-  { return internal::sqrt(NonInteger(squaredExteriorDistance(b))); }
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<AlignedBox,
-           AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
-  {
-    return typename internal::cast_return_type<AlignedBox,
-                    AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
-  }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
-  {
-    m_min = (other.min)().template cast<Scalar>();
-    m_max = (other.max)().template cast<Scalar>();
-  }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const AlignedBox& other, RealScalar prec = ScalarTraits::dummy_precision()) const
-  { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }
-
-protected:
-
-  VectorType m_min, m_max;
-};
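-
-// Illustrative usage sketch (arbitrary corner coordinates): build a box from its
-// extremities, grow it with extend(), then run the containment and intersection
-// queries documented above.
-//
-//   #include <Eigen/Geometry>
-//   #include <iostream>
-//   int main()
-//   {
-//     using namespace Eigen;
-//     AlignedBox3d box(Vector3d(0, 0, 0), Vector3d(1, 1, 1));
-//     box.extend(Vector3d(2, 0.5, 0.5));                    // box is now [0,2]x[0,1]x[0,1]
-//     AlignedBox3d other(Vector3d(0.5, 0.5, 0.5), Vector3d(3, 3, 3));
-//     std::cout << box.contains(Vector3d(1.5, 0.5, 0.5)) << std::endl;   // prints 1
-//     std::cout << box.intersection(other).volume() << std::endl;        // prints 0.375
-//     return 0;
-//   }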
-
-
-
-template<typename Scalar,int AmbientDim>
-template<typename Derived>
-inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const MatrixBase<Derived>& a_p) const
-{
-  const typename internal::nested<Derived,2*AmbientDim>::type p(a_p.derived());
-  Scalar dist2(0);
-  Scalar aux;
-  for (Index k=0; k<dim(); ++k)
-  {
-    if( m_min[k] > p[k] )
-    {
-      aux = m_min[k] - p[k];
-      dist2 += aux*aux;
-    }
-    else if( p[k] > m_max[k] )
-    {
-      aux = p[k] - m_max[k];
-      dist2 += aux*aux;
-    }
-  }
-  return dist2;
-}
-
-template<typename Scalar,int AmbientDim>
-inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const AlignedBox& b) const
-{
-  Scalar dist2(0);
-  Scalar aux;
-  for (Index k=0; k<dim(); ++k)
-  {
-    if( m_min[k] > b.m_max[k] )
-    {
-      aux = m_min[k] - b.m_max[k];
-      dist2 += aux*aux;
-    }
-    else if( b.m_min[k] > m_max[k] )
-    {
-      aux = b.m_min[k] - m_max[k];
-      dist2 += aux*aux;
-    }
-  }
-  return dist2;
-}
-
-/** \defgroup alignedboxtypedefs Global aligned box typedefs
-  *
-  * \ingroup Geometry_Module
-  *
-  * Eigen defines several typedef shortcuts for most common aligned box types.
-  *
-  * The general patterns are the following:
-  *
-  * \c AlignedBoxSizeType where \c Size can be \c 1, \c 2,\c 3,\c 4 for fixed size boxes or \c X for dynamic size,
-  * and where \c Type can be \c i for integer, \c f for float, \c d for double.
-  *
-  * For example, \c AlignedBox3d is a fixed-size 3-dimensional aligned box of doubles, and \c AlignedBoxXf is a dynamic-size aligned box of floats.
-  *
-  * \sa class AlignedBox
-  */
-
-#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)    \
-/** \ingroup alignedboxtypedefs */                                 \
-typedef AlignedBox<Type, Size>   AlignedBox##SizeSuffix##TypeSuffix;
-
-#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
-EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 1, 1) \
-EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \
-EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \
-EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \
-EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X)
-
-EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int,                  i)
-EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float,                f)
-EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double,               d)
-
-#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES
-#undef EIGEN_MAKE_TYPEDEFS
-
-} // end namespace Eigen
-
-#endif // EIGEN_ALIGNEDBOX_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/AngleAxis.h b/resources/3rdparty/eigen/Eigen/src/Geometry/AngleAxis.h
deleted file mode 100644
index eee2cd0e1..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Geometry/AngleAxis.h
+++ /dev/null
@@ -1,230 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_ANGLEAXIS_H
-#define EIGEN_ANGLEAXIS_H
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class AngleAxis
-  *
-  * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients.
-  *
-  * \warning When setting up an AngleAxis object, the axis vector \b must \b be \b normalized.
-  *
-  * The following two typedefs are provided for convenience:
-  * \li \c AngleAxisf for \c float
-  * \li \c AngleAxisd for \c double
-  *
-  * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily
-  * mimic Euler-angles. Here is an example:
-  * \include AngleAxis_mimic_euler.cpp
-  * Output: \verbinclude AngleAxis_mimic_euler.out
-  *
-  * \note This class is not meant to be used to store a rotation transformation,
-  * but rather to make it easier to create other rotation objects
-  * (Quaternion, rotation Matrix) and transformations.
-  *
-  * \sa class Quaternion, class Transform, MatrixBase::UnitX()
-  */
-
-namespace internal {
-template<typename _Scalar> struct traits<AngleAxis<_Scalar> >
-{
-  typedef _Scalar Scalar;
-};
-}
-
-template<typename _Scalar>
-class AngleAxis : public RotationBase<AngleAxis<_Scalar>,3>
-{
-  typedef RotationBase<AngleAxis<_Scalar>,3> Base;
-
-public:
-
-  using Base::operator*;
-
-  enum { Dim = 3 };
-  /** the scalar type of the coefficients */
-  typedef _Scalar Scalar;
-  typedef Matrix<Scalar,3,3> Matrix3;
-  typedef Matrix<Scalar,3,1> Vector3;
-  typedef Quaternion<Scalar> QuaternionType;
-
-protected:
-
-  Vector3 m_axis;
-  Scalar m_angle;
-
-public:
-
-  /** Default constructor without initialization. */
-  AngleAxis() {}
-  /** Constructs and initializes the angle-axis rotation from an \a angle in radians
-    * and an \a axis which \b must \b be \b normalized.
-    *
-    * \warning If the \a axis vector is not normalized, then the angle-axis object
-    *          represents an invalid rotation. */
-  template<typename Derived>
-  inline AngleAxis(const Scalar& angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
-  /** Constructs and initializes the angle-axis rotation from a quaternion \a q. */
-  template<typename QuatDerived> inline explicit AngleAxis(const QuaternionBase<QuatDerived>& q) { *this = q; }
-  /** Constructs and initializes the angle-axis rotation from a 3x3 rotation matrix. */
-  template<typename Derived>
-  inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }
-
-  Scalar angle() const { return m_angle; }
-  Scalar& angle() { return m_angle; }
-
-  const Vector3& axis() const { return m_axis; }
-  Vector3& axis() { return m_axis; }
-
-  /** Concatenates two rotations */
-  inline QuaternionType operator* (const AngleAxis& other) const
-  { return QuaternionType(*this) * QuaternionType(other); }
-
-  /** Concatenates two rotations */
-  inline QuaternionType operator* (const QuaternionType& other) const
-  { return QuaternionType(*this) * other; }
-
-  /** Concatenates two rotations */
-  friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b)
-  { return a * QuaternionType(b); }
-
-  /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */
-  AngleAxis inverse() const
-  { return AngleAxis(-m_angle, m_axis); }
-
-  template<class QuatDerived>
-  AngleAxis& operator=(const QuaternionBase<QuatDerived>& q);
-  template<typename Derived>
-  AngleAxis& operator=(const MatrixBase<Derived>& m);
-
-  template<typename Derived>
-  AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m);
-  Matrix3 toRotationMatrix(void) const;
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const
-  { return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit AngleAxis(const AngleAxis<OtherScalarType>& other)
-  {
-    m_axis = other.axis().template cast<Scalar>();
-    m_angle = Scalar(other.angle());
-  }
-
-  static inline const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const AngleAxis& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
-  { return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); }
-};
-
-/** \ingroup Geometry_Module
-  * single precision angle-axis type */
-typedef AngleAxis<float> AngleAxisf;
-/** \ingroup Geometry_Module
-  * double precision angle-axis type */
-typedef AngleAxis<double> AngleAxisd;
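-
-// Illustrative usage sketch (arbitrary angles, in radians): compose three
-// axis-aligned rotations to mimic Euler angles, as described in the class
-// documentation, then convert the result back to angle-axis form.
-//
-//   #include <Eigen/Geometry>
-//   #include <iostream>
-//   int main()
-//   {
-//     using namespace Eigen;
-//     Matrix3d R;
-//     R = AngleAxisd(0.3, Vector3d::UnitZ())    // yaw
-//       * AngleAxisd(0.2, Vector3d::UnitY())    // pitch
-//       * AngleAxisd(0.1, Vector3d::UnitX());   // roll
-//     AngleAxisd aa(R);                         // recover an angle and a unit axis
-//     std::cout << "angle = " << aa.angle() << ", axis = " << aa.axis().transpose() << std::endl;
-//     return 0;
-//   }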
-
-/** Set \c *this from a \b unit quaternion.
-  * The axis is normalized.
-  * 
-  * \warning As with any other method dealing with quaternions, if the input quaternion
-  *          is not normalized then the result is undefined.
-  */
-template<typename Scalar>
-template<typename QuatDerived>
-AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived>& q)
-{
-  using std::acos;
-  using std::min;
-  using std::max;
-  Scalar n2 = q.vec().squaredNorm();
-  if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision())
-  {
-    m_angle = 0;
-    m_axis << 1, 0, 0;
-  }
-  else
-  {
-    m_angle = Scalar(2)*acos((min)((max)(Scalar(-1),q.w()),Scalar(1)));
-    m_axis = q.vec() / internal::sqrt(n2);
-  }
-  return *this;
-}
-
-/** Set \c *this from a 3x3 rotation matrix \a mat.
-  */
-template<typename Scalar>
-template<typename Derived>
-AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)
-{
-  // Since a direct conversion would not be really faster,
-  // let's use the robust Quaternion implementation:
-  return *this = QuaternionType(mat);
-}
-
-/**
-* \brief Sets \c *this from a 3x3 rotation matrix.
-**/
-template<typename Scalar>
-template<typename Derived>
-AngleAxis<Scalar>& AngleAxis<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
-{
-  return *this = QuaternionType(mat);
-}
-
-/** Constructs and \returns an equivalent 3x3 rotation matrix.
-  */
-template<typename Scalar>
-typename AngleAxis<Scalar>::Matrix3
-AngleAxis<Scalar>::toRotationMatrix(void) const
-{
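-  // Rodrigues' rotation formula: R = c*I + (1-c)*axis*axis^T + s*[axis]_x,
-  // with c = cos(angle), s = sin(angle), and [axis]_x the cross-product matrix of the axis.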
-  Matrix3 res;
-  Vector3 sin_axis  = internal::sin(m_angle) * m_axis;
-  Scalar c = internal::cos(m_angle);
-  Vector3 cos1_axis = (Scalar(1)-c) * m_axis;
-
-  Scalar tmp;
-  tmp = cos1_axis.x() * m_axis.y();
-  res.coeffRef(0,1) = tmp - sin_axis.z();
-  res.coeffRef(1,0) = tmp + sin_axis.z();
-
-  tmp = cos1_axis.x() * m_axis.z();
-  res.coeffRef(0,2) = tmp + sin_axis.y();
-  res.coeffRef(2,0) = tmp - sin_axis.y();
-
-  tmp = cos1_axis.y() * m_axis.z();
-  res.coeffRef(1,2) = tmp - sin_axis.x();
-  res.coeffRef(2,1) = tmp + sin_axis.x();
-
-  res.diagonal() = (cos1_axis.cwiseProduct(m_axis)).array() + c;
-
-  return res;
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_ANGLEAXIS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/Hyperplane.h b/resources/3rdparty/eigen/Eigen/src/Geometry/Hyperplane.h
deleted file mode 100644
index 8b45c89e6..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Geometry/Hyperplane.h
+++ /dev/null
@@ -1,269 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_HYPERPLANE_H
-#define EIGEN_HYPERPLANE_H
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class Hyperplane
-  *
-  * \brief A hyperplane
-  *
-  * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n.
-  * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane.
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients
-  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
-  *             Notice that the dimension of the hyperplane is _AmbientDim-1.
-  *
-  * This class represents a hyperplane as the zero set of the implicit equation
-  * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part)
-  * and \f$ d \f$ is the distance (offset) to the origin.
-  */
-template <typename _Scalar, int _AmbientDim, int _Options>
-class Hyperplane
-{
-public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
-  enum {
-    AmbientDimAtCompileTime = _AmbientDim,
-    Options = _Options
-  };
-  typedef _Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef DenseIndex Index;
-  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
-  typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic
-                        ? Dynamic
-                        : Index(AmbientDimAtCompileTime)+1,1,Options> Coefficients;
-  typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
-  typedef const Block<const Coefficients,AmbientDimAtCompileTime,1> ConstNormalReturnType;
-
-  /** Default constructor without initialization */
-  inline explicit Hyperplane() {}
-  
-  template<int OtherOptions>
-  Hyperplane(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
-   : m_coeffs(other.coeffs())
-  {}
-
-  /** Constructs a dynamic-size hyperplane with \a _dim the dimension
-    * of the ambient space */
-  inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {}
-
-  /** Constructs a plane from its normal \a n and a point \a e on the plane.
-    * \warning the vector normal is assumed to be normalized.
-    */
-  inline Hyperplane(const VectorType& n, const VectorType& e)
-    : m_coeffs(n.size()+1)
-  {
-    normal() = n;
-    offset() = -n.dot(e);
-  }
-
-  /** Constructs a plane from its normal \a n and distance to the origin \a d
-    * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$.
-    * \warning the vector normal is assumed to be normalized.
-    */
-  inline Hyperplane(const VectorType& n, const Scalar& d)
-    : m_coeffs(n.size()+1)
-  {
-    normal() = n;
-    offset() = d;
-  }
-
-  /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space
-    * is greater than 2, then the hyperplane is not uniquely determined, so an arbitrary choice is made.
-    */
-  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1)
-  {
-    Hyperplane result(p0.size());
-    result.normal() = (p1 - p0).unitOrthogonal();
-    result.offset() = -p0.dot(result.normal());
-    return result;
-  }
-
-  /** Constructs a hyperplane passing through the three points. The dimension of the ambient space
-    * is required to be exactly 3.
-    */
-  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2)
-  {
-    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
-    Hyperplane result(p0.size());
-    result.normal() = (p2 - p0).cross(p1 - p0).normalized();
-    result.offset() = -p0.dot(result.normal());
-    return result;
-  }
-
-  /** Constructs a hyperplane passing through the parametrized line \a parametrized.
-    * If the dimension of the ambient space is greater than 2, the hyperplane is not uniquely
-    * determined, so an arbitrary choice is made.
-    */
-  // FIXME to be consistent with the rest this could be implemented as a static Through function ??
-  explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
-  {
-    normal() = parametrized.direction().unitOrthogonal();
-    offset() = -parametrized.origin().dot(normal());
-  }
-
-  ~Hyperplane() {}
-
-  /** \returns the dimension of the ambient space in which the plane lives */
-  inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); }
-
-  /** normalizes \c *this */
-  void normalize(void)
-  {
-    m_coeffs /= normal().norm();
-  }
-
-  /** \returns the signed distance between the plane \c *this and a point \a p.
-    * \sa absDistance()
-    */
-  inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); }
-
-  /** \returns the absolute distance between the plane \c *this and a point \a p.
-    * \sa signedDistance()
-    */
-  inline Scalar absDistance(const VectorType& p) const { return internal::abs(signedDistance(p)); }
-
-  /** \returns the projection of a point \a p onto the plane \c *this.
-    */
-  inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); }
-
-  /** \returns a constant reference to the unit normal vector of the plane, which corresponds
-    * to the linear part of the implicit equation.
-    */
-  inline ConstNormalReturnType normal() const { return ConstNormalReturnType(m_coeffs,0,0,dim(),1); }
-
-  /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds
-    * to the linear part of the implicit equation.
-    */
-  inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); }
-
-  /** \returns the distance to the origin, which is also the "constant term" of the implicit equation.
-    * \warning the normal vector is assumed to be normalized.
-    */
-  inline const Scalar& offset() const { return m_coeffs.coeff(dim()); }
-
-  /** \returns a non-constant reference to the distance to the origin, which is also the constant part
-    * of the implicit equation */
-  inline Scalar& offset() { return m_coeffs(dim()); }
-
-  /** \returns a constant reference to the coefficients c_i of the plane equation:
-    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
-    */
-  inline const Coefficients& coeffs() const { return m_coeffs; }
-
-  /** \returns a non-constant reference to the coefficients c_i of the plane equation:
-    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
-    */
-  inline Coefficients& coeffs() { return m_coeffs; }
-
-  /** \returns the intersection of *this with \a other.
-    *
-    * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines.
-    *
-    * \note If \a other is approximately parallel to *this, this method will return any point on *this.
-    */
-  VectorType intersection(const Hyperplane& other) const
-  {
-    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
-    Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
-    // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests
-    // whether the two lines are approximately parallel.
-    if(internal::isMuchSmallerThan(det, Scalar(1)))
-    {   // special case where the two lines are approximately parallel. Pick any point on the first line.
-        if(internal::abs(coeffs().coeff(1))>internal::abs(coeffs().coeff(0)))
-            return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));
-        else
-            return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));
-    }
-    else
-    {   // general case
-        Scalar invdet = Scalar(1) / det;
-        return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)),
-                          invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2)));
-    }
-  }
-
-  /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this.
-    *
-    * \param mat the Dim x Dim transformation matrix
-    * \param traits specifies whether the matrix \a mat represents an #Isometry
-    *               or a more generic #Affine transformation. The default is #Affine.
-    */
-  template<typename XprType>
-  inline Hyperplane& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine)
-  {
-    if (traits==Affine)
-      normal() = mat.inverse().transpose() * normal();
-    else if (traits==Isometry)
-      normal() = mat * normal();
-    else
-    {
-      eigen_assert(0 && "invalid traits value in Hyperplane::transform()");
-    }
-    return *this;
-  }
-
-  /** Applies the transformation \a t to \c *this and returns a reference to \c *this.
-    *
-    * \param t the transformation of dimension Dim
-    * \param traits specifies whether the transformation \a t represents an #Isometry
-    *               or a more generic #Affine transformation. The default is #Affine.
-    *               Other kinds of transformations are not supported.
-    */
-  template<int TrOptions>
-  inline Hyperplane& transform(const Transform<Scalar,AmbientDimAtCompileTime,Affine,TrOptions>& t,
-                                TransformTraits traits = Affine)
-  {
-    transform(t.linear(), traits);
-    offset() -= normal().dot(t.translation());
-    return *this;
-  }
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<Hyperplane,
-           Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const
-  {
-    return typename internal::cast_return_type<Hyperplane,
-                    Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);
-  }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType,int OtherOptions>
-  inline explicit Hyperplane(const Hyperplane<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)
-  { m_coeffs = other.coeffs().template cast<Scalar>(); }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  template<int OtherOptions>
-  bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
-  { return m_coeffs.isApprox(other.m_coeffs, prec); }
-
-protected:
-
-  Coefficients m_coeffs;
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_HYPERPLANE_H
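For reference, a minimal usage sketch of the Hyperplane API removed in the hunk above, based only on the declarations shown there (illustrative, not part of the patch):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  // In 2D a hyperplane is a line: construct it through two points.
  typedef Hyperplane<double, 2> Line2d;
  Line2d line = Line2d::Through(Vector2d(0, 0), Vector2d(1, 1));

  // Signed distance to a point and its projection onto the line.
  Vector2d p(1, 0);
  std::cout << "signed distance: " << line.signedDistance(p) << "\n";
  std::cout << "projection:      " << line.projection(p).transpose() << "\n";

  // In 3D a hyperplane is a plane: construct it from a unit normal and a point.
  Hyperplane<double, 3> plane(Vector3d::UnitZ(), Vector3d(0, 0, 2));
  std::cout << "offset: " << plane.offset() << "\n"; // equals -n.dot(e), i.e. -2
  return 0;
}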
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/ParametrizedLine.h b/resources/3rdparty/eigen/Eigen/src/Geometry/ParametrizedLine.h
deleted file mode 100644
index ab5203e55..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Geometry/ParametrizedLine.h
+++ /dev/null
@@ -1,195 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_PARAMETRIZEDLINE_H
-#define EIGEN_PARAMETRIZEDLINE_H
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class ParametrizedLine
-  *
-  * \brief A parametrized line
-  *
-  * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit
-  * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to
-  * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$.
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients
-  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
-  */
-template <typename _Scalar, int _AmbientDim, int _Options>
-class ParametrizedLine
-{
-public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
-  enum {
-    AmbientDimAtCompileTime = _AmbientDim,
-    Options = _Options
-  };
-  typedef _Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef DenseIndex Index;
-  typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType;
-
-  /** Default constructor without initialization */
-  inline explicit ParametrizedLine() {}
-  
-  template<int OtherOptions>
-  ParametrizedLine(const ParametrizedLine<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
-   : m_origin(other.origin()), m_direction(other.direction())
-  {}
-
-  /** Constructs a dynamic-size line with \a _dim the dimension
-    * of the ambient space */
-  inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {}
-
-  /** Initializes a parametrized line of direction \a direction and origin \a origin.
-    * \warning the direction vector is assumed to be normalized.
-    */
-  ParametrizedLine(const VectorType& origin, const VectorType& direction)
-    : m_origin(origin), m_direction(direction) {}
-
-  template <int OtherOptions>
-  explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane);
-
-  /** Constructs a parametrized line going from \a p0 to \a p1. */
-  static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1)
-  { return ParametrizedLine(p0, (p1-p0).normalized()); }
-
-  ~ParametrizedLine() {}
-
-  /** \returns the dimension of the ambient space in which the line lives */
-  inline Index dim() const { return m_direction.size(); }
-
-  const VectorType& origin() const { return m_origin; }
-  VectorType& origin() { return m_origin; }
-
-  const VectorType& direction() const { return m_direction; }
-  VectorType& direction() { return m_direction; }
-
-  /** \returns the squared distance of a point \a p to its projection onto the line \c *this.
-    * \sa distance()
-    */
-  RealScalar squaredDistance(const VectorType& p) const
-  {
-    VectorType diff = p - origin();
-    return (diff - direction().dot(diff) * direction()).squaredNorm();
-  }
-  /** \returns the distance of a point \a p to its projection onto the line \c *this.
-    * \sa squaredDistance()
-    */
-  RealScalar distance(const VectorType& p) const { return internal::sqrt(squaredDistance(p)); }
-
-  /** \returns the projection of a point \a p onto the line \c *this. */
-  VectorType projection(const VectorType& p) const
-  { return origin() + direction().dot(p-origin()) * direction(); }
-
-  VectorType pointAt(const Scalar& t) const;
-  
-  template <int OtherOptions>
-  Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
- 
-  template <int OtherOptions>
-  Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
-  
-  template <int OtherOptions>
-  VectorType intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<ParametrizedLine,
-           ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const
-  {
-    return typename internal::cast_return_type<ParametrizedLine,
-                    ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);
-  }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType,int OtherOptions>
-  inline explicit ParametrizedLine(const ParametrizedLine<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)
-  {
-    m_origin = other.origin().template cast<Scalar>();
-    m_direction = other.direction().template cast<Scalar>();
-  }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const ParametrizedLine& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
-  { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }
-
-protected:
-
-  VectorType m_origin, m_direction;
-};
-
-/** Constructs a parametrized line from a 2D hyperplane
-  *
-  * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line
-  */
-template <typename _Scalar, int _AmbientDim, int _Options>
-template <int OtherOptions>
-inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim,OtherOptions>& hyperplane)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
-  direction() = hyperplane.normal().unitOrthogonal();
-  origin() = -hyperplane.normal()*hyperplane.offset();
-}
-
-/** \returns the point at \a t along this line
-  */
-template <typename _Scalar, int _AmbientDim, int _Options>
-inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType
-ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt(const _Scalar& t) const
-{
-  return origin() + (direction()*t); 
-}
-
-/** \returns the parameter value of the intersection between \c *this and the given \a hyperplane
-  */
-template <typename _Scalar, int _AmbientDim, int _Options>
-template <int OtherOptions>
-inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
-{
-  return -(hyperplane.offset()+hyperplane.normal().dot(origin()))
-          / hyperplane.normal().dot(direction());
-}
-
-
-/** \deprecated use intersectionParameter()
-  * \returns the parameter value of the intersection between \c *this and the given \a hyperplane
-  */
-template <typename _Scalar, int _AmbientDim, int _Options>
-template <int OtherOptions>
-inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
-{
-  return intersectionParameter(hyperplane);
-}
-
-/** \returns the point of the intersection between \c *this and the given hyperplane
-  */
-template <typename _Scalar, int _AmbientDim, int _Options>
-template <int OtherOptions>
-inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType
-ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
-{
-  return pointAt(intersectionParameter(hyperplane));
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_PARAMETRIZEDLINE_H
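Similarly, a short sketch of the ParametrizedLine API from the hunk above, intersecting a line with a hyperplane (illustrative only, based on the declarations shown):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  // A line through two points; Through() normalizes the direction.
  ParametrizedLine<double, 3> ray =
      ParametrizedLine<double, 3>::Through(Vector3d(0, 0, 0), Vector3d(0, 0, 1));

  // The plane z = 2, expressed as a Hyperplane (unit normal + point on the plane).
  Hyperplane<double, 3> plane(Vector3d::UnitZ(), Vector3d(0, 0, 2));

  // Parameter of the intersection and the intersection point itself.
  double t = ray.intersectionParameter(plane);                   // 2
  std::cout << "t = " << t << "\n";
  std::cout << "point = " << ray.pointAt(t).transpose() << "\n"; // 0 0 2
  return 0;
}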
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/Quaternion.h b/resources/3rdparty/eigen/Eigen/src/Geometry/Quaternion.h
deleted file mode 100644
index c4a3cbf51..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Geometry/Quaternion.h
+++ /dev/null
@@ -1,778 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2009 Mathieu Gautier <mathieu.gautier@cea.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_QUATERNION_H
-#define EIGEN_QUATERNION_H
-namespace Eigen { 
-
-
-/***************************************************************************
-* Definition of QuaternionBase<Derived>
-* The implementation is at the end of the file
-***************************************************************************/
-
-namespace internal {
-template<typename Other,
-         int OtherRows=Other::RowsAtCompileTime,
-         int OtherCols=Other::ColsAtCompileTime>
-struct quaternionbase_assign_impl;
-}
-
-/** \geometry_module \ingroup Geometry_Module
-  * \class QuaternionBase
-  * \brief Base class for quaternion expressions
-  * \tparam Derived derived type (CRTP)
-  * \sa class Quaternion
-  */
-template<class Derived>
-class QuaternionBase : public RotationBase<Derived, 3>
-{
-  typedef RotationBase<Derived, 3> Base;
-public:
-  using Base::operator*;
-  using Base::derived;
-
-  typedef typename internal::traits<Derived>::Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef typename internal::traits<Derived>::Coefficients Coefficients;
-  enum {
-    Flags = Eigen::internal::traits<Derived>::Flags
-  };
-
- // typedef typename Matrix<Scalar,4,1> Coefficients;
-  /** the type of a 3D vector */
-  typedef Matrix<Scalar,3,1> Vector3;
-  /** the equivalent rotation matrix type */
-  typedef Matrix<Scalar,3,3> Matrix3;
-  /** the equivalent angle-axis type */
-  typedef AngleAxis<Scalar> AngleAxisType;
-
-
-
-  /** \returns the \c x coefficient */
-  inline Scalar x() const { return this->derived().coeffs().coeff(0); }
-  /** \returns the \c y coefficient */
-  inline Scalar y() const { return this->derived().coeffs().coeff(1); }
-  /** \returns the \c z coefficient */
-  inline Scalar z() const { return this->derived().coeffs().coeff(2); }
-  /** \returns the \c w coefficient */
-  inline Scalar w() const { return this->derived().coeffs().coeff(3); }
-
-  /** \returns a reference to the \c x coefficient */
-  inline Scalar& x() { return this->derived().coeffs().coeffRef(0); }
-  /** \returns a reference to the \c y coefficient */
-  inline Scalar& y() { return this->derived().coeffs().coeffRef(1); }
-  /** \returns a reference to the \c z coefficient */
-  inline Scalar& z() { return this->derived().coeffs().coeffRef(2); }
-  /** \returns a reference to the \c w coefficient */
-  inline Scalar& w() { return this->derived().coeffs().coeffRef(3); }
-
-  /** \returns a read-only vector expression of the imaginary part (x,y,z) */
-  inline const VectorBlock<const Coefficients,3> vec() const { return coeffs().template head<3>(); }
-
-  /** \returns a vector expression of the imaginary part (x,y,z) */
-  inline VectorBlock<Coefficients,3> vec() { return coeffs().template head<3>(); }
-
-  /** \returns a read-only vector expression of the coefficients (x,y,z,w) */
-  inline const typename internal::traits<Derived>::Coefficients& coeffs() const { return derived().coeffs(); }
-
-  /** \returns a vector expression of the coefficients (x,y,z,w) */
-  inline typename internal::traits<Derived>::Coefficients& coeffs() { return derived().coeffs(); }
-
-  EIGEN_STRONG_INLINE QuaternionBase<Derived>& operator=(const QuaternionBase<Derived>& other);
-  template<class OtherDerived> EIGEN_STRONG_INLINE Derived& operator=(const QuaternionBase<OtherDerived>& other);
-
-// disabled this copy operator as it is giving very strange compilation errors when compiling
-// test_stdvector with GCC 4.4.2. This looks like a GCC bug though, so feel free to re-enable it if it's
-// useful; however notice that we already have the templated operator= above and e.g. in MatrixBase
-// we didn't have to add, in addition to templated operator=, such a non-templated copy operator.
-//  Derived& operator=(const QuaternionBase& other)
-//  { return operator=<Derived>(other); }
-
-  Derived& operator=(const AngleAxisType& aa);
-  template<class OtherDerived> Derived& operator=(const MatrixBase<OtherDerived>& m);
-
-  /** \returns a quaternion representing an identity rotation
-    * \sa MatrixBase::Identity()
-    */
-  static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(1, 0, 0, 0); }
-
-  /** \sa QuaternionBase::Identity(), MatrixBase::setIdentity()
-    */
-  inline QuaternionBase& setIdentity() { coeffs() << 0, 0, 0, 1; return *this; }
-
-  /** \returns the squared norm of the quaternion's coefficients
-    * \sa QuaternionBase::norm(), MatrixBase::squaredNorm()
-    */
-  inline Scalar squaredNorm() const { return coeffs().squaredNorm(); }
-
-  /** \returns the norm of the quaternion's coefficients
-    * \sa QuaternionBase::squaredNorm(), MatrixBase::norm()
-    */
-  inline Scalar norm() const { return coeffs().norm(); }
-
-  /** Normalizes the quaternion \c *this
-    * \sa normalized(), MatrixBase::normalize() */
-  inline void normalize() { coeffs().normalize(); }
-  /** \returns a normalized copy of \c *this
-    * \sa normalize(), MatrixBase::normalized() */
-  inline Quaternion<Scalar> normalized() const { return Quaternion<Scalar>(coeffs().normalized()); }
-
-  /** \returns the dot product of \c *this and \a other
-    * Geometrically speaking, the dot product of two unit quaternions
-    * corresponds to the cosine of half the angle between the two rotations.
-    * \sa angularDistance()
-    */
-  template<class OtherDerived> inline Scalar dot(const QuaternionBase<OtherDerived>& other) const { return coeffs().dot(other.coeffs()); }
-
-  template<class OtherDerived> Scalar angularDistance(const QuaternionBase<OtherDerived>& other) const;
-
-  /** \returns an equivalent 3x3 rotation matrix */
-  Matrix3 toRotationMatrix() const;
-
-  /** \returns the quaternion which transforms \a a into \a b through a rotation */
-  template<typename Derived1, typename Derived2>
-  Derived& setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
-
-  template<class OtherDerived> EIGEN_STRONG_INLINE Quaternion<Scalar> operator* (const QuaternionBase<OtherDerived>& q) const;
-  template<class OtherDerived> EIGEN_STRONG_INLINE Derived& operator*= (const QuaternionBase<OtherDerived>& q);
-
-  /** \returns the quaternion describing the inverse rotation */
-  Quaternion<Scalar> inverse() const;
-
-  /** \returns the conjugated quaternion */
-  Quaternion<Scalar> conjugate() const;
-
-  /** \returns an interpolation for a constant motion between \a other and \c *this,
-    * with \a t in [0;1].
-    * See http://en.wikipedia.org/wiki/Slerp
-    */
-  template<class OtherDerived> Quaternion<Scalar> slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const;
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  template<class OtherDerived>
-  bool isApprox(const QuaternionBase<OtherDerived>& other, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
-  { return coeffs().isApprox(other.coeffs(), prec); }
-
-  /** \returns the result of rotating \a v by \c *this */
-  EIGEN_STRONG_INLINE Vector3 _transformVector(Vector3 v) const;
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type cast() const
-  {
-    return typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type(derived());
-  }
-
-#ifdef EIGEN_QUATERNIONBASE_PLUGIN
-# include EIGEN_QUATERNIONBASE_PLUGIN
-#endif
-};
-
-/***************************************************************************
-* Definition/implementation of Quaternion<Scalar>
-***************************************************************************/
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class Quaternion
-  *
-  * \brief The quaternion class used to represent 3D orientations and rotations
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients
-  *
-  * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
-  * orientations and rotations of objects in three dimensions. Compared to other representations
-  * like Euler angles or 3x3 matrices, quaternions offer the following advantages:
-  * \li \b compact storage (4 scalars)
-  * \li \b efficient to compose (28 flops),
-  * \li \b stable spherical interpolation
-  *
-  * The following two typedefs are provided for convenience:
-  * \li \c Quaternionf for \c float
-  * \li \c Quaterniond for \c double
-  *
-  * \sa  class AngleAxis, class Transform
-  */
-
-namespace internal {
-template<typename _Scalar,int _Options>
-struct traits<Quaternion<_Scalar,_Options> >
-{
-  typedef Quaternion<_Scalar,_Options> PlainObject;
-  typedef _Scalar Scalar;
-  typedef Matrix<_Scalar,4,1,_Options> Coefficients;
-  enum{
-    IsAligned = internal::traits<Coefficients>::Flags & AlignedBit,
-    Flags = IsAligned ? (AlignedBit | LvalueBit) : LvalueBit
-  };
-};
-}
-
-template<typename _Scalar, int _Options>
-class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >
-{
-  typedef QuaternionBase<Quaternion<_Scalar,_Options> > Base;
-  enum { IsAligned = internal::traits<Quaternion>::IsAligned };
-
-public:
-  typedef _Scalar Scalar;
-
-  EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion)
-  using Base::operator*=;
-
-  typedef typename internal::traits<Quaternion>::Coefficients Coefficients;
-  typedef typename Base::AngleAxisType AngleAxisType;
-
-  /** Default constructor leaving the quaternion uninitialized. */
-  inline Quaternion() {}
-
-  /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from
-    * its four coefficients \a w, \a x, \a y and \a z.
-    *
-    * \warning Note the order of the arguments: the real \a w coefficient first,
-    * while internally the coefficients are stored in the following order:
-    * [\c x, \c y, \c z, \c w]
-    */
-  inline Quaternion(const Scalar& w, const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, w){}
-
-  /** Constructs and initializes a quaternion from the array data */
-  inline Quaternion(const Scalar* data) : m_coeffs(data) {}
-
-  /** Copy constructor */
-  template<class Derived> EIGEN_STRONG_INLINE Quaternion(const QuaternionBase<Derived>& other) { this->Base::operator=(other); }
-
-  /** Constructs and initializes a quaternion from the angle-axis \a aa */
-  explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; }
-
-  /** Constructs and initializes a quaternion from either:
-    *  - a rotation matrix expression,
-    *  - a 4D vector expression representing quaternion coefficients.
-    */
-  template<typename Derived>
-  explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; }
-
-  /** Explicit copy constructor with scalar conversion */
-  template<typename OtherScalar, int OtherOptions>
-  explicit inline Quaternion(const Quaternion<OtherScalar, OtherOptions>& other)
-  { m_coeffs = other.coeffs().template cast<Scalar>(); }
-
-  template<typename Derived1, typename Derived2>
-  static Quaternion FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
-
-  inline Coefficients& coeffs() { return m_coeffs;}
-  inline const Coefficients& coeffs() const { return m_coeffs;}
-
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(IsAligned)
-
-protected:
-  Coefficients m_coeffs;
-  
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    static EIGEN_STRONG_INLINE void _check_template_params()
-    {
-      EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options,
-        INVALID_MATRIX_TEMPLATE_PARAMETERS)
-    }
-#endif
-};
-
-/** \ingroup Geometry_Module
-  * single precision quaternion type */
-typedef Quaternion<float> Quaternionf;
-/** \ingroup Geometry_Module
-  * double precision quaternion type */
-typedef Quaternion<double> Quaterniond;
-
-/***************************************************************************
-* Specialization of Map<Quaternion<Scalar>>
-***************************************************************************/
-
-namespace internal {
-  template<typename _Scalar, int _Options>
-  struct traits<Map<Quaternion<_Scalar>, _Options> >:
-  traits<Quaternion<_Scalar, _Options> >
-  {
-    typedef _Scalar Scalar;
-    typedef Map<Matrix<_Scalar,4,1>, _Options> Coefficients;
-
-    typedef traits<Quaternion<_Scalar, _Options> > TraitsBase;
-    enum {
-      IsAligned = TraitsBase::IsAligned,
-
-      Flags = TraitsBase::Flags
-    };
-  };
-}
-
-namespace internal {
-  template<typename _Scalar, int _Options>
-  struct traits<Map<const Quaternion<_Scalar>, _Options> >:
-  traits<Quaternion<_Scalar> >
-  {
-    typedef _Scalar Scalar;
-    typedef Map<const Matrix<_Scalar,4,1>, _Options> Coefficients;
-
-    typedef traits<Quaternion<_Scalar, _Options> > TraitsBase;
-    enum {
-      IsAligned = TraitsBase::IsAligned,
-      Flags = TraitsBase::Flags & ~LvalueBit
-    };
-  };
-}
-
-/** \brief Quaternion expression mapping a constant memory buffer
-  *
-  * \param _Scalar the type of the Quaternion coefficients
-  * \param _Options see class Map
-  *
-  * This is a specialization of class Map for Quaternion. This class allows viewing
-  * a 4-scalar memory buffer as an Eigen Quaternion object.
-  *
-  * \sa class Map, class Quaternion, class QuaternionBase
-  */
-template<typename _Scalar, int _Options>
-class Map<const Quaternion<_Scalar>, _Options >
-  : public QuaternionBase<Map<const Quaternion<_Scalar>, _Options> >
-{
-    typedef QuaternionBase<Map<const Quaternion<_Scalar>, _Options> > Base;
-
-  public:
-    typedef _Scalar Scalar;
-    typedef typename internal::traits<Map>::Coefficients Coefficients;
-    EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
-    using Base::operator*=;
-
-    /** Constructs a Mapped Quaternion object from the pointer \a coeffs
-      *
-      * The pointer \a coeffs must reference the four coefficients of the Quaternion in the following order:
-      * \code *coeffs == {x, y, z, w} \endcode
-      *
-      * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
-    EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {}
-
-    inline const Coefficients& coeffs() const { return m_coeffs;}
-
-  protected:
-    const Coefficients m_coeffs;
-};
-
-/** \brief Expression of a quaternion from a memory buffer
-  *
-  * \param _Scalar the type of the Quaternion coefficients
-  * \param _Options see class Map
-  *
-  * This is a specialization of class Map for Quaternion. This class allows viewing
-  * a 4-scalar memory buffer as an Eigen Quaternion object.
-  *
-  * \sa class Map, class Quaternion, class QuaternionBase
-  */
-template<typename _Scalar, int _Options>
-class Map<Quaternion<_Scalar>, _Options >
-  : public QuaternionBase<Map<Quaternion<_Scalar>, _Options> >
-{
-    typedef QuaternionBase<Map<Quaternion<_Scalar>, _Options> > Base;
-
-  public:
-    typedef _Scalar Scalar;
-    typedef typename internal::traits<Map>::Coefficients Coefficients;
-    EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
-    using Base::operator*=;
-
-    /** Constructs a Mapped Quaternion object from the pointer \a coeffs
-      *
-      * The pointer \a coeffs must reference the four coefficients of the Quaternion in the following order:
-      * \code *coeffs == {x, y, z, w} \endcode
-      *
-      * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
-    EIGEN_STRONG_INLINE Map(Scalar* coeffs) : m_coeffs(coeffs) {}
-
-    inline Coefficients& coeffs() { return m_coeffs; }
-    inline const Coefficients& coeffs() const { return m_coeffs; }
-
-  protected:
-    Coefficients m_coeffs;
-};
-
-/** \ingroup Geometry_Module
-  * Map an unaligned array of single precision scalars as a quaternion */
-typedef Map<Quaternion<float>, 0>         QuaternionMapf;
-/** \ingroup Geometry_Module
-  * Map an unaligned array of double precision scalars as a quaternion */
-typedef Map<Quaternion<double>, 0>        QuaternionMapd;
-/** \ingroup Geometry_Module
-  * Map a 16-byte aligned array of single precision scalars as a quaternion */
-typedef Map<Quaternion<float>, Aligned>   QuaternionMapAlignedf;
-/** \ingroup Geometry_Module
-  * Map a 16-byte aligned array of double precision scalars as a quaternion */
-typedef Map<Quaternion<double>, Aligned>  QuaternionMapAlignedd;
-
-/***************************************************************************
-* Implementation of QuaternionBase methods
-***************************************************************************/
-
-// Generic Quaternion * Quaternion product
-// This product can be specialized for a given architecture via the Arch template argument.
-namespace internal {
-template<int Arch, class Derived1, class Derived2, typename Scalar, int _Options> struct quat_product
-{
-  static EIGEN_STRONG_INLINE Quaternion<Scalar> run(const QuaternionBase<Derived1>& a, const QuaternionBase<Derived2>& b){
-    return Quaternion<Scalar>
-    (
-      a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),
-      a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),
-      a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),
-      a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x()
-    );
-  }
-};
-}
-
-/** \returns the concatenation of two rotations as a quaternion-quaternion product */
-template <class Derived>
-template <class OtherDerived>
-EIGEN_STRONG_INLINE Quaternion<typename internal::traits<Derived>::Scalar>
-QuaternionBase<Derived>::operator* (const QuaternionBase<OtherDerived>& other) const
-{
-  EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename OtherDerived::Scalar>::value),
-   YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-  return internal::quat_product<Architecture::Target, Derived, OtherDerived,
-                         typename internal::traits<Derived>::Scalar,
-                         internal::traits<Derived>::IsAligned && internal::traits<OtherDerived>::IsAligned>::run(*this, other);
-}
-
-/** \sa operator*(Quaternion) */
-template <class Derived>
-template <class OtherDerived>
-EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const QuaternionBase<OtherDerived>& other)
-{
-  derived() = derived() * other.derived();
-  return derived();
-}
-
-/** Rotation of a vector by a quaternion.
-  * \remarks If the quaternion is used to rotate several points (>1)
-  * then it is much more efficient to first convert it to a 3x3 Matrix.
-  * Comparison of the operation cost for n transformations:
-  *   - Quaternion:     30n
-  *   - Via a Matrix3: 24 + 15n
-  */
-template <class Derived>
-EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3
-QuaternionBase<Derived>::_transformVector(Vector3 v) const
-{
-    // Note that this algorithm comes from the optimization by hand
-    // of the conversion to a Matrix followed by a Matrix/Vector product.
-    // It appears to be much faster than the common algorithm found
-    // in the literature (30 versus 39 flops). It also requires two
-    // Vector3 as temporaries.
-    Vector3 uv = this->vec().cross(v);
-    uv += uv;
-    return v + this->w() * uv + this->vec().cross(uv);
-}
-
-template<class Derived>
-EIGEN_STRONG_INLINE QuaternionBase<Derived>& QuaternionBase<Derived>::operator=(const QuaternionBase<Derived>& other)
-{
-  coeffs() = other.coeffs();
-  return derived();
-}
-
-template<class Derived>
-template<class OtherDerived>
-EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const QuaternionBase<OtherDerived>& other)
-{
-  coeffs() = other.coeffs();
-  return derived();
-}
-
-/** Set \c *this from an angle-axis \a aa and returns a reference to \c *this
-  */
-template<class Derived>
-EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const AngleAxisType& aa)
-{
-  Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings
-  this->w() = internal::cos(ha);
-  this->vec() = internal::sin(ha) * aa.axis();
-  return derived();
-}
-
-/** Set \c *this from the expression \a xpr:
-  *   - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion
-  *   - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be a rotation matrix
-  *     and \a xpr is converted to a quaternion
-  */
-
-template<class Derived>
-template<class MatrixDerived>
-inline Derived& QuaternionBase<Derived>::operator=(const MatrixBase<MatrixDerived>& xpr)
-{
-  EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename MatrixDerived::Scalar>::value),
-   YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-  internal::quaternionbase_assign_impl<MatrixDerived>::run(*this, xpr.derived());
-  return derived();
-}
-
-/** Convert the quaternion to a 3x3 rotation matrix. The quaternion is required to
-  * be normalized, otherwise the result is undefined.
-  */
-template<class Derived>
-inline typename QuaternionBase<Derived>::Matrix3
-QuaternionBase<Derived>::toRotationMatrix(void) const
-{
-  // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!)
-  // if not inlined then the cost of the return by value is huge ~ +35%,
-  // however, not inlining this function is an order of magnitude slower, so
-  // it has to be inlined, and so the return by value is not an issue
-  Matrix3 res;
-
-  const Scalar tx  = Scalar(2)*this->x();
-  const Scalar ty  = Scalar(2)*this->y();
-  const Scalar tz  = Scalar(2)*this->z();
-  const Scalar twx = tx*this->w();
-  const Scalar twy = ty*this->w();
-  const Scalar twz = tz*this->w();
-  const Scalar txx = tx*this->x();
-  const Scalar txy = ty*this->x();
-  const Scalar txz = tz*this->x();
-  const Scalar tyy = ty*this->y();
-  const Scalar tyz = tz*this->y();
-  const Scalar tzz = tz*this->z();
-
-  res.coeffRef(0,0) = Scalar(1)-(tyy+tzz);
-  res.coeffRef(0,1) = txy-twz;
-  res.coeffRef(0,2) = txz+twy;
-  res.coeffRef(1,0) = txy+twz;
-  res.coeffRef(1,1) = Scalar(1)-(txx+tzz);
-  res.coeffRef(1,2) = tyz-twx;
-  res.coeffRef(2,0) = txz-twy;
-  res.coeffRef(2,1) = tyz+twx;
-  res.coeffRef(2,2) = Scalar(1)-(txx+tyy);
-
-  return res;
-}
-
-/** Sets \c *this to be a quaternion representing a rotation between
-  * the two arbitrary vectors \a a and \a b. In other words, the built
-  * rotation represents a rotation sending the line of direction \a a
-  * to the line of direction \a b, both lines passing through the origin.
-  *
-  * \returns a reference to \c *this.
-  *
-  * Note that the two input vectors do \b not have to be normalized, and
-  * do not need to have the same norm.
-  */
-template<class Derived>
-template<typename Derived1, typename Derived2>
-inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
-{
-  using std::max;
-  Vector3 v0 = a.normalized();
-  Vector3 v1 = b.normalized();
-  Scalar c = v1.dot(v0);
-
-  // if dot == -1, vectors are nearly opposites
-  // => accurately compute the rotation axis by computing the
-  //    intersection of the two planes. This is done by solving:
-  //       x^T v0 = 0
-  //       x^T v1 = 0
-  //    under the constraint:
-  //       ||x|| = 1
-  //    which yields a singular value problem
-  if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
-  {
-    c = max<Scalar>(c,-1);
-    Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
-    JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
-    Vector3 axis = svd.matrixV().col(2);
-
-    Scalar w2 = (Scalar(1)+c)*Scalar(0.5);
-    this->w() = internal::sqrt(w2);
-    this->vec() = axis * internal::sqrt(Scalar(1) - w2);
-    return derived();
-  }
-  Vector3 axis = v0.cross(v1);
-  Scalar s = internal::sqrt((Scalar(1)+c)*Scalar(2));
-  Scalar invs = Scalar(1)/s;
-  this->vec() = axis * invs;
-  this->w() = s * Scalar(0.5);
-
-  return derived();
-}
-
-
-/** Returns a quaternion representing a rotation between
-  * the two arbitrary vectors \a a and \a b. In other words, the built
-  * rotation represents a rotation sending the line of direction \a a
-  * to the line of direction \a b, both lines passing through the origin.
-  *
-  * \returns resulting quaternion
-  *
-  * Note that the two input vectors do \b not have to be normalized, and
-  * do not need to have the same norm.
-  */
-template<typename Scalar, int Options>
-template<typename Derived1, typename Derived2>
-Quaternion<Scalar,Options> Quaternion<Scalar,Options>::FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
-{
-    Quaternion quat;
-    quat.setFromTwoVectors(a, b);
-    return quat;
-}
-
-
-/** \returns the multiplicative inverse of \c *this
-  * Note that in most cases, i.e., if you simply want the opposite rotation,
-  * and/or the quaternion is normalized, then it is enough to use the conjugate.
-  *
-  * \sa QuaternionBase::conjugate()
-  */
-template <class Derived>
-inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Derived>::inverse() const
-{
-  // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite()  ??
-  Scalar n2 = this->squaredNorm();
-  if (n2 > 0)
-    return Quaternion<Scalar>(conjugate().coeffs() / n2);
-  else
-  {
-    // return an invalid result to flag the error
-    return Quaternion<Scalar>(Coefficients::Zero());
-  }
-}
-
-/** \returns the conjugate of \c *this, which is equal to the multiplicative inverse
-  * if the quaternion is normalized.
-  * The conjugate of a quaternion represents the opposite rotation.
-  *
-  * \sa QuaternionBase::inverse()
-  */
-template <class Derived>
-inline Quaternion<typename internal::traits<Derived>::Scalar>
-QuaternionBase<Derived>::conjugate() const
-{
-  return Quaternion<Scalar>(this->w(),-this->x(),-this->y(),-this->z());
-}
-
-/** \returns the angle (in radians) between two rotations
-  * \sa dot()
-  */
-template <class Derived>
-template <class OtherDerived>
-inline typename internal::traits<Derived>::Scalar
-QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
-{
-  using std::acos;
-  double d = internal::abs(this->dot(other));
-  if (d>=1.0)
-    return Scalar(0);
-  return static_cast<Scalar>(2 * acos(d));
-}
-
-/** \returns the spherical linear interpolation between the two quaternions
-  * \c *this and \a other at the parameter \a t
-  */
-template <class Derived>
-template <class OtherDerived>
-Quaternion<typename internal::traits<Derived>::Scalar>
-QuaternionBase<Derived>::slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const
-{
-  using std::acos;
-  static const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon();
-  Scalar d = this->dot(other);
-  Scalar absD = internal::abs(d);
-
-  Scalar scale0;
-  Scalar scale1;
-
-  if(absD>=one)
-  {
-    scale0 = Scalar(1) - t;
-    scale1 = t;
-  }
-  else
-  {
-    // theta is the angle between the 2 quaternions
-    Scalar theta = acos(absD);
-    Scalar sinTheta = internal::sin(theta);
-
-    scale0 = internal::sin( ( Scalar(1) - t ) * theta) / sinTheta;
-    scale1 = internal::sin( ( t * theta) ) / sinTheta;
-  }
-  if(d<0) scale1 = -scale1;
-
-  return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
-}
-
-namespace internal {
-
-// set from a rotation matrix
-template<typename Other>
-struct quaternionbase_assign_impl<Other,3,3>
-{
-  typedef typename Other::Scalar Scalar;
-  typedef DenseIndex Index;
-  template<class Derived> static inline void run(QuaternionBase<Derived>& q, const Other& mat)
-  {
-    // This algorithm comes from  "Quaternion Calculus and Fast Animation",
-    // Ken Shoemake, 1987 SIGGRAPH course notes
-    Scalar t = mat.trace();
-    if (t > Scalar(0))
-    {
-      t = sqrt(t + Scalar(1.0));
-      q.w() = Scalar(0.5)*t;
-      t = Scalar(0.5)/t;
-      q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t;
-      q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t;
-      q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t;
-    }
-    else
-    {
-      DenseIndex i = 0;
-      if (mat.coeff(1,1) > mat.coeff(0,0))
-        i = 1;
-      if (mat.coeff(2,2) > mat.coeff(i,i))
-        i = 2;
-      DenseIndex j = (i+1)%3;
-      DenseIndex k = (j+1)%3;
-
-      t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
-      q.coeffs().coeffRef(i) = Scalar(0.5) * t;
-      t = Scalar(0.5)/t;
-      q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t;
-      q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t;
-      q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t;
-    }
-  }
-};
-
-// set from a vector of coefficients assumed to be a quaternion
-template<typename Other>
-struct quaternionbase_assign_impl<Other,4,1>
-{
-  typedef typename Other::Scalar Scalar;
-  template<class Derived> static inline void run(QuaternionBase<Derived>& q, const Other& vec)
-  {
-    q.coeffs() = vec;
-  }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_QUATERNION_H
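A brief sketch of the Quaternion API removed above: construction from an angle-axis, vector rotation, FromTwoVectors and slerp (illustrative only, based on the declarations shown in the hunk):

#include <Eigen/Geometry>
#include <cmath>
#include <iostream>

int main()
{
  using namespace Eigen;
  const double pi = std::acos(-1.0);

  // 90 degree rotation about the Z axis, stored as a unit quaternion.
  Quaterniond q(AngleAxisd(pi / 2, Vector3d::UnitZ()));

  // Rotate a vector directly, or via the equivalent 3x3 rotation matrix.
  Vector3d v(1, 0, 0);
  std::cout << (q * v).transpose() << "\n";                    // ~ 0 1 0
  std::cout << (q.toRotationMatrix() * v).transpose() << "\n"; // same result

  // Quaternion mapping one direction onto another (inputs need not be normalized).
  Quaterniond r = Quaterniond::FromTwoVectors(Vector3d::UnitX(), Vector3d::UnitY());

  // Spherical interpolation halfway between the identity and r.
  Quaterniond h = Quaterniond::Identity().slerp(0.5, r);
  std::cout << h.coeffs().transpose() << "\n"; // coefficients stored as x y z w
  return 0;
}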
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/Rotation2D.h b/resources/3rdparty/eigen/Eigen/src/Geometry/Rotation2D.h
deleted file mode 100644
index 060ab10f3..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Geometry/Rotation2D.h
+++ /dev/null
@@ -1,154 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_ROTATION2D_H
-#define EIGEN_ROTATION2D_H
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class Rotation2D
-  *
-  * \brief Represents a rotation/orientation in a 2 dimensional space.
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients
-  *
-  * This class is equivalent to a single scalar representing a counter-clockwise rotation
-  * as a single angle in radians. It provides some additional features such as the automatic
-  * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar
-  * interface to Quaternion in order to facilitate the writing of generic algorithms
-  * dealing with rotations.
-  *
-  * \sa class Quaternion, class Transform
-  */
-
-namespace internal {
-
-template<typename _Scalar> struct traits<Rotation2D<_Scalar> >
-{
-  typedef _Scalar Scalar;
-};
-} // end namespace internal
-
-template<typename _Scalar>
-class Rotation2D : public RotationBase<Rotation2D<_Scalar>,2>
-{
-  typedef RotationBase<Rotation2D<_Scalar>,2> Base;
-
-public:
-
-  using Base::operator*;
-
-  enum { Dim = 2 };
-  /** the scalar type of the coefficients */
-  typedef _Scalar Scalar;
-  typedef Matrix<Scalar,2,1> Vector2;
-  typedef Matrix<Scalar,2,2> Matrix2;
-
-protected:
-
-  Scalar m_angle;
-
-public:
-
-  /** Constructs a 2D counter-clockwise rotation from the angle \a a in radians. */
-  inline Rotation2D(const Scalar& a) : m_angle(a) {}
-
-  /** \returns the rotation angle */
-  inline Scalar angle() const { return m_angle; }
-
-  /** \returns a read-write reference to the rotation angle */
-  inline Scalar& angle() { return m_angle; }
-
-  /** \returns the inverse rotation */
-  inline Rotation2D inverse() const { return -m_angle; }
-
-  /** Concatenates two rotations */
-  inline Rotation2D operator*(const Rotation2D& other) const
-  { return m_angle + other.m_angle; }
-
-  /** Concatenates two rotations */
-  inline Rotation2D& operator*=(const Rotation2D& other)
-  { m_angle += other.m_angle; return *this; }
-
-  /** Applies the rotation to a 2D vector */
-  Vector2 operator* (const Vector2& vec) const
-  { return toRotationMatrix() * vec; }
-
-  template<typename Derived>
-  Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
-  Matrix2 toRotationMatrix(void) const;
-
-  /** \returns the spherical interpolation between \c *this and \a other using
-    * parameter \a t. It is in fact equivalent to a linear interpolation.
-    */
-  inline Rotation2D slerp(const Scalar& t, const Rotation2D& other) const
-  { return m_angle * (1-t) + other.angle() * t; }
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const
-  { return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit Rotation2D(const Rotation2D<OtherScalarType>& other)
-  {
-    m_angle = Scalar(other.angle());
-  }
-
-  static inline Rotation2D Identity() { return Rotation2D(0); }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const Rotation2D& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
-  { return internal::isApprox(m_angle,other.m_angle, prec); }
-};
-
-/** \ingroup Geometry_Module
-  * single precision 2D rotation type */
-typedef Rotation2D<float> Rotation2Df;
-/** \ingroup Geometry_Module
-  * double precision 2D rotation type */
-typedef Rotation2D<double> Rotation2Dd;
-
-/** Set \c *this from a 2x2 rotation matrix \a mat.
-  * In other words, this function extracts the rotation angle
-  * from the rotation matrix.
-  */
-template<typename Scalar>
-template<typename Derived>
-Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
-{
-  EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
-  m_angle = internal::atan2(mat.coeff(1,0), mat.coeff(0,0));
-  return *this;
-}
-
-/** Constructs and \returns an equivalent 2x2 rotation matrix.
-  */
-template<typename Scalar>
-typename Rotation2D<Scalar>::Matrix2
-Rotation2D<Scalar>::toRotationMatrix(void) const
-{
-  Scalar sinA = internal::sin(m_angle);
-  Scalar cosA = internal::cos(m_angle);
-  return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_ROTATION2D_H
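A minimal sketch of the Rotation2D API from the hunk above (illustrative only):

#include <Eigen/Geometry>
#include <cmath>
#include <iostream>

int main()
{
  using namespace Eigen;
  const double pi = std::acos(-1.0);

  // 90 degree counter-clockwise rotation in the plane.
  Rotation2Dd rot(pi / 2);

  // Apply it to a vector and recover the angle back from its 2x2 matrix.
  Vector2d v = rot * Vector2d(1, 0);            // ~ (0, 1)
  Rotation2Dd back(0);
  back.fromRotationMatrix(rot.toRotationMatrix());

  std::cout << v.transpose() << "\n";
  std::cout << "angle: " << back.angle() << "\n"; // ~ pi/2
  return 0;
}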
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/Scaling.h b/resources/3rdparty/eigen/Eigen/src/Geometry/Scaling.h
deleted file mode 100644
index 1c25f36fe..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Geometry/Scaling.h
+++ /dev/null
@@ -1,166 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SCALING_H
-#define EIGEN_SCALING_H
-
-namespace Eigen { 
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class Scaling
-  *
-  * \brief Represents a generic uniform scaling transformation
-  *
-  * \param _Scalar the scalar type, i.e., the type of the coefficients.
-  *
-  * This class represents a uniform scaling transformation. It is the return
-  * type of Scaling(Scalar), and most of the time this is the only way it
-  * is used. In particular, this class is not meant to be used to store a scaling transformation,
-  * but rather to simplify the construction and update of Transform objects.
-  *
-  * To represent an axis aligned scaling, use the DiagonalMatrix class.
-  *
-  * \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform
-  */
-template<typename _Scalar>
-class UniformScaling
-{
-public:
-  /** the scalar type of the coefficients */
-  typedef _Scalar Scalar;
-
-protected:
-
-  Scalar m_factor;
-
-public:
-
-  /** Default constructor without initialization. */
-  UniformScaling() {}
-  /** Constructs and initializes a uniform scaling transformation */
-  explicit inline UniformScaling(const Scalar& s) : m_factor(s) {}
-
-  inline const Scalar& factor() const { return m_factor; }
-  inline Scalar& factor() { return m_factor; }
-
-  /** Concatenates two uniform scalings */
-  inline UniformScaling operator* (const UniformScaling& other) const
-  { return UniformScaling(m_factor * other.factor()); }
-
-  /** Concatenates a uniform scaling and a translation */
-  template<int Dim>
-  inline Transform<Scalar,Dim,Affine> operator* (const Translation<Scalar,Dim>& t) const;
-
-  /** Concatenates a uniform scaling and an affine transformation */
-  template<int Dim, int Mode, int Options>
-  inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator* (const Transform<Scalar,Dim, Mode, Options>& t) const
-  {
-    Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> res = t;
-    res.prescale(factor());
-    return res;
-  }
-
-  /** Concatenates a uniform scaling and a linear transformation matrix */
-  // TODO returns an expression
-  template<typename Derived>
-  inline typename internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const
-  { return other * m_factor; }
-
-  template<typename Derived,int Dim>
-  inline Matrix<Scalar,Dim,Dim> operator*(const RotationBase<Derived,Dim>& r) const
-  { return r.toRotationMatrix() * m_factor; }
-
-  /** \returns the inverse scaling */
-  inline UniformScaling inverse() const
-  { return UniformScaling(Scalar(1)/m_factor); }
-
-  /** \returns \c *this with scalar type casted to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline UniformScaling<NewScalarType> cast() const
-  { return UniformScaling<NewScalarType>(NewScalarType(m_factor)); }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit UniformScaling(const UniformScaling<OtherScalarType>& other)
-  { m_factor = Scalar(other.factor()); }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const UniformScaling& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
-  { return internal::isApprox(m_factor, other.factor(), prec); }
-
-};
-
-/** Concatenates a linear transformation matrix and a uniform scaling */
-// NOTE this operator is defined in MatrixBase and not as a friend function
-// of UniformScaling to fix an internal crash of Intel's ICC
-template<typename Derived> typename MatrixBase<Derived>::ScalarMultipleReturnType
-MatrixBase<Derived>::operator*(const UniformScaling<Scalar>& s) const
-{ return derived() * s.factor(); }
-
-/** Constructs a uniform scaling from scale factor \a s */
-static inline UniformScaling<float> Scaling(float s) { return UniformScaling<float>(s); }
-/** Constructs a uniform scaling from scale factor \a s */
-static inline UniformScaling<double> Scaling(double s) { return UniformScaling<double>(s); }
-/** Constructs a uniform scaling from scale factor \a s */
-template<typename RealScalar>
-static inline UniformScaling<std::complex<RealScalar> > Scaling(const std::complex<RealScalar>& s)
-{ return UniformScaling<std::complex<RealScalar> >(s); }
-
-/** Constructs a 2D axis aligned scaling */
-template<typename Scalar>
-static inline DiagonalMatrix<Scalar,2> Scaling(const Scalar& sx, const Scalar& sy)
-{ return DiagonalMatrix<Scalar,2>(sx, sy); }
-/** Constructs a 3D axis aligned scaling */
-template<typename Scalar>
-static inline DiagonalMatrix<Scalar,3> Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz)
-{ return DiagonalMatrix<Scalar,3>(sx, sy, sz); }
-
-/** Constructs an axis aligned scaling expression from vector expression \a coeffs.
-  * This is an alias for coeffs.asDiagonal().
-  */
-template<typename Derived>
-static inline const DiagonalWrapper<const Derived> Scaling(const MatrixBase<Derived>& coeffs)
-{ return coeffs.asDiagonal(); }
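-
-/* A minimal usage sketch of the Scaling() helpers above, assuming <Eigen/Geometry>
-   is included; the variable names below are illustrative only.
-
-     Eigen::Affine3d T = Eigen::Scaling(2.0) * Eigen::Translation3d(1.0, 2.0, 3.0);
-     Eigen::DiagonalMatrix<double,3> S = Eigen::Scaling(0.5, 1.0, 2.0); // axis aligned
-     Eigen::Vector3d v = S * Eigen::Vector3d::Ones();                   // v = (0.5, 1, 2)
-*/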
-
-/** \addtogroup Geometry_Module */
-//@{
-/** \deprecated */
-typedef DiagonalMatrix<float, 2> AlignedScaling2f;
-/** \deprecated */
-typedef DiagonalMatrix<double,2> AlignedScaling2d;
-/** \deprecated */
-typedef DiagonalMatrix<float, 3> AlignedScaling3f;
-/** \deprecated */
-typedef DiagonalMatrix<double,3> AlignedScaling3d;
-//@}
-
-template<typename Scalar>
-template<int Dim>
-inline Transform<Scalar,Dim,Affine>
-UniformScaling<Scalar>::operator* (const Translation<Scalar,Dim>& t) const
-{
-  Transform<Scalar,Dim,Affine> res;
-  res.matrix().setZero();
-  res.linear().diagonal().fill(factor());
-  res.translation() = factor() * t.vector();
-  res(Dim,Dim) = Scalar(1);
-  return res;
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_SCALING_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/Transform.h b/resources/3rdparty/eigen/Eigen/src/Geometry/Transform.h
deleted file mode 100644
index 887e718d6..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Geometry/Transform.h
+++ /dev/null
@@ -1,1440 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_TRANSFORM_H
-#define EIGEN_TRANSFORM_H
-
-namespace Eigen { 
-
-namespace internal {
-
-template<typename Transform>
-struct transform_traits
-{
-  enum
-  {
-    Dim = Transform::Dim,
-    HDim = Transform::HDim,
-    Mode = Transform::Mode,
-    IsProjective = (int(Mode)==int(Projective))
-  };
-};
-
-template< typename TransformType,
-          typename MatrixType,
-          int Case = transform_traits<TransformType>::IsProjective ? 0
-                   : int(MatrixType::RowsAtCompileTime) == int(transform_traits<TransformType>::HDim) ? 1
-                   : 2>
-struct transform_right_product_impl;
-
-template< typename Other,
-          int Mode,
-          int Options,
-          int Dim,
-          int HDim,
-          int OtherRows=Other::RowsAtCompileTime,
-          int OtherCols=Other::ColsAtCompileTime>
-struct transform_left_product_impl;
-
-template< typename Lhs,
-          typename Rhs,
-          bool AnyProjective = 
-            transform_traits<Lhs>::IsProjective ||
-            transform_traits<Rhs>::IsProjective>
-struct transform_transform_product_impl;
-
-template< typename Other,
-          int Mode,
-          int Options,
-          int Dim,
-          int HDim,
-          int OtherRows=Other::RowsAtCompileTime,
-          int OtherCols=Other::ColsAtCompileTime>
-struct transform_construct_from_matrix;
-
-template<typename TransformType> struct transform_take_affine_part;
-
-} // end namespace internal
-
-/** \geometry_module \ingroup Geometry_Module
-  *
-  * \class Transform
-  *
-  * \brief Represents a homogeneous transformation in an N-dimensional space
-  *
-  * \tparam _Scalar the scalar type, i.e., the type of the coefficients
-  * \tparam _Dim the dimension of the space
-  * \tparam _Mode the type of the transformation. Can be:
-  *              - #Affine: the transformation is stored as a (Dim+1)^2 matrix,
-  *                         where the last row is assumed to be [0 ... 0 1].
-  *              - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix.
-  *              - #Projective: the transformation is stored as a (Dim+1)^2 matrix
-  *                             without any assumption.
-  * \tparam _Options has the same meaning as in class Matrix. It allows one to specify DontAlign and/or RowMajor.
-  *                  These Options are passed directly to the underlying matrix type.
-  *
-  * The homography is internally represented and stored by a matrix which
-  * is available through the matrix() method. To understand the behavior of
-  * this class you have to think of a Transform object as its internal
-  * matrix representation. The chosen convention is right multiply:
-  *
-  * \code v' = T * v \endcode
-  *
-  * Therefore, an affine transformation matrix M is shaped like this:
-  *
-  * \f$ \left( \begin{array}{cc}
-  * linear & translation\\
-  * 0 ... 0 & 1
-  * \end{array} \right) \f$
-  *
-  * Note that for a projective transformation the last row can be anything,
-  * and then the interpretation of different parts might be slightly different.
-  *
-  * However, unlike a plain matrix, the Transform class provides many features
-  * simplifying both its assembly and usage. In particular, it can be composed
-  * with any other transformations (Transform,Translation,RotationBase,Matrix)
-  * and can be directly used to transform implicit homogeneous vectors. All these
-  * operations are handled via the operator*. For the composition of transformations,
-  * the principle is to first convert the right/left hand sides of the product
-  * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product.
-  * Of course, internally, operator* tries to perform the minimal number of operations
-  * according to the nature of each term. Likewise, when applying the transform
-  * to non homogeneous vectors, the latter are automatically promoted to homogeneous
-  * ones before doing the matrix product. The conversions to homogeneous representations
-  * are performed as follows:
-  *
-  * \b Translation t (Dim)x(1):
-  * \f$ \left( \begin{array}{cc}
-  * I & t \\
-  * 0\,...\,0 & 1
-  * \end{array} \right) \f$
-  *
-  * \b Rotation R (Dim)x(Dim):
-  * \f$ \left( \begin{array}{cc}
-  * R & 0\\
-  * 0\,...\,0 & 1
-  * \end{array} \right) \f$
-  *
-  * \b Linear \b Matrix L (Dim)x(Dim):
-  * \f$ \left( \begin{array}{cc}
-  * L & 0\\
-  * 0\,...\,0 & 1
-  * \end{array} \right) \f$
-  *
-  * \b Affine \b Matrix A (Dim)x(Dim+1):
-  * \f$ \left( \begin{array}{c}
-  * A\\
-  * 0\,...\,0\,1
-  * \end{array} \right) \f$
-  *
-  * \b Column \b vector v (Dim)x(1):
-  * \f$ \left( \begin{array}{c}
-  * v\\
-  * 1
-  * \end{array} \right) \f$
-  *
-  * \b Set \b of \b column \b vectors V1...Vn (Dim)x(n):
-  * \f$ \left( \begin{array}{ccc}
-  * v_1 & ... & v_n\\
-  * 1 & ... & 1
-  * \end{array} \right) \f$
-  *
-  * The concatenation of a Transform object with any kind of other transformation
-  * always returns a Transform object.
-  *
-  * A little exception to the "as pure matrix product" rule is the case of the
-  * transformation of non homogeneous vectors by an affine transformation. In
-  * that case the last matrix row can be ignored, and the product returns non
-  * homogeneous vectors.
-  *
-  * Since, for instance, a Dim x Dim matrix is interpreted as a linear transformation,
-  * it is not possible to directly transform Dim vectors stored in a Dim x Dim matrix.
-  * The solution is either to use a Dim x Dynamic matrix or explicitly request a
-  * vector transformation by making the vector homogeneous:
-  * \code
-  * m' = T * m.colwise().homogeneous();
-  * \endcode
-  * Note that there is zero overhead.
-  *
-  * Conversion methods from/to Qt's QMatrix and QTransform are available if the
-  * preprocessor token EIGEN_QT_SUPPORT is defined.
-  *
-  * This class can be extended with the help of the plugin mechanism described on the page
-  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TRANSFORM_PLUGIN.
-  *
-  * \sa class Matrix, class Quaternion
-  */
-template<typename _Scalar, int _Dim, int _Mode, int _Options>
-class Transform
-{
-public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))
-  enum {
-    Mode = _Mode,
-    Options = _Options,
-    Dim = _Dim,     ///< space dimension in which the transformation holds
-    HDim = _Dim+1,  ///< size of a respective homogeneous vector
-    Rows = int(Mode)==(AffineCompact) ? Dim : HDim
-  };
-  /** the scalar type of the coefficients */
-  typedef _Scalar Scalar;
-  typedef DenseIndex Index;
-  /** type of the matrix used to represent the transformation */
-  typedef typename internal::make_proper_matrix_type<Scalar,Rows,HDim,Options>::type MatrixType;
-  /** constified MatrixType */
-  typedef const MatrixType ConstMatrixType;
-  /** type of the matrix used to represent the linear part of the transformation */
-  typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
-  /** type of read/write reference to the linear part of the transformation */
-  typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact)> LinearPart;
-  /** type of read reference to the linear part of the transformation */
-  typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact)> ConstLinearPart;
-  /** type of read/write reference to the affine part of the transformation */
-  typedef typename internal::conditional<int(Mode)==int(AffineCompact),
-                              MatrixType&,
-                              Block<MatrixType,Dim,HDim> >::type AffinePart;
-  /** type of read reference to the affine part of the transformation */
-  typedef typename internal::conditional<int(Mode)==int(AffineCompact),
-                              const MatrixType&,
-                              const Block<const MatrixType,Dim,HDim> >::type ConstAffinePart;
-  /** type of a vector */
-  typedef Matrix<Scalar,Dim,1> VectorType;
-  /** type of a read/write reference to the translation part of the transformation */
-  typedef Block<MatrixType,Dim,1,int(Mode)==(AffineCompact)> TranslationPart;
-  /** type of a read reference to the translation part of the transformation */
-  typedef const Block<ConstMatrixType,Dim,1,int(Mode)==(AffineCompact)> ConstTranslationPart;
-  /** corresponding translation type */
-  typedef Translation<Scalar,Dim> TranslationType;
-  
-  // this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0
-  enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) };
-  /** The return type of the product between a diagonal matrix and a transform */
-  typedef Transform<Scalar,Dim,TransformTimeDiagonalMode> TransformTimeDiagonalReturnType;
-
-protected:
-
-  MatrixType m_matrix;
-
-public:
-
-  /** Default constructor without initialization of the meaningful coefficients.
-    * If Mode==Affine, then the last row is set to [0 ... 0 1] */
-  inline Transform()
-  {
-    check_template_params();
-    if (int(Mode)==Affine)
-      makeAffine();
-  }
-
-  inline Transform(const Transform& other)
-  {
-    check_template_params();
-    m_matrix = other.m_matrix;
-  }
-
-  inline explicit Transform(const TranslationType& t)
-  {
-    check_template_params();
-    *this = t;
-  }
-  inline explicit Transform(const UniformScaling<Scalar>& s)
-  {
-    check_template_params();
-    *this = s;
-  }
-  template<typename Derived>
-  inline explicit Transform(const RotationBase<Derived, Dim>& r)
-  {
-    check_template_params();
-    *this = r;
-  }
-
-  inline Transform& operator=(const Transform& other)
-  { m_matrix = other.m_matrix; return *this; }
-
-  typedef internal::transform_take_affine_part<Transform> take_affine_part;
-
-  /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */
-  template<typename OtherDerived>
-  inline explicit Transform(const EigenBase<OtherDerived>& other)
-  {
-    EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value),
-      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY);
-
-    check_template_params();
-    internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());
-  }
-
-  /** Set \c *this from a Dim^2 or (Dim+1)^2 matrix. */
-  template<typename OtherDerived>
-  inline Transform& operator=(const EigenBase<OtherDerived>& other)
-  {
-    EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value),
-      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY);
-
-    internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());
-    return *this;
-  }
-  
-  template<int OtherOptions>
-  inline Transform(const Transform<Scalar,Dim,Mode,OtherOptions>& other)
-  {
-    check_template_params();
-    // only the options change, we can directly copy the matrices
-    m_matrix = other.matrix();
-  }
-
-  template<int OtherMode,int OtherOptions>
-  inline Transform(const Transform<Scalar,Dim,OtherMode,OtherOptions>& other)
-  {
-    check_template_params();
-    // prevent conversions as:
-    // Affine | AffineCompact | Isometry = Projective
-    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)),
-                        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
-
-    // prevent conversions as:
-    // Isometry = Affine | AffineCompact
-    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)),
-                        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
-
-    enum { ModeIsAffineCompact = Mode == int(AffineCompact),
-           OtherModeIsAffineCompact = OtherMode == int(AffineCompact)
-    };
-
-    if(ModeIsAffineCompact == OtherModeIsAffineCompact)
-    {
-      // We need the block expression because the code is compiled for all
-      // combinations of transformations and will trigger a compile time error
-      // if one tries to assign the matrices directly
-      m_matrix.template block<Dim,Dim+1>(0,0) = other.matrix().template block<Dim,Dim+1>(0,0);
-      makeAffine();
-    }
-    else if(OtherModeIsAffineCompact)
-    {
-      typedef typename Transform<Scalar,Dim,OtherMode,OtherOptions>::MatrixType OtherMatrixType;
-      internal::transform_construct_from_matrix<OtherMatrixType,Mode,Options,Dim,HDim>::run(this, other.matrix());
-    }
-    else
-    {
-      // here we know that Mode == AffineCompact and OtherMode != AffineCompact.
-      // if OtherMode were Projective, the static assert above would already have caught it.
-      // So the only possibility is that OtherMode == Affine
-      linear() = other.linear();
-      translation() = other.translation();
-    }
-  }
-
-  template<typename OtherDerived>
-  Transform(const ReturnByValue<OtherDerived>& other)
-  {
-    check_template_params();
-    other.evalTo(*this);
-  }
-
-  template<typename OtherDerived>
-  Transform& operator=(const ReturnByValue<OtherDerived>& other)
-  {
-    other.evalTo(*this);
-    return *this;
-  }
-
-  #ifdef EIGEN_QT_SUPPORT
-  inline Transform(const QMatrix& other);
-  inline Transform& operator=(const QMatrix& other);
-  inline QMatrix toQMatrix(void) const;
-  inline Transform(const QTransform& other);
-  inline Transform& operator=(const QTransform& other);
-  inline QTransform toQTransform(void) const;
-  #endif
-
-  /** shortcut for m_matrix(row,col);
-    * \sa MatrixBase::operator(Index,Index) const */
-  inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); }
-  /** shortcut for m_matrix(row,col);
-    * \sa MatrixBase::operator(Index,Index) */
-  inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); }
-
-  /** \returns a read-only expression of the transformation matrix */
-  inline const MatrixType& matrix() const { return m_matrix; }
-  /** \returns a writable expression of the transformation matrix */
-  inline MatrixType& matrix() { return m_matrix; }
-
-  /** \returns a read-only expression of the linear part of the transformation */
-  inline ConstLinearPart linear() const { return ConstLinearPart(m_matrix,0,0); }
-  /** \returns a writable expression of the linear part of the transformation */
-  inline LinearPart linear() { return LinearPart(m_matrix,0,0); }
-
-  /** \returns a read-only expression of the Dim x HDim affine part of the transformation */
-  inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); }
-  /** \returns a writable expression of the Dim x HDim affine part of the transformation */
-  inline AffinePart affine() { return take_affine_part::run(m_matrix); }
-
-  /** \returns a read-only expression of the translation vector of the transformation */
-  inline ConstTranslationPart translation() const { return ConstTranslationPart(m_matrix,0,Dim); }
-  /** \returns a writable expression of the translation vector of the transformation */
-  inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); }
-
-  /** \returns an expression of the product between the transform \c *this and a matrix expression \a other
-    *
-    * The right hand side \a other might be either:
-    * \li a vector of size Dim,
-    * \li a homogeneous vector of size Dim+1,
-    * \li a set of vectors of size Dim x Dynamic,
-    * \li a set of homogeneous vectors of size Dim+1 x Dynamic,
-    * \li a linear transformation matrix of size Dim x Dim,
-    * \li an affine transformation matrix of size Dim x Dim+1,
-    * \li a transformation matrix of size Dim+1 x Dim+1.
-    */
-  // note: this function is defined here because some compilers cannot find the respective declaration
-  template<typename OtherDerived>
-  EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl<Transform, OtherDerived>::ResultType
-  operator * (const EigenBase<OtherDerived> &other) const
-  { return internal::transform_right_product_impl<Transform, OtherDerived>::run(*this,other.derived()); }
-
-  /** \returns the product expression of a transformation matrix \a a times a transform \a b
-    *
-    * The left hand side \a a might be either:
-    * \li a linear transformation matrix of size Dim x Dim,
-    * \li an affine transformation matrix of size Dim x Dim+1,
-    * \li a general transformation matrix of size Dim+1 x Dim+1.
-    */
-  template<typename OtherDerived> friend
-  inline const typename internal::transform_left_product_impl<OtherDerived,Mode,Options,_Dim,_Dim+1>::ResultType
-    operator * (const EigenBase<OtherDerived> &a, const Transform &b)
-  { return internal::transform_left_product_impl<OtherDerived,Mode,Options,Dim,HDim>::run(a.derived(),b); }
-
-  /** \returns The product expression of a transform \a a times a diagonal matrix \a b
-    *
-    * The rhs diagonal matrix is interpreted as an affine scaling transformation. The
-    * product results in a Transform of the same type (mode) as the lhs only if the lhs
-    * mode is not an isometry. Otherwise, the returned transform is an affinity.
-    */
-  template<typename DiagonalDerived>
-  inline const TransformTimeDiagonalReturnType
-    operator * (const DiagonalBase<DiagonalDerived> &b) const
-  {
-    TransformTimeDiagonalReturnType res(*this);
-    res.linear() *= b;
-    return res;
-  }
-
-  /** \returns The product expression of a diagonal matrix \a a times a transform \a b
-    *
-    * The lhs diagonal matrix is interpreted as an affine scaling transformation. The
-    * product results in a Transform of the same type (mode) as the rhs only if the rhs
-    * mode is not an isometry. Otherwise, the returned transform is an affinity.
-    */
-  template<typename DiagonalDerived>
-  friend inline TransformTimeDiagonalReturnType
-    operator * (const DiagonalBase<DiagonalDerived> &a, const Transform &b)
-  {
-    TransformTimeDiagonalReturnType res;
-    res.linear().noalias() = a*b.linear();
-    res.translation().noalias() = a*b.translation();
-    if (Mode!=int(AffineCompact))
-      res.matrix().row(Dim) = b.matrix().row(Dim);
-    return res;
-  }
-
-  template<typename OtherDerived>
-  inline Transform& operator*=(const EigenBase<OtherDerived>& other) { return *this = *this * other; }
-
-  /** Concatenates two transformations */
-  inline const Transform operator * (const Transform& other) const
-  {
-    return internal::transform_transform_product_impl<Transform,Transform>::run(*this,other);
-  }
-  
-  #ifdef __INTEL_COMPILER
-private:
-  // this intermediate structure permits working around a bug in ICC 11:
-  //   error: template instantiation resulted in unexpected function type of "Eigen::Transform<double, 3, 32, 0>
-  //             (const Eigen::Transform<double, 3, 2, 0> &) const"
-  //  (the meaning of a name may have changed since the template declaration -- the type of the template is:
-  // "Eigen::internal::transform_transform_product_impl<Eigen::Transform<double, 3, 32, 0>,
-  //     Eigen::Transform<double, 3, Mode, Options>, <expression>>::ResultType (const Eigen::Transform<double, 3, Mode, Options> &) const")
-  // 
-  template<int OtherMode,int OtherOptions> struct icc_11_workaround
-  {
-    typedef internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> > ProductType;
-    typedef typename ProductType::ResultType ResultType;
-  };
-  
-public:
-  /** Concatenates two different transformations */
-  template<int OtherMode,int OtherOptions>
-  inline typename icc_11_workaround<OtherMode,OtherOptions>::ResultType
-    operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const
-  {
-    typedef typename icc_11_workaround<OtherMode,OtherOptions>::ProductType ProductType;
-    return ProductType::run(*this,other);
-  }
-  #else
-  /** Concatenates two different transformations */
-  template<int OtherMode,int OtherOptions>
-  inline typename internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::ResultType
-    operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const
-  {
-    return internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::run(*this,other);
-  }
-  #endif
-
-  /** \sa MatrixBase::setIdentity() */
-  void setIdentity() { m_matrix.setIdentity(); }
-
-  /**
-   * \brief Returns an identity transformation.
-   * \todo In the future this function should be returning a Transform expression.
-   */
-  static const Transform Identity()
-  {
-    return Transform(MatrixType::Identity());
-  }
-
-  template<typename OtherDerived>
-  inline Transform& scale(const MatrixBase<OtherDerived> &other);
-
-  template<typename OtherDerived>
-  inline Transform& prescale(const MatrixBase<OtherDerived> &other);
-
-  inline Transform& scale(const Scalar& s);
-  inline Transform& prescale(const Scalar& s);
-
-  template<typename OtherDerived>
-  inline Transform& translate(const MatrixBase<OtherDerived> &other);
-
-  template<typename OtherDerived>
-  inline Transform& pretranslate(const MatrixBase<OtherDerived> &other);
-
-  template<typename RotationType>
-  inline Transform& rotate(const RotationType& rotation);
-
-  template<typename RotationType>
-  inline Transform& prerotate(const RotationType& rotation);
-
-  Transform& shear(const Scalar& sx, const Scalar& sy);
-  Transform& preshear(const Scalar& sx, const Scalar& sy);
-
-  inline Transform& operator=(const TranslationType& t);
-  inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }
-  inline Transform operator*(const TranslationType& t) const;
-
-  inline Transform& operator=(const UniformScaling<Scalar>& t);
-  inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }
-  inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator*(const UniformScaling<Scalar>& s) const
-  {
-    Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode),Options> res = *this;
-    res.scale(s.factor());
-    return res;
-  }
-
-  inline Transform& operator*=(const DiagonalMatrix<Scalar,Dim>& s) { linear() *= s; return *this; }
-
-  template<typename Derived>
-  inline Transform& operator=(const RotationBase<Derived,Dim>& r);
-  template<typename Derived>
-  inline Transform& operator*=(const RotationBase<Derived,Dim>& r) { return rotate(r.toRotationMatrix()); }
-  template<typename Derived>
-  inline Transform operator*(const RotationBase<Derived,Dim>& r) const;
-
-  const LinearMatrixType rotation() const;
-  template<typename RotationMatrixType, typename ScalingMatrixType>
-  void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const;
-  template<typename ScalingMatrixType, typename RotationMatrixType>
-  void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const;
-
-  template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
-  Transform& fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
-    const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale);
-
-  inline Transform inverse(TransformTraits traits = (TransformTraits)Mode) const;
-
-  /** \returns a const pointer to the column major internal matrix */
-  const Scalar* data() const { return m_matrix.data(); }
-  /** \returns a non-const pointer to the column major internal matrix */
-  Scalar* data() { return m_matrix.data(); }
-
-  /** \returns \c *this with scalar type cast to \a NewScalarType
-    *
-    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
-    * then this function smartly returns a const reference to \c *this.
-    */
-  template<typename NewScalarType>
-  inline typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type cast() const
-  { return typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type(*this); }
-
-  /** Copy constructor with scalar type conversion */
-  template<typename OtherScalarType>
-  inline explicit Transform(const Transform<OtherScalarType,Dim,Mode,Options>& other)
-  {
-    check_template_params();
-    m_matrix = other.matrix().template cast<Scalar>();
-  }
-
-  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
-    * determined by \a prec.
-    *
-    * \sa MatrixBase::isApprox() */
-  bool isApprox(const Transform& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
-  { return m_matrix.isApprox(other.m_matrix, prec); }
-
-  /** Sets the last row to [0 ... 0 1]
-    */
-  void makeAffine()
-  {
-    if(int(Mode)!=int(AffineCompact))
-    {
-      matrix().template block<1,Dim>(Dim,0).setZero();
-      matrix().coeffRef(Dim,Dim) = Scalar(1);
-    }
-  }
-
-  /** \internal
-    * \returns the Dim x Dim linear part if the transformation is affine,
-    *          and the HDim x Dim part for projective transformations.
-    */
-  inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt()
-  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }
-  /** \internal
-    * \returns the Dim x Dim linear part if the transformation is affine,
-    *          and the HDim x Dim part for projective transformations.
-    */
-  inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt() const
-  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }
-
-  /** \internal
-    * \returns the translation part if the transformation is affine,
-    *          and the last column for projective transformations.
-    */
-  inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt()
-  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }
-  /** \internal
-    * \returns the translation part if the transformation is affine,
-    *          and the last column for projective transformations.
-    */
-  inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt() const
-  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }
-
-
-  #ifdef EIGEN_TRANSFORM_PLUGIN
-  #include EIGEN_TRANSFORM_PLUGIN
-  #endif
-  
-protected:
-  #ifndef EIGEN_PARSED_BY_DOXYGEN
-    static EIGEN_STRONG_INLINE void check_template_params()
-    {
-      EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)
-    }
-  #endif
-
-};
-
-/** \ingroup Geometry_Module */
-typedef Transform<float,2,Isometry> Isometry2f;
-/** \ingroup Geometry_Module */
-typedef Transform<float,3,Isometry> Isometry3f;
-/** \ingroup Geometry_Module */
-typedef Transform<double,2,Isometry> Isometry2d;
-/** \ingroup Geometry_Module */
-typedef Transform<double,3,Isometry> Isometry3d;
-
-/** \ingroup Geometry_Module */
-typedef Transform<float,2,Affine> Affine2f;
-/** \ingroup Geometry_Module */
-typedef Transform<float,3,Affine> Affine3f;
-/** \ingroup Geometry_Module */
-typedef Transform<double,2,Affine> Affine2d;
-/** \ingroup Geometry_Module */
-typedef Transform<double,3,Affine> Affine3d;
-
-/** \ingroup Geometry_Module */
-typedef Transform<float,2,AffineCompact> AffineCompact2f;
-/** \ingroup Geometry_Module */
-typedef Transform<float,3,AffineCompact> AffineCompact3f;
-/** \ingroup Geometry_Module */
-typedef Transform<double,2,AffineCompact> AffineCompact2d;
-/** \ingroup Geometry_Module */
-typedef Transform<double,3,AffineCompact> AffineCompact3d;
-
-/** \ingroup Geometry_Module */
-typedef Transform<float,2,Projective> Projective2f;
-/** \ingroup Geometry_Module */
-typedef Transform<float,3,Projective> Projective3f;
-/** \ingroup Geometry_Module */
-typedef Transform<double,2,Projective> Projective2d;
-/** \ingroup Geometry_Module */
-typedef Transform<double,3,Projective> Projective3d;
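-
-/* A short sketch of the composition and application described in the class
-   documentation above, using the typedefs just defined; assuming <Eigen/Geometry>
-   is included and with illustrative variable names.
-
-     Eigen::Affine3d T = Eigen::Translation3d(1.0, 0.0, 0.0)
-                       * Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitZ())
-                       * Eigen::Scaling(2.0, 2.0, 2.0);
-     Eigen::Vector3d v(1.0, 2.0, 3.0);
-     Eigen::Vector3d w = T * v;        // applies the full affine transformation to v
-     Eigen::Matrix4d M = T.matrix();   // the underlying (Dim+1)x(Dim+1) matrix
-*/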
-
-/**************************
-*** Optional QT support ***
-**************************/
-
-#ifdef EIGEN_QT_SUPPORT
-/** Initializes \c *this from a QMatrix assuming the dimension is 2.
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim, int Mode,int Options>
-Transform<Scalar,Dim,Mode,Options>::Transform(const QMatrix& other)
-{
-  check_template_params();
-  *this = other;
-}
-
-/** Set \c *this from a QMatrix assuming the dimension is 2.
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim, int Mode,int Options>
-Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QMatrix& other)
-{
-  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  m_matrix << other.m11(), other.m21(), other.dx(),
-              other.m12(), other.m22(), other.dy(),
-              0, 0, 1;
-  return *this;
-}
-
-/** \returns a QMatrix from \c *this assuming the dimension is 2.
-  *
-  * \warning this conversion might lose data if \c *this is not affine
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-QMatrix Transform<Scalar,Dim,Mode,Options>::toQMatrix(void) const
-{
-  check_template_params();
-  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
-                 m_matrix.coeff(0,1), m_matrix.coeff(1,1),
-                 m_matrix.coeff(0,2), m_matrix.coeff(1,2));
-}
-
-/** Initializes \c *this from a QTransform assuming the dimension is 2.
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim, int Mode,int Options>
-Transform<Scalar,Dim,Mode,Options>::Transform(const QTransform& other)
-{
-  check_template_params();
-  *this = other;
-}
-
-/** Set \c *this from a QTransform assuming the dimension is 2.
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QTransform& other)
-{
-  check_template_params();
-  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  if (Mode == int(AffineCompact))
-    m_matrix << other.m11(), other.m21(), other.dx(),
-                other.m12(), other.m22(), other.dy();
-  else
-    m_matrix << other.m11(), other.m21(), other.dx(),
-                other.m12(), other.m22(), other.dy(),
-                other.m13(), other.m23(), other.m33();
-  return *this;
-}
-
-/** \returns a QTransform from \c *this assuming the dimension is 2.
-  *
-  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-QTransform Transform<Scalar,Dim,Mode,Options>::toQTransform(void) const
-{
-  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  if (Mode == int(AffineCompact))
-    return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
-                      m_matrix.coeff(0,1), m_matrix.coeff(1,1),
-                      m_matrix.coeff(0,2), m_matrix.coeff(1,2));
-  else
-    return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0),
-                      m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1),
-                      m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2));
-}
-#endif
-
-/*********************
-*** Procedural API ***
-*********************/
-
-/** Applies on the right the non uniform scale transformation represented
-  * by the vector \a other to \c *this and returns a reference to \c *this.
-  * \sa prescale()
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-template<typename OtherDerived>
-Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::scale(const MatrixBase<OtherDerived> &other)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
-  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
-  linearExt().noalias() = (linearExt() * other.asDiagonal());
-  return *this;
-}
-
-/** Applies on the right a uniform scale of a factor \a s to \c *this
-  * and returns a reference to \c *this.
-  * \sa prescale(Scalar)
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(const Scalar& s)
-{
-  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
-  linearExt() *= s;
-  return *this;
-}
-
-/** Applies on the left the non uniform scale transformation represented
-  * by the vector \a other to \c *this and returns a reference to \c *this.
-  * \sa scale()
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-template<typename OtherDerived>
-Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::prescale(const MatrixBase<OtherDerived> &other)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
-  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
-  m_matrix.template block<Dim,HDim>(0,0).noalias() = (other.asDiagonal() * m_matrix.template block<Dim,HDim>(0,0));
-  return *this;
-}
-
-/** Applies on the left a uniform scale of a factor \a s to \c *this
-  * and returns a reference to \c *this.
-  * \sa scale(Scalar)
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(const Scalar& s)
-{
-  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
-  m_matrix.template topRows<Dim>() *= s;
-  return *this;
-}
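-
-/* scale() multiplies the transform on the right while prescale() multiplies it on
-   the left; a minimal sketch with illustrative names, assuming <Eigen/Geometry>:
-
-     Eigen::Affine3d T = Eigen::Affine3d::Identity();
-     T.scale(2.0);                              // T = T * S : S acts first on a vector
-     T.prescale(0.5);                           // T = S * T : S acts last on a vector
-     T.scale(Eigen::Vector3d(1.0, 2.0, 3.0));   // non uniform scaling, right applied
-*/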
-
-/** Applies on the right the translation matrix represented by the vector \a other
-  * to \c *this and returns a reference to \c *this.
-  * \sa pretranslate()
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-template<typename OtherDerived>
-Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::translate(const MatrixBase<OtherDerived> &other)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
-  translationExt() += linearExt() * other;
-  return *this;
-}
-
-/** Applies on the left the translation matrix represented by the vector \a other
-  * to \c *this and returns a reference to \c *this.
-  * \sa translate()
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-template<typename OtherDerived>
-Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::pretranslate(const MatrixBase<OtherDerived> &other)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
-  if(int(Mode)==int(Projective))
-    affine() += other * m_matrix.row(Dim);
-  else
-    translation() += other;
-  return *this;
-}
-
-/** Applies on the right the rotation represented by the rotation \a rotation
-  * to \c *this and returns a reference to \c *this.
-  *
-  * The template parameter \a RotationType is the type of the rotation which
-  * must be known by internal::toRotationMatrix<>.
-  *
-  * Natively supported types include:
-  *   - any scalar (2D),
-  *   - a Dim x Dim matrix expression,
-  *   - a Quaternion (3D),
-  *   - an AngleAxis (3D)
-  *
-  * This mechanism is easily extendable to support user types such as Euler angles,
-  * or a pair of Quaternions for 4D rotations.
-  *
-  * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType)
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-template<typename RotationType>
-Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::rotate(const RotationType& rotation)
-{
-  linearExt() *= internal::toRotationMatrix<Scalar,Dim>(rotation);
-  return *this;
-}
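-
-/* The rotation types listed above in action; an illustrative sketch assuming
-   <Eigen/Geometry> is included:
-
-     Eigen::Affine3d T3 = Eigen::Affine3d::Identity();
-     T3.rotate(Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitZ()));  // 3D angle-axis
-     T3.rotate(Eigen::Quaterniond::Identity());                    // 3D quaternion
-     Eigen::Affine2d T2 = Eigen::Affine2d::Identity();
-     T2.rotate(0.25);                                              // 2D plain angle
-*/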
-
-/** Applies on the left the rotation represented by the rotation \a rotation
-  * to \c *this and returns a reference to \c *this.
-  *
-  * See rotate() for further details.
-  *
-  * \sa rotate()
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-template<typename RotationType>
-Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::prerotate(const RotationType& rotation)
-{
-  m_matrix.template block<Dim,HDim>(0,0) = internal::toRotationMatrix<Scalar,Dim>(rotation)
-                                         * m_matrix.template block<Dim,HDim>(0,0);
-  return *this;
-}
-
-/** Applies on the right the shear transformation represented
-  * by the factors \a sx and \a sy to \c *this and returns a reference to \c *this.
-  * \warning 2D only.
-  * \sa preshear()
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::shear(const Scalar& sx, const Scalar& sy)
-{
-  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
-  VectorType tmp = linear().col(0)*sy + linear().col(1);
-  linear() << linear().col(0) + linear().col(1)*sx, tmp;
-  return *this;
-}
-
-/** Applies on the left the shear transformation represented
-  * by the factors \a sx and \a sy to \c *this and returns a reference to \c *this.
-  * \warning 2D only.
-  * \sa shear()
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::preshear(const Scalar& sx, const Scalar& sy)
-{
-  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
-  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
-  m_matrix.template block<Dim,HDim>(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block<Dim,HDim>(0,0);
-  return *this;
-}
-
-/******************************************************
-*** Scaling, Translation and Rotation compatibility ***
-******************************************************/
-
-template<typename Scalar, int Dim, int Mode, int Options>
-inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const TranslationType& t)
-{
-  linear().setIdentity();
-  translation() = t.vector();
-  makeAffine();
-  return *this;
-}
-
-template<typename Scalar, int Dim, int Mode, int Options>
-inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const TranslationType& t) const
-{
-  Transform res = *this;
-  res.translate(t.vector());
-  return res;
-}
-
-template<typename Scalar, int Dim, int Mode, int Options>
-inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const UniformScaling<Scalar>& s)
-{
-  m_matrix.setZero();
-  linear().diagonal().fill(s.factor());
-  makeAffine();
-  return *this;
-}
-
-template<typename Scalar, int Dim, int Mode, int Options>
-template<typename Derived>
-inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const RotationBase<Derived,Dim>& r)
-{
-  linear() = internal::toRotationMatrix<Scalar,Dim>(r);
-  translation().setZero();
-  makeAffine();
-  return *this;
-}
-
-template<typename Scalar, int Dim, int Mode, int Options>
-template<typename Derived>
-inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const RotationBase<Derived,Dim>& r) const
-{
-  Transform res = *this;
-  res.rotate(r.derived());
-  return res;
-}
-
-/************************
-*** Special functions ***
-************************/
-
-/** \returns the rotation part of the transformation
-  *
-  *
-  * \svd_module
-  *
-  * \sa computeRotationScaling(), computeScalingRotation(), class SVD
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-const typename Transform<Scalar,Dim,Mode,Options>::LinearMatrixType
-Transform<Scalar,Dim,Mode,Options>::rotation() const
-{
-  LinearMatrixType result;
-  computeRotationScaling(&result, (LinearMatrixType*)0);
-  return result;
-}
-
-
-/** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being
-  * not necessarily positive.
-  *
-  * If either pointer is zero, the corresponding computation is skipped.
-  *
-  *
-  *
-  * \svd_module
-  *
-  * \sa computeScalingRotation(), rotation(), class SVD
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-template<typename RotationMatrixType, typename ScalingMatrixType>
-void Transform<Scalar,Dim,Mode,Options>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const
-{
-  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
-
-  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
-  VectorType sv(svd.singularValues());
-  sv.coeffRef(0) *= x;
-  if(scaling) scaling->lazyAssign(svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint());
-  if(rotation)
-  {
-    LinearMatrixType m(svd.matrixU());
-    m.col(0) /= x;
-    rotation->lazyAssign(m * svd.matrixV().adjoint());
-  }
-}
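-
-/* A minimal decomposition sketch with illustrative names, assuming <Eigen/Dense>
-   (or at least the Geometry and SVD modules) is included:
-
-     Eigen::Affine3d T(Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitY()));
-     T.scale(Eigen::Vector3d(1.0, 2.0, 3.0));
-     Eigen::Matrix3d R, S;
-     T.computeRotationScaling(&R, &S);   // T.linear() is (approximately) R * S
-*/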
-
-/** decomposes the linear part of the transformation as a product scaling x rotation, the scaling being
-  * not necessarily positive.
-  *
-  * If either pointer is zero, the corresponding computation is skipped.
-  *
-  *
-  *
-  * \svd_module
-  *
-  * \sa computeRotationScaling(), rotation(), class SVD
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-template<typename ScalingMatrixType, typename RotationMatrixType>
-void Transform<Scalar,Dim,Mode,Options>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const
-{
-  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
-
-  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
-  VectorType sv(svd.singularValues());
-  sv.coeffRef(0) *= x;
-  if(scaling) scaling->lazyAssign(svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint());
-  if(rotation)
-  {
-    LinearMatrixType m(svd.matrixU());
-    m.col(0) /= x;
-    rotation->lazyAssign(m * svd.matrixV().adjoint());
-  }
-}
-
-/** Convenient method to set \c *this from a position, orientation and scale
-  * of a 3D object.
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
-Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
-  const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale)
-{
-  linear() = internal::toRotationMatrix<Scalar,Dim>(orientation);
-  linear() *= scale.asDiagonal();
-  translation() = position;
-  makeAffine();
-  return *this;
-}
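-
-/* Illustrative use of fromPositionOrientationScale(), assuming <Eigen/Geometry>:
-
-     Eigen::Affine3d T;
-     T.fromPositionOrientationScale(Eigen::Vector3d(1.0, 2.0, 3.0),
-                                    Eigen::Quaterniond::Identity(),
-                                    Eigen::Vector3d(2.0, 2.0, 2.0));
-     // linear() becomes rotation * scale.asDiagonal(), translation() the position
-*/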
-
-namespace internal {
-
-// selector needed to avoid taking the inverse of a 3x4 matrix
-template<typename TransformType, int Mode=TransformType::Mode>
-struct projective_transform_inverse
-{
-  static inline void run(const TransformType&, TransformType&)
-  {}
-};
-
-template<typename TransformType>
-struct projective_transform_inverse<TransformType, Projective>
-{
-  static inline void run(const TransformType& m, TransformType& res)
-  {
-    res.matrix() = m.matrix().inverse();
-  }
-};
-
-} // end namespace internal
-
-
-/**
-  *
-  * \returns the inverse transformation according to some given knowledge
-  * on \c *this.
-  *
-  * \param hint allows one to optimize the inversion process when the transformation
-  * is known not to be a general transformation (optional). The possible values are:
-  *  - #Projective if the transformation is not necessarily affine, i.e., if the
-  *    last row is not guaranteed to be [0 ... 0 1]
-  *  - #Affine if the last row can be assumed to be [0 ... 0 1]
-  *  - #Isometry if the transformation is only a concatenation of translations
-  *    and rotations.
-  *  The default is the template class parameter \c Mode.
-  *
-  * \warning unless \a hint is always set to #Isometry, this function
-  * requires the generic inverse method of MatrixBase defined in the LU module. If
-  * you forget to include this module, then you will get hard to debug linking errors.
-  *
-  * \sa MatrixBase::inverse()
-  */
-template<typename Scalar, int Dim, int Mode, int Options>
-Transform<Scalar,Dim,Mode,Options>
-Transform<Scalar,Dim,Mode,Options>::inverse(TransformTraits hint) const
-{
-  Transform res;
-  if (hint == Projective)
-  {
-    internal::projective_transform_inverse<Transform>::run(*this, res);
-  }
-  else
-  {
-    if (hint == Isometry)
-    {
-      res.matrix().template topLeftCorner<Dim,Dim>() = linear().transpose();
-    }
-    else if(hint&Affine)
-    {
-      res.matrix().template topLeftCorner<Dim,Dim>() = linear().inverse();
-    }
-    else
-    {
-      eigen_assert(false && "Invalid transform traits in Transform::inverse");
-    }
-    // translation and remaining parts
-    res.matrix().template topRightCorner<Dim,1>()
-      = - res.matrix().template topLeftCorner<Dim,Dim>() * translation();
-    res.makeAffine(); // we do need this, because in the beginning res is uninitialized
-  }
-  return res;
-}
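-
-/* Passing a hint selects the cheapest valid inversion path; an illustrative sketch
-   assuming <Eigen/Geometry> is included:
-
-     Eigen::Affine3d T = Eigen::Translation3d(1.0, 2.0, 3.0)
-                       * Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitX());
-     Eigen::Affine3d Ti = T.inverse(Eigen::Isometry); // transpose + translation only
-     Eigen::Affine3d Tg = T.inverse();                // default hint is Mode, here Affine
-*/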
-
-namespace internal {
-
-/*****************************************************
-*** Specializations of take affine part            ***
-*****************************************************/
-
-template<typename TransformType> struct transform_take_affine_part {
-  typedef typename TransformType::MatrixType MatrixType;
-  typedef typename TransformType::AffinePart AffinePart;
-  typedef typename TransformType::ConstAffinePart ConstAffinePart;
-  static inline AffinePart run(MatrixType& m)
-  { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }
-  static inline ConstAffinePart run(const MatrixType& m)
-  { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }
-};
-
-template<typename Scalar, int Dim, int Options>
-struct transform_take_affine_part<Transform<Scalar,Dim,AffineCompact, Options> > {
-  typedef typename Transform<Scalar,Dim,AffineCompact,Options>::MatrixType MatrixType;
-  static inline MatrixType& run(MatrixType& m) { return m; }
-  static inline const MatrixType& run(const MatrixType& m) { return m; }
-};
-
-/*****************************************************
-*** Specializations of construct from matrix       ***
-*****************************************************/
-
-template<typename Other, int Mode, int Options, int Dim, int HDim>
-struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,Dim>
-{
-  static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
-  {
-    transform->linear() = other;
-    transform->translation().setZero();
-    transform->makeAffine();
-  }
-};
-
-template<typename Other, int Mode, int Options, int Dim, int HDim>
-struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,HDim>
-{
-  static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
-  {
-    transform->affine() = other;
-    transform->makeAffine();
-  }
-};
-
-template<typename Other, int Mode, int Options, int Dim, int HDim>
-struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, HDim,HDim>
-{
-  static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
-  { transform->matrix() = other; }
-};
-
-template<typename Other, int Options, int Dim, int HDim>
-struct transform_construct_from_matrix<Other, AffineCompact,Options,Dim,HDim, HDim,HDim>
-{
-  static inline void run(Transform<typename Other::Scalar,Dim,AffineCompact,Options> *transform, const Other& other)
-  { transform->matrix() = other.template block<Dim,HDim>(0,0); }
-};
-
-/**********************************************************
-***   Specializations of operator* with rhs EigenBase   ***
-**********************************************************/
-
-template<int LhsMode,int RhsMode>
-struct transform_product_result
-{
-  enum 
-  { 
-    Mode =
-      (LhsMode == (int)Projective    || RhsMode == (int)Projective    ) ? Projective :
-      (LhsMode == (int)Affine        || RhsMode == (int)Affine        ) ? Affine :
-      (LhsMode == (int)AffineCompact || RhsMode == (int)AffineCompact ) ? AffineCompact :
-      (LhsMode == (int)Isometry      || RhsMode == (int)Isometry      ) ? Isometry : Projective
-  };
-};
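-
-// For instance, the trait above maps Isometry * Isometry to Isometry,
-// Isometry * Affine to Affine, and any product involving a Projective
-// operand to Projective.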
-
-template< typename TransformType, typename MatrixType >
-struct transform_right_product_impl< TransformType, MatrixType, 0 >
-{
-  typedef typename MatrixType::PlainObject ResultType;
-
-  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
-  {
-    return T.matrix() * other;
-  }
-};
-
-template< typename TransformType, typename MatrixType >
-struct transform_right_product_impl< TransformType, MatrixType, 1 >
-{
-  enum { 
-    Dim = TransformType::Dim, 
-    HDim = TransformType::HDim,
-    OtherRows = MatrixType::RowsAtCompileTime,
-    OtherCols = MatrixType::ColsAtCompileTime
-  };
-
-  typedef typename MatrixType::PlainObject ResultType;
-
-  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
-  {
-    EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
-
-    typedef Block<ResultType, Dim, OtherCols, int(MatrixType::RowsAtCompileTime)==Dim> TopLeftLhs;
-
-    ResultType res(other.rows(),other.cols());
-    TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other;
-    res.row(OtherRows-1) = other.row(OtherRows-1);
-    
-    return res;
-  }
-};
-
-template< typename TransformType, typename MatrixType >
-struct transform_right_product_impl< TransformType, MatrixType, 2 >
-{
-  enum { 
-    Dim = TransformType::Dim, 
-    HDim = TransformType::HDim,
-    OtherRows = MatrixType::RowsAtCompileTime,
-    OtherCols = MatrixType::ColsAtCompileTime
-  };
-
-  typedef typename MatrixType::PlainObject ResultType;
-
-  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
-  {
-    EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
-
-    typedef Block<ResultType, Dim, OtherCols, true> TopLeftLhs;
-    ResultType res(Replicate<typename TransformType::ConstTranslationPart, 1, OtherCols>(T.translation(),1,other.cols()));
-    TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() += T.linear() * other;
-
-    return res;
-  }
-};
-
-/**********************************************************
-***   Specializations of operator* with lhs EigenBase   ***
-**********************************************************/
-
-// generic HDim x HDim matrix * T => Projective
-template<typename Other,int Mode, int Options, int Dim, int HDim>
-struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, HDim,HDim>
-{
-  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
-  typedef typename TransformType::MatrixType MatrixType;
-  typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;
-  static ResultType run(const Other& other,const TransformType& tr)
-  { return ResultType(other * tr.matrix()); }
-};
-
-// generic HDim x HDim matrix * AffineCompact => Projective
-template<typename Other, int Options, int Dim, int HDim>
-struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, HDim,HDim>
-{
-  typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;
-  typedef typename TransformType::MatrixType MatrixType;
-  typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;
-  static ResultType run(const Other& other,const TransformType& tr)
-  {
-    ResultType res;
-    res.matrix().noalias() = other.template block<HDim,Dim>(0,0) * tr.matrix();
-    res.matrix().col(Dim) += other.col(Dim);
-    return res;
-  }
-};
-
-// affine matrix * T
-template<typename Other,int Mode, int Options, int Dim, int HDim>
-struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,HDim>
-{
-  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
-  typedef typename TransformType::MatrixType MatrixType;
-  typedef TransformType ResultType;
-  static ResultType run(const Other& other,const TransformType& tr)
-  {
-    ResultType res;
-    res.affine().noalias() = other * tr.matrix();
-    res.matrix().row(Dim) = tr.matrix().row(Dim);
-    return res;
-  }
-};
-
-// affine matrix * AffineCompact
-template<typename Other, int Options, int Dim, int HDim>
-struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, Dim,HDim>
-{
-  typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;
-  typedef typename TransformType::MatrixType MatrixType;
-  typedef TransformType ResultType;
-  static ResultType run(const Other& other,const TransformType& tr)
-  {
-    ResultType res;
-    res.matrix().noalias() = other.template block<Dim,Dim>(0,0) * tr.matrix();
-    res.translation() += other.col(Dim);
-    return res;
-  }
-};
-
-// linear matrix * T
-template<typename Other,int Mode, int Options, int Dim, int HDim>
-struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,Dim>
-{
-  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
-  typedef typename TransformType::MatrixType MatrixType;
-  typedef TransformType ResultType;
-  static ResultType run(const Other& other, const TransformType& tr)
-  {
-    TransformType res;
-    if(Mode!=int(AffineCompact))
-      res.matrix().row(Dim) = tr.matrix().row(Dim);
-    res.matrix().template topRows<Dim>().noalias()
-      = other * tr.matrix().template topRows<Dim>();
-    return res;
-  }
-};
-
-/**********************************************************
-*** Specializations of operator* with another Transform ***
-**********************************************************/
-
-template<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>
-struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,false >
-{
-  enum { ResultMode = transform_product_result<LhsMode,RhsMode>::Mode };
-  typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;
-  typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;
-  typedef Transform<Scalar,Dim,ResultMode,LhsOptions> ResultType;
-  static ResultType run(const Lhs& lhs, const Rhs& rhs)
-  {
-    ResultType res;
-    res.linear() = lhs.linear() * rhs.linear();
-    res.translation() = lhs.linear() * rhs.translation() + lhs.translation();
-    res.makeAffine();
-    return res;
-  }
-};
-
-template<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>
-struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,true >
-{
-  typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;
-  typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;
-  typedef Transform<Scalar,Dim,Projective> ResultType;
-  static ResultType run(const Lhs& lhs, const Rhs& rhs)
-  {
-    return ResultType( lhs.matrix() * rhs.matrix() );
-  }
-};
-
-template<typename Scalar, int Dim, int LhsOptions, int RhsOptions>
-struct transform_transform_product_impl<Transform<Scalar,Dim,AffineCompact,LhsOptions>,Transform<Scalar,Dim,Projective,RhsOptions>,true >
-{
-  typedef Transform<Scalar,Dim,AffineCompact,LhsOptions> Lhs;
-  typedef Transform<Scalar,Dim,Projective,RhsOptions> Rhs;
-  typedef Transform<Scalar,Dim,Projective> ResultType;
-  static ResultType run(const Lhs& lhs, const Rhs& rhs)
-  {
-    ResultType res;
-    res.matrix().template topRows<Dim>() = lhs.matrix() * rhs.matrix();
-    res.matrix().row(Dim) = rhs.matrix().row(Dim);
-    return res;
-  }
-};
-
-template<typename Scalar, int Dim, int LhsOptions, int RhsOptions>
-struct transform_transform_product_impl<Transform<Scalar,Dim,Projective,LhsOptions>,Transform<Scalar,Dim,AffineCompact,RhsOptions>,true >
-{
-  typedef Transform<Scalar,Dim,Projective,LhsOptions> Lhs;
-  typedef Transform<Scalar,Dim,AffineCompact,RhsOptions> Rhs;
-  typedef Transform<Scalar,Dim,Projective> ResultType;
-  static ResultType run(const Lhs& lhs, const Rhs& rhs)
-  {
-    ResultType res(lhs.matrix().template leftCols<Dim>() * rhs.matrix());
-    res.matrix().col(Dim) += lhs.matrix().col(Dim);
-    return res;
-  }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_TRANSFORM_H
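
The specializations deleted above implement operator* between two Transform objects: the result mode is promoted to the more general of the two operand modes (for instance, multiplying with a Projective transform yields a Projective result). A minimal sketch of how this surfaces at the public Eigen/Geometry API level; the concrete transforms below are only illustrative:

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      using namespace Eigen;
      // An affine 3D transform: a rotation about Z followed by a translation.
      Affine3d A = Translation3d(1.0, 0.0, 0.0) * AngleAxisd(0.785398, Vector3d::UnitZ());
      // A projective transform built from an arbitrary 4x4 matrix.
      Projective3d P(Matrix4d::Random());
      // Affine * Projective promotes the result to Projective, which is what the
      // transform_transform_product_impl specializations above dispatch to.
      Projective3d C = A * P;
      std::cout << C.matrix() << std::endl;
      return 0;
    }
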
diff --git a/resources/3rdparty/eigen/Eigen/src/Geometry/Umeyama.h b/resources/3rdparty/eigen/Eigen/src/Geometry/Umeyama.h
deleted file mode 100644
index 345b47e0c..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Geometry/Umeyama.h
+++ /dev/null
@@ -1,177 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Hauke Heibel <hauke.heibel@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_UMEYAMA_H
-#define EIGEN_UMEYAMA_H
-
-// This file requires the user to include 
-// * Eigen/Core
-// * Eigen/LU 
-// * Eigen/SVD
-// * Eigen/Array
-
-namespace Eigen { 
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-
-// These helpers are required since they allow mixed types to be used as parameters
-// for the Umeyama. The problem with mixed parameters is that the return type
-// cannot trivially be deduced when float and double types are mixed.
-namespace internal {
-
-// Compile time return type deduction for different MatrixBase types.
-// Different means here different alignment and parameters but the same underlying
-// real scalar type.
-template<typename MatrixType, typename OtherMatrixType>
-struct umeyama_transform_matrix_type
-{
-  enum {
-    MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime),
-
-    // When possible we want to choose some small fixed size value since the result
-    // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want.
-    HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? Dynamic : int(MinRowsAtCompileTime)+1
-  };
-
-  typedef Matrix<typename traits<MatrixType>::Scalar,
-    HomogeneousDimension,
-    HomogeneousDimension,
-    AutoAlign | (traits<MatrixType>::Flags & RowMajorBit ? RowMajor : ColMajor),
-    HomogeneousDimension,
-    HomogeneousDimension
-  > type;
-};
-
-}
-
-#endif
-
-/**
-* \geometry_module \ingroup Geometry_Module
-*
-* \brief Returns the transformation between two point sets.
-*
-* The algorithm is based on:
-* "Least-squares estimation of transformation parameters between two point patterns",
-* Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573
-*
-* It estimates parameters \f$ c, \mathbf{R}, \f$ and \f$ \mathbf{t} \f$ such that
-* \f{align*}
-*   \frac{1}{n} \sum_{i=1}^n \vert\vert y_i - (c\mathbf{R}x_i + \mathbf{t}) \vert\vert_2^2
-* \f}
-* is minimized.
-*
-* The algorithm is based on the analysis of the covariance matrix
-* \f$ \Sigma_{\mathbf{x}\mathbf{y}} \in \mathbb{R}^{d \times d} \f$
-* of the input point sets \f$ \mathbf{x} \f$ and \f$ \mathbf{y} \f$ where
-* \f$d\f$ corresponds to the dimension (which is typically small).
-* The analysis involves an SVD of complexity \f$O(d^3)\f$,
-* though the actual computational effort lies in the covariance
-* matrix computation, which has an asymptotic lower bound of \f$O(dm)\f$ when
-* the input point sets have dimension \f$d \times m\f$.
-*
-* Currently the method works only for floating point matrices.
-*
-* \todo Should the return type of umeyama() become a Transform?
-*
-* \param src Source points \f$ \mathbf{x} = \left( x_1, \hdots, x_n \right) \f$.
-* \param dst Destination points \f$ \mathbf{y} = \left( y_1, \hdots, y_n \right) \f$.
-* \param with_scaling Sets \f$ c=1 \f$ when <code>false</code> is passed.
-* \return The homogeneous transformation 
-* \f{align*}
-*   T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix}
-* \f}
-* minimizing the residual above. This transformation is always returned as an
-* Eigen::Matrix.
-*/
-template <typename Derived, typename OtherDerived>
-typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type
-umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, bool with_scaling = true)
-{
-  typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;
-  typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef typename Derived::Index Index;
-
-  EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
-  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value),
-    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-
-  enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };
-
-  typedef Matrix<Scalar, Dimension, 1> VectorType;
-  typedef Matrix<Scalar, Dimension, Dimension> MatrixType;
-  typedef typename internal::plain_matrix_type_row_major<Derived>::type RowMajorMatrixType;
-
-  const Index m = src.rows(); // dimension
-  const Index n = src.cols(); // number of measurements
-
-  // required for demeaning ...
-  const RealScalar one_over_n = 1 / static_cast<RealScalar>(n);
-
-  // computation of mean
-  const VectorType src_mean = src.rowwise().sum() * one_over_n;
-  const VectorType dst_mean = dst.rowwise().sum() * one_over_n;
-
-  // demeaning of src and dst points
-  const RowMajorMatrixType src_demean = src.colwise() - src_mean;
-  const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean;
-
-  // Eq. (36)-(37)
-  const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n;
-
-  // Eq. (38)
-  const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose();
-
-  JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV);
-
-  // Initialize the resulting transformation with an identity matrix...
-  TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1);
-
-  // Eq. (39)
-  VectorType S = VectorType::Ones(m);
-  if (sigma.determinant()<0) S(m-1) = -1;
-
-  // Eq. (40) and (43)
-  const VectorType& d = svd.singularValues();
-  Index rank = 0; for (Index i=0; i<m; ++i) if (!internal::isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank;
-  if (rank == m-1) {
-    if ( svd.matrixU().determinant() * svd.matrixV().determinant() > 0 ) {
-      Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose();
-    } else {
-      const Scalar s = S(m-1); S(m-1) = -1;
-      Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
-      S(m-1) = s;
-    }
-  } else {
-    Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
-  }
-
-  if (with_scaling)
-  {
-    // Eq. (42)
-    const Scalar c = 1/src_var * svd.singularValues().dot(S);
-
-    // Eq. (41)
-    Rt.col(m).head(m) = dst_mean;
-    Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean;
-    Rt.block(0,0,m,m) *= c;
-  }
-  else
-  {
-    Rt.col(m).head(m) = dst_mean;
-    Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean;
-  }
-
-  return Rt;
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_UMEYAMA_H
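
The function removed above is the public Eigen::umeyama(). A minimal usage sketch, assuming 3D point sets stored column-wise; the ground-truth similarity transform is only illustrative:

    #include <Eigen/Core>
    #include <Eigen/Geometry>   // declares Eigen::umeyama()
    #include <iostream>

    int main()
    {
      using namespace Eigen;
      const int n = 100;
      // Source points (3 x n) and a known similarity transform to recover.
      Matrix3Xd src = Matrix3Xd::Random(3, n);
      double scale = 2.0;
      Matrix3d R = AngleAxisd(0.3, Vector3d::UnitY()).toRotationMatrix();
      Vector3d t(0.5, -1.0, 2.0);
      Matrix3Xd dst = (scale * R * src).colwise() + t;
      // Recover the homogeneous transform [cR t; 0 1] minimizing the residual.
      Matrix4d T = umeyama(src, dst, /*with_scaling=*/ true);
      std::cout << T << std::endl;
      return 0;
    }
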
diff --git a/resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
deleted file mode 100644
index 5a822e0ea..000000000
--- a/resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
+++ /dev/null
@@ -1,256 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_BICGSTAB_H
-#define EIGEN_BICGSTAB_H
-
-namespace Eigen { 
-
-namespace internal {
-
-/** \internal Low-level bi conjugate gradient stabilized algorithm
-  * \param mat The matrix A
-  * \param rhs The right hand side vector b
-  * \param x On input an initial solution, on output the computed solution.
-  * \param precond A preconditioner able to efficiently solve for an
-  *                approximation of Ax=b (regardless of b)
-  * \param iters On input the max number of iterations, on output the number of performed iterations.
-  * \param tol_error On input the tolerance error, on output an estimation of the relative error.
-  * \return false in the case of a numerical issue, for example a breakdown of BiCGSTAB.
-  */
-template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
-bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
-              const Preconditioner& precond, int& iters,
-              typename Dest::RealScalar& tol_error)
-{
-  using std::sqrt;
-  using std::abs;
-  typedef typename Dest::RealScalar RealScalar;
-  typedef typename Dest::Scalar Scalar;
-  typedef Matrix<Scalar,Dynamic,1> VectorType;
-  RealScalar tol = tol_error;
-  int maxIters = iters;
-
-  int n = mat.cols();
-  x = precond.solve(x);
-  VectorType r  = rhs - mat * x;
-  VectorType r0 = r;
-  
-  RealScalar r0_sqnorm = rhs.squaredNorm();
-  Scalar rho    = 1;
-  Scalar alpha  = 1;
-  Scalar w      = 1;
-  
-  VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);
-  VectorType y(n),  z(n);
-  VectorType kt(n), ks(n);
-
-  VectorType s(n), t(n);
-
-  RealScalar tol2 = tol*tol;
-  int i = 0;
-
-  while ( r.squaredNorm()/r0_sqnorm > tol2 && i<maxIters )
-  {
-    Scalar rho_old = rho;
-
-    rho = r0.dot(r);
-    if (rho == Scalar(0)) return false; /* New search directions cannot be found */
-    Scalar beta = (rho/rho_old) * (alpha / w);
-    p = r + beta * (p - w * v);
-    
-    y = precond.solve(p);
-    
-    v.noalias() = mat * y;
-
-    alpha = rho / r0.dot(v);
-    s = r - alpha * v;
-
-    z = precond.solve(s);
-    t.noalias() = mat * z;
-
-    w = t.dot(s) / t.squaredNorm();
-    x += alpha * y + w * z;
-    r = s - w * t;
-    ++i;
-  }
-  tol_error = sqrt(r.squaredNorm()/r0_sqnorm);
-  iters = i;
-  return true; 
-}
-
-}
-
-template< typename _MatrixType,
-          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
-class BiCGSTAB;
-
-namespace internal {
-
-template< typename _MatrixType, typename _Preconditioner>
-struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
-{
-  typedef _MatrixType MatrixType;
-  typedef _Preconditioner Preconditioner;
-};
-
-}
-
-/** \ingroup IterativeLinearSolvers_Module
-  * \brief A bi conjugate gradient stabilized solver for sparse square problems
-  *
-  * This class allows solving sparse linear problems of the form A.x = b using a bi conjugate gradient
-  * stabilized algorithm. The vectors x and b can be either dense or sparse.
-  *
-  * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
-  * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
-  *
-  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
-  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
-  * and NumTraits<Scalar>::epsilon() for the tolerance.
-  * 
-  * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
-  * \code
-  * int n = 10000;
-  * VectorXd x(n), b(n);
-  * SparseMatrix<double> A(n,n);
-  * // fill A and b
-  * BiCGSTAB<SparseMatrix<double> > solver;
-  * solver.compute(A);
-  * x = solver.solve(b);
-  * std::cout << "#iterations:     " << solver.iterations() << std::endl;
-  * std::cout << "estimated error: " << solver.error()      << std::endl;
-  * // update b, and solve again
-  * x = solver.solve(b);
-  * \endcode
-  * 
-  * By default the iterations start with x=0 as an initial guess of the solution.
-  * One can control the start using the solveWithGuess() method. Here is a step by
-  * step execution example starting with a random guess and printing the evolution
-  * of the estimated error:
-  * \code
-  * x = VectorXd::Random(n);
-  * solver.setMaxIterations(1);
-  * int i = 0;
-  * do {
-  *   x = solver.solveWithGuess(b,x);
-  *   std::cout << i << " : " << solver.error() << std::endl;
-  *   ++i;
-  * } while (solver.info()!=Success && i<100);
-  * \endcode
-  * Note that such a step-by-step execution is slightly slower.
-  * 
-  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
-  */
-template< typename _MatrixType, typename _Preconditioner>
-class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> >
-{
-  typedef IterativeSolverBase<BiCGSTAB> Base;
-  using Base::mp_matrix;
-  using Base::m_error;
-  using Base::m_iterations;
-  using Base::m_info;
-  using Base::m_isInitialized;
-public:
-  typedef _MatrixType MatrixType;
-  typedef typename MatrixType::Scalar Scalar;
-  typedef typename MatrixType::Index Index;
-  typedef typename MatrixType::RealScalar RealScalar;
-  typedef _Preconditioner Preconditioner;
-
-public:
-
-  /** Default constructor. */
-  BiCGSTAB() : Base() {}
-
-  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
-    * 
-    * This constructor is a shortcut for the default constructor followed
-    * by a call to compute().
-    * 
-    * \warning this class stores a reference to the matrix A as well as some
-    * precomputed values that depend on it. Therefore, if \a A is changed
-    * this class becomes invalid. Call compute() to update it with the new
-    * matrix A, or modify a copy of A.
-    */
-  BiCGSTAB(const MatrixType& A) : Base(A) {}
-
-  ~BiCGSTAB() {}
-  
-  /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
-    * \a x0 as an initial solution.
-    *
-    * \sa compute()
-    */
-  template<typename Rhs,typename Guess>
-  inline const internal::solve_retval_with_guess<BiCGSTAB, Rhs, Guess>
-  solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
-  {
-    eigen_assert(m_isInitialized && "BiCGSTAB is not initialized.");
-    eigen_assert(Base::rows()==b.rows()
-              && "BiCGSTAB::solve(): invalid number of rows of the right hand side matrix b");
-    return internal::solve_retval_with_guess
-            <BiCGSTAB, Rhs, Guess>(*this, b.derived(), x0);
-  }
-  
-  /** \internal */
-  template<typename Rhs,typename Dest>
-  void _solveWithGuess(const Rhs& b, Dest& x) const
-  {    
-    bool failed = false;
-    for(int j=0; j<b.cols(); ++j)
-    {
-      m_iterations = Base::maxIterations();
-      m_error = Base::m_tolerance;
-      
-      typename Dest::ColXpr xj(x,j);
-      if(!internal::bicgstab(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_error))
-        failed = true;
-    }
-    m_info = failed ? NumericalIssue
-           : m_error <= Base::m_tolerance ? Success
-           : NoConvergence;
-    m_isInitialized = true;
-  }
-
-  /** \internal */
-  template<typename Rhs,typename Dest>
-  void _solve(const Rhs& b, Dest& x) const
-  {
-    // x.setZero();
-    x = b;
-    _solveWithGuess(b,x);
-  }
-
-protected:
-
-};
-
-
-namespace internal {
-
-template<typename _MatrixType, typename _Preconditioner, typename Rhs>
-struct solve_retval<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
-  : solve_retval_base<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
-{
-  typedef BiCGSTAB<_MatrixType, _Preconditioner> Dec;
-  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
-  template<typename Dest> void evalTo(Dest& dst) const
-  {
-    dec()._solve(rhs(),dst);
-  }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_BICGSTAB_H
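
For orientation, a complete version of the usage pattern documented above; the tridiagonal test system is only illustrative:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    #include <iostream>
    #include <vector>

    int main()
    {
      using namespace Eigen;
      const int n = 1000;
      // Assemble a simple sparse tridiagonal system A x = b.
      std::vector<Triplet<double> > coeffs;
      for (int i = 0; i < n; ++i) {
        coeffs.push_back(Triplet<double>(i, i, 4.0));
        if (i + 1 < n) {
          coeffs.push_back(Triplet<double>(i, i + 1, -1.0));
          coeffs.push_back(Triplet<double>(i + 1, i, -1.0));
        }
      }
      SparseMatrix<double> A(n, n);
      A.setFromTriplets(coeffs.begin(), coeffs.end());
      VectorXd b = VectorXd::Ones(n);

      BiCGSTAB<SparseMatrix<double> > solver;  // DiagonalPreconditioner by default
      solver.compute(A);
      VectorXd x = solver.solve(b);
      std::cout << "#iterations:     " << solver.iterations() << std::endl;
      std::cout << "estimated error: " << solver.error()      << std::endl;
      return 0;
    }
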
diff --git a/resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
deleted file mode 100644
index 5a71531cd..000000000
--- a/resources/3rdparty/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
+++ /dev/null
@@ -1,465 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_INCOMPLETE_LUT_H
-#define EIGEN_INCOMPLETE_LUT_H
-
-
-namespace Eigen { 
-
-namespace internal {
-    
-/**
- * Compute a quick-sort split of a vector.
- * On output, the vector row is permuted such that its elements satisfy
- * abs(row(i)) >= abs(row(ncut)) if i<ncut
- * abs(row(i)) <= abs(row(ncut)) if i>ncut
- * \param row The vector of values
- * \param ind The array of indices for the elements in @p row
- * \param ncut  The number of largest elements to keep
- **/ 
-template <typename VectorV, typename VectorI>
-int QuickSplit(VectorV &row, VectorI &ind, int ncut)
-{
-  typedef typename VectorV::RealScalar RealScalar;
-  using std::swap;
-  int mid;
-  int n = row.size(); /* length of the vector */
-  int first, last ; 
-  
-  ncut--; /* to fit the zero-based indices */
-  first = 0; 
-  last = n-1; 
-  if (ncut < first || ncut > last ) return 0;
-  
-  do {
-    mid = first; 
-    RealScalar abskey = std::abs(row(mid)); 
-    for (int j = first + 1; j <= last; j++) {
-      if ( std::abs(row(j)) > abskey) {
-        ++mid;
-        swap(row(mid), row(j));
-        swap(ind(mid), ind(j));
-      }
-    }
-    /* Interchange for the pivot element */
-    swap(row(mid), row(first));
-    swap(ind(mid), ind(first));
-    
-    if (mid > ncut) last = mid - 1;
-    else if (mid < ncut ) first = mid + 1; 
-  } while (mid != ncut );
-  
-  return 0; /* mid is equal to ncut */ 
-}
-
-}// end namespace internal
-/**
- * \brief Incomplete LU factorization with dual-threshold strategy
- * During the numerical factorization, two dropping rules are used:
- *  1) any element whose magnitude is less than some tolerance is dropped.
- *    This tolerance is obtained by multiplying the input tolerance @p droptol 
- *    by the average magnitude of all the original elements in the current row.
- *  2) After the elimination of the row, only the @p fill largest elements in 
- *    the L part and the @p fill largest elements in the U part are kept 
- *    (in addition to the diagonal element). Note that @p fill is computed from
- *    the input parameter @p fillfactor, which is used as the ratio controlling the fill-in
- *    relative to the initial number of nonzero elements.
- * 
- * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
- * and when @p fill=n/2 with @p droptol being different from zero.
- * 
- * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, 
- *              Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
- * 
- * NOTE : The following implementation is derived from the ILUT implementation
- * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota 
- *  released under the terms of the GNU LGPL: 
- *    http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
- * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
- * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
- *   http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
- * alternatively, on GMANE:
- *   http://comments.gmane.org/gmane.comp.lib.eigen/3302
- */
-template <typename _Scalar>
-class IncompleteLUT : internal::noncopyable
-{
-    typedef _Scalar Scalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-    typedef Matrix<Scalar,Dynamic,1> Vector;
-    typedef SparseMatrix<Scalar,RowMajor> FactorType;
-    typedef SparseMatrix<Scalar,ColMajor> PermutType;
-    typedef typename FactorType::Index Index;
-
-  public:
-    typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
-    
-    IncompleteLUT()
-      : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),
-        m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false)
-    {}
-    
-    template<typename MatrixType>
-    IncompleteLUT(const MatrixType& mat, RealScalar droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
-      : m_droptol(droptol),m_fillfactor(fillfactor),
-        m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false)
-    {
-      eigen_assert(fillfactor != 0);
-      compute(mat); 
-    }
-    
-    Index rows() const { return m_lu.rows(); }
-    
-    Index cols() const { return m_lu.cols(); }
-
-    /** \brief Reports whether previous computation was successful.
-      *
-      * \returns \c Success if computation was successful,
-      *          \c NumericalIssue if the matrix appears to be negative.
-      */
-    ComputationInfo info() const
-    {
-      eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
-      return m_info;
-    }
-    
-    template<typename MatrixType>
-    void analyzePattern(const MatrixType& amat);
-    
-    template<typename MatrixType>
-    void factorize(const MatrixType& amat);
-    
-    /**
-      * Compute an incomplete LU factorization with dual threshold on the matrix mat.
-      * No pivoting is done in this version.
-      * 
-      **/
-    template<typename MatrixType>
-    IncompleteLUT<Scalar>& compute(const MatrixType& amat)
-    {
-      analyzePattern(amat); 
-      factorize(amat);
-      eigen_assert(m_factorizationIsOk == true); 
-      m_isInitialized = true;
-      return *this;
-    }
-
-    void setDroptol(RealScalar droptol); 
-    void setFillfactor(int fillfactor); 
-    
-    template<typename Rhs, typename Dest>
-    void _solve(const Rhs& b, Dest& x) const
-    {
-      x = m_Pinv * b;  
-      x = m_lu.template triangularView<UnitLower>().solve(x);
-      x = m_lu.template triangularView<Upper>().solve(x);
-      x = m_P * x; 
-    }
-
-    template<typename Rhs> inline const internal::solve_retval<IncompleteLUT, Rhs>
-     solve(const MatrixBase<Rhs>& b) const
-    {
-      eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
-      eigen_assert(cols()==b.rows()
-                && "IncompleteLUT::solve(): invalid number of rows of the right hand side matrix b");
-      return internal::solve_retval<IncompleteLUT, Rhs>(*this, b.derived());
-    }
-
-protected:
-
-    /** keeps off-diagonal entries; drops diagonal entries */
-    struct keep_diag {
-      inline bool operator() (const Index& row, const Index& col, const Scalar&) const
-      {
-        return row!=col;
-      }
-    };
-
-protected:
-
-    FactorType m_lu;
-    RealScalar m_droptol;
-    int m_fillfactor;
-    bool m_analysisIsOk;
-    bool m_factorizationIsOk;
-    bool m_isInitialized;
-    ComputationInfo m_info;
-    PermutationMatrix<Dynamic,Dynamic,Index> m_P;     // Fill-reducing permutation
-    PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv;  // Inverse permutation
-};
-
-/**
- * Set control parameter droptol
- *  \param droptol   Drop any element whose magnitude is less than this tolerance 
- **/ 
-template<typename Scalar>
-void IncompleteLUT<Scalar>::setDroptol(RealScalar droptol)
-{
-  this->m_droptol = droptol;   
-}
-
-/**
- * Set control parameter fillfactor
- * \param fillfactor  This is used to compute the number @p fill_in of largest elements to keep on each row.
- **/ 
-template<typename Scalar>
-void IncompleteLUT<Scalar>::setFillfactor(int fillfactor)
-{
-  this->m_fillfactor = fillfactor;   
-}
-
-template <typename Scalar>
-template<typename _MatrixType>
-void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
-{
-  // Compute the Fill-reducing permutation
-  SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
-  SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
-  // Symmetrize the pattern
-  // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
-  //       on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered...
-  SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
-  AtA.prune(keep_diag());
-  internal::minimum_degree_ordering<Scalar, Index>(AtA, m_P);  // Then compute the AMD ordering...
-
-  m_Pinv  = m_P.inverse(); // ... and the inverse permutation
-
-  m_analysisIsOk = true;
-}
-
-template <typename Scalar>
-template<typename _MatrixType>
-void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
-{
-  using std::sqrt;
-  using std::swap;
-  using std::abs;
-
-  eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
-  int n = amat.cols();  // Size of the matrix
-  m_lu.resize(n,n);
-  // Declare Working vectors and variables
-  Vector u(n) ;     // real values of the row -- maximum size is n --
-  VectorXi ju(n);   // column position of the values in u -- maximum size  is n
-  VectorXi jr(n);   // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1
-
-  // Apply the fill-reducing permutation
-  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
-  SparseMatrix<Scalar,RowMajor, Index> mat;
-  mat = amat.twistedBy(m_Pinv);
-
-  // Initialization
-  jr.fill(-1);
-  ju.fill(0);
-  u.fill(0);
-
-  // number of largest elements to keep in each row:
-  int fill_in =   static_cast<int> (amat.nonZeros()*m_fillfactor)/n+1;
-  if (fill_in > n) fill_in = n;
-
-  // number of largest nonzero elements to keep in the L and the U part of the current row:
-  int nnzL = fill_in/2;
-  int nnzU = nnzL;
-  m_lu.reserve(n * (nnzL + nnzU + 1));
-
-  // global loop over the rows of the sparse matrix
-  for (int ii = 0; ii < n; ii++)
-  {
-    // 1 - copy the lower and the upper part of the row i of mat in the working vector u
-
-    int sizeu = 1; // number of nonzero elements in the upper part of the current row
-    int sizel = 0; // number of nonzero elements in the lower part of the current row
-    ju(ii)    = ii;
-    u(ii)     = 0;
-    jr(ii)    = ii;
-    RealScalar rownorm = 0;
-
-    typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
-    for (; j_it; ++j_it)
-    {
-      int k = j_it.index();
-      if (k < ii)
-      {
-        // copy the lower part
-        ju(sizel) = k;
-        u(sizel) = j_it.value();
-        jr(k) = sizel;
-        ++sizel;
-      }
-      else if (k == ii)
-      {
-        u(ii) = j_it.value();
-      }
-      else
-      {
-        // copy the upper part
-        int jpos = ii + sizeu;
-        ju(jpos) = k;
-        u(jpos) = j_it.value();
-        jr(k) = jpos;
-        ++sizeu;
-      }
-      rownorm += internal::abs2(j_it.value());
-    }
-
-    // 2 - detect possible zero row
-    if(rownorm==0)
-    {
-      m_info = NumericalIssue;
-      return;
-    }
-    // Take the 2-norm of the current row as a relative tolerance
-    rownorm = sqrt(rownorm);
-
-    // 3 - eliminate the previous nonzero rows
-    int jj = 0;
-    int len = 0;
-    while (jj < sizel)
-    {
-      // In order to eliminate in the correct order,
-      // we must select first the smallest column index among  ju(jj:sizel)
-      int k;
-      int minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
-      k += jj;
-      if (minrow != ju(jj))
-      {
-        // swap the two locations
-        int j = ju(jj);
-        swap(ju(jj), ju(k));
-        jr(minrow) = jj;   jr(j) = k;
-        swap(u(jj), u(k));
-      }
-      // Reset this location
-      jr(minrow) = -1;
-
-      // Start elimination
-      typename FactorType::InnerIterator ki_it(m_lu, minrow);
-      while (ki_it && ki_it.index() < minrow) ++ki_it;
-      eigen_internal_assert(ki_it && ki_it.col()==minrow);
-      Scalar fact = u(jj) / ki_it.value();
-
-      // drop too small elements
-      if(abs(fact) <= m_droptol)
-      {
-        jj++;
-        continue;
-      }
-
-      // linear combination of the current row ii and the row minrow
-      ++ki_it;
-      for (; ki_it; ++ki_it)
-      {
-        Scalar prod = fact * ki_it.value();
-        int j       = ki_it.index();
-        int jpos    = jr(j);
-        if (jpos == -1) // fill-in element
-        {
-          int newpos;
-          if (j >= ii) // dealing with the upper part
-          {
-            newpos = ii + sizeu;
-            sizeu++;
-            eigen_internal_assert(sizeu<=n);
-          }
-          else // dealing with the lower part
-          {
-            newpos = sizel;
-            sizel++;
-            eigen_internal_assert(sizel<=ii);
-          }
-          ju(newpos) = j;
-          u(newpos) = -prod;
-          jr(j) = newpos;
-        }
-        else
-          u(jpos) -= prod;
-      }
-      // store the pivot element
-      u(len) = fact;
-      ju(len) = minrow;
-      ++len;
-
-      jj++;
-    } // end of the elimination on the row ii
-
-    // reset the upper part of the pointer jr to zero
-    for(int k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
-
-    // 4 - partially sort and insert the elements in the m_lu matrix
-
-    // sort the L-part of the row
-    sizel = len;
-    len = (std::min)(sizel, nnzL);
-    typename Vector::SegmentReturnType ul(u.segment(0, sizel));
-    typename VectorXi::SegmentReturnType jul(ju.segment(0, sizel));
-    internal::QuickSplit(ul, jul, len);
-
-    // store the largest m_fill elements of the L part
-    m_lu.startVec(ii);
-    for(int k = 0; k < len; k++)
-      m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
-
-    // store the diagonal element
-    // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)
-    if (u(ii) == Scalar(0))
-      u(ii) = sqrt(m_droptol) * rownorm;
-    m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);
-
-    // sort the U-part of the row
-    // apply the dropping rule first
-    len = 0;
-    for(int k = 1; k < sizeu; k++)
-    {
-      if(abs(u(ii+k)) > m_droptol * rownorm )
-      {
-        ++len;
-        u(ii + len)  = u(ii + k);
-        ju(ii + len) = ju(ii + k);
-      }
-    }
-    sizeu = len + 1; // +1 to take into account the diagonal element
-    len = (std::min)(sizeu, nnzU);
-    typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
-    typename VectorXi::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
-    internal::QuickSplit(uu, juu, len);
-
-    // store the largest elements of the U part
-    for(int k = ii + 1; k < ii + len; k++)
-      m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
-  }
-
-  m_lu.finalize();
-  m_lu.makeCompressed();
-
-  m_factorizationIsOk = true;
-  m_info = Success;
-}
-
-namespace internal {
-
-template<typename _MatrixType, typename Rhs>
-struct solve_retval<IncompleteLUT<_MatrixType>, Rhs>
-  : solve_retval_base<IncompleteLUT<_MatrixType>, Rhs>
-{
-  typedef IncompleteLUT<_MatrixType> Dec;
-  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
-  template<typename Dest> void evalTo(Dest& dst) const
-  {
-    dec()._solve(rhs(),dst);
-  }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_INCOMPLETE_LUT_H
-
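
A minimal sketch of using the factorization above as a preconditioner for BiCGSTAB; it assumes the preconditioner() accessor of IterativeSolverBase, and the tridiagonal system is only illustrative:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    #include <iostream>
    #include <vector>

    int main()
    {
      using namespace Eigen;
      const int n = 500;
      std::vector<Triplet<double> > coeffs;
      for (int i = 0; i < n; ++i) {
        coeffs.push_back(Triplet<double>(i, i, 4.0));
        if (i + 1 < n) {
          coeffs.push_back(Triplet<double>(i, i + 1, -1.0));
          coeffs.push_back(Triplet<double>(i + 1, i, -1.0));
        }
      }
      SparseMatrix<double> A(n, n);
      A.setFromTriplets(coeffs.begin(), coeffs.end());
      VectorXd b = VectorXd::Random(n);

      // Use the dual-threshold ILUT factorization as a preconditioner.
      BiCGSTAB<SparseMatrix<double>, IncompleteLUT<double> > solver;
      solver.preconditioner().setDroptol(1e-4);   // drop small entries
      solver.preconditioner().setFillfactor(20);  // allow more fill-in per row
      solver.compute(A);
      VectorXd x = solver.solve(b);
      std::cout << "#iterations: " << solver.iterations() << std::endl;
      return 0;
    }
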
diff --git a/resources/3rdparty/eigen/Eigen/src/Jacobi/Jacobi.h b/resources/3rdparty/eigen/Eigen/src/Jacobi/Jacobi.h
deleted file mode 100644
index 7eb19a023..000000000
--- a/resources/3rdparty/eigen/Eigen/src/Jacobi/Jacobi.h
+++ /dev/null
@@ -1,424 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_JACOBI_H
-#define EIGEN_JACOBI_H
-
-namespace Eigen { 
-
-/** \ingroup Jacobi_Module
-  * \jacobi_module
-  * \class JacobiRotation
-  * \brief Rotation given by a cosine-sine pair.
-  *
-  * This class represents a Jacobi or Givens rotation.
-  * This is a 2D rotation in the plane \c J of angle \f$ \theta \f$ defined by
-  * its cosine \c c and sine \c s as follow:
-  * \f$ J = \left ( \begin{array}{cc} c & \overline s \\ -s  & \overline c \end{array} \right ) \f$
-  *
-  * You can apply the respective counter-clockwise rotation to a column vector \c v by
-  * applying its adjoint on the left: \f$ v = J^* v \f$ that translates to the following Eigen code:
-  * \code
-  * v.applyOnTheLeft(J.adjoint());
-  * \endcode
-  *
-  * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
-  */
-template<typename Scalar> class JacobiRotation
-{
-  public:
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-
-    /** Default constructor without any initialization. */
-    JacobiRotation() {}
-
-    /** Construct a planar rotation from a cosine-sine pair (\a c, \a s). */
-    JacobiRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {}
-
-    Scalar& c() { return m_c; }
-    Scalar c() const { return m_c; }
-    Scalar& s() { return m_s; }
-    Scalar s() const { return m_s; }
-
-    /** Concatenates two planar rotations */
-    JacobiRotation operator*(const JacobiRotation& other)
-    {
-      return JacobiRotation(m_c * other.m_c - internal::conj(m_s) * other.m_s,
-                            internal::conj(m_c * internal::conj(other.m_s) + internal::conj(m_s) * internal::conj(other.m_c)));
-    }
-
-    /** Returns the transposed transformation */
-    JacobiRotation transpose() const { return JacobiRotation(m_c, -internal::conj(m_s)); }
-
-    /** Returns the adjoint transformation */
-    JacobiRotation adjoint() const { return JacobiRotation(internal::conj(m_c), -m_s); }
-
-    template<typename Derived>
-    bool makeJacobi(const MatrixBase<Derived>&, typename Derived::Index p, typename Derived::Index q);
-    bool makeJacobi(RealScalar x, Scalar y, RealScalar z);
-
-    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
-
-  protected:
-    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::true_type);
-    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::false_type);
-
-    Scalar m_c, m_s;
-};
-
-/** Makes \c *this as a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix
-  * \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$
-  *
-  * \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
-  */
-template<typename Scalar>
-bool JacobiRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  if(y == Scalar(0))
-  {
-    m_c = Scalar(1);
-    m_s = Scalar(0);
-    return false;
-  }
-  else
-  {
-    RealScalar tau = (x-z)/(RealScalar(2)*internal::abs(y));
-    RealScalar w = internal::sqrt(internal::abs2(tau) + RealScalar(1));
-    RealScalar t;
-    if(tau>RealScalar(0))
-    {
-      t = RealScalar(1) / (tau + w);
-    }
-    else
-    {
-      t = RealScalar(1) / (tau - w);
-    }
-    RealScalar sign_t = t > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
-    RealScalar n = RealScalar(1) / internal::sqrt(internal::abs2(t)+RealScalar(1));
-    m_s = - sign_t * (internal::conj(y) / internal::abs(y)) * internal::abs(t) * n;
-    m_c = n;
-    return true;
-  }
-}
-
-/** Makes \c *this as a Jacobi rotation \c J such that applying \a J on both the right and left sides of the 2x2 selfadjoint matrix
-  * \f$ B = \left ( \begin{array}{cc} \text{this}_{pp} & \text{this}_{pq} \\ (\text{this}_{pq})^* & \text{this}_{qq} \end{array} \right )\f$ yields
-  * a diagonal matrix \f$ A = J^* B J \f$
-  *
-  * Example: \include Jacobi_makeJacobi.cpp
-  * Output: \verbinclude Jacobi_makeJacobi.out
-  *
-  * \sa JacobiRotation::makeJacobi(RealScalar, Scalar, RealScalar), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
-  */
-template<typename Scalar>
-template<typename Derived>
-inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, typename Derived::Index p, typename Derived::Index q)
-{
-  return makeJacobi(internal::real(m.coeff(p,p)), m.coeff(p,q), internal::real(m.coeff(q,q)));
-}
-
-/** Makes \c *this as a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector
-  * \f$ V = \left ( \begin{array}{c} p \\ q \end{array} \right )\f$ yields:
-  * \f$ G^* V = \left ( \begin{array}{c} r \\ 0 \end{array} \right )\f$.
-  *
-  * The value of \a z is returned if \a z is not null (the default is null).
-  * Also note that G is built such that the cosine is always real.
-  *
-  * Example: \include Jacobi_makeGivens.cpp
-  * Output: \verbinclude Jacobi_makeGivens.out
-  *
-  * This function implements the continuous Givens rotation generation algorithm
-  * found in Anderson (2000), Discontinuous Plane Rotations and the Symmetric Eigenvalue Problem.
-  * LAPACK Working Note 150, University of Tennessee, UT-CS-00-454, December 4, 2000.
-  *
-  * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
-  */
-template<typename Scalar>
-void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* z)
-{
-  makeGivens(p, q, z, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type());
-}
-
-
-// specialization for complexes
-template<typename Scalar>
-void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type)
-{
-  if(q==Scalar(0))
-  {
-    m_c = internal::real(p)<0 ? Scalar(-1) : Scalar(1);
-    m_s = 0;
-    if(r) *r = m_c * p;
-  }
-  else if(p==Scalar(0))
-  {
-    m_c = 0;
-    m_s = -q/internal::abs(q);
-    if(r) *r = internal::abs(q);
-  }
-  else
-  {
-    RealScalar p1 = internal::norm1(p);
-    RealScalar q1 = internal::norm1(q);
-    if(p1>=q1)
-    {
-      Scalar ps = p / p1;
-      RealScalar p2 = internal::abs2(ps);
-      Scalar qs = q / p1;
-      RealScalar q2 = internal::abs2(qs);
-
-      RealScalar u = internal::sqrt(RealScalar(1) + q2/p2);
-      if(internal::real(p)<RealScalar(0))
-        u = -u;
-
-      m_c = Scalar(1)/u;
-      m_s = -qs*internal::conj(ps)*(m_c/p2);
-      if(r) *r = p * u;
-    }
-    else
-    {
-      Scalar ps = p / q1;
-      RealScalar p2 = internal::abs2(ps);
-      Scalar qs = q / q1;
-      RealScalar q2 = internal::abs2(qs);
-
-      RealScalar u = q1 * internal::sqrt(p2 + q2);
-      if(internal::real(p)<RealScalar(0))
-        u = -u;
-
-      p1 = internal::abs(p);
-      ps = p/p1;
-      m_c = p1/u;
-      m_s = -internal::conj(ps) * (q/u);
-      if(r) *r = ps * u;
-    }
-  }
-}
-
-// specialization for reals
-template<typename Scalar>
-void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type)
-{
-  if(q==Scalar(0))
-  {
-    m_c = p<Scalar(0) ? Scalar(-1) : Scalar(1);
-    m_s = Scalar(0);
-    if(r) *r = internal::abs(p);
-  }
-  else if(p==Scalar(0))
-  {
-    m_c = Scalar(0);
-    m_s = q<Scalar(0) ? Scalar(1) : Scalar(-1);
-    if(r) *r = internal::abs(q);
-  }
-  else if(internal::abs(p) > internal::abs(q))
-  {
-    Scalar t = q/p;
-    Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t));
-    if(p<Scalar(0))
-      u = -u;
-    m_c = Scalar(1)/u;
-    m_s = -t * m_c;
-    if(r) *r = p * u;
-  }
-  else
-  {
-    Scalar t = p/q;
-    Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t));
-    if(q<Scalar(0))
-      u = -u;
-    m_s = -Scalar(1)/u;
-    m_c = -t * m_s;
-    if(r) *r = q * u;
-  }
-
-}
-
-/****************************************************************************************
-*   Implementation of MatrixBase methods
-****************************************************************************************/
-
-/** \jacobi_module
-  * Applies the clockwise 2D rotation \a j to the set of 2D vectors of coordinates \a x and \a y:
-  * \f$ \left ( \begin{array}{cc} x \\ y \end{array} \right )  =  J \left ( \begin{array}{cc} x \\ y \end{array} \right ) \f$
-  *
-  * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
-  */
-namespace internal {
-template<typename VectorX, typename VectorY, typename OtherScalar>
-void apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation<OtherScalar>& j);
-}
-
-/** \jacobi_module
-  * Applies the rotation in the plane \a j to the rows \a p and \a q of \c *this, i.e., it computes B = J * B,
-  * with \f$ B = \left ( \begin{array}{cc} \text{*this.row}(p) \\ \text{*this.row}(q) \end{array} \right ) \f$.
-  *
-  * \sa class JacobiRotation, MatrixBase::applyOnTheRight(), internal::apply_rotation_in_the_plane()
-  */
-template<typename Derived>
-template<typename OtherScalar>
-inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j)
-{
-  RowXpr x(this->row(p));
-  RowXpr y(this->row(q));
-  internal::apply_rotation_in_the_plane(x, y, j);
-}
-
-/** \ingroup Jacobi_Module
-  * Applies the rotation in the plane \a j to the columns \a p and \a q of \c *this, i.e., it computes B = B * J
-  * with \f$ B = \left ( \begin{array}{cc} \text{*this.col}(p) & \text{*this.col}(q) \end{array} \right ) \f$.
-  *
-  * \sa class JacobiRotation, MatrixBase::applyOnTheLeft(), internal::apply_rotation_in_the_plane()
-  */
-template<typename Derived>
-template<typename OtherScalar>
-inline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j)
-{
-  ColXpr x(this->col(p));
-  ColXpr y(this->col(q));
-  internal::apply_rotation_in_the_plane(x, y, j.transpose());
-}
-
-namespace internal {
-template<typename VectorX, typename VectorY, typename OtherScalar>
-void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation<OtherScalar>& j)
-{
-  typedef typename VectorX::Index Index;
-  typedef typename VectorX::Scalar Scalar;
-  enum { PacketSize = packet_traits<Scalar>::size };
-  typedef typename packet_traits<Scalar>::type Packet;
-  eigen_assert(_x.size() == _y.size());
-  Index size = _x.size();
-  Index incrx = _x.innerStride();
-  Index incry = _y.innerStride();
-
-  Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0);
-  Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0);
-  
-  OtherScalar c = j.c();
-  OtherScalar s = j.s();
-  if (c==OtherScalar(1) && s==OtherScalar(0))
-    return;
-
-  /*** dynamic-size vectorized paths ***/
-
-  if(VectorX::SizeAtCompileTime == Dynamic &&
-    (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
-    ((incrx==1 && incry==1) || PacketSize == 1))
-  {
-    // both vectors are sequentially stored in memory => vectorization
-    enum { Peeling = 2 };
-
-    Index alignedStart = internal::first_aligned(y, size);
-    Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
-
-    const Packet pc = pset1<Packet>(c);
-    const Packet ps = pset1<Packet>(s);
-    conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
-
-    for(Index i=0; i<alignedStart; ++i)
-    {
-      Scalar xi = x[i];
-      Scalar yi = y[i];
-      x[i] =  c * xi + conj(s) * yi;
-      y[i] = -s * xi + conj(c) * yi;
-    }
-
-    Scalar* EIGEN_RESTRICT px = x + alignedStart;
-    Scalar* EIGEN_RESTRICT py = y + alignedStart;
-
-    if(internal::first_aligned(x, size)==alignedStart)
-    {
-      for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)
-      {
-        Packet xi = pload<Packet>(px);
-        Packet yi = pload<Packet>(py);
-        pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
-        pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
-        px += PacketSize;
-        py += PacketSize;
-      }
-    }
-    else
-    {
-      Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);
-      for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)
-      {
-        Packet xi   = ploadu<Packet>(px);
-        Packet xi1  = ploadu<Packet>(px+PacketSize);
-        Packet yi   = pload <Packet>(py);
-        Packet yi1  = pload <Packet>(py+PacketSize);
-        pstoreu(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
-        pstoreu(px+PacketSize, padd(pmul(pc,xi1),pcj.pmul(ps,yi1)));
-        pstore (py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
-        pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pmul(ps,xi1)));
-        px += Peeling*PacketSize;
-        py += Peeling*PacketSize;
-      }
-      if(alignedEnd!=peelingEnd)
-      {
-        Packet xi = ploadu<Packet>(x+peelingEnd);
-        Packet yi = pload <Packet>(y+peelingEnd);
-        pstoreu(x+peelingEnd, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
-        pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
-      }
-    }
-
-    for(Index i=alignedEnd; i<size; ++i)
-    {
-      Scalar xi = x[i];
-      Scalar yi = y[i];
-      x[i] =  c * xi + conj(s) * yi;
-      y[i] = -s * xi + conj(c) * yi;
-    }
-  }
-
-  /*** fixed-size vectorized path ***/
-  else if(VectorX::SizeAtCompileTime != Dynamic &&
-          (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
-          (VectorX::Flags & VectorY::Flags & AlignedBit))
-  {
-    const Packet pc = pset1<Packet>(c);
-    const Packet ps = pset1<Packet>(s);
-    conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
-    Scalar* EIGEN_RESTRICT px = x;
-    Scalar* EIGEN_RESTRICT py = y;
-    for(Index i=0; i<size; i+=PacketSize)
-    {
-      Packet xi = pload<Packet>(px);
-      Packet yi = pload<Packet>(py);
-      pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
-      pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
-      px += PacketSize;
-      py += PacketSize;
-    }
-  }
-
-  /*** non-vectorized path ***/
-  else
-  {
-    for(Index i=0; i<size; ++i)
-    {
-      Scalar xi = *x;
-      Scalar yi = *y;
-      *x =  c * xi + conj(s) * yi;
-      *y = -s * xi + conj(c) * yi;
-      x += incrx;
-      y += incry;
-    }
-  }
-}
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_JACOBI_H
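
A minimal sketch of the Givens construction documented above: makeGivens() builds a rotation whose adjoint zeroes the second entry of a 2-vector, matching the (r, 0) form described in the deleted documentation.

    #include <Eigen/Core>
    #include <Eigen/Jacobi>
    #include <iostream>

    int main()
    {
      using namespace Eigen;
      Vector2d v(3.0, 4.0);
      // Build G such that applying G^* on the left of (p, q)^T yields (r, 0)^T.
      JacobiRotation<double> G;
      G.makeGivens(v.x(), v.y());
      // Apply the adjoint rotation to rows 0 and 1 of v (i.e. to v itself).
      v.applyOnTheLeft(0, 1, G.adjoint());
      std::cout << v.transpose() << std::endl;  // prints approximately: 5 0
      return 0;
    }
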
diff --git a/resources/3rdparty/eigen/Eigen/src/PardisoSupport/PardisoSupport.h b/resources/3rdparty/eigen/Eigen/src/PardisoSupport/PardisoSupport.h
deleted file mode 100644
index e6defc8c3..000000000
--- a/resources/3rdparty/eigen/Eigen/src/PardisoSupport/PardisoSupport.h
+++ /dev/null
@@ -1,614 +0,0 @@
-/*
- Copyright (c) 2011, Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without modification,
- are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
- * Neither the name of Intel Corporation nor the names of its contributors may
-   be used to endorse or promote products derived from this software without
-   specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
- ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- ********************************************************************************
- *   Content : Eigen bindings to Intel(R) MKL PARDISO
- ********************************************************************************
-*/
-
-#ifndef EIGEN_PARDISOSUPPORT_H
-#define EIGEN_PARDISOSUPPORT_H
-
-namespace Eigen { 
-
-template<typename _MatrixType> class PardisoLU;
-template<typename _MatrixType, int Options=Upper> class PardisoLLT;
-template<typename _MatrixType, int Options=Upper> class PardisoLDLT;
-
-namespace internal
-{
-  template<typename Index>
-  struct pardiso_run_selector
-  {
-    static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
-                      Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
-    {
-      Index error = 0;
-      ::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
-      return error;
-    }
-  };
-  template<>
-  struct pardiso_run_selector<long long int>
-  {
-    typedef long long int Index;
-    static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
-                      Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
-    {
-      Index error = 0;
-      ::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
-      return error;
-    }
-  };
-
-  template<class Pardiso> struct pardiso_traits;
-
-  template<typename _MatrixType>
-  struct pardiso_traits< PardisoLU<_MatrixType> >
-  {
-    typedef _MatrixType MatrixType;
-    typedef typename _MatrixType::Scalar Scalar;
-    typedef typename _MatrixType::RealScalar RealScalar;
-    typedef typename _MatrixType::Index Index;
-  };
-
-  template<typename _MatrixType, int Options>
-  struct pardiso_traits< PardisoLLT<_MatrixType, Options> >
-  {
-    typedef _MatrixType MatrixType;
-    typedef typename _MatrixType::Scalar Scalar;
-    typedef typename _MatrixType::RealScalar RealScalar;
-    typedef typename _MatrixType::Index Index;
-  };
-
-  template<typename _MatrixType, int Options>
-  struct pardiso_traits< PardisoLDLT<_MatrixType, Options> >
-  {
-    typedef _MatrixType MatrixType;
-    typedef typename _MatrixType::Scalar Scalar;
-    typedef typename _MatrixType::RealScalar RealScalar;
-    typedef typename _MatrixType::Index Index;    
-  };
-
-}
-
-template<class Derived>
-class PardisoImpl
-{
-    typedef internal::pardiso_traits<Derived> Traits;
-  public:
-    typedef typename Traits::MatrixType MatrixType;
-    typedef typename Traits::Scalar Scalar;
-    typedef typename Traits::RealScalar RealScalar;
-    typedef typename Traits::Index Index;
-    typedef SparseMatrix<Scalar,RowMajor,Index> SparseMatrixType;
-    typedef Matrix<Scalar,Dynamic,1> VectorType;
-    typedef Matrix<Index, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
-    typedef Matrix<Index, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
-    enum {
-      ScalarIsComplex = NumTraits<Scalar>::IsComplex
-    };
-
-    PardisoImpl()
-    {
-      eigen_assert((sizeof(Index) >= sizeof(_INTEGER_t) && sizeof(Index) <= 8) && "Non-supported index type");
-      m_iparm.setZero();
-      m_msglvl = 0; // No output
-      m_initialized = false;
-    }
-
-    ~PardisoImpl()
-    {
-      pardisoRelease();
-    }
-
-    inline Index cols() const { return m_size; }
-    inline Index rows() const { return m_size; }
-  
-    /** \brief Reports whether previous computation was successful.
-      *
-      * \returns \c Success if computation was successful,
-      *          \c NumericalIssue if the matrix appears to be negative.
-      */
-    ComputationInfo info() const
-    {
-      eigen_assert(m_initialized && "Decomposition is not initialized.");
-      return m_info;
-    }
-
-    /** \warning for advanced usage only.
-      * \returns a reference to the parameter array controlling PARDISO.
-      * See the PARDISO manual to know how to use it. */
-    Array<Index,64,1>& pardisoParameterArray()
-    {
-      return m_iparm;
-    }
-    
-    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
-      *
-      * This function is particularly useful when solving several problems having the same structure.
-      * 
-      * \sa factorize()
-      */
-    Derived& analyzePattern(const MatrixType& matrix);
-    
-    /** Performs a numeric decomposition of \a matrix
-      *
-      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
-      *
-      * \sa analyzePattern()
-      */
-    Derived& factorize(const MatrixType& matrix);
-
-    Derived& compute(const MatrixType& matrix);
-    
-    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
-      *
-      * \sa compute()
-      */
-    template<typename Rhs>
-    inline const internal::solve_retval<PardisoImpl, Rhs>
-    solve(const MatrixBase<Rhs>& b) const
-    {
-      eigen_assert(m_initialized && "Pardiso solver is not initialized.");
-      eigen_assert(rows()==b.rows()
-                && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b");
-      return internal::solve_retval<PardisoImpl, Rhs>(*this, b.derived());
-    }
-
-    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
-      *
-      * \sa compute()
-      */
-    template<typename Rhs>
-    inline const internal::sparse_solve_retval<PardisoImpl, Rhs>
-    solve(const SparseMatrixBase<Rhs>& b) const
-    {
-      eigen_assert(m_initialized && "Pardiso solver is not initialized.");
-      eigen_assert(rows()==b.rows()
-                && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b");
-      return internal::sparse_solve_retval<PardisoImpl, Rhs>(*this, b.derived());
-    }
-
-    Derived& derived()
-    {
-      return *static_cast<Derived*>(this);
-    }
-    const Derived& derived() const
-    {
-      return *static_cast<const Derived*>(this);
-    }
-
-    template<typename BDerived, typename XDerived>
-    bool _solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const;
-
-    /** \internal */
-    template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
-    void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
-    {
-      eigen_assert(m_size==b.rows());
-
-      // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
-      static const int NbColsAtOnce = 4;
-      int rhsCols = b.cols();
-      int size = b.rows();
-      // Pardiso cannot solve in-place,
-      // so we need two temporaries
-      Eigen::Matrix<DestScalar,Dynamic,Dynamic,ColMajor> tmp_rhs(size,rhsCols);
-      Eigen::Matrix<DestScalar,Dynamic,Dynamic,ColMajor> tmp_res(size,rhsCols);
-      for(int k=0; k<rhsCols; k+=NbColsAtOnce)
-      {
-        int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
-        tmp_rhs.leftCols(actualCols) = b.middleCols(k,actualCols);
-        tmp_res.leftCols(actualCols) = derived().solve(tmp_rhs.leftCols(actualCols));
-        dest.middleCols(k,actualCols) = tmp_res.leftCols(actualCols).sparseView();
-      }
-    }
-
-  protected:
-    void pardisoRelease()
-    {
-      if(m_initialized) // Factorization ran at least once
-      {
-        internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, -1, m_size, 0, 0, 0, m_perm.data(), 0,
-                                                   m_iparm.data(), m_msglvl, 0, 0);
-      }
-    }
-
-    void pardisoInit(int type)
-    {
-      m_type = type;
-      bool symmetric = abs(m_type) < 10;
-      m_iparm[0] = 1;   // No solver default
-      m_iparm[1] = 3;   // use Metis for the ordering
-      m_iparm[2] = 1;   // Numbers of processors, value of OMP_NUM_THREADS
-      m_iparm[3] = 0;   // No iterative-direct algorithm
-      m_iparm[4] = 0;   // No user fill-in reducing permutation
-      m_iparm[5] = 0;   // Write solution into x
-      m_iparm[6] = 0;   // Not in use
-      m_iparm[7] = 2;   // Max numbers of iterative refinement steps
-      m_iparm[8] = 0;   // Not in use
-      m_iparm[9] = 13;  // Perturb the pivot elements with 1E-13
-      m_iparm[10] = symmetric ? 0 : 1; // Use nonsymmetric permutation and scaling MPS
-      m_iparm[11] = 0;  // Not in use
-      m_iparm[12] = symmetric ? 0 : 1;  // Maximum weighted matching algorithm is switched-off (default for symmetric).
-                                        // Try m_iparm[12] = 1 in case of inappropriate accuracy
-      m_iparm[13] = 0;  // Output: Number of perturbed pivots
-      m_iparm[14] = 0;  // Not in use
-      m_iparm[15] = 0;  // Not in use
-      m_iparm[16] = 0;  // Not in use
-      m_iparm[17] = -1; // Output: Number of nonzeros in the factor LU
-      m_iparm[18] = -1; // Output: Mflops for LU factorization
-      m_iparm[19] = 0;  // Output: Numbers of CG Iterations
-      
-      m_iparm[20] = 0;  // 1x1 pivoting
-      m_iparm[26] = 0;  // No matrix checker
-      m_iparm[27] = (sizeof(RealScalar) == 4) ? 1 : 0;
-      m_iparm[34] = 1;  // C indexing
-      m_iparm[59] = 1;  // Automatic switch between In-Core and Out-of-Core modes
-    }
-
-  protected:
-    // cached data to reduce reallocation, etc.
-    
-    void manageErrorCode(Index error)
-    {
-      switch(error)
-      {
-        case 0:
-          m_info = Success;
-          break;
-        case -4:
-        case -7:
-          m_info = NumericalIssue;
-          break;
-        default:
-          m_info = InvalidInput;
-      }
-    }
-
-    mutable SparseMatrixType m_matrix;
-    ComputationInfo m_info;
-    bool m_initialized, m_analysisIsOk, m_factorizationIsOk;
-    Index m_type, m_msglvl;
-    mutable void *m_pt[64];
-    mutable Array<Index,64,1> m_iparm;
-    mutable IntColVectorType m_perm;
-    Index m_size;
-    
-  private:
-    PardisoImpl(PardisoImpl &) {}
-};
-
-template<class Derived>
-Derived& PardisoImpl<Derived>::compute(const MatrixType& a)
-{
-  m_size = a.rows();
-  eigen_assert(a.rows() == a.cols());
-
-  pardisoRelease();
-  memset(m_pt, 0, sizeof(m_pt));
-  m_perm.setZero(m_size);
-  derived().getMatrix(a);
-  
-  Index error;
-  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 12, m_size,
-                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
-                                                     m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
-
-  manageErrorCode(error);
-  m_analysisIsOk = true;
-  m_factorizationIsOk = true;
-  m_initialized = true;
-  return derived();
-}
-
-template<class Derived>
-Derived& PardisoImpl<Derived>::analyzePattern(const MatrixType& a)
-{
-  m_size = a.rows();
-  eigen_assert(m_size == a.cols());
-
-  pardisoRelease();
-  memset(m_pt, 0, sizeof(m_pt));
-  m_perm.setZero(m_size);
-  derived().getMatrix(a);
-  
-  Index error;
-  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 11, m_size,
-                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
-                                                     m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
-  
-  manageErrorCode(error);
-  m_analysisIsOk = true;
-  m_factorizationIsOk = false;
-  m_initialized = true;
-  return derived();
-}
-
-template<class Derived>
-Derived& PardisoImpl<Derived>::factorize(const MatrixType& a)
-{
-  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
-  eigen_assert(m_size == a.rows() && m_size == a.cols());
-  
-  derived().getMatrix(a);
-
-  Index error;  
-  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 22, m_size,
-                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
-                                                     m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
-  
-  manageErrorCode(error);
-  m_factorizationIsOk = true;
-  return derived();
-}
-
-template<class Base>
-template<typename BDerived,typename XDerived>
-bool PardisoImpl<Base>::_solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const
-{
-  if(m_iparm[0] == 0) // Factorization was not computed
-    return false;
-
-  //Index n = m_matrix.rows();
-  Index nrhs = Index(b.cols());
-  eigen_assert(m_size==b.rows());
-  eigen_assert(((MatrixBase<BDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major right hand sides are not supported");
-  eigen_assert(((MatrixBase<XDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major matrices of unknowns are not supported");
-  eigen_assert(((nrhs == 1) || b.outerStride() == b.rows()));
-
-
-//  switch (transposed) {
-//    case SvNoTrans    : m_iparm[11] = 0 ; break;
-//    case SvTranspose  : m_iparm[11] = 2 ; break;
-//    case SvAdjoint    : m_iparm[11] = 1 ; break;
-//    default:
-//      //std::cerr << "Eigen: transposition  option \"" << transposed << "\" not supported by the PARDISO backend\n";
-//      m_iparm[11] = 0;
-//  }
-
-  Scalar* rhs_ptr = const_cast<Scalar*>(b.derived().data());
-  Matrix<Scalar,Dynamic,Dynamic,ColMajor> tmp;
-  
-  // Pardiso cannot solve in-place
-  if(rhs_ptr == x.derived().data())
-  {
-    tmp = b;
-    rhs_ptr = tmp.data();
-  }
-  
-  Index error;
-  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 33, m_size,
-                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
-                                                     m_perm.data(), nrhs, m_iparm.data(), m_msglvl,
-                                                     rhs_ptr, x.derived().data());
-
-  return error==0;
-}
-
-
-/** \ingroup PardisoSupport_Module
-  * \class PardisoLU
-  * \brief A sparse direct LU factorization and solver based on the PARDISO library
-  *
-  * This class allows solving A.X = B sparse linear problems via a direct LU factorization
-  * using the Intel MKL PARDISO library. The sparse matrix A must be square and invertible.
-  * The vectors or matrices X and B can be either dense or sparse.
-  *
-  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
-  *
-  * \sa \ref TutorialSparseDirectSolvers
-  */
-template<typename MatrixType>
-class PardisoLU : public PardisoImpl< PardisoLU<MatrixType> >
-{
-  protected:
-    typedef PardisoImpl< PardisoLU<MatrixType> > Base;
-    typedef typename Base::Scalar Scalar;
-    typedef typename Base::RealScalar RealScalar;
-    using Base::pardisoInit;
-    using Base::m_matrix;
-    friend class PardisoImpl< PardisoLU<MatrixType> >;
-
-  public:
-
-    using Base::compute;
-    using Base::solve;
-
-    PardisoLU()
-      : Base()
-    {
-      pardisoInit(Base::ScalarIsComplex ? 13 : 11);
-    }
-
-    PardisoLU(const MatrixType& matrix)
-      : Base()
-    {
-      pardisoInit(Base::ScalarIsComplex ? 13 : 11);
-      compute(matrix);
-    }
-  protected:
-    void getMatrix(const MatrixType& matrix)
-    {
-      m_matrix = matrix;
-    }
-    
-  private:
-    PardisoLU(PardisoLU& ) {}
-};
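-
-/* A minimal usage sketch, assuming MKL PARDISO is linked, A is a square, invertible
-   Eigen::SparseMatrix<double>, and b is a dense right hand side:
-
-     #include <Eigen/SparseCore>
-     #include <Eigen/PardisoSupport>
-
-     Eigen::PardisoLU<Eigen::SparseMatrix<double> > lu(A);   // analyzes and factorizes A
-     if(lu.info() == Eigen::Success)
-     {
-       Eigen::VectorXd x = lu.solve(b);                      // solves A x = b
-     }
-*/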
-
-/** \ingroup PardisoSupport_Module
-  * \class PardisoLLT
-  * \brief A sparse direct Cholesky (LLT) factorization and solver based on the PARDISO library
-  *
-  * This class allows solving A.X = B sparse linear problems via an LL^T Cholesky factorization
-  * using the Intel MKL PARDISO library. The sparse matrix A must be selfadjoint and positive definite.
-  * The vectors or matrices X and B can be either dense or sparse.
-  *
-  * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
-  * \tparam UpLo can be any bitwise combination of Upper, Lower. The default is Upper, meaning only the upper triangular part is used.
-  *         Upper|Lower can be used to indicate that both triangular parts can be used as input.
-  *
-  * \sa \ref TutorialSparseDirectSolvers
-  */
-template<typename MatrixType, int _UpLo>
-class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
-{
-  protected:
-    typedef PardisoImpl< PardisoLLT<MatrixType,_UpLo> > Base;
-    typedef typename Base::Scalar Scalar;
-    typedef typename Base::Index Index;
-    typedef typename Base::RealScalar RealScalar;
-    using Base::pardisoInit;
-    using Base::m_matrix;
-    friend class PardisoImpl< PardisoLLT<MatrixType,_UpLo> >;
-
-  public:
-
-    enum { UpLo = _UpLo };
-    using Base::compute;
-    using Base::solve;
-
-    PardisoLLT()
-      : Base()
-    {
-      pardisoInit(Base::ScalarIsComplex ? 4 : 2);
-    }
-
-    PardisoLLT(const MatrixType& matrix)
-      : Base()
-    {
-      pardisoInit(Base::ScalarIsComplex ? 4 : 2);
-      compute(matrix);
-    }
-    
-  protected:
-    
-    void getMatrix(const MatrixType& matrix)
-    {
-      // PARDISO supports only upper, row-major matrices
-      PermutationMatrix<Dynamic,Dynamic,Index> p_null;
-      m_matrix.resize(matrix.rows(), matrix.cols());
-      m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
-    }
-    
-  private:
-    PardisoLLT(PardisoLLT& ) {}
-};
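-
-/* A minimal sketch, assuming A is a selfadjoint, positive definite Eigen::SparseMatrix<double>
-   of which only the lower triangular part is stored, and b is a dense right hand side:
-
-     Eigen::PardisoLLT<Eigen::SparseMatrix<double>, Eigen::Lower> llt;
-     llt.compute(A);
-     Eigen::VectorXd x = llt.solve(b);
-*/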
-
-/** \ingroup PardisoSupport_Module
-  * \class PardisoLDLT
-  * \brief A sparse direct Cholesky (LDLT) factorization and solver based on the PARDISO library
-  *
-  * This class allows solving A.X = B sparse linear problems via an LDL^T Cholesky factorization
-  * using the Intel MKL PARDISO library. The sparse matrix A is assumed to be selfadjoint and positive definite.
-  * For complex matrices, A can also be merely symmetric; see the \a Options template parameter.
-  * The vectors or matrices X and B can be either dense or sparse.
-  *
-  * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
-  * \tparam Options can be any bitwise combination of Upper, Lower, and Symmetric. The default is Upper, meaning only the upper triangular part is used.
-  *         Symmetric can be used for symmetric, non-selfadjoint complex matrices, the default being to assume a selfadjoint matrix.
-  *         Upper|Lower can be used to indicate that both triangular parts can be used as input.
-  *
-  * \sa \ref TutorialSparseDirectSolvers
-  */
-template<typename MatrixType, int Options>
-class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >
-{
-  protected:
-    typedef PardisoImpl< PardisoLDLT<MatrixType,Options> > Base;
-    typedef typename Base::Scalar Scalar;
-    typedef typename Base::Index Index;
-    typedef typename Base::RealScalar RealScalar;
-    using Base::pardisoInit;
-    using Base::m_matrix;
-    friend class PardisoImpl< PardisoLDLT<MatrixType,Options> >;
-
-  public:
-
-    using Base::compute;
-    using Base::solve;
-    enum { UpLo = Options&(Upper|Lower) };
-
-    PardisoLDLT()
-      : Base()
-    {
-      pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2);
-    }
-
-    PardisoLDLT(const MatrixType& matrix)
-      : Base()
-    {
-      pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2);
-      compute(matrix);
-    }
-    
-    void getMatrix(const MatrixType& matrix)
-    {
-      // PARDISO supports only upper, row-major matrices
-      PermutationMatrix<Dynamic,Dynamic,Index> p_null;
-      m_matrix.resize(matrix.rows(), matrix.cols());
-      m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
-    }
-    
-  private:
-    PardisoLDLT(PardisoLDLT& ) {}
-};
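-
-/* A minimal sketch following the \a Options description above, assuming a complex matrix that is
-   symmetric but not selfadjoint, with only its upper triangular part stored, and a dense rhs b:
-
-     typedef Eigen::SparseMatrix<std::complex<double> > SpCplx;
-     Eigen::PardisoLDLT<SpCplx, Eigen::Symmetric | Eigen::Upper> ldlt(A);
-     Eigen::VectorXcd x = ldlt.solve(b);
-*/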
-
-namespace internal {
-  
-template<typename _Derived, typename Rhs>
-struct solve_retval<PardisoImpl<_Derived>, Rhs>
-  : solve_retval_base<PardisoImpl<_Derived>, Rhs>
-{
-  typedef PardisoImpl<_Derived> Dec;
-  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
-  template<typename Dest> void evalTo(Dest& dst) const
-  {
-    dec()._solve(rhs(),dst);
-  }
-};
-
-template<typename Derived, typename Rhs>
-struct sparse_solve_retval<PardisoImpl<Derived>, Rhs>
-  : sparse_solve_retval_base<PardisoImpl<Derived>, Rhs>
-{
-  typedef PardisoImpl<Derived> Dec;
-  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
-
-  template<typename Dest> void evalTo(Dest& dst) const
-  {
-    dec().derived()._solve_sparse(rhs(),dst);
-  }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_PARDISOSUPPORT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/QR/HouseholderQR.h b/resources/3rdparty/eigen/Eigen/src/QR/HouseholderQR.h
deleted file mode 100644
index c45d697f7..000000000
--- a/resources/3rdparty/eigen/Eigen/src/QR/HouseholderQR.h
+++ /dev/null
@@ -1,351 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2010 Vincent Lejeune
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_QR_H
-#define EIGEN_QR_H
-
-namespace Eigen { 
-
-/** \ingroup QR_Module
-  *
-  *
-  * \class HouseholderQR
-  *
-  * \brief Householder QR decomposition of a matrix
-  *
-  * \param MatrixType the type of the matrix of which we are computing the QR decomposition
-  *
-  * This class performs a QR decomposition of a matrix \b A into matrices \b Q and \b R
-  * such that 
-  * \f[
-  *  \mathbf{A} = \mathbf{Q} \, \mathbf{R}
-  * \f]
-  * by using Householder transformations. Here, \b Q is a unitary matrix and \b R an upper triangular matrix.
-  * The result is stored in a compact way compatible with LAPACK.
-  *
-  * Note that no pivoting is performed. This is \b not a rank-revealing decomposition.
-  * If you want that feature, use FullPivHouseholderQR or ColPivHouseholderQR instead.
-  *
-  * This Householder QR decomposition is faster, but less numerically stable and less full-featured than
-  * FullPivHouseholderQR or ColPivHouseholderQR.
-  *
-  * \sa MatrixBase::householderQr()
-  */
-template<typename _MatrixType> class HouseholderQR
-{
-  public:
-
-    typedef _MatrixType MatrixType;
-    enum {
-      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-      Options = MatrixType::Options,
-      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
-    };
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename MatrixType::RealScalar RealScalar;
-    typedef typename MatrixType::Index Index;
-    typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
-    typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
-    typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
-    typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType;
-
-    /**
-    * \brief Default Constructor.
-    *
-    * The default constructor is useful in cases in which the user intends to
-    * perform decompositions via HouseholderQR::compute(const MatrixType&).
-    */
-    HouseholderQR() : m_qr(), m_hCoeffs(), m_temp(), m_isInitialized(false) {}
-
-    /** \brief Default Constructor with memory preallocation
-      *
-      * Like the default constructor but with preallocation of the internal data
-      * according to the specified problem \a size.
-      * \sa HouseholderQR()
-      */
-    HouseholderQR(Index rows, Index cols)
-      : m_qr(rows, cols),
-        m_hCoeffs((std::min)(rows,cols)),
-        m_temp(cols),
-        m_isInitialized(false) {}
-
-    HouseholderQR(const MatrixType& matrix)
-      : m_qr(matrix.rows(), matrix.cols()),
-        m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
-        m_temp(matrix.cols()),
-        m_isInitialized(false)
-    {
-      compute(matrix);
-    }
-
-    /** This method finds a solution x to the equation Ax=b, where A is the matrix of which
-      * *this is the QR decomposition, if any exists.
-      *
-      * \param b the right-hand-side of the equation to solve.
-      *
-      * \returns a solution.
-      *
-      * \note The case where b is a matrix is not yet implemented. Also, this
-      *       code is space inefficient.
-      *
-      * \note_about_checking_solutions
-      *
-      * \note_about_arbitrary_choice_of_solution
-      *
-      * Example: \include HouseholderQR_solve.cpp
-      * Output: \verbinclude HouseholderQR_solve.out
-      */
-    template<typename Rhs>
-    inline const internal::solve_retval<HouseholderQR, Rhs>
-    solve(const MatrixBase<Rhs>& b) const
-    {
-      eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
-      return internal::solve_retval<HouseholderQR, Rhs>(*this, b.derived());
-    }
-
-    /** This method returns an expression of the unitary matrix Q as a sequence of Householder transformations.
-      *
-      * The returned expression can directly be used to perform matrix products. It can also be assigned to a dense Matrix object.
-      * Here is an example showing how to recover the full or thin matrix Q, as well as how to perform matrix products using operator*:
-      *
-      * Example: \include HouseholderQR_householderQ.cpp
-      * Output: \verbinclude HouseholderQR_householderQ.out
-      */
-    HouseholderSequenceType householderQ() const
-    {
-      eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
-      return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
-    }
-
-    /** \returns a reference to the matrix where the Householder QR decomposition is stored
-      * in a LAPACK-compatible way.
-      */
-    const MatrixType& matrixQR() const
-    {
-        eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
-        return m_qr;
-    }
-
-    HouseholderQR& compute(const MatrixType& matrix);
-
-    /** \returns the absolute value of the determinant of the matrix of which
-      * *this is the QR decomposition. It has only linear complexity
-      * (that is, O(n) where n is the dimension of the square matrix)
-      * as the QR decomposition has already been computed.
-      *
-      * \note This is only for square matrices.
-      *
-      * \warning a determinant can be very big or small, so for matrices
-      * of large enough dimension, there is a risk of overflow/underflow.
-      * One way to work around that is to use logAbsDeterminant() instead.
-      *
-      * \sa logAbsDeterminant(), MatrixBase::determinant()
-      */
-    typename MatrixType::RealScalar absDeterminant() const;
-
-    /** \returns the natural log of the absolute value of the determinant of the matrix of which
-      * *this is the QR decomposition. It has only linear complexity
-      * (that is, O(n) where n is the dimension of the square matrix)
-      * as the QR decomposition has already been computed.
-      *
-      * \note This is only for square matrices.
-      *
-      * \note This method is useful to work around the risk of overflow/underflow that's inherent
-      * to determinant computation.
-      *
-      * \sa absDeterminant(), MatrixBase::determinant()
-      */
-    typename MatrixType::RealScalar logAbsDeterminant() const;
-
-    inline Index rows() const { return m_qr.rows(); }
-    inline Index cols() const { return m_qr.cols(); }
-    const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
-
-  protected:
-    MatrixType m_qr;
-    HCoeffsType m_hCoeffs;
-    RowVectorType m_temp;
-    bool m_isInitialized;
-};
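-
-/* A minimal usage sketch, assuming a square dense MatrixXd:
-
-     Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
-     Eigen::VectorXd b = Eigen::VectorXd::Random(3);
-     Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);
-     Eigen::VectorXd x = qr.solve(b);                                     // solution of A x = b
-     Eigen::MatrixXd Q = qr.householderQ();                               // unitary factor
-     Eigen::MatrixXd R = qr.matrixQR().triangularView<Eigen::Upper>();    // A = Q * R
-
-   Equivalently, the decomposition can be obtained via A.householderQr().
-*/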
-
-template<typename MatrixType>
-typename MatrixType::RealScalar HouseholderQR<MatrixType>::absDeterminant() const
-{
-  eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
-  eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
-  return internal::abs(m_qr.diagonal().prod());
-}
-
-template<typename MatrixType>
-typename MatrixType::RealScalar HouseholderQR<MatrixType>::logAbsDeterminant() const
-{
-  eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
-  eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
-  return m_qr.diagonal().cwiseAbs().array().log().sum();
-}
-
-namespace internal {
-
-/** \internal */
-template<typename MatrixQR, typename HCoeffs>
-void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0)
-{
-  typedef typename MatrixQR::Index Index;
-  typedef typename MatrixQR::Scalar Scalar;
-  typedef typename MatrixQR::RealScalar RealScalar;
-  Index rows = mat.rows();
-  Index cols = mat.cols();
-  Index size = (std::min)(rows,cols);
-
-  eigen_assert(hCoeffs.size() == size);
-
-  typedef Matrix<Scalar,MatrixQR::ColsAtCompileTime,1> TempType;
-  TempType tempVector;
-  if(tempData==0)
-  {
-    tempVector.resize(cols);
-    tempData = tempVector.data();
-  }
-
-  for(Index k = 0; k < size; ++k)
-  {
-    Index remainingRows = rows - k;
-    Index remainingCols = cols - k - 1;
-
-    RealScalar beta;
-    mat.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeffs.coeffRef(k), beta);
-    mat.coeffRef(k,k) = beta;
-
-    // apply H to remaining part of m_qr from the left
-    mat.bottomRightCorner(remainingRows, remainingCols)
-        .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), hCoeffs.coeffRef(k), tempData+k+1);
-  }
-}
-
-/** \internal */
-template<typename MatrixQR, typename HCoeffs>
-void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,
-                                       typename MatrixQR::Index maxBlockSize=32,
-                                       typename MatrixQR::Scalar* tempData = 0)
-{
-  typedef typename MatrixQR::Index Index;
-  typedef typename MatrixQR::Scalar Scalar;
-  typedef typename MatrixQR::RealScalar RealScalar;
-  typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;
-
-  Index rows = mat.rows();
-  Index cols = mat.cols();
-  Index size = (std::min)(rows, cols);
-
-  typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
-  TempType tempVector;
-  if(tempData==0)
-  {
-    tempVector.resize(cols);
-    tempData = tempVector.data();
-  }
-
-  Index blockSize = (std::min)(maxBlockSize,size);
-
-  Index k = 0;
-  for (k = 0; k < size; k += blockSize)
-  {
-    Index bs = (std::min)(size-k,blockSize);  // actual size of the block
-    Index tcols = cols - k - bs;            // trailing columns
-    Index brows = rows-k;                   // rows of the block
-
-    // partition the matrix:
-    //        A00 | A01 | A02
-    // mat  = A10 | A11 | A12
-    //        A20 | A21 | A22
-    // and performs the QR decomposition of the panel [A11^T A21^T]^T
-    // and updates the trailing block [A12^T A22^T]^T using level 3 operations.
-    // Finally, the algorithm continues on A22.
-
-    BlockType A11_21 = mat.block(k,k,brows,bs);
-    Block<HCoeffs,Dynamic,1> hCoeffsSegment = hCoeffs.segment(k,bs);
-
-    householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData);
-
-    if(tcols)
-    {
-      BlockType A21_22 = mat.block(k,k+bs,brows,tcols);
-      apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment.adjoint());
-    }
-  }
-}
-
-template<typename _MatrixType, typename Rhs>
-struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
-  : solve_retval_base<HouseholderQR<_MatrixType>, Rhs>
-{
-  EIGEN_MAKE_SOLVE_HELPERS(HouseholderQR<_MatrixType>,Rhs)
-
-  template<typename Dest> void evalTo(Dest& dst) const
-  {
-    const Index rows = dec().rows(), cols = dec().cols();
-    const Index rank = (std::min)(rows, cols);
-    eigen_assert(rhs().rows() == rows);
-
-    typename Rhs::PlainObject c(rhs());
-
-    // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
-    c.applyOnTheLeft(householderSequence(
-      dec().matrixQR().leftCols(rank),
-      dec().hCoeffs().head(rank)).transpose()
-    );
-
-    dec().matrixQR()
-       .topLeftCorner(rank, rank)
-       .template triangularView<Upper>()
-       .solveInPlace(c.topRows(rank));
-
-    dst.topRows(rank) = c.topRows(rank);
-    dst.bottomRows(cols-rank).setZero();
-  }
-};
-
-} // end namespace internal
-
-template<typename MatrixType>
-HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
-{
-  Index rows = matrix.rows();
-  Index cols = matrix.cols();
-  Index size = (std::min)(rows,cols);
-
-  m_qr = matrix;
-  m_hCoeffs.resize(size);
-
-  m_temp.resize(cols);
-
-  internal::householder_qr_inplace_blocked(m_qr, m_hCoeffs, 48, m_temp.data());
-
-  m_isInitialized = true;
-  return *this;
-}
-
-/** \return the Householder QR decomposition of \c *this.
-  *
-  * \sa class HouseholderQR
-  */
-template<typename Derived>
-const HouseholderQR<typename MatrixBase<Derived>::PlainObject>
-MatrixBase<Derived>::householderQr() const
-{
-  return HouseholderQR<PlainObject>(eval());
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_QR_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SVD/JacobiSVD.h b/resources/3rdparty/eigen/Eigen/src/SVD/JacobiSVD.h
deleted file mode 100644
index 4d525beb5..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SVD/JacobiSVD.h
+++ /dev/null
@@ -1,869 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_JACOBISVD_H
-#define EIGEN_JACOBISVD_H
-
-namespace Eigen { 
-
-namespace internal {
-// forward declaration (needed by ICC)
-// the empty body is required by MSVC
-template<typename MatrixType, int QRPreconditioner,
-         bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
-struct svd_precondition_2x2_block_to_be_real {};
-
-/*** QR preconditioners (R-SVD)
- ***
- *** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
- *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
- *** JacobiSVD which by itself is only able to work on square matrices.
- ***/
-
-enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
-
-template<typename MatrixType, int QRPreconditioner, int Case>
-struct qr_preconditioner_should_do_anything
-{
-  enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
-             MatrixType::ColsAtCompileTime != Dynamic &&
-             MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
-         b = MatrixType::RowsAtCompileTime != Dynamic &&
-             MatrixType::ColsAtCompileTime != Dynamic &&
-             MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
-         ret = !( (QRPreconditioner == NoQRPreconditioner) ||
-                  (Case == PreconditionIfMoreColsThanRows && bool(a)) ||
-                  (Case == PreconditionIfMoreRowsThanCols && bool(b)) )
-  };
-};
-
-template<typename MatrixType, int QRPreconditioner, int Case,
-         bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
-> struct qr_preconditioner_impl {};
-
-template<typename MatrixType, int QRPreconditioner, int Case>
-class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
-{
-public:
-  typedef typename MatrixType::Index Index;
-  void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
-  bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
-  {
-    return false;
-  }
-};
-
-/*** preconditioner using FullPivHouseholderQR ***/
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
-{
-public:
-  typedef typename MatrixType::Index Index;
-  typedef typename MatrixType::Scalar Scalar;
-  enum
-  {
-    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
-  };
-  typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
-
-  void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
-  {
-    if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
-    {
-      m_qr = FullPivHouseholderQR<MatrixType>(svd.rows(), svd.cols());
-    }
-    if (svd.m_computeFullU) m_workspace.resize(svd.rows());
-  }
-
-  bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
-  {
-    if(matrix.rows() > matrix.cols())
-    {
-      m_qr.compute(matrix);
-      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
-      if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
-      if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
-      return true;
-    }
-    return false;
-  }
-private:
-  FullPivHouseholderQR<MatrixType> m_qr;
-  WorkspaceType m_workspace;
-};
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
-{
-public:
-  typedef typename MatrixType::Index Index;
-  typedef typename MatrixType::Scalar Scalar;
-  enum
-  {
-    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
-    Options = MatrixType::Options
-  };
-  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
-          TransposeTypeWithSameStorageOrder;
-
-  void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
-  {
-    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
-    {
-      m_qr = FullPivHouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
-    }
-    m_adjoint.resize(svd.cols(), svd.rows());
-    if (svd.m_computeFullV) m_workspace.resize(svd.cols());
-  }
-
-  bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
-  {
-    if(matrix.cols() > matrix.rows())
-    {
-      m_adjoint = matrix.adjoint();
-      m_qr.compute(m_adjoint);
-      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
-      if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
-      if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
-      return true;
-    }
-    else return false;
-  }
-private:
-  FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
-  TransposeTypeWithSameStorageOrder m_adjoint;
-  typename internal::plain_row_type<MatrixType>::type m_workspace;
-};
-
-/*** preconditioner using ColPivHouseholderQR ***/
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
-{
-public:
-  typedef typename MatrixType::Index Index;
-
-  void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
-  {
-    if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
-    {
-      m_qr = ColPivHouseholderQR<MatrixType>(svd.rows(), svd.cols());
-    }
-    if (svd.m_computeFullU) m_workspace.resize(svd.rows());
-    else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
-  }
-
-  bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
-  {
-    if(matrix.rows() > matrix.cols())
-    {
-      m_qr.compute(matrix);
-      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
-      if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
-      else if(svd.m_computeThinU)
-      {
-        svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
-        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
-      }
-      if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
-      return true;
-    }
-    return false;
-  }
-
-private:
-  ColPivHouseholderQR<MatrixType> m_qr;
-  typename internal::plain_col_type<MatrixType>::type m_workspace;
-};
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
-{
-public:
-  typedef typename MatrixType::Index Index;
-  typedef typename MatrixType::Scalar Scalar;
-  enum
-  {
-    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
-    Options = MatrixType::Options
-  };
-
-  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
-          TransposeTypeWithSameStorageOrder;
-
-  void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
-  {
-    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
-    {
-      m_qr = ColPivHouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
-    }
-    if (svd.m_computeFullV) m_workspace.resize(svd.cols());
-    else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
-    m_adjoint.resize(svd.cols(), svd.rows());
-  }
-
-  bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
-  {
-    if(matrix.cols() > matrix.rows())
-    {
-      m_adjoint = matrix.adjoint();
-      m_qr.compute(m_adjoint);
-
-      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
-      if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
-      else if(svd.m_computeThinV)
-      {
-        svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
-        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
-      }
-      if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
-      return true;
-    }
-    else return false;
-  }
-
-private:
-  ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
-  TransposeTypeWithSameStorageOrder m_adjoint;
-  typename internal::plain_row_type<MatrixType>::type m_workspace;
-};
-
-/*** preconditioner using HouseholderQR ***/
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
-{
-public:
-  typedef typename MatrixType::Index Index;
-
-  void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
-  {
-    if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
-    {
-      m_qr = HouseholderQR<MatrixType>(svd.rows(), svd.cols());
-    }
-    if (svd.m_computeFullU) m_workspace.resize(svd.rows());
-    else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
-  }
-
-  bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
-  {
-    if(matrix.rows() > matrix.cols())
-    {
-      m_qr.compute(matrix);
-      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
-      if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
-      else if(svd.m_computeThinU)
-      {
-        svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
-        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
-      }
-      if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
-      return true;
-    }
-    return false;
-  }
-private:
-  HouseholderQR<MatrixType> m_qr;
-  typename internal::plain_col_type<MatrixType>::type m_workspace;
-};
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
-{
-public:
-  typedef typename MatrixType::Index Index;
-  typedef typename MatrixType::Scalar Scalar;
-  enum
-  {
-    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
-    Options = MatrixType::Options
-  };
-
-  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
-          TransposeTypeWithSameStorageOrder;
-
-  void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
-  {
-    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
-    {
-      m_qr = HouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
-    }
-    if (svd.m_computeFullV) m_workspace.resize(svd.cols());
-    else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
-    m_adjoint.resize(svd.cols(), svd.rows());
-  }
-
-  bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
-  {
-    if(matrix.cols() > matrix.rows())
-    {
-      m_adjoint = matrix.adjoint();
-      m_qr.compute(m_adjoint);
-
-      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
-      if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
-      else if(svd.m_computeThinV)
-      {
-        svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
-        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
-      }
-      if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
-      return true;
-    }
-    else return false;
-  }
-
-private:
-  HouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
-  TransposeTypeWithSameStorageOrder m_adjoint;
-  typename internal::plain_row_type<MatrixType>::type m_workspace;
-};
-
-/*** 2x2 SVD implementation
- ***
- *** JacobiSVD consists in performing a series of 2x2 SVD subproblems
- ***/
-
-template<typename MatrixType, int QRPreconditioner>
-struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
-{
-  typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
-  typedef typename SVD::Index Index;
-  static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
-};
-
-template<typename MatrixType, int QRPreconditioner>
-struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
-{
-  typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
-  typedef typename MatrixType::Scalar Scalar;
-  typedef typename MatrixType::RealScalar RealScalar;
-  typedef typename SVD::Index Index;
-  static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
-  {
-    Scalar z;
-    JacobiRotation<Scalar> rot;
-    RealScalar n = sqrt(abs2(work_matrix.coeff(p,p)) + abs2(work_matrix.coeff(q,p)));
-    if(n==0)
-    {
-      z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
-      work_matrix.row(p) *= z;
-      if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
-      z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
-      work_matrix.row(q) *= z;
-      if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
-    }
-    else
-    {
-      rot.c() = conj(work_matrix.coeff(p,p)) / n;
-      rot.s() = work_matrix.coeff(q,p) / n;
-      work_matrix.applyOnTheLeft(p,q,rot);
-      if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
-      if(work_matrix.coeff(p,q) != Scalar(0))
-      {
-        Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
-        work_matrix.col(q) *= z;
-        if(svd.computeV()) svd.m_matrixV.col(q) *= z;
-      }
-      if(work_matrix.coeff(q,q) != Scalar(0))
-      {
-        z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
-        work_matrix.row(q) *= z;
-        if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
-      }
-    }
-  }
-};
-
-template<typename MatrixType, typename RealScalar, typename Index>
-void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
-                            JacobiRotation<RealScalar> *j_left,
-                            JacobiRotation<RealScalar> *j_right)
-{
-  Matrix<RealScalar,2,2> m;
-  m << real(matrix.coeff(p,p)), real(matrix.coeff(p,q)),
-       real(matrix.coeff(q,p)), real(matrix.coeff(q,q));
-  JacobiRotation<RealScalar> rot1;
-  RealScalar t = m.coeff(0,0) + m.coeff(1,1);
-  RealScalar d = m.coeff(1,0) - m.coeff(0,1);
-  if(t == RealScalar(0))
-  {
-    rot1.c() = RealScalar(0);
-    rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
-  }
-  else
-  {
-    RealScalar u = d / t;
-    rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + abs2(u));
-    rot1.s() = rot1.c() * u;
-  }
-  m.applyOnTheLeft(0,1,rot1);
-  j_right->makeJacobi(m,0,1);
-  *j_left  = rot1 * j_right->transpose();
-}
-
-} // end namespace internal
-
-/** \ingroup SVD_Module
-  *
-  *
-  * \class JacobiSVD
-  *
-  * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
-  *
-  * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
-  * \param QRPreconditioner this optional parameter allows specifying the type of QR decomposition that will be used internally
-  *                        for the R-SVD step for non-square matrices. See discussion of possible values below.
-  *
-  * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
-  *   \f[ A = U S V^* \f]
-  * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
-  * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
-  * and right \em singular \em vectors of \a A respectively.
-  *
-  * Singular values are always sorted in decreasing order.
-  *
-  * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
-  *
-  * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
-  * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
-  * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
-  * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
-  *
-  * Here's an example demonstrating basic usage:
-  * \include JacobiSVD_basic.cpp
-  * Output: \verbinclude JacobiSVD_basic.out
-  *
-  * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
-  * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
-  * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
-  * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
-  *
-  * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
-  * terminate in finite (and reasonable) time.
-  *
-  * The possible values for QRPreconditioner are:
-  * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
-  * \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
-  *     Contrary to other QRs, it doesn't allow computing thin unitaries.
-  * \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
-  *     This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
-  *     is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
-  *     process is more reliable than the optimized bidiagonal SVD iterations.
-  * \li NoQRPreconditioner allows not using a QR preconditioner at all. This is useful if you know that you will only be computing
-  *     JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
-  *     faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
-  *     if QR preconditioning is needed before applying it anyway.
-  *
-  * \sa MatrixBase::jacobiSvd()
-  */
-template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
-{
-  public:
-
-    typedef _MatrixType MatrixType;
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
-    typedef typename MatrixType::Index Index;
-    enum {
-      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
-      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-      DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
-      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
-      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
-      MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
-      MatrixOptions = MatrixType::Options
-    };
-
-    typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
-                   MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
-            MatrixUType;
-    typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
-                   MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
-            MatrixVType;
-    typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
-    typedef typename internal::plain_row_type<MatrixType>::type RowType;
-    typedef typename internal::plain_col_type<MatrixType>::type ColType;
-    typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
-                   MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
-            WorkMatrixType;
-
-    /** \brief Default Constructor.
-      *
-      * The default constructor is useful in cases in which the user intends to
-      * perform decompositions via JacobiSVD::compute(const MatrixType&).
-      */
-    JacobiSVD()
-      : m_isInitialized(false),
-        m_isAllocated(false),
-        m_computationOptions(0),
-        m_rows(-1), m_cols(-1)
-    {}
-
-
-    /** \brief Default Constructor with memory preallocation
-      *
-      * Like the default constructor but with preallocation of the internal data
-      * according to the specified problem size.
-      * \sa JacobiSVD()
-      */
-    JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
-      : m_isInitialized(false),
-        m_isAllocated(false),
-        m_computationOptions(0),
-        m_rows(-1), m_cols(-1)
-    {
-      allocate(rows, cols, computationOptions);
-    }
-
-    /** \brief Constructor performing the decomposition of given matrix.
-     *
-     * \param matrix the matrix to decompose
-     * \param computationOptions optional parameter allowing you to specify whether full or thin U or V unitaries should be computed.
-     *                           By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
-     *                           #ComputeFullV, #ComputeThinV.
-     *
-     * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
-     * available with the (non-default) FullPivHouseholderQR preconditioner.
-     */
-    JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
-      : m_isInitialized(false),
-        m_isAllocated(false),
-        m_computationOptions(0),
-        m_rows(-1), m_cols(-1)
-    {
-      compute(matrix, computationOptions);
-    }
-
-    /** \brief Method performing the decomposition of given matrix using custom options.
-     *
-     * \param matrix the matrix to decompose
-     * \param computationOptions optional parameter allowing you to specify whether full or thin U or V unitaries should be computed.
-     *                           By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
-     *                           #ComputeFullV, #ComputeThinV.
-     *
-     * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
-     * available with the (non-default) FullPivHouseholderQR preconditioner.
-     */
-    JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
-
-    /** \brief Method performing the decomposition of given matrix using current options.
-     *
-     * \param matrix the matrix to decompose
-     *
-     * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
-     */
-    JacobiSVD& compute(const MatrixType& matrix)
-    {
-      return compute(matrix, m_computationOptions);
-    }
-
-    /** \returns the \a U matrix.
-     *
-     * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
-     * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU.
-     *
-     * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
-     *
-     * This method asserts that you asked for \a U to be computed.
-     */
-    const MatrixUType& matrixU() const
-    {
-      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
-      eigen_assert(computeU() && "This JacobiSVD decomposition didn't compute U. Did you ask for it?");
-      return m_matrixU;
-    }
-
-    /** \returns the \a V matrix.
-     *
-     * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
-     * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for ComputeThinV.
-     *
-     * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
-     *
-     * This method asserts that you asked for \a V to be computed.
-     */
-    const MatrixVType& matrixV() const
-    {
-      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
-      eigen_assert(computeV() && "This JacobiSVD decomposition didn't compute V. Did you ask for it?");
-      return m_matrixV;
-    }
-
-    /** \returns the vector of singular values.
-     *
-     * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
-     * returned vector has size \a m.  Singular values are always sorted in decreasing order.
-     */
-    const SingularValuesType& singularValues() const
-    {
-      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
-      return m_singularValues;
-    }
-
-    /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
-    inline bool computeU() const { return m_computeFullU || m_computeThinU; }
-    /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
-    inline bool computeV() const { return m_computeFullV || m_computeThinV; }
-
-    /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
-      *
-      * \param b the right-hand-side of the equation to solve.
-      *
-      * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
-      *
-      * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
-      * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
-      */
-    template<typename Rhs>
-    inline const internal::solve_retval<JacobiSVD, Rhs>
-    solve(const MatrixBase<Rhs>& b) const
-    {
-      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
-      eigen_assert(computeU() && computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
-      return internal::solve_retval<JacobiSVD, Rhs>(*this, b.derived());
-    }
-
-    /** \returns the number of singular values that are not exactly 0 */
-    Index nonzeroSingularValues() const
-    {
-      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
-      return m_nonzeroSingularValues;
-    }
-
-    inline Index rows() const { return m_rows; }
-    inline Index cols() const { return m_cols; }
-
-  private:
-    void allocate(Index rows, Index cols, unsigned int computationOptions);
-
-  protected:
-    MatrixUType m_matrixU;
-    MatrixVType m_matrixV;
-    SingularValuesType m_singularValues;
-    WorkMatrixType m_workMatrix;
-    bool m_isInitialized, m_isAllocated;
-    bool m_computeFullU, m_computeThinU;
-    bool m_computeFullV, m_computeThinV;
-    unsigned int m_computationOptions;
-    Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
-
-    template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
-    friend struct internal::svd_precondition_2x2_block_to_be_real;
-    template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
-    friend struct internal::qr_preconditioner_impl;
-
-    internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
-    internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
-};
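-
-/* A minimal usage sketch, assuming a dense MatrixXf; thin U and V are sufficient for solving:
-
-     Eigen::MatrixXf A = Eigen::MatrixXf::Random(4,3);
-     Eigen::VectorXf b = Eigen::VectorXf::Random(4);
-     Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
-     Eigen::VectorXf sv = svd.singularValues();   // always sorted in decreasing order
-     Eigen::VectorXf x  = svd.solve(b);           // least squares solution of A x = b
-
-   The optional second template argument selects the QR preconditioner, e.g.
-   Eigen::JacobiSVD<Eigen::MatrixXf, Eigen::NoQRPreconditioner> for square matrices only.
-*/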
-
-template<typename MatrixType, int QRPreconditioner>
-void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
-{
-  eigen_assert(rows >= 0 && cols >= 0);
-
-  if (m_isAllocated &&
-      rows == m_rows &&
-      cols == m_cols &&
-      computationOptions == m_computationOptions)
-  {
-    return;
-  }
-
-  m_rows = rows;
-  m_cols = cols;
-  m_isInitialized = false;
-  m_isAllocated = true;
-  m_computationOptions = computationOptions;
-  m_computeFullU = (computationOptions & ComputeFullU) != 0;
-  m_computeThinU = (computationOptions & ComputeThinU) != 0;
-  m_computeFullV = (computationOptions & ComputeFullV) != 0;
-  m_computeThinV = (computationOptions & ComputeThinV) != 0;
-  eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
-  eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
-  eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
-              "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
-  if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
-  {
-      eigen_assert(!(m_computeThinU || m_computeThinV) &&
-              "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
-              "Use the ColPivHouseholderQR preconditioner instead.");
-  }
-  m_diagSize = (std::min)(m_rows, m_cols);
-  m_singularValues.resize(m_diagSize);
-  if(RowsAtCompileTime==Dynamic)
-    m_matrixU.resize(m_rows, m_computeFullU ? m_rows
-                            : m_computeThinU ? m_diagSize
-                            : 0);
-  if(ColsAtCompileTime==Dynamic)
-    m_matrixV.resize(m_cols, m_computeFullV ? m_cols
-                            : m_computeThinV ? m_diagSize
-                            : 0);
-  m_workMatrix.resize(m_diagSize, m_diagSize);
-  
-  if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
-  if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
-}
-
-template<typename MatrixType, int QRPreconditioner>
-JacobiSVD<MatrixType, QRPreconditioner>&
-JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
-{
-  allocate(matrix.rows(), matrix.cols(), computationOptions);
-
-  // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
-  // only worsening the precision of U and V as we accumulate more rotations
-  const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
-
-  // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
-  const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
-
-  /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
-
-  if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix))
-  {
-    m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize);
-    if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
-    if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
-    if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
-    if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
-  }
-
-  /*** step 2. The main Jacobi SVD iteration. ***/
-
-  bool finished = false;
-  while(!finished)
-  {
-    finished = true;
-
-    // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
-
-    for(Index p = 1; p < m_diagSize; ++p)
-    {
-      for(Index q = 0; q < p; ++q)
-      {
-        // if this 2x2 sub-matrix is not diagonal already...
-        // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
-        // keep us iterating forever. Similarly, small denormal numbers are considered zero.
-        using std::max;
-        RealScalar threshold = (max)(considerAsZero, precision * (max)(internal::abs(m_workMatrix.coeff(p,p)),
-                                                                       internal::abs(m_workMatrix.coeff(q,q))));
-        if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p))) > threshold)
-        {
-          finished = false;
-
-          // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
-          internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
-          JacobiRotation<RealScalar> j_left, j_right;
-          internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
-
-          // accumulate resulting Jacobi rotations
-          m_workMatrix.applyOnTheLeft(p,q,j_left);
-          if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
-
-          m_workMatrix.applyOnTheRight(p,q,j_right);
-          if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
-        }
-      }
-    }
-  }
-
-  /*** step 3. The work matrix is now diagonal; make its diagonal entries non-negative so that they are the singular values ***/
-
-  for(Index i = 0; i < m_diagSize; ++i)
-  {
-    RealScalar a = internal::abs(m_workMatrix.coeff(i,i));
-    m_singularValues.coeffRef(i) = a;
-    if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
-  }
-
-  /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
-
-  m_nonzeroSingularValues = m_diagSize;
-  for(Index i = 0; i < m_diagSize; i++)
-  {
-    Index pos;
-    RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos);
-    if(maxRemainingSingularValue == RealScalar(0))
-    {
-      m_nonzeroSingularValues = i;
-      break;
-    }
-    if(pos)
-    {
-      pos += i;
-      std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));
-      if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));
-      if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
-    }
-  }
-
-  m_isInitialized = true;
-  return *this;
-}
-
-namespace internal {
-template<typename _MatrixType, int QRPreconditioner, typename Rhs>
-struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
-  : solve_retval_base<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
-{
-  typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType;
-  EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs)
-
-  template<typename Dest> void evalTo(Dest& dst) const
-  {
-    eigen_assert(rhs().rows() == dec().rows());
-
-    // A = U S V^*
-    // So A^{-1} = V S^{-1} U^*
-
-    Index diagSize = (std::min)(dec().rows(), dec().cols());
-    typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);
-
-    Index nonzeroSingVals = dec().nonzeroSingularValues();
-    invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
-    invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
-
-    dst = dec().matrixV().leftCols(diagSize)
-        * invertedSingVals.asDiagonal()
-        * dec().matrixU().leftCols(diagSize).adjoint()
-        * rhs();
-  }
-};
-} // end namespace internal
-
-/** \svd_module
-  *
-  * \return the singular value decomposition of \c *this computed by two-sided
-  * Jacobi transformations.
-  *
-  * \sa class JacobiSVD
-  */
-template<typename Derived>
-JacobiSVD<typename MatrixBase<Derived>::PlainObject>
-MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
-{
-  return JacobiSVD<PlainObject>(*this, computationOptions);
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_JACOBISVD_H
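For reference, a minimal usage sketch (not part of the patch) of the least-squares solve documented above, assuming only the public Eigen 3.1 API shipped with this update:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      // Overdetermined system: the returned x minimizes ||A*x - b||, as stated for solve().
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 3);
      Eigen::VectorXd b = Eigen::VectorXd::Random(6);

      // Thin U and V suffice for solving, as asserted in JacobiSVD::solve().
      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      Eigen::VectorXd x = svd.solve(b);

      std::cout << "residual: " << (A * x - b).norm() << std::endl;
      return 0;
    }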
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/CompressedStorage.h b/resources/3rdparty/eigen/Eigen/src/SparseCore/CompressedStorage.h
deleted file mode 100644
index 3321fab4a..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SparseCore/CompressedStorage.h
+++ /dev/null
@@ -1,233 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_COMPRESSED_STORAGE_H
-#define EIGEN_COMPRESSED_STORAGE_H
-
-namespace Eigen { 
-
-namespace internal {
-
-/** \internal
-  * Stores a sparse set of values as a list of values and a list of indices.
-  *
-  */
-template<typename _Scalar,typename _Index>
-class CompressedStorage
-{
-  public:
-
-    typedef _Scalar Scalar;
-    typedef _Index Index;
-
-  protected:
-
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-
-  public:
-
-    CompressedStorage()
-      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
-    {}
-
-    CompressedStorage(size_t size)
-      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
-    {
-      resize(size);
-    }
-
-    CompressedStorage(const CompressedStorage& other)
-      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
-    {
-      *this = other;
-    }
-
-    CompressedStorage& operator=(const CompressedStorage& other)
-    {
-      resize(other.size());
-      memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
-      memcpy(m_indices, other.m_indices, m_size * sizeof(Index));
-      return *this;
-    }
-
-    void swap(CompressedStorage& other)
-    {
-      std::swap(m_values, other.m_values);
-      std::swap(m_indices, other.m_indices);
-      std::swap(m_size, other.m_size);
-      std::swap(m_allocatedSize, other.m_allocatedSize);
-    }
-
-    ~CompressedStorage()
-    {
-      delete[] m_values;
-      delete[] m_indices;
-    }
-
-    void reserve(size_t size)
-    {
-      size_t newAllocatedSize = m_size + size;
-      if (newAllocatedSize > m_allocatedSize)
-        reallocate(newAllocatedSize);
-    }
-
-    void squeeze()
-    {
-      if (m_allocatedSize>m_size)
-        reallocate(m_size);
-    }
-
-    void resize(size_t size, float reserveSizeFactor = 0)
-    {
-      if (m_allocatedSize<size)
-        reallocate(size + size_t(reserveSizeFactor*size));
-      m_size = size;
-    }
-
-    void append(const Scalar& v, Index i)
-    {
-      Index id = static_cast<Index>(m_size);
-      resize(m_size+1, 1);
-      m_values[id] = v;
-      m_indices[id] = i;
-    }
-
-    inline size_t size() const { return m_size; }
-    inline size_t allocatedSize() const { return m_allocatedSize; }
-    inline void clear() { m_size = 0; }
-
-    inline Scalar& value(size_t i) { return m_values[i]; }
-    inline const Scalar& value(size_t i) const { return m_values[i]; }
-
-    inline Index& index(size_t i) { return m_indices[i]; }
-    inline const Index& index(size_t i) const { return m_indices[i]; }
-
-    static CompressedStorage Map(Index* indices, Scalar* values, size_t size)
-    {
-      CompressedStorage res;
-      res.m_indices = indices;
-      res.m_values = values;
-      res.m_allocatedSize = res.m_size = size;
-      return res;
-    }
-
-    /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
-    inline Index searchLowerIndex(Index key) const
-    {
-      return searchLowerIndex(0, m_size, key);
-    }
-
-    /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
-    inline Index searchLowerIndex(size_t start, size_t end, Index key) const
-    {
-      while(end>start)
-      {
-        size_t mid = (end+start)>>1;
-        if (m_indices[mid]<key)
-          start = mid+1;
-        else
-          end = mid;
-      }
-      return static_cast<Index>(start);
-    }
-
-    /** \returns the stored value at index \a key
-      * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
-    inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const
-    {
-      if (m_size==0)
-        return defaultValue;
-      else if (key==m_indices[m_size-1])
-        return m_values[m_size-1];
-      // ^^  optimization: let's first check if it is the last coefficient
-      // (very common in high level algorithms)
-      const size_t id = searchLowerIndex(0,m_size-1,key);
-      return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
-    }
-
-    /** Like at(), but the search is performed in the range [start,end) */
-    inline Scalar atInRange(size_t start, size_t end, Index key, const Scalar& defaultValue = Scalar(0)) const
-    {
-      if (start>=end)
-        return defaultValue;
-      else if (end>start && key==m_indices[end-1])
-        return m_values[end-1];
-      // ^^  optimization: let's first check if it is the last coefficient
-      // (very common in high level algorithms)
-      const size_t id = searchLowerIndex(start,end-1,key);
-      return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
-    }
-
-    /** \returns a reference to the value at index \a key
-      * If the value does not exist, then the value \a defaultValue is inserted
-      * such that the keys are sorted. */
-    inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
-    {
-      size_t id = searchLowerIndex(0,m_size,key);
-      if (id>=m_size || m_indices[id]!=key)
-      {
-        resize(m_size+1,1);
-        for (size_t j=m_size-1; j>id; --j)
-        {
-          m_indices[j] = m_indices[j-1];
-          m_values[j] = m_values[j-1];
-        }
-        m_indices[id] = key;
-        m_values[id] = defaultValue;
-      }
-      return m_values[id];
-    }
-
-    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
-    {
-      size_t k = 0;
-      size_t n = size();
-      for (size_t i=0; i<n; ++i)
-      {
-        if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
-        {
-          value(k) = value(i);
-          index(k) = index(i);
-          ++k;
-        }
-      }
-      resize(k,0);
-    }
-
-  protected:
-
-    inline void reallocate(size_t size)
-    {
-      Scalar* newValues  = new Scalar[size];
-      Index* newIndices = new Index[size];
-      size_t copySize = (std::min)(size, m_size);
-      // copy
-      internal::smart_copy(m_values, m_values+copySize, newValues);
-      internal::smart_copy(m_indices, m_indices+copySize, newIndices);
-      // delete old stuff
-      delete[] m_values;
-      delete[] m_indices;
-      m_values = newValues;
-      m_indices = newIndices;
-      m_allocatedSize = size;
-    }
-
-  protected:
-    Scalar* m_values;
-    Index* m_indices;
-    size_t m_size;
-    size_t m_allocatedSize;
-
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_COMPRESSED_STORAGE_H
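searchLowerIndex() above is a plain binary lower bound over the sorted inner-index array. For readers of this internal class, a standalone sketch (not part of the patch) of the same idea using std::lower_bound, with hypothetical local names:

    #include <algorithm>
    #include <vector>
    #include <cassert>

    // Largest k such that indices[j] < key for all j in [0,k),
    // mirroring internal::CompressedStorage::searchLowerIndex().
    static std::size_t searchLowerIndex(const std::vector<int>& indices, int key)
    {
      return static_cast<std::size_t>(
          std::lower_bound(indices.begin(), indices.end(), key) - indices.begin());
    }

    int main()
    {
      std::vector<int> indices;
      indices.push_back(1); indices.push_back(4); indices.push_back(7);
      assert(searchLowerIndex(indices, 4) == 1);  // 4 is found at position 1
      assert(searchLowerIndex(indices, 5) == 2);  // 5 would be inserted before 7
      return 0;
    }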
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseDenseProduct.h b/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseDenseProduct.h
deleted file mode 100644
index 8c608a622..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseDenseProduct.h
+++ /dev/null
@@ -1,300 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSEDENSEPRODUCT_H
-#define EIGEN_SPARSEDENSEPRODUCT_H
-
-namespace Eigen { 
-
-template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductReturnType
-{
-  typedef SparseTimeDenseProduct<Lhs,Rhs> Type;
-};
-
-template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1>
-{
-  typedef SparseDenseOuterProduct<Lhs,Rhs,false> Type;
-};
-
-template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType
-{
-  typedef DenseTimeSparseProduct<Lhs,Rhs> Type;
-};
-
-template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1>
-{
-  typedef SparseDenseOuterProduct<Rhs,Lhs,true> Type;
-};
-
-namespace internal {
-
-template<typename Lhs, typename Rhs, bool Tr>
-struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
-{
-  typedef Sparse StorageKind;
-  typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
-                                         typename traits<Rhs>::Scalar>::ReturnType Scalar;
-  typedef typename Lhs::Index Index;
-  typedef typename Lhs::Nested LhsNested;
-  typedef typename Rhs::Nested RhsNested;
-  typedef typename remove_all<LhsNested>::type _LhsNested;
-  typedef typename remove_all<RhsNested>::type _RhsNested;
-
-  enum {
-    LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost,
-    RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost,
-
-    RowsAtCompileTime    = Tr ? int(traits<Rhs>::RowsAtCompileTime)     : int(traits<Lhs>::RowsAtCompileTime),
-    ColsAtCompileTime    = Tr ? int(traits<Lhs>::ColsAtCompileTime)     : int(traits<Rhs>::ColsAtCompileTime),
-    MaxRowsAtCompileTime = Tr ? int(traits<Rhs>::MaxRowsAtCompileTime)  : int(traits<Lhs>::MaxRowsAtCompileTime),
-    MaxColsAtCompileTime = Tr ? int(traits<Lhs>::MaxColsAtCompileTime)  : int(traits<Rhs>::MaxColsAtCompileTime),
-
-    Flags = Tr ? RowMajorBit : 0,
-
-    CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits<Scalar>::MulCost
-  };
-};
-
-} // end namespace internal
-
-template<typename Lhs, typename Rhs, bool Tr>
-class SparseDenseOuterProduct
- : public SparseMatrixBase<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
-{
-  public:
-
-    typedef SparseMatrixBase<SparseDenseOuterProduct> Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct)
-    typedef internal::traits<SparseDenseOuterProduct> Traits;
-
-  private:
-
-    typedef typename Traits::LhsNested LhsNested;
-    typedef typename Traits::RhsNested RhsNested;
-    typedef typename Traits::_LhsNested _LhsNested;
-    typedef typename Traits::_RhsNested _RhsNested;
-
-  public:
-
-    class InnerIterator;
-
-    EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs)
-      : m_lhs(lhs), m_rhs(rhs)
-    {
-      EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
-    }
-
-    EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs)
-      : m_lhs(lhs), m_rhs(rhs)
-    {
-      EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
-    }
-
-    EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); }
-    EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); }
-
-    EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
-    EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
-
-  protected:
-    LhsNested m_lhs;
-    RhsNested m_rhs;
-};
-
-template<typename Lhs, typename Rhs, bool Transpose>
-class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
-{
-    typedef typename _LhsNested::InnerIterator Base;
-  public:
-    EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
-      : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
-    {
-    }
-
-    inline Index outer() const { return m_outer; }
-    inline Index row() const { return Transpose ? Base::row() : m_outer; }
-    inline Index col() const { return Transpose ? m_outer : Base::row(); }
-
-    inline Scalar value() const { return Base::value() * m_factor; }
-
-  protected:
-    int m_outer;
-    Scalar m_factor;
-};
-
-namespace internal {
-template<typename Lhs, typename Rhs>
-struct traits<SparseTimeDenseProduct<Lhs,Rhs> >
- : traits<ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs> >
-{
-  typedef Dense StorageKind;
-  typedef MatrixXpr XprKind;
-};
-
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
-         int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
-         bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
-struct sparse_time_dense_product_impl;
-
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, true>
-{
-  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
-  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
-  typedef typename internal::remove_all<DenseResType>::type Res;
-  typedef typename Lhs::Index Index;
-  typedef typename Lhs::InnerIterator LhsInnerIterator;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
-  {
-    for(Index c=0; c<rhs.cols(); ++c)
-    {
-      int n = lhs.outerSize();
-      for(Index j=0; j<n; ++j)
-      {
-        typename Res::Scalar tmp(0);
-        for(LhsInnerIterator it(lhs,j); it ;++it)
-          tmp += it.value() * rhs.coeff(it.index(),c);
-        res.coeffRef(j,c) = alpha * tmp;
-      }
-    }
-  }
-};
-
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, true>
-{
-  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
-  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
-  typedef typename internal::remove_all<DenseResType>::type Res;
-  typedef typename Lhs::InnerIterator LhsInnerIterator;
-  typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
-  {
-    for(Index c=0; c<rhs.cols(); ++c)
-    {
-      for(Index j=0; j<lhs.outerSize(); ++j)
-      {
-        typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
-        for(LhsInnerIterator it(lhs,j); it ;++it)
-          res.coeffRef(it.index(),c) += it.value() * rhs_j;
-      }
-    }
-  }
-};
-
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, false>
-{
-  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
-  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
-  typedef typename internal::remove_all<DenseResType>::type Res;
-  typedef typename Lhs::InnerIterator LhsInnerIterator;
-  typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
-  {
-    for(Index j=0; j<lhs.outerSize(); ++j)
-    {
-      typename Res::RowXpr res_j(res.row(j));
-      for(LhsInnerIterator it(lhs,j); it ;++it)
-        res_j += (alpha*it.value()) * rhs.row(it.index());
-    }
-  }
-};
-
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, false>
-{
-  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
-  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
-  typedef typename internal::remove_all<DenseResType>::type Res;
-  typedef typename Lhs::InnerIterator LhsInnerIterator;
-  typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
-  {
-    for(Index j=0; j<lhs.outerSize(); ++j)
-    {
-      typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
-      for(LhsInnerIterator it(lhs,j); it ;++it)
-        res.row(it.index()) += (alpha*it.value()) * rhs_j;
-    }
-  }
-};
-
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
-inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
-{
-  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType>::run(lhs, rhs, res, alpha);
-}
-
-} // end namespace internal
-
-template<typename Lhs, typename Rhs>
-class SparseTimeDenseProduct
-  : public ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
-{
-  public:
-    EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct)
-
-    SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
-    {}
-
-    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
-    {
-      internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
-    }
-
-  private:
-    SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&);
-};
-
-
-// dense = dense * sparse
-namespace internal {
-template<typename Lhs, typename Rhs>
-struct traits<DenseTimeSparseProduct<Lhs,Rhs> >
- : traits<ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs> >
-{
-  typedef Dense StorageKind;
-};
-} // end namespace internal
-
-template<typename Lhs, typename Rhs>
-class DenseTimeSparseProduct
-  : public ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs>
-{
-  public:
-    EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct)
-
-    DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
-    {}
-
-    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
-    {
-      Transpose<const _LhsNested> lhs_t(m_lhs);
-      Transpose<const _RhsNested> rhs_t(m_rhs);
-      Transpose<Dest> dest_t(dest);
-      internal::sparse_time_dense_product(rhs_t, lhs_t, dest_t, alpha);
-    }
-
-  private:
-    DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
-};
-
-// sparse * dense
-template<typename Derived>
-template<typename OtherDerived>
-inline const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
-SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
-{
-  return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_SPARSEDENSEPRODUCT_H
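The operator* defined above returns a SparseTimeDenseProduct expression that is evaluated by sparse_time_dense_product(). A short usage sketch (not part of the patch), using only the public Eigen 3.1 API:

    #include <Eigen/Sparse>
    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> A(3, 3);
      A.insert(0, 0) = 2.0;
      A.insert(1, 1) = 3.0;
      A.insert(2, 0) = 1.0;
      A.makeCompressed();

      Eigen::VectorXd v(3);
      v << 1.0, 2.0, 3.0;

      Eigen::VectorXd w = A * v;                 // sparse * dense
      std::cout << w.transpose() << std::endl;   // expected: 2 6 1
      return 0;
    }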
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h b/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h
deleted file mode 100644
index 095bf6863..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
-#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
-
-namespace Eigen { 
-
-// The product of a diagonal matrix with a sparse matrix can be easily
-// implemented using expression templates.
-// We have to consider two very different cases:
-// 1 - diag * row-major sparse
-//     => each inner vector <=> scalar * sparse vector product
-//     => so we can reuse CwiseUnaryOp::InnerIterator
-// 2 - diag * col-major sparse
-//     => each inner vector <=> dense vector * sparse vector cwise product
-//     => again, we can reuse specialization of CwiseBinaryOp::InnerIterator
-//        for that particular case
-// The two other cases are symmetric.
-
-namespace internal {
-
-template<typename Lhs, typename Rhs>
-struct traits<SparseDiagonalProduct<Lhs, Rhs> >
-{
-  typedef typename remove_all<Lhs>::type _Lhs;
-  typedef typename remove_all<Rhs>::type _Rhs;
-  typedef typename _Lhs::Scalar Scalar;
-  typedef typename promote_index_type<typename traits<Lhs>::Index,
-                                         typename traits<Rhs>::Index>::type Index;
-  typedef Sparse StorageKind;
-  typedef MatrixXpr XprKind;
-  enum {
-    RowsAtCompileTime = _Lhs::RowsAtCompileTime,
-    ColsAtCompileTime = _Rhs::ColsAtCompileTime,
-
-    MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime,
-    MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime,
-
-    SparseFlags = is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags),
-    Flags = (SparseFlags&RowMajorBit),
-    CoeffReadCost = Dynamic
-  };
-};
-
-enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor};
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType, int RhsMode, int LhsMode>
-class sparse_diagonal_product_inner_iterator_selector;
-
-} // end namespace internal
-
-template<typename Lhs, typename Rhs>
-class SparseDiagonalProduct
-  : public SparseMatrixBase<SparseDiagonalProduct<Lhs,Rhs> >,
-    internal::no_assignment_operator
-{
-    typedef typename Lhs::Nested LhsNested;
-    typedef typename Rhs::Nested RhsNested;
-
-    typedef typename internal::remove_all<LhsNested>::type _LhsNested;
-    typedef typename internal::remove_all<RhsNested>::type _RhsNested;
-
-    enum {
-      LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal
-              : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor,
-      RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal
-              : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor
-    };
-
-  public:
-
-    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
-
-    typedef internal::sparse_diagonal_product_inner_iterator_selector
-                <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
-
-    EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
-      : m_lhs(lhs), m_rhs(rhs)
-    {
-      eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
-    }
-
-    EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
-    EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
-
-    EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
-    EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
-
-  protected:
-    LhsNested m_lhs;
-    RhsNested m_rhs;
-};
-
-namespace internal {
-
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
-  : public CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator
-{
-    typedef typename CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator Base;
-    typedef typename Lhs::Index Index;
-  public:
-    inline sparse_diagonal_product_inner_iterator_selector(
-              const SparseDiagonalProductType& expr, Index outer)
-      : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
-    {}
-};
-
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
-  : public CwiseBinaryOp<
-      scalar_product_op<typename Lhs::Scalar>,
-      SparseInnerVectorSet<Rhs,1>,
-      typename Lhs::DiagonalVectorType>::InnerIterator
-{
-    typedef typename CwiseBinaryOp<
-      scalar_product_op<typename Lhs::Scalar>,
-      SparseInnerVectorSet<Rhs,1>,
-      typename Lhs::DiagonalVectorType>::InnerIterator Base;
-    typedef typename Lhs::Index Index;
-  public:
-    inline sparse_diagonal_product_inner_iterator_selector(
-              const SparseDiagonalProductType& expr, Index outer)
-      : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0)
-    {}
-};
-
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseColMajor,SDP_IsDiagonal>
-  : public CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator
-{
-    typedef typename CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator Base;
-    typedef typename Lhs::Index Index;
-  public:
-    inline sparse_diagonal_product_inner_iterator_selector(
-              const SparseDiagonalProductType& expr, Index outer)
-      : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
-    {}
-};
-
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
-  : public CwiseBinaryOp<
-      scalar_product_op<typename Rhs::Scalar>,
-      SparseInnerVectorSet<Lhs,1>,
-      Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
-{
-    typedef typename CwiseBinaryOp<
-      scalar_product_op<typename Rhs::Scalar>,
-      SparseInnerVectorSet<Lhs,1>,
-      Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
-    typedef typename Lhs::Index Index;
-  public:
-    inline sparse_diagonal_product_inner_iterator_selector(
-              const SparseDiagonalProductType& expr, Index outer)
-      : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0)
-    {}
-};
-
-} // end namespace internal
-
-// SparseMatrixBase functions
-
-template<typename Derived>
-template<typename OtherDerived>
-const SparseDiagonalProduct<Derived,OtherDerived>
-SparseMatrixBase<Derived>::operator*(const DiagonalBase<OtherDerived> &other) const
-{
-  return SparseDiagonalProduct<Derived,OtherDerived>(this->derived(), other.derived());
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
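A comparable sketch (not part of the patch) for the sparse-times-diagonal product handled by the operator* above, where each column j of the sparse factor is scaled by the j-th diagonal entry:

    #include <Eigen/Sparse>
    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> A(2, 2);
      A.insert(0, 0) = 1.0;
      A.insert(1, 1) = 4.0;
      A.makeCompressed();

      Eigen::VectorXd d(2);
      d << 10.0, 100.0;

      Eigen::SparseMatrix<double> B = A * d.asDiagonal();               // sparse * diagonal
      std::cout << B.coeff(0, 0) << " " << B.coeff(1, 1) << std::endl;  // expected: 10 400
      return 0;
    }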
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseMatrix.h b/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseMatrix.h
deleted file mode 100644
index 573804837..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseMatrix.h
+++ /dev/null
@@ -1,1209 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSEMATRIX_H
-#define EIGEN_SPARSEMATRIX_H
-
-namespace Eigen { 
-
-/** \ingroup SparseCore_Module
-  *
-  * \class SparseMatrix
-  *
-  * \brief A versatile sparse matrix representation
-  *
-  * This class implements a more versatile variant of the common \em compressed row/column storage format.
-  * Each column's (resp. row's) non zeros are stored as a pair of value with associated row (resp. column) index.
-  * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
-  * space in between the non zeros of two successive columns (resp. rows) such that insertion of a new non-zero
-  * can be done with limited memory reallocation and copies.
-  *
-  * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
-  * compatible with many libraries.
-  *
-  * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
-  *
-  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
-  * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
-  *                 is RowMajor. The default is 0 which means column-major.
-  * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
-  *
-  * This class can be extended with the help of the plugin mechanism described on the page
-  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
-  */
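The class documentation above describes the compressed and uncompressed layouts; a typical way to populate such a matrix with the public Eigen 3.1 API is via triplets followed by makeCompressed(), sketched below (not part of the patch):

    #include <Eigen/Sparse>
    #include <vector>

    int main()
    {
      typedef Eigen::Triplet<double> T;
      std::vector<T> triplets;
      triplets.push_back(T(0, 0, 1.0));
      triplets.push_back(T(1, 2, 2.0));
      triplets.push_back(T(2, 1, 3.0));

      Eigen::SparseMatrix<double> A(3, 3);
      A.setFromTriplets(triplets.begin(), triplets.end());  // duplicate entries are summed
      A.makeCompressed();                                    // standard compressed format
      return 0;
    }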
-
-namespace internal {
-template<typename _Scalar, int _Options, typename _Index>
-struct traits<SparseMatrix<_Scalar, _Options, _Index> >
-{
-  typedef _Scalar Scalar;
-  typedef _Index Index;
-  typedef Sparse StorageKind;
-  typedef MatrixXpr XprKind;
-  enum {
-    RowsAtCompileTime = Dynamic,
-    ColsAtCompileTime = Dynamic,
-    MaxRowsAtCompileTime = Dynamic,
-    MaxColsAtCompileTime = Dynamic,
-    Flags = _Options | NestByRefBit | LvalueBit,
-    CoeffReadCost = NumTraits<Scalar>::ReadCost,
-    SupportedAccessPatterns = InnerRandomAccessPattern
-  };
-};
-
-template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
-struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
-{
-  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
-  typedef typename nested<MatrixType>::type MatrixTypeNested;
-  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
-
-  typedef _Scalar Scalar;
-  typedef Dense StorageKind;
-  typedef _Index Index;
-  typedef MatrixXpr XprKind;
-
-  enum {
-    RowsAtCompileTime = Dynamic,
-    ColsAtCompileTime = 1,
-    MaxRowsAtCompileTime = Dynamic,
-    MaxColsAtCompileTime = 1,
-    Flags = 0,
-    CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
-  };
-};
-
-} // end namespace internal
-
-template<typename _Scalar, int _Options, typename _Index>
-class SparseMatrix
-  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
-{
-  public:
-    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
-    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
-    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
-
-    typedef MappedSparseMatrix<Scalar,Flags> Map;
-    using Base::IsRowMajor;
-    typedef internal::CompressedStorage<Scalar,Index> Storage;
-    enum {
-      Options = _Options
-    };
-
-  protected:
-
-    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
-
-    Index m_outerSize;
-    Index m_innerSize;
-    Index* m_outerIndex;
-    Index* m_innerNonZeros;     // optional, if null then the data is compressed
-    Storage m_data;
-    
-    Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
-    const  Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
-
-  public:
-    
-    /** \returns whether \c *this is in compressed form. */
-    inline bool isCompressed() const { return m_innerNonZeros==0; }
-
-    /** \returns the number of rows of the matrix */
-    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
-    /** \returns the number of columns of the matrix */
-    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
-
-    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
-    inline Index innerSize() const { return m_innerSize; }
-    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
-    inline Index outerSize() const { return m_outerSize; }
-    
-    /** \returns a const pointer to the array of values.
-      * This function is aimed at interoperability with other libraries.
-      * \sa innerIndexPtr(), outerIndexPtr() */
-    inline const Scalar* valuePtr() const { return &m_data.value(0); }
-    /** \returns a non-const pointer to the array of values.
-      * This function is aimed at interoperability with other libraries.
-      * \sa innerIndexPtr(), outerIndexPtr() */
-    inline Scalar* valuePtr() { return &m_data.value(0); }
-
-    /** \returns a const pointer to the array of inner indices.
-      * This function is aimed at interoperability with other libraries.
-      * \sa valuePtr(), outerIndexPtr() */
-    inline const Index* innerIndexPtr() const { return &m_data.index(0); }
-    /** \returns a non-const pointer to the array of inner indices.
-      * This function is aimed at interoperability with other libraries.
-      * \sa valuePtr(), outerIndexPtr() */
-    inline Index* innerIndexPtr() { return &m_data.index(0); }
-
-    /** \returns a const pointer to the array of the starting positions of the inner vectors.
-      * This function is aimed at interoperability with other libraries.
-      * \sa valuePtr(), innerIndexPtr() */
-    inline const Index* outerIndexPtr() const { return m_outerIndex; }
-    /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
-      * This function is aimed at interoperability with other libraries.
-      * \sa valuePtr(), innerIndexPtr() */
-    inline Index* outerIndexPtr() { return m_outerIndex; }
-
-    /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
-      * This function is aimed at interoperability with other libraries.
-      * \warning it returns the null pointer 0 in compressed mode */
-    inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
-    /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
-      * This function is aimed at interoperability with other libraries.
-      * \warning it returns the null pointer 0 in compressed mode */
-    inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
-
-    /** \internal */
-    inline Storage& data() { return m_data; }
-    /** \internal */
-    inline const Storage& data() const { return m_data; }
-
-    /** \returns the value of the matrix at position \a i, \a j
-      * This function returns Scalar(0) if the element is an explicit \em zero */
-    inline Scalar coeff(Index row, Index col) const
-    {
-      const Index outer = IsRowMajor ? row : col;
-      const Index inner = IsRowMajor ? col : row;
-      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
-      return m_data.atInRange(m_outerIndex[outer], end, inner);
-    }
-
-    /** \returns a non-const reference to the value of the matrix at position \a i, \a j
-      *
-      * If the element does not exist then it is inserted via the insert(Index,Index) function
-      * which itself turns the matrix into a non compressed form if that was not the case.
-      *
-      * This is an O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index)
-      * function if the element does not already exist.
-      */
-    inline Scalar& coeffRef(Index row, Index col)
-    {
-      const Index outer = IsRowMajor ? row : col;
-      const Index inner = IsRowMajor ? col : row;
-
-      Index start = m_outerIndex[outer];
-      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
-      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
-      if(end<=start)
-        return insert(row,col);
-      const Index p = m_data.searchLowerIndex(start,end-1,inner);
-      if((p<end) && (m_data.index(p)==inner))
-        return m_data.value(p);
-      else
-        return insert(row,col);
-    }
-
-    /** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col.
-      * The non zero coefficient must \b not already exist.
-      *
-      * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
-      * mode while reserving room for 2 non zeros per inner vector. It is strongly recommended to first
-      * call reserve(const SizesType &) to reserve a more appropriate number of elements per
-      * inner vector that better matches your scenario.
-      *
-      * This function performs a sorted insertion in O(1) if the elements of each inner vector are
-      * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
-      *
-      */
-    EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
-    {
-      if(isCompressed())
-      {
-        reserve(VectorXi::Constant(outerSize(), 2));
-      }
-      return insertUncompressed(row,col);
-    }
-
-  public:
-
-    class InnerIterator;
-    class ReverseInnerIterator;
-
-    /** Removes all non zeros but keeps the allocated memory */
-    inline void setZero()
-    {
-      m_data.clear();
-      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
-      if(m_innerNonZeros)
-        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
-    }
-
-    /** \returns the number of non zero coefficients */
-    inline Index nonZeros() const
-    {
-      if(m_innerNonZeros)
-        return innerNonZeros().sum();
-      return static_cast<Index>(m_data.size());
-    }
-
-    /** Preallocates \a reserveSize non zeros.
-      *
-      * Precondition: the matrix must be in compressed mode. */
-    inline void reserve(Index reserveSize)
-    {
-      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
-      m_data.reserve(reserveSize);
-    }
-    
-    #ifdef EIGEN_PARSED_BY_DOXYGEN
-    /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
-      *
-      * This function turns the matrix into non-compressed mode */
-    template<class SizesType>
-    inline void reserve(const SizesType& reserveSizes);
-    #else
-    template<class SizesType>
-    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
-    {
-      EIGEN_UNUSED_VARIABLE(enableif);
-      reserveInnerVectors(reserveSizes);
-    }
-    template<class SizesType>
-    inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
-    #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
-        typename
-    #endif
-        SizesType::Scalar())
-    {
-      EIGEN_UNUSED_VARIABLE(enableif);
-      reserveInnerVectors(reserveSizes);
-    }
-    #endif // EIGEN_PARSED_BY_DOXYGEN
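As the insert() and reserve() documentation above suggests, random insertion is cheapest when room is reserved per inner vector beforehand. A sketch of that pattern (not part of the patch), with illustrative sizes:

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> A(1000, 1000);      // column-major by default
      A.reserve(Eigen::VectorXi::Constant(1000, 6));  // expect about 6 non-zeros per column

      for (int j = 0; j < 1000; ++j)
        for (int k = 0; k < 3; ++k)
          A.insert((j + 7 * k) % 1000, j) = 1.0;      // each entry inserted exactly once

      A.makeCompressed();  // return to the standard compressed format
      return 0;
    }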
-  protected:
-    template<class SizesType>
-    inline void reserveInnerVectors(const SizesType& reserveSizes)
-    {
-      
-      if(isCompressed())
-      {
-        std::size_t totalReserveSize = 0;
-        // turn the matrix into non-compressed mode
-        m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
-        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
-        
-        // temporarily use m_innerNonZeros to hold the new starting points.
-        Index* newOuterIndex = m_innerNonZeros;
-        
-        Index count = 0;
-        for(Index j=0; j<m_outerSize; ++j)
-        {
-          newOuterIndex[j] = count;
-          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
-          totalReserveSize += reserveSizes[j];
-        }
-        m_data.reserve(totalReserveSize);
-        std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
-        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
-        {
-          ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
-          for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
-          {
-            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
-            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
-          }
-          previousOuterIndex = m_outerIndex[j];
-          m_outerIndex[j] = newOuterIndex[j];
-          m_innerNonZeros[j] = innerNNZ;
-        }
-        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
-        
-        m_data.resize(m_outerIndex[m_outerSize]);
-      }
-      else
-      {
-        Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
-        if (!newOuterIndex) internal::throw_std_bad_alloc();
-        
-        Index count = 0;
-        for(Index j=0; j<m_outerSize; ++j)
-        {
-          newOuterIndex[j] = count;
-          Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
-          Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
-          count += toReserve + m_innerNonZeros[j];
-        }
-        newOuterIndex[m_outerSize] = count;
-        
-        m_data.resize(count);
-        for(ptrdiff_t j=m_outerSize-1; j>=0; --j)
-        {
-          std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
-          if(offset>0)
-          {
-            std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
-            for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
-            {
-              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
-              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
-            }
-          }
-        }
-        
-        std::swap(m_outerIndex, newOuterIndex);
-        std::free(newOuterIndex);
-      }
-      
-    }
-  public:
-
-    //--- low level purely coherent filling ---
-
-    /** \internal
-      * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
-      * - the nonzero does not already exist
-      * - the new coefficient is the last one according to the storage order
-      *
-      * Before filling a given inner vector you must call the startVec(Index) function.
-      *
-      * After an insertion session, you should call the finalize() function.
-      *
-      * \sa insert, insertBackByOuterInner, startVec */
-    inline Scalar& insertBack(Index row, Index col)
-    {
-      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
-    }
-
-    /** \internal
-      * \sa insertBack, startVec */
-    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
-    {
-      eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
-      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
-      Index p = m_outerIndex[outer+1];
-      ++m_outerIndex[outer+1];
-      m_data.append(0, inner);
-      return m_data.value(p);
-    }
-
-    /** \internal
-      * \warning use it only if you know what you are doing */
-    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
-    {
-      Index p = m_outerIndex[outer+1];
-      ++m_outerIndex[outer+1];
-      m_data.append(0, inner);
-      return m_data.value(p);
-    }
-
-    /** \internal
-      * \sa insertBack, insertBackByOuterInner */
-    inline void startVec(Index outer)
-    {
-      eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
-      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
-      m_outerIndex[outer+1] = m_outerIndex[outer];
-    }
-
-    /** \internal
-      * Must be called after inserting a set of non zero entries using the low level compressed API.
-      */
-    inline void finalize()
-    {
-      if(isCompressed())
-      {
-        Index size = static_cast<Index>(m_data.size());
-        Index i = m_outerSize;
-        // find the last filled column
-        while (i>=0 && m_outerIndex[i]==0)
-          --i;
-        ++i;
-        while (i<=m_outerSize)
-        {
-          m_outerIndex[i] = size;
-          ++i;
-        }
-      }
-    }
-
-    //---
-
-    template<typename InputIterators>
-    void setFromTriplets(const InputIterators& begin, const InputIterators& end);
-
-    void sumupDuplicates();
-
-    //---
-    
-    /** \internal
-      * same as insert(Index,Index) except that the indices are given relative to the storage order */
-    EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i)
-    {
-      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
-    }
-
-    /** Turns the matrix into the \em compressed format.
-      */
-    void makeCompressed()
-    {
-      if(isCompressed())
-        return;
-      
-      Index oldStart = m_outerIndex[1];
-      m_outerIndex[1] = m_innerNonZeros[0];
-      for(Index j=1; j<m_outerSize; ++j)
-      {
-        Index nextOldStart = m_outerIndex[j+1];
-        std::ptrdiff_t offset = oldStart - m_outerIndex[j];
-        if(offset>0)
-        {
-          for(Index k=0; k<m_innerNonZeros[j]; ++k)
-          {
-            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
-            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
-          }
-        }
-        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
-        oldStart = nextOldStart;
-      }
-      std::free(m_innerNonZeros);
-      m_innerNonZeros = 0;
-      m_data.resize(m_outerIndex[m_outerSize]);
-      m_data.squeeze();
-    }
-
-    /** Turns the matrix into the uncompressed mode */
-    void uncompress()
-    {
-      if(m_innerNonZeros != 0)
-        return; 
-      m_innerNonZeros = new Index[m_outerSize]; 
-      for (int i = 0; i < m_outerSize; i++)
-      {
-        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; 
-      }
-    }
-    
-    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
-    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
-    {
-      prune(default_prunning_func(reference,epsilon));
-    }
-    
-    /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
-      * The functor type \a KeepFunc must implement the following function:
-      * \code
-      * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
-      * \endcode
-      * \sa prune(Scalar,RealScalar)
-      */
-    template<typename KeepFunc>
-    void prune(const KeepFunc& keep = KeepFunc())
-    {
-      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
-      // TODO also implement a unit test
-      makeCompressed();
-
-      Index k = 0;
-      for(Index j=0; j<m_outerSize; ++j)
-      {
-        Index previousStart = m_outerIndex[j];
-        m_outerIndex[j] = k;
-        Index end = m_outerIndex[j+1];
-        for(Index i=previousStart; i<end; ++i)
-        {
-          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
-          {
-            m_data.value(k) = m_data.value(i);
-            m_data.index(k) = m_data.index(i);
-            ++k;
-          }
-        }
-      }
-      m_outerIndex[m_outerSize] = k;
-      m_data.resize(k,0);
-    }
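The KeepFunc-based prune() above accepts any functor with the documented operator(). A small sketch (not part of the patch), in the C++03 style used throughout this tree, that keeps only entries of magnitude at least 0.5:

    #include <Eigen/Sparse>
    #include <cmath>

    struct KeepLarge
    {
      // Matches the required KeepFunc signature for the default Index=int and Scalar=double.
      bool operator()(const int& /*row*/, const int& /*col*/, const double& value) const
      {
        return std::abs(value) >= 0.5;
      }
    };

    int main()
    {
      Eigen::SparseMatrix<double> A(2, 2);
      A.insert(0, 0) = 0.1;
      A.insert(1, 1) = 0.9;
      A.makeCompressed();

      A.prune(KeepLarge());  // only the (1,1) entry of value 0.9 remains
      return 0;
    }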
-
-    /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
-      * \sa resizeNonZeros(Index), reserve(), setZero()
-      */
-    void conservativeResize(Index rows, Index cols) 
-    {
-        // No change
-        if (this->rows() == rows && this->cols() == cols) return;
-
-        Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
-        Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
-        Index newInnerSize = IsRowMajor ? cols : rows;
-
-        // Deals with inner non zeros
-        if (m_innerNonZeros)
-        {
-          // Resize m_innerNonZeros
-          Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
-          if (!newInnerNonZeros) internal::throw_std_bad_alloc();
-          m_innerNonZeros = newInnerNonZeros;
-          
-          for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)          
-            m_innerNonZeros[i] = 0;
-        } 
-        else if (innerChange < 0) 
-        {
-          // Inner size decreased: allocate a new m_innerNonZeros
-          m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
-          if (!m_innerNonZeros) internal::throw_std_bad_alloc();
-          for(Index i = 0; i < m_outerSize; i++)
-            m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
-        }
-        
-        // Change the m_innerNonZeros in case of a decrease of inner size
-        if (m_innerNonZeros && innerChange < 0) {
-              for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
-              {
-                Index &n = m_innerNonZeros[i];
-                Index start = m_outerIndex[i];
-                while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; 
-              }
-        }
-        
-        m_innerSize = newInnerSize;
-
-        // Re-allocate outer index structure if necessary
-        if (outerChange == 0)
-          return;
-            
-        Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
-        if (!newOuterIndex) internal::throw_std_bad_alloc();
-        m_outerIndex = newOuterIndex;
-        if (outerChange > 0) {
-          Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
-          for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)          
-            m_outerIndex[i] = last; 
-        }
-        m_outerSize += outerChange;
-        
-    }
-    
-    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
-      * \sa resizeNonZeros(Index), reserve(), setZero()
-      */
-    void resize(Index rows, Index cols)
-    {
-      const Index outerSize = IsRowMajor ? rows : cols;
-      m_innerSize = IsRowMajor ? cols : rows;
-      m_data.clear();
-      if (m_outerSize != outerSize || m_outerSize==0)
-      {
-        std::free(m_outerIndex);
-        m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
-        if (!m_outerIndex) internal::throw_std_bad_alloc();
-        
-        m_outerSize = outerSize;
-      }
-      if(m_innerNonZeros)
-      {
-        std::free(m_innerNonZeros);
-        m_innerNonZeros = 0;
-      }
-      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
-    }
-
-    /** \internal
-      * Resize the nonzero vector to \a size */
-    void resizeNonZeros(Index size)
-    {
-      // TODO remove this function
-      m_data.resize(size);
-    }
-
-    /** \returns a const expression of the diagonal coefficients */
-    const Diagonal<const SparseMatrix> diagonal() const { return *this; }
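-
-    // A minimal sketch (illustrative only): the returned expression can be
-    // evaluated into a dense vector.
-    //   SparseMatrix<double> A(3,3);
-    //   A.insert(1,1) = 2.0;
-    //   VectorXd d = A.diagonal(); // d == (0, 2, 0)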
-
-    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
-    inline SparseMatrix()
-      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
-    {
-      check_template_parameters();
-      resize(0, 0);
-    }
-
-    /** Constructs a \a rows \c x \a cols empty matrix */
-    inline SparseMatrix(Index rows, Index cols)
-      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
-    {
-      check_template_parameters();
-      resize(rows, cols);
-    }
-
-    /** Constructs a sparse matrix from the sparse expression \a other */
-    template<typename OtherDerived>
-    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
-      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
-    {
-      check_template_parameters();
-      *this = other.derived();
-    }
-
-    /** Copy constructor (it performs a deep copy) */
-    inline SparseMatrix(const SparseMatrix& other)
-      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
-    {
-      check_template_parameters();
-      *this = other.derived();
-    }
-
-    /** \brief Copy constructor with in-place evaluation */
-    template<typename OtherDerived>
-    SparseMatrix(const ReturnByValue<OtherDerived>& other)
-      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
-    {
-      check_template_parameters();
-      initAssignment(other);
-      other.evalTo(*this);
-    }
-
-    /** Swaps the content of two sparse matrices of the same type.
-      * This is a fast operation that simply swaps the underlying pointers and parameters. */
-    inline void swap(SparseMatrix& other)
-    {
-      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
-      std::swap(m_outerIndex, other.m_outerIndex);
-      std::swap(m_innerSize, other.m_innerSize);
-      std::swap(m_outerSize, other.m_outerSize);
-      std::swap(m_innerNonZeros, other.m_innerNonZeros);
-      m_data.swap(other.m_data);
-    }
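-
-    // A minimal sketch (illustrative only): swapping runs in constant time
-    // regardless of the matrix sizes, since only pointers and sizes are exchanged.
-    //   SparseMatrix<double> A(1000,1000), B(10,10);
-    //   A.swap(B); // A is now 10x10 and B is 1000x1000, no entry is copied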
-
-    inline SparseMatrix& operator=(const SparseMatrix& other)
-    {
-      if (other.isRValue())
-      {
-        swap(other.const_cast_derived());
-      }
-      else
-      {
-        initAssignment(other);
-        if(other.isCompressed())
-        {
-          memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
-          m_data = other.m_data;
-        }
-        else
-        {
-          Base::operator=(other);
-        }
-      }
-      return *this;
-    }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    template<typename Lhs, typename Rhs>
-    inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
-    { return Base::operator=(product); }
-    
-    template<typename OtherDerived>
-    inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
-    {
-      initAssignment(other);
-      return Base::operator=(other.derived());
-    }
-    
-    template<typename OtherDerived>
-    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
-    { return Base::operator=(other.derived()); }
-    #endif
-
-    template<typename OtherDerived>
-    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
-    {
-      const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
-      if (needToTranspose)
-      {
-        // two-pass algorithm:
-        //  1 - compute the number of coeffs per dest inner vector
-        //  2 - do the actual copy/eval
-        // Since each coeff of the rhs has to be evaluated twice, evaluate it into a temporary if needed
-        typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
-        typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
-        OtherCopy otherCopy(other.derived());
-
-        SparseMatrix dest(other.rows(),other.cols());
-        Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
-
-        // pass 1
-        // FIXME the above copy could be merged with that pass
-        for (Index j=0; j<otherCopy.outerSize(); ++j)
-          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
-            ++dest.m_outerIndex[it.index()];
-
-        // prefix sum
-        Index count = 0;
-        VectorXi positions(dest.outerSize());
-        for (Index j=0; j<dest.outerSize(); ++j)
-        {
-          Index tmp = dest.m_outerIndex[j];
-          dest.m_outerIndex[j] = count;
-          positions[j] = count;
-          count += tmp;
-        }
-        dest.m_outerIndex[dest.outerSize()] = count;
-        // alloc
-        dest.m_data.resize(count);
-        // pass 2
-        for (Index j=0; j<otherCopy.outerSize(); ++j)
-        {
-          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
-          {
-            Index pos = positions[it.index()]++;
-            dest.m_data.index(pos) = j;
-            dest.m_data.value(pos) = it.value();
-          }
-        }
-        this->swap(dest);
-        return *this;
-      }
-      else
-      {
-        if(other.isRValue())
-          initAssignment(other.derived());
-        // there is no special optimization
-        return Base::operator=(other.derived());
-      }
-    }
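-
-    // A minimal sketch (illustrative only): assigning across storage orders takes
-    // the transposing branch above and yields a sorted, compressed result.
-    //   SparseMatrix<double,RowMajor> R(4,4);
-    //   R.insert(1,3) = 5.0;
-    //   SparseMatrix<double,ColMajor> C(4,4);
-    //   C = R; // needToTranspose is true here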
-
-    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
-    {
-      EIGEN_DBG_SPARSE(
-        s << "Nonzero entries:\n";
-        if(m.isCompressed())
-          for (Index i=0; i<m.nonZeros(); ++i)
-            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
-        else
-          for (Index i=0; i<m.outerSize(); ++i)
-          {
-            int p = m.m_outerIndex[i];
-            int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
-            Index k=p;
-            for (; k<pe; ++k)
-              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
-            for (; k<m.m_outerIndex[i+1]; ++k)
-              s << "(_,_) ";
-          }
-        s << std::endl;
-        s << std::endl;
-        s << "Outer pointers:\n";
-        for (Index i=0; i<m.outerSize(); ++i)
-          s << m.m_outerIndex[i] << " ";
-        s << " $" << std::endl;
-        if(!m.isCompressed())
-        {
-          s << "Inner non zeros:\n";
-          for (Index i=0; i<m.outerSize(); ++i)
-            s << m.m_innerNonZeros[i] << " ";
-          s << " $" << std::endl;
-        }
-        s << std::endl;
-      );
-      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
-      return s;
-    }
-
-    /** Destructor */
-    inline ~SparseMatrix()
-    {
-      std::free(m_outerIndex);
-      std::free(m_innerNonZeros);
-    }
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** Overloaded for performance */
-    Scalar sum() const;
-#endif
-    
-#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
-#     include EIGEN_SPARSEMATRIX_PLUGIN
-#   endif
-
-protected:
-
-    template<typename Other>
-    void initAssignment(const Other& other)
-    {
-      resize(other.rows(), other.cols());
-      if(m_innerNonZeros)
-      {
-        std::free(m_innerNonZeros);
-        m_innerNonZeros = 0;
-      }
-    }
-
-    /** \internal
-      * \sa insert(Index,Index) */
-    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
-    {
-      eigen_assert(isCompressed());
-
-      const Index outer = IsRowMajor ? row : col;
-      const Index inner = IsRowMajor ? col : row;
-
-      Index previousOuter = outer;
-      if (m_outerIndex[outer+1]==0)
-      {
-        // we start a new inner vector
-        while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
-        {
-          m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
-          --previousOuter;
-        }
-        m_outerIndex[outer+1] = m_outerIndex[outer];
-      }
-
-      // here we have to handle the tricky case where the outerIndex array
-      // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
-      // the 2nd inner vector...
-      bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
-                    && (size_t(m_outerIndex[outer+1]) == m_data.size());
-
-      size_t startId = m_outerIndex[outer];
-      // FIXME let's make sure sizeof(long int) == sizeof(size_t)
-      size_t p = m_outerIndex[outer+1];
-      ++m_outerIndex[outer+1];
-
-      float reallocRatio = 1;
-      if (m_data.allocatedSize()<=m_data.size())
-      {
-        // if there is no preallocated memory, let's reserve a minimum of 32 elements
-        if (m_data.size()==0)
-        {
-          m_data.reserve(32);
-        }
-        else
-        {
-          // we need to reallocate the data; to reduce the number of reallocations
-          // we use a smart resize algorithm based on the current filling ratio
-          // in addition, we use float to avoid integer overflows
-          float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
-          reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
-          // furthermore we bound the realloc ratio to:
-          //   1) reduce multiple minor reallocations when the matrix is almost filled
-          //   2) avoid allocating too much memory when the matrix is almost empty
-          reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
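-          // worked example (illustrative): if the first 9 of 100 outer vectors
-          // already hold the 100 stored entries and we insert into the 10th
-          // (outer==9), then nnzEstimate = 100*100/10 = 1000 and
-          // reallocRatio = (1000-100)/100 = 9, which is clamped to 8.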
-        }
-      }
-      m_data.resize(m_data.size()+1,reallocRatio);
-
-      if (!isLastVec)
-      {
-        if (previousOuter==-1)
-        {
-          // oops wrong guess.
-          // let's correct the outer offsets
-          for (Index k=0; k<=(outer+1); ++k)
-            m_outerIndex[k] = 0;
-          Index k=outer+1;
-          while(m_outerIndex[k]==0)
-            m_outerIndex[k++] = 1;
-          while (k<=m_outerSize && m_outerIndex[k]!=0)
-            m_outerIndex[k++]++;
-          p = 0;
-          --k;
-          k = m_outerIndex[k]-1;
-          while (k>0)
-          {
-            m_data.index(k) = m_data.index(k-1);
-            m_data.value(k) = m_data.value(k-1);
-            k--;
-          }
-        }
-        else
-        {
-          // we are not inserting into the last inner vec
-          // update outer indices:
-          Index j = outer+2;
-          while (j<=m_outerSize && m_outerIndex[j]!=0)
-            m_outerIndex[j++]++;
-          --j;
-          // shift data of last vecs:
-          Index k = m_outerIndex[j]-1;
-          while (k>=Index(p))
-          {
-            m_data.index(k) = m_data.index(k-1);
-            m_data.value(k) = m_data.value(k-1);
-            k--;
-          }
-        }
-      }
-
-      while ( (p > startId) && (m_data.index(p-1) > inner) )
-      {
-        m_data.index(p) = m_data.index(p-1);
-        m_data.value(p) = m_data.value(p-1);
-        --p;
-      }
-
-      m_data.index(p) = inner;
-      return (m_data.value(p) = 0);
-    }
-
-    /** \internal
-      * A vector object that is equal to 0 everywhere except at position \a i where its value is \a v */
-    class SingletonVector
-    {
-        Index m_index;
-        Index m_value;
-      public:
-        typedef Index value_type;
-        SingletonVector(Index i, Index v)
-          : m_index(i), m_value(v)
-        {}
-
-        Index operator[](Index i) const { return i==m_index ? m_value : 0; }
-    };
-
-    /** \internal
-      * \sa insert(Index,Index) */
-    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
-    {
-      eigen_assert(!isCompressed());
-
-      const Index outer = IsRowMajor ? row : col;
-      const Index inner = IsRowMajor ? col : row;
-
-      std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
-      std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
-      if(innerNNZ>=room)
-      {
-        // this inner vector is full, we need to reallocate the whole buffer :(
-        reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
-      }
-
-      Index startId = m_outerIndex[outer];
-      Index p = startId + m_innerNonZeros[outer];
-      while ( (p > startId) && (m_data.index(p-1) > inner) )
-      {
-        m_data.index(p) = m_data.index(p-1);
-        m_data.value(p) = m_data.value(p-1);
-        --p;
-      }
-
-      m_innerNonZeros[outer]++;
-
-      m_data.index(p) = inner;
-      return (m_data.value(p) = 0);
-    }
-
-public:
-    /** \internal
-      * \sa insert(Index,Index) */
-    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
-    {
-      const Index outer = IsRowMajor ? row : col;
-      const Index inner = IsRowMajor ? col : row;
-
-      eigen_assert(!isCompressed());
-      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
-
-      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
-      m_data.index(p) = inner;
-      return (m_data.value(p) = 0);
-    }
-
-private:
-  static void check_template_parameters()
-  {
-    EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
-  }
-
-  struct default_prunning_func {
-    default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
-    inline bool operator() (const Index&, const Index&, const Scalar& value) const
-    {
-      return !internal::isMuchSmallerThan(value, reference, epsilon);
-    }
-    Scalar reference;
-    RealScalar epsilon;
-  };
-};
-
-template<typename Scalar, int _Options, typename _Index>
-class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
-{
-  public:
-    InnerIterator(const SparseMatrix& mat, Index outer)
-      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
-    {
-      if(mat.isCompressed())
-        m_end = mat.m_outerIndex[outer+1];
-      else
-        m_end = m_id + mat.m_innerNonZeros[outer];
-    }
-
-    inline InnerIterator& operator++() { m_id++; return *this; }
-
-    inline const Scalar& value() const { return m_values[m_id]; }
-    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
-
-    inline Index index() const { return m_indices[m_id]; }
-    inline Index outer() const { return m_outer; }
-    inline Index row() const { return IsRowMajor ? m_outer : index(); }
-    inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
-    inline operator bool() const { return (m_id < m_end); }
-
-  protected:
-    const Scalar* m_values;
-    const Index* m_indices;
-    const Index m_outer;
-    Index m_id;
-    Index m_end;
-};
-
-template<typename Scalar, int _Options, typename _Index>
-class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
-{
-  public:
-    ReverseInnerIterator(const SparseMatrix& mat, Index outer)
-      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
-    {
-      if(mat.isCompressed())
-        m_id = mat.m_outerIndex[outer+1];
-      else
-        m_id = m_start + mat.m_innerNonZeros[outer];
-    }
-
-    inline ReverseInnerIterator& operator--() { --m_id; return *this; }
-
-    inline const Scalar& value() const { return m_values[m_id-1]; }
-    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
-
-    inline Index index() const { return m_indices[m_id-1]; }
-    inline Index outer() const { return m_outer; }
-    inline Index row() const { return IsRowMajor ? m_outer : index(); }
-    inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
-    inline operator bool() const { return (m_id > m_start); }
-
-  protected:
-    const Scalar* m_values;
-    const Index* m_indices;
-    const Index m_outer;
-    Index m_id;
-    const Index m_start;
-};
-
-namespace internal {
-
-template<typename InputIterator, typename SparseMatrixType>
-void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
-{
-  EIGEN_UNUSED_VARIABLE(Options);
-  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
-  typedef typename SparseMatrixType::Scalar Scalar;
-  typedef typename SparseMatrixType::Index Index;
-  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
-
-  // pass 1: count the nnz per inner-vector
-  VectorXi wi(trMat.outerSize());
-  wi.setZero();
-  for(InputIterator it(begin); it!=end; ++it)
-    wi(IsRowMajor ? it->col() : it->row())++;
-
-  // pass 2: insert all the elements into trMat
-  trMat.reserve(wi);
-  for(InputIterator it(begin); it!=end; ++it)
-    trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
-
-  // pass 3:
-  trMat.sumupDuplicates();
-
-  // pass 4: transposed copy -> implicit sorting
-  mat = trMat;
-}
-
-}
-
-
-/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
-  *
-  * A \em triplet is a tuple (i,j,value) defining a non-zero element.
-  * The input list of triplets does not have to be sorted, and may contain duplicated elements.
-  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
-  * This is a \em O(n) operation, with \em n the number of triplet elements.
-  * The initial contents of \c *this are destroyed.
-  * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
-  * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
-  *
-  * The \a InputIterators value_type must provide the following interface:
-  * \code
-  * Scalar value() const; // the value
-  * Index row() const;    // the row index i
-  * Index col() const;    // the column index j
-  * \endcode
-  * See for instance the Eigen::Triplet template class.
-  *
-  * Here is a typical usage example:
-  * \code
-    typedef Triplet<double> T;
-    std::vector<T> tripletList;
-    tripletList.reserve(estimation_of_entries);
-    for(...)
-    {
-      // ...
-      tripletList.push_back(T(i,j,v_ij));
-    }
-    SparseMatrixType m(rows,cols);
-    m.setFromTriplets(tripletList.begin(), tripletList.end());
-    // m is ready to go!
-  * \endcode
-  *
-  * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
-  * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
-  * be explicitly stored into a std::vector for instance.
-  */
-template<typename Scalar, int _Options, typename _Index>
-template<typename InputIterators>
-void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
-{
-  internal::set_from_triplets(begin, end, *this);
-}
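-
-// A minimal sketch (illustrative only): duplicated triplets are summed up and the
-// resulting matrix is sorted and compressed, as described above.
-//   std::vector<Triplet<double> > entries;
-//   entries.push_back(Triplet<double>(0,0,1.0));
-//   entries.push_back(Triplet<double>(0,0,2.0)); // duplicate of (0,0)
-//   SparseMatrix<double> A(2,2);
-//   A.setFromTriplets(entries.begin(), entries.end()); // A.coeff(0,0) == 3.0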
-
-/** \internal */
-template<typename Scalar, int _Options, typename _Index>
-void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
-{
-  eigen_assert(!isCompressed());
-  // TODO, in practice we should be able to use m_innerNonZeros for that task
-  VectorXi wi(innerSize());
-  wi.fill(-1);
-  Index count = 0;
-  // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
-  for(int j=0; j<outerSize(); ++j)
-  {
-    Index start   = count;
-    Index oldEnd  = m_outerIndex[j]+m_innerNonZeros[j];
-    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
-    {
-      Index i = m_data.index(k);
-      if(wi(i)>=start)
-      {
-        // we have already met this entry => accumulate it
-        m_data.value(wi(i)) += m_data.value(k);
-      }
-      else
-      {
-        m_data.value(count) = m_data.value(k);
-        m_data.index(count) = m_data.index(k);
-        wi(i) = count;
-        ++count;
-      }
-    }
-    m_outerIndex[j] = start;
-  }
-  m_outerIndex[m_outerSize] = count;
-
-  // turn the matrix into compressed form
-  std::free(m_innerNonZeros);
-  m_innerNonZeros = 0;
-  m_data.resize(m_outerIndex[m_outerSize]);
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_SPARSEMATRIX_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseMatrixBase.h b/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseMatrixBase.h
deleted file mode 100644
index 39332a178..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseMatrixBase.h
+++ /dev/null
@@ -1,458 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSEMATRIXBASE_H
-#define EIGEN_SPARSEMATRIXBASE_H
-
-namespace Eigen { 
-
-/** \ingroup SparseCore_Module
-  *
-  * \class SparseMatrixBase
-  *
-  * \brief Base class of any sparse matrices or sparse expressions
-  *
-  * \tparam Derived
-  *
-  * This class can be extended with the help of the plugin mechanism described on the page
-  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
-  */
-template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
-{
-  public:
-
-    typedef typename internal::traits<Derived>::Scalar Scalar;
-    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
-    typedef typename internal::traits<Derived>::StorageKind StorageKind;
-    typedef typename internal::traits<Derived>::Index Index;
-    typedef typename internal::add_const_on_value_type_if_arithmetic<
-                         typename internal::packet_traits<Scalar>::type
-                     >::type PacketReturnType;
-
-    typedef SparseMatrixBase StorageBaseType;
-    typedef EigenBase<Derived> Base;
-    
-    template<typename OtherDerived>
-    Derived& operator=(const EigenBase<OtherDerived> &other)
-    {
-      other.derived().evalTo(derived());
-      return derived();
-    }
-
-    enum {
-
-      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
-        /**< The number of rows at compile-time. This is just a copy of the value provided
-          * by the \a Derived type. If a value is not known at compile-time,
-          * it is set to the \a Dynamic constant.
-          * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
-
-      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
-        /**< The number of columns at compile-time. This is just a copy of the value provided
-          * by the \a Derived type. If a value is not known at compile-time,
-          * it is set to the \a Dynamic constant.
-          * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
-
-
-      SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
-                                                   internal::traits<Derived>::ColsAtCompileTime>::ret),
-        /**< This is equal to the number of coefficients, i.e. the number of
-          * rows times the number of columns, or to \a Dynamic if this is not
-          * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
-
-      MaxRowsAtCompileTime = RowsAtCompileTime,
-      MaxColsAtCompileTime = ColsAtCompileTime,
-
-      MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
-                                                      MaxColsAtCompileTime>::ret),
-
-      IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
-        /**< This is set to true if either the number of rows or the number of
-          * columns is known at compile-time to be equal to 1. Indeed, in that case,
-          * we are dealing with a column-vector (if there is only one column) or with
-          * a row-vector (if there is only one row). */
-
-      Flags = internal::traits<Derived>::Flags,
-        /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
-          * constructed from this one. See the \ref flags "list of flags".
-          */
-
-      CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
-        /**< This is a rough measure of how expensive it is to read one coefficient from
-          * this expression.
-          */
-
-      IsRowMajor = Flags&RowMajorBit ? 1 : 0,
-
-      #ifndef EIGEN_PARSED_BY_DOXYGEN
-      _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
-      #endif
-    };
-
-    /** \internal the return type of MatrixBase::adjoint() */
-    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
-                        CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
-                        Transpose<const Derived>
-                     >::type AdjointReturnType;
-
-
-    typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainObject;
-
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** This is the "real scalar" type; if the \a Scalar type is already real numbers
-      * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
-      * \a Scalar is \a std::complex<T> then RealScalar is \a T.
-      *
-      * \sa class NumTraits
-      */
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-
-    /** \internal the return type of coeff()
-      */
-    typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;
-
-    /** \internal Represents a matrix with all coefficients equal to one another*/
-    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
-
-    /** type of the equivalent square matrix */
-    typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
-                          EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
-
-    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
-    inline Derived& derived() { return *static_cast<Derived*>(this); }
-    inline Derived& const_cast_derived() const
-    { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
-#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
-#   include "../plugins/CommonCwiseUnaryOps.h"
-#   include "../plugins/CommonCwiseBinaryOps.h"
-#   include "../plugins/MatrixCwiseUnaryOps.h"
-#   include "../plugins/MatrixCwiseBinaryOps.h"
-#   ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
-#     include EIGEN_SPARSEMATRIXBASE_PLUGIN
-#   endif
-#   undef EIGEN_CURRENT_STORAGE_BASE_CLASS
-#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
-
-
-    /** \returns the number of rows. \sa cols() */
-    inline Index rows() const { return derived().rows(); }
-    /** \returns the number of columns. \sa rows() */
-    inline Index cols() const { return derived().cols(); }
-    /** \returns the number of coefficients, which is \a rows()*cols().
-      * \sa rows(), cols(). */
-    inline Index size() const { return rows() * cols(); }
-    /** \returns the number of nonzero coefficients which is in practice the number
-      * of stored coefficients. */
-    inline Index nonZeros() const { return derived().nonZeros(); }
-    /** \returns true if either the number of rows or the number of columns is equal to 1.
-      * In other words, this function returns
-      * \code rows()==1 || cols()==1 \endcode
-      * \sa rows(), cols(), IsVectorAtCompileTime. */
-    inline bool isVector() const { return rows()==1 || cols()==1; }
-    /** \returns the size of the storage major dimension,
-      * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
-    Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
-    /** \returns the size of the inner dimension according to the storage order,
-      * i.e., the number of rows for a column-major matrix, and the number of columns otherwise */
-    Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
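-
-    // A minimal sketch of the outer/inner terminology used throughout the sparse module:
-    //   SparseMatrix<double,ColMajor> C(3,7); // outerSize()==7 (columns), innerSize()==3 (rows)
-    //   SparseMatrix<double,RowMajor> R(3,7); // outerSize()==3 (rows), innerSize()==7 (columns)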
-
-    bool isRValue() const { return m_isRValue; }
-    Derived& markAsRValue() { m_isRValue = true; return derived(); }
-
-    SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }
-
-    
-    template<typename OtherDerived>
-    Derived& operator=(const ReturnByValue<OtherDerived>& other)
-    {
-      other.evalTo(derived());
-      return derived();
-    }
-
-
-    template<typename OtherDerived>
-    inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
-    {
-      return assign(other.derived());
-    }
-
-    inline Derived& operator=(const Derived& other)
-    {
-//       if (other.isRValue())
-//         derived().swap(other.const_cast_derived());
-//       else
-      return assign(other.derived());
-    }
-
-  protected:
-
-    template<typename OtherDerived>
-    inline Derived& assign(const OtherDerived& other)
-    {
-      const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
-      const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
-      if ((!transpose) && other.isRValue())
-      {
-        // eval without temporary
-        derived().resize(other.rows(), other.cols());
-        derived().setZero();
-        derived().reserve((std::max)(this->rows(),this->cols())*2);
-        for (Index j=0; j<outerSize; ++j)
-        {
-          derived().startVec(j);
-          for (typename OtherDerived::InnerIterator it(other, j); it; ++it)
-          {
-            Scalar v = it.value();
-            derived().insertBackByOuterInner(j,it.index()) = v;
-          }
-        }
-        derived().finalize();
-      }
-      else
-      {
-        assignGeneric(other);
-      }
-      return derived();
-    }
-
-    template<typename OtherDerived>
-    inline void assignGeneric(const OtherDerived& other)
-    {
-      //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
-      eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
-                  (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
-                  "the transpose operation is supposed to be handled in SparseMatrix::operator=");
-
-      enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
-
-      const Index outerSize = other.outerSize();
-      //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
-      // thanks to shallow copies, we always eval to a temporary
-      Derived temp(other.rows(), other.cols());
-
-      temp.reserve((std::max)(this->rows(),this->cols())*2);
-      for (Index j=0; j<outerSize; ++j)
-      {
-        temp.startVec(j);
-        for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
-        {
-          Scalar v = it.value();
-          temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
-        }
-      }
-      temp.finalize();
-
-      derived() = temp.markAsRValue();
-    }
-
-  public:
-
-    template<typename Lhs, typename Rhs>
-    inline Derived& operator=(const SparseSparseProduct<Lhs,Rhs>& product);
-
-    friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
-    {
-      typedef typename Derived::Nested Nested;
-      typedef typename internal::remove_all<Nested>::type NestedCleaned;
-
-      if (Flags&RowMajorBit)
-      {
-        const Nested nm(m.derived());
-        for (Index row=0; row<nm.outerSize(); ++row)
-        {
-          Index col = 0;
-          for (typename NestedCleaned::InnerIterator it(nm.derived(), row); it; ++it)
-          {
-            for ( ; col<it.index(); ++col)
-              s << "0 ";
-            s << it.value() << " ";
-            ++col;
-          }
-          for ( ; col<m.cols(); ++col)
-            s << "0 ";
-          s << std::endl;
-        }
-      }
-      else
-      {
-        const Nested nm(m.derived());
-        if (m.cols() == 1) {
-          Index row = 0;
-          for (typename NestedCleaned::InnerIterator it(nm.derived(), 0); it; ++it)
-          {
-            for ( ; row<it.index(); ++row)
-              s << "0" << std::endl;
-            s << it.value() << std::endl;
-            ++row;
-          }
-          for ( ; row<m.rows(); ++row)
-            s << "0" << std::endl;
-        }
-        else
-        {
-          SparseMatrix<Scalar, RowMajorBit> trans = m;
-          s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit> >&>(trans);
-        }
-      }
-      return s;
-    }
-
-    template<typename OtherDerived>
-    Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
-    template<typename OtherDerived>
-    Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
-
-    Derived& operator*=(const Scalar& other);
-    Derived& operator/=(const Scalar& other);
-
-    #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
-      CwiseBinaryOp< \
-        internal::scalar_product_op< \
-          typename internal::scalar_product_traits< \
-            typename internal::traits<Derived>::Scalar, \
-            typename internal::traits<OtherDerived>::Scalar \
-          >::ReturnType \
-        >, \
-        Derived, \
-        OtherDerived \
-      >
-
-    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
-    cwiseProduct(const MatrixBase<OtherDerived> &other) const;
-
-    // sparse * sparse
-    template<typename OtherDerived>
-    const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
-    operator*(const SparseMatrixBase<OtherDerived> &other) const;
-
-    // sparse * diagonal
-    template<typename OtherDerived>
-    const SparseDiagonalProduct<Derived,OtherDerived>
-    operator*(const DiagonalBase<OtherDerived> &other) const;
-
-    // diagonal * sparse
-    template<typename OtherDerived> friend
-    const SparseDiagonalProduct<OtherDerived,Derived>
-    operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
-    { return SparseDiagonalProduct<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
-
-    /** dense * sparse (returns a dense object unless it is an outer product) */
-    template<typename OtherDerived> friend
-    const typename DenseSparseProductReturnType<OtherDerived,Derived>::Type
-    operator*(const MatrixBase<OtherDerived>& lhs, const Derived& rhs)
-    { return typename DenseSparseProductReturnType<OtherDerived,Derived>::Type(lhs.derived(),rhs); }
-
-    /** sparse * dense (returns a dense object unless it is an outer product) */
-    template<typename OtherDerived>
-    const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
-    operator*(const MatrixBase<OtherDerived> &other) const;
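-
-    // A minimal sketch of the mixed products above (illustrative only, assuming
-    // double scalars): both directions produce a dense result.
-    //   SparseMatrix<double> S(10,10);
-    //   MatrixXd D = MatrixXd::Random(10,3);
-    //   MatrixXd R1 = S * D;             // sparse * dense
-    //   MatrixXd R2 = D.transpose() * S; // dense * sparse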
-    
-     /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
-    SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
-    {
-      return SparseSymmetricPermutationProduct<Derived,Upper|Lower>(derived(), perm);
-    }
-
-    template<typename OtherDerived>
-    Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
-
-    #ifdef EIGEN2_SUPPORT
-    // deprecated
-    template<typename OtherDerived>
-    typename internal::plain_matrix_type_column_major<OtherDerived>::type
-    solveTriangular(const MatrixBase<OtherDerived>& other) const;
-
-    // deprecated
-    template<typename OtherDerived>
-    void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
-    #endif // EIGEN2_SUPPORT
-
-    template<int Mode>
-    inline const SparseTriangularView<Derived, Mode> triangularView() const;
-
-    template<unsigned int UpLo> inline const SparseSelfAdjointView<Derived, UpLo> selfadjointView() const;
-    template<unsigned int UpLo> inline SparseSelfAdjointView<Derived, UpLo> selfadjointView();
-
-    template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
-    template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
-    RealScalar squaredNorm() const;
-    RealScalar norm()  const;
-
-    Transpose<Derived> transpose() { return derived(); }
-    const Transpose<const Derived> transpose() const { return derived(); }
-    const AdjointReturnType adjoint() const { return transpose(); }
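-
-    // A minimal sketch (illustrative only): transpose() and adjoint() are lazy
-    // expressions; assigning them to a SparseMatrix evaluates the result.
-    //   SparseMatrix<std::complex<double> > A(3,4);
-    //   SparseMatrix<std::complex<double> > At = A.transpose(); // 4x3
-    //   SparseMatrix<std::complex<double> > Ah = A.adjoint();   // 4x3, conjugated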
-
-    // sub-vector
-    SparseInnerVectorSet<Derived,1> row(Index i);
-    const SparseInnerVectorSet<Derived,1> row(Index i) const;
-    SparseInnerVectorSet<Derived,1> col(Index j);
-    const SparseInnerVectorSet<Derived,1> col(Index j) const;
-    SparseInnerVectorSet<Derived,1> innerVector(Index outer);
-    const SparseInnerVectorSet<Derived,1> innerVector(Index outer) const;
-
-    // set of sub-vectors
-    SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size);
-    const SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size) const;
-    SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size);
-    const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const;
-    
-    SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size);
-    const SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size) const;
-    SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size);
-    const SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size) const;
-    SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize);
-    const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const;
-
-      /** \internal use operator= */
-      template<typename DenseDerived>
-      void evalTo(MatrixBase<DenseDerived>& dst) const
-      {
-        dst.setZero();
-        for (Index j=0; j<outerSize(); ++j)
-          for (typename Derived::InnerIterator i(derived(),j); i; ++i)
-            dst.coeffRef(i.row(),i.col()) = i.value();
-      }
-
-      Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
-      {
-        return derived();
-      }
-
-    template<typename OtherDerived>
-    bool isApprox(const SparseMatrixBase<OtherDerived>& other,
-                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
-    { return toDense().isApprox(other.toDense(),prec); }
-
-    template<typename OtherDerived>
-    bool isApprox(const MatrixBase<OtherDerived>& other,
-                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
-    { return toDense().isApprox(other,prec); }
-
-    /** \returns the matrix or vector obtained by evaluating this expression.
-      *
-      * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
-      * a const reference, in order to avoid a useless copy.
-      */
-    inline const typename internal::eval<Derived>::type eval() const
-    { return typename internal::eval<Derived>::type(derived()); }
-
-    Scalar sum() const;
-
-  protected:
-
-    bool m_isRValue;
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_SPARSEMATRIXBASE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseProduct.h b/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseProduct.h
deleted file mode 100644
index 34dd7de69..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseProduct.h
+++ /dev/null
@@ -1,186 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSEPRODUCT_H
-#define EIGEN_SPARSEPRODUCT_H
-
-namespace Eigen { 
-
-template<typename Lhs, typename Rhs>
-struct SparseSparseProductReturnType
-{
-  typedef typename internal::traits<Lhs>::Scalar Scalar;
-  enum {
-    LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
-    RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
-    TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
-    TransposeLhs = LhsRowMajor && (!RhsRowMajor)
-  };
-
-  typedef typename internal::conditional<TransposeLhs,
-    SparseMatrix<Scalar,0>,
-    typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
-
-  typedef typename internal::conditional<TransposeRhs,
-    SparseMatrix<Scalar,0>,
-    typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
-
-  typedef SparseSparseProduct<LhsNested, RhsNested> Type;
-};
-
-namespace internal {
-template<typename LhsNested, typename RhsNested>
-struct traits<SparseSparseProduct<LhsNested, RhsNested> >
-{
-  typedef MatrixXpr XprKind;
-  // clean the nested types:
-  typedef typename remove_all<LhsNested>::type _LhsNested;
-  typedef typename remove_all<RhsNested>::type _RhsNested;
-  typedef typename _LhsNested::Scalar Scalar;
-  typedef typename promote_index_type<typename traits<_LhsNested>::Index,
-                                         typename traits<_RhsNested>::Index>::type Index;
-
-  enum {
-    LhsCoeffReadCost = _LhsNested::CoeffReadCost,
-    RhsCoeffReadCost = _RhsNested::CoeffReadCost,
-    LhsFlags = _LhsNested::Flags,
-    RhsFlags = _RhsNested::Flags,
-
-    RowsAtCompileTime    = _LhsNested::RowsAtCompileTime,
-    ColsAtCompileTime    = _RhsNested::ColsAtCompileTime,
-    MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
-    MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
-
-    InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
-
-    EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
-
-    RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
-
-    Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
-          | EvalBeforeAssigningBit
-          | EvalBeforeNestingBit,
-
-    CoeffReadCost = Dynamic
-  };
-
-  typedef Sparse StorageKind;
-};
-
-} // end namespace internal
-
-template<typename LhsNested, typename RhsNested>
-class SparseSparseProduct : internal::no_assignment_operator,
-  public SparseMatrixBase<SparseSparseProduct<LhsNested, RhsNested> >
-{
-  public:
-
-    typedef SparseMatrixBase<SparseSparseProduct> Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct)
-
-  private:
-
-    typedef typename internal::traits<SparseSparseProduct>::_LhsNested _LhsNested;
-    typedef typename internal::traits<SparseSparseProduct>::_RhsNested _RhsNested;
-
-  public:
-
-    template<typename Lhs, typename Rhs>
-    EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs)
-      : m_lhs(lhs), m_rhs(rhs), m_tolerance(0), m_conservative(true)
-    {
-      init();
-    }
-
-    template<typename Lhs, typename Rhs>
-    EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, const RealScalar& tolerance)
-      : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false)
-    {
-      init();
-    }
-
-    SparseSparseProduct pruned(const Scalar& reference = 0, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) const
-    {
-      return SparseSparseProduct(m_lhs,m_rhs,internal::abs(reference)*epsilon);
-    }
-
-    template<typename Dest>
-    void evalTo(Dest& result) const
-    {
-      if(m_conservative)
-        internal::conservative_sparse_sparse_product_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result);
-      else
-        internal::sparse_sparse_product_with_pruning_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result,m_tolerance);
-    }
-
-    EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
-    EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
-
-    EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
-    EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
-
-  protected:
-    void init()
-    {
-      eigen_assert(m_lhs.cols() == m_rhs.rows());
-
-      enum {
-        ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic
-                      || _RhsNested::RowsAtCompileTime==Dynamic
-                      || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime),
-        AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
-        SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested)
-      };
-      // note to the lost user:
-      //    * for a dot product use: v1.dot(v2)
-      //    * for a coeff-wise product use: v1.cwise()*v2
-      EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
-        INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
-      EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
-        INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
-      EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
-    }
-
-    LhsNested m_lhs;
-    RhsNested m_rhs;
-    RealScalar m_tolerance;
-    bool m_conservative;
-};
-
-// sparse = sparse * sparse
-template<typename Derived>
-template<typename Lhs, typename Rhs>
-inline Derived& SparseMatrixBase<Derived>::operator=(const SparseSparseProduct<Lhs,Rhs>& product)
-{
-  product.evalTo(derived());
-  return derived();
-}
-
-/** \returns an expression of the product of two sparse matrices.
-  * By default a conservative product preserving the symbolic non zeros is performed.
-  * The automatic pruning of the small values can be achieved by calling the pruned() function
-  * in which case a totally different product algorithm is employed:
-  * \code
-  * C = (A*B).pruned();             // suppress numerical zeros (exact)
-  * C = (A*B).pruned(ref);
-  * C = (A*B).pruned(ref,epsilon);
-  * \endcode
-  * where \c ref is a meaningful non zero reference value.
-  * */
-template<typename Derived>
-template<typename OtherDerived>
-inline const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
-SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
-{
-  return typename SparseSparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
-}
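-
-// A minimal sketch (illustrative only, assuming double scalars): the product
-// expression is evaluated on assignment, conservatively or with pruning.
-//   SparseMatrix<double> A(100,100), B(100,100), C;
-//   C = A * B;            // conservative product, keeps symbolic non zeros
-//   C = (A * B).pruned(); // suppresses exact numerical zeros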
-
-} // end namespace Eigen
-
-#endif // EIGEN_SPARSEPRODUCT_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h b/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h
deleted file mode 100644
index 55ec69886..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h
+++ /dev/null
@@ -1,480 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
-#define EIGEN_SPARSE_SELFADJOINTVIEW_H
-
-namespace Eigen { 
-
-/** \ingroup SparseCore_Module
-  * \class SparseSelfAdjointView
-  *
-  * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
-  *
-  * \param MatrixType the type of the sparse matrix storing the coefficients
-  * \param UpLo can be either \c #Lower or \c #Upper
-  *
-  * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
-  * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
-  * and most of the time this is the only way that it is used.
-  *
-  * \sa SparseMatrixBase::selfadjointView()
-  */
-template<typename Lhs, typename Rhs, int UpLo>
-class SparseSelfAdjointTimeDenseProduct;
-
-template<typename Lhs, typename Rhs, int UpLo>
-class DenseTimeSparseSelfAdjointProduct;
-
-namespace internal {
-  
-template<typename MatrixType, unsigned int UpLo>
-struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
-};
-
-template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
-void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
-
-template<int UpLo,typename MatrixType,int DestOrder>
-void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
-
-}
-
-template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
-  : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
-{
-  public:
-
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename MatrixType::Index Index;
-    typedef Matrix<Index,Dynamic,1> VectorI;
-    typedef typename MatrixType::Nested MatrixTypeNested;
-    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
-
-    inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
-    {
-      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
-    }
-
-    inline Index rows() const { return m_matrix.rows(); }
-    inline Index cols() const { return m_matrix.cols(); }
-
-    /** \internal \returns a reference to the nested matrix */
-    const _MatrixTypeNested& matrix() const { return m_matrix; }
-    _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
-
-    /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
-    template<typename OtherDerived>
-    SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
-    operator*(const MatrixBase<OtherDerived>& rhs) const
-    {
-      return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
-    }
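-
-    // A minimal sketch (illustrative only, assuming only the lower triangle of A
-    // is stored): the product uses both halves implicitly.
-    //   SparseMatrix<double> A(5,5); // fill the lower triangular part only
-    //   VectorXd x = VectorXd::Random(5), y;
-    //   y = A.selfadjointView<Lower>() * x;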
-
-    /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
-    template<typename OtherDerived> friend
-    DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
-    operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
-    {
-      return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
-    }
-
-    /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
-      * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
-      *
-      * \returns a reference to \c *this
-      *
-      * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
-      * call this function with u.adjoint().
-      */
-    template<typename DerivedU>
-    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
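-
-    // A minimal sketch (illustrative only, assuming double scalars): accumulate
-    // u*u^T into the stored triangular part of the underlying matrix.
-    //   SparseMatrix<double> A(4,4), u(4,2);
-    //   A.selfadjointView<Lower>().rankUpdate(u, 1.0); // A(lower part) += u * u^T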
-    
-    /** \internal triggered by sparse_matrix = SparseSelfadjointView; */
-    template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
-    {
-      internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
-    }
-    
-    template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
-    {
-      // TODO directly evaluate into _dest;
-      SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
-      internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
-      _dest = tmp;
-    }
-    
-    /** \returns an expression of P H P^-1 */
-    SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
-    {
-      return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
-    }
-    
-    template<typename SrcMatrixType,int SrcUpLo>
-    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
-    {
-      permutedMatrix.evalTo(*this);
-      return *this;
-    }
-
-
-    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
-    {
-      PermutationMatrix<Dynamic> pnull;
-      return *this = src.twistedBy(pnull);
-    }
-
-    template<typename SrcMatrixType,unsigned int SrcUpLo>
-    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcUpLo>& src)
-    {
-      PermutationMatrix<Dynamic> pnull;
-      return *this = src.twistedBy(pnull);
-    }
-    
-
-    // const SparseLLT<PlainObject, UpLo> llt() const;
-    // const SparseLDLT<PlainObject, UpLo> ldlt() const;
-
-  protected:
-
-    typename MatrixType::Nested m_matrix;
-    mutable VectorI m_countPerRow;
-    mutable VectorI m_countPerCol;
-};
-
-/***************************************************************************
-* Implementation of SparseMatrixBase methods
-***************************************************************************/
-
-template<typename Derived>
-template<unsigned int UpLo>
-const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
-{
-  return derived();
-}
-
-template<typename Derived>
-template<unsigned int UpLo>
-SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
-{
-  return derived();
-}
-
-/***************************************************************************
-* Implementation of SparseSelfAdjointView methods
-***************************************************************************/
-
-template<typename MatrixType, unsigned int UpLo>
-template<typename DerivedU>
-SparseSelfAdjointView<MatrixType,UpLo>&
-SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
-{
-  SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
-  if(alpha==Scalar(0))
-    m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
-  else
-    m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();
-
-  return *this;
-}
-
-/***************************************************************************
-* Implementation of sparse self-adjoint time dense matrix
-***************************************************************************/
-
-namespace internal {
-template<typename Lhs, typename Rhs, int UpLo>
-struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
- : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
-{
-  typedef Dense StorageKind;
-};
-}
-
-template<typename Lhs, typename Rhs, int UpLo>
-class SparseSelfAdjointTimeDenseProduct
-  : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
-{
-  public:
-    EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)
-
-    SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
-    {}
-
-    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
-    {
-      // TODO use alpha
-      eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
-      typedef typename internal::remove_all<Lhs>::type _Lhs;
-      typedef typename internal::remove_all<Rhs>::type _Rhs;
-      typedef typename _Lhs::InnerIterator LhsInnerIterator;
-      enum {
-        LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
-        ProcessFirstHalf =
-                 ((UpLo&(Upper|Lower))==(Upper|Lower))
-              || ( (UpLo&Upper) && !LhsIsRowMajor)
-              || ( (UpLo&Lower) && LhsIsRowMajor),
-        ProcessSecondHalf = !ProcessFirstHalf
-      };
-      for (Index j=0; j<m_lhs.outerSize(); ++j)
-      {
-        LhsInnerIterator i(m_lhs,j);
-        if (ProcessSecondHalf)
-        {
-          while (i && i.index()<j) ++i;
-          if(i && i.index()==j)
-          {
-            dest.row(j) += i.value() * m_rhs.row(j);
-            ++i;
-          }
-        }
-        for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
-        {
-          Index a = LhsIsRowMajor ? j : i.index();
-          Index b = LhsIsRowMajor ? i.index() : j;
-          typename Lhs::Scalar v = i.value();
-          dest.row(a) += (v) * m_rhs.row(b);
-          dest.row(b) += internal::conj(v) * m_rhs.row(a);
-        }
-        if (ProcessFirstHalf && i && (i.index()==j))
-          dest.row(j) += i.value() * m_rhs.row(j);
-      }
-    }
-
-  private:
-    SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&);
-};
-
-namespace internal {
-template<typename Lhs, typename Rhs, int UpLo>
-struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
- : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
-{};
-}
-
-template<typename Lhs, typename Rhs, int UpLo>
-class DenseTimeSparseSelfAdjointProduct
-  : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
-{
-  public:
-    EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)
-
-    DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
-    {}
-
-    template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const
-    {
-      // TODO
-    }
-
-  private:
-    DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
-};
-
-/***************************************************************************
-* Implementation of symmetric copies and permutations
-***************************************************************************/
-namespace internal {
-  
-template<typename MatrixType, int UpLo>
-struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
-};
-
-template<int UpLo,typename MatrixType,int DestOrder>
-void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
-{
-  typedef typename MatrixType::Index Index;
-  typedef typename MatrixType::Scalar Scalar;
-  typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
-  typedef Matrix<Index,Dynamic,1> VectorI;
-  
-  Dest& dest(_dest.derived());
-  enum {
-    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
-  };
-  
-  Index size = mat.rows();
-  VectorI count;
-  count.resize(size);
-  count.setZero();
-  dest.resize(size,size);
-  for(Index j = 0; j<size; ++j)
-  {
-    Index jp = perm ? perm[j] : j;
-    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
-    {
-      Index i = it.index();
-      Index r = it.row();
-      Index c = it.col();
-      Index ip = perm ? perm[i] : i;
-      if(UpLo==(Upper|Lower))
-        count[StorageOrderMatch ? jp : ip]++;
-      else if(r==c)
-        count[ip]++;
-      else if(( UpLo==Lower && r>c) || ( UpLo==Upper && r<c))
-      {
-        count[ip]++;
-        count[jp]++;
-      }
-    }
-  }
-  Index nnz = count.sum();
-  
-  // reserve space
-  dest.resizeNonZeros(nnz);
-  dest.outerIndexPtr()[0] = 0;
-  for(Index j=0; j<size; ++j)
-    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
-  for(Index j=0; j<size; ++j)
-    count[j] = dest.outerIndexPtr()[j];
-  
-  // copy data
-  for(Index j = 0; j<size; ++j)
-  {
-    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
-    {
-      Index i = it.index();
-      Index r = it.row();
-      Index c = it.col();
-      
-      Index jp = perm ? perm[j] : j;
-      Index ip = perm ? perm[i] : i;
-      
-      if(UpLo==(Upper|Lower))
-      {
-        Index k = count[StorageOrderMatch ? jp : ip]++;
-        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
-        dest.valuePtr()[k] = it.value();
-      }
-      else if(r==c)
-      {
-        Index k = count[ip]++;
-        dest.innerIndexPtr()[k] = ip;
-        dest.valuePtr()[k] = it.value();
-      }
-      else if(( (UpLo&Lower)==Lower && r>c) || ( (UpLo&Upper)==Upper && r<c))
-      {
-        if(!StorageOrderMatch)
-          std::swap(ip,jp);
-        Index k = count[jp]++;
-        dest.innerIndexPtr()[k] = ip;
-        dest.valuePtr()[k] = it.value();
-        k = count[ip]++;
-        dest.innerIndexPtr()[k] = jp;
-        dest.valuePtr()[k] = internal::conj(it.value());
-      }
-    }
-  }
-}
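-// Note: permute_symm_to_fullsymm above follows the classical two-pass compressed-storage
-// construction: a first pass counts the entries received by each destination outer vector
-// (the count[] vector), the counts are accumulated into outerIndexPtr(), and a second pass
-// writes innerIndexPtr()/valuePtr() while reusing count[] as per-vector write cursors.
-// permute_symm_to_symm below uses the same pattern.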
-
-template<int _SrcUpLo,int _DstUpLo,typename MatrixType,int DstOrder>
-void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
-{
-  typedef typename MatrixType::Index Index;
-  typedef typename MatrixType::Scalar Scalar;
-  SparseMatrix<Scalar,DstOrder,Index>& dest(_dest.derived());
-  typedef Matrix<Index,Dynamic,1> VectorI;
-  enum {
-    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
-    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
-    DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo,
-    SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo
-  };
-  
-  Index size = mat.rows();
-  VectorI count(size);
-  count.setZero();
-  dest.resize(size,size);
-  for(Index j = 0; j<size; ++j)
-  {
-    Index jp = perm ? perm[j] : j;
-    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
-    {
-      Index i = it.index();
-      if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
-        continue;
-                  
-      Index ip = perm ? perm[i] : i;
-      count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
-    }
-  }
-  dest.outerIndexPtr()[0] = 0;
-  for(Index j=0; j<size; ++j)
-    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
-  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
-  for(Index j=0; j<size; ++j)
-    count[j] = dest.outerIndexPtr()[j];
-  
-  for(Index j = 0; j<size; ++j)
-  {
-    
-    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
-    {
-      Index i = it.index();
-      if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
-        continue;
-                  
-      Index jp = perm ? perm[j] : j;
-      Index ip = perm? perm[i] : i;
-      
-      Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
-      dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
-      
-      if(!StorageOrderMatch) std::swap(ip,jp);
-      if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
-        dest.valuePtr()[k] = conj(it.value());
-      else
-        dest.valuePtr()[k] = it.value();
-    }
-  }
-}
-
-}
-
-template<typename MatrixType,int UpLo>
-class SparseSymmetricPermutationProduct
-  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
-{
-  public:
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename MatrixType::Index Index;
-  protected:
-    typedef PermutationMatrix<Dynamic,Dynamic,Index> Perm;
-  public:
-    typedef Matrix<Index,Dynamic,1> VectorI;
-    typedef typename MatrixType::Nested MatrixTypeNested;
-    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
-    
-    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
-      : m_matrix(mat), m_perm(perm)
-    {}
-    
-    inline Index rows() const { return m_matrix.rows(); }
-    inline Index cols() const { return m_matrix.cols(); }
-    
-    template<typename DestScalar, int Options, typename DstIndex>
-    void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
-    {
-      internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
-    }
-    
-    template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
-    {
-      internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
-    }
-    
-  protected:
-    MatrixTypeNested m_matrix;
-    const Perm& m_perm;
-
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
deleted file mode 100644
index 70857c7b6..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
+++ /dev/null
@@ -1,149 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
-#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
-
-namespace Eigen { 
-
-namespace internal {
-
-
-// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
-template<typename Lhs, typename Rhs, typename ResultType>
-static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance)
-{
-  // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
-
-  typedef typename remove_all<Lhs>::type::Scalar Scalar;
-  typedef typename remove_all<Lhs>::type::Index Index;
-
-  // make sure to call innerSize/outerSize since we fake the storage order.
-  Index rows = lhs.innerSize();
-  Index cols = rhs.outerSize();
-  //int size = lhs.outerSize();
-  eigen_assert(lhs.outerSize() == rhs.innerSize());
-
-  // allocate a temporary buffer
-  AmbiVector<Scalar,Index> tempVector(rows);
-
-  // estimate the number of non zero entries
-  // given a rhs column containing Y non-zeros, we assume that the corresponding Y columns
-  // of the lhs differ on average by one non-zero, thus the number of non-zeros of the
-  // product of a rhs column with the lhs is X+Y, where X is the average number of non-zeros
-  // per column of the lhs.
-  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
-  Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
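-  // For instance (illustrative numbers only): with nnz(lhs) = 1000 and nnz(rhs) = 800 the
-  // estimate reserves 1800 entries, and for a 100x100 result ratioColRes below becomes
-  // 1800 / (100*100) = 0.18, i.e. the AmbiVector is initialized expecting roughly 18% fill per column.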
-
-  // mimics a resizeByInnerOuter:
-  if(ResultType::IsRowMajor)
-    res.resize(cols, rows);
-  else
-    res.resize(rows, cols);
-
-  res.reserve(estimated_nnz_prod);
-  double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
-  for (Index j=0; j<cols; ++j)
-  {
-    // FIXME:
-    //double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
-    // let's do a more accurate determination of the nnz ratio for the current column j of res
-    tempVector.init(ratioColRes);
-    tempVector.setZero();
-    for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
-    {
-      // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
-      tempVector.restart();
-      Scalar x = rhsIt.value();
-      for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
-      {
-        tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
-      }
-    }
-    res.startVec(j);
-    for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector,tolerance); it; ++it)
-      res.insertBackByOuterInner(j,it.index()) = it.value();
-  }
-  res.finalize();
-}
-
-template<typename Lhs, typename Rhs, typename ResultType,
-  int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
-  int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
-  int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
-struct sparse_sparse_product_with_pruning_selector;
-
-template<typename Lhs, typename Rhs, typename ResultType>
-struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
-{
-  typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
-  typedef typename ResultType::RealScalar RealScalar;
-
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
-  {
-    typename remove_all<ResultType>::type _res(res.rows(), res.cols());
-    internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
-    res.swap(_res);
-  }
-};
-
-template<typename Lhs, typename Rhs, typename ResultType>
-struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
-{
-  typedef typename ResultType::RealScalar RealScalar;
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
-  {
-    // we need a col-major matrix to hold the result
-    typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
-    SparseTemporaryType _res(res.rows(), res.cols());
-    internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
-    res = _res;
-  }
-};
-
-template<typename Lhs, typename Rhs, typename ResultType>
-struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
-{
-  typedef typename ResultType::RealScalar RealScalar;
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
-  {
-    // let's transpose the product to get a column x column product
-    typename remove_all<ResultType>::type _res(res.rows(), res.cols());
-    internal::sparse_sparse_product_with_pruning_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res, tolerance);
-    res.swap(_res);
-  }
-};
-
-template<typename Lhs, typename Rhs, typename ResultType>
-struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
-{
-  typedef typename ResultType::RealScalar RealScalar;
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
-  {
-    typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
-    ColMajorMatrix colLhs(lhs);
-    ColMajorMatrix colRhs(rhs);
-    internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res, tolerance);
-
-    // let's transpose the product to get a column x column product
-//     typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
-//     SparseTemporaryType _res(res.cols(), res.rows());
-//     sparse_sparse_product_with_pruning_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
-//     res = _res.transpose();
-  }
-};
-
-// NOTE the two other cases (col row *) must never occur since they are caught
-// by ProductReturnType, which transforms them to (col col *) by evaluating the rhs.
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseTranspose.h b/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseTranspose.h
deleted file mode 100644
index c78c20a2f..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseTranspose.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSETRANSPOSE_H
-#define EIGEN_SPARSETRANSPOSE_H
-
-namespace Eigen { 
-
-template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
-  : public SparseMatrixBase<Transpose<MatrixType> >
-{
-    typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested;
-  public:
-
-    EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType> )
-
-    class InnerIterator;
-    class ReverseInnerIterator;
-
-    inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
-};
-
-// NOTE: VC10 triggers an ICE if we don't put typename TransposeImpl<MatrixType,Sparse>:: in front of Index;
-// a typedef typename TransposeImpl<MatrixType,Sparse>::Index Index;
-// does not fix the issue.
-// An alternative is to define the nested class in the parent class itself.
-template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerIterator
-  : public _MatrixTypeNested::InnerIterator
-{
-    typedef typename _MatrixTypeNested::InnerIterator Base;
-  public:
-
-    EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer)
-      : Base(trans.derived().nestedExpression(), outer)
-    {}
-    inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
-    inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
-};
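-// Note: the iterator above simply walks the nested expression's storage and swaps the meaning
-// of row() and col(), so transposing a sparse expression costs nothing until it is actually evaluated.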
-
-template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
-  : public _MatrixTypeNested::ReverseInnerIterator
-{
-    typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
-  public:
-
-    EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer)
-      : Base(xpr.derived().nestedExpression(), outer)
-    {}
-    inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
-    inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_SPARSETRANSPOSE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseVector.h b/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseVector.h
deleted file mode 100644
index a6a92d8aa..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseVector.h
+++ /dev/null
@@ -1,398 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSEVECTOR_H
-#define EIGEN_SPARSEVECTOR_H
-
-namespace Eigen { 
-
-/** \ingroup SparseCore_Module
-  * \class SparseVector
-  *
-  * \brief a sparse vector class
-  *
-  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
-  *
-  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
-  *
-  * This class can be extended with the help of the plugin mechanism described on the page
-  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
-  */
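-// A minimal usage sketch (illustrative only, values are arbitrary):
-//
-//   SparseVector<double> v(1000);   // 1000 coefficients, all implicitly zero
-//   v.coeffRef(4)  = 1.0;           // inserts a non-zero at index 4
-//   v.coeffRef(42) = -3.5;
-//   double s = v.sum();             // reductions are inherited from SparseMatrixBase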
-
-namespace internal {
-template<typename _Scalar, int _Options, typename _Index>
-struct traits<SparseVector<_Scalar, _Options, _Index> >
-{
-  typedef _Scalar Scalar;
-  typedef _Index Index;
-  typedef Sparse StorageKind;
-  typedef MatrixXpr XprKind;
-  enum {
-    IsColVector = (_Options & RowMajorBit) ? 0 : 1,
-
-    RowsAtCompileTime = IsColVector ? Dynamic : 1,
-    ColsAtCompileTime = IsColVector ? 1 : Dynamic,
-    MaxRowsAtCompileTime = RowsAtCompileTime,
-    MaxColsAtCompileTime = ColsAtCompileTime,
-    Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit),
-    CoeffReadCost = NumTraits<Scalar>::ReadCost,
-    SupportedAccessPatterns = InnerRandomAccessPattern
-  };
-};
-}
-
-template<typename _Scalar, int _Options, typename _Index>
-class SparseVector
-  : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
-{
-  public:
-    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
-    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
-    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
-
-  protected:
-  public:
-
-    typedef SparseMatrixBase<SparseVector> SparseBase;
-    enum { IsColVector = internal::traits<SparseVector>::IsColVector };
-    
-    enum {
-      Options = _Options
-    };
-
-    internal::CompressedStorage<Scalar,Index> m_data;
-    Index m_size;
-
-    internal::CompressedStorage<Scalar,Index>& _data() { return m_data; }
-    const internal::CompressedStorage<Scalar,Index>& _data() const { return m_data; }
-
-  public:
-
-    EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
-    EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
-    EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
-    EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
-
-    EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); }
-    EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); }
-
-    EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); }
-    EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); }
-
-    inline Scalar coeff(Index row, Index col) const
-    {
-      eigen_assert((IsColVector ? col : row)==0);
-      return coeff(IsColVector ? row : col);
-    }
-    inline Scalar coeff(Index i) const { return m_data.at(i); }
-
-    inline Scalar& coeffRef(Index row, Index col)
-    {
-      eigen_assert((IsColVector ? col : row)==0);
-      return coeffRef(IsColVector ? row : col);
-    }
-
-    /** \returns a reference to the coefficient value at the given index \a i.
-      * This operation involves a log(rho*size) binary search. If the coefficient does not
-      * exist yet, then a sorted insertion into a sequential buffer is performed.
-      *
-      * This insertion might be very costly if the number of nonzeros above \a i is large.
-      */
-    inline Scalar& coeffRef(Index i)
-    {
-      return m_data.atWithInsertion(i);
-    }
-
-  public:
-
-    class InnerIterator;
-    class ReverseInnerIterator;
-
-    inline void setZero() { m_data.clear(); }
-
-    /** \returns the number of non zero coefficients */
-    inline Index nonZeros() const  { return static_cast<Index>(m_data.size()); }
-
-    inline void startVec(Index outer)
-    {
-      EIGEN_UNUSED_VARIABLE(outer);
-      eigen_assert(outer==0);
-    }
-
-    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
-    {
-      EIGEN_UNUSED_VARIABLE(outer);
-      eigen_assert(outer==0);
-      return insertBack(inner);
-    }
-    inline Scalar& insertBack(Index i)
-    {
-      m_data.append(0, i);
-      return m_data.value(m_data.size()-1);
-    }
-
-    inline Scalar& insert(Index row, Index col)
-    {
-      Index inner = IsColVector ? row : col;
-      Index outer = IsColVector ? col : row;
-      eigen_assert(outer==0);
-      return insert(inner);
-    }
-    Scalar& insert(Index i)
-    {
-      Index startId = 0;
-      Index p = Index(m_data.size()) - 1;
-      // TODO smart realloc
-      m_data.resize(p+2,1);
-
-      while ( (p >= startId) && (m_data.index(p) > i) )
-      {
-        m_data.index(p+1) = m_data.index(p);
-        m_data.value(p+1) = m_data.value(p);
-        --p;
-      }
-      m_data.index(p+1) = i;
-      m_data.value(p+1) = 0;
-      return m_data.value(p+1);
-    }
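-    // When the non-zeros are known in strictly increasing index order, the append-style API
-    // above is cheaper than insert() since no shifting is needed (illustrative sketch only):
-    //
-    //   SparseVector<double> v(n);
-    //   v.reserve(nnz);
-    //   v.insertBack(3) = 1.0;   // indices must be strictly increasing
-    //   v.insertBack(8) = 2.0;
-    //   v.finalize();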
-
-    /** Preallocates room for \a reserveSize non-zero coefficients. */
-    inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
-
-
-    inline void finalize() {}
-
-    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
-    {
-      m_data.prune(reference,epsilon);
-    }
-
-    void resize(Index rows, Index cols)
-    {
-      eigen_assert(rows==1 || cols==1);
-      resize(IsColVector ? rows : cols);
-    }
-
-    void resize(Index newSize)
-    {
-      m_size = newSize;
-      m_data.clear();
-    }
-
-    void resizeNonZeros(Index size) { m_data.resize(size); }
-
-    inline SparseVector() : m_size(0) { resize(0); }
-
-    inline SparseVector(Index size) : m_size(0) { resize(size); }
-
-    inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
-
-    template<typename OtherDerived>
-    inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
-      : m_size(0)
-    {
-      *this = other.derived();
-    }
-
-    inline SparseVector(const SparseVector& other)
-      : m_size(0)
-    {
-      *this = other.derived();
-    }
-
-    inline void swap(SparseVector& other)
-    {
-      std::swap(m_size, other.m_size);
-      m_data.swap(other.m_data);
-    }
-
-    inline SparseVector& operator=(const SparseVector& other)
-    {
-      if (other.isRValue())
-      {
-        swap(other.const_cast_derived());
-      }
-      else
-      {
-        resize(other.size());
-        m_data = other.m_data;
-      }
-      return *this;
-    }
-
-    template<typename OtherDerived>
-    inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
-    {
-      if (int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime))
-        return assign(other.transpose());
-      else
-        return assign(other);
-    }
-
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    template<typename Lhs, typename Rhs>
-    inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
-    {
-      return Base::operator=(product);
-    }
-    #endif
-
-    friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
-    {
-      for (Index i=0; i<m.nonZeros(); ++i)
-        s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
-      s << std::endl;
-      return s;
-    }
-
-    /** Destructor */
-    inline ~SparseVector() {}
-
-    /** Overloaded for performance */
-    Scalar sum() const;
-
-  public:
-
-    /** \deprecated use setZero() and reserve() */
-    EIGEN_DEPRECATED void startFill(Index reserve)
-    {
-      setZero();
-      m_data.reserve(reserve);
-    }
-
-    /** \deprecated use insertBack(Index,Index) */
-    EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
-    {
-      eigen_assert(r==0 || c==0);
-      return fill(IsColVector ? r : c);
-    }
-
-    /** \deprecated use insertBack(Index) */
-    EIGEN_DEPRECATED Scalar& fill(Index i)
-    {
-      m_data.append(0, i);
-      return m_data.value(m_data.size()-1);
-    }
-
-    /** \deprecated use insert(Index,Index) */
-    EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
-    {
-      eigen_assert(r==0 || c==0);
-      return fillrand(IsColVector ? r : c);
-    }
-
-    /** \deprecated use insert(Index) */
-    EIGEN_DEPRECATED Scalar& fillrand(Index i)
-    {
-      return insert(i);
-    }
-
-    /** \deprecated use finalize() */
-    EIGEN_DEPRECATED void endFill() {}
-    
-#   ifdef EIGEN_SPARSEVECTOR_PLUGIN
-#     include EIGEN_SPARSEVECTOR_PLUGIN
-#   endif
-
-protected:
-    template<typename OtherDerived>
-    EIGEN_DONT_INLINE SparseVector& assign(const SparseMatrixBase<OtherDerived>& _other)
-    {
-      const OtherDerived& other(_other.derived());
-      const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
-      if(needToTranspose)
-      {
-        Index size = other.size();
-        Index nnz = other.nonZeros();
-        resize(size);
-        reserve(nnz);
-        for(Index i=0; i<size; ++i)
-        {
-          typename OtherDerived::InnerIterator it(other, i);
-          if(it)
-              insert(i) = it.value();
-        }
-        return *this;
-      }
-      else
-      {
-        // there is no special optimization
-        return Base::operator=(other);
-      }
-    }
-};
-
-template<typename Scalar, int _Options, typename _Index>
-class SparseVector<Scalar,_Options,_Index>::InnerIterator
-{
-  public:
-    InnerIterator(const SparseVector& vec, Index outer=0)
-      : m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
-    {
-      EIGEN_UNUSED_VARIABLE(outer);
-      eigen_assert(outer==0);
-    }
-
-    InnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
-      : m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
-    {}
-
-    inline InnerIterator& operator++() { m_id++; return *this; }
-
-    inline Scalar value() const { return m_data.value(m_id); }
-    inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
-
-    inline Index index() const { return m_data.index(m_id); }
-    inline Index row() const { return IsColVector ? index() : 0; }
-    inline Index col() const { return IsColVector ? 0 : index(); }
-
-    inline operator bool() const { return (m_id < m_end); }
-
-  protected:
-    const internal::CompressedStorage<Scalar,Index>& m_data;
-    Index m_id;
-    const Index m_end;
-};
-
-template<typename Scalar, int _Options, typename _Index>
-class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
-{
-  public:
-    ReverseInnerIterator(const SparseVector& vec, Index outer=0)
-      : m_data(vec.m_data), m_id(static_cast<Index>(m_data.size())), m_start(0)
-    {
-      EIGEN_UNUSED_VARIABLE(outer);
-      eigen_assert(outer==0);
-    }
-
-    ReverseInnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
-      : m_data(data), m_id(static_cast<Index>(m_data.size())), m_start(0)
-    {}
-
-    inline ReverseInnerIterator& operator--() { m_id--; return *this; }
-
-    inline Scalar value() const { return m_data.value(m_id-1); }
-    inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id-1)); }
-
-    inline Index index() const { return m_data.index(m_id-1); }
-    inline Index row() const { return IsColVector ? index() : 0; }
-    inline Index col() const { return IsColVector ? 0 : index(); }
-
-    inline operator bool() const { return (m_id > m_start); }
-
-  protected:
-    const internal::CompressedStorage<Scalar,Index>& m_data;
-    Index m_id;
-    const Index m_start;
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_SPARSEVECTOR_H
diff --git a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseView.h b/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseView.h
deleted file mode 100644
index 4fd0cb3d8..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SparseCore/SparseView.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSEVIEW_H
-#define EIGEN_SPARSEVIEW_H
-
-namespace Eigen { 
-
-namespace internal {
-
-template<typename MatrixType>
-struct traits<SparseView<MatrixType> > : traits<MatrixType>
-{
-  typedef int Index;
-  typedef Sparse StorageKind;
-  enum {
-    Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
-  };
-};
-
-} // end namespace internal
-
-template<typename MatrixType>
-class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
-{
-  typedef typename MatrixType::Nested MatrixTypeNested;
-  typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
-public:
-  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
-
-  SparseView(const MatrixType& mat, const Scalar& m_reference = Scalar(0),
-             typename NumTraits<Scalar>::Real m_epsilon = NumTraits<Scalar>::dummy_precision()) : 
-    m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {}
-
-  class InnerIterator;
-
-  inline Index rows() const { return m_matrix.rows(); }
-  inline Index cols() const { return m_matrix.cols(); }
-
-  inline Index innerSize() const { return m_matrix.innerSize(); }
-  inline Index outerSize() const { return m_matrix.outerSize(); }
-
-protected:
-  MatrixTypeNested m_matrix;
-  Scalar m_reference;
-  typename NumTraits<Scalar>::Real m_epsilon;
-};
-
-template<typename MatrixType>
-class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
-{
-public:
-  typedef typename _MatrixTypeNested::InnerIterator IterBase;
-  InnerIterator(const SparseView& view, Index outer) :
-  IterBase(view.m_matrix, outer), m_view(view)
-  {
-    incrementToNonZero();
-  }
-
-  EIGEN_STRONG_INLINE InnerIterator& operator++()
-  {
-    IterBase::operator++();
-    incrementToNonZero();
-    return *this;
-  }
-
-  using IterBase::value;
-
-protected:
-  const SparseView& m_view;
-
-private:
-  void incrementToNonZero()
-  {
-    while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon))
-    {
-      IterBase::operator++();
-    }
-  }
-};
-
-template<typename Derived>
-const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& m_reference,
-                                                          const typename NumTraits<Scalar>::Real& m_epsilon) const
-{
-  return SparseView<Derived>(derived(), m_reference, m_epsilon);
-}
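-// A minimal usage sketch (illustrative only): converting a dense expression to sparse storage
-// while dropping the entries that are much smaller than a reference value:
-//
-//   MatrixXd D = MatrixXd::Random(100,100);
-//   SparseMatrix<double> S = D.sparseView(1.0, 1e-12);  // keeps entries with magnitude above about 1e-12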
-
-} // end namespace Eigen
-
-#endif
diff --git a/resources/3rdparty/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h b/resources/3rdparty/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h
deleted file mode 100644
index cd6c4b91f..000000000
--- a/resources/3rdparty/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h
+++ /dev/null
@@ -1,1026 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SUPERLUSUPPORT_H
-#define EIGEN_SUPERLUSUPPORT_H
-
-namespace Eigen { 
-
-#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE)		\
-    extern "C" {                                                                                          \
-      typedef struct { FLOATTYPE for_lu; FLOATTYPE total_needed; int expansions; } PREFIX##mem_usage_t;   \
-      extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *,                  \
-                                char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *,           \
-                                void *, int, SuperMatrix *, SuperMatrix *,                                \
-                                FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *,                       \
-                                PREFIX##mem_usage_t *, SuperLUStat_t *, int *);                           \
-    }                                                                                                     \
-    inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A,                                \
-         int *perm_c, int *perm_r, int *etree, char *equed,                                               \
-         FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L,                                                      \
-         SuperMatrix *U, void *work, int lwork,                                                           \
-         SuperMatrix *B, SuperMatrix *X,                                                                  \
-         FLOATTYPE *recip_pivot_growth,                                                                   \
-         FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr,                                              \
-         SuperLUStat_t *stats, int *info, KEYTYPE) {                                                      \
-    PREFIX##mem_usage_t mem_usage;                                                                        \
-    PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L,                                      \
-         U, work, lwork, B, X, recip_pivot_growth, rcond,                                                 \
-         ferr, berr, &mem_usage, stats, info);                                                            \
-    return mem_usage.for_lu; /* bytes used by the factor storage */                                       \
-  }
-
-DECL_GSSVX(s,float,float)
-DECL_GSSVX(c,float,std::complex<float>)
-DECL_GSSVX(d,double,double)
-DECL_GSSVX(z,double,std::complex<double>)
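-// Note: each DECL_GSSVX invocation above declares the C entry point of one SuperLU precision
-// (sgssvx, cgssvx, dgssvx, zgssvx) and wraps it in an overload of SuperLU_gssvx whose trailing
-// KEYTYPE argument only selects the precision at overload-resolution time, e.g. (illustrative):
-//
-//   SuperLU_gssvx(..., &stats, &info, double());              // dispatches to dgssvx
-//   SuperLU_gssvx(..., &stats, &info, std::complex<float>()); // dispatches to cgssvx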
-
-#ifdef MILU_ALPHA
-#define EIGEN_SUPERLU_HAS_ILU
-#endif
-
-#ifdef EIGEN_SUPERLU_HAS_ILU
-
-// similarly for the incomplete factorization using gsisx
-#define DECL_GSISX(PREFIX,FLOATTYPE,KEYTYPE)                                                    \
-    extern "C" {                                                                                \
-      extern void PREFIX##gsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *,        \
-                         char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *,        \
-                         void *, int, SuperMatrix *, SuperMatrix *, FLOATTYPE *, FLOATTYPE *,   \
-                         PREFIX##mem_usage_t *, SuperLUStat_t *, int *);                        \
-    }                                                                                           \
-    inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A,                      \
-         int *perm_c, int *perm_r, int *etree, char *equed,                                     \
-         FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L,                                            \
-         SuperMatrix *U, void *work, int lwork,                                                 \
-         SuperMatrix *B, SuperMatrix *X,                                                        \
-         FLOATTYPE *recip_pivot_growth,                                                         \
-         FLOATTYPE *rcond,                                                                      \
-         SuperLUStat_t *stats, int *info, KEYTYPE) {                                            \
-    PREFIX##mem_usage_t mem_usage;                                                              \
-    PREFIX##gsisx(options, A, perm_c, perm_r, etree, equed, R, C, L,                            \
-         U, work, lwork, B, X, recip_pivot_growth, rcond,                                       \
-         &mem_usage, stats, info);                                                              \
-    return mem_usage.for_lu; /* bytes used by the factor storage */                             \
-  }
-
-DECL_GSISX(s,float,float)
-DECL_GSISX(c,float,std::complex<float>)
-DECL_GSISX(d,double,double)
-DECL_GSISX(z,double,std::complex<double>)
-
-#endif
-
-template<typename MatrixType>
-struct SluMatrixMapHelper;
-
-/** \internal
-  *
-  * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices
-  * and dense matrices. Supernodal and other fancy formats are not supported by this wrapper.
-  *
-  * This wrapper class mainly aims to avoid the need for dynamic allocation of the storage structure.
-  */
-struct SluMatrix : SuperMatrix
-{
-  SluMatrix()
-  {
-    Store = &storage;
-  }
-
-  SluMatrix(const SluMatrix& other)
-    : SuperMatrix(other)
-  {
-    Store = &storage;
-    storage = other.storage;
-  }
-
-  SluMatrix& operator=(const SluMatrix& other)
-  {
-    SuperMatrix::operator=(static_cast<const SuperMatrix&>(other));
-    Store = &storage;
-    storage = other.storage;
-    return *this;
-  }
-
-  struct
-  {
-    union {int nnz;int lda;};
-    void *values;
-    int *innerInd;
-    int *outerInd;
-  } storage;
-
-  void setStorageType(Stype_t t)
-  {
-    Stype = t;
-    if (t==SLU_NC || t==SLU_NR || t==SLU_DN)
-      Store = &storage;
-    else
-    {
-      eigen_assert(false && "storage type not supported");
-      Store = 0;
-    }
-  }
-
-  template<typename Scalar>
-  void setScalarType()
-  {
-    if (internal::is_same<Scalar,float>::value)
-      Dtype = SLU_S;
-    else if (internal::is_same<Scalar,double>::value)
-      Dtype = SLU_D;
-    else if (internal::is_same<Scalar,std::complex<float> >::value)
-      Dtype = SLU_C;
-    else if (internal::is_same<Scalar,std::complex<double> >::value)
-      Dtype = SLU_Z;
-    else
-    {
-      eigen_assert(false && "Scalar type not supported by SuperLU");
-    }
-  }
-
-  template<typename MatrixType>
-  static SluMatrix Map(MatrixBase<MatrixType>& _mat)
-  {
-    MatrixType& mat(_mat.derived());
-    eigen_assert( ((MatrixType::Flags&RowMajorBit)!=RowMajorBit) && "row-major dense matrices are not supported by SuperLU");
-    SluMatrix res;
-    res.setStorageType(SLU_DN);
-    res.setScalarType<typename MatrixType::Scalar>();
-    res.Mtype     = SLU_GE;
-
-    res.nrow      = mat.rows();
-    res.ncol      = mat.cols();
-
-    res.storage.lda       = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride();
-    res.storage.values    = mat.data();
-    return res;
-  }
-
-  template<typename MatrixType>
-  static SluMatrix Map(SparseMatrixBase<MatrixType>& mat)
-  {
-    SluMatrix res;
-    if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
-    {
-      res.setStorageType(SLU_NR);
-      res.nrow      = mat.cols();
-      res.ncol      = mat.rows();
-    }
-    else
-    {
-      res.setStorageType(SLU_NC);
-      res.nrow      = mat.rows();
-      res.ncol      = mat.cols();
-    }
-
-    res.Mtype       = SLU_GE;
-
-    res.storage.nnz       = mat.nonZeros();
-    res.storage.values    = mat.derived().valuePtr();
-    res.storage.innerInd  = mat.derived().innerIndexPtr();
-    res.storage.outerInd  = mat.derived().outerIndexPtr();
-
-    res.setScalarType<typename MatrixType::Scalar>();
-
-    // FIXME the following is not very accurate
-    if (MatrixType::Flags & Upper)
-      res.Mtype = SLU_TRU;
-    if (MatrixType::Flags & Lower)
-      res.Mtype = SLU_TRL;
-
-    eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
-
-    return res;
-  }
-};
-
-template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
-struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >
-{
-  typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
-  static void run(MatrixType& mat, SluMatrix& res)
-  {
-    eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices are not supported by SuperLU");
-    res.setStorageType(SLU_DN);
-    res.setScalarType<Scalar>();
-    res.Mtype     = SLU_GE;
-
-    res.nrow      = mat.rows();
-    res.ncol      = mat.cols();
-
-    res.storage.lda       = mat.outerStride();
-    res.storage.values    = mat.data();
-  }
-};
-
-template<typename Derived>
-struct SluMatrixMapHelper<SparseMatrixBase<Derived> >
-{
-  typedef Derived MatrixType;
-  static void run(MatrixType& mat, SluMatrix& res)
-  {
-    if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
-    {
-      res.setStorageType(SLU_NR);
-      res.nrow      = mat.cols();
-      res.ncol      = mat.rows();
-    }
-    else
-    {
-      res.setStorageType(SLU_NC);
-      res.nrow      = mat.rows();
-      res.ncol      = mat.cols();
-    }
-
-    res.Mtype       = SLU_GE;
-
-    res.storage.nnz       = mat.nonZeros();
-    res.storage.values    = mat.valuePtr();
-    res.storage.innerInd  = mat.innerIndexPtr();
-    res.storage.outerInd  = mat.outerIndexPtr();
-
-    res.setScalarType<typename MatrixType::Scalar>();
-
-    // FIXME the following is not very accurate
-    if (MatrixType::Flags & Upper)
-      res.Mtype = SLU_TRU;
-    if (MatrixType::Flags & Lower)
-      res.Mtype = SLU_TRL;
-
-    eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
-  }
-};
-
-namespace internal {
-
-template<typename MatrixType>
-SluMatrix asSluMatrix(MatrixType& mat)
-{
-  return SluMatrix::Map(mat);
-}
-
-/** View a SuperLU matrix as an Eigen expression */
-template<typename Scalar, int Flags, typename Index>
-MappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat)
-{
-  eigen_assert(((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR)
-         || ((Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC));
-
-  Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;
-
-  return MappedSparseMatrix<Scalar,Flags,Index>(
-    sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize],
-    sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) );
-}
-
-} // end namespace internal
-
-/** \ingroup SuperLUSupport_Module
-  * \class SuperLUBase
-  * \brief The base class for the direct and incomplete LU factorization of SuperLU
-  */
-template<typename _MatrixType, typename Derived>
-class SuperLUBase : internal::noncopyable
-{
-  public:
-    typedef _MatrixType MatrixType;
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename MatrixType::RealScalar RealScalar;
-    typedef typename MatrixType::Index Index;
-    typedef Matrix<Scalar,Dynamic,1> Vector;
-    typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
-    typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;    
-    typedef SparseMatrix<Scalar> LUMatrixType;
-
-  public:
-
-    SuperLUBase() {}
-
-    ~SuperLUBase()
-    {
-      clearFactors();
-    }
-    
-    Derived& derived() { return *static_cast<Derived*>(this); }
-    const Derived& derived() const { return *static_cast<const Derived*>(this); }
-    
-    inline Index rows() const { return m_matrix.rows(); }
-    inline Index cols() const { return m_matrix.cols(); }
-    
-    /** \returns a reference to the SuperLU options object used to configure the SuperLU algorithms. */
-    inline superlu_options_t& options() { return m_sluOptions; }
-    
-    /** \brief Reports whether previous computation was successful.
-      *
-      * \returns \c Success if computation was successful,
-      *          \c NumericalIssue if the matrix appears to be negative.
-      */
-    ComputationInfo info() const
-    {
-      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
-      return m_info;
-    }
-
-    /** Computes the sparse LU decomposition of \a matrix */
-    void compute(const MatrixType& matrix)
-    {
-      derived().analyzePattern(matrix);
-      derived().factorize(matrix);
-    }
-    
-    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
-      *
-      * \sa compute()
-      */
-    template<typename Rhs>
-    inline const internal::solve_retval<SuperLUBase, Rhs> solve(const MatrixBase<Rhs>& b) const
-    {
-      eigen_assert(m_isInitialized && "SuperLU is not initialized.");
-      eigen_assert(rows()==b.rows()
-                && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
-      return internal::solve_retval<SuperLUBase, Rhs>(*this, b.derived());
-    }
-    
-    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
-      *
-      * \sa compute()
-      */
-//     template<typename Rhs>
-//     inline const internal::sparse_solve_retval<SuperLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
-//     {
-//       eigen_assert(m_isInitialized && "SuperLU is not initialized.");
-//       eigen_assert(rows()==b.rows()
-//                 && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
-//       return internal::sparse_solve_retval<SuperLU, Rhs>(*this, b.derived());
-//     }
-    
-    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
-      *
-      * This function is particularly useful when solving several problems having the same structure.
-      * 
-      * \sa factorize()
-      */
-    void analyzePattern(const MatrixType& /*matrix*/)
-    {
-      m_isInitialized = true;
-      m_info = Success;
-      m_analysisIsOk = true;
-      m_factorizationIsOk = false;
-    }
-    
-    template<typename Stream>
-    void dumpMemory(Stream& s)
-    {}
-    
-  protected:
-    
-    void initFactorization(const MatrixType& a)
-    {
-      set_default_options(&this->m_sluOptions);
-      
-      const int size = a.rows();
-      m_matrix = a;
-
-      m_sluA = internal::asSluMatrix(m_matrix);
-      clearFactors();
-
-      m_p.resize(size);
-      m_q.resize(size);
-      m_sluRscale.resize(size);
-      m_sluCscale.resize(size);
-      m_sluEtree.resize(size);
-
-      // set empty B and X
-      m_sluB.setStorageType(SLU_DN);
-      m_sluB.setScalarType<Scalar>();
-      m_sluB.Mtype          = SLU_GE;
-      m_sluB.storage.values = 0;
-      m_sluB.nrow           = 0;
-      m_sluB.ncol           = 0;
-      m_sluB.storage.lda    = size;
-      m_sluX                = m_sluB;
-      
-      m_extractedDataAreDirty = true;
-    }
-    
-    void init()
-    {
-      m_info = InvalidInput;
-      m_isInitialized = false;
-      m_sluL.Store = 0;
-      m_sluU.Store = 0;
-    }
-    
-    void extractData() const;
-
-    void clearFactors()
-    {
-      if(m_sluL.Store)
-        Destroy_SuperNode_Matrix(&m_sluL);
-      if(m_sluU.Store)
-        Destroy_CompCol_Matrix(&m_sluU);
-
-      m_sluL.Store = 0;
-      m_sluU.Store = 0;
-
-      memset(&m_sluL,0,sizeof m_sluL);
-      memset(&m_sluU,0,sizeof m_sluU);
-    }
-
-    // cached data to reduce reallocation, etc.
-    mutable LUMatrixType m_l;
-    mutable LUMatrixType m_u;
-    mutable IntColVectorType m_p;
-    mutable IntRowVectorType m_q;
-
-    mutable LUMatrixType m_matrix;  // copy of the factorized matrix
-    mutable SluMatrix m_sluA;
-    mutable SuperMatrix m_sluL, m_sluU;
-    mutable SluMatrix m_sluB, m_sluX;
-    mutable SuperLUStat_t m_sluStat;
-    mutable superlu_options_t m_sluOptions;
-    mutable std::vector<int> m_sluEtree;
-    mutable Matrix<RealScalar,Dynamic,1> m_sluRscale, m_sluCscale;
-    mutable Matrix<RealScalar,Dynamic,1> m_sluFerr, m_sluBerr;
-    mutable char m_sluEqued;
-
-    mutable ComputationInfo m_info;
-    bool m_isInitialized;
-    int m_factorizationIsOk;
-    int m_analysisIsOk;
-    mutable bool m_extractedDataAreDirty;
-    
-  private:
-    SuperLUBase(SuperLUBase& ) { }
-};
-
-
-/** \ingroup SuperLUSupport_Module
-  * \class SuperLU
-  * \brief A sparse direct LU factorization and solver based on the SuperLU library
-  *
-  * This class allows solving A.X = B sparse linear problems via a direct LU factorization
-  * using the SuperLU library. The sparse matrix A must be square and invertible. The vectors or matrices
-  * X and B can be either dense or sparse.
-  *
-  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
-  *
-  * \sa \ref TutorialSparseDirectSolvers
-  */
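-// A minimal usage sketch (illustrative only; error handling reduced to an info() check):
-//
-//   SparseMatrix<double> A;            // square and invertible
-//   VectorXd b, x;
-//   SuperLU<SparseMatrix<double> > solver;
-//   solver.compute(A);                 // analyzePattern(A) followed by factorize(A)
-//   if(solver.info() == Success)
-//     x = solver.solve(b);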
-template<typename _MatrixType>
-class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> >
-{
-  public:
-    typedef SuperLUBase<_MatrixType,SuperLU> Base;
-    typedef _MatrixType MatrixType;
-    typedef typename Base::Scalar Scalar;
-    typedef typename Base::RealScalar RealScalar;
-    typedef typename Base::Index Index;
-    typedef typename Base::IntRowVectorType IntRowVectorType;
-    typedef typename Base::IntColVectorType IntColVectorType;    
-    typedef typename Base::LUMatrixType LUMatrixType;
-    typedef TriangularView<LUMatrixType, Lower|UnitDiag>  LMatrixType;
-    typedef TriangularView<LUMatrixType,  Upper>           UMatrixType;
-
-  public:
-
-    SuperLU() : Base() { init(); }
-
-    SuperLU(const MatrixType& matrix) : Base()
-    {
-      init();
-      Base::compute(matrix);
-    }
-
-    ~SuperLU()
-    {
-    }
-    
-    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
-      *
-      * This function is particularly useful when solving several problems having the same structure.
-      * 
-      * \sa factorize()
-      */
-    void analyzePattern(const MatrixType& matrix)
-    {
-      m_info = InvalidInput;
-      m_isInitialized = false;
-      Base::analyzePattern(matrix);
-    }
-    
-    /** Performs a numeric decomposition of \a matrix.
-      *
-      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
-      *
-      * \sa analyzePattern()
-      */
-    void factorize(const MatrixType& matrix);
-    
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** \internal */
-    template<typename Rhs,typename Dest>
-    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
-    #endif // EIGEN_PARSED_BY_DOXYGEN
-    
-    inline const LMatrixType& matrixL() const
-    {
-      if (m_extractedDataAreDirty) this->extractData();
-      return m_l;
-    }
-
-    inline const UMatrixType& matrixU() const
-    {
-      if (m_extractedDataAreDirty) this->extractData();
-      return m_u;
-    }
-
-    inline const IntColVectorType& permutationP() const
-    {
-      if (m_extractedDataAreDirty) this->extractData();
-      return m_p;
-    }
-
-    inline const IntRowVectorType& permutationQ() const
-    {
-      if (m_extractedDataAreDirty) this->extractData();
-      return m_q;
-    }
-    
-    Scalar determinant() const;
-    
-  protected:
-    
-    using Base::m_matrix;
-    using Base::m_sluOptions;
-    using Base::m_sluA;
-    using Base::m_sluB;
-    using Base::m_sluX;
-    using Base::m_p;
-    using Base::m_q;
-    using Base::m_sluEtree;
-    using Base::m_sluEqued;
-    using Base::m_sluRscale;
-    using Base::m_sluCscale;
-    using Base::m_sluL;
-    using Base::m_sluU;
-    using Base::m_sluStat;
-    using Base::m_sluFerr;
-    using Base::m_sluBerr;
-    using Base::m_l;
-    using Base::m_u;
-    
-    using Base::m_analysisIsOk;
-    using Base::m_factorizationIsOk;
-    using Base::m_extractedDataAreDirty;
-    using Base::m_isInitialized;
-    using Base::m_info;
-    
-    void init()
-    {
-      Base::init();
-      
-      set_default_options(&this->m_sluOptions);
-      m_sluOptions.PrintStat        = NO;
-      m_sluOptions.ConditionNumber  = NO;
-      m_sluOptions.Trans            = NOTRANS;
-      m_sluOptions.ColPerm          = COLAMD;
-    }
-    
-    
-  private:
-    SuperLU(SuperLU& ) { }
-};
-
-template<typename MatrixType>
-void SuperLU<MatrixType>::factorize(const MatrixType& a)
-{
-  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
-  if(!m_analysisIsOk)
-  {
-    m_info = InvalidInput;
-    return;
-  }
-  
-  this->initFactorization(a);
-  
-  m_sluOptions.ColPerm = COLAMD;
-  int info = 0;
-  RealScalar recip_pivot_growth, rcond;
-  RealScalar ferr, berr;
-
-  StatInit(&m_sluStat);
-  SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
-                &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
-                &m_sluL, &m_sluU,
-                NULL, 0,
-                &m_sluB, &m_sluX,
-                &recip_pivot_growth, &rcond,
-                &ferr, &berr,
-                &m_sluStat, &info, Scalar());
-  StatFree(&m_sluStat);
-
-  m_extractedDataAreDirty = true;
-
-  // FIXME how to better check for errors ???
-  m_info = info == 0 ? Success : NumericalIssue;
-  m_factorizationIsOk = true;
-}
-
-template<typename MatrixType>
-template<typename Rhs,typename Dest>
-void SuperLU<MatrixType>::_solve(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
-{
-  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
-
-  const int size = m_matrix.rows();
-  const int rhsCols = b.cols();
-  eigen_assert(size==b.rows());
-
-  m_sluOptions.Trans = NOTRANS;
-  m_sluOptions.Fact = FACTORED;
-  m_sluOptions.IterRefine = NOREFINE;
-  
-
-  m_sluFerr.resize(rhsCols);
-  m_sluBerr.resize(rhsCols);
-  m_sluB = SluMatrix::Map(b.const_cast_derived());
-  m_sluX = SluMatrix::Map(x.derived());
-  
-  typename Rhs::PlainObject b_cpy;
-  if(m_sluEqued!='N')
-  {
-    b_cpy = b;
-    m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());  
-  }
-
-  StatInit(&m_sluStat);
-  int info = 0;
-  RealScalar recip_pivot_growth, rcond;
-  SuperLU_gssvx(&m_sluOptions, &m_sluA,
-                m_q.data(), m_p.data(),
-                &m_sluEtree[0], &m_sluEqued,
-                &m_sluRscale[0], &m_sluCscale[0],
-                &m_sluL, &m_sluU,
-                NULL, 0,
-                &m_sluB, &m_sluX,
-                &recip_pivot_growth, &rcond,
-                &m_sluFerr[0], &m_sluBerr[0],
-                &m_sluStat, &info, Scalar());
-  StatFree(&m_sluStat);
-  m_info = info==0 ? Success : NumericalIssue;
-}
-
-// the code of this extractData() function has been adapted from SuperLU's Matlab support code,
-//
-//  Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
-//
-//  THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
-//  EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
-//
-template<typename MatrixType, typename Derived>
-void SuperLUBase<MatrixType,Derived>::extractData() const
-{
-  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for extracting factors, you must first call either compute() or analyzePattern()/factorize()");
-  if (m_extractedDataAreDirty)
-  {
-    int         upper;
-    int         fsupc, istart, nsupr;
-    int         lastl = 0, lastu = 0;
-    SCformat    *Lstore = static_cast<SCformat*>(m_sluL.Store);
-    NCformat    *Ustore = static_cast<NCformat*>(m_sluU.Store);
-    Scalar      *SNptr;
-
-    const int size = m_matrix.rows();
-    m_l.resize(size,size);
-    m_l.resizeNonZeros(Lstore->nnz);
-    m_u.resize(size,size);
-    m_u.resizeNonZeros(Ustore->nnz);
-
-    int* Lcol = m_l.outerIndexPtr();
-    int* Lrow = m_l.innerIndexPtr();
-    Scalar* Lval = m_l.valuePtr();
-
-    int* Ucol = m_u.outerIndexPtr();
-    int* Urow = m_u.innerIndexPtr();
-    Scalar* Uval = m_u.valuePtr();
-
-    Lcol[0] = 0;
-    Ucol[0] = 0;
-
-    /* for each supernode */
-    for (int k = 0; k <= Lstore->nsuper; ++k)
-    {
-      fsupc   = L_FST_SUPC(k);
-      istart  = L_SUB_START(fsupc);
-      nsupr   = L_SUB_START(fsupc+1) - istart;
-      upper   = 1;
-
-      /* for each column in the supernode */
-      for (int j = fsupc; j < L_FST_SUPC(k+1); ++j)
-      {
-        SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)];
-
-        /* Extract U */
-        for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i)
-        {
-          Uval[lastu] = ((Scalar*)Ustore->nzval)[i];
-          /* Matlab doesn't like explicit zero. */
-          if (Uval[lastu] != 0.0)
-            Urow[lastu++] = U_SUB(i);
-        }
-        for (int i = 0; i < upper; ++i)
-        {
-          /* upper triangle in the supernode */
-          Uval[lastu] = SNptr[i];
-          /* Matlab doesn't like explicit zero. */
-          if (Uval[lastu] != 0.0)
-            Urow[lastu++] = L_SUB(istart+i);
-        }
-        Ucol[j+1] = lastu;
-
-        /* Extract L */
-        Lval[lastl] = 1.0; /* unit diagonal */
-        Lrow[lastl++] = L_SUB(istart + upper - 1);
-        for (int i = upper; i < nsupr; ++i)
-        {
-          Lval[lastl] = SNptr[i];
-          /* Matlab doesn't like explicit zero. */
-          if (Lval[lastl] != 0.0)
-            Lrow[lastl++] = L_SUB(istart+i);
-        }
-        Lcol[j+1] = lastl;
-
-        ++upper;
-      } /* for j ... */
-
-    } /* for k ... */
-
-    // squeeze the matrices :
-    m_l.resizeNonZeros(lastl);
-    m_u.resizeNonZeros(lastu);
-
-    m_extractedDataAreDirty = false;
-  }
-}
-
-template<typename MatrixType>
-typename SuperLU<MatrixType>::Scalar SuperLU<MatrixType>::determinant() const
-{
-  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for computing the determinant, you must first call either compute() or analyzePattern()/factorize()");
-  
-  if (m_extractedDataAreDirty)
-    this->extractData();
-
-  Scalar det = Scalar(1);
-  for (int j=0; j<m_u.cols(); ++j)
-  {
-    if (m_u.outerIndexPtr()[j+1]-m_u.outerIndexPtr()[j] > 0)
-    {
-      int lastId = m_u.outerIndexPtr()[j+1]-1;
-      eigen_assert(m_u.innerIndexPtr()[lastId]<=j);
-      if (m_u.innerIndexPtr()[lastId]==j)
-        det *= m_u.valuePtr()[lastId];
-    }
-  }
-  if(m_sluEqued!='N')
-    return det/m_sluRscale.prod()/m_sluCscale.prod();
-  else
-    return det;
-}
-
-#ifdef EIGEN_PARSED_BY_DOXYGEN
-#define EIGEN_SUPERLU_HAS_ILU
-#endif
-
-#ifdef EIGEN_SUPERLU_HAS_ILU
-
-/** \ingroup SuperLUSupport_Module
-  * \class SuperILU
-  * \brief A sparse direct \b incomplete LU factorization and solver based on the SuperLU library
-  *
-  * This class allows solving sparse linear problems A.X = B for an approximate solution via an incomplete LU factorization
-  * using the SuperLU library. It is intended to be used as a preconditioner for iterative linear solvers.
-  *
-  * \warning This class requires SuperLU 4 or later.
-  *
-  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
-  *
-  * \sa \ref TutorialSparseDirectSolvers, class ConjugateGradient, class BiCGSTAB
-  */
-
-template<typename _MatrixType>
-class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> >
-{
-  public:
-    typedef SuperLUBase<_MatrixType,SuperILU> Base;
-    typedef _MatrixType MatrixType;
-    typedef typename Base::Scalar Scalar;
-    typedef typename Base::RealScalar RealScalar;
-    typedef typename Base::Index Index;
-
-  public:
-
-    SuperILU() : Base() { init(); }
-
-    SuperILU(const MatrixType& matrix) : Base()
-    {
-      init();
-      Base::compute(matrix);
-    }
-
-    ~SuperILU()
-    {
-    }
-    
-    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
-      *
-      * This function is particularly useful when solving several problems having the same sparsity structure.
-      * 
-      * \sa factorize()
-      */
-    void analyzePattern(const MatrixType& matrix)
-    {
-      Base::analyzePattern(matrix);
-    }
-    
-    /** Performs a numeric decomposition of \a matrix.
-      *
-      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
-      *
-      * \sa analyzePattern()
-      */
-    void factorize(const MatrixType& matrix);
-    
-    #ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** \internal */
-    template<typename Rhs,typename Dest>
-    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
-    #endif // EIGEN_PARSED_BY_DOXYGEN
-    
-  protected:
-    
-    using Base::m_matrix;
-    using Base::m_sluOptions;
-    using Base::m_sluA;
-    using Base::m_sluB;
-    using Base::m_sluX;
-    using Base::m_p;
-    using Base::m_q;
-    using Base::m_sluEtree;
-    using Base::m_sluEqued;
-    using Base::m_sluRscale;
-    using Base::m_sluCscale;
-    using Base::m_sluL;
-    using Base::m_sluU;
-    using Base::m_sluStat;
-    using Base::m_sluFerr;
-    using Base::m_sluBerr;
-    using Base::m_l;
-    using Base::m_u;
-    
-    using Base::m_analysisIsOk;
-    using Base::m_factorizationIsOk;
-    using Base::m_extractedDataAreDirty;
-    using Base::m_isInitialized;
-    using Base::m_info;
-
-    void init()
-    {
-      Base::init();
-      
-      ilu_set_default_options(&m_sluOptions);
-      m_sluOptions.PrintStat        = NO;
-      m_sluOptions.ConditionNumber  = NO;
-      m_sluOptions.Trans            = NOTRANS;
-      m_sluOptions.ColPerm          = MMD_AT_PLUS_A;
-      
-      // no attempt to preserve column sum
-      m_sluOptions.ILU_MILU = SILU;
-      // only basic ILU(k) support -- no direct control over memory consumption
-      // better to use ILU_DropRule = DROP_BASIC | DROP_AREA
-      // and set ILU_FillFactor to max memory growth
-      m_sluOptions.ILU_DropRule = DROP_BASIC;
-      m_sluOptions.ILU_DropTol = NumTraits<Scalar>::dummy_precision()*10;
-    }
-    
-  private:
-    SuperILU(SuperILU& ) { }
-};
-
-template<typename MatrixType>
-void SuperILU<MatrixType>::factorize(const MatrixType& a)
-{
-  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
-  if(!m_analysisIsOk)
-  {
-    m_info = InvalidInput;
-    return;
-  }
-  
-  this->initFactorization(a);
-
-  int info = 0;
-  RealScalar recip_pivot_growth, rcond;
-
-  StatInit(&m_sluStat);
-  SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
-                &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
-                &m_sluL, &m_sluU,
-                NULL, 0,
-                &m_sluB, &m_sluX,
-                &recip_pivot_growth, &rcond,
-                &m_sluStat, &info, Scalar());
-  StatFree(&m_sluStat);
-
-  // FIXME how to better check for errors ???
-  m_info = info == 0 ? Success : NumericalIssue;
-  m_factorizationIsOk = true;
-}
-
-template<typename MatrixType>
-template<typename Rhs,typename Dest>
-void SuperILU<MatrixType>::_solve(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
-{
-  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
-
-  const int size = m_matrix.rows();
-  const int rhsCols = b.cols();
-  eigen_assert(size==b.rows());
-
-  m_sluOptions.Trans = NOTRANS;
-  m_sluOptions.Fact = FACTORED;
-  m_sluOptions.IterRefine = NOREFINE;
-
-  m_sluFerr.resize(rhsCols);
-  m_sluBerr.resize(rhsCols);
-  m_sluB = SluMatrix::Map(b.const_cast_derived());
-  m_sluX = SluMatrix::Map(x.derived());
-
-  typename Rhs::PlainObject b_cpy;
-  if(m_sluEqued!='N')
-  {
-    b_cpy = b;
-    m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());  
-  }
-  
-  int info = 0;
-  RealScalar recip_pivot_growth, rcond;
-
-  StatInit(&m_sluStat);
-  SuperLU_gsisx(&m_sluOptions, &m_sluA,
-                m_q.data(), m_p.data(),
-                &m_sluEtree[0], &m_sluEqued,
-                &m_sluRscale[0], &m_sluCscale[0],
-                &m_sluL, &m_sluU,
-                NULL, 0,
-                &m_sluB, &m_sluX,
-                &recip_pivot_growth, &rcond,
-                &m_sluStat, &info, Scalar());
-  StatFree(&m_sluStat);
-
-  m_info = info==0 ? Success : NumericalIssue;
-}
-#endif
-
-namespace internal {
-  
-template<typename _MatrixType, typename Derived, typename Rhs>
-struct solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
-  : solve_retval_base<SuperLUBase<_MatrixType,Derived>, Rhs>
-{
-  typedef SuperLUBase<_MatrixType,Derived> Dec;
-  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
-  template<typename Dest> void evalTo(Dest& dst) const
-  {
-    dec().derived()._solve(rhs(),dst);
-  }
-};
-
-template<typename _MatrixType, typename Derived, typename Rhs>
-struct sparse_solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
-  : sparse_solve_retval_base<SuperLUBase<_MatrixType,Derived>, Rhs>
-{
-  typedef SuperLUBase<_MatrixType,Derived> Dec;
-  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
-
-  template<typename Dest> void evalTo(Dest& dst) const
-  {
-    dec().derived()._solve(rhs(),dst);
-  }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_SUPERLUSUPPORT_H
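
For context, the SuperLU/SuperILU classes shown above expose the usual compute/solve interface of Eigen's sparse solvers. A minimal sketch of how they would be driven, assuming SuperLU support is enabled at build time and using hypothetical names (solve_with_superlu, A, b):

#include <Eigen/Sparse>
#include <Eigen/SuperLUSupport>

// Factorize once, then solve; analyzePattern() can be reused for further
// matrices that share the same sparsity pattern (only factorize() is redone).
Eigen::VectorXd solve_with_superlu(const Eigen::SparseMatrix<double>& A,
                                   const Eigen::VectorXd& b)
{
  Eigen::SuperLU<Eigen::SparseMatrix<double> > lu;
  lu.analyzePattern(A);                 // symbolic step
  lu.factorize(A);                      // numeric step
  if(lu.info() != Eigen::Success)
    return Eigen::VectorXd();           // factorization reported NumericalIssue
  return lu.solve(b);
}
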
diff --git a/resources/3rdparty/eigen/bench/bench_gemm.cpp b/resources/3rdparty/eigen/bench/bench_gemm.cpp
deleted file mode 100644
index 41ca8b3b6..000000000
--- a/resources/3rdparty/eigen/bench/bench_gemm.cpp
+++ /dev/null
@@ -1,271 +0,0 @@
-
-// g++-4.4 bench_gemm.cpp -I .. -O2 -DNDEBUG -lrt -fopenmp && OMP_NUM_THREADS=2  ./a.out
-// icpc bench_gemm.cpp -I .. -O3 -DNDEBUG -lrt -openmp  && OMP_NUM_THREADS=2  ./a.out
-
-#include <iostream>
-#include <Eigen/Core>
-#include <bench/BenchTimer.h>
-
-using namespace std;
-using namespace Eigen;
-
-#ifndef SCALAR
-// #define SCALAR std::complex<float>
-#define SCALAR float
-#endif
-
-typedef SCALAR Scalar;
-typedef NumTraits<Scalar>::Real RealScalar;
-typedef Matrix<RealScalar,Dynamic,Dynamic> A;
-typedef Matrix</*Real*/Scalar,Dynamic,Dynamic> B;
-typedef Matrix<Scalar,Dynamic,Dynamic> C;
-typedef Matrix<RealScalar,Dynamic,Dynamic> M;
-
-#ifdef HAVE_BLAS
-
-extern "C" {
-  #include <Eigen/src/misc/blas.h>
-}
-
-static float fone = 1;
-static float fzero = 0;
-static double done = 1;
-static double szero = 0;
-static std::complex<float> cfone = 1;
-static std::complex<float> cfzero = 0;
-static std::complex<double> cdone = 1;
-static std::complex<double> cdzero = 0;
-static char notrans = 'N';
-static char trans = 'T';  
-static char nonunit = 'N';
-static char lower = 'L';
-static char right = 'R';
-static int intone = 1;
-
-void blas_gemm(const MatrixXf& a, const MatrixXf& b, MatrixXf& c)
-{
-  int M = c.rows(); int N = c.cols(); int K = a.cols();
-  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
-
-  sgemm_(&notrans,&notrans,&M,&N,&K,&fone,
-         const_cast<float*>(a.data()),&lda,
-         const_cast<float*>(b.data()),&ldb,&fone,
-         c.data(),&ldc);
-}
-
-EIGEN_DONT_INLINE void blas_gemm(const MatrixXd& a, const MatrixXd& b, MatrixXd& c)
-{
-  int M = c.rows(); int N = c.cols(); int K = a.cols();
-  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
-
-  dgemm_(&notrans,&notrans,&M,&N,&K,&done,
-         const_cast<double*>(a.data()),&lda,
-         const_cast<double*>(b.data()),&ldb,&done,
-         c.data(),&ldc);
-}
-
-void blas_gemm(const MatrixXcf& a, const MatrixXcf& b, MatrixXcf& c)
-{
-  int M = c.rows(); int N = c.cols(); int K = a.cols();
-  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
-
-  cgemm_(&notrans,&notrans,&M,&N,&K,(float*)&cfone,
-         const_cast<float*>((const float*)a.data()),&lda,
-         const_cast<float*>((const float*)b.data()),&ldb,(float*)&cfone,
-         (float*)c.data(),&ldc);
-}
-
-void blas_gemm(const MatrixXcd& a, const MatrixXcd& b, MatrixXcd& c)
-{
-  int M = c.rows(); int N = c.cols(); int K = a.cols();
-  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
-
-  zgemm_(&notrans,&notrans,&M,&N,&K,(double*)&cdone,
-         const_cast<double*>((const double*)a.data()),&lda,
-         const_cast<double*>((const double*)b.data()),&ldb,(double*)&cdone,
-         (double*)c.data(),&ldc);
-}
-
-
-
-#endif
-
-void matlab_cplx_cplx(const M& ar, const M& ai, const M& br, const M& bi, M& cr, M& ci)
-{
-  cr.noalias() += ar * br;
-  cr.noalias() -= ai * bi;
-  ci.noalias() += ar * bi;
-  ci.noalias() += ai * br;
-}
-
-void matlab_real_cplx(const M& a, const M& br, const M& bi, M& cr, M& ci)
-{
-  cr.noalias() += a * br;
-  ci.noalias() += a * bi;
-}
-
-void matlab_cplx_real(const M& ar, const M& ai, const M& b, M& cr, M& ci)
-{
-  cr.noalias() += ar * b;
-  ci.noalias() += ai * b;
-}
-
-template<typename A, typename B, typename C>
-EIGEN_DONT_INLINE void gemm(const A& a, const B& b, C& c)
-{
- c.noalias() += a * b;
-}
-
-int main(int argc, char ** argv)
-{
-  std::ptrdiff_t l1 = internal::queryL1CacheSize();
-  std::ptrdiff_t l2 = internal::queryTopLevelCacheSize();
-  std::cout << "L1 cache size     = " << (l1>0 ? l1/1024 : -1) << " KB\n";
-  std::cout << "L2/L3 cache size  = " << (l2>0 ? l2/1024 : -1) << " KB\n";
-  typedef internal::gebp_traits<Scalar,Scalar> Traits;
-  std::cout << "Register blocking = " << Traits::mr << " x " << Traits::nr << "\n";
-
-  int rep = 1;    // number of repetitions per try
-  int tries = 2;  // number of tries, we keep the best
-
-  int s = 2048;
-  int cache_size = -1;
-
-  bool need_help = false;
-  for (int i=1; i<argc; ++i)
-  {
-    if(argv[i][0]=='s')
-      s = atoi(argv[i]+1);
-    else if(argv[i][0]=='c')
-      cache_size = atoi(argv[i]+1);
-    else if(argv[i][0]=='t')
-      tries = atoi(argv[i]+1);
-    else if(argv[i][0]=='p')
-      rep = atoi(argv[i]+1);
-    else
-      need_help = true;
-  }
-
-  if(need_help)
-  {
-    std::cout << argv[0] << " s<matrix size> c<cache size> t<nb tries> p<nb repeats>\n";
-    return 1;
-  }
-
-  if(cache_size>0)
-    setCpuCacheSizes(cache_size,96*cache_size);
-
-  int m = s;
-  int n = s;
-  int p = s;
-  A a(m,p); a.setRandom();
-  B b(p,n); b.setRandom();
-  C c(m,n); c.setOnes();
-  C rc = c;
-
-  std::cout << "Matrix sizes = " << m << "x" << p << " * " << p << "x" << n << "\n";
-  std::ptrdiff_t mc(m), nc(n), kc(p);
-  internal::computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
-  std::cout << "blocking size (mc x kc) = " << mc << " x " << kc << "\n";
-
-  C r = c;
-
-  // check the parallel product is correct
-  #if defined EIGEN_HAS_OPENMP
-  int procs = omp_get_max_threads();
-  if(procs>1)
-  {
-    #ifdef HAVE_BLAS
-    blas_gemm(a,b,r);
-    #else
-    omp_set_num_threads(1);
-    r.noalias() += a * b;
-    omp_set_num_threads(procs);
-    #endif
-    c.noalias() += a * b;
-    if(!r.isApprox(c)) std::cerr << "Warning, your parallel product is crap!\n\n";
-  }
-  #elif defined HAVE_BLAS
-    blas_gemm(a,b,r);
-    c.noalias() += a * b;
-    if(!r.isApprox(c)) std::cerr << "Warning, your product is crap!\n\n";
-  #else
-    gemm(a,b,c);
-    r.noalias() += a.cast<Scalar>() * b.cast<Scalar>();
-    if(!r.isApprox(c)) std::cerr << "Warning, your product is crap!\n\n";
-  #endif
-
-  #ifdef HAVE_BLAS
-  BenchTimer tblas;
-  c = rc;
-  BENCH(tblas, tries, rep, blas_gemm(a,b,c));
-  std::cout << "blas  cpu         " << tblas.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/tblas.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << tblas.total(CPU_TIMER)  << "s)\n";
-  std::cout << "blas  real        " << tblas.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/tblas.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << tblas.total(REAL_TIMER) << "s)\n";
-  #endif
-
-  BenchTimer tmt;
-  c = rc;
-  BENCH(tmt, tries, rep, gemm(a,b,c));
-  std::cout << "eigen cpu         " << tmt.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << tmt.total(CPU_TIMER)  << "s)\n";
-  std::cout << "eigen real        " << tmt.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";
-
-  #ifdef EIGEN_HAS_OPENMP
-  if(procs>1)
-  {
-    BenchTimer tmono;
-    omp_set_num_threads(1);
-    Eigen::internal::setNbThreads(1);
-    c = rc;
-    BENCH(tmono, tries, rep, gemm(a,b,c));
-    std::cout << "eigen mono cpu    " << tmono.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/tmono.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << tmono.total(CPU_TIMER)  << "s)\n";
-    std::cout << "eigen mono real   " << tmono.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/tmono.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << tmono.total(REAL_TIMER) << "s)\n";
-    std::cout << "mt speed up x" << tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER)  << " => " << (100.0*tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER))/procs << "%\n";
-  }
-  #endif
-  
-  #ifdef DECOUPLED
-  if((NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
-  {
-    M ar(m,p); ar.setRandom();
-    M ai(m,p); ai.setRandom();
-    M br(p,n); br.setRandom();
-    M bi(p,n); bi.setRandom();
-    M cr(m,n); cr.setRandom();
-    M ci(m,n); ci.setRandom();
-    
-    BenchTimer t;
-    BENCH(t, tries, rep, matlab_cplx_cplx(ar,ai,br,bi,cr,ci));
-    std::cout << "\"matlab\" cpu    " << t.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << t.total(CPU_TIMER)  << "s)\n";
-    std::cout << "\"matlab\" real   " << t.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
-  }
-  if((!NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
-  {
-    M a(m,p);  a.setRandom();
-    M br(p,n); br.setRandom();
-    M bi(p,n); bi.setRandom();
-    M cr(m,n); cr.setRandom();
-    M ci(m,n); ci.setRandom();
-    
-    BenchTimer t;
-    BENCH(t, tries, rep, matlab_real_cplx(a,br,bi,cr,ci));
-    std::cout << "\"matlab\" cpu    " << t.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << t.total(CPU_TIMER)  << "s)\n";
-    std::cout << "\"matlab\" real   " << t.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
-  }
-  if((NumTraits<A::Scalar>::IsComplex) && (!NumTraits<B::Scalar>::IsComplex))
-  {
-    M ar(m,p); ar.setRandom();
-    M ai(m,p); ai.setRandom();
-    M b(p,n);  b.setRandom();
-    M cr(m,n); cr.setRandom();
-    M ci(m,n); ci.setRandom();
-    
-    BenchTimer t;
-    BENCH(t, tries, rep, matlab_cplx_real(ar,ai,b,cr,ci));
-    std::cout << "\"matlab\" cpu    " << t.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << t.total(CPU_TIMER)  << "s)\n";
-    std::cout << "\"matlab\" real   " << t.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
-  }
-  #endif
-
-  return 0;
-}
-
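
For reference, the GFLOPS figures printed by the benchmark above use the usual dense GEMM operation count of 2*m*n*p flops per product; a small helper restating that formula (hypothetical name gflops):

// One m x p times p x n product performs m*n*p multiplies and m*n*p adds,
// hence the factor 2 in the printouts above.
double gflops(int m, int n, int p, int rep, double seconds)
{
  return double(m) * n * p * rep * 2.0 / seconds * 1e-9;
}
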
diff --git a/resources/3rdparty/eigen/bench/spbench/CMakeLists.txt b/resources/3rdparty/eigen/bench/spbench/CMakeLists.txt
deleted file mode 100644
index 6e0e1b103..000000000
--- a/resources/3rdparty/eigen/bench/spbench/CMakeLists.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-set(BLAS_FOUND TRUE)
-set(LAPACK_FOUND TRUE)
-set(BLAS_LIBRARIES eigen_blas_static)
-set(LAPACK_LIBRARIES eigen_lapack_static)
-
-set(SPARSE_LIBS "")
-
-# find_library(PARDISO_LIBRARIES pardiso412-GNU450-X86-64)
-# if(PARDISO_LIBRARIES)
-#   add_definitions("-DEIGEN_PARDISO_SUPPORT")
-#   set(SPARSE_LIBS ${SPARSE_LIBS} ${PARDISO_LIBRARIES})
-# endif(PARDISO_LIBRARIES)
-
-find_package(Cholmod)
-if(CHOLMOD_FOUND AND BLAS_FOUND AND LAPACK_FOUND)
-  add_definitions("-DEIGEN_CHOLMOD_SUPPORT")
-  include_directories(${CHOLMOD_INCLUDES})
-  set(SPARSE_LIBS ${SPARSE_LIBS} ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES})
-  set(CHOLMOD_ALL_LIBS  ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES})
-endif()
-
-find_package(Umfpack)
-if(UMFPACK_FOUND AND BLAS_FOUND)
-  add_definitions("-DEIGEN_UMFPACK_SUPPORT")
-  include_directories(${UMFPACK_INCLUDES})
-  set(SPARSE_LIBS ${SPARSE_LIBS} ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES})
-  set(UMFPACK_ALL_LIBS ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES})
-endif()
-
-find_package(SuperLU)
-if(SUPERLU_FOUND AND BLAS_FOUND)
-  add_definitions("-DEIGEN_SUPERLU_SUPPORT")
-  include_directories(${SUPERLU_INCLUDES})
-  set(SPARSE_LIBS ${SPARSE_LIBS} ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES})
-  set(SUPERLU_ALL_LIBS ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES})
-endif()
-
-
-find_package(Pastix)
-find_package(Scotch)
-find_package(Metis)
-if(PASTIX_FOUND AND BLAS_FOUND)
-  add_definitions("-DEIGEN_PASTIX_SUPPORT")
-  include_directories(${PASTIX_INCLUDES})
-  if(SCOTCH_FOUND)
-    include_directories(${SCOTCH_INCLUDES})
-    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${SCOTCH_LIBRARIES})
-  elseif(METIS_FOUND)
-    include_directories(${METIS_INCLUDES})
-    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${METIS_LIBRARIES})  
-  endif(SCOTCH_FOUND)
-  set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES} ${ORDERING_LIBRARIES} ${BLAS_LIBRARIES})
-  set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES} ${BLAS_LIBRARIES})
-endif(PASTIX_FOUND AND BLAS_FOUND)
-
-if(METIS_FOUND)
-  include_directories(${METIS_INCLUDES})
-  set (SPARSE_LIBS ${SPARSE_LIBS} ${METIS_LIBRARIES})
-  add_definitions("-DEIGEN_METIS_SUPPORT")
-endif(METIS_FOUND)
-
-find_library(RT_LIBRARY rt)
-if(RT_LIBRARY)
-  set(SPARSE_LIBS ${SPARSE_LIBS} ${RT_LIBRARY})
-endif(RT_LIBRARY)
-
-add_executable(spbenchsolver spbenchsolver.cpp)
-target_link_libraries (spbenchsolver ${SPARSE_LIBS})
-
-add_executable(spsolver sp_solver.cpp)
-target_link_libraries (spsolver ${SPARSE_LIBS})
-
-
-add_executable(test_sparseLU test_sparseLU.cpp)
-target_link_libraries (test_sparseLU ${SPARSE_LIBS})
-
diff --git a/resources/3rdparty/eigen/bench/spbench/spbenchsolver.cpp b/resources/3rdparty/eigen/bench/spbench/spbenchsolver.cpp
deleted file mode 100644
index 4acd0039c..000000000
--- a/resources/3rdparty/eigen/bench/spbench/spbenchsolver.cpp
+++ /dev/null
@@ -1,87 +0,0 @@
-#include <bench/spbench/spbenchsolver.h>
-
-void bench_printhelp()
-{
-    cout<< " \nbenchsolver : performs a benchmark of all the solvers available in Eigen \n\n";
-    cout<< " MATRIX FOLDER : \n";
-    cout<< " The matrices for the benchmark should be collected in a folder specified with an environment variable EIGEN_MATRIXDIR \n";
-    cout<< " The matrices are stored using the matrix market coordinate format \n";
-    cout<< " The matrix and associated right-hand side (rhs) files are named respectively \n";
-    cout<< " as MatrixName.mtx and MatrixName_b.mtx. If the rhs does not exist, a random one is generated. \n";
-    cout<< " If a matrix is SPD, the matrix should be named as MatrixName_SPD.mtx \n";
-    cout<< " If a true solution exists, it should be named as MatrixName_x.mtx; \n"     ;
-    cout<< " it will be used to compute the norm of the error relative to the computed solutions\n\n";
-    cout<< " OPTIONS : \n"; 
-    cout<< " -h or --help \n    print this help and return\n\n";
-    cout<< " -d matrixdir \n    Use matrixdir as the matrix folder instead of the one specified in the environment variable EIGEN_MATRIXDIR\n\n"; 
-    cout<< " -o outputfile.xml \n    Output the statistics to a xml file \n\n";
-    cout<< " --eps <RelErr> Sets the relative tolerance for iterative solvers (default 1e-08) \n\n";
-    cout<< " --maxits <MaxIts> Sets the maximum number of iterations (default 1000) \n\n";
-    
-}
-int main(int argc, char ** args)
-{
-  
-  bool help = ( get_options(argc, args, "-h") || get_options(argc, args, "--help") );
-  if(help) {
-    bench_printhelp();
-    return 0;
-  }
-
-  // Get the location of the test matrices
-  string matrix_dir;
-  if (!get_options(argc, args, "-d", &matrix_dir))
-  {
-    if(getenv("EIGEN_MATRIXDIR") == NULL){
-      std::cerr << "Please, specify the location of the matrices with -d mat_folder or the environment variable EIGEN_MATRIXDIR \n";
-      std::cerr << " Run with --help to see the list of all the available options \n";
-      return -1;
-    }
-    matrix_dir = getenv("EIGEN_MATRIXDIR");
-  }
-     
-  std::ofstream statbuf;
-  string statFile ;
-  
-  // Get the file to write the statistics
-  bool statFileExists = get_options(argc, args, "-o", &statFile);
-  if(statFileExists)
-  {
-    statbuf.open(statFile.c_str(), std::ios::out);
-    if(statbuf.good()){
-      statFileExists = true; 
-      printStatheader(statbuf);
-      statbuf.close();
-    }
-    else
-      std::cerr << "Unable to open the provided file for writting... \n";
-  }       
-  
-  // Get the maximum number of iterations and the tolerance
-  int maxiters = 1000; 
-  double tol = 1e-08; 
-  string inval; 
-  if (get_options(argc, args, "--eps", &inval))
-    tol = atof(inval.c_str()); 
-  if(get_options(argc, args, "--maxits", &inval))
-    maxiters = atoi(inval.c_str()); 
-  
-  string current_dir; 
-  // Test the real-arithmetics matrices
-  Browse_Matrices<double>(matrix_dir, statFileExists, statFile,maxiters, tol);
-  
-  // Test the complex-arithmetics matrices
-  Browse_Matrices<std::complex<double> >(matrix_dir, statFileExists, statFile, maxiters, tol); 
-  
-  if(statFileExists)
-  {
-    statbuf.open(statFile.c_str(), std::ios::app); 
-    statbuf << "</BENCH> \n";
-    cout << "\n Output written in " << statFile << " ...\n";
-    statbuf.close();
-  }
-
-  return 0;
-}
-
-      
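
Putting the options parsed above together, a hypothetical invocation of the spbenchsolver executable that reads matrices from /data/matrices (an illustrative path), writes an XML report, and tightens the stopping criterion for the iterative solvers would be:

./spbenchsolver -d /data/matrices -o stats.xml --eps 1e-10 --maxits 500
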
diff --git a/resources/3rdparty/eigen/bench/spbench/spbenchsolver.h b/resources/3rdparty/eigen/bench/spbench/spbenchsolver.h
deleted file mode 100644
index 19c719c04..000000000
--- a/resources/3rdparty/eigen/bench/spbench/spbenchsolver.h
+++ /dev/null
@@ -1,554 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-
-#include <iostream>
-#include <fstream>
-#include <Eigen/SparseCore>
-#include <bench/BenchTimer.h>
-#include <cstdlib>
-#include <string>
-#include <Eigen/Cholesky>
-#include <Eigen/Jacobi>
-#include <Eigen/Householder>
-#include <Eigen/IterativeLinearSolvers>
-#include <unsupported/Eigen/IterativeSolvers>
-#include <Eigen/LU>
-#include <unsupported/Eigen/SparseExtra>
-#include <Eigen/SparseLU>
-
-#include "spbenchstyle.h"
-
-#ifdef EIGEN_METIS_SUPPORT
-#include <Eigen/MetisSupport>
-#endif
-
-#ifdef EIGEN_CHOLMOD_SUPPORT
-#include <Eigen/CholmodSupport>
-#endif
-
-#ifdef EIGEN_UMFPACK_SUPPORT
-#include <Eigen/UmfPackSupport>
-#endif
-
-#ifdef EIGEN_PARDISO_SUPPORT
-#include <Eigen/PardisoSupport>
-#endif
-
-#ifdef EIGEN_SUPERLU_SUPPORT
-#include <Eigen/SuperLUSupport>
-#endif
-
-#ifdef EIGEN_PASTIX_SUPPORT
-#include <Eigen/PaStiXSupport>
-#endif
-
-// CONSTANTS
-#define EIGEN_UMFPACK  10
-#define EIGEN_SUPERLU  20
-#define EIGEN_PASTIX  30
-#define EIGEN_PARDISO  40
-#define EIGEN_SPARSELU_COLAMD 50
-#define EIGEN_SPARSELU_METIS 51
-#define EIGEN_BICGSTAB  60
-#define EIGEN_BICGSTAB_ILUT  61
-#define EIGEN_GMRES 70
-#define EIGEN_GMRES_ILUT 71
-#define EIGEN_SIMPLICIAL_LDLT  80
-#define EIGEN_CHOLMOD_LDLT  90
-#define EIGEN_PASTIX_LDLT  100
-#define EIGEN_PARDISO_LDLT  110
-#define EIGEN_SIMPLICIAL_LLT  120
-#define EIGEN_CHOLMOD_SUPERNODAL_LLT  130
-#define EIGEN_CHOLMOD_SIMPLICIAL_LLT  140
-#define EIGEN_PASTIX_LLT  150
-#define EIGEN_PARDISO_LLT  160
-#define EIGEN_CG  170
-#define EIGEN_CG_PRECOND  180
-
-using namespace Eigen;
-using namespace std; 
-
-
-// Global variables for input parameters
-int MaximumIters; // Maximum number of iterations
-double RelErr; // Relative error of the computed solution
-double best_time_val; // Current best time over all solvers 
-int best_time_id; //  id of the best solver for the current system 
-
-template<typename T> inline typename NumTraits<T>::Real test_precision() { return NumTraits<T>::dummy_precision(); }
-template<> inline float test_precision<float>() { return 1e-3f; }                                                             
-template<> inline double test_precision<double>() { return 1e-6; }                                                            
-template<> inline float test_precision<std::complex<float> >() { return test_precision<float>(); }
-template<> inline double test_precision<std::complex<double> >() { return test_precision<double>(); }
-
-void printStatheader(std::ofstream& out)
-{
-  // Print XML header
-  // NOTE It would have been much easier to write these XML documents using external libraries like tinyXML or Xerces-C++.
-  
-  out << "<?xml version='1.0' encoding='UTF-8'?> \n";
-  out << "<?xml-stylesheet type='text/xsl' href='#stylesheet' ?> \n"; 
-  out << "<!DOCTYPE BENCH  [\n<!ATTLIST xsl:stylesheet\n id\t ID  #REQUIRED>\n]>";
-  out << "\n\n<!-- Generated by the Eigen library -->\n"; 
-  
-  out << "\n<BENCH> \n" ; //root XML element 
-  // Print the xsl style section
-  printBenchStyle(out); 
-  // List all available solvers 
-  out << " <AVAILSOLVER> \n";
-#ifdef EIGEN_UMFPACK_SUPPORT
-  out <<"  <SOLVER ID='" << EIGEN_UMFPACK << "'>\n"; 
-  out << "   <TYPE> LU </TYPE> \n";
-  out << "   <PACKAGE> UMFPACK </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-#endif
-#ifdef EIGEN_SUPERLU_SUPPORT
-  out <<"  <SOLVER ID='" << EIGEN_SUPERLU << "'>\n"; 
-  out << "   <TYPE> LU </TYPE> \n";
-  out << "   <PACKAGE> SUPERLU </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-#endif
-#ifdef EIGEN_CHOLMOD_SUPPORT
-  out <<"  <SOLVER ID='" << EIGEN_CHOLMOD_SIMPLICIAL_LLT << "'>\n"; 
-  out << "   <TYPE> LLT SP</TYPE> \n";
-  out << "   <PACKAGE> CHOLMOD </PACKAGE> \n";
-  out << "  </SOLVER> \n"; 
-  
-  out <<"  <SOLVER ID='" << EIGEN_CHOLMOD_SUPERNODAL_LLT << "'>\n"; 
-  out << "   <TYPE> LLT</TYPE> \n";
-  out << "   <PACKAGE> CHOLMOD </PACKAGE> \n";
-  out << "  </SOLVER> \n";
-  
-  out <<"  <SOLVER ID='" << EIGEN_CHOLMOD_LDLT << "'>\n"; 
-  out << "   <TYPE> LDLT </TYPE> \n";
-  out << "   <PACKAGE> CHOLMOD </PACKAGE> \n";  
-  out << "  </SOLVER> \n"; 
-#endif
-#ifdef EIGEN_PARDISO_SUPPORT
-  out <<"  <SOLVER ID='" << EIGEN_PARDISO << "'>\n"; 
-  out << "   <TYPE> LU </TYPE> \n";
-  out << "   <PACKAGE> PARDISO </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-  
-  out <<"  <SOLVER ID='" << EIGEN_PARDISO_LLT << "'>\n"; 
-  out << "   <TYPE> LLT </TYPE> \n";
-  out << "   <PACKAGE> PARDISO </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-  
-  out <<"  <SOLVER ID='" << EIGEN_PARDISO_LDLT << "'>\n"; 
-  out << "   <TYPE> LDLT </TYPE> \n";
-  out << "   <PACKAGE> PARDISO </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-#endif
-#ifdef EIGEN_PASTIX_SUPPORT
-  out <<"  <SOLVER ID='" << EIGEN_PASTIX << "'>\n"; 
-  out << "   <TYPE> LU </TYPE> \n";
-  out << "   <PACKAGE> PASTIX </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-  
-  out <<"  <SOLVER ID='" << EIGEN_PASTIX_LLT << "'>\n"; 
-  out << "   <TYPE> LLT </TYPE> \n";
-  out << "   <PACKAGE> PASTIX </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-  
-  out <<"  <SOLVER ID='" << EIGEN_PASTIX_LDLT << "'>\n"; 
-  out << "   <TYPE> LDLT </TYPE> \n";
-  out << "   <PACKAGE> PASTIX </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-#endif
-  
-  out <<"  <SOLVER ID='" << EIGEN_BICGSTAB << "'>\n"; 
-  out << "   <TYPE> BICGSTAB </TYPE> \n";
-  out << "   <PACKAGE> EIGEN </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-  
-  out <<"  <SOLVER ID='" << EIGEN_BICGSTAB_ILUT << "'>\n"; 
-  out << "   <TYPE> BICGSTAB_ILUT </TYPE> \n";
-  out << "   <PACKAGE> EIGEN </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-  
-  out <<"  <SOLVER ID='" << EIGEN_GMRES_ILUT << "'>\n"; 
-  out << "   <TYPE> GMRES_ILUT </TYPE> \n";
-  out << "   <PACKAGE> EIGEN </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-  
-  out <<"  <SOLVER ID='" << EIGEN_SIMPLICIAL_LDLT << "'>\n"; 
-  out << "   <TYPE> LDLT </TYPE> \n";
-  out << "   <PACKAGE> EIGEN </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-  
-  out <<"  <SOLVER ID='" << EIGEN_SIMPLICIAL_LLT << "'>\n"; 
-  out << "   <TYPE> LLT </TYPE> \n";
-  out << "   <PACKAGE> EIGEN </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-  
-  out <<"  <SOLVER ID='" << EIGEN_CG << "'>\n"; 
-  out << "   <TYPE> CG </TYPE> \n";
-  out << "   <PACKAGE> EIGEN </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-  
-  out <<"  <SOLVER ID='" << EIGEN_SPARSELU_COLAMD << "'>\n"; 
-  out << "   <TYPE> LU_COLAMD </TYPE> \n";
-  out << "   <PACKAGE> EIGEN </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-  
-#ifdef EIGEN_METIS_SUPPORT
-  out <<"  <SOLVER ID='" << EIGEN_SPARSELU_METIS << "'>\n"; 
-  out << "   <TYPE> LU_METIS </TYPE> \n";
-  out << "   <PACKAGE> EIGEN </PACKAGE> \n"; 
-  out << "  </SOLVER> \n"; 
-#endif
-  out << " </AVAILSOLVER> \n"; 
-  
-}
-
-
-template<typename Solver, typename Scalar>
-void call_solver(Solver &solver, const int solver_id, const typename Solver::MatrixType& A, const Matrix<Scalar, Dynamic, 1>& b, const Matrix<Scalar, Dynamic, 1>& refX,std::ofstream& statbuf)
-{
-  
-  double total_time;
-  double compute_time;
-  double solve_time; 
-  double rel_error;
-  Matrix<Scalar, Dynamic, 1> x; 
-  BenchTimer timer; 
-  timer.reset();
-  timer.start();
-  solver.compute(A); 
-  if (solver.info() != Success)
-  {
-    std::cerr << "Solver failed ... \n";
-    return;
-  }
-  timer.stop();
-  compute_time = timer.value();
-  statbuf << "    <TIME>\n"; 
-  statbuf << "     <COMPUTE> " << timer.value() << "</COMPUTE>\n";
-  std::cout<< "COMPUTE TIME : " << timer.value() <<std::endl; 
-    
-  timer.reset();
-  timer.start();
-  x = solver.solve(b); 
-  if (solver.info() == NumericalIssue)
-  {
-    std::cerr << "Solver failed ... \n";
-    return;
-  }
-  timer.stop();
-  solve_time = timer.value();
-  statbuf << "     <SOLVE> " << timer.value() << "</SOLVE>\n"; 
-  std::cout<< "SOLVE TIME : " << timer.value() <<std::endl; 
-  
-  total_time = solve_time + compute_time;
-  statbuf << "     <TOTAL> " << total_time << "</TOTAL>\n"; 
-  std::cout<< "TOTAL TIME : " << total_time <<std::endl; 
-  statbuf << "    </TIME>\n"; 
-  
-  // Verify the relative error
-  if(refX.size() != 0)
-    rel_error = (refX - x).norm()/refX.norm();
-  else 
-  {
-    // Compute the relative residual norm
-    Matrix<Scalar, Dynamic, 1> temp; 
-    temp = A * x; 
-    rel_error = (b-temp).norm()/b.norm();
-  }
-  statbuf << "    <ERROR> " << rel_error << "</ERROR>\n"; 
-  std::cout<< "REL. ERROR : " << rel_error << "\n\n" ;
-  if ( rel_error <= RelErr )
-  {
-    // keep track of the best time among the solvers that converged
-    if(!best_time_val || (best_time_val > total_time))
-    {
-      best_time_val = total_time;
-      best_time_id = solver_id;
-    }
-  }
-}
-
-template<typename Solver, typename Scalar>
-void call_directsolver(Solver& solver, const int solver_id, const typename Solver::MatrixType& A, const Matrix<Scalar, Dynamic, 1>& b, const Matrix<Scalar, Dynamic, 1>& refX, std::string& statFile)
-{
-    std::ofstream statbuf(statFile.c_str(), std::ios::app);
-    statbuf << "   <SOLVER_STAT ID='" << solver_id <<"'>\n"; 
-    call_solver(solver, solver_id, A, b, refX,statbuf);
-    statbuf << "   </SOLVER_STAT>\n";
-    statbuf.close();
-}
-
-template<typename Solver, typename Scalar>
-void call_itersolver(Solver &solver, const int solver_id, const typename Solver::MatrixType& A, const Matrix<Scalar, Dynamic, 1>& b, const Matrix<Scalar, Dynamic, 1>& refX, std::string& statFile)
-{
-  solver.setTolerance(RelErr); 
-  solver.setMaxIterations(MaximumIters);
-  
-  std::ofstream statbuf(statFile.c_str(), std::ios::app);
-  statbuf << " <SOLVER_STAT ID='" << solver_id <<"'>\n"; 
-  call_solver(solver, solver_id, A, b, refX,statbuf); 
-  statbuf << "   <ITER> "<< solver.iterations() << "</ITER>\n";
-  statbuf << " </SOLVER_STAT>\n";
-  std::cout << "ITERATIONS : " << solver.iterations() <<"\n\n\n"; 
-  
-}
-
-
-template <typename Scalar>
-void SelectSolvers(const SparseMatrix<Scalar>&A, unsigned int sym, Matrix<Scalar, Dynamic, 1>& b, const Matrix<Scalar, Dynamic, 1>& refX, std::string& statFile)
-{
-  typedef SparseMatrix<Scalar, ColMajor> SpMat; 
-  // First, deal with Nonsymmetric and symmetric matrices
-  best_time_id = 0; 
-  best_time_val = 0.0;
-  //UMFPACK
-  #ifdef EIGEN_UMFPACK_SUPPORT
-  {
-    cout << "Solving with UMFPACK LU ... \n"; 
-    UmfPackLU<SpMat> solver; 
-    call_directsolver(solver, EIGEN_UMFPACK, A, b, refX,statFile); 
-  }
-  #endif
-    //SuperLU
-  #ifdef EIGEN_SUPERLU_SUPPORT
-  {
-    cout << "\nSolving with SUPERLU ... \n"; 
-    SuperLU<SpMat> solver;
-    call_directsolver(solver, EIGEN_SUPERLU, A, b, refX,statFile); 
-  }
-  #endif
-    
-   // PaStix LU
-  #ifdef EIGEN_PASTIX_SUPPORT
-  {
-    cout << "\nSolving with PASTIX LU ... \n"; 
-    PastixLU<SpMat> solver; 
-    call_directsolver(solver, EIGEN_PASTIX, A, b, refX,statFile) ;
-  }
-  #endif
-
-   //PARDISO LU
-  #ifdef EIGEN_PARDISO_SUPPORT
-  {
-    cout << "\nSolving with PARDISO LU ... \n"; 
-    PardisoLU<SpMat>  solver; 
-    call_directsolver(solver, EIGEN_PARDISO, A, b, refX,statFile);
-  }
-  #endif
-  
-  // Eigen SparseLU with COLAMD ordering
-  cout << "\n Solving with Sparse LU AND COLAMD ... \n";
-  SparseLU<SpMat, COLAMDOrdering<int> >   solver;
-  call_directsolver(solver, EIGEN_SPARSELU_COLAMD, A, b, refX, statFile); 
-  // Eigen SparseLU METIS
-  #ifdef EIGEN_METIS_SUPPORT
-  {
-    cout << "\n Solving with Sparse LU AND METIS ... \n";
-    SparseLU<SpMat, MetisOrdering<int> >   solver;
-    call_directsolver(solver, EIGEN_SPARSELU_METIS, A, b, refX, statFile); 
-  }
-  #endif
-  
-  //BiCGSTAB
-  {
-    cout << "\nSolving with BiCGSTAB ... \n"; 
-    BiCGSTAB<SpMat> solver; 
-    call_itersolver(solver, EIGEN_BICGSTAB, A, b, refX,statFile);
-  }
-  //BiCGSTAB+ILUT
-  {
-    cout << "\nSolving with BiCGSTAB and ILUT ... \n"; 
-    BiCGSTAB<SpMat, IncompleteLUT<Scalar> > solver; 
-    call_itersolver(solver, EIGEN_BICGSTAB_ILUT, A, b, refX,statFile); 
-  }
-  
-   
-  //GMRES
-//   {
-//     cout << "\nSolving with GMRES ... \n"; 
-//     GMRES<SpMat> solver; 
-//     call_itersolver(solver, EIGEN_GMRES, A, b, refX,statFile); 
-//   }
-  //GMRES+ILUT
-  {
-    cout << "\nSolving with GMRES and ILUT ... \n"; 
-    GMRES<SpMat, IncompleteLUT<Scalar> > solver; 
-    call_itersolver(solver, EIGEN_GMRES_ILUT, A, b, refX,statFile);
-  }
-  
-  // Hermitian matrices, not necessarily positive definite
-  if (sym != NonSymmetric)
-  {
-    // Internal Cholesky
-    {
-      cout << "\nSolving with Simplicial LDLT ... \n"; 
-      SimplicialLDLT<SpMat, Lower> solver;
-      call_directsolver(solver, EIGEN_SIMPLICIAL_LDLT, A, b, refX,statFile); 
-    }
-    
-    // CHOLMOD
-    #ifdef EIGEN_CHOLMOD_SUPPORT
-    {
-      cout << "\nSolving with CHOLMOD LDLT ... \n"; 
-      CholmodDecomposition<SpMat, Lower> solver;
-      solver.setMode(CholmodLDLt);
-       call_directsolver(solver,EIGEN_CHOLMOD_LDLT, A, b, refX,statFile);
-    }
-    #endif
-    
-    //PASTIX LDLT
-    #ifdef EIGEN_PASTIX_SUPPORT
-    {
-      cout << "\nSolving with PASTIX LDLT ... \n"; 
-      PastixLDLT<SpMat, Lower> solver; 
-      call_directsolver(solver,EIGEN_PASTIX_LDLT, A, b, refX,statFile); 
-    }
-    #endif
-    
-    //PARDISO LDLT
-    #ifdef EIGEN_PARDISO_SUPPORT
-    {
-      cout << "\nSolving with PARDISO LDLT ... \n"; 
-      PardisoLDLT<SpMat, Lower> solver; 
-      call_directsolver(solver,EIGEN_PARDISO_LDLT, A, b, refX,statFile); 
-    }
-    #endif
-  }
-
-   // Now, symmetric POSITIVE DEFINITE matrices
-  if (sym == SPD)
-  {
-    
-    //Internal Sparse Cholesky
-    {
-      cout << "\nSolving with SIMPLICIAL LLT ... \n"; 
-      SimplicialLLT<SpMat, Lower> solver; 
-      call_directsolver(solver,EIGEN_SIMPLICIAL_LLT, A, b, refX,statFile); 
-    }
-    
-    // CHOLMOD
-    #ifdef EIGEN_CHOLMOD_SUPPORT
-    {
-      // CholMOD SuperNodal LLT
-      cout << "\nSolving with CHOLMOD LLT (Supernodal)... \n"; 
-      CholmodDecomposition<SpMat, Lower> solver;
-      solver.setMode(CholmodSupernodalLLt);
-       call_directsolver(solver,EIGEN_CHOLMOD_SUPERNODAL_LLT, A, b, refX,statFile);
-      // CholMod Simplicial LLT
-      cout << "\nSolving with CHOLMOD LLT (Simplicial) ... \n"; 
-      solver.setMode(CholmodSimplicialLLt);
-      call_directsolver(solver,EIGEN_CHOLMOD_SIMPLICIAL_LLT, A, b, refX,statFile);
-    }
-    #endif
-    
-    //PASTIX LLT
-    #ifdef EIGEN_PASTIX_SUPPORT
-    {
-      cout << "\nSolving with PASTIX LLT ... \n"; 
-      PastixLLT<SpMat, Lower> solver; 
-      call_directsolver(solver,EIGEN_PASTIX_LLT, A, b, refX,statFile);
-    }
-    #endif
-    
-    //PARDISO LLT
-    #ifdef EIGEN_PARDISO_SUPPORT
-    {
-      cout << "\nSolving with PARDISO LLT ... \n"; 
-      PardisoLLT<SpMat, Lower> solver; 
-      call_directsolver(solver,EIGEN_PARDISO_LLT, A, b, refX,statFile); 
-    }
-    #endif
-    
-    // Internal CG
-    {
-      cout << "\nSolving with CG ... \n"; 
-      ConjugateGradient<SpMat, Lower> solver; 
-      call_itersolver(solver,EIGEN_CG, A, b, refX,statFile);
-    }
-    //CG+IdentityPreconditioner
-//     {
-//       cout << "\nSolving with CG and IdentityPreconditioner ... \n"; 
-//       ConjugateGradient<SpMat, Lower, IdentityPreconditioner> solver; 
-//       call_itersolver(solver,EIGEN_CG_PRECOND, A, b, refX,statFile);
-//     }
-  } // End SPD matrices 
-}
-
-/* Browse all the matrices available in the specified folder 
- * and solve the associated linear system.
- * The results of each solve are printed in the standard output
- * and optionally in the provided XML file
- */
-template <typename Scalar>
-void Browse_Matrices(const string folder, bool statFileExists, std::string& statFile, int maxiters, double tol)
-{
-  MaximumIters = maxiters; // Maximum number of iterations, global variable 
-  RelErr = tol;  //Relative residual error  as stopping criterion for iterative solvers
-  MatrixMarketIterator<Scalar> it(folder);
-  for ( ; it; ++it)
-  {
-    //print the infos for this linear system 
-    if(statFileExists)
-    {
-      std::ofstream statbuf(statFile.c_str(), std::ios::app);
-      statbuf << "<LINEARSYSTEM> \n";
-      statbuf << "   <MATRIX> \n";
-      statbuf << "     <NAME> " << it.matname() << " </NAME>\n"; 
-      statbuf << "     <SIZE> " << it.matrix().rows() << " </SIZE>\n"; 
-      statbuf << "     <ENTRIES> " << it.matrix().nonZeros() << "</ENTRIES>\n";
-      if (it.sym()!=NonSymmetric)
-      {
-        statbuf << "     <SYMMETRY> Symmetric </SYMMETRY>\n" ; 
-        if (it.sym() == SPD) 
-          statbuf << "     <POSDEF> YES </POSDEF>\n"; 
-        else 
-          statbuf << "     <POSDEF> NO </POSDEF>\n"; 
-          
-      }
-      else
-      {
-        statbuf << "     <SYMMETRY> NonSymmetric </SYMMETRY>\n" ; 
-        statbuf << "     <POSDEF> NO </POSDEF>\n"; 
-      }
-      statbuf << "   </MATRIX> \n";
-      statbuf.close();
-    }
-    
-    cout<< "\n\n===================================================== \n";
-    cout<< " ======  SOLVING WITH MATRIX " << it.matname() << " ====\n";
-    cout<< " =================================================== \n\n";
-    Matrix<Scalar, Dynamic, 1> refX;
-    if(it.hasrefX()) refX = it.refX();
-    // Call all suitable solvers for this linear system 
-    SelectSolvers<Scalar>(it.matrix(), it.sym(), it.rhs(), refX, statFile);
-    
-    if(statFileExists)
-    {
-      std::ofstream statbuf(statFile.c_str(), std::ios::app);
-      statbuf << "  <BEST_SOLVER ID='"<< best_time_id
-              << "'></BEST_SOLVER>\n"; 
-      statbuf << " </LINEARSYSTEM> \n"; 
-      statbuf.close();
-    }
-  } 
-} 
-
-bool get_options(int argc, char **args, string option, string* value=0)
-{
-  int idx = 1; bool found = false; 
-  while (idx<argc && !found){
-    if (option.compare(args[idx]) == 0){
-      found = true; 
-      if(value) *value = args[idx+1];
-    }
-    idx+=2;
-  }
-  return found; 
-}
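
The acceptance test in call_solver above compares the computed solution against the reference solution when one ships with the matrix, and falls back to the relative residual otherwise; a self-contained restatement (hypothetical name relative_error):

#include <Eigen/SparseCore>

// Relative error against the true solution if refX is non-empty,
// otherwise the relative residual norm ||b - A*x|| / ||b||.
template<typename Scalar>
double relative_error(const Eigen::SparseMatrix<Scalar>& A,
                      const Eigen::Matrix<Scalar,Eigen::Dynamic,1>& b,
                      const Eigen::Matrix<Scalar,Eigen::Dynamic,1>& x,
                      const Eigen::Matrix<Scalar,Eigen::Dynamic,1>& refX)
{
  if(refX.size() != 0)
    return (refX - x).norm() / refX.norm();
  return (b - A*x).norm() / b.norm();
}
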
diff --git a/resources/3rdparty/eigen/blas/CMakeLists.txt b/resources/3rdparty/eigen/blas/CMakeLists.txt
deleted file mode 100644
index c35a2fdbe..000000000
--- a/resources/3rdparty/eigen/blas/CMakeLists.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-
-project(EigenBlas CXX)
-
-include("../cmake/language_support.cmake")
-
-workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS)
-
-if(EIGEN_Fortran_COMPILER_WORKS)
-  enable_language(Fortran OPTIONAL)
-endif()
-
-add_custom_target(blas)
-
-set(EigenBlas_SRCS single.cpp double.cpp complex_single.cpp complex_double.cpp xerbla.cpp)
-
-if(EIGEN_Fortran_COMPILER_WORKS)
-
-set(EigenBlas_SRCS ${EigenBlas_SRCS}
-    complexdots.f
-    srotm.f srotmg.f drotm.f drotmg.f
-    lsame.f  dspmv.f ssbmv.f
-    chbmv.f  sspmv.f
-    zhbmv.f  chpmv.f dsbmv.f
-    zhpmv.f
-    dtbmv.f stbmv.f ctbmv.f ztbmv.f
-)
-else()
-
-message(WARNING " No Fortran compiler has been detected; the BLAS build will be incomplete.")
-
-endif()
-
-add_library(eigen_blas_static ${EigenBlas_SRCS})
-add_library(eigen_blas SHARED ${EigenBlas_SRCS})
-
-if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
-  target_link_libraries(eigen_blas_static ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
-  target_link_libraries(eigen_blas        ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
-endif()
-
-add_dependencies(blas eigen_blas eigen_blas_static)
-
-install(TARGETS eigen_blas eigen_blas_static
-        RUNTIME DESTINATION bin
-        LIBRARY DESTINATION lib
-        ARCHIVE DESTINATION lib)
-
-if(EIGEN_Fortran_COMPILER_WORKS)
-
-if(EIGEN_LEAVE_TEST_IN_ALL_TARGET)
-  add_subdirectory(testing) # can't do EXCLUDE_FROM_ALL here, breaks CTest
-else()
-  add_subdirectory(testing EXCLUDE_FROM_ALL)
-endif()
-
-endif()
-
diff --git a/resources/3rdparty/eigen/blas/common.h b/resources/3rdparty/eigen/blas/common.h
deleted file mode 100644
index 2bf642c6b..000000000
--- a/resources/3rdparty/eigen/blas/common.h
+++ /dev/null
@@ -1,145 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_BLAS_COMMON_H
-#define EIGEN_BLAS_COMMON_H
-
-#include <Eigen/Core>
-#include <Eigen/Jacobi>
-
-#include <iostream>
-#include <complex>
-
-#ifndef SCALAR
-#error the token SCALAR must be defined to compile this file
-#endif
-
-#include <Eigen/src/misc/blas.h>
-
-
-#define NOTR    0
-#define TR      1
-#define ADJ     2
-
-#define LEFT    0
-#define RIGHT   1
-
-#define UP      0
-#define LO      1
-
-#define NUNIT   0
-#define UNIT    1
-
-#define INVALID 0xff
-
-#define OP(X)   (   ((X)=='N' || (X)=='n') ? NOTR   \
-                  : ((X)=='T' || (X)=='t') ? TR     \
-                  : ((X)=='C' || (X)=='c') ? ADJ    \
-                  : INVALID)
-
-#define SIDE(X) (   ((X)=='L' || (X)=='l') ? LEFT   \
-                  : ((X)=='R' || (X)=='r') ? RIGHT  \
-                  : INVALID)
-
-#define UPLO(X) (   ((X)=='U' || (X)=='u') ? UP     \
-                  : ((X)=='L' || (X)=='l') ? LO     \
-                  : INVALID)
-
-#define DIAG(X) (   ((X)=='N' || (X)=='n') ? NUNIT  \
-                  : ((X)=='U' || (X)=='u') ? UNIT   \
-                  : INVALID)
-
-
-inline bool check_op(const char* op)
-{
-  return OP(*op)!=0xff;
-}
-
-inline bool check_side(const char* side)
-{
-  return SIDE(*side)!=0xff;
-}
-
-inline bool check_uplo(const char* uplo)
-{
-  return UPLO(*uplo)!=0xff;
-}
-
-
-namespace Eigen {
-#include "BandTriangularSolver.h"
-#include "GeneralRank1Update.h"
-#include "PackedSelfadjointProduct.h"
-#include "PackedTriangularMatrixVector.h"
-#include "PackedTriangularSolverVector.h"
-#include "Rank2Update.h"
-}
-
-using namespace Eigen;
-
-typedef SCALAR Scalar;
-typedef NumTraits<Scalar>::Real RealScalar;
-typedef std::complex<RealScalar> Complex;
-
-enum
-{
-  IsComplex = Eigen::NumTraits<SCALAR>::IsComplex,
-  Conj = IsComplex
-};
-
-typedef Matrix<Scalar,Dynamic,Dynamic,ColMajor> PlainMatrixType;
-typedef Map<Matrix<Scalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > MatrixType;
-typedef Map<Matrix<Scalar,Dynamic,1>, 0, InnerStride<Dynamic> > StridedVectorType;
-typedef Map<Matrix<Scalar,Dynamic,1> > CompactVectorType;
-
-template<typename T>
-Map<Matrix<T,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> >
-matrix(T* data, int rows, int cols, int stride)
-{
-  return Map<Matrix<T,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> >(data, rows, cols, OuterStride<>(stride));
-}
-
-template<typename T>
-Map<Matrix<T,Dynamic,1>, 0, InnerStride<Dynamic> > vector(T* data, int size, int incr)
-{
-  return Map<Matrix<T,Dynamic,1>, 0, InnerStride<Dynamic> >(data, size, InnerStride<Dynamic>(incr));
-}
-
-template<typename T>
-Map<Matrix<T,Dynamic,1> > vector(T* data, int size)
-{
-  return Map<Matrix<T,Dynamic,1> >(data, size);
-}
-
-template<typename T>
-T* get_compact_vector(T* x, int n, int incx)
-{
-  if(incx==1)
-    return x;
-
-  T* ret = new Scalar[n];
-  if(incx<0) vector(ret,n) = vector(x,n,-incx).reverse();
-  else       vector(ret,n) = vector(x,n, incx);
-  return ret;
-}
-
-template<typename T>
-T* copy_back(T* x_cpy, T* x, int n, int incx)
-{
-  if(x_cpy==x)
-    return 0;
-
-  if(incx<0) vector(x,n,-incx).reverse() = vector(x_cpy,n);
-  else       vector(x,n, incx)           = vector(x_cpy,n);
-  return x_cpy;
-}
-
-#define EIGEN_BLAS_FUNC(X) EIGEN_CAT(SCALAR_SUFFIX,X##_)
-
-#endif // EIGEN_BLAS_COMMON_H
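
A small sketch of the increment handling above, under the assumption of a caller-owned array: a BLAS vector argument with stride incx maps onto Eigen through an InnerStride, and get_compact_vector() copies it (reversed when incx is negative) into a contiguous buffer that copy_back() later writes back.

#include <Eigen/Core>

// Reading a strided BLAS vector argument through Eigen, as vector(x, n, incx)
// above does; get_compact_vector() then materializes it contiguously.
Eigen::VectorXf read_strided(float* data)
{
  Eigen::Map<Eigen::VectorXf, 0, Eigen::InnerStride<> > v(data, 4, Eigen::InnerStride<>(2));
  return Eigen::VectorXf(v);   // contiguous copy {data[0], data[2], data[4], data[6]}
}
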
diff --git a/resources/3rdparty/eigen/blas/double.cpp b/resources/3rdparty/eigen/blas/double.cpp
deleted file mode 100644
index 8fd0709ba..000000000
--- a/resources/3rdparty/eigen/blas/double.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#define SCALAR        double
-#define SCALAR_SUFFIX d
-#define SCALAR_SUFFIX_UP "D"
-#define ISCOMPLEX     0
-
-#include "level1_impl.h"
-#include "level1_real_impl.h"
-#include "level2_impl.h"
-#include "level2_real_impl.h"
-#include "level3_impl.h"
-
-double BLASFUNC(dsdot)(int* n, float* x, int* incx, float* y, int* incy)
-{
-  if(*n<=0) return 0;
-
-  if(*incx==1 && *incy==1)    return (vector(x,*n).cast<double>().cwiseProduct(vector(y,*n).cast<double>())).sum();
-  else if(*incx>0 && *incy>0) return (vector(x,*n,*incx).cast<double>().cwiseProduct(vector(y,*n,*incy).cast<double>())).sum();
-  else if(*incx<0 && *incy>0) return (vector(x,*n,-*incx).reverse().cast<double>().cwiseProduct(vector(y,*n,*incy).cast<double>())).sum();
-  else if(*incx>0 && *incy<0) return (vector(x,*n,*incx).cast<double>().cwiseProduct(vector(y,*n,-*incy).reverse().cast<double>())).sum();
-  else if(*incx<0 && *incy<0) return (vector(x,*n,-*incx).reverse().cast<double>().cwiseProduct(vector(y,*n,-*incy).reverse().cast<double>())).sum();
-  else return 0;
-}
-
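
The dsdot kernel above computes the dot product of two single-precision vectors accumulated in double precision; restated with plain Eigen maps for the contiguous case (hypothetical name dsdot_reference):

#include <Eigen/Core>

// Promote both operands to double before multiplying, as the kernel above does.
double dsdot_reference(const float* x, const float* y, int n)
{
  return Eigen::Map<const Eigen::VectorXf>(x, n).cast<double>()
           .cwiseProduct(Eigen::Map<const Eigen::VectorXf>(y, n).cast<double>())
           .sum();
}
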
diff --git a/resources/3rdparty/eigen/blas/level2_cplx_impl.h b/resources/3rdparty/eigen/blas/level2_cplx_impl.h
deleted file mode 100644
index f52d384a9..000000000
--- a/resources/3rdparty/eigen/blas/level2_cplx_impl.h
+++ /dev/null
@@ -1,394 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "common.h"
-
-/**  ZHEMV  performs the matrix-vector  operation
-  *
-  *     y := alpha*A*x + beta*y,
-  *
-  *  where alpha and beta are scalars, x and y are n element vectors and
-  *  A is an n by n hermitian matrix.
-  */
-int EIGEN_BLAS_FUNC(hemv)(char *uplo, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *px, int *incx, RealScalar *pbeta, RealScalar *py, int *incy)
-{
-  typedef void (*functype)(int, const Scalar*, int, const Scalar*, int, Scalar*, Scalar);
-  static functype func[2];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<2; ++k)
-      func[k] = 0;
-
-    func[UP] = (internal::selfadjoint_matrix_vector_product<Scalar,int,ColMajor,Upper,false,false>::run);
-    func[LO] = (internal::selfadjoint_matrix_vector_product<Scalar,int,ColMajor,Lower,false,false>::run);
-
-    init = true;
-  }
-
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* y = reinterpret_cast<Scalar*>(py);
-  Scalar alpha  = *reinterpret_cast<Scalar*>(palpha);
-  Scalar beta   = *reinterpret_cast<Scalar*>(pbeta);
-
-  // check arguments
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)        info = 1;
-  else if(*n<0)                   info = 2;
-  else if(*lda<std::max(1,*n))    info = 5;
-  else if(*incx==0)               info = 7;
-  else if(*incy==0)               info = 10;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"HEMV ",&info,6);
-
-  if(*n==0)
-    return 1;
-
-  Scalar* actual_x = get_compact_vector(x,*n,*incx);
-  Scalar* actual_y = get_compact_vector(y,*n,*incy);
-
-  if(beta!=Scalar(1))
-  {
-    if(beta==Scalar(0)) vector(actual_y, *n).setZero();
-    else                vector(actual_y, *n) *= beta;
-  }
-
-  if(alpha!=Scalar(0))
-  {
-    int code = UPLO(*uplo);
-    if(code>=2 || func[code]==0)
-      return 0;
-
-    func[code](*n, a, *lda, actual_x, 1, actual_y, alpha);
-  }
-
-  if(actual_x!=x) delete[] actual_x;
-  if(actual_y!=y) delete[] copy_back(actual_y,y,*n,*incy);
-
-  return 1;
-}
-
-/**  ZHBMV  performs the matrix-vector  operation
-  *
-  *     y := alpha*A*x + beta*y,
-  *
-  *  where alpha and beta are scalars, x and y are n element vectors and
-  *  A is an n by n hermitian band matrix, with k super-diagonals.
-  */
-// int EIGEN_BLAS_FUNC(hbmv)(char *uplo, int *n, int *k, RealScalar *alpha, RealScalar *a, int *lda,
-//                           RealScalar *x, int *incx, RealScalar *beta, RealScalar *y, int *incy)
-// {
-//   return 1;
-// }
-
-/**  ZHPMV  performs the matrix-vector operation
-  *
-  *     y := alpha*A*x + beta*y,
-  *
-  *  where alpha and beta are scalars, x and y are n element vectors and
-  *  A is an n by n hermitian matrix, supplied in packed form.
-  */
-// int EIGEN_BLAS_FUNC(hpmv)(char *uplo, int *n, RealScalar *alpha, RealScalar *ap, RealScalar *x, int *incx, RealScalar *beta, RealScalar *y, int *incy)
-// {
-//   return 1;
-// }
-
-/**  ZHPR    performs the hermitian rank 1 operation
-  *
-  *     A := alpha*x*conjg( x' ) + A,
-  *
-  *  where alpha is a real scalar, x is an n element vector and A is an
-  *  n by n hermitian matrix, supplied in packed form.
-  */
-int EIGEN_BLAS_FUNC(hpr)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *pap)
-{
-  typedef void (*functype)(int, Scalar*, const Scalar*, RealScalar);
-  static functype func[2];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<2; ++k)
-      func[k] = 0;
-
-    func[UP] = (internal::selfadjoint_packed_rank1_update<Scalar,int,ColMajor,Upper,false,Conj>::run);
-    func[LO] = (internal::selfadjoint_packed_rank1_update<Scalar,int,ColMajor,Lower,false,Conj>::run);
-
-    init = true;
-  }
-
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* ap = reinterpret_cast<Scalar*>(pap);
-  RealScalar alpha = *palpha;
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(*n<0)                                                       info = 2;
-  else if(*incx==0)                                                   info = 5;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"HPR  ",&info,6);
-
-  if(alpha==Scalar(0))
-    return 1;
-
-  Scalar* x_cpy = get_compact_vector(x, *n, *incx);
-
-  int code = UPLO(*uplo);
-  if(code>=2 || func[code]==0)
-    return 0;
-
-  func[code](*n, ap, x_cpy, alpha);
-
-  if(x_cpy!=x)  delete[] x_cpy;
-
-  return 1;
-}
-
-/**  ZHPR2  performs the hermitian rank 2 operation
-  *
-  *     A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A,
-  *
-  *  where alpha is a scalar, x and y are n element vectors and A is an
-  *  n by n hermitian matrix, supplied in packed form.
-  */
-int EIGEN_BLAS_FUNC(hpr2)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pap)
-{
-  typedef void (*functype)(int, Scalar*, const Scalar*, const Scalar*, Scalar);
-  static functype func[2];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<2; ++k)
-      func[k] = 0;
-
-    func[UP] = (internal::packed_rank2_update_selector<Scalar,int,Upper>::run);
-    func[LO] = (internal::packed_rank2_update_selector<Scalar,int,Lower>::run);
-
-    init = true;
-  }
-
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* y = reinterpret_cast<Scalar*>(py);
-  Scalar* ap = reinterpret_cast<Scalar*>(pap);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(*n<0)                                                       info = 2;
-  else if(*incx==0)                                                   info = 5;
-  else if(*incy==0)                                                   info = 7;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"HPR2 ",&info,6);
-
-  if(alpha==Scalar(0))
-    return 1;
-
-  Scalar* x_cpy = get_compact_vector(x, *n, *incx);
-  Scalar* y_cpy = get_compact_vector(y, *n, *incy);
-
-  int code = UPLO(*uplo);
-  if(code>=2 || func[code]==0)
-    return 0;
-
-  func[code](*n, ap, x_cpy, y_cpy, alpha);
-
-  if(x_cpy!=x)  delete[] x_cpy;
-  if(y_cpy!=y)  delete[] y_cpy;
-
-  return 1;
-}
-
-/**  ZHER   performs the hermitian rank 1 operation
-  *
-  *     A := alpha*x*conjg( x' ) + A,
-  *
-  *  where alpha is a real scalar, x is an n element vector and A is an
-  *  n by n hermitian matrix.
-  */
-int EIGEN_BLAS_FUNC(her)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *pa, int *lda)
-{
-  typedef void (*functype)(int, Scalar*, int, const Scalar*, Scalar);
-  static functype func[2];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<2; ++k)
-      func[k] = 0;
-
-    func[UP] = (selfadjoint_rank1_update<Scalar,int,ColMajor,Upper,false,Conj>::run);
-    func[LO] = (selfadjoint_rank1_update<Scalar,int,ColMajor,Lower,false,Conj>::run);
-
-    init = true;
-  }
-
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  RealScalar alpha = *reinterpret_cast<RealScalar*>(palpha);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(*n<0)                                                       info = 2;
-  else if(*incx==0)                                                   info = 5;
-  else if(*lda<std::max(1,*n))                                        info = 7;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"HER  ",&info,6);
-
-  if(alpha==RealScalar(0))
-    return 1;
-
-  Scalar* x_cpy = get_compact_vector(x, *n, *incx);
-
-  int code = UPLO(*uplo);
-  if(code>=2 || func[code]==0)
-    return 0;
-
-  func[code](*n, a, *lda, x_cpy, alpha);
-
-  // per the BLAS spec, the imaginary parts of the diagonal are assumed zero on entry
-  // and set to zero on exit, so enforce that after the rank-1 update
-  matrix(a,*n,*n,*lda).diagonal().imag().setZero();
-
-  if(x_cpy!=x)  delete[] x_cpy;
-
-  return 1;
-}
-
-/**  ZHER2  performs the hermitian rank 2 operation
-  *
-  *     A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A,
-  *
-  *  where alpha is a scalar, x and y are n element vectors and A is an n
-  *  by n hermitian matrix.
-  */
-int EIGEN_BLAS_FUNC(her2)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pa, int *lda)
-{
-  typedef void (*functype)(int, Scalar*, int, const Scalar*, const Scalar*, Scalar);
-  static functype func[2];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<2; ++k)
-      func[k] = 0;
-
-    func[UP] = (internal::rank2_update_selector<Scalar,int,Upper>::run);
-    func[LO] = (internal::rank2_update_selector<Scalar,int,Lower>::run);
-
-    init = true;
-  }
-
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* y = reinterpret_cast<Scalar*>(py);
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(*n<0)                                                       info = 2;
-  else if(*incx==0)                                                   info = 5;
-  else if(*incy==0)                                                   info = 7;
-  else if(*lda<std::max(1,*n))                                        info = 9;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"HER2 ",&info,6);
-
-  if(alpha==Scalar(0))
-    return 1;
-
-  Scalar* x_cpy = get_compact_vector(x, *n, *incx);
-  Scalar* y_cpy = get_compact_vector(y, *n, *incy);
-
-  int code = UPLO(*uplo);
-  if(code>=2 || func[code]==0)
-    return 0;
-
-  func[code](*n, a, *lda, x_cpy, y_cpy, alpha);
-
-  matrix(a,*n,*n,*lda).diagonal().imag().setZero();
-
-  if(x_cpy!=x)  delete[] x_cpy;
-  if(y_cpy!=y)  delete[] y_cpy;
-
-  return 1;
-}
-
-/**  ZGERU  performs the rank 1 operation
-  *
-  *     A := alpha*x*y' + A,
-  *
-  *  where alpha is a scalar, x is an m element vector, y is an n element
-  *  vector and A is an m by n matrix.
-  */
-int EIGEN_BLAS_FUNC(geru)(int *m, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pa, int *lda)
-{
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* y = reinterpret_cast<Scalar*>(py);
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-
-  int info = 0;
-       if(*m<0)                                                       info = 1;
-  else if(*n<0)                                                       info = 2;
-  else if(*incx==0)                                                   info = 5;
-  else if(*incy==0)                                                   info = 7;
-  else if(*lda<std::max(1,*m))                                        info = 9;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"GERU ",&info,6);
-
-  if(alpha==Scalar(0))
-    return 1;
-
-  Scalar* x_cpy = get_compact_vector(x,*m,*incx);
-  Scalar* y_cpy = get_compact_vector(y,*n,*incy);
-
-  internal::general_rank1_update<Scalar,int,ColMajor,false,false>::run(*m, *n, a, *lda, x_cpy, y_cpy, alpha);
-
-  if(x_cpy!=x)  delete[] x_cpy;
-  if(y_cpy!=y)  delete[] y_cpy;
-
-  return 1;
-}
-
-/**  ZGERC  performs the rank 1 operation
-  *
-  *     A := alpha*x*conjg( y' ) + A,
-  *
-  *  where alpha is a scalar, x is an m element vector, y is an n element
-  *  vector and A is an m by n matrix.
-  */
-int EIGEN_BLAS_FUNC(gerc)(int *m, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pa, int *lda)
-{
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* y = reinterpret_cast<Scalar*>(py);
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-
-  int info = 0;
-       if(*m<0)                                                       info = 1;
-  else if(*n<0)                                                       info = 2;
-  else if(*incx==0)                                                   info = 5;
-  else if(*incy==0)                                                   info = 7;
-  else if(*lda<std::max(1,*m))                                        info = 9;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"GERC ",&info,6);
-
-  if(alpha==Scalar(0))
-    return 1;
-
-  Scalar* x_cpy = get_compact_vector(x,*m,*incx);
-  Scalar* y_cpy = get_compact_vector(y,*n,*incy);
-
-  internal::general_rank1_update<Scalar,int,ColMajor,false,Conj>::run(*m, *n, a, *lda, x_cpy, y_cpy, alpha);
-
-  if(x_cpy!=x)  delete[] x_cpy;
-  if(y_cpy!=y)  delete[] y_cpy;
-
-  return 1;
-}
diff --git a/resources/3rdparty/eigen/blas/level2_impl.h b/resources/3rdparty/eigen/blas/level2_impl.h
deleted file mode 100644
index bd41f7e60..000000000
--- a/resources/3rdparty/eigen/blas/level2_impl.h
+++ /dev/null
@@ -1,524 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "common.h"
-
-int EIGEN_BLAS_FUNC(gemv)(char *opa, int *m, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *incb, RealScalar *pbeta, RealScalar *pc, int *incc)
-{
-  typedef void (*functype)(int, int, const Scalar *, int, const Scalar *, int , Scalar *, int, Scalar);
-  static functype func[4];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<4; ++k)
-      func[k] = 0;
-
-    func[NOTR] = (internal::general_matrix_vector_product<int,Scalar,ColMajor,false,Scalar,false>::run);
-    func[TR  ] = (internal::general_matrix_vector_product<int,Scalar,RowMajor,false,Scalar,false>::run);
-    func[ADJ ] = (internal::general_matrix_vector_product<int,Scalar,RowMajor,Conj, Scalar,false>::run);
-
-    init = true;
-  }
-
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* b = reinterpret_cast<Scalar*>(pb);
-  Scalar* c = reinterpret_cast<Scalar*>(pc);
-  Scalar alpha  = *reinterpret_cast<Scalar*>(palpha);
-  Scalar beta   = *reinterpret_cast<Scalar*>(pbeta);
-
-  // check arguments
-  int info = 0;
-  if(OP(*opa)==INVALID)           info = 1;
-  else if(*m<0)                   info = 2;
-  else if(*n<0)                   info = 3;
-  else if(*lda<std::max(1,*m))    info = 6;
-  else if(*incb==0)               info = 8;
-  else if(*incc==0)               info = 11;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"GEMV ",&info,6);
-
-  if(*m==0 || *n==0 || (alpha==Scalar(0) && beta==Scalar(1)))
-    return 0;
-
-  int actual_m = *m;
-  int actual_n = *n;
-  int code = OP(*opa);
-  if(code!=NOTR)
-    std::swap(actual_m,actual_n);
-
-  Scalar* actual_b = get_compact_vector(b,actual_n,*incb);
-  Scalar* actual_c = get_compact_vector(c,actual_m,*incc);
-
-  if(beta!=Scalar(1))
-  {
-    if(beta==Scalar(0)) vector(actual_c, actual_m).setZero();
-    else                vector(actual_c, actual_m) *= beta;
-  }
-
-  if(code>=4 || func[code]==0)
-    return 0;
-
-  func[code](actual_m, actual_n, a, *lda, actual_b, 1, actual_c, 1, alpha);
-
-  if(actual_b!=b) delete[] actual_b;
-  if(actual_c!=c) delete[] copy_back(actual_c,c,actual_m,*incc);
-
-  return 1;
-}
-
-int EIGEN_BLAS_FUNC(trsv)(char *uplo, char *opa, char *diag, int *n, RealScalar *pa, int *lda, RealScalar *pb, int *incb)
-{
-  typedef void (*functype)(int, const Scalar *, int, Scalar *);
-  static functype func[16];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<16; ++k)
-      func[k] = 0;
-
-    func[NOTR  | (UP << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|0,       false,ColMajor>::run);
-    func[TR    | (UP << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|0,       false,RowMajor>::run);
-    func[ADJ   | (UP << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|0,       Conj, RowMajor>::run);
-
-    func[NOTR  | (LO << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|0,       false,ColMajor>::run);
-    func[TR    | (LO << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|0,       false,RowMajor>::run);
-    func[ADJ   | (LO << 2) | (NUNIT << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|0,       Conj, RowMajor>::run);
-
-    func[NOTR  | (UP << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|UnitDiag,false,ColMajor>::run);
-    func[TR    | (UP << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|UnitDiag,false,RowMajor>::run);
-    func[ADJ   | (UP << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|UnitDiag,Conj, RowMajor>::run);
-
-    func[NOTR  | (LO << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|UnitDiag,false,ColMajor>::run);
-    func[TR    | (LO << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|UnitDiag,false,RowMajor>::run);
-    func[ADJ   | (LO << 2) | (UNIT  << 3)] = (internal::triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|UnitDiag,Conj, RowMajor>::run);
-
-    init = true;
-  }
-
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* b = reinterpret_cast<Scalar*>(pb);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(OP(*opa)==INVALID)                                          info = 2;
-  else if(DIAG(*diag)==INVALID)                                       info = 3;
-  else if(*n<0)                                                       info = 4;
-  else if(*lda<std::max(1,*n))                                        info = 6;
-  else if(*incb==0)                                                   info = 8;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"TRSV ",&info,6);
-
-  Scalar* actual_b = get_compact_vector(b,*n,*incb);
-
-  int code = OP(*opa) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
-  func[code](*n, a, *lda, actual_b);
-
-  if(actual_b!=b) delete[] copy_back(actual_b,b,*n,*incb);
-
-  return 0;
-}
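-
-// Note on the dispatch index used above and in the routines below: the three
-// option characters are packed into one small integer, OP in bits 0-1, UPLO in
-// bit 2 and DIAG in bit 3, so the 16-entry table covers every combination
-// (assuming, as the packing implies, that NOTR/TR/ADJ fit in two bits and that
-// UP/LO and NUNIT/UNIT each fit in one).  Decoding it again would look like:
-//
-//   int op   =  code       & 0x3;   // NOTR, TR or ADJ
-//   int uplo = (code >> 2) & 0x1;   // UP or LO
-//   int diag = (code >> 3) & 0x1;   // NUNIT or UNIT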
-
-
-
-int EIGEN_BLAS_FUNC(trmv)(char *uplo, char *opa, char *diag, int *n, RealScalar *pa, int *lda, RealScalar *pb, int *incb)
-{
-  typedef void (*functype)(int, int, const Scalar *, int, const Scalar *, int, Scalar *, int, Scalar);
-  static functype func[16];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<16; ++k)
-      func[k] = 0;
-
-    func[NOTR  | (UP << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Upper|0,       Scalar,false,Scalar,false,ColMajor>::run);
-    func[TR    | (UP << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Lower|0,       Scalar,false,Scalar,false,RowMajor>::run);
-    func[ADJ   | (UP << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Lower|0,       Scalar,Conj, Scalar,false,RowMajor>::run);
-
-    func[NOTR  | (LO << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Lower|0,       Scalar,false,Scalar,false,ColMajor>::run);
-    func[TR    | (LO << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Upper|0,       Scalar,false,Scalar,false,RowMajor>::run);
-    func[ADJ   | (LO << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product<int,Upper|0,       Scalar,Conj, Scalar,false,RowMajor>::run);
-
-    func[NOTR  | (UP << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Upper|UnitDiag,Scalar,false,Scalar,false,ColMajor>::run);
-    func[TR    | (UP << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Lower|UnitDiag,Scalar,false,Scalar,false,RowMajor>::run);
-    func[ADJ   | (UP << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Lower|UnitDiag,Scalar,Conj, Scalar,false,RowMajor>::run);
-
-    func[NOTR  | (LO << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Lower|UnitDiag,Scalar,false,Scalar,false,ColMajor>::run);
-    func[TR    | (LO << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Upper|UnitDiag,Scalar,false,Scalar,false,RowMajor>::run);
-    func[ADJ   | (LO << 2) | (UNIT  << 3)] = (internal::triangular_matrix_vector_product<int,Upper|UnitDiag,Scalar,Conj, Scalar,false,RowMajor>::run);
-
-    init = true;
-  }
-
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* b = reinterpret_cast<Scalar*>(pb);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(OP(*opa)==INVALID)                                          info = 2;
-  else if(DIAG(*diag)==INVALID)                                       info = 3;
-  else if(*n<0)                                                       info = 4;
-  else if(*lda<std::max(1,*n))                                        info = 6;
-  else if(*incb==0)                                                   info = 8;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"TRMV ",&info,6);
-
-  if(*n==0)
-    return 1;
-
-  Scalar* actual_b = get_compact_vector(b,*n,*incb);
-  // the selected kernel accumulates res += op(A)*b, so start from a zeroed temporary
-  // and copy it back into b once the product is complete
-  Matrix<Scalar,Dynamic,1> res(*n);
-  res.setZero();
-
-  int code = OP(*opa) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
-  if(code>=16 || func[code]==0)
-    return 0;
-
-  func[code](*n, *n, a, *lda, actual_b, 1, res.data(), 1, Scalar(1));
-
-  copy_back(res.data(),b,*n,*incb);
-  if(actual_b!=b) delete[] actual_b;
-
-  return 1;
-}
-
-/**  GBMV  performs one of the matrix-vector operations
-  *
-  *     y := alpha*A*x + beta*y,   or   y := alpha*A'*x + beta*y,
-  *
-  *  where alpha and beta are scalars, x and y are vectors and A is an
-  *  m by n band matrix, with kl sub-diagonals and ku super-diagonals.
-  */
-int EIGEN_BLAS_FUNC(gbmv)(char *trans, int *m, int *n, int *kl, int *ku, RealScalar *palpha, RealScalar *pa, int *lda,
-                          RealScalar *px, int *incx, RealScalar *pbeta, RealScalar *py, int *incy)
-{
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* y = reinterpret_cast<Scalar*>(py);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-  Scalar beta = *reinterpret_cast<Scalar*>(pbeta);
-  int coeff_rows = *kl+*ku+1;
-  
-  int info = 0;
-       if(OP(*trans)==INVALID)                                        info = 1;
-  else if(*m<0)                                                       info = 2;
-  else if(*n<0)                                                       info = 3;
-  else if(*kl<0)                                                      info = 4;
-  else if(*ku<0)                                                      info = 5;
-  else if(*lda<coeff_rows)                                            info = 8;
-  else if(*incx==0)                                                   info = 10;
-  else if(*incy==0)                                                   info = 13;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"GBMV ",&info,6);
-  
-  if(*m==0 || *n==0 || (alpha==Scalar(0) && beta==Scalar(1)))
-    return 0;
-  
-  int actual_m = *m;
-  int actual_n = *n;
-  if(OP(*trans)!=NOTR)
-    std::swap(actual_m,actual_n);
-  
-  Scalar* actual_x = get_compact_vector(x,actual_n,*incx);
-  Scalar* actual_y = get_compact_vector(y,actual_m,*incy);
-  
-  if(beta!=Scalar(1))
-  {
-    if(beta==Scalar(0)) vector(actual_y, actual_m).setZero();
-    else                vector(actual_y, actual_m) *= beta;
-  }
-  
-  MatrixType mat_coeffs(a,coeff_rows,*n,*lda);
-  
-  int nb = std::min(*n,(*m)+(*ku));
-  for(int j=0; j<nb; ++j)
-  {
-    int start = std::max(0,j - *ku);
-    int end = std::min((*m)-1,j + *kl);
-    int len = end - start + 1;
-    int offset = (*ku) - j + start;
-    if(OP(*trans)==NOTR)
-      vector(actual_y+start,len) += (alpha*actual_x[j]) * mat_coeffs.col(j).segment(offset,len);
-    else if(OP(*trans)==TR)
-      actual_y[j] += alpha * ( mat_coeffs.col(j).segment(offset,len).transpose() * vector(actual_x+start,len) ).value();
-    else
-      actual_y[j] += alpha * ( mat_coeffs.col(j).segment(offset,len).adjoint()   * vector(actual_x+start,len) ).value();
-  }    
-  
-  if(actual_x!=x) delete[] actual_x;
-  if(actual_y!=y) delete[] copy_back(actual_y,y,actual_m,*incy);
-  
-  return 0;
-}
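-
-#if 0
-// Illustrative sketch only: the band layout indexed by the loop above is the
-// LAPACK general band storage, where A(i,j) is held at a[(ku+i-j) + j*lda] for
-// max(0,j-ku) <= i <= min(m-1,j+kl); that is exactly why column j is read at
-// row offset (ku - j + start).  The helper name pack_band is made up here.
-void pack_band(const double* A, int m, int n, int kl, int ku, double* a, int lda)
-{
-  for(int j=0; j<n; ++j)
-    for(int i=std::max(0,j-ku); i<=std::min(m-1,j+kl); ++i)
-      a[(ku+i-j) + j*lda] = A[i + j*m];                // both sides column-major
-}
-#endif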
-
-#if 0
-/**  TBMV  performs one of the matrix-vector operations
-  *
-  *     x := A*x,   or   x := A'*x,
-  *
-  *  where x is an n element vector and  A is an n by n unit, or non-unit,
-  *  upper or lower triangular band matrix, with ( k + 1 ) diagonals.
-  */
-int EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealScalar *pa, int *lda, RealScalar *px, int *incx)
-{
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  int coeff_rows = *k + 1;
-  
-  int info = 0;
-       if(UPLO(*uplo)==INVALID)                                       info = 1;
-  else if(OP(*opa)==INVALID)                                          info = 2;
-  else if(DIAG(*diag)==INVALID)                                       info = 3;
-  else if(*n<0)                                                       info = 4;
-  else if(*k<0)                                                       info = 5;
-  else if(*lda<coeff_rows)                                            info = 7;
-  else if(*incx==0)                                                   info = 9;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"TBMV ",&info,6);
-  
-  if(*n==0)
-    return 0;
-  
-  int actual_n = *n;
-  
-  Scalar* actual_x = get_compact_vector(x,actual_n,*incx);
-  
-  MatrixType mat_coeffs(a,coeff_rows,*n,*lda);
-  
-  Matrix<Scalar,Dynamic,1> res(*n);
-  res.setZero();
-  
-  int ku = UPLO(*uplo)==UP ? *k : 0;
-  int kl = UPLO(*uplo)==LO ? *k : 0;
-  
-  for(int j=0; j<*n; ++j)
-  {
-    int start = std::max(0,j - ku);
-    int end = std::min((*n)-1,j + kl);
-    int len = end - start + 1;
-    int offset = ku - j + start;
-    
-    // x := op(A)*x must not overwrite x while it is still being read, so
-    // accumulate the product into the temporary res and copy it back below
-    if(OP(*opa)==NOTR)
-      res.segment(start,len) += actual_x[j] * mat_coeffs.col(j).segment(offset,len);
-    else if(OP(*opa)==TR)
-      res[j] += ( mat_coeffs.col(j).segment(offset,len).transpose() * vector(actual_x+start,len) ).value();
-    else
-      res[j] += ( mat_coeffs.col(j).segment(offset,len).adjoint()   * vector(actual_x+start,len) ).value();
-  }
-  
-  // note: the unit-diagonal case (DIAG(*diag)==UNIT) is not handled in this disabled draft
-  copy_back(res.data(),x,*n,*incx);
-  if(actual_x!=x) delete[] actual_x;
-  
-  return 0;
-}
-#endif
-
-/**  DTBSV  solves one of the systems of equations
-  *
-  *     A*x = b,   or   A'*x = b,
-  *
-  *  where b and x are n element vectors and A is an n by n unit, or
-  *  non-unit, upper or lower triangular band matrix, with ( k + 1 )
-  *  diagonals.
-  *
-  *  No test for singularity or near-singularity is included in this
-  *  routine. Such tests must be performed before calling this routine.
-  */
-int EIGEN_BLAS_FUNC(tbsv)(char *uplo, char *op, char *diag, int *n, int *k, RealScalar *pa, int *lda, RealScalar *px, int *incx)
-{
-  typedef void (*functype)(int, int, const Scalar *, int, Scalar *);
-  static functype func[16];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<16; ++k)
-      func[k] = 0;
-
-    func[NOTR  | (UP << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Upper|0,       Scalar,false,Scalar,ColMajor>::run);
-    func[TR    | (UP << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Lower|0,       Scalar,false,Scalar,RowMajor>::run);
-    func[ADJ   | (UP << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Lower|0,       Scalar,Conj, Scalar,RowMajor>::run);
-
-    func[NOTR  | (LO << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Lower|0,       Scalar,false,Scalar,ColMajor>::run);
-    func[TR    | (LO << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Upper|0,       Scalar,false,Scalar,RowMajor>::run);
-    func[ADJ   | (LO << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector<int,Upper|0,       Scalar,Conj, Scalar,RowMajor>::run);
-
-    func[NOTR  | (UP << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Upper|UnitDiag,Scalar,false,Scalar,ColMajor>::run);
-    func[TR    | (UP << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Lower|UnitDiag,Scalar,false,Scalar,RowMajor>::run);
-    func[ADJ   | (UP << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Lower|UnitDiag,Scalar,Conj, Scalar,RowMajor>::run);
-
-    func[NOTR  | (LO << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Lower|UnitDiag,Scalar,false,Scalar,ColMajor>::run);
-    func[TR    | (LO << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Upper|UnitDiag,Scalar,false,Scalar,RowMajor>::run);
-    func[ADJ   | (LO << 2) | (UNIT  << 3)] = (internal::band_solve_triangular_selector<int,Upper|UnitDiag,Scalar,Conj, Scalar,RowMajor>::run);
-
-    init = true;
-  }
-
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  int coeff_rows = *k+1;
-  
-  int info = 0;
-       if(UPLO(*uplo)==INVALID)                                       info = 1;
-  else if(OP(*op)==INVALID)                                           info = 2;
-  else if(DIAG(*diag)==INVALID)                                       info = 3;
-  else if(*n<0)                                                       info = 4;
-  else if(*k<0)                                                       info = 5;
-  else if(*lda<coeff_rows)                                            info = 7;
-  else if(*incx==0)                                                   info = 9;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"TBSV ",&info,6);
-  
-  if(*n==0 || (*k==0 && DIAG(*diag)==UNIT))
-    return 0;
-  
-  int actual_n = *n;
- 
-  Scalar* actual_x = get_compact_vector(x,actual_n,*incx);
-  
-  int code = OP(*op) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
-  if(code>=16 || func[code]==0)
-    return 0;
-
-  func[code](*n, *k, a, *lda, actual_x);
-  
-  if(actual_x!=x) delete[] copy_back(actual_x,x,actual_n,*incx);
-  
-  return 0;
-}
-
-/**  DTPMV  performs one of the matrix-vector operations
-  *
-  *     x := A*x,   or   x := A'*x,
-  *
-  *  where x is an n element vector and  A is an n by n unit, or non-unit,
-  *  upper or lower triangular matrix, supplied in packed form.
-  */
-int EIGEN_BLAS_FUNC(tpmv)(char *uplo, char *opa, char *diag, int *n, RealScalar *pap, RealScalar *px, int *incx)
-{
-  typedef void (*functype)(int, const Scalar*, const Scalar*, Scalar*, Scalar);
-  static functype func[16];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<16; ++k)
-      func[k] = 0;
-
-    func[NOTR  | (UP << 2) | (NUNIT << 3)] = (internal::packed_triangular_matrix_vector_product<int,Upper|0,       Scalar,false,Scalar,false,ColMajor>::run);
-    func[TR    | (UP << 2) | (NUNIT << 3)] = (internal::packed_triangular_matrix_vector_product<int,Lower|0,       Scalar,false,Scalar,false,RowMajor>::run);
-    func[ADJ   | (UP << 2) | (NUNIT << 3)] = (internal::packed_triangular_matrix_vector_product<int,Lower|0,       Scalar,Conj, Scalar,false,RowMajor>::run);
-
-    func[NOTR  | (LO << 2) | (NUNIT << 3)] = (internal::packed_triangular_matrix_vector_product<int,Lower|0,       Scalar,false,Scalar,false,ColMajor>::run);
-    func[TR    | (LO << 2) | (NUNIT << 3)] = (internal::packed_triangular_matrix_vector_product<int,Upper|0,       Scalar,false,Scalar,false,RowMajor>::run);
-    func[ADJ   | (LO << 2) | (NUNIT << 3)] = (internal::packed_triangular_matrix_vector_product<int,Upper|0,       Scalar,Conj, Scalar,false,RowMajor>::run);
-
-    func[NOTR  | (UP << 2) | (UNIT  << 3)] = (internal::packed_triangular_matrix_vector_product<int,Upper|UnitDiag,Scalar,false,Scalar,false,ColMajor>::run);
-    func[TR    | (UP << 2) | (UNIT  << 3)] = (internal::packed_triangular_matrix_vector_product<int,Lower|UnitDiag,Scalar,false,Scalar,false,RowMajor>::run);
-    func[ADJ   | (UP << 2) | (UNIT  << 3)] = (internal::packed_triangular_matrix_vector_product<int,Lower|UnitDiag,Scalar,Conj, Scalar,false,RowMajor>::run);
-
-    func[NOTR  | (LO << 2) | (UNIT  << 3)] = (internal::packed_triangular_matrix_vector_product<int,Lower|UnitDiag,Scalar,false,Scalar,false,ColMajor>::run);
-    func[TR    | (LO << 2) | (UNIT  << 3)] = (internal::packed_triangular_matrix_vector_product<int,Upper|UnitDiag,Scalar,false,Scalar,false,RowMajor>::run);
-    func[ADJ   | (LO << 2) | (UNIT  << 3)] = (internal::packed_triangular_matrix_vector_product<int,Upper|UnitDiag,Scalar,Conj, Scalar,false,RowMajor>::run);
-
-    init = true;
-  }
-
-  Scalar* ap = reinterpret_cast<Scalar*>(pap);
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(OP(*opa)==INVALID)                                          info = 2;
-  else if(DIAG(*diag)==INVALID)                                       info = 3;
-  else if(*n<0)                                                       info = 4;
-  else if(*incx==0)                                                   info = 7;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"TPMV ",&info,6);
-
-  if(*n==0)
-    return 1;
-
-  Scalar* actual_x = get_compact_vector(x,*n,*incx);
-  Matrix<Scalar,Dynamic,1> res(*n);
-  res.setZero();
-
-  int code = OP(*opa) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
-  if(code>=16 || func[code]==0)
-    return 0;
-
-  func[code](*n, ap, actual_x, res.data(), Scalar(1));
-
-  copy_back(res.data(),x,*n,*incx);
-  if(actual_x!=x) delete[] actual_x;
-
-  return 1;
-}
-
-/**  DTPSV  solves one of the systems of equations
-  *
-  *     A*x = b,   or   A'*x = b,
-  *
-  *  where b and x are n element vectors and A is an n by n unit, or
-  *  non-unit, upper or lower triangular matrix, supplied in packed form.
-  *
-  *  No test for singularity or near-singularity is included in this
-  *  routine. Such tests must be performed before calling this routine.
-  */
-int EIGEN_BLAS_FUNC(tpsv)(char *uplo, char *opa, char *diag, int *n, RealScalar *pap, RealScalar *px, int *incx)
-{
-  typedef void (*functype)(int, const Scalar*, Scalar*);
-  static functype func[16];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<16; ++k)
-      func[k] = 0;
-
-    func[NOTR  | (UP << 2) | (NUNIT << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|0,       false,ColMajor>::run);
-    func[TR    | (UP << 2) | (NUNIT << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|0,       false,RowMajor>::run);
-    func[ADJ   | (UP << 2) | (NUNIT << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|0,       Conj, RowMajor>::run);
-
-    func[NOTR  | (LO << 2) | (NUNIT << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|0,       false,ColMajor>::run);
-    func[TR    | (LO << 2) | (NUNIT << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|0,       false,RowMajor>::run);
-    func[ADJ   | (LO << 2) | (NUNIT << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|0,       Conj, RowMajor>::run);
-
-    func[NOTR  | (UP << 2) | (UNIT  << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|UnitDiag,false,ColMajor>::run);
-    func[TR    | (UP << 2) | (UNIT  << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|UnitDiag,false,RowMajor>::run);
-    func[ADJ   | (UP << 2) | (UNIT  << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|UnitDiag,Conj, RowMajor>::run);
-
-    func[NOTR  | (LO << 2) | (UNIT  << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Lower|UnitDiag,false,ColMajor>::run);
-    func[TR    | (LO << 2) | (UNIT  << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|UnitDiag,false,RowMajor>::run);
-    func[ADJ   | (LO << 2) | (UNIT  << 3)] = (internal::packed_triangular_solve_vector<Scalar,Scalar,int,OnTheLeft, Upper|UnitDiag,Conj, RowMajor>::run);
-
-    init = true;
-  }
-
-  Scalar* ap = reinterpret_cast<Scalar*>(pap);
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(OP(*opa)==INVALID)                                          info = 2;
-  else if(DIAG(*diag)==INVALID)                                       info = 3;
-  else if(*n<0)                                                       info = 4;
-  else if(*incx==0)                                                   info = 7;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"TPSV ",&info,6);
-
-  Scalar* actual_x = get_compact_vector(x,*n,*incx);
-
-  int code = OP(*opa) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
-  func[code](*n, ap, actual_x);
-
-  if(actual_x!=x) delete[] copy_back(actual_x,x,*n,*incx);
-
-  return 1;
-}
-
diff --git a/resources/3rdparty/eigen/blas/level2_real_impl.h b/resources/3rdparty/eigen/blas/level2_real_impl.h
deleted file mode 100644
index febf08d1f..000000000
--- a/resources/3rdparty/eigen/blas/level2_real_impl.h
+++ /dev/null
@@ -1,370 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "common.h"
-
-// y = alpha*A*x + beta*y
-int EIGEN_BLAS_FUNC(symv) (char *uplo, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *px, int *incx, RealScalar *pbeta, RealScalar *py, int *incy)
-{
-  typedef void (*functype)(int, const Scalar*, int, const Scalar*, int, Scalar*, Scalar);
-  static functype func[2];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<2; ++k)
-      func[k] = 0;
-
-    func[UP] = (internal::selfadjoint_matrix_vector_product<Scalar,int,ColMajor,Upper,false,false>::run);
-    func[LO] = (internal::selfadjoint_matrix_vector_product<Scalar,int,ColMajor,Lower,false,false>::run);
-
-    init = true;
-  }
-
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* y = reinterpret_cast<Scalar*>(py);
-  Scalar alpha  = *reinterpret_cast<Scalar*>(palpha);
-  Scalar beta   = *reinterpret_cast<Scalar*>(pbeta);
-
-  // check arguments
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)        info = 1;
-  else if(*n<0)                   info = 2;
-  else if(*lda<std::max(1,*n))    info = 5;
-  else if(*incx==0)               info = 7;
-  else if(*incy==0)               info = 10;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"SYMV ",&info,6);
-
-  if(*n==0)
-    return 0;
-
-  Scalar* actual_x = get_compact_vector(x,*n,*incx);
-  Scalar* actual_y = get_compact_vector(y,*n,*incy);
-
-  if(beta!=Scalar(1))
-  {
-    if(beta==Scalar(0)) vector(actual_y, *n).setZero();
-    else                vector(actual_y, *n) *= beta;
-  }
-
-  int code = UPLO(*uplo);
-  if(code>=2 || func[code]==0)
-    return 0;
-
-  func[code](*n, a, *lda, actual_x, 1, actual_y, alpha);
-
-  if(actual_x!=x) delete[] actual_x;
-  if(actual_y!=y) delete[] copy_back(actual_y,y,*n,*incy);
-
-  return 1;
-}
-
-// C := alpha*x*x' + C
-int EIGEN_BLAS_FUNC(syr)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *pc, int *ldc)
-{
-
-//   typedef void (*functype)(int, const Scalar *, int, Scalar *, int, Scalar);
-//   static functype func[2];
-
-//   static bool init = false;
-//   if(!init)
-//   {
-//     for(int k=0; k<2; ++k)
-//       func[k] = 0;
-//
-//     func[UP] = (internal::selfadjoint_product<Scalar,ColMajor,ColMajor,false,UpperTriangular>::run);
-//     func[LO] = (internal::selfadjoint_product<Scalar,ColMajor,ColMajor,false,LowerTriangular>::run);
-
-//     init = true;
-//   }
-  typedef void (*functype)(int, Scalar*, int, const Scalar*, Scalar);
-  static functype func[2];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<2; ++k)
-      func[k] = 0;
-
-    func[UP] = (selfadjoint_rank1_update<Scalar,int,ColMajor,Upper,false,Conj>::run);
-    func[LO] = (selfadjoint_rank1_update<Scalar,int,ColMajor,Lower,false,Conj>::run);
-
-    init = true;
-  }
-
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* c = reinterpret_cast<Scalar*>(pc);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(*n<0)                                                       info = 2;
-  else if(*incx==0)                                                   info = 5;
-  else if(*ldc<std::max(1,*n))                                        info = 7;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"SYR  ",&info,6);
-
-  if(*n==0 || alpha==Scalar(0)) return 1;
-
-  // if the increment is not 1, let's copy it to a temporary vector to enable vectorization
-  Scalar* x_cpy = get_compact_vector(x,*n,*incx);
-
-  int code = UPLO(*uplo);
-  if(code>=2 || func[code]==0)
-    return 0;
-
-  func[code](*n, c, *ldc, x_cpy, alpha);
-
-  if(x_cpy!=x)  delete[] x_cpy;
-
-  return 1;
-}
-
-// C := alpha*x*y' + alpha*y*x' + C
-int EIGEN_BLAS_FUNC(syr2)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pc, int *ldc)
-{
-//   typedef void (*functype)(int, const Scalar *, int, const Scalar *, int, Scalar *, int, Scalar);
-//   static functype func[2];
-//
-//   static bool init = false;
-//   if(!init)
-//   {
-//     for(int k=0; k<2; ++k)
-//       func[k] = 0;
-//
-//     func[UP] = (internal::selfadjoint_product<Scalar,ColMajor,ColMajor,false,UpperTriangular>::run);
-//     func[LO] = (internal::selfadjoint_product<Scalar,ColMajor,ColMajor,false,LowerTriangular>::run);
-//
-//     init = true;
-//   }
-  typedef void (*functype)(int, Scalar*, int, const Scalar*, const Scalar*, Scalar);
-  static functype func[2];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<2; ++k)
-      func[k] = 0;
-
-    func[UP] = (internal::rank2_update_selector<Scalar,int,Upper>::run);
-    func[LO] = (internal::rank2_update_selector<Scalar,int,Lower>::run);
-
-    init = true;
-  }
-
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* y = reinterpret_cast<Scalar*>(py);
-  Scalar* c = reinterpret_cast<Scalar*>(pc);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(*n<0)                                                       info = 2;
-  else if(*incx==0)                                                   info = 5;
-  else if(*incy==0)                                                   info = 7;
-  else if(*ldc<std::max(1,*n))                                        info = 9;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"SYR2 ",&info,6);
-
-  if(alpha==Scalar(0))
-    return 1;
-
-  Scalar* x_cpy = get_compact_vector(x,*n,*incx);
-  Scalar* y_cpy = get_compact_vector(y,*n,*incy);
-  
-  int code = UPLO(*uplo);
-  if(code>=2 || func[code]==0)
-    return 0;
-
-  func[code](*n, c, *ldc, x_cpy, y_cpy, alpha);
-
-  if(x_cpy!=x)  delete[] x_cpy;
-  if(y_cpy!=y)  delete[] y_cpy;
-
-//   int code = UPLO(*uplo);
-//   if(code>=2 || func[code]==0)
-//     return 0;
-
-//   func[code](*n, a, *inca, b, *incb, c, *ldc, alpha);
-  return 1;
-}
-
-/**  DSBMV  performs the matrix-vector  operation
-  *
-  *     y := alpha*A*x + beta*y,
-  *
-  *  where alpha and beta are scalars, x and y are n element vectors and
-  *  A is an n by n symmetric band matrix, with k super-diagonals.
-  */
-// int EIGEN_BLAS_FUNC(sbmv)( char *uplo, int *n, int *k, RealScalar *alpha, RealScalar *a, int *lda,
-//                            RealScalar *x, int *incx, RealScalar *beta, RealScalar *y, int *incy)
-// {
-//   return 1;
-// }
-
-
-/**  DSPMV  performs the matrix-vector operation
-  *
-  *     y := alpha*A*x + beta*y,
-  *
-  *  where alpha and beta are scalars, x and y are n element vectors and
-  *  A is an n by n symmetric matrix, supplied in packed form.
-  *
-  */
-// int EIGEN_BLAS_FUNC(spmv)(char *uplo, int *n, RealScalar *alpha, RealScalar *ap, RealScalar *x, int *incx, RealScalar *beta, RealScalar *y, int *incy)
-// {
-//   return 1;
-// }
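-
-#if 0
-// Illustrative sketch only: a naive reference for the packed symmetric product,
-// written here for column-major packed lower storage (uplo=='L'), where A(i,j)
-// with i >= j is held at ap[(i-j) + j*(2*n-j+1)/2]; the upper-storage formula is
-// ap[i + j*(j+1)/2].  The helper name ref_dspmv_lower is made up here.
-void ref_dspmv_lower(int n, double alpha, const double* ap, const double* x, double beta, double* y)
-{
-  for(int i=0; i<n; ++i) y[i] *= beta;                 // y := beta*y
-  for(int j=0; j<n; ++j)
-  {
-    const double* col = ap + j*(2*n-j+1)/2;            // column j starts at A(j,j)
-    y[j] += alpha * col[0] * x[j];                     // diagonal
-    for(int i=j+1; i<n; ++i)                           // strictly lower part, used twice by symmetry
-    {
-      y[i] += alpha * col[i-j] * x[j];                 // A(i,j)
-      y[j] += alpha * col[i-j] * x[i];                 // A(j,i) = A(i,j)
-    }
-  }
-}
-#endif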
-
-/**  DSPR    performs the symmetric rank 1 operation
-  *
-  *     A := alpha*x*x' + A,
-  *
-  *  where alpha is a real scalar, x is an n element vector and A is an
-  *  n by n symmetric matrix, supplied in packed form.
-  */
-int EIGEN_BLAS_FUNC(spr)(char *uplo, int *n, Scalar *palpha, Scalar *px, int *incx, Scalar *pap)
-{
-  typedef void (*functype)(int, Scalar*, const Scalar*, Scalar);
-  static functype func[2];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<2; ++k)
-      func[k] = 0;
-
-    func[UP] = (internal::selfadjoint_packed_rank1_update<Scalar,int,ColMajor,Upper,false,false>::run);
-    func[LO] = (internal::selfadjoint_packed_rank1_update<Scalar,int,ColMajor,Lower,false,false>::run);
-
-    init = true;
-  }
-
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* ap = reinterpret_cast<Scalar*>(pap);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(*n<0)                                                       info = 2;
-  else if(*incx==0)                                                   info = 5;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"SPR  ",&info,6);
-
-  if(alpha==Scalar(0))
-    return 1;
-
-  Scalar* x_cpy = get_compact_vector(x, *n, *incx);
-
-  int code = UPLO(*uplo);
-  if(code>=2 || func[code]==0)
-    return 0;
-
-  func[code](*n, ap, x_cpy, alpha);
-
-  if(x_cpy!=x)  delete[] x_cpy;
-
-  return 1;
-}
-
-/**  DSPR2  performs the symmetric rank 2 operation
-  *
-  *     A := alpha*x*y' + alpha*y*x' + A,
-  *
-  *  where alpha is a scalar, x and y are n element vectors and A is an
-  *  n by n symmetric matrix, supplied in packed form.
-  */
-int EIGEN_BLAS_FUNC(spr2)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pap)
-{
-  typedef void (*functype)(int, Scalar*, const Scalar*, const Scalar*, Scalar);
-  static functype func[2];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<2; ++k)
-      func[k] = 0;
-
-    func[UP] = (internal::packed_rank2_update_selector<Scalar,int,Upper>::run);
-    func[LO] = (internal::packed_rank2_update_selector<Scalar,int,Lower>::run);
-
-    init = true;
-  }
-
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* y = reinterpret_cast<Scalar*>(py);
-  Scalar* ap = reinterpret_cast<Scalar*>(pap);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(*n<0)                                                       info = 2;
-  else if(*incx==0)                                                   info = 5;
-  else if(*incy==0)                                                   info = 7;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"SPR2 ",&info,6);
-
-  if(alpha==Scalar(0))
-    return 1;
-
-  Scalar* x_cpy = get_compact_vector(x, *n, *incx);
-  Scalar* y_cpy = get_compact_vector(y, *n, *incy);
-
-  int code = UPLO(*uplo);
-  if(code>=2 || func[code]==0)
-    return 0;
-
-  func[code](*n, ap, x_cpy, y_cpy, alpha);
-
-  if(x_cpy!=x)  delete[] x_cpy;
-  if(y_cpy!=y)  delete[] y_cpy;
-
-  return 1;
-}
-
-/**  DGER   performs the rank 1 operation
-  *
-  *     A := alpha*x*y' + A,
-  *
-  *  where alpha is a scalar, x is an m element vector, y is an n element
-  *  vector and A is an m by n matrix.
-  */
-int EIGEN_BLAS_FUNC(ger)(int *m, int *n, Scalar *palpha, Scalar *px, int *incx, Scalar *py, int *incy, Scalar *pa, int *lda)
-{
-  Scalar* x = reinterpret_cast<Scalar*>(px);
-  Scalar* y = reinterpret_cast<Scalar*>(py);
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-
-  int info = 0;
-       if(*m<0)                                                       info = 1;
-  else if(*n<0)                                                       info = 2;
-  else if(*incx==0)                                                   info = 5;
-  else if(*incy==0)                                                   info = 7;
-  else if(*lda<std::max(1,*m))                                        info = 9;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"GER  ",&info,6);
-
-  if(alpha==Scalar(0))
-    return 1;
-
-  Scalar* x_cpy = get_compact_vector(x,*m,*incx);
-  Scalar* y_cpy = get_compact_vector(y,*n,*incy);
-
-  internal::general_rank1_update<Scalar,int,ColMajor,false,false>::run(*m, *n, a, *lda, x_cpy, y_cpy, alpha);
-
-  if(x_cpy!=x)  delete[] x_cpy;
-  if(y_cpy!=y)  delete[] y_cpy;
-
-  return 1;
-}
-
-
diff --git a/resources/3rdparty/eigen/blas/level3_impl.h b/resources/3rdparty/eigen/blas/level3_impl.h
deleted file mode 100644
index 84c9f4f2b..000000000
--- a/resources/3rdparty/eigen/blas/level3_impl.h
+++ /dev/null
@@ -1,634 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "common.h"
-
-int EIGEN_BLAS_FUNC(gemm)(char *opa, char *opb, int *m, int *n, int *k, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb, RealScalar *pbeta, RealScalar *pc, int *ldc)
-{
-//   std::cerr << "in gemm " << *opa << " " << *opb << " " << *m << " " << *n << " " << *k << " " << *lda << " " << *ldb << " " << *ldc << " " << *palpha << " " << *pbeta << "\n";
-  typedef void (*functype)(DenseIndex, DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, Scalar, internal::level3_blocking<Scalar,Scalar>&, Eigen::internal::GemmParallelInfo<DenseIndex>*);
-  static functype func[12];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<12; ++k)
-      func[k] = 0;
-    func[NOTR  | (NOTR << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,ColMajor,false,Scalar,ColMajor,false,ColMajor>::run);
-    func[TR    | (NOTR << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,false,Scalar,ColMajor,false,ColMajor>::run);
-    func[ADJ   | (NOTR << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,ColMajor,false,ColMajor>::run);
-    func[NOTR  | (TR   << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,false,ColMajor>::run);
-    func[TR    | (TR   << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,false,Scalar,RowMajor,false,ColMajor>::run);
-    func[ADJ   | (TR   << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,RowMajor,false,ColMajor>::run);
-    func[NOTR  | (ADJ  << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,Conj, ColMajor>::run);
-    func[TR    | (ADJ  << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,false,Scalar,RowMajor,Conj, ColMajor>::run);
-    func[ADJ   | (ADJ  << 2)] = (internal::general_matrix_matrix_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,RowMajor,Conj, ColMajor>::run);
-    init = true;
-  }
-
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* b = reinterpret_cast<Scalar*>(pb);
-  Scalar* c = reinterpret_cast<Scalar*>(pc);
-  Scalar alpha  = *reinterpret_cast<Scalar*>(palpha);
-  Scalar beta   = *reinterpret_cast<Scalar*>(pbeta);
-
-  int info = 0;
-  if(OP(*opa)==INVALID)                                               info = 1;
-  else if(OP(*opb)==INVALID)                                          info = 2;
-  else if(*m<0)                                                       info = 3;
-  else if(*n<0)                                                       info = 4;
-  else if(*k<0)                                                       info = 5;
-  else if(*lda<std::max(1,(OP(*opa)==NOTR)?*m:*k))                    info = 8;
-  else if(*ldb<std::max(1,(OP(*opb)==NOTR)?*k:*n))                    info = 10;
-  else if(*ldc<std::max(1,*m))                                        info = 13;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"GEMM ",&info,6);
-
-  if(beta!=Scalar(1))
-  {
-    if(beta==Scalar(0)) matrix(c, *m, *n, *ldc).setZero();
-    else                matrix(c, *m, *n, *ldc) *= beta;
-  }
-
-  internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic> blocking(*m,*n,*k);
-
-  int code = OP(*opa) | (OP(*opb) << 2);
-  func[code](*m, *n, *k, a, *lda, b, *ldb, c, *ldc, alpha, blocking, 0);
-  return 0;
-}
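-
-#if 0
-// Illustrative usage sketch only.  Assuming the double-precision build exports
-// the conventional Fortran symbol dgemm_ (the decorated name depends on the
-// platform and build), a C++ caller computes C := alpha*op(A)*op(B) + beta*C
-// like this; every argument is passed by pointer and matrices are column-major.
-extern "C" int dgemm_(char*,char*,int*,int*,int*,double*,double*,int*,double*,int*,double*,double*,int*);
-
-void example_dgemm_call()
-{
-  char opN = 'N';
-  int m = 2, n = 2, k = 2, lda = 2, ldb = 2, ldc = 2;
-  double alpha = 1.0, beta = 0.0;
-  double A[4] = {1,2,3,4};                             // column-major 2x2: [1 3; 2 4]
-  double B[4] = {5,6,7,8};
-  double C[4] = {0,0,0,0};
-  dgemm_(&opN, &opN, &m, &n, &k, &alpha, A, &lda, B, &ldb, &beta, C, &ldc);
-}
-#endif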
-
-int EIGEN_BLAS_FUNC(trsm)(char *side, char *uplo, char *opa, char *diag, int *m, int *n, RealScalar *palpha,  RealScalar *pa, int *lda, RealScalar *pb, int *ldb)
-{
-//   std::cerr << "in trsm " << *side << " " << *uplo << " " << *opa << " " << *diag << " " << *m << "," << *n << " " << *palpha << " " << *lda << " " << *ldb<< "\n";
-  typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, internal::level3_blocking<Scalar,Scalar>&);
-  static functype func[32];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<32; ++k)
-      func[k] = 0;
-
-    func[NOTR  | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|0,          false,ColMajor,ColMajor>::run);
-    func[TR    | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|0,          false,RowMajor,ColMajor>::run);
-    func[ADJ   | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|0,          Conj, RowMajor,ColMajor>::run);
-
-    func[NOTR  | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|0,          false,ColMajor,ColMajor>::run);
-    func[TR    | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|0,          false,RowMajor,ColMajor>::run);
-    func[ADJ   | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|0,          Conj, RowMajor,ColMajor>::run);
-
-    func[NOTR  | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|0,          false,ColMajor,ColMajor>::run);
-    func[TR    | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|0,          false,RowMajor,ColMajor>::run);
-    func[ADJ   | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|0,          Conj, RowMajor,ColMajor>::run);
-
-    func[NOTR  | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|0,          false,ColMajor,ColMajor>::run);
-    func[TR    | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|0,          false,RowMajor,ColMajor>::run);
-    func[ADJ   | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|0,          Conj, RowMajor,ColMajor>::run);
-
-
-    func[NOTR  | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|UnitDiag,false,ColMajor,ColMajor>::run);
-    func[TR    | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|UnitDiag,false,RowMajor,ColMajor>::run);
-    func[ADJ   | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|UnitDiag,Conj, RowMajor,ColMajor>::run);
-
-    func[NOTR  | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|UnitDiag,false,ColMajor,ColMajor>::run);
-    func[TR    | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|UnitDiag,false,RowMajor,ColMajor>::run);
-    func[ADJ   | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|UnitDiag,Conj, RowMajor,ColMajor>::run);
-
-    func[NOTR  | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Lower|UnitDiag,false,ColMajor,ColMajor>::run);
-    func[TR    | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|UnitDiag,false,RowMajor,ColMajor>::run);
-    func[ADJ   | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheLeft, Upper|UnitDiag,Conj, RowMajor,ColMajor>::run);
-
-    func[NOTR  | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Lower|UnitDiag,false,ColMajor,ColMajor>::run);
-    func[TR    | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|UnitDiag,false,RowMajor,ColMajor>::run);
-    func[ADJ   | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::triangular_solve_matrix<Scalar,DenseIndex,OnTheRight,Upper|UnitDiag,Conj, RowMajor,ColMajor>::run);
-
-    init = true;
-  }
-
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* b = reinterpret_cast<Scalar*>(pb);
-  Scalar  alpha = *reinterpret_cast<Scalar*>(palpha);
-
-  int info = 0;
-  if(SIDE(*side)==INVALID)                                            info = 1;
-  else if(UPLO(*uplo)==INVALID)                                       info = 2;
-  else if(OP(*opa)==INVALID)                                          info = 3;
-  else if(DIAG(*diag)==INVALID)                                       info = 4;
-  else if(*m<0)                                                       info = 5;
-  else if(*n<0)                                                       info = 6;
-  else if(*lda<std::max(1,(SIDE(*side)==LEFT)?*m:*n))                 info = 9;
-  else if(*ldb<std::max(1,*m))                                        info = 11;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"TRSM ",&info,6);
-
-  int code = OP(*opa) | (SIDE(*side) << 2) | (UPLO(*uplo) << 3) | (DIAG(*diag) << 4);
-  
-  if(SIDE(*side)==LEFT)
-  {
-    internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic,4> blocking(*m,*n,*m);
-    func[code](*m, *n, a, *lda, b, *ldb, blocking);
-  }
-  else
-  {
-    internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic,4> blocking(*m,*n,*n);
-    func[code](*n, *m, a, *lda, b, *ldb, blocking);
-  }
-
-  if(alpha!=Scalar(1))
-    matrix(b,*m,*n,*ldb) *= alpha;
-
-  return 0;
-}
-
-
-// b = alpha*op(a)*b  for side = 'L'or'l'
-// b = alpha*b*op(a)  for side = 'R'or'r'
-int EIGEN_BLAS_FUNC(trmm)(char *side, char *uplo, char *opa, char *diag, int *m, int *n, RealScalar *palpha,  RealScalar *pa, int *lda, RealScalar *pb, int *ldb)
-{
-//   std::cerr << "in trmm " << *side << " " << *uplo << " " << *opa << " " << *diag << " " << *m << " " << *n << " " << *lda << " " << *ldb << " " << *palpha << "\n";
-  typedef void (*functype)(DenseIndex, DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, Scalar, internal::level3_blocking<Scalar,Scalar>&);
-  static functype func[32];
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<32; ++k)
-      func[k] = 0;
-
-    func[NOTR  | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          true, ColMajor,false,ColMajor,false,ColMajor>::run);
-    func[TR    | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          true, RowMajor,false,ColMajor,false,ColMajor>::run);
-    func[ADJ   | (LEFT  << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          true, RowMajor,Conj, ColMajor,false,ColMajor>::run);
-
-    func[NOTR  | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          false,ColMajor,false,ColMajor,false,ColMajor>::run);
-    func[TR    | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          false,ColMajor,false,RowMajor,false,ColMajor>::run);
-    func[ADJ   | (RIGHT << 2) | (UP << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          false,ColMajor,false,RowMajor,Conj, ColMajor>::run);
-
-    func[NOTR  | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          true, ColMajor,false,ColMajor,false,ColMajor>::run);
-    func[TR    | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          true, RowMajor,false,ColMajor,false,ColMajor>::run);
-    func[ADJ   | (LEFT  << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          true, RowMajor,Conj, ColMajor,false,ColMajor>::run);
-
-    func[NOTR  | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|0,          false,ColMajor,false,ColMajor,false,ColMajor>::run);
-    func[TR    | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          false,ColMajor,false,RowMajor,false,ColMajor>::run);
-    func[ADJ   | (RIGHT << 2) | (LO << 3) | (NUNIT << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|0,          false,ColMajor,false,RowMajor,Conj, ColMajor>::run);
-
-    func[NOTR  | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,true, ColMajor,false,ColMajor,false,ColMajor>::run);
-    func[TR    | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,true, RowMajor,false,ColMajor,false,ColMajor>::run);
-    func[ADJ   | (LEFT  << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,true, RowMajor,Conj, ColMajor,false,ColMajor>::run);
-
-    func[NOTR  | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,false,ColMajor,false,ColMajor,false,ColMajor>::run);
-    func[TR    | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,false,ColMajor,false,RowMajor,false,ColMajor>::run);
-    func[ADJ   | (RIGHT << 2) | (UP << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,false,ColMajor,false,RowMajor,Conj, ColMajor>::run);
-
-    func[NOTR  | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,true, ColMajor,false,ColMajor,false,ColMajor>::run);
-    func[TR    | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,true, RowMajor,false,ColMajor,false,ColMajor>::run);
-    func[ADJ   | (LEFT  << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,true, RowMajor,Conj, ColMajor,false,ColMajor>::run);
-
-    func[NOTR  | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Lower|UnitDiag,false,ColMajor,false,ColMajor,false,ColMajor>::run);
-    func[TR    | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,false,ColMajor,false,RowMajor,false,ColMajor>::run);
-    func[ADJ   | (RIGHT << 2) | (LO << 3) | (UNIT  << 4)] = (internal::product_triangular_matrix_matrix<Scalar,DenseIndex,Upper|UnitDiag,false,ColMajor,false,RowMajor,Conj, ColMajor>::run);
-
-    init = true;
-  }
-
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* b = reinterpret_cast<Scalar*>(pb);
-  Scalar  alpha = *reinterpret_cast<Scalar*>(palpha);
-
-  int info = 0;
-  if(SIDE(*side)==INVALID)                                            info = 1;
-  else if(UPLO(*uplo)==INVALID)                                       info = 2;
-  else if(OP(*opa)==INVALID)                                          info = 3;
-  else if(DIAG(*diag)==INVALID)                                       info = 4;
-  else if(*m<0)                                                       info = 5;
-  else if(*n<0)                                                       info = 6;
-  else if(*lda<std::max(1,(SIDE(*side)==LEFT)?*m:*n))                 info = 9;
-  else if(*ldb<std::max(1,*m))                                        info = 11;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"TRMM ",&info,6);
-
-  int code = OP(*opa) | (SIDE(*side) << 2) | (UPLO(*uplo) << 3) | (DIAG(*diag) << 4);
-
-  if(*m==0 || *n==0)
-    return 1;
-
-  // FIXME find a way to avoid this copy
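-  // TRMM overwrites b in place (b = alpha*op(a)*b or alpha*b*op(a)), so work
-  // from a copy of b and accumulate the triangular product into the zeroed b.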
-  Matrix<Scalar,Dynamic,Dynamic,ColMajor> tmp = matrix(b,*m,*n,*ldb);
-  matrix(b,*m,*n,*ldb).setZero();
-
-  if(SIDE(*side)==LEFT)
-  {
-    internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic,4> blocking(*m,*n,*m);
-    func[code](*m, *n, *m, a, *lda, tmp.data(), tmp.outerStride(), b, *ldb, alpha, blocking);
-  }
-  else
-  {
-    internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic,4> blocking(*m,*n,*n);
-    func[code](*m, *n, *n, tmp.data(), tmp.outerStride(), a, *lda, b, *ldb, alpha, blocking);
-  }
-  return 1;
-}
-
-// c = alpha*a*b + beta*c  for side = 'L'or'l'
-// c = alpha*b*a + beta*c  for side = 'R'or'r'
-int EIGEN_BLAS_FUNC(symm)(char *side, char *uplo, int *m, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb, RealScalar *pbeta, RealScalar *pc, int *ldc)
-{
-//   std::cerr << "in symm " << *side << " " << *uplo << " " << *m << "x" << *n << " lda:" << *lda << " ldb:" << *ldb << " ldc:" << *ldc << " alpha:" << *palpha << " beta:" << *pbeta << "\n";
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* b = reinterpret_cast<Scalar*>(pb);
-  Scalar* c = reinterpret_cast<Scalar*>(pc);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-  Scalar beta  = *reinterpret_cast<Scalar*>(pbeta);
-
-  int info = 0;
-  if(SIDE(*side)==INVALID)                                            info = 1;
-  else if(UPLO(*uplo)==INVALID)                                       info = 2;
-  else if(*m<0)                                                       info = 3;
-  else if(*n<0)                                                       info = 4;
-  else if(*lda<std::max(1,(SIDE(*side)==LEFT)?*m:*n))                 info = 7;
-  else if(*ldb<std::max(1,*m))                                        info = 9;
-  else if(*ldc<std::max(1,*m))                                        info = 12;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"SYMM ",&info,6);
-
-  if(beta!=Scalar(1))
-  {
-    if(beta==Scalar(0)) matrix(c, *m, *n, *ldc).setZero();
-    else                matrix(c, *m, *n, *ldc) *= beta;
-  }
-
-  if(*m==0 || *n==0)
-  {
-    return 1;
-  }
-
-  #if ISCOMPLEX
-  // FIXME add support for symmetric complex matrix
-  int size = (SIDE(*side)==LEFT) ? (*m) : (*n);
-  Matrix<Scalar,Dynamic,Dynamic,ColMajor> matA(size,size);
-  if(UPLO(*uplo)==UP)
-  {
-    matA.triangularView<Upper>() = matrix(a,size,size,*lda);
-    matA.triangularView<Lower>() = matrix(a,size,size,*lda).transpose();
-  }
-  else if(UPLO(*uplo)==LO)
-  {
-    matA.triangularView<Lower>() = matrix(a,size,size,*lda);
-    matA.triangularView<Upper>() = matrix(a,size,size,*lda).transpose();
-  }
-  if(SIDE(*side)==LEFT)
-    matrix(c, *m, *n, *ldc) += alpha * matA * matrix(b, *m, *n, *ldb);
-  else if(SIDE(*side)==RIGHT)
-    matrix(c, *m, *n, *ldc) += alpha * matrix(b, *m, *n, *ldb) * matA;
-  #else
-  if(SIDE(*side)==LEFT)
-    if(UPLO(*uplo)==UP)       internal::product_selfadjoint_matrix<Scalar, DenseIndex, RowMajor,true,false, ColMajor,false,false, ColMajor>::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha);
-    else if(UPLO(*uplo)==LO)  internal::product_selfadjoint_matrix<Scalar, DenseIndex, ColMajor,true,false, ColMajor,false,false, ColMajor>::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha);
-    else                      return 0;
-  else if(SIDE(*side)==RIGHT)
-    if(UPLO(*uplo)==UP)       internal::product_selfadjoint_matrix<Scalar, DenseIndex, ColMajor,false,false, RowMajor,true,false, ColMajor>::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha);
-    else if(UPLO(*uplo)==LO)  internal::product_selfadjoint_matrix<Scalar, DenseIndex, ColMajor,false,false, ColMajor,true,false, ColMajor>::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha);
-    else                      return 0;
-  else
-    return 0;
-  #endif
-
-  return 0;
-}
-
-// c = alpha*a*a' + beta*c  for op = 'N'or'n'
-// c = alpha*a'*a + beta*c  for op = 'T'or't','C'or'c'
-int EIGEN_BLAS_FUNC(syrk)(char *uplo, char *op, int *n, int *k, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pbeta, RealScalar *pc, int *ldc)
-{
-//   std::cerr << "in syrk " << *uplo << " " << *op << " " << *n << " " << *k << " " << *palpha << " " << *lda << " " << *pbeta << " " << *ldc << "\n";
-  #if !ISCOMPLEX
-  typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, Scalar);
-  static functype func[8];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<8; ++k)
-      func[k] = 0;
-
-    func[NOTR  | (UP << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,ColMajor,Conj, Upper>::run);
-    func[TR    | (UP << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,false,Scalar,ColMajor,ColMajor,Conj, Upper>::run);
-    func[ADJ   | (UP << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,ColMajor,ColMajor,false,Upper>::run);
-
-    func[NOTR  | (LO << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,ColMajor,Conj, Lower>::run);
-    func[TR    | (LO << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,false,Scalar,ColMajor,ColMajor,Conj, Lower>::run);
-    func[ADJ   | (LO << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,ColMajor,ColMajor,false,Lower>::run);
-
-    init = true;
-  }
-  #endif
-
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* c = reinterpret_cast<Scalar*>(pc);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-  Scalar beta  = *reinterpret_cast<Scalar*>(pbeta);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(OP(*op)==INVALID)                                           info = 2;
-  else if(*n<0)                                                       info = 3;
-  else if(*k<0)                                                       info = 4;
-  else if(*lda<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 7;
-  else if(*ldc<std::max(1,*n))                                        info = 10;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"SYRK ",&info,6);
-
-  if(beta!=Scalar(1))
-  {
-    if(UPLO(*uplo)==UP)
-      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Upper>().setZero();
-      else                matrix(c, *n, *n, *ldc).triangularView<Upper>() *= beta;
-    else
-      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Lower>().setZero();
-      else                matrix(c, *n, *n, *ldc).triangularView<Lower>() *= beta;
-  }
-
-  #if ISCOMPLEX
-  // FIXME add support for symmetric complex matrix
-  if(UPLO(*uplo)==UP)
-  {
-    if(OP(*op)==NOTR)
-      matrix(c, *n, *n, *ldc).triangularView<Upper>() += alpha * matrix(a,*n,*k,*lda) * matrix(a,*n,*k,*lda).transpose();
-    else
-      matrix(c, *n, *n, *ldc).triangularView<Upper>() += alpha * matrix(a,*k,*n,*lda).transpose() * matrix(a,*k,*n,*lda);
-  }
-  else
-  {
-    if(OP(*op)==NOTR)
-      matrix(c, *n, *n, *ldc).triangularView<Lower>() += alpha * matrix(a,*n,*k,*lda) * matrix(a,*n,*k,*lda).transpose();
-    else
-      matrix(c, *n, *n, *ldc).triangularView<Lower>() += alpha * matrix(a,*k,*n,*lda).transpose() * matrix(a,*k,*n,*lda);
-  }
-  #else
-  int code = OP(*op) | (UPLO(*uplo) << 2);
-  func[code](*n, *k, a, *lda, a, *lda, c, *ldc, alpha);
-  #endif
-
-  return 0;
-}
-
-// c = alpha*a*b' + alpha*b*a' + beta*c  for op = 'N'or'n'
-// c = alpha*a'*b + alpha*b'*a + beta*c  for op = 'T'or't'
-int EIGEN_BLAS_FUNC(syr2k)(char *uplo, char *op, int *n, int *k, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb, RealScalar *pbeta, RealScalar *pc, int *ldc)
-{
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* b = reinterpret_cast<Scalar*>(pb);
-  Scalar* c = reinterpret_cast<Scalar*>(pc);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-  Scalar beta  = *reinterpret_cast<Scalar*>(pbeta);
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if(OP(*op)==INVALID)                                           info = 2;
-  else if(*n<0)                                                       info = 3;
-  else if(*k<0)                                                       info = 4;
-  else if(*lda<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 7;
-  else if(*ldb<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 9;
-  else if(*ldc<std::max(1,*n))                                        info = 12;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"SYR2K",&info,6);
-
-  if(beta!=Scalar(1))
-  {
-    if(UPLO(*uplo)==UP)
-      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Upper>().setZero();
-      else                matrix(c, *n, *n, *ldc).triangularView<Upper>() *= beta;
-    else
-      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Lower>().setZero();
-      else                matrix(c, *n, *n, *ldc).triangularView<Lower>() *= beta;
-  }
-
-  if(*k==0)
-    return 1;
-
-  if(OP(*op)==NOTR)
-  {
-    if(UPLO(*uplo)==UP)
-    {
-      matrix(c, *n, *n, *ldc).triangularView<Upper>()
-        += alpha *matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).transpose()
-        +  alpha*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).transpose();
-    }
-    else if(UPLO(*uplo)==LO)
-      matrix(c, *n, *n, *ldc).triangularView<Lower>()
-        += alpha*matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).transpose()
-        +  alpha*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).transpose();
-  }
-  else if(OP(*op)==TR || OP(*op)==ADJ)
-  {
-    if(UPLO(*uplo)==UP)
-      matrix(c, *n, *n, *ldc).triangularView<Upper>()
-        += alpha*matrix(a, *k, *n, *lda).transpose()*matrix(b, *k, *n, *ldb)
-        +  alpha*matrix(b, *k, *n, *ldb).transpose()*matrix(a, *k, *n, *lda);
-    else if(UPLO(*uplo)==LO)
-      matrix(c, *n, *n, *ldc).triangularView<Lower>()
-        += alpha*matrix(a, *k, *n, *lda).transpose()*matrix(b, *k, *n, *ldb)
-        +  alpha*matrix(b, *k, *n, *ldb).transpose()*matrix(a, *k, *n, *lda);
-  }
-
-  return 0;
-}
-
-
-#if ISCOMPLEX
-
-// c = alpha*a*b + beta*c  for side = 'L'or'l'
-// c = alpha*b*a + beta*c  for side = 'R'or'r'
-int EIGEN_BLAS_FUNC(hemm)(char *side, char *uplo, int *m, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb, RealScalar *pbeta, RealScalar *pc, int *ldc)
-{
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* b = reinterpret_cast<Scalar*>(pb);
-  Scalar* c = reinterpret_cast<Scalar*>(pc);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-  Scalar beta  = *reinterpret_cast<Scalar*>(pbeta);
-
-//   std::cerr << "in hemm " << *side << " " << *uplo << " " << *m << " " << *n << " " << alpha << " " << *lda << " " << beta << " " << *ldc << "\n";
-
-  int info = 0;
-  if(SIDE(*side)==INVALID)                                            info = 1;
-  else if(UPLO(*uplo)==INVALID)                                       info = 2;
-  else if(*m<0)                                                       info = 3;
-  else if(*n<0)                                                       info = 4;
-  else if(*lda<std::max(1,(SIDE(*side)==LEFT)?*m:*n))                 info = 7;
-  else if(*ldb<std::max(1,*m))                                        info = 9;
-  else if(*ldc<std::max(1,*m))                                        info = 12;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"HEMM ",&info,6);
-
-  if(beta==Scalar(0))       matrix(c, *m, *n, *ldc).setZero();
-  else if(beta!=Scalar(1))  matrix(c, *m, *n, *ldc) *= beta;
-
-  if(*m==0 || *n==0)
-  {
-    return 1;
-  }
-
-  if(SIDE(*side)==LEFT)
-  {
-    if(UPLO(*uplo)==UP)       internal::product_selfadjoint_matrix<Scalar,DenseIndex,RowMajor,true,Conj,  ColMajor,false,false, ColMajor>
-                                ::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha);
-    else if(UPLO(*uplo)==LO)  internal::product_selfadjoint_matrix<Scalar,DenseIndex,ColMajor,true,false, ColMajor,false,false, ColMajor>
-                                ::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha);
-    else                      return 0;
-  }
-  else if(SIDE(*side)==RIGHT)
-  {
-    if(UPLO(*uplo)==UP)       matrix(c,*m,*n,*ldc) += alpha * matrix(b,*m,*n,*ldb) * matrix(a,*n,*n,*lda).selfadjointView<Upper>();/*internal::product_selfadjoint_matrix<Scalar,DenseIndex,ColMajor,false,false, RowMajor,true,Conj,  ColMajor>
-                                ::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha);*/
-    else if(UPLO(*uplo)==LO)  internal::product_selfadjoint_matrix<Scalar,DenseIndex,ColMajor,false,false, ColMajor,true,false, ColMajor>
-                                ::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha);
-    else                      return 0;
-  }
-  else
-  {
-    return 0;
-  }
-
-  return 0;
-}
-
-// c = alpha*a*conj(a') + beta*c  for op = 'N'or'n'
-// c = alpha*conj(a')*a + beta*c  for op  = 'C'or'c'
-int EIGEN_BLAS_FUNC(herk)(char *uplo, char *op, int *n, int *k, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pbeta, RealScalar *pc, int *ldc)
-{
-  typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, Scalar);
-  static functype func[8];
-
-  static bool init = false;
-  if(!init)
-  {
-    for(int k=0; k<8; ++k)
-      func[k] = 0;
-
-    func[NOTR  | (UP << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,Conj, ColMajor,Upper>::run);
-    func[ADJ   | (UP << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,ColMajor,false,ColMajor,Upper>::run);
-
-    func[NOTR  | (LO << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,ColMajor,false,Scalar,RowMajor,Conj, ColMajor,Lower>::run);
-    func[ADJ   | (LO << 2)] = (internal::general_matrix_matrix_triangular_product<DenseIndex,Scalar,RowMajor,Conj, Scalar,ColMajor,false,ColMajor,Lower>::run);
-
-    init = true;
-  }
-
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* c = reinterpret_cast<Scalar*>(pc);
-  RealScalar alpha = *palpha;
-  RealScalar beta  = *pbeta;
-
-//   std::cerr << "in herk " << *uplo << " " << *op << " " << *n << " " << *k << " " << alpha << " " << *lda << " " << beta << " " << *ldc << "\n";
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if((OP(*op)==INVALID) || (OP(*op)==TR))                        info = 2;
-  else if(*n<0)                                                       info = 3;
-  else if(*k<0)                                                       info = 4;
-  else if(*lda<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 7;
-  else if(*ldc<std::max(1,*n))                                        info = 10;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"HERK ",&info,6);
-
-  int code = OP(*op) | (UPLO(*uplo) << 2);
-
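-  // Keep the diagonal of the Hermitian result real: when scaling by beta, only
-  // the real part of the diagonal is scaled and its imaginary part is cleared
-  // (and cleared again after the rank-k update below).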
-  if(beta!=RealScalar(1))
-  {
-    if(UPLO(*uplo)==UP)
-      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Upper>().setZero();
-      else                matrix(c, *n, *n, *ldc).triangularView<StrictlyUpper>() *= beta;
-    else
-      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Lower>().setZero();
-      else                matrix(c, *n, *n, *ldc).triangularView<StrictlyLower>() *= beta;
-  
-    if(beta!=Scalar(0))
-    {
-      matrix(c, *n, *n, *ldc).diagonal().real() *= beta;
-      matrix(c, *n, *n, *ldc).diagonal().imag().setZero();
-    }
-  }
-
-  if(*k>0 && alpha!=RealScalar(0))
-  {
-    func[code](*n, *k, a, *lda, a, *lda, c, *ldc, alpha);
-    matrix(c, *n, *n, *ldc).diagonal().imag().setZero();
-  }
-  return 0;
-}
-
-// c = alpha*a*conj(b') + conj(alpha)*b*conj(a') + beta*c,  for op = 'N'or'n'
-// c = alpha*conj(a')*b + conj(alpha)*conj(b')*a + beta*c,  for op = 'C'or'c'
-int EIGEN_BLAS_FUNC(her2k)(char *uplo, char *op, int *n, int *k, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb, RealScalar *pbeta, RealScalar *pc, int *ldc)
-{
-  Scalar* a = reinterpret_cast<Scalar*>(pa);
-  Scalar* b = reinterpret_cast<Scalar*>(pb);
-  Scalar* c = reinterpret_cast<Scalar*>(pc);
-  Scalar alpha = *reinterpret_cast<Scalar*>(palpha);
-  RealScalar beta  = *pbeta;
-
-  int info = 0;
-  if(UPLO(*uplo)==INVALID)                                            info = 1;
-  else if((OP(*op)==INVALID) || (OP(*op)==TR))                        info = 2;
-  else if(*n<0)                                                       info = 3;
-  else if(*k<0)                                                       info = 4;
-  else if(*lda<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 7;
-  else if(*ldb<std::max(1,(OP(*op)==NOTR)?*n:*k))                     info = 9;
-  else if(*ldc<std::max(1,*n))                                        info = 12;
-  if(info)
-    return xerbla_(SCALAR_SUFFIX_UP"HER2K",&info,6);
-
-  if(beta!=RealScalar(1))
-  {
-    if(UPLO(*uplo)==UP)
-      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Upper>().setZero();
-      else                matrix(c, *n, *n, *ldc).triangularView<StrictlyUpper>() *= beta;
-    else
-      if(beta==Scalar(0)) matrix(c, *n, *n, *ldc).triangularView<Lower>().setZero();
-      else                matrix(c, *n, *n, *ldc).triangularView<StrictlyLower>() *= beta;
-
-    if(beta!=Scalar(0))
-    {
-      matrix(c, *n, *n, *ldc).diagonal().real() *= beta;
-      matrix(c, *n, *n, *ldc).diagonal().imag().setZero();
-    }
-  }
-  else if(*k>0 && alpha!=Scalar(0))
-    matrix(c, *n, *n, *ldc).diagonal().imag().setZero();
-
-  if(*k==0)
-    return 1;
-
-  if(OP(*op)==NOTR)
-  {
-    if(UPLO(*uplo)==UP)
-    {
-      matrix(c, *n, *n, *ldc).triangularView<Upper>()
-        +=         alpha *matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).adjoint()
-        +  internal::conj(alpha)*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).adjoint();
-    }
-    else if(UPLO(*uplo)==LO)
-      matrix(c, *n, *n, *ldc).triangularView<Lower>()
-        += alpha*matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).adjoint()
-        +  internal::conj(alpha)*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).adjoint();
-  }
-  else if(OP(*op)==ADJ)
-  {
-    if(UPLO(*uplo)==UP)
-      matrix(c, *n, *n, *ldc).triangularView<Upper>()
-        += alpha*matrix(a, *k, *n, *lda).adjoint()*matrix(b, *k, *n, *ldb)
-        +  internal::conj(alpha)*matrix(b, *k, *n, *ldb).adjoint()*matrix(a, *k, *n, *lda);
-    else if(UPLO(*uplo)==LO)
-      matrix(c, *n, *n, *ldc).triangularView<Lower>()
-        += alpha*matrix(a, *k, *n, *lda).adjoint()*matrix(b, *k, *n, *ldb)
-        +  internal::conj(alpha)*matrix(b, *k, *n, *ldb).adjoint()*matrix(a, *k, *n, *lda);
-  }
-
-  return 1;
-}
-
-#endif // ISCOMPLEX
diff --git a/resources/3rdparty/eigen/blas/single.cpp b/resources/3rdparty/eigen/blas/single.cpp
deleted file mode 100644
index 836e3eee2..000000000
--- a/resources/3rdparty/eigen/blas/single.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#define SCALAR        float
-#define SCALAR_SUFFIX s
-#define SCALAR_SUFFIX_UP "S"
-#define ISCOMPLEX     0
-
-#include "level1_impl.h"
-#include "level1_real_impl.h"
-#include "level2_impl.h"
-#include "level2_real_impl.h"
-#include "level3_impl.h"
-
-float BLASFUNC(sdsdot)(int* n, float* alpha, float* x, int* incx, float* y, int* incy)
-{ return *alpha + BLASFUNC(dsdot)(n, x, incx, y, incy); }
diff --git a/resources/3rdparty/eigen/blas/testing/dblat1.f b/resources/3rdparty/eigen/blas/testing/dblat1.f
deleted file mode 100644
index 30691f9bf..000000000
--- a/resources/3rdparty/eigen/blas/testing/dblat1.f
+++ /dev/null
@@ -1,1065 +0,0 @@
-*> \brief \b DBLAT1
-*
-*  =========== DOCUMENTATION ===========
-*
-* Online html documentation available at 
-*            http://www.netlib.org/lapack/explore-html/ 
-*
-*  Definition:
-*  ===========
-*
-*       PROGRAM DBLAT1
-* 
-*
-*> \par Purpose:
-*  =============
-*>
-*> \verbatim
-*>
-*>    Test program for the DOUBLE PRECISION Level 1 BLAS.
-*>
-*>    Based upon the original BLAS test routine together with:
-*>    F06EAF Example Program Text
-*> \endverbatim
-*
-*  Authors:
-*  ========
-*
-*> \author Univ. of Tennessee 
-*> \author Univ. of California Berkeley 
-*> \author Univ. of Colorado Denver 
-*> \author NAG Ltd. 
-*
-*> \date April 2012
-*
-*> \ingroup double_blas_testing
-*
-*  =====================================================================
-      PROGRAM DBLAT1
-*
-*  -- Reference BLAS test routine (version 3.4.1) --
-*  -- Reference BLAS is a software package provided by Univ. of Tennessee,    --
-*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
-*     April 2012
-*
-*  =====================================================================
-*
-*     .. Parameters ..
-      INTEGER          NOUT
-      PARAMETER        (NOUT=6)
-*     .. Scalars in Common ..
-      INTEGER          ICASE, INCX, INCY, N
-      LOGICAL          PASS
-*     .. Local Scalars ..
-      DOUBLE PRECISION SFAC
-      INTEGER          IC
-*     .. External Subroutines ..
-      EXTERNAL         CHECK0, CHECK1, CHECK2, CHECK3, HEADER
-*     .. Common blocks ..
-      COMMON           /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      DATA             SFAC/9.765625D-4/
-*     .. Executable Statements ..
-      WRITE (NOUT,99999)
-      DO 20 IC = 1, 13
-         ICASE = IC
-         CALL HEADER
-*
-*        .. Initialize  PASS,  INCX,  and INCY for a new case. ..
-*        .. the value 9999 for INCX or INCY will appear in the ..
-*        .. detailed  output, if any, for cases  that do not involve ..
-*        .. these parameters ..
-*
-         PASS = .TRUE.
-         INCX = 9999
-         INCY = 9999
-         IF (ICASE.EQ.3 .OR. ICASE.EQ.11) THEN
-            CALL CHECK0(SFAC)
-         ELSE IF (ICASE.EQ.7 .OR. ICASE.EQ.8 .OR. ICASE.EQ.9 .OR.
-     +            ICASE.EQ.10) THEN
-            CALL CHECK1(SFAC)
-         ELSE IF (ICASE.EQ.1 .OR. ICASE.EQ.2 .OR. ICASE.EQ.5 .OR.
-     +            ICASE.EQ.6 .OR. ICASE.EQ.12 .OR. ICASE.EQ.13) THEN
-            CALL CHECK2(SFAC)
-         ELSE IF (ICASE.EQ.4) THEN
-            CALL CHECK3(SFAC)
-         END IF
-*        -- Print
-         IF (PASS) WRITE (NOUT,99998)
-   20 CONTINUE
-      STOP
-*
-99999 FORMAT (' Real BLAS Test Program Results',/1X)
-99998 FORMAT ('                                    ----- PASS -----')
-      END
-      SUBROUTINE HEADER
-*     .. Parameters ..
-      INTEGER          NOUT
-      PARAMETER        (NOUT=6)
-*     .. Scalars in Common ..
-      INTEGER          ICASE, INCX, INCY, N
-      LOGICAL          PASS
-*     .. Local Arrays ..
-      CHARACTER*6      L(13)
-*     .. Common blocks ..
-      COMMON           /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      DATA             L(1)/' DDOT '/
-      DATA             L(2)/'DAXPY '/
-      DATA             L(3)/'DROTG '/
-      DATA             L(4)/' DROT '/
-      DATA             L(5)/'DCOPY '/
-      DATA             L(6)/'DSWAP '/
-      DATA             L(7)/'DNRM2 '/
-      DATA             L(8)/'DASUM '/
-      DATA             L(9)/'DSCAL '/
-      DATA             L(10)/'IDAMAX'/
-      DATA             L(11)/'DROTMG'/
-      DATA             L(12)/'DROTM '/
-      DATA             L(13)/'DSDOT '/
-*     .. Executable Statements ..
-      WRITE (NOUT,99999) ICASE, L(ICASE)
-      RETURN
-*
-99999 FORMAT (/' Test of subprogram number',I3,12X,A6)
-      END
-      SUBROUTINE CHECK0(SFAC)
-*     .. Parameters ..
-      INTEGER           NOUT
-      PARAMETER         (NOUT=6)
-*     .. Scalar Arguments ..
-      DOUBLE PRECISION  SFAC
-*     .. Scalars in Common ..
-      INTEGER           ICASE, INCX, INCY, N
-      LOGICAL           PASS
-*     .. Local Scalars ..
-      DOUBLE PRECISION  SA, SB, SC, SS, D12
-      INTEGER           I, K
-*     .. Local Arrays ..
-      DOUBLE PRECISION  DA1(8), DATRUE(8), DB1(8), DBTRUE(8), DC1(8),
-     $                  DS1(8), DAB(4,9), DTEMP(9), DTRUE(9,9)
-*     .. External Subroutines ..
-      EXTERNAL          DROTG, DROTMG, STEST1
-*     .. Common blocks ..
-      COMMON            /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      DATA              DA1/0.3D0, 0.4D0, -0.3D0, -0.4D0, -0.3D0, 0.0D0,
-     +                  0.0D0, 1.0D0/
-      DATA              DB1/0.4D0, 0.3D0, 0.4D0, 0.3D0, -0.4D0, 0.0D0,
-     +                  1.0D0, 0.0D0/
-      DATA              DC1/0.6D0, 0.8D0, -0.6D0, 0.8D0, 0.6D0, 1.0D0,
-     +                  0.0D0, 1.0D0/
-      DATA              DS1/0.8D0, 0.6D0, 0.8D0, -0.6D0, 0.8D0, 0.0D0,
-     +                  1.0D0, 0.0D0/
-      DATA              DATRUE/0.5D0, 0.5D0, 0.5D0, -0.5D0, -0.5D0,
-     +                  0.0D0, 1.0D0, 1.0D0/
-      DATA              DBTRUE/0.0D0, 0.6D0, 0.0D0, -0.6D0, 0.0D0,
-     +                  0.0D0, 1.0D0, 0.0D0/
-*     INPUT FOR MODIFIED GIVENS
-      DATA DAB/ .1D0,.3D0,1.2D0,.2D0,
-     A          .7D0, .2D0, .6D0, 4.2D0,
-     B          0.D0,0.D0,0.D0,0.D0,
-     C          4.D0, -1.D0, 2.D0, 4.D0,
-     D          6.D-10, 2.D-2, 1.D5, 10.D0,
-     E          4.D10, 2.D-2, 1.D-5, 10.D0,
-     F          2.D-10, 4.D-2, 1.D5, 10.D0,
-     G          2.D10, 4.D-2, 1.D-5, 10.D0,
-     H          4.D0, -2.D0, 8.D0, 4.D0    /
-*    TRUE RESULTS FOR MODIFIED GIVENS
-      DATA DTRUE/0.D0,0.D0, 1.3D0, .2D0, 0.D0,0.D0,0.D0, .5D0, 0.D0,
-     A           0.D0,0.D0, 4.5D0, 4.2D0, 1.D0, .5D0, 0.D0,0.D0,0.D0,
-     B           0.D0,0.D0,0.D0,0.D0, -2.D0, 0.D0,0.D0,0.D0,0.D0,
-     C           0.D0,0.D0,0.D0, 4.D0, -1.D0, 0.D0,0.D0,0.D0,0.D0,
-     D           0.D0, 15.D-3, 0.D0, 10.D0, -1.D0, 0.D0, -1.D-4,
-     E           0.D0, 1.D0,
-     F           0.D0,0.D0, 6144.D-5, 10.D0, -1.D0, 4096.D0, -1.D6,
-     G           0.D0, 1.D0,
-     H           0.D0,0.D0,15.D0,10.D0,-1.D0, 5.D-5, 0.D0,1.D0,0.D0,
-     I           0.D0,0.D0, 15.D0, 10.D0, -1. D0, 5.D5, -4096.D0,
-     J           1.D0, 4096.D-6,
-     K           0.D0,0.D0, 7.D0, 4.D0, 0.D0,0.D0, -.5D0, -.25D0, 0.D0/
-*                   4096 = 2 ** 12
-      DATA D12  /4096.D0/
-      DTRUE(1,1) = 12.D0 / 130.D0
-      DTRUE(2,1) = 36.D0 / 130.D0
-      DTRUE(7,1) = -1.D0 / 6.D0
-      DTRUE(1,2) = 14.D0 / 75.D0
-      DTRUE(2,2) = 49.D0 / 75.D0
-      DTRUE(9,2) = 1.D0 / 7.D0
-      DTRUE(1,5) = 45.D-11 * (D12 * D12)
-      DTRUE(3,5) = 4.D5 / (3.D0 * D12)
-      DTRUE(6,5) = 1.D0 / D12
-      DTRUE(8,5) = 1.D4 / (3.D0 * D12)
-      DTRUE(1,6) = 4.D10 / (1.5D0 * D12 * D12)
-      DTRUE(2,6) = 2.D-2 / 1.5D0
-      DTRUE(8,6) = 5.D-7 * D12
-      DTRUE(1,7) = 4.D0 / 150.D0
-      DTRUE(2,7) = (2.D-10 / 1.5D0) * (D12 * D12)
-      DTRUE(7,7) = -DTRUE(6,5)
-      DTRUE(9,7) = 1.D4 / D12
-      DTRUE(1,8) = DTRUE(1,7)
-      DTRUE(2,8) = 2.D10 / (1.5D0 * D12 * D12)
-      DTRUE(1,9) = 32.D0 / 7.D0
-      DTRUE(2,9) = -16.D0 / 7.D0
-*     .. Executable Statements ..
-*
-*     Compute true values which cannot be prestored
-*     in decimal notation
-*
-      DBTRUE(1) = 1.0D0/0.6D0
-      DBTRUE(3) = -1.0D0/0.6D0
-      DBTRUE(5) = 1.0D0/0.6D0
-*
-      DO 20 K = 1, 8
-*        .. Set N=K for identification in output if any ..
-         N = K
-         IF (ICASE.EQ.3) THEN
-*           .. DROTG ..
-            IF (K.GT.8) GO TO 40
-            SA = DA1(K)
-            SB = DB1(K)
-            CALL DROTG(SA,SB,SC,SS)
-            CALL STEST1(SA,DATRUE(K),DATRUE(K),SFAC)
-            CALL STEST1(SB,DBTRUE(K),DBTRUE(K),SFAC)
-            CALL STEST1(SC,DC1(K),DC1(K),SFAC)
-            CALL STEST1(SS,DS1(K),DS1(K),SFAC)
-         ELSEIF (ICASE.EQ.11) THEN
-*           .. DROTMG ..
-            DO I=1,4
-               DTEMP(I)= DAB(I,K)
-               DTEMP(I+4) = 0.0
-            END DO
-            DTEMP(9) = 0.0
-            CALL DROTMG(DTEMP(1),DTEMP(2),DTEMP(3),DTEMP(4),DTEMP(5))
-            CALL STEST(9,DTEMP,DTRUE(1,K),DTRUE(1,K),SFAC)
-         ELSE
-            WRITE (NOUT,*) ' Shouldn''t be here in CHECK0'
-            STOP
-         END IF
-   20 CONTINUE
-   40 RETURN
-      END
-      SUBROUTINE CHECK1(SFAC)
-*     .. Parameters ..
-      INTEGER           NOUT
-      PARAMETER         (NOUT=6)
-*     .. Scalar Arguments ..
-      DOUBLE PRECISION  SFAC
-*     .. Scalars in Common ..
-      INTEGER           ICASE, INCX, INCY, N
-      LOGICAL           PASS
-*     .. Local Scalars ..
-      INTEGER           I, LEN, NP1
-*     .. Local Arrays ..
-      DOUBLE PRECISION  DTRUE1(5), DTRUE3(5), DTRUE5(8,5,2), DV(8,5,2),
-     +                  SA(10), STEMP(1), STRUE(8), SX(8)
-      INTEGER           ITRUE2(5)
-*     .. External Functions ..
-      DOUBLE PRECISION  DASUM, DNRM2
-      INTEGER           IDAMAX
-      EXTERNAL          DASUM, DNRM2, IDAMAX
-*     .. External Subroutines ..
-      EXTERNAL          ITEST1, DSCAL, STEST, STEST1
-*     .. Intrinsic Functions ..
-      INTRINSIC         MAX
-*     .. Common blocks ..
-      COMMON            /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      DATA              SA/0.3D0, -1.0D0, 0.0D0, 1.0D0, 0.3D0, 0.3D0,
-     +                  0.3D0, 0.3D0, 0.3D0, 0.3D0/
-      DATA              DV/0.1D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0,
-     +                  2.0D0, 2.0D0, 0.3D0, 3.0D0, 3.0D0, 3.0D0, 3.0D0,
-     +                  3.0D0, 3.0D0, 3.0D0, 0.3D0, -0.4D0, 4.0D0,
-     +                  4.0D0, 4.0D0, 4.0D0, 4.0D0, 4.0D0, 0.2D0,
-     +                  -0.6D0, 0.3D0, 5.0D0, 5.0D0, 5.0D0, 5.0D0,
-     +                  5.0D0, 0.1D0, -0.3D0, 0.5D0, -0.1D0, 6.0D0,
-     +                  6.0D0, 6.0D0, 6.0D0, 0.1D0, 8.0D0, 8.0D0, 8.0D0,
-     +                  8.0D0, 8.0D0, 8.0D0, 8.0D0, 0.3D0, 9.0D0, 9.0D0,
-     +                  9.0D0, 9.0D0, 9.0D0, 9.0D0, 9.0D0, 0.3D0, 2.0D0,
-     +                  -0.4D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0,
-     +                  0.2D0, 3.0D0, -0.6D0, 5.0D0, 0.3D0, 2.0D0,
-     +                  2.0D0, 2.0D0, 0.1D0, 4.0D0, -0.3D0, 6.0D0,
-     +                  -0.5D0, 7.0D0, -0.1D0, 3.0D0/
-      DATA              DTRUE1/0.0D0, 0.3D0, 0.5D0, 0.7D0, 0.6D0/
-      DATA              DTRUE3/0.0D0, 0.3D0, 0.7D0, 1.1D0, 1.0D0/
-      DATA              DTRUE5/0.10D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0,
-     +                  2.0D0, 2.0D0, 2.0D0, -0.3D0, 3.0D0, 3.0D0,
-     +                  3.0D0, 3.0D0, 3.0D0, 3.0D0, 3.0D0, 0.0D0, 0.0D0,
-     +                  4.0D0, 4.0D0, 4.0D0, 4.0D0, 4.0D0, 4.0D0,
-     +                  0.20D0, -0.60D0, 0.30D0, 5.0D0, 5.0D0, 5.0D0,
-     +                  5.0D0, 5.0D0, 0.03D0, -0.09D0, 0.15D0, -0.03D0,
-     +                  6.0D0, 6.0D0, 6.0D0, 6.0D0, 0.10D0, 8.0D0,
-     +                  8.0D0, 8.0D0, 8.0D0, 8.0D0, 8.0D0, 8.0D0,
-     +                  0.09D0, 9.0D0, 9.0D0, 9.0D0, 9.0D0, 9.0D0,
-     +                  9.0D0, 9.0D0, 0.09D0, 2.0D0, -0.12D0, 2.0D0,
-     +                  2.0D0, 2.0D0, 2.0D0, 2.0D0, 0.06D0, 3.0D0,
-     +                  -0.18D0, 5.0D0, 0.09D0, 2.0D0, 2.0D0, 2.0D0,
-     +                  0.03D0, 4.0D0, -0.09D0, 6.0D0, -0.15D0, 7.0D0,
-     +                  -0.03D0, 3.0D0/
-      DATA              ITRUE2/0, 1, 2, 2, 3/
-*     .. Executable Statements ..
-      DO 80 INCX = 1, 2
-         DO 60 NP1 = 1, 5
-            N = NP1 - 1
-            LEN = 2*MAX(N,1)
-*           .. Set vector arguments ..
-            DO 20 I = 1, LEN
-               SX(I) = DV(I,NP1,INCX)
-   20       CONTINUE
-*
-            IF (ICASE.EQ.7) THEN
-*              .. DNRM2 ..
-               STEMP(1) = DTRUE1(NP1)
-               CALL STEST1(DNRM2(N,SX,INCX),STEMP(1),STEMP,SFAC)
-            ELSE IF (ICASE.EQ.8) THEN
-*              .. DASUM ..
-               STEMP(1) = DTRUE3(NP1)
-               CALL STEST1(DASUM(N,SX,INCX),STEMP(1),STEMP,SFAC)
-            ELSE IF (ICASE.EQ.9) THEN
-*              .. DSCAL ..
-               CALL DSCAL(N,SA((INCX-1)*5+NP1),SX,INCX)
-               DO 40 I = 1, LEN
-                  STRUE(I) = DTRUE5(I,NP1,INCX)
-   40          CONTINUE
-               CALL STEST(LEN,SX,STRUE,STRUE,SFAC)
-            ELSE IF (ICASE.EQ.10) THEN
-*              .. IDAMAX ..
-               CALL ITEST1(IDAMAX(N,SX,INCX),ITRUE2(NP1))
-            ELSE
-               WRITE (NOUT,*) ' Shouldn''t be here in CHECK1'
-               STOP
-            END IF
-   60    CONTINUE
-   80 CONTINUE
-      RETURN
-      END
-      SUBROUTINE CHECK2(SFAC)
-*     .. Parameters ..
-      INTEGER           NOUT
-      PARAMETER         (NOUT=6)
-*     .. Scalar Arguments ..
-      DOUBLE PRECISION  SFAC
-*     .. Scalars in Common ..
-      INTEGER           ICASE, INCX, INCY, N
-      LOGICAL           PASS
-*     .. Local Scalars ..
-      DOUBLE PRECISION  SA
-      INTEGER           I, J, KI, KN, KNI, KPAR, KSIZE, LENX, LENY,
-     $                  MX, MY 
-*     .. Local Arrays ..
-      DOUBLE PRECISION  DT10X(7,4,4), DT10Y(7,4,4), DT7(4,4),
-     $                  DT8(7,4,4), DX1(7),
-     $                  DY1(7), SSIZE1(4), SSIZE2(14,2), SSIZE(7),
-     $                  STX(7), STY(7), SX(7), SY(7),
-     $                  DPAR(5,4), DT19X(7,4,16),DT19XA(7,4,4),
-     $                  DT19XB(7,4,4), DT19XC(7,4,4),DT19XD(7,4,4),
-     $                  DT19Y(7,4,16), DT19YA(7,4,4),DT19YB(7,4,4),
-     $                  DT19YC(7,4,4), DT19YD(7,4,4), DTEMP(5)
-      INTEGER           INCXS(4), INCYS(4), LENS(4,2), NS(4)
-*     .. External Functions ..
-      DOUBLE PRECISION  DDOT, DSDOT
-      EXTERNAL          DDOT, DSDOT
-*     .. External Subroutines ..
-      EXTERNAL          DAXPY, DCOPY, DROTM, DSWAP, STEST, STEST1
-*     .. Intrinsic Functions ..
-      INTRINSIC         ABS, MIN
-*     .. Common blocks ..
-      COMMON            /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      EQUIVALENCE (DT19X(1,1,1),DT19XA(1,1,1)),(DT19X(1,1,5),
-     A   DT19XB(1,1,1)),(DT19X(1,1,9),DT19XC(1,1,1)),
-     B   (DT19X(1,1,13),DT19XD(1,1,1))
-      EQUIVALENCE (DT19Y(1,1,1),DT19YA(1,1,1)),(DT19Y(1,1,5),
-     A   DT19YB(1,1,1)),(DT19Y(1,1,9),DT19YC(1,1,1)),
-     B   (DT19Y(1,1,13),DT19YD(1,1,1))
-
-      DATA              SA/0.3D0/
-      DATA              INCXS/1, 2, -2, -1/
-      DATA              INCYS/1, -2, 1, -2/
-      DATA              LENS/1, 1, 2, 4, 1, 1, 3, 7/
-      DATA              NS/0, 1, 2, 4/
-      DATA              DX1/0.6D0, 0.1D0, -0.5D0, 0.8D0, 0.9D0, -0.3D0,
-     +                  -0.4D0/
-      DATA              DY1/0.5D0, -0.9D0, 0.3D0, 0.7D0, -0.6D0, 0.2D0,
-     +                  0.8D0/
-      DATA              DT7/0.0D0, 0.30D0, 0.21D0, 0.62D0, 0.0D0,
-     +                  0.30D0, -0.07D0, 0.85D0, 0.0D0, 0.30D0, -0.79D0,
-     +                  -0.74D0, 0.0D0, 0.30D0, 0.33D0, 1.27D0/
-      DATA              DT8/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.68D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.68D0, -0.87D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.68D0, -0.87D0, 0.15D0,
-     +                  0.94D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.68D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.35D0, -0.9D0, 0.48D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.38D0, -0.9D0, 0.57D0, 0.7D0, -0.75D0,
-     +                  0.2D0, 0.98D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.68D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.35D0, -0.72D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.38D0,
-     +                  -0.63D0, 0.15D0, 0.88D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.68D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.68D0, -0.9D0, 0.33D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.68D0, -0.9D0, 0.33D0, 0.7D0,
-     +                  -0.75D0, 0.2D0, 1.04D0/
-      DATA              DT10X/0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.5D0, -0.9D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.5D0, -0.9D0, 0.3D0, 0.7D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.3D0, 0.1D0, 0.5D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.8D0, 0.1D0, -0.6D0,
-     +                  0.8D0, 0.3D0, -0.3D0, 0.5D0, 0.6D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.9D0,
-     +                  0.1D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.7D0,
-     +                  0.1D0, 0.3D0, 0.8D0, -0.9D0, -0.3D0, 0.5D0,
-     +                  0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.5D0, 0.3D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.5D0, 0.3D0, -0.6D0, 0.8D0, 0.0D0, 0.0D0,
-     +                  0.0D0/
-      DATA              DT10Y/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.6D0, 0.1D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.6D0, 0.1D0, -0.5D0, 0.8D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, -0.5D0, -0.9D0, 0.6D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, -0.4D0, -0.9D0, 0.9D0,
-     +                  0.7D0, -0.5D0, 0.2D0, 0.6D0, 0.5D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.5D0,
-     +                  0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  -0.4D0, 0.9D0, -0.5D0, 0.6D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.6D0, -0.9D0, 0.1D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.6D0, -0.9D0, 0.1D0, 0.7D0,
-     +                  -0.5D0, 0.2D0, 0.8D0/
-      DATA              SSIZE1/0.0D0, 0.3D0, 1.6D0, 3.2D0/
-      DATA              SSIZE2/0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0,
-     +                  1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0,
-     +                  1.17D0, 1.17D0, 1.17D0/
-*
-*                         FOR DROTM
-*
-      DATA DPAR/-2.D0,  0.D0,0.D0,0.D0,0.D0,
-     A          -1.D0,  2.D0, -3.D0, -4.D0,  5.D0,
-     B           0.D0,  0.D0,  2.D0, -3.D0,  0.D0,
-     C           1.D0,  5.D0,  2.D0,  0.D0, -4.D0/
-*                        TRUE X RESULTS F0R ROTATIONS DROTM
-      DATA DT19XA/.6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     A            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     B            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     C            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     D            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     E           -.8D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     F           -.9D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     G           3.5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     H            .6D0,   .1D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     I           -.8D0,  3.8D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     J           -.9D0,  2.8D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     K           3.5D0,  -.4D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     L            .6D0,   .1D0,  -.5D0,   .8D0,          0.D0,0.D0,0.D0,
-     M           -.8D0,  3.8D0, -2.2D0, -1.2D0,          0.D0,0.D0,0.D0,
-     N           -.9D0,  2.8D0, -1.4D0, -1.3D0,          0.D0,0.D0,0.D0,
-     O           3.5D0,  -.4D0, -2.2D0,  4.7D0,          0.D0,0.D0,0.D0/
-*
-      DATA DT19XB/.6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     A            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     B            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     C            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     D            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     E           -.8D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     F           -.9D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     G           3.5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     H            .6D0,   .1D0,  -.5D0,             0.D0,0.D0,0.D0,0.D0,
-     I           0.D0,    .1D0, -3.0D0,             0.D0,0.D0,0.D0,0.D0,
-     J           -.3D0,   .1D0, -2.0D0,             0.D0,0.D0,0.D0,0.D0,
-     K           3.3D0,   .1D0, -2.0D0,             0.D0,0.D0,0.D0,0.D0,
-     L            .6D0,   .1D0,  -.5D0,   .8D0,   .9D0,  -.3D0,  -.4D0,
-     M          -2.0D0,   .1D0,  1.4D0,   .8D0,   .6D0,  -.3D0, -2.8D0,
-     N          -1.8D0,   .1D0,  1.3D0,   .8D0,  0.D0,   -.3D0, -1.9D0,
-     O           3.8D0,   .1D0, -3.1D0,   .8D0,  4.8D0,  -.3D0, -1.5D0 /
-*
-      DATA DT19XC/.6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     A            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     B            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     C            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     D            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     E           -.8D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     F           -.9D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     G           3.5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     H            .6D0,   .1D0,  -.5D0,             0.D0,0.D0,0.D0,0.D0,
-     I           4.8D0,   .1D0, -3.0D0,             0.D0,0.D0,0.D0,0.D0,
-     J           3.3D0,   .1D0, -2.0D0,             0.D0,0.D0,0.D0,0.D0,
-     K           2.1D0,   .1D0, -2.0D0,             0.D0,0.D0,0.D0,0.D0,
-     L            .6D0,   .1D0,  -.5D0,   .8D0,   .9D0,  -.3D0,  -.4D0,
-     M          -1.6D0,   .1D0, -2.2D0,   .8D0,  5.4D0,  -.3D0, -2.8D0,
-     N          -1.5D0,   .1D0, -1.4D0,   .8D0,  3.6D0,  -.3D0, -1.9D0,
-     O           3.7D0,   .1D0, -2.2D0,   .8D0,  3.6D0,  -.3D0, -1.5D0 /
-*
-      DATA DT19XD/.6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     A            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     B            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     C            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     D            .6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     E           -.8D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     F           -.9D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     G           3.5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     H            .6D0,   .1D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     I           -.8D0, -1.0D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     J           -.9D0,  -.8D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     K           3.5D0,   .8D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     L            .6D0,   .1D0,  -.5D0,   .8D0,          0.D0,0.D0,0.D0,
-     M           -.8D0, -1.0D0,  1.4D0, -1.6D0,          0.D0,0.D0,0.D0,
-     N           -.9D0,  -.8D0,  1.3D0, -1.6D0,          0.D0,0.D0,0.D0,
-     O           3.5D0,   .8D0, -3.1D0,  4.8D0,          0.D0,0.D0,0.D0/
-*                        TRUE Y RESULTS FOR ROTATIONS DROTM
-      DATA DT19YA/.5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     A            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     B            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     C            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     D            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     E            .7D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     F           1.7D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     G          -2.6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     H            .5D0,  -.9D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     I            .7D0, -4.8D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     J           1.7D0,  -.7D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     K          -2.6D0,  3.5D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     L            .5D0,  -.9D0,   .3D0,   .7D0,          0.D0,0.D0,0.D0,
-     M            .7D0, -4.8D0,  3.0D0,  1.1D0,          0.D0,0.D0,0.D0,
-     N           1.7D0,  -.7D0,  -.7D0,  2.3D0,          0.D0,0.D0,0.D0,
-     O          -2.6D0,  3.5D0,  -.7D0, -3.6D0,          0.D0,0.D0,0.D0/
-*
-      DATA DT19YB/.5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     A            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     B            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     C            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     D            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     E            .7D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     F           1.7D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     G          -2.6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     H            .5D0,  -.9D0,   .3D0,             0.D0,0.D0,0.D0,0.D0,
-     I           4.0D0,  -.9D0,  -.3D0,             0.D0,0.D0,0.D0,0.D0,
-     J           -.5D0,  -.9D0,  1.5D0,             0.D0,0.D0,0.D0,0.D0,
-     K          -1.5D0,  -.9D0, -1.8D0,             0.D0,0.D0,0.D0,0.D0,
-     L            .5D0,  -.9D0,   .3D0,   .7D0,  -.6D0,   .2D0,   .8D0,
-     M           3.7D0,  -.9D0, -1.2D0,   .7D0, -1.5D0,   .2D0,  2.2D0,
-     N           -.3D0,  -.9D0,  2.1D0,   .7D0, -1.6D0,   .2D0,  2.0D0,
-     O          -1.6D0,  -.9D0, -2.1D0,   .7D0,  2.9D0,   .2D0, -3.8D0 /
-*
-      DATA DT19YC/.5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     A            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     B            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     C            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     D            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     E            .7D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     F           1.7D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     G          -2.6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     H            .5D0,  -.9D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     I           4.0D0, -6.3D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     J           -.5D0,   .3D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     K          -1.5D0,  3.0D0,             0.D0,0.D0,0.D0,0.D0,0.D0,
-     L            .5D0,  -.9D0,   .3D0,   .7D0,          0.D0,0.D0,0.D0,
-     M           3.7D0, -7.2D0,  3.0D0,  1.7D0,          0.D0,0.D0,0.D0,
-     N           -.3D0,   .9D0,  -.7D0,  1.9D0,          0.D0,0.D0,0.D0,
-     O          -1.6D0,  2.7D0,  -.7D0, -3.4D0,          0.D0,0.D0,0.D0/
-*
-      DATA DT19YD/.5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     A            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     B            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     C            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     D            .5D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     E            .7D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     F           1.7D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     G          -2.6D0,                  0.D0,0.D0,0.D0,0.D0,0.D0,0.D0,
-     H            .5D0,  -.9D0,   .3D0,             0.D0,0.D0,0.D0,0.D0,
-     I            .7D0,  -.9D0,  1.2D0,             0.D0,0.D0,0.D0,0.D0,
-     J           1.7D0,  -.9D0,   .5D0,             0.D0,0.D0,0.D0,0.D0,
-     K          -2.6D0,  -.9D0, -1.3D0,             0.D0,0.D0,0.D0,0.D0,
-     L            .5D0,  -.9D0,   .3D0,   .7D0,  -.6D0,   .2D0,   .8D0,
-     M            .7D0,  -.9D0,  1.2D0,   .7D0, -1.5D0,   .2D0,  1.6D0,
-     N           1.7D0,  -.9D0,   .5D0,   .7D0, -1.6D0,   .2D0,  2.4D0,
-     O          -2.6D0,  -.9D0, -1.3D0,   .7D0,  2.9D0,   .2D0, -4.0D0 /
-*    
-*     .. Executable Statements ..
-*
-      DO 120 KI = 1, 4
-         INCX = INCXS(KI)
-         INCY = INCYS(KI)
-         MX = ABS(INCX)
-         MY = ABS(INCY)
-*
-         DO 100 KN = 1, 4
-            N = NS(KN)
-            KSIZE = MIN(2,KN)
-            LENX = LENS(KN,MX)
-            LENY = LENS(KN,MY)
-*           .. Initialize all argument arrays ..
-            DO 20 I = 1, 7
-               SX(I) = DX1(I)
-               SY(I) = DY1(I)
-   20       CONTINUE
-*
-            IF (ICASE.EQ.1) THEN
-*              .. DDOT ..
-               CALL STEST1(DDOT(N,SX,INCX,SY,INCY),DT7(KN,KI),SSIZE1(KN)
-     +                     ,SFAC)
-            ELSE IF (ICASE.EQ.2) THEN
-*              .. DAXPY ..
-               CALL DAXPY(N,SA,SX,INCX,SY,INCY)
-               DO 40 J = 1, LENY
-                  STY(J) = DT8(J,KN,KI)
-   40          CONTINUE
-               CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC)
-            ELSE IF (ICASE.EQ.5) THEN
-*              .. DCOPY ..
-               DO 60 I = 1, 7
-                  STY(I) = DT10Y(I,KN,KI)
-   60          CONTINUE
-               CALL DCOPY(N,SX,INCX,SY,INCY)
-               CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0D0)
-            ELSE IF (ICASE.EQ.6) THEN
-*              .. DSWAP ..
-               CALL DSWAP(N,SX,INCX,SY,INCY)
-               DO 80 I = 1, 7
-                  STX(I) = DT10X(I,KN,KI)
-                  STY(I) = DT10Y(I,KN,KI)
-   80          CONTINUE
-               CALL STEST(LENX,SX,STX,SSIZE2(1,1),1.0D0)
-               CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0D0)
-            ELSE IF (ICASE.EQ.12) THEN
-*              .. DROTM ..
-               KNI=KN+4*(KI-1)
-               DO KPAR=1,4
-                  DO I=1,7
-                     SX(I) = DX1(I)
-                     SY(I) = DY1(I)
-                     STX(I)= DT19X(I,KPAR,KNI)
-                     STY(I)= DT19Y(I,KPAR,KNI)
-                  END DO
-*
-                  DO I=1,5
-                     DTEMP(I) = DPAR(I,KPAR)
-                  END DO
-*
-                  DO  I=1,LENX
-                     SSIZE(I)=STX(I)
-                  END DO
-*                   SEE REMARK ABOVE ABOUT DT11X(1,2,7)
-*                       AND DT11X(5,3,8).
-                  IF ((KPAR .EQ. 2) .AND. (KNI .EQ. 7))
-     $               SSIZE(1) = 2.4D0
-                  IF ((KPAR .EQ. 3) .AND. (KNI .EQ. 8))
-     $               SSIZE(5) = 1.8D0
-*
-                  CALL   DROTM(N,SX,INCX,SY,INCY,DTEMP)
-                  CALL   STEST(LENX,SX,STX,SSIZE,SFAC)
-                  CALL   STEST(LENY,SY,STY,STY,SFAC)
-               END DO
-            ELSE IF (ICASE.EQ.13) THEN
-*              .. DSDOT ..
-            CALL TESTDSDOT(REAL(DSDOT(N,REAL(SX),INCX,REAL(SY),INCY)),
-     $                 REAL(DT7(KN,KI)),REAL(SSIZE1(KN)), .3125E-1)
-            ELSE
-               WRITE (NOUT,*) ' Shouldn''t be here in CHECK2'
-               STOP
-            END IF
-  100    CONTINUE
-  120 CONTINUE
-      RETURN
-      END
-      SUBROUTINE CHECK3(SFAC)
-*     .. Parameters ..
-      INTEGER           NOUT
-      PARAMETER         (NOUT=6)
-*     .. Scalar Arguments ..
-      DOUBLE PRECISION  SFAC
-*     .. Scalars in Common ..
-      INTEGER           ICASE, INCX, INCY, N
-      LOGICAL           PASS
-*     .. Local Scalars ..
-      DOUBLE PRECISION  SC, SS
-      INTEGER           I, K, KI, KN, KSIZE, LENX, LENY, MX, MY
-*     .. Local Arrays ..
-      DOUBLE PRECISION  COPYX(5), COPYY(5), DT9X(7,4,4), DT9Y(7,4,4),
-     +                  DX1(7), DY1(7), MWPC(11), MWPS(11), MWPSTX(5),
-     +                  MWPSTY(5), MWPTX(11,5), MWPTY(11,5), MWPX(5),
-     +                  MWPY(5), SSIZE2(14,2), STX(7), STY(7), SX(7),
-     +                  SY(7)
-      INTEGER           INCXS(4), INCYS(4), LENS(4,2), MWPINX(11),
-     +                  MWPINY(11), MWPN(11), NS(4)
-*     .. External Subroutines ..
-      EXTERNAL          DROT, STEST
-*     .. Intrinsic Functions ..
-      INTRINSIC         ABS, MIN
-*     .. Common blocks ..
-      COMMON            /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      DATA              INCXS/1, 2, -2, -1/
-      DATA              INCYS/1, -2, 1, -2/
-      DATA              LENS/1, 1, 2, 4, 1, 1, 3, 7/
-      DATA              NS/0, 1, 2, 4/
-      DATA              DX1/0.6D0, 0.1D0, -0.5D0, 0.8D0, 0.9D0, -0.3D0,
-     +                  -0.4D0/
-      DATA              DY1/0.5D0, -0.9D0, 0.3D0, 0.7D0, -0.6D0, 0.2D0,
-     +                  0.8D0/
-      DATA              SC, SS/0.8D0, 0.6D0/
-      DATA              DT9X/0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.78D0, -0.46D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.78D0, -0.46D0, -0.22D0,
-     +                  1.06D0, 0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.78D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.66D0, 0.1D0, -0.1D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.96D0, 0.1D0, -0.76D0, 0.8D0, 0.90D0,
-     +                  -0.3D0, -0.02D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.78D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.06D0, 0.1D0,
-     +                  -0.1D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.90D0,
-     +                  0.1D0, -0.22D0, 0.8D0, 0.18D0, -0.3D0, -0.02D0,
-     +                  0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.78D0, 0.26D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.78D0, 0.26D0, -0.76D0, 1.12D0,
-     +                  0.0D0, 0.0D0, 0.0D0/
-      DATA              DT9Y/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.54D0,
-     +                  0.08D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.04D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.7D0,
-     +                  -0.9D0, -0.12D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.64D0, -0.9D0, -0.30D0, 0.7D0, -0.18D0, 0.2D0,
-     +                  0.28D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.7D0, -1.08D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.64D0, -1.26D0,
-     +                  0.54D0, 0.20D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.7D0,
-     +                  -0.18D0, 0.2D0, 0.16D0/
-      DATA              SSIZE2/0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0,
-     +                  0.0D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0,
-     +                  1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0,
-     +                  1.17D0, 1.17D0, 1.17D0/
-*     .. Executable Statements ..
-*
-      DO 60 KI = 1, 4
-         INCX = INCXS(KI)
-         INCY = INCYS(KI)
-         MX = ABS(INCX)
-         MY = ABS(INCY)
-*
-         DO 40 KN = 1, 4
-            N = NS(KN)
-            KSIZE = MIN(2,KN)
-            LENX = LENS(KN,MX)
-            LENY = LENS(KN,MY)
-*
-            IF (ICASE.EQ.4) THEN
-*              .. DROT ..
-               DO 20 I = 1, 7
-                  SX(I) = DX1(I)
-                  SY(I) = DY1(I)
-                  STX(I) = DT9X(I,KN,KI)
-                  STY(I) = DT9Y(I,KN,KI)
-   20          CONTINUE
-               CALL DROT(N,SX,INCX,SY,INCY,SC,SS)
-               CALL STEST(LENX,SX,STX,SSIZE2(1,KSIZE),SFAC)
-               CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC)
-            ELSE
-               WRITE (NOUT,*) ' Shouldn''t be here in CHECK3'
-               STOP
-            END IF
-   40    CONTINUE
-   60 CONTINUE
-*
-      MWPC(1) = 1
-      DO 80 I = 2, 11
-         MWPC(I) = 0
-   80 CONTINUE
-      MWPS(1) = 0
-      DO 100 I = 2, 6
-         MWPS(I) = 1
-  100 CONTINUE
-      DO 120 I = 7, 11
-         MWPS(I) = -1
-  120 CONTINUE
-      MWPINX(1) = 1
-      MWPINX(2) = 1
-      MWPINX(3) = 1
-      MWPINX(4) = -1
-      MWPINX(5) = 1
-      MWPINX(6) = -1
-      MWPINX(7) = 1
-      MWPINX(8) = 1
-      MWPINX(9) = -1
-      MWPINX(10) = 1
-      MWPINX(11) = -1
-      MWPINY(1) = 1
-      MWPINY(2) = 1
-      MWPINY(3) = -1
-      MWPINY(4) = -1
-      MWPINY(5) = 2
-      MWPINY(6) = 1
-      MWPINY(7) = 1
-      MWPINY(8) = -1
-      MWPINY(9) = -1
-      MWPINY(10) = 2
-      MWPINY(11) = 1
-      DO 140 I = 1, 11
-         MWPN(I) = 5
-  140 CONTINUE
-      MWPN(5) = 3
-      MWPN(10) = 3
-      DO 160 I = 1, 5
-         MWPX(I) = I
-         MWPY(I) = I
-         MWPTX(1,I) = I
-         MWPTY(1,I) = I
-         MWPTX(2,I) = I
-         MWPTY(2,I) = -I
-         MWPTX(3,I) = 6 - I
-         MWPTY(3,I) = I - 6
-         MWPTX(4,I) = I
-         MWPTY(4,I) = -I
-         MWPTX(6,I) = 6 - I
-         MWPTY(6,I) = I - 6
-         MWPTX(7,I) = -I
-         MWPTY(7,I) = I
-         MWPTX(8,I) = I - 6
-         MWPTY(8,I) = 6 - I
-         MWPTX(9,I) = -I
-         MWPTY(9,I) = I
-         MWPTX(11,I) = I - 6
-         MWPTY(11,I) = 6 - I
-  160 CONTINUE
-      MWPTX(5,1) = 1
-      MWPTX(5,2) = 3
-      MWPTX(5,3) = 5
-      MWPTX(5,4) = 4
-      MWPTX(5,5) = 5
-      MWPTY(5,1) = -1
-      MWPTY(5,2) = 2
-      MWPTY(5,3) = -2
-      MWPTY(5,4) = 4
-      MWPTY(5,5) = -3
-      MWPTX(10,1) = -1
-      MWPTX(10,2) = -3
-      MWPTX(10,3) = -5
-      MWPTX(10,4) = 4
-      MWPTX(10,5) = 5
-      MWPTY(10,1) = 1
-      MWPTY(10,2) = 2
-      MWPTY(10,3) = 2
-      MWPTY(10,4) = 4
-      MWPTY(10,5) = 3
-      DO 200 I = 1, 11
-         INCX = MWPINX(I)
-         INCY = MWPINY(I)
-         DO 180 K = 1, 5
-            COPYX(K) = MWPX(K)
-            COPYY(K) = MWPY(K)
-            MWPSTX(K) = MWPTX(I,K)
-            MWPSTY(K) = MWPTY(I,K)
-  180    CONTINUE
-         CALL DROT(MWPN(I),COPYX,INCX,COPYY,INCY,MWPC(I),MWPS(I))
-         CALL STEST(5,COPYX,MWPSTX,MWPSTX,SFAC)
-         CALL STEST(5,COPYY,MWPSTY,MWPSTY,SFAC)
-  200 CONTINUE
-      RETURN
-      END
-      SUBROUTINE STEST(LEN,SCOMP,STRUE,SSIZE,SFAC)
-*     ********************************* STEST **************************
-*
-*     THIS SUBR COMPARES ARRAYS  SCOMP() AND STRUE() OF LENGTH LEN TO
-*     SEE IF THE TERM BY TERM DIFFERENCES, MULTIPLIED BY SFAC, ARE
-*     NEGLIGIBLE.
-*
-*     C. L. LAWSON, JPL, 1974 DEC 10
-*
-*     .. Parameters ..
-      INTEGER          NOUT
-      DOUBLE PRECISION ZERO
-      PARAMETER        (NOUT=6, ZERO=0.0D0)
-*     .. Scalar Arguments ..
-      DOUBLE PRECISION SFAC
-      INTEGER          LEN
-*     .. Array Arguments ..
-      DOUBLE PRECISION SCOMP(LEN), SSIZE(LEN), STRUE(LEN)
-*     .. Scalars in Common ..
-      INTEGER          ICASE, INCX, INCY, N
-      LOGICAL          PASS
-*     .. Local Scalars ..
-      DOUBLE PRECISION SD
-      INTEGER          I
-*     .. External Functions ..
-      DOUBLE PRECISION SDIFF
-      EXTERNAL         SDIFF
-*     .. Intrinsic Functions ..
-      INTRINSIC        ABS
-*     .. Common blocks ..
-      COMMON           /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Executable Statements ..
-*
-      DO 40 I = 1, LEN
-         SD = SCOMP(I) - STRUE(I)
-         IF (ABS(SFAC*SD) .LE. ABS(SSIZE(I))*EPSILON(ZERO))
-     +       GO TO 40
-*
-*                             HERE    SCOMP(I) IS NOT CLOSE TO STRUE(I).
-*
-         IF ( .NOT. PASS) GO TO 20
-*                             PRINT FAIL MESSAGE AND HEADER.
-         PASS = .FALSE.
-         WRITE (NOUT,99999)
-         WRITE (NOUT,99998)
-   20    WRITE (NOUT,99997) ICASE, N, INCX, INCY, I, SCOMP(I),
-     +     STRUE(I), SD, SSIZE(I)
-   40 CONTINUE
-      RETURN
-*
-99999 FORMAT ('                                       FAIL')
-99998 FORMAT (/' CASE  N INCX INCY  I                            ',
-     +       ' COMP(I)                             TRUE(I)  DIFFERENCE',
-     +       '     SIZE(I)',/1X)
-99997 FORMAT (1X,I4,I3,2I5,I3,2D36.8,2D12.4)
-      END
-      SUBROUTINE TESTDSDOT(SCOMP,STRUE,SSIZE,SFAC)
-*     ******************************* TESTDSDOT ************************
-*
-*     THIS SUBR COMPARES THE SINGLE PRECISION SCALARS SCOMP AND STRUE
-*     TO SEE IF THEIR DIFFERENCE, MULTIPLIED BY SFAC, IS NEGLIGIBLE.
-*
-*     C. L. LAWSON, JPL, 1974 DEC 10
-*
-*     .. Parameters ..
-      INTEGER          NOUT
-      REAL             ZERO
-      PARAMETER        (NOUT=6, ZERO=0.0E0)
-*     .. Scalar Arguments ..
-      REAL             SFAC, SCOMP, SSIZE, STRUE
-*     .. Scalars in Common ..
-      INTEGER          ICASE, INCX, INCY, N
-      LOGICAL          PASS
-*     .. Local Scalars ..
-      REAL             SD
-*     .. Intrinsic Functions ..
-      INTRINSIC        ABS
-*     .. Common blocks ..
-      COMMON           /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Executable Statements ..
-*
-         SD = SCOMP - STRUE
-         IF (ABS(SFAC*SD) .LE. ABS(SSIZE) * EPSILON(ZERO))
-     +       GO TO 40
-*
-*                             HERE    SCOMP(I) IS NOT CLOSE TO STRUE(I).
-*
-         IF ( .NOT. PASS) GO TO 20
-*                             PRINT FAIL MESSAGE AND HEADER.
-         PASS = .FALSE.
-         WRITE (NOUT,99999)
-         WRITE (NOUT,99998)
-   20    WRITE (NOUT,99997) ICASE, N, INCX, INCY, SCOMP,
-     +     STRUE, SD, SSIZE
-   40 CONTINUE
-      RETURN
-*
-99999 FORMAT ('                                       FAIL')
-99998 FORMAT (/' CASE  N INCX INCY                           ',
-     +       ' COMP(I)                             TRUE(I)  DIFFERENCE',
-     +       '     SIZE(I)',/1X)
-99997 FORMAT (1X,I4,I3,1I5,I3,2E36.8,2E12.4)
-      END
-      SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
-*     ************************* STEST1 *****************************
-*
-*     THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
-*     REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
-*     ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
-*
-*     C.L. LAWSON, JPL, 1978 DEC 6
-*
-*     .. Scalar Arguments ..
-      DOUBLE PRECISION  SCOMP1, SFAC, STRUE1
-*     .. Array Arguments ..
-      DOUBLE PRECISION  SSIZE(*)
-*     .. Local Arrays ..
-      DOUBLE PRECISION  SCOMP(1), STRUE(1)
-*     .. External Subroutines ..
-      EXTERNAL          STEST
-*     .. Executable Statements ..
-*
-      SCOMP(1) = SCOMP1
-      STRUE(1) = STRUE1
-      CALL STEST(1,SCOMP,STRUE,SSIZE,SFAC)
-*
-      RETURN
-      END
-      DOUBLE PRECISION FUNCTION SDIFF(SA,SB)
-*     ********************************* SDIFF **************************
-*     COMPUTES DIFFERENCE OF TWO NUMBERS.  C. L. LAWSON, JPL 1974 FEB 15
-*
-*     .. Scalar Arguments ..
-      DOUBLE PRECISION                SA, SB
-*     .. Executable Statements ..
-      SDIFF = SA - SB
-      RETURN
-      END
-      SUBROUTINE ITEST1(ICOMP,ITRUE)
-*     ********************************* ITEST1 *************************
-*
-*     THIS SUBROUTINE COMPARES THE VARIABLES ICOMP AND ITRUE FOR
-*     EQUALITY.
-*     C. L. LAWSON, JPL, 1974 DEC 10
-*
-*     .. Parameters ..
-      INTEGER           NOUT
-      PARAMETER         (NOUT=6)
-*     .. Scalar Arguments ..
-      INTEGER           ICOMP, ITRUE
-*     .. Scalars in Common ..
-      INTEGER           ICASE, INCX, INCY, N
-      LOGICAL           PASS
-*     .. Local Scalars ..
-      INTEGER           ID
-*     .. Common blocks ..
-      COMMON            /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Executable Statements ..
-*
-      IF (ICOMP.EQ.ITRUE) GO TO 40
-*
-*                            HERE ICOMP IS NOT EQUAL TO ITRUE.
-*
-      IF ( .NOT. PASS) GO TO 20
-*                             PRINT FAIL MESSAGE AND HEADER.
-      PASS = .FALSE.
-      WRITE (NOUT,99999)
-      WRITE (NOUT,99998)
-   20 ID = ICOMP - ITRUE
-      WRITE (NOUT,99997) ICASE, N, INCX, INCY, ICOMP, ITRUE, ID
-   40 CONTINUE
-      RETURN
-*
-99999 FORMAT ('                                       FAIL')
-99998 FORMAT (/' CASE  N INCX INCY                               ',
-     +       ' COMP                                TRUE     DIFFERENCE',
-     +       /1X)
-99997 FORMAT (1X,I4,I3,2I5,2I36,I12)
-      END
diff --git a/resources/3rdparty/eigen/blas/testing/sblat1.f b/resources/3rdparty/eigen/blas/testing/sblat1.f
deleted file mode 100644
index 6657c2693..000000000
--- a/resources/3rdparty/eigen/blas/testing/sblat1.f
+++ /dev/null
@@ -1,1021 +0,0 @@
-*> \brief \b SBLAT1
-*
-*  =========== DOCUMENTATION ===========
-*
-* Online html documentation available at 
-*            http://www.netlib.org/lapack/explore-html/ 
-*
-*  Definition:
-*  ===========
-*
-*       PROGRAM SBLAT1
-* 
-*
-*> \par Purpose:
-*  =============
-*>
-*> \verbatim
-*>
-*>    Test program for the REAL Level 1 BLAS.
-*>
-*>    Based upon the original BLAS test routine together with:
-*>    F06EAF Example Program Text
-*> \endverbatim
-*
-*  Authors:
-*  ========
-*
-*> \author Univ. of Tennessee 
-*> \author Univ. of California Berkeley 
-*> \author Univ. of Colorado Denver 
-*> \author NAG Ltd. 
-*
-*> \date April 2012
-*
-*> \ingroup single_blas_testing
-*
-*  =====================================================================
-      PROGRAM SBLAT1
-*
-*  -- Reference BLAS test routine (version 3.4.1) --
-*  -- Reference BLAS is a software package provided by Univ. of Tennessee,    --
-*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
-*     April 2012
-*
-*  =====================================================================
-*
-*     .. Parameters ..
-      INTEGER          NOUT
-      PARAMETER        (NOUT=6)
-*     .. Scalars in Common ..
-      INTEGER          ICASE, INCX, INCY, N
-      LOGICAL          PASS
-*     .. Local Scalars ..
-      REAL             SFAC
-      INTEGER          IC
-*     .. External Subroutines ..
-      EXTERNAL         CHECK0, CHECK1, CHECK2, CHECK3, HEADER
-*     .. Common blocks ..
-      COMMON           /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      DATA             SFAC/9.765625E-4/
-*     .. Executable Statements ..
-      WRITE (NOUT,99999)
-      DO 20 IC = 1, 13
-         ICASE = IC
-         CALL HEADER
-*
-*        .. Initialize  PASS,  INCX,  and INCY for a new case. ..
-*        .. the value 9999 for INCX or INCY will appear in the ..
-*        .. detailed  output, if any, for cases  that do not involve ..
-*        .. these parameters ..
-*
-         PASS = .TRUE.
-         INCX = 9999
-         INCY = 9999
-         IF (ICASE.EQ.3 .OR. ICASE.EQ.11) THEN
-            CALL CHECK0(SFAC)
-         ELSE IF (ICASE.EQ.7 .OR. ICASE.EQ.8 .OR. ICASE.EQ.9 .OR.
-     +            ICASE.EQ.10) THEN
-            CALL CHECK1(SFAC)
-         ELSE IF (ICASE.EQ.1 .OR. ICASE.EQ.2 .OR. ICASE.EQ.5 .OR.
-     +            ICASE.EQ.6 .OR. ICASE.EQ.12 .OR. ICASE.EQ.13) THEN
-            CALL CHECK2(SFAC)
-         ELSE IF (ICASE.EQ.4) THEN
-            CALL CHECK3(SFAC)
-         END IF
-*        -- Print
-         IF (PASS) WRITE (NOUT,99998)
-   20 CONTINUE
-      STOP
-*
-99999 FORMAT (' Real BLAS Test Program Results',/1X)
-99998 FORMAT ('                                    ----- PASS -----')
-      END
-      SUBROUTINE HEADER
-*     .. Parameters ..
-      INTEGER          NOUT
-      PARAMETER        (NOUT=6)
-*     .. Scalars in Common ..
-      INTEGER          ICASE, INCX, INCY, N
-      LOGICAL          PASS
-*     .. Local Arrays ..
-      CHARACTER*6      L(13)
-*     .. Common blocks ..
-      COMMON           /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      DATA             L(1)/' SDOT '/
-      DATA             L(2)/'SAXPY '/
-      DATA             L(3)/'SROTG '/
-      DATA             L(4)/' SROT '/
-      DATA             L(5)/'SCOPY '/
-      DATA             L(6)/'SSWAP '/
-      DATA             L(7)/'SNRM2 '/
-      DATA             L(8)/'SASUM '/
-      DATA             L(9)/'SSCAL '/
-      DATA             L(10)/'ISAMAX'/
-      DATA             L(11)/'SROTMG'/
-      DATA             L(12)/'SROTM '/
-      DATA             L(13)/'SDSDOT'/
-*     .. Executable Statements ..
-      WRITE (NOUT,99999) ICASE, L(ICASE)
-      RETURN
-*
-99999 FORMAT (/' Test of subprogram number',I3,12X,A6)
-      END
-      SUBROUTINE CHECK0(SFAC)
-*     .. Parameters ..
-      INTEGER           NOUT
-      PARAMETER         (NOUT=6)
-*     .. Scalar Arguments ..
-      REAL              SFAC
-*     .. Scalars in Common ..
-      INTEGER           ICASE, INCX, INCY, N
-      LOGICAL           PASS
-*     .. Local Scalars ..
-      REAL              D12, SA, SB, SC, SS
-      INTEGER           I, K
-*     .. Local Arrays ..
-      REAL              DA1(8), DATRUE(8), DB1(8), DBTRUE(8), DC1(8),
-     +                  DS1(8), DAB(4,9), DTEMP(9), DTRUE(9,9)
-*     .. External Subroutines ..
-      EXTERNAL          SROTG, SROTMG, STEST1
-*     .. Common blocks ..
-      COMMON            /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      DATA              DA1/0.3E0, 0.4E0, -0.3E0, -0.4E0, -0.3E0, 0.0E0,
-     +                  0.0E0, 1.0E0/
-      DATA              DB1/0.4E0, 0.3E0, 0.4E0, 0.3E0, -0.4E0, 0.0E0,
-     +                  1.0E0, 0.0E0/
-      DATA              DC1/0.6E0, 0.8E0, -0.6E0, 0.8E0, 0.6E0, 1.0E0,
-     +                  0.0E0, 1.0E0/
-      DATA              DS1/0.8E0, 0.6E0, 0.8E0, -0.6E0, 0.8E0, 0.0E0,
-     +                  1.0E0, 0.0E0/
-      DATA              DATRUE/0.5E0, 0.5E0, 0.5E0, -0.5E0, -0.5E0,
-     +                  0.0E0, 1.0E0, 1.0E0/
-      DATA              DBTRUE/0.0E0, 0.6E0, 0.0E0, -0.6E0, 0.0E0,
-     +                  0.0E0, 1.0E0, 0.0E0/
-*     INPUT FOR MODIFIED GIVENS
-      DATA DAB/ .1E0,.3E0,1.2E0,.2E0,
-     A          .7E0, .2E0, .6E0, 4.2E0,
-     B          0.E0,0.E0,0.E0,0.E0,
-     C          4.E0, -1.E0, 2.E0, 4.E0,
-     D          6.E-10, 2.E-2, 1.E5, 10.E0,
-     E          4.E10, 2.E-2, 1.E-5, 10.E0,
-     F          2.E-10, 4.E-2, 1.E5, 10.E0,
-     G          2.E10, 4.E-2, 1.E-5, 10.E0,
-     H          4.E0, -2.E0, 8.E0, 4.E0    /
-*    TRUE RESULTS FOR MODIFIED GIVENS
-      DATA DTRUE/0.E0,0.E0, 1.3E0, .2E0, 0.E0,0.E0,0.E0, .5E0, 0.E0,
-     A           0.E0,0.E0, 4.5E0, 4.2E0, 1.E0, .5E0, 0.E0,0.E0,0.E0,
-     B           0.E0,0.E0,0.E0,0.E0, -2.E0, 0.E0,0.E0,0.E0,0.E0,
-     C           0.E0,0.E0,0.E0, 4.E0, -1.E0, 0.E0,0.E0,0.E0,0.E0,
-     D           0.E0, 15.E-3, 0.E0, 10.E0, -1.E0, 0.E0, -1.E-4,
-     E           0.E0, 1.E0,
-     F           0.E0,0.E0, 6144.E-5, 10.E0, -1.E0, 4096.E0, -1.E6,
-     G           0.E0, 1.E0,
-     H           0.E0,0.E0,15.E0,10.E0,-1.E0, 5.E-5, 0.E0,1.E0,0.E0,
-     I           0.E0,0.E0, 15.E0, 10.E0, -1. E0, 5.E5, -4096.E0,
-     J           1.E0, 4096.E-6,
-     K           0.E0,0.E0, 7.E0, 4.E0, 0.E0,0.E0, -.5E0, -.25E0, 0.E0/
-*                   4096 = 2 ** 12
-      DATA D12  /4096.E0/
-      DTRUE(1,1) = 12.E0 / 130.E0
-      DTRUE(2,1) = 36.E0 / 130.E0
-      DTRUE(7,1) = -1.E0 / 6.E0
-      DTRUE(1,2) = 14.E0 / 75.E0
-      DTRUE(2,2) = 49.E0 / 75.E0
-      DTRUE(9,2) = 1.E0 / 7.E0
-      DTRUE(1,5) = 45.E-11 * (D12 * D12)
-      DTRUE(3,5) = 4.E5 / (3.E0 * D12)
-      DTRUE(6,5) = 1.E0 / D12
-      DTRUE(8,5) = 1.E4 / (3.E0 * D12)
-      DTRUE(1,6) = 4.E10 / (1.5E0 * D12 * D12)
-      DTRUE(2,6) = 2.E-2 / 1.5E0
-      DTRUE(8,6) = 5.E-7 * D12
-      DTRUE(1,7) = 4.E0 / 150.E0
-      DTRUE(2,7) = (2.E-10 / 1.5E0) * (D12 * D12)
-      DTRUE(7,7) = -DTRUE(6,5)
-      DTRUE(9,7) = 1.E4 / D12
-      DTRUE(1,8) = DTRUE(1,7)
-      DTRUE(2,8) = 2.E10 / (1.5E0 * D12 * D12)
-      DTRUE(1,9) = 32.E0 / 7.E0
-      DTRUE(2,9) = -16.E0 / 7.E0
-*     .. Executable Statements ..
-*
-*     Compute true values which cannot be prestored
-*     in decimal notation
-*
-      DBTRUE(1) = 1.0E0/0.6E0
-      DBTRUE(3) = -1.0E0/0.6E0
-      DBTRUE(5) = 1.0E0/0.6E0
-*
-      DO 20 K = 1, 8
-*        .. Set N=K for identification in output if any ..
-         N = K
-         IF (ICASE.EQ.3) THEN
-*           .. SROTG ..
-            IF (K.GT.8) GO TO 40
-            SA = DA1(K)
-            SB = DB1(K)
-            CALL SROTG(SA,SB,SC,SS)
-            CALL STEST1(SA,DATRUE(K),DATRUE(K),SFAC)
-            CALL STEST1(SB,DBTRUE(K),DBTRUE(K),SFAC)
-            CALL STEST1(SC,DC1(K),DC1(K),SFAC)
-            CALL STEST1(SS,DS1(K),DS1(K),SFAC)
-         ELSEIF (ICASE.EQ.11) THEN
-*           .. SROTMG ..
-            DO I=1,4
-               DTEMP(I)= DAB(I,K)
-               DTEMP(I+4) = 0.0
-            END DO
-            DTEMP(9) = 0.0
-            CALL SROTMG(DTEMP(1),DTEMP(2),DTEMP(3),DTEMP(4),DTEMP(5))
-            CALL STEST(9,DTEMP,DTRUE(1,K),DTRUE(1,K),SFAC)
-         ELSE
-            WRITE (NOUT,*) ' Shouldn''t be here in CHECK0'
-            STOP
-         END IF
-   20 CONTINUE
-   40 RETURN
-      END
-      SUBROUTINE CHECK1(SFAC)
-*     .. Parameters ..
-      INTEGER           NOUT
-      PARAMETER         (NOUT=6)
-*     .. Scalar Arguments ..
-      REAL              SFAC
-*     .. Scalars in Common ..
-      INTEGER           ICASE, INCX, INCY, N
-      LOGICAL           PASS
-*     .. Local Scalars ..
-      INTEGER           I, LEN, NP1
-*     .. Local Arrays ..
-      REAL              DTRUE1(5), DTRUE3(5), DTRUE5(8,5,2), DV(8,5,2),
-     +                  SA(10), STEMP(1), STRUE(8), SX(8)
-      INTEGER           ITRUE2(5)
-*     .. External Functions ..
-      REAL              SASUM, SNRM2
-      INTEGER           ISAMAX
-      EXTERNAL          SASUM, SNRM2, ISAMAX
-*     .. External Subroutines ..
-      EXTERNAL          ITEST1, SSCAL, STEST, STEST1
-*     .. Intrinsic Functions ..
-      INTRINSIC         MAX
-*     .. Common blocks ..
-      COMMON            /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      DATA              SA/0.3E0, -1.0E0, 0.0E0, 1.0E0, 0.3E0, 0.3E0,
-     +                  0.3E0, 0.3E0, 0.3E0, 0.3E0/
-      DATA              DV/0.1E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0,
-     +                  2.0E0, 2.0E0, 0.3E0, 3.0E0, 3.0E0, 3.0E0, 3.0E0,
-     +                  3.0E0, 3.0E0, 3.0E0, 0.3E0, -0.4E0, 4.0E0,
-     +                  4.0E0, 4.0E0, 4.0E0, 4.0E0, 4.0E0, 0.2E0,
-     +                  -0.6E0, 0.3E0, 5.0E0, 5.0E0, 5.0E0, 5.0E0,
-     +                  5.0E0, 0.1E0, -0.3E0, 0.5E0, -0.1E0, 6.0E0,
-     +                  6.0E0, 6.0E0, 6.0E0, 0.1E0, 8.0E0, 8.0E0, 8.0E0,
-     +                  8.0E0, 8.0E0, 8.0E0, 8.0E0, 0.3E0, 9.0E0, 9.0E0,
-     +                  9.0E0, 9.0E0, 9.0E0, 9.0E0, 9.0E0, 0.3E0, 2.0E0,
-     +                  -0.4E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0,
-     +                  0.2E0, 3.0E0, -0.6E0, 5.0E0, 0.3E0, 2.0E0,
-     +                  2.0E0, 2.0E0, 0.1E0, 4.0E0, -0.3E0, 6.0E0,
-     +                  -0.5E0, 7.0E0, -0.1E0, 3.0E0/
-      DATA              DTRUE1/0.0E0, 0.3E0, 0.5E0, 0.7E0, 0.6E0/
-      DATA              DTRUE3/0.0E0, 0.3E0, 0.7E0, 1.1E0, 1.0E0/
-      DATA              DTRUE5/0.10E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0,
-     +                  2.0E0, 2.0E0, 2.0E0, -0.3E0, 3.0E0, 3.0E0,
-     +                  3.0E0, 3.0E0, 3.0E0, 3.0E0, 3.0E0, 0.0E0, 0.0E0,
-     +                  4.0E0, 4.0E0, 4.0E0, 4.0E0, 4.0E0, 4.0E0,
-     +                  0.20E0, -0.60E0, 0.30E0, 5.0E0, 5.0E0, 5.0E0,
-     +                  5.0E0, 5.0E0, 0.03E0, -0.09E0, 0.15E0, -0.03E0,
-     +                  6.0E0, 6.0E0, 6.0E0, 6.0E0, 0.10E0, 8.0E0,
-     +                  8.0E0, 8.0E0, 8.0E0, 8.0E0, 8.0E0, 8.0E0,
-     +                  0.09E0, 9.0E0, 9.0E0, 9.0E0, 9.0E0, 9.0E0,
-     +                  9.0E0, 9.0E0, 0.09E0, 2.0E0, -0.12E0, 2.0E0,
-     +                  2.0E0, 2.0E0, 2.0E0, 2.0E0, 0.06E0, 3.0E0,
-     +                  -0.18E0, 5.0E0, 0.09E0, 2.0E0, 2.0E0, 2.0E0,
-     +                  0.03E0, 4.0E0, -0.09E0, 6.0E0, -0.15E0, 7.0E0,
-     +                  -0.03E0, 3.0E0/
-      DATA              ITRUE2/0, 1, 2, 2, 3/
-*     .. Executable Statements ..
-      DO 80 INCX = 1, 2
-         DO 60 NP1 = 1, 5
-            N = NP1 - 1
-            LEN = 2*MAX(N,1)
-*           .. Set vector arguments ..
-            DO 20 I = 1, LEN
-               SX(I) = DV(I,NP1,INCX)
-   20       CONTINUE
-*
-            IF (ICASE.EQ.7) THEN
-*              .. SNRM2 ..
-               STEMP(1) = DTRUE1(NP1)
-               CALL STEST1(SNRM2(N,SX,INCX),STEMP(1),STEMP,SFAC)
-            ELSE IF (ICASE.EQ.8) THEN
-*              .. SASUM ..
-               STEMP(1) = DTRUE3(NP1)
-               CALL STEST1(SASUM(N,SX,INCX),STEMP(1),STEMP,SFAC)
-            ELSE IF (ICASE.EQ.9) THEN
-*              .. SSCAL ..
-               CALL SSCAL(N,SA((INCX-1)*5+NP1),SX,INCX)
-               DO 40 I = 1, LEN
-                  STRUE(I) = DTRUE5(I,NP1,INCX)
-   40          CONTINUE
-               CALL STEST(LEN,SX,STRUE,STRUE,SFAC)
-            ELSE IF (ICASE.EQ.10) THEN
-*              .. ISAMAX ..
-               CALL ITEST1(ISAMAX(N,SX,INCX),ITRUE2(NP1))
-            ELSE
-               WRITE (NOUT,*) ' Shouldn''t be here in CHECK1'
-               STOP
-            END IF
-   60    CONTINUE
-   80 CONTINUE
-      RETURN
-      END
-      SUBROUTINE CHECK2(SFAC)
-*     .. Parameters ..
-      INTEGER           NOUT
-      PARAMETER         (NOUT=6)
-*     .. Scalar Arguments ..
-      REAL              SFAC
-*     .. Scalars in Common ..
-      INTEGER           ICASE, INCX, INCY, N
-      LOGICAL           PASS
-*     .. Local Scalars ..
-      REAL              SA
-      INTEGER           I, J, KI, KN, KNI, KPAR, KSIZE, LENX, LENY,
-     $                  MX, MY 
-*     .. Local Arrays ..
-      REAL              DT10X(7,4,4), DT10Y(7,4,4), DT7(4,4),
-     $                  DT8(7,4,4), DX1(7),
-     $                  DY1(7), SSIZE1(4), SSIZE2(14,2), SSIZE3(4),
-     $                  SSIZE(7), STX(7), STY(7), SX(7), SY(7),
-     $                  DPAR(5,4), DT19X(7,4,16),DT19XA(7,4,4),
-     $                  DT19XB(7,4,4), DT19XC(7,4,4),DT19XD(7,4,4),
-     $                  DT19Y(7,4,16), DT19YA(7,4,4),DT19YB(7,4,4),
-     $                  DT19YC(7,4,4), DT19YD(7,4,4), DTEMP(5),
-     $                  ST7B(4,4)
-      INTEGER           INCXS(4), INCYS(4), LENS(4,2), NS(4)
-*     .. External Functions ..
-      REAL              SDOT, SDSDOT
-      EXTERNAL          SDOT, SDSDOT
-*     .. External Subroutines ..
-      EXTERNAL          SAXPY, SCOPY, SROTM, SSWAP, STEST, STEST1
-*     .. Intrinsic Functions ..
-      INTRINSIC         ABS, MIN
-*     .. Common blocks ..
-      COMMON            /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      EQUIVALENCE (DT19X(1,1,1),DT19XA(1,1,1)),(DT19X(1,1,5),
-     A   DT19XB(1,1,1)),(DT19X(1,1,9),DT19XC(1,1,1)),
-     B   (DT19X(1,1,13),DT19XD(1,1,1))
-      EQUIVALENCE (DT19Y(1,1,1),DT19YA(1,1,1)),(DT19Y(1,1,5),
-     A   DT19YB(1,1,1)),(DT19Y(1,1,9),DT19YC(1,1,1)),
-     B   (DT19Y(1,1,13),DT19YD(1,1,1))
-
-      DATA              SA/0.3E0/
-      DATA              INCXS/1, 2, -2, -1/
-      DATA              INCYS/1, -2, 1, -2/
-      DATA              LENS/1, 1, 2, 4, 1, 1, 3, 7/
-      DATA              NS/0, 1, 2, 4/
-      DATA              DX1/0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.9E0, -0.3E0,
-     +                  -0.4E0/
-      DATA              DY1/0.5E0, -0.9E0, 0.3E0, 0.7E0, -0.6E0, 0.2E0,
-     +                  0.8E0/
-      DATA              DT7/0.0E0, 0.30E0, 0.21E0, 0.62E0, 0.0E0,
-     +                  0.30E0, -0.07E0, 0.85E0, 0.0E0, 0.30E0, -0.79E0,
-     +                  -0.74E0, 0.0E0, 0.30E0, 0.33E0, 1.27E0/
-      DATA              ST7B/ .1, .4, .31, .72,     .1, .4, .03, .95,
-     +                  .1, .4, -.69, -.64,   .1, .4, .43, 1.37/
-      DATA              DT8/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.68E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.68E0, -0.87E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.68E0, -0.87E0, 0.15E0,
-     +                  0.94E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.68E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.35E0, -0.9E0, 0.48E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.38E0, -0.9E0, 0.57E0, 0.7E0, -0.75E0,
-     +                  0.2E0, 0.98E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.68E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.35E0, -0.72E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.38E0,
-     +                  -0.63E0, 0.15E0, 0.88E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.68E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.68E0, -0.9E0, 0.33E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.68E0, -0.9E0, 0.33E0, 0.7E0,
-     +                  -0.75E0, 0.2E0, 1.04E0/
-      DATA              DT10X/0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.5E0, -0.9E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.5E0, -0.9E0, 0.3E0, 0.7E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.3E0, 0.1E0, 0.5E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.8E0, 0.1E0, -0.6E0,
-     +                  0.8E0, 0.3E0, -0.3E0, 0.5E0, 0.6E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.9E0,
-     +                  0.1E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.7E0,
-     +                  0.1E0, 0.3E0, 0.8E0, -0.9E0, -0.3E0, 0.5E0,
-     +                  0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.5E0, 0.3E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.5E0, 0.3E0, -0.6E0, 0.8E0, 0.0E0, 0.0E0,
-     +                  0.0E0/
-      DATA              DT10Y/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.6E0, 0.1E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, -0.5E0, -0.9E0, 0.6E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, -0.4E0, -0.9E0, 0.9E0,
-     +                  0.7E0, -0.5E0, 0.2E0, 0.6E0, 0.5E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.5E0,
-     +                  0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  -0.4E0, 0.9E0, -0.5E0, 0.6E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.6E0, -0.9E0, 0.1E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.6E0, -0.9E0, 0.1E0, 0.7E0,
-     +                  -0.5E0, 0.2E0, 0.8E0/
-      DATA              SSIZE1/0.0E0, 0.3E0, 1.6E0, 3.2E0/
-      DATA              SSIZE2/0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0,
-     +                  1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0,
-     +                  1.17E0, 1.17E0, 1.17E0/
-      DATA              SSIZE3/ .1, .4, 1.7, 3.3 /
-*
-*                         FOR SROTM
-*
-      DATA DPAR/-2.E0,  0.E0,0.E0,0.E0,0.E0,
-     A          -1.E0,  2.E0, -3.E0, -4.E0,  5.E0,
-     B           0.E0,  0.E0,  2.E0, -3.E0,  0.E0,
-     C           1.E0,  5.E0,  2.E0,  0.E0, -4.E0/
-*                        TRUE X RESULTS FOR ROTATIONS SROTM
-      DATA DT19XA/.6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     A            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     B            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     C            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     D            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     E           -.8E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     F           -.9E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     G           3.5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     H            .6E0,   .1E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     I           -.8E0,  3.8E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     J           -.9E0,  2.8E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     K           3.5E0,  -.4E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     L            .6E0,   .1E0,  -.5E0,   .8E0,          0.E0,0.E0,0.E0,
-     M           -.8E0,  3.8E0, -2.2E0, -1.2E0,          0.E0,0.E0,0.E0,
-     N           -.9E0,  2.8E0, -1.4E0, -1.3E0,          0.E0,0.E0,0.E0,
-     O           3.5E0,  -.4E0, -2.2E0,  4.7E0,          0.E0,0.E0,0.E0/
-*
-      DATA DT19XB/.6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     A            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     B            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     C            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     D            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     E           -.8E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     F           -.9E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     G           3.5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     H            .6E0,   .1E0,  -.5E0,             0.E0,0.E0,0.E0,0.E0,
-     I           0.E0,    .1E0, -3.0E0,             0.E0,0.E0,0.E0,0.E0,
-     J           -.3E0,   .1E0, -2.0E0,             0.E0,0.E0,0.E0,0.E0,
-     K           3.3E0,   .1E0, -2.0E0,             0.E0,0.E0,0.E0,0.E0,
-     L            .6E0,   .1E0,  -.5E0,   .8E0,   .9E0,  -.3E0,  -.4E0,
-     M          -2.0E0,   .1E0,  1.4E0,   .8E0,   .6E0,  -.3E0, -2.8E0,
-     N          -1.8E0,   .1E0,  1.3E0,   .8E0,  0.E0,   -.3E0, -1.9E0,
-     O           3.8E0,   .1E0, -3.1E0,   .8E0,  4.8E0,  -.3E0, -1.5E0 /
-*
-      DATA DT19XC/.6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     A            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     B            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     C            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     D            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     E           -.8E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     F           -.9E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     G           3.5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     H            .6E0,   .1E0,  -.5E0,             0.E0,0.E0,0.E0,0.E0,
-     I           4.8E0,   .1E0, -3.0E0,             0.E0,0.E0,0.E0,0.E0,
-     J           3.3E0,   .1E0, -2.0E0,             0.E0,0.E0,0.E0,0.E0,
-     K           2.1E0,   .1E0, -2.0E0,             0.E0,0.E0,0.E0,0.E0,
-     L            .6E0,   .1E0,  -.5E0,   .8E0,   .9E0,  -.3E0,  -.4E0,
-     M          -1.6E0,   .1E0, -2.2E0,   .8E0,  5.4E0,  -.3E0, -2.8E0,
-     N          -1.5E0,   .1E0, -1.4E0,   .8E0,  3.6E0,  -.3E0, -1.9E0,
-     O           3.7E0,   .1E0, -2.2E0,   .8E0,  3.6E0,  -.3E0, -1.5E0 /
-*
-      DATA DT19XD/.6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     A            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     B            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     C            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     D            .6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     E           -.8E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     F           -.9E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     G           3.5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     H            .6E0,   .1E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     I           -.8E0, -1.0E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     J           -.9E0,  -.8E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     K           3.5E0,   .8E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     L            .6E0,   .1E0,  -.5E0,   .8E0,          0.E0,0.E0,0.E0,
-     M           -.8E0, -1.0E0,  1.4E0, -1.6E0,          0.E0,0.E0,0.E0,
-     N           -.9E0,  -.8E0,  1.3E0, -1.6E0,          0.E0,0.E0,0.E0,
-     O           3.5E0,   .8E0, -3.1E0,  4.8E0,          0.E0,0.E0,0.E0/
-*                        TRUE Y RESULTS FOR ROTATIONS SROTM
-      DATA DT19YA/.5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     A            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     B            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     C            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     D            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     E            .7E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     F           1.7E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     G          -2.6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     H            .5E0,  -.9E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     I            .7E0, -4.8E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     J           1.7E0,  -.7E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     K          -2.6E0,  3.5E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     L            .5E0,  -.9E0,   .3E0,   .7E0,          0.E0,0.E0,0.E0,
-     M            .7E0, -4.8E0,  3.0E0,  1.1E0,          0.E0,0.E0,0.E0,
-     N           1.7E0,  -.7E0,  -.7E0,  2.3E0,          0.E0,0.E0,0.E0,
-     O          -2.6E0,  3.5E0,  -.7E0, -3.6E0,          0.E0,0.E0,0.E0/
-*
-      DATA DT19YB/.5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     A            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     B            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     C            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     D            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     E            .7E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     F           1.7E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     G          -2.6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     H            .5E0,  -.9E0,   .3E0,             0.E0,0.E0,0.E0,0.E0,
-     I           4.0E0,  -.9E0,  -.3E0,             0.E0,0.E0,0.E0,0.E0,
-     J           -.5E0,  -.9E0,  1.5E0,             0.E0,0.E0,0.E0,0.E0,
-     K          -1.5E0,  -.9E0, -1.8E0,             0.E0,0.E0,0.E0,0.E0,
-     L            .5E0,  -.9E0,   .3E0,   .7E0,  -.6E0,   .2E0,   .8E0,
-     M           3.7E0,  -.9E0, -1.2E0,   .7E0, -1.5E0,   .2E0,  2.2E0,
-     N           -.3E0,  -.9E0,  2.1E0,   .7E0, -1.6E0,   .2E0,  2.0E0,
-     O          -1.6E0,  -.9E0, -2.1E0,   .7E0,  2.9E0,   .2E0, -3.8E0 /
-*
-      DATA DT19YC/.5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     A            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     B            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     C            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     D            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     E            .7E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     F           1.7E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     G          -2.6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     H            .5E0,  -.9E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     I           4.0E0, -6.3E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     J           -.5E0,   .3E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     K          -1.5E0,  3.0E0,             0.E0,0.E0,0.E0,0.E0,0.E0,
-     L            .5E0,  -.9E0,   .3E0,   .7E0,          0.E0,0.E0,0.E0,
-     M           3.7E0, -7.2E0,  3.0E0,  1.7E0,          0.E0,0.E0,0.E0,
-     N           -.3E0,   .9E0,  -.7E0,  1.9E0,          0.E0,0.E0,0.E0,
-     O          -1.6E0,  2.7E0,  -.7E0, -3.4E0,          0.E0,0.E0,0.E0/
-*
-      DATA DT19YD/.5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     A            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     B            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     C            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     D            .5E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     E            .7E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     F           1.7E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     G          -2.6E0,                  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
-     H            .5E0,  -.9E0,   .3E0,             0.E0,0.E0,0.E0,0.E0,
-     I            .7E0,  -.9E0,  1.2E0,             0.E0,0.E0,0.E0,0.E0,
-     J           1.7E0,  -.9E0,   .5E0,             0.E0,0.E0,0.E0,0.E0,
-     K          -2.6E0,  -.9E0, -1.3E0,             0.E0,0.E0,0.E0,0.E0,
-     L            .5E0,  -.9E0,   .3E0,   .7E0,  -.6E0,   .2E0,   .8E0,
-     M            .7E0,  -.9E0,  1.2E0,   .7E0, -1.5E0,   .2E0,  1.6E0,
-     N           1.7E0,  -.9E0,   .5E0,   .7E0, -1.6E0,   .2E0,  2.4E0,
-     O          -2.6E0,  -.9E0, -1.3E0,   .7E0,  2.9E0,   .2E0, -4.0E0 /
-*
-*     .. Executable Statements ..
-*
-      DO 120 KI = 1, 4
-         INCX = INCXS(KI)
-         INCY = INCYS(KI)
-         MX = ABS(INCX)
-         MY = ABS(INCY)
-*
-         DO 100 KN = 1, 4
-            N = NS(KN)
-            KSIZE = MIN(2,KN)
-            LENX = LENS(KN,MX)
-            LENY = LENS(KN,MY)
-*           .. Initialize all argument arrays ..
-            DO 20 I = 1, 7
-               SX(I) = DX1(I)
-               SY(I) = DY1(I)
-   20       CONTINUE
-*
-            IF (ICASE.EQ.1) THEN
-*              .. SDOT ..
-               CALL STEST1(SDOT(N,SX,INCX,SY,INCY),DT7(KN,KI),SSIZE1(KN)
-     +                     ,SFAC)
-            ELSE IF (ICASE.EQ.2) THEN
-*              .. SAXPY ..
-               CALL SAXPY(N,SA,SX,INCX,SY,INCY)
-               DO 40 J = 1, LENY
-                  STY(J) = DT8(J,KN,KI)
-   40          CONTINUE
-               CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC)
-            ELSE IF (ICASE.EQ.5) THEN
-*              .. SCOPY ..
-               DO 60 I = 1, 7
-                  STY(I) = DT10Y(I,KN,KI)
-   60          CONTINUE
-               CALL SCOPY(N,SX,INCX,SY,INCY)
-               CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0E0)
-            ELSE IF (ICASE.EQ.6) THEN
-*              .. SSWAP ..
-               CALL SSWAP(N,SX,INCX,SY,INCY)
-               DO 80 I = 1, 7
-                  STX(I) = DT10X(I,KN,KI)
-                  STY(I) = DT10Y(I,KN,KI)
-   80          CONTINUE
-               CALL STEST(LENX,SX,STX,SSIZE2(1,1),1.0E0)
-               CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0E0)
-            ELSEIF (ICASE.EQ.12) THEN
-*              .. SROTM ..
-               KNI=KN+4*(KI-1)
-               DO KPAR=1,4
-                  DO I=1,7
-                     SX(I) = DX1(I)
-                     SY(I) = DY1(I)
-                     STX(I)= DT19X(I,KPAR,KNI)
-                     STY(I)= DT19Y(I,KPAR,KNI)
-                  END DO
-*
-                  DO I=1,5
-                     DTEMP(I) = DPAR(I,KPAR)
-                  END DO
-*
-                  DO  I=1,LENX
-                     SSIZE(I)=STX(I)
-                  END DO
-*                   SEE REMARK ABOVE ABOUT DT19X(1,2,7)
-*                       AND DT19X(5,3,8).
-                  IF ((KPAR .EQ. 2) .AND. (KNI .EQ. 7))
-     $               SSIZE(1) = 2.4E0
-                  IF ((KPAR .EQ. 3) .AND. (KNI .EQ. 8))
-     $               SSIZE(5) = 1.8E0
-*
-                  CALL   SROTM(N,SX,INCX,SY,INCY,DTEMP)
-                  CALL   STEST(LENX,SX,STX,SSIZE,SFAC)
-                  CALL   STEST(LENY,SY,STY,STY,SFAC)
-               END DO
-            ELSEIF (ICASE.EQ.13) THEN
-*              .. SDSDOT ..
-               CALL STEST1 (SDSDOT(N,.1,SX,INCX,SY,INCY),
-     $                 ST7B(KN,KI),SSIZE3(KN),SFAC)
-            ELSE
-               WRITE (NOUT,*) ' Shouldn''t be here in CHECK2'
-               STOP
-            END IF
-  100    CONTINUE
-  120 CONTINUE
-      RETURN
-      END
-      SUBROUTINE CHECK3(SFAC)
-*     .. Parameters ..
-      INTEGER           NOUT
-      PARAMETER         (NOUT=6)
-*     .. Scalar Arguments ..
-      REAL              SFAC
-*     .. Scalars in Common ..
-      INTEGER           ICASE, INCX, INCY, N
-      LOGICAL           PASS
-*     .. Local Scalars ..
-      REAL              SC, SS
-      INTEGER           I, K, KI, KN, KSIZE, LENX, LENY, MX, MY
-*     .. Local Arrays ..
-      REAL              COPYX(5), COPYY(5), DT9X(7,4,4), DT9Y(7,4,4),
-     +                  DX1(7), DY1(7), MWPC(11), MWPS(11), MWPSTX(5),
-     +                  MWPSTY(5), MWPTX(11,5), MWPTY(11,5), MWPX(5),
-     +                  MWPY(5), SSIZE2(14,2), STX(7), STY(7), SX(7),
-     +                  SY(7)
-      INTEGER           INCXS(4), INCYS(4), LENS(4,2), MWPINX(11),
-     +                  MWPINY(11), MWPN(11), NS(4)
-*     .. External Subroutines ..
-      EXTERNAL          SROT, STEST
-*     .. Intrinsic Functions ..
-      INTRINSIC         ABS, MIN
-*     .. Common blocks ..
-      COMMON            /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Data statements ..
-      DATA              INCXS/1, 2, -2, -1/
-      DATA              INCYS/1, -2, 1, -2/
-      DATA              LENS/1, 1, 2, 4, 1, 1, 3, 7/
-      DATA              NS/0, 1, 2, 4/
-      DATA              DX1/0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.9E0, -0.3E0,
-     +                  -0.4E0/
-      DATA              DY1/0.5E0, -0.9E0, 0.3E0, 0.7E0, -0.6E0, 0.2E0,
-     +                  0.8E0/
-      DATA              SC, SS/0.8E0, 0.6E0/
-      DATA              DT9X/0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.78E0, -0.46E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.78E0, -0.46E0, -0.22E0,
-     +                  1.06E0, 0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.78E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.66E0, 0.1E0, -0.1E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.96E0, 0.1E0, -0.76E0, 0.8E0, 0.90E0,
-     +                  -0.3E0, -0.02E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.78E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.06E0, 0.1E0,
-     +                  -0.1E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.90E0,
-     +                  0.1E0, -0.22E0, 0.8E0, 0.18E0, -0.3E0, -0.02E0,
-     +                  0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.78E0, 0.26E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.78E0, 0.26E0, -0.76E0, 1.12E0,
-     +                  0.0E0, 0.0E0, 0.0E0/
-      DATA              DT9Y/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.54E0,
-     +                  0.08E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.04E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.7E0,
-     +                  -0.9E0, -0.12E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.64E0, -0.9E0, -0.30E0, 0.7E0, -0.18E0, 0.2E0,
-     +                  0.28E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.7E0, -1.08E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.64E0, -1.26E0,
-     +                  0.54E0, 0.20E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.7E0,
-     +                  -0.18E0, 0.2E0, 0.16E0/
-      DATA              SSIZE2/0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +                  0.0E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0,
-     +                  1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0,
-     +                  1.17E0, 1.17E0, 1.17E0/
-*     .. Executable Statements ..
-*
-      DO 60 KI = 1, 4
-         INCX = INCXS(KI)
-         INCY = INCYS(KI)
-         MX = ABS(INCX)
-         MY = ABS(INCY)
-*
-         DO 40 KN = 1, 4
-            N = NS(KN)
-            KSIZE = MIN(2,KN)
-            LENX = LENS(KN,MX)
-            LENY = LENS(KN,MY)
-*
-            IF (ICASE.EQ.4) THEN
-*              .. SROT ..
-               DO 20 I = 1, 7
-                  SX(I) = DX1(I)
-                  SY(I) = DY1(I)
-                  STX(I) = DT9X(I,KN,KI)
-                  STY(I) = DT9Y(I,KN,KI)
-   20          CONTINUE
-               CALL SROT(N,SX,INCX,SY,INCY,SC,SS)
-               CALL STEST(LENX,SX,STX,SSIZE2(1,KSIZE),SFAC)
-               CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC)
-            ELSE
-               WRITE (NOUT,*) ' Shouldn''t be here in CHECK3'
-               STOP
-            END IF
-   40    CONTINUE
-   60 CONTINUE
-*
-      MWPC(1) = 1
-      DO 80 I = 2, 11
-         MWPC(I) = 0
-   80 CONTINUE
-      MWPS(1) = 0
-      DO 100 I = 2, 6
-         MWPS(I) = 1
-  100 CONTINUE
-      DO 120 I = 7, 11
-         MWPS(I) = -1
-  120 CONTINUE
-      MWPINX(1) = 1
-      MWPINX(2) = 1
-      MWPINX(3) = 1
-      MWPINX(4) = -1
-      MWPINX(5) = 1
-      MWPINX(6) = -1
-      MWPINX(7) = 1
-      MWPINX(8) = 1
-      MWPINX(9) = -1
-      MWPINX(10) = 1
-      MWPINX(11) = -1
-      MWPINY(1) = 1
-      MWPINY(2) = 1
-      MWPINY(3) = -1
-      MWPINY(4) = -1
-      MWPINY(5) = 2
-      MWPINY(6) = 1
-      MWPINY(7) = 1
-      MWPINY(8) = -1
-      MWPINY(9) = -1
-      MWPINY(10) = 2
-      MWPINY(11) = 1
-      DO 140 I = 1, 11
-         MWPN(I) = 5
-  140 CONTINUE
-      MWPN(5) = 3
-      MWPN(10) = 3
-      DO 160 I = 1, 5
-         MWPX(I) = I
-         MWPY(I) = I
-         MWPTX(1,I) = I
-         MWPTY(1,I) = I
-         MWPTX(2,I) = I
-         MWPTY(2,I) = -I
-         MWPTX(3,I) = 6 - I
-         MWPTY(3,I) = I - 6
-         MWPTX(4,I) = I
-         MWPTY(4,I) = -I
-         MWPTX(6,I) = 6 - I
-         MWPTY(6,I) = I - 6
-         MWPTX(7,I) = -I
-         MWPTY(7,I) = I
-         MWPTX(8,I) = I - 6
-         MWPTY(8,I) = 6 - I
-         MWPTX(9,I) = -I
-         MWPTY(9,I) = I
-         MWPTX(11,I) = I - 6
-         MWPTY(11,I) = 6 - I
-  160 CONTINUE
-      MWPTX(5,1) = 1
-      MWPTX(5,2) = 3
-      MWPTX(5,3) = 5
-      MWPTX(5,4) = 4
-      MWPTX(5,5) = 5
-      MWPTY(5,1) = -1
-      MWPTY(5,2) = 2
-      MWPTY(5,3) = -2
-      MWPTY(5,4) = 4
-      MWPTY(5,5) = -3
-      MWPTX(10,1) = -1
-      MWPTX(10,2) = -3
-      MWPTX(10,3) = -5
-      MWPTX(10,4) = 4
-      MWPTX(10,5) = 5
-      MWPTY(10,1) = 1
-      MWPTY(10,2) = 2
-      MWPTY(10,3) = 2
-      MWPTY(10,4) = 4
-      MWPTY(10,5) = 3
-      DO 200 I = 1, 11
-         INCX = MWPINX(I)
-         INCY = MWPINY(I)
-         DO 180 K = 1, 5
-            COPYX(K) = MWPX(K)
-            COPYY(K) = MWPY(K)
-            MWPSTX(K) = MWPTX(I,K)
-            MWPSTY(K) = MWPTY(I,K)
-  180    CONTINUE
-         CALL SROT(MWPN(I),COPYX,INCX,COPYY,INCY,MWPC(I),MWPS(I))
-         CALL STEST(5,COPYX,MWPSTX,MWPSTX,SFAC)
-         CALL STEST(5,COPYY,MWPSTY,MWPSTY,SFAC)
-  200 CONTINUE
-      RETURN
-      END
-      SUBROUTINE STEST(LEN,SCOMP,STRUE,SSIZE,SFAC)
-*     ********************************* STEST **************************
-*
-*     THIS SUBR COMPARES ARRAYS  SCOMP() AND STRUE() OF LENGTH LEN TO
-*     SEE IF THE TERM BY TERM DIFFERENCES, MULTIPLIED BY SFAC, ARE
-*     NEGLIGIBLE.
-*
-*     C. L. LAWSON, JPL, 1974 DEC 10
-*
-*     .. Parameters ..
-      INTEGER          NOUT
-      REAL             ZERO
-      PARAMETER        (NOUT=6, ZERO=0.0E0)
-*     .. Scalar Arguments ..
-      REAL             SFAC
-      INTEGER          LEN
-*     .. Array Arguments ..
-      REAL             SCOMP(LEN), SSIZE(LEN), STRUE(LEN)
-*     .. Scalars in Common ..
-      INTEGER          ICASE, INCX, INCY, N
-      LOGICAL          PASS
-*     .. Local Scalars ..
-      REAL             SD
-      INTEGER          I
-*     .. External Functions ..
-      REAL             SDIFF
-      EXTERNAL         SDIFF
-*     .. Intrinsic Functions ..
-      INTRINSIC        ABS
-*     .. Common blocks ..
-      COMMON           /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Executable Statements ..
-*
-      DO 40 I = 1, LEN
-         SD = SCOMP(I) - STRUE(I)
-         IF (ABS(SFAC*SD) .LE. ABS(SSIZE(I))*EPSILON(ZERO))
-     +       GO TO 40
-*
-*                             HERE    SCOMP(I) IS NOT CLOSE TO STRUE(I).
-*
-         IF ( .NOT. PASS) GO TO 20
-*                             PRINT FAIL MESSAGE AND HEADER.
-         PASS = .FALSE.
-         WRITE (NOUT,99999)
-         WRITE (NOUT,99998)
-   20    WRITE (NOUT,99997) ICASE, N, INCX, INCY, I, SCOMP(I),
-     +     STRUE(I), SD, SSIZE(I)
-   40 CONTINUE
-      RETURN
-*
-99999 FORMAT ('                                       FAIL')
-99998 FORMAT (/' CASE  N INCX INCY  I                            ',
-     +       ' COMP(I)                             TRUE(I)  DIFFERENCE',
-     +       '     SIZE(I)',/1X)
-99997 FORMAT (1X,I4,I3,2I5,I3,2E36.8,2E12.4)
-      END
-      SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
-*     ************************* STEST1 *****************************
-*
-*     THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
-*     REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
-*     ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
-*
-*     C.L. LAWSON, JPL, 1978 DEC 6
-*
-*     .. Scalar Arguments ..
-      REAL              SCOMP1, SFAC, STRUE1
-*     .. Array Arguments ..
-      REAL              SSIZE(*)
-*     .. Local Arrays ..
-      REAL              SCOMP(1), STRUE(1)
-*     .. External Subroutines ..
-      EXTERNAL          STEST
-*     .. Executable Statements ..
-*
-      SCOMP(1) = SCOMP1
-      STRUE(1) = STRUE1
-      CALL STEST(1,SCOMP,STRUE,SSIZE,SFAC)
-*
-      RETURN
-      END
-      REAL             FUNCTION SDIFF(SA,SB)
-*     ********************************* SDIFF **************************
-*     COMPUTES DIFFERENCE OF TWO NUMBERS.  C. L. LAWSON, JPL 1974 FEB 15
-*
-*     .. Scalar Arguments ..
-      REAL                            SA, SB
-*     .. Executable Statements ..
-      SDIFF = SA - SB
-      RETURN
-      END
-      SUBROUTINE ITEST1(ICOMP,ITRUE)
-*     ********************************* ITEST1 *************************
-*
-*     THIS SUBROUTINE COMPARES THE VARIABLES ICOMP AND ITRUE FOR
-*     EQUALITY.
-*     C. L. LAWSON, JPL, 1974 DEC 10
-*
-*     .. Parameters ..
-      INTEGER           NOUT
-      PARAMETER         (NOUT=6)
-*     .. Scalar Arguments ..
-      INTEGER           ICOMP, ITRUE
-*     .. Scalars in Common ..
-      INTEGER           ICASE, INCX, INCY, N
-      LOGICAL           PASS
-*     .. Local Scalars ..
-      INTEGER           ID
-*     .. Common blocks ..
-      COMMON            /COMBLA/ICASE, N, INCX, INCY, PASS
-*     .. Executable Statements ..
-*
-      IF (ICOMP.EQ.ITRUE) GO TO 40
-*
-*                            HERE ICOMP IS NOT EQUAL TO ITRUE.
-*
-      IF ( .NOT. PASS) GO TO 20
-*                             PRINT FAIL MESSAGE AND HEADER.
-      PASS = .FALSE.
-      WRITE (NOUT,99999)
-      WRITE (NOUT,99998)
-   20 ID = ICOMP - ITRUE
-      WRITE (NOUT,99997) ICASE, N, INCX, INCY, ICOMP, ITRUE, ID
-   40 CONTINUE
-      RETURN
-*
-99999 FORMAT ('                                       FAIL')
-99998 FORMAT (/' CASE  N INCX INCY                               ',
-     +       ' COMP                                TRUE     DIFFERENCE',
-     +       /1X)
-99997 FORMAT (1X,I4,I3,2I5,2I36,I12)
-      END
diff --git a/resources/3rdparty/eigen/cmake/FindMetis.cmake b/resources/3rdparty/eigen/cmake/FindMetis.cmake
deleted file mode 100644
index 627c3e9ae..000000000
--- a/resources/3rdparty/eigen/cmake/FindMetis.cmake
+++ /dev/null
@@ -1,25 +0,0 @@
-# Pastix requires SCOTCH or METIS (partitioning and reordering tools)
-
-if (METIS_INCLUDES AND METIS_LIBRARIES)
-  set(METIS_FIND_QUIETLY TRUE)
-endif (METIS_INCLUDES AND METIS_LIBRARIES)
-
-find_path(METIS_INCLUDES 
-  NAMES 
-  metis.h 
-  PATHS 
-  $ENV{METISDIR} 
-  ${INCLUDE_INSTALL_DIR} 
-  PATH_SUFFIXES 
-  metis
-  include
-)
-
-
-find_library(METIS_LIBRARIES metis PATHS $ENV{METISDIR} ${LIB_INSTALL_DIR} PATH_SUFFIXES lib)
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(METIS DEFAULT_MSG
-                                  METIS_INCLUDES METIS_LIBRARIES)
-
-mark_as_advanced(METIS_INCLUDES METIS_LIBRARIES)
diff --git a/resources/3rdparty/eigen/demos/opengl/CMakeLists.txt b/resources/3rdparty/eigen/demos/opengl/CMakeLists.txt
deleted file mode 100644
index b98a30c01..000000000
--- a/resources/3rdparty/eigen/demos/opengl/CMakeLists.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-find_package(Qt4 REQUIRED)
-find_package(OpenGL REQUIRED)
-
-set(QT_USE_QTOPENGL TRUE)
-include(${QT_USE_FILE})
-
-set(CMAKE_INCLUDE_CURRENT_DIR ON)
-
-include_directories( ${QT_INCLUDE_DIR} )
-
-set(quaternion_demo_SRCS  gpuhelper.cpp icosphere.cpp camera.cpp trackball.cpp quaternion_demo.cpp)
-
-qt4_automoc(${quaternion_demo_SRCS})
-
-add_executable(quaternion_demo ${quaternion_demo_SRCS})
-add_dependencies(demos quaternion_demo)
-
-target_link_libraries(quaternion_demo
-  ${QT_QTCORE_LIBRARY}    ${QT_QTGUI_LIBRARY}
-  ${QT_QTOPENGL_LIBRARY}  ${OPENGL_LIBRARIES} )
diff --git a/resources/3rdparty/eigen/doc/C09_TutorialSparse.dox b/resources/3rdparty/eigen/doc/C09_TutorialSparse.dox
deleted file mode 100644
index 6a16c3ae2..000000000
--- a/resources/3rdparty/eigen/doc/C09_TutorialSparse.dox
+++ /dev/null
@@ -1,455 +0,0 @@
-namespace Eigen {
-
-/** \page TutorialSparse Tutorial page 9 - Sparse Matrix
-    \ingroup Tutorial
-
-\li \b Previous: \ref TutorialGeometry
-\li \b Next: \ref TutorialMapClass
-
-\b Table \b of \b contents \n
-  - \ref TutorialSparseIntro
-  - \ref TutorialSparseExample "Example"
-  - \ref TutorialSparseSparseMatrix
-  - \ref TutorialSparseFilling
-  - \ref TutorialSparseDirectSolvers
-  - \ref TutorialSparseFeatureSet
-    - \ref TutorialSparse_BasicOps
-    - \ref TutorialSparse_Products
-    - \ref TutorialSparse_TriangularSelfadjoint
-    - \ref TutorialSparse_Submat
-
-
-<hr>
-
-Manipulating and solving sparse problems involves various modules which are summarized below:
-
-<table class="manual">
-<tr><th>Module</th><th>Header file</th><th>Contents</th></tr>
-<tr><td>\link Sparse_Module SparseCore \endlink</td><td>\code#include <Eigen/SparseCore>\endcode</td><td>SparseMatrix and SparseVector classes, matrix assembly, basic sparse linear algebra (including sparse triangular solvers)</td></tr>
-<tr><td>\link SparseCholesky_Module SparseCholesky \endlink</td><td>\code#include <Eigen/SparseCholesky>\endcode</td><td>Direct sparse LLT and LDLT Cholesky factorization to solve sparse self-adjoint positive definite problems</td></tr>
-<tr><td>\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlink</td><td>\code#include <Eigen/IterativeLinearSolvers>\endcode</td><td>Iterative solvers to solve large general linear square problems (including self-adjoint positive definite problems)</td></tr>
-<tr><td></td><td>\code#include <Eigen/Sparse>\endcode</td><td>Includes all the above modules</td></tr>
-</table>
-
-\section TutorialSparseIntro Sparse matrix representation
-
-In many applications (e.g., finite element methods) it is common to deal with very large matrices where only a few coefficients are different from zero.  In such cases, memory consumption can be reduced and performance increased by using a specialized representation storing only the nonzero coefficients. Such a matrix is called a sparse matrix.
-
-\b The \b %SparseMatrix \b class
-
-The class SparseMatrix is the main sparse matrix representation of Eigen's sparse module; it offers high performance and low memory usage.
-It implements a more versatile variant of the widely-used Compressed Column (or Row) Storage scheme.
-It consists of four compact arrays:
- - \c Values: stores the coefficient values of the non-zeros.
- - \c InnerIndices: stores the row (resp. column) indices of the non-zeros.
- - \c OuterStarts: stores for each column (resp. row) the index of the first non-zero in the previous two arrays.
- - \c InnerNNZs: stores the number of non-zeros of each column (resp. row).
-The word \c inner refers to an \em inner \em vector that is a column for a column-major matrix, or a row for a row-major matrix.
-The word \c outer refers to the other direction.
-
-This storage scheme is best explained with an example. The following matrix
-<table class="manual">
-<tr><td> 0</td><td>3</td><td> 0</td><td>0</td><td> 0</td></tr>
-<tr><td>22</td><td>0</td><td> 0</td><td>0</td><td>17</td></tr>
-<tr><td> 7</td><td>5</td><td> 0</td><td>1</td><td> 0</td></tr>
-<tr><td> 0</td><td>0</td><td> 0</td><td>0</td><td> 0</td></tr>
-<tr><td> 0</td><td>0</td><td>14</td><td>0</td><td> 8</td></tr>
-</table>
-
-and one of its possible sparse, \b column \b major representation:
-<table class="manual">
-<tr><td>Values:</td>        <td>22</td><td>7</td><td>_</td><td>3</td><td>5</td><td>14</td><td>_</td><td>_</td><td>1</td><td>_</td><td>17</td><td>8</td></tr>
-<tr><td>InnerIndices:</td>  <td> 1</td><td>2</td><td>_</td><td>0</td><td>2</td><td> 4</td><td>_</td><td>_</td><td>2</td><td>_</td><td> 1</td><td>4</td></tr>
-</table>
-<table class="manual">
-<tr><td>OuterStarts:</td><td>0</td><td>3</td><td>5</td><td>8</td><td>10</td><td>\em 12 </td></tr>
-<tr><td>InnerNNZs:</td>    <td>2</td><td>2</td><td>1</td><td>1</td><td> 2</td><td></td></tr>
-</table>
-
-Currently the elements of a given inner vector are always guaranteed to be sorted by increasing inner indices.
-The \c "_" indicates available free space to quickly insert new elements.
-Assuming no reallocation is needed, inserting a random element therefore costs O(nnz_j), where nnz_j is the number of nonzeros of the respective inner vector.
-On the other hand, inserting elements with increasing inner indices in a given inner vector is much more efficient since this only requires increasing the respective \c InnerNNZs entry, which is an O(1) operation.
-
-The case where no empty space is available is a special case, and is referred to as the \em compressed mode.
-It corresponds to the widely used Compressed Column (or Row) Storage schemes (CCS or CRS).
-Any SparseMatrix can be turned into this form by calling the SparseMatrix::makeCompressed() function.
-In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because of the equality: \c InnerNNZs[j] = \c OuterStarts[j+1]-\c OuterStarts[j].
-Therefore, in practice a call to SparseMatrix::makeCompressed() frees this buffer.
-
-It is worth noting that most of our wrappers to external libraries require compressed matrices as inputs.
-
-%Eigen's operations always produce \b compressed sparse matrices.
-On the other hand, the insertion of a new element into a SparseMatrix converts the latter to the \b uncompressed mode.
-
-Here is the previous matrix represented in compressed mode:
-<table class="manual">
-<tr><td>Values:</td>        <td>22</td><td>7</td><td>3</td><td>5</td><td>14</td><td>1</td><td>17</td><td>8</td></tr>
-<tr><td>InnerIndices:</td>  <td> 1</td><td>2</td><td>0</td><td>2</td><td> 4</td><td>2</td><td> 1</td><td>4</td></tr>
-</table>
-<table class="manual">
-<tr><td>OuterStarts:</td><td>0</td><td>2</td><td>4</td><td>5</td><td>6</td><td>\em 8 </td></tr>
-</table>
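-
-As a small illustration of the arrays described above, the following minimal sketch (assuming \c mat has already been filled) queries the compression state and accesses the arrays through the SparseMatrix pointer accessors; the variable names are arbitrary:
-\code
-SparseMatrix<double> mat(5,5);
-// ... fill mat with insert() or setFromTriplets() ...
-bool compressed = mat.isCompressed();    // false after random insertions, true after makeCompressed()
-mat.makeCompressed();                    // squeeze out the free space and switch to compressed mode
-const double* values       = mat.valuePtr();       // the Values array
-const int*    innerIndices = mat.innerIndexPtr();  // the InnerIndices array
-const int*    outerStarts  = mat.outerIndexPtr();  // the OuterStarts array
-// in uncompressed mode, mat.innerNonZeroPtr() additionally exposes the InnerNNZs array
-\endcode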
-
-A SparseVector is a special case of a SparseMatrix where only the \c Values and \c InnerIndices arrays are stored.
-There is no notion of compressed/uncompressed mode for a SparseVector.
-
-
-\section TutorialSparseExample First example
-
-Before describing each individual class, let's start with the following typical example: solving the Laplace equation \f$ \Delta u = 0 \f$ on a regular 2D grid using a finite difference scheme and Dirichlet boundary conditions.
-Such a problem can be expressed mathematically as a linear problem of the form \f$ Ax=b \f$ where \f$ x \f$ is the vector of \c m unknowns (in our case, the values of the pixels), \f$ b \f$ is the right-hand-side vector resulting from the boundary conditions, and \f$ A \f$ is an \f$ m \times m \f$ matrix containing only a few non-zero elements resulting from the discretization of the Laplacian operator.
-
-<table class="manual">
-<tr><td>
-\include Tutorial_sparse_example.cpp
-</td>
-<td>
-\image html Tutorial_sparse_example.jpeg
-</td></tr></table>
-
-In this example, we start by defining a column-major sparse matrix type of double \c SparseMatrix<double>, and a triplet list of the same scalar type \c  Triplet<double>. A triplet is a simple object representing a non-zero entry as the triplet: \c row index, \c column index, \c value.
-
-In the main function, we declare a list \c coefficients of triplets (as a std vector) and the right hand side vector \f$ b \f$ which are filled by the \a buildProblem function.
-The raw and flat list of non-zero entries is then converted to a true SparseMatrix object \c A.
-Note that the elements of the list do not have to be sorted, and possible duplicate entries will be summed up.
-
-The last step consists of effectively solving the assembled problem.
-Since the resulting matrix \c A is symmetric by construction, we can perform a direct Cholesky factorization via the SimplicialLDLT class which behaves like its LDLT counterpart for dense objects.
-
-The resulting vector \c x contains the pixel values as a 1D array which is saved to a jpeg file shown on the right of the code above.
-
-Describing the \a buildProblem and \a save functions is beyond the scope of this tutorial. They are given \ref TutorialSparse_example_details "here" for the curious and for reproducibility purposes.
-
-
-
-
-\section TutorialSparseSparseMatrix The SparseMatrix class
-
-\b %Matrix \b and \b vector \b properties \n
-
-The SparseMatrix and SparseVector classes take three template arguments:
- * the scalar type (e.g., double)
- * the storage order (ColMajor or RowMajor, the default is ColMajor)
- * the inner index type (default is \c int).
-
-As for dense Matrix objects, constructors take the size of the object.
-Here are some examples:
-
-\code
-SparseMatrix<std::complex<float> > mat(1000,2000);         // declares a 1000x2000 column-major compressed sparse matrix of complex<float>
-SparseMatrix<double,RowMajor> mat(1000,2000);              // declares a 1000x2000 row-major compressed sparse matrix of double
-SparseVector<std::complex<float> > vec(1000);              // declares a column sparse vector of complex<float> of size 1000
-SparseVector<double,RowMajor> vec(1000);                   // declares a row sparse vector of double of size 1000
-\endcode
-
-In the rest of the tutorial, \c mat and \c vec represent any sparse-matrix and sparse-vector objects, respectively.
-
-The dimensions of a matrix can be queried using the following functions:
-<table class="manual">
-<tr><td>Standard \n dimensions</td><td>\code
-mat.rows()
-mat.cols()\endcode</td>
-<td>\code
-vec.size() \endcode</td>
-</tr>
-<tr><td>Sizes along the \n inner/outer dimensions</td><td>\code
-mat.innerSize()
-mat.outerSize()\endcode</td>
-<td></td>
-</tr>
-<tr><td>Number of non \n zero coefficients</td><td>\code
-mat.nonZeros() \endcode</td>
-<td>\code
-vec.nonZeros() \endcode</td></tr>
-</table>
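-
-For instance, for the 5x5 example matrix used above, a minimal sketch of these queries gives:
-\code
-SparseMatrix<double> mat(5,5);
-// ... fill mat as in the storage example above ...
-mat.rows();       // 5
-mat.cols();       // 5
-mat.outerSize();  // 5: number of columns for a column-major matrix
-mat.innerSize();  // 5: number of rows for a column-major matrix
-mat.nonZeros();   // 8: number of stored non-zero coefficients
-\endcode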
-
-
-\b Iterating \b over \b the \b nonzero \b coefficients \n
-
-Random access to the elements of a sparse object can be done through the \c coeffRef(i,j) function.
-However, this function involves a quite expensive binary search.
-In most cases, one only wants to iterate over the non-zero elements. This is achieved by a standard loop over the outer dimension, and then by iterating over the non-zeros of the current inner vector via an InnerIterator. Thus, the non-zero entries have to be visited in the same order as the storage order.
-Here is an example:
-<table class="manual">
-<tr><td>
-\code
-SparseMatrix<double> mat(rows,cols);
-for (int k=0; k<mat.outerSize(); ++k)
-  for (SparseMatrix<double>::InnerIterator it(mat,k); it; ++it)
-  {
-    it.value();
-    it.row();   // row index
-    it.col();   // col index (here it is equal to k)
-    it.index(); // inner index, here it is equal to it.row()
-  }
-\endcode
-</td><td>
-\code
-SparseVector<double> vec(size);
-for (SparseVector<double>::InnerIterator it(vec); it; ++it)
-{
-  it.value(); // == vec[ it.index() ]
-  it.index();
-}
-\endcode
-</td></tr>
-</table>
-For a writable expression, the referenced value can be modified using the valueRef() function.
-If the type of the sparse matrix or vector depends on a template parameter, then the \c typename keyword is
-required to indicate that \c InnerIterator denotes a type; see \ref TopicTemplateKeyword for details.
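-
-For instance, inside a function template the \c typename keyword is mandatory; a minimal sketch (the function itself is only illustrative):
-\code
-template<typename Scalar>
-Scalar sumOfNonZeros(const SparseMatrix<Scalar>& mat)
-{
-  Scalar sum = Scalar(0);
-  for (int k=0; k<mat.outerSize(); ++k)
-    for (typename SparseMatrix<Scalar>::InnerIterator it(mat,k); it; ++it)
-      sum += it.value();   // typename is required because the iterator type depends on Scalar
-  return sum;
-}
-\endcode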
-
-
-\section TutorialSparseFilling Filling a sparse matrix
-
-Because of the special storage scheme of a SparseMatrix, special care has to be taken when adding new nonzero entries.
-For instance, the cost of a single purely random insertion into a SparseMatrix is \c O(nnz), where \c nnz is the current number of non-zero coefficients.
-
-The simplest way to create a sparse matrix while guaranteeing good performance is thus to first build a list of so-called \em triplets, and then convert it to a SparseMatrix.
-
-Here is a typical usage example:
-\code
-typedef Eigen::Triplet<double> T;
-std::vector<T> tripletList;
-tripletList.reserve(estimation_of_entries);
-for(...)
-{
-  // ...
-  tripletList.push_back(T(i,j,v_ij));
-}
-SparseMatrixType mat(rows,cols);
-mat.setFromTriplets(tripletList.begin(), tripletList.end());
-// mat is ready to go!
-\endcode
-The \c std::vector of triplets might contain the elements in arbitrary order, and might even contain duplicated elements that will be summed up by setFromTriplets().
-See the SparseMatrix::setFromTriplets() function and class Triplet for more details.
-
-
-In some cases, however, slightly higher performance and lower memory consumption can be reached by directly inserting the non-zeros into the destination matrix.
-A typical scenario of this approach is illustrated below:
-\code
-1: SparseMatrix<double> mat(rows,cols);         // default is column major
-2: mat.reserve(VectorXi::Constant(cols,6));
-3: for each i,j such that v_ij != 0
-4:   mat.insert(i,j) = v_ij;                    // alternative: mat.coeffRef(i,j) += v_ij;
-5: mat.makeCompressed();                        // optional
-\endcode
-
-- The key ingredient here is line 2, where we reserve room for 6 non-zeros per column. In many cases, the number of non-zeros per column or row can easily be known in advance. If it varies significantly from one inner vector to another, then it is possible to specify a reserve size for each inner vector by providing a vector object with an operator[](int j) returning the reserve size of the \c j-th inner vector (e.g., via a VectorXi or std::vector<int>); a sketch is given right after this list. If only a rough estimate of the number of non-zeros per inner vector can be obtained, it is highly recommended to overestimate it rather than underestimate it. If this line is omitted, then the first insertion of a new element will reserve room for 2 elements per inner vector.
-- Line 4 performs a sorted insertion. In this example, the ideal case is when the \c j-th column is not full and contains non-zeros whose inner indices are smaller than \c i. In this case, the operation boils down to a trivial O(1) operation.
-- When calling insert(i,j), the element (i,j) must not already exist; otherwise, use the coeffRef(i,j) method, which allows one to, e.g., accumulate values. coeffRef(i,j) first performs a binary search and finally calls insert(i,j) if the element does not already exist. It is more flexible than insert() but also more costly.
-- Line 5 suppresses the remaining empty space and transforms the matrix into compressed column storage.
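-
-As a concrete sketch of the per-inner-vector reserve mentioned in the first item, here is the assembly of a simple tridiagonal matrix (the size and values are of course only illustrative):
-\code
-int n = 100;
-SparseMatrix<double> mat(n,n);                    // default is column major
-VectorXi reserveSizes = VectorXi::Constant(n,3);  // interior columns hold at most 3 non-zeros
-reserveSizes(0) = reserveSizes(n-1) = 2;          // boundary columns hold only 2 non-zeros
-mat.reserve(reserveSizes);
-for (int j=0; j<n; ++j)
-{
-  if (j>0)   mat.insert(j-1,j) = -1.0;            // within each column the row indices increase,
-  mat.insert(j,j) = 2.0;                          // so every insert() is a cheap sorted insertion
-  if (j<n-1) mat.insert(j+1,j) = -1.0;
-}
-mat.makeCompressed();                             // optional
-\endcode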
-
-
-\section TutorialSparseDirectSolvers Solving linear problems
-
-%Eigen currently provides a limited set of built-in solvers, as well as wrappers to external solver libraries.
-They are summarized in the following table:
-
-<table class="manual">
-<tr><th>Class</th><th>Module</th><th>Solver kind</th><th>Matrix kind</th><th>Features related to performance</th>
-    <th>Dependencies,License</th><th class="width20em"><p>Notes</p></th></tr>
-<tr><td>SimplicialLLT    </td><td>\link SparseCholesky_Module SparseCholesky \endlink</td><td>Direct LLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
-    <td>built-in, LGPL</td>
-    <td>SimplicialLDLT is often preferable</td></tr>
-<tr><td>SimplicialLDLT   </td><td>\link SparseCholesky_Module SparseCholesky \endlink</td><td>Direct LDLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
-    <td>built-in, LGPL</td>
-    <td>Recommended for very sparse and not too large problems (e.g., 2D Poisson eq.)</td></tr>
-<tr><td>ConjugateGradient</td><td>\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlink</td><td>Classic iterative CG</td><td>SPD</td><td>Preconditionning</td>
-    <td>built-in, LGPL</td>
-    <td>Recommended for large symmetric problems (e.g., 3D Poisson eq.)</td></tr>
-<tr><td>BiCGSTAB</td><td>\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlink</td><td>Iterative stabilized bi-conjugate gradient</td><td>Square</td><td>Preconditionning</td>
-    <td>built-in, LGPL</td>
-    <td>Might not always converge</td></tr>
-
-
-<tr><td>PastixLLT \n PastixLDLT \n PastixLU</td><td>\link PaStiXSupport_Module PaStiXSupport \endlink</td><td>Direct LLt, LDLt, LU factorizations</td><td>SPD \n SPD \n Square</td><td>Fill-in reducing, Leverage fast dense algebra, Multithreading</td>
-    <td>Requires the <a href="http://pastix.gforge.inria.fr">PaStiX</a> package, \b CeCILL-C </td>
-    <td>optimized for tough problems and symmetric patterns</td></tr>
-<tr><td>CholmodSupernodalLLT</td><td>\link CholmodSupport_Module CholmodSupport \endlink</td><td>Direct LLt factorization</td><td>SPD</td><td>Fill-in reducing, Leverage fast dense algebra</td>
-    <td>Requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td>
-    <td></td></tr>
-<tr><td>UmfPackLU</td><td>\link UmfPackSupport_Module UmfPackSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
-    <td>Requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td>
-    <td></td></tr>
-<tr><td>SuperLU</td><td>\link SuperLUSupport_Module SuperLUSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
-    <td>Requires the <a href="http://crd-legacy.lbl.gov/~xiaoye/SuperLU/">SuperLU</a> library, (BSD-like)</td>
-    <td></td></tr>
-</table>
-
-Here \c SPD means symmetric positive definite.
-
-All these solvers follow the same general concept.
-Here is a typical and general example:
-\code
-#include <Eigen/RequiredModuleName>
-// ...
-SparseMatrix<double> A;
-// fill A
-VectorXd b, x;
-// fill b
-// solve Ax = b
-SolverClassName<SparseMatrix<double> > solver;
-solver.compute(A);
-if(solver.info()!=Success) {
-  // decomposition failed
-  return;
-}
-x = solver.solve(b);
-if(solver.info()!=Success) {
-  // solving failed
-  return;
-}
-// solve for another right hand side:
-x1 = solver.solve(b1);
-\endcode
-
-For \c SPD solvers, a second optional template argument allows specifying which triangular part has to be used, e.g.:
-
-\code
-#include <Eigen/IterativeLinearSolvers>
-
-ConjugateGradient<SparseMatrix<double>, Eigen::Upper> solver;
-x = solver.compute(A).solve(b);
-\endcode
-In the above example, only the upper triangular part of the input matrix A is considered for solving. The opposite triangle might either be empty or contain arbitrary values.
-
-In the case where multiple problems with the same sparsity pattern have to be solved, the "compute" step can be decomposed as follows:
-\code
-SolverClassName<SparseMatrix<double> > solver;
-solver.analyzePattern(A);   // for this step the numerical values of A are not used
-solver.factorize(A);
-x1 = solver.solve(b1);
-x2 = solver.solve(b2);
-...
-A = ...;                    // modify the values of the nonzeros of A, the nonzeros pattern must stay unchanged
-solver.factorize(A);
-x1 = solver.solve(b1);
-x2 = solver.solve(b2);
-...
-\endcode
-The compute() method is equivalent to calling both analyzePattern() and factorize().
-
-Finally, each solver provides some specific features, such as the determinant, access to the factors, control of the iterations, and so on.
-More details are available in the documentation of the respective classes.
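-
-For instance, the iterative solvers expose some control over the iterations; a minimal sketch, reusing \c A, \c b and \c x from the example above (the tolerance and iteration count are arbitrary):
-\code
-#include <Eigen/IterativeLinearSolvers>
-// ...
-ConjugateGradient<SparseMatrix<double> > cg;
-cg.setMaxIterations(100);
-cg.setTolerance(1e-8);
-cg.compute(A);
-x = cg.solve(b);
-std::cout << "#iterations:     " << cg.iterations() << std::endl;
-std::cout << "estimated error: " << cg.error()      << std::endl;
-\endcode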
-
-
-\section TutorialSparseFeatureSet Supported operators and functions
-
-Because of their special storage format, sparse matrices cannot offer the same level of flexibility as dense matrices.
-In Eigen's sparse module we chose to expose only the subset of the dense matrix API which can be efficiently implemented.
-In the following \em sm denotes a sparse matrix, \em sv a sparse vector, \em dm a dense matrix, and \em dv a dense vector.
-
-\subsection TutorialSparse_BasicOps Basic operations
-
-%Sparse expressions support most of the unary and binary coefficient wise operations:
-\code
-sm1.real()   sm1.imag()   -sm1                    0.5*sm1
-sm1+sm2      sm1-sm2      sm1.cwiseProduct(sm2)
-\endcode
-However, a strong restriction is that the storage orders must match. For instance, in the following example:
-\code
-sm4 = sm1 + sm2 + sm3;
-\endcode
-sm1, sm2, and sm3 must all be row-major or all column-major.
-On the other hand, there is no restriction on the target matrix sm4.
-For instance, this means that for computing \f$ A^T + A \f$, the matrix \f$ A^T \f$ must be evaluated into a temporary matrix of compatible storage order:
-\code
-SparseMatrix<double> A, B;
-B = SparseMatrix<double>(A.transpose()) + A;
-\endcode
-
-Binary coefficient wise operators can also mix sparse and dense expressions:
-\code
-sm2 = sm1.cwiseProduct(dm1);
-dm2 = sm1 + dm1;
-\endcode
-
-
-%Sparse expressions also support transposition:
-\code
-sm1 = sm2.transpose();
-sm1 = sm2.adjoint();
-\endcode
-However, there is no transposeInPlace() method.
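-A transposed matrix can instead be evaluated into a temporary and assigned back; a minimal sketch:
-\code
-SparseMatrix<double> tmp = sm1.transpose();  // explicit evaluation of the transpose
-sm1 = tmp;                                   // assign (or swap) the temporary back
-\endcode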
-
-
-\subsection TutorialSparse_Products Matrix products
-
-%Eigen supports various kinds of sparse matrix products, which are summarized below:
-  - \b sparse-dense:
-    \code
-dv2 = sm1 * dv1;
-dm2 = dm1 * sm1.adjoint();
-dm2 = 2. * sm1 * dm1;
-    \endcode
-  - \b symmetric \b sparse-dense. The product of a sparse symmetric matrix with a dense matrix (or vector) can also be optimized by specifying the symmetry with selfadjointView():
-    \code
-dm2 = sm1.selfadjointView<>() * dm1;        // if all coefficients of sm1 are stored
-dm2 = sm1.selfadjointView<Upper>() * dm1;   // if only the upper part of sm1 is stored
-dm2 = sm1.selfadjointView<Lower>() * dm1;   // if only the lower part of sm1 is stored
-    \endcode
-  - \b sparse-sparse. For sparse-sparse products, two different algorithms are available. The default one is conservative and preserves the explicit zeros that might appear:
-    \code
-sm3 = sm1 * sm2;
-sm3 = 4 * sm1.adjoint() * sm2;
-    \endcode
-    The second algorithm prunes the explicit zeros, or the values smaller than a given threshold, on the fly. It is enabled and controlled through the prune() functions:
-    \code
-sm3 = (sm1 * sm2).prune();                  // removes numerical zeros
-sm3 = (sm1 * sm2).prune(ref);               // removes elements much smaller than ref
-sm3 = (sm1 * sm2).prune(ref,epsilon);       // removes elements smaller than ref*epsilon
-    \endcode
-
-  - \b permutations. Finally, permutations can be applied to sparse matrices too:
-    \code
-PermutationMatrix<Dynamic,Dynamic> P = ...;
-sm2 = P * sm1;
-sm2 = sm1 * P.inverse();
-sm2 = sm1.transpose() * P;
-    \endcode
-
-
-\subsection TutorialSparse_TriangularSelfadjoint Triangular and selfadjoint views
-
-Just as with dense matrices, the triangularView() function can be used to address a triangular part of the matrix, and perform triangular solves with a dense right hand side:
-\code
-dm2 = sm1.triangularView<Lower>().solve(dm1);
-dv2 = sm1.transpose().triangularView<Upper>().solve(dv1);
-\endcode
-
-The selfadjointView() function permits various operations:
- - optimized sparse-dense matrix products:
-    \code
-dm2 = sm1.selfadjointView<>() * dm1;        // if all coefficients of sm1 are stored
-dm2 = sm1.selfadjointView<Upper>() * dm1;   // if only the upper part of sm1 is stored
-dm2 = sm1.selfadjointView<Lower>() * dm1;   // if only the lower part of sm1 is stored
-    \endcode
- - copy of triangular parts:
-    \code
-sm2 = sm1.selfadjointView<Upper>();                               // makes a full selfadjoint matrix from the upper triangular part
-sm2.selfadjointView<Lower>() = sm1.selfadjointView<Upper>();      // copies the upper triangular part to the lower triangular part
-    \endcode
- - application of symmetric permutations:
- \code
-PermutationMatrix<Dynamic,Dynamic> P = ...;
-sm2 = sm1.selfadjointView<Upper>().twistedBy(P);                              // compute P S P' from the upper triangular part of sm1, and make it a full matrix
-sm2.selfadjointView<Lower>() = sm1.selfadjointView<Lower>().twistedBy(P);     // compute P S P' from the lower triangular part of sm1, and store only the lower part
- \endcode
-
-\subsection TutorialSparse_Submat Sub-matrices
-
-%Sparse matrices do not yet support addressing arbitrary sub-matrices. Currently, one can only reference a set of contiguous \em inner vectors, i.e., a set of contiguous rows for a row-major matrix, or a set of contiguous columns for a column-major matrix:
-\code
-  sm1.innerVector(j);       // returns an expression of the j-th column (resp. row) of the matrix if sm1 is col-major (resp. row-major)
-  sm1.innerVectors(j, nb);  // returns an expression of the nb columns (resp. rows) starting from the j-th column (resp. row)
-                            // of the matrix if sm1 is col-major (resp. row-major)
-  sm1.middleRows(j, nb);    // for row major matrices only, get a range of nb rows
-  sm1.middleCols(j, nb);    // for column major matrices only, get a range of nb columns
-\endcode
-
-\li \b Next: \ref TutorialMapClass
-
-*/
-
-}
diff --git a/resources/3rdparty/eigen/doc/D01_StlContainers.dox b/resources/3rdparty/eigen/doc/D01_StlContainers.dox
deleted file mode 100644
index f55db3125..000000000
--- a/resources/3rdparty/eigen/doc/D01_StlContainers.dox
+++ /dev/null
@@ -1,65 +0,0 @@
-namespace Eigen {
-
-/** \page TopicStlContainers Using STL Containers with Eigen
-
-\b Table \b of \b contents
-  - \ref summary
-  - \ref allocator
-  - \ref vector
-
-\section summary Executive summary
-
-Using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", or classes having members of such types, requires taking the following two steps:
-
-\li A 16-byte-aligned allocator must be used. Eigen does provide one ready for use: aligned_allocator.
-\li If you want to use the std::vector container, you need to \#include <Eigen/StdVector>.
-
-These issues arise only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member". For other Eigen types, such as Vector3f or MatrixXd, no special care is needed when using STL containers.
-
-\section allocator Using an aligned allocator
-
-STL containers take an optional template parameter, the allocator type. When using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you need to tell the container to use an allocator that will always allocate memory at 16-byte-aligned locations. Fortunately, Eigen does provide such an allocator: Eigen::aligned_allocator.
-
-For example, instead of
-\code
-std::map<int, Eigen::Vector4f>
-\endcode
-you need to use
-\code
-std::map<int, Eigen::Vector4f, std::less<int>, 
-         Eigen::aligned_allocator<std::pair<const int, Eigen::Vector4f> > >
-\endcode
-Note that the third parameter "std::less<int>" is just the default value, but we have to include it because we want to specify the fourth parameter, which is the allocator type.
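-
-A typedef keeps such declarations readable; a minimal usage sketch (the typedef name is arbitrary):
-\code
-typedef std::map<int, Eigen::Vector4f, std::less<int>,
-        Eigen::aligned_allocator<std::pair<const int, Eigen::Vector4f> > > Vector4fMap;
-Vector4fMap my_map;
-my_map[0] = Eigen::Vector4f::Ones();  // elements are now allocated at 16-byte-aligned locations
-\endcode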
-
-\section vector The case of std::vector
-
-The situation with std::vector was even worse (see the explanation below), so we had to specialize it for the Eigen::aligned_allocator type. In practice you \b must use the Eigen::aligned_allocator (not another aligned allocator), \b and \#include <Eigen/StdVector>.
-
-Here is an example:
-\code
-#include<Eigen/StdVector>
-/* ... */
-std::vector<Eigen::Vector4f,Eigen::aligned_allocator<Eigen::Vector4f> >
-\endcode
-
-\subsection vector_spec An alternative - specializing std::vector for Eigen types
-
-As an alternative to the recommended approach described above, you have the option to specialize std::vector for Eigen types requiring alignment.
-The advantage is that you won't need to declare std::vector all over with Eigen::aligned_allocator. One drawback, on the other hand, is that
-the specialization needs to be defined before all code pieces in which e.g. std::vector<Vector2d> is used. Otherwise, without knowing the specialization,
-the compiler will compile that particular instance with the default std::allocator and your program is most likely to crash.
-
-Here is an example:
-\code
-#include<Eigen/StdVector>
-/* ... */
-EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Vector2d)
-std::vector<Eigen::Vector2d>
-\endcode
-
-<span class="note">\b Explanation: The resize() method of std::vector takes a value_type argument (defaulting to value_type()). So with std::vector<Eigen::Vector4f>, some Eigen::Vector4f objects will be passed by value, which discards any alignment modifiers, so an Eigen::Vector4f can be created at an unaligned location. In order to avoid that, the only solution we saw was to specialize std::vector to make it work on a slight modification of, here, Eigen::Vector4f, that is able to deal properly with this situation.
-</span>
-
-*/
-
-}
diff --git a/resources/3rdparty/eigen/doc/I02_HiPerformance.dox b/resources/3rdparty/eigen/doc/I02_HiPerformance.dox
deleted file mode 100644
index ab6cdfd44..000000000
--- a/resources/3rdparty/eigen/doc/I02_HiPerformance.dox
+++ /dev/null
@@ -1,128 +0,0 @@
-
-namespace Eigen {
-
-/** \page TopicWritingEfficientProductExpression Writing efficient matrix product expressions
-
-In general, achieving good performance with Eigen does not require any special effort:
-simply write your expressions in the most high-level way. This is especially true
-for small fixed-size matrices. For large matrices, however, it might be useful to
-take some care when writing your expressions in order to minimize useless evaluations
-and optimize the performance.
-In this page we give a brief overview of Eigen's internal mechanism to simplify
-and evaluate complex product expressions, and discuss the current limitations.
-In particular we focus on expressions matching level 2 and 3 BLAS routines, i.e.,
-all kinds of matrix products and triangular solvers.
-
-Indeed, in Eigen we have implemented a set of highly optimized routines which are very similar
-to BLAS's. Unlike BLAS, those routines are made available to the user via a high-level and
-natural API. Each of these routines can compute in a single evaluation a wide variety of expressions.
-Given an expression, the challenge is then to map it to a minimal set of routines.
-As explained later, this mechanism has some limitations, and knowing them will allow
-you to write faster code by making your expressions more Eigen-friendly.
-
-\section GEMM General Matrix-Matrix product (GEMM)
-
-Let's start with the most common primitive: the matrix product of general dense matrices.
-In the BLAS world this corresponds to the GEMM routine. Our equivalent primitive can
-perform the following operation:
-\f$ C.noalias() += \alpha op1(A) op2(B) \f$
-where A, B, and C are column- and/or row-major matrices (or sub-matrices),
-alpha is a scalar value, and op1, op2 can be transpose, adjoint, conjugate, or the identity.
-When Eigen detects a matrix product, it analyzes both sides of the product to extract a
-unique scalar factor alpha, and for each side, its effective storage order, shape, and conjugation state.
-More precisely, each side is simplified by iteratively removing trivial expressions such as scalar multiples,
-negation, and conjugation. Transpose and Block expressions are not evaluated; they only modify the storage order
-and shape. All other expressions are immediately evaluated.
-For instance, the following expression:
-\code m1.noalias() -= s4 * (s1 * m2.adjoint() * (-(s3*m3).conjugate()*s2))  \endcode
-is automatically simplified to:
-\code m1.noalias() += (s1*s2*conj(s3)*s4) * m2.adjoint() * m3.conjugate() \endcode
-which exactly matches our GEMM routine.
-
-\subsection GEMM_Limitations Limitations
-Unfortunately, this simplification mechanism is not perfect yet and not all expressions which could be
-handled by a single GEMM-like call are correctly detected.
-<table class="manual" style="width:100%">
-<tr>
-<th>Not optimal expression</th>
-<th>Evaluated as</th>
-<th>Optimal version (single evaluation)</th>
-<th>Comments</th>
-</tr>
-<tr>
-<td>\code
-m1 += m2 * m3; \endcode</td>
-<td>\code
-temp = m2 * m3;
-m1 += temp; \endcode</td>
-<td>\code
-m1.noalias() += m2 * m3; \endcode</td>
-<td>Use .noalias() to tell Eigen the result and right-hand-sides do not alias. 
-    Otherwise the product m2 * m3 is evaluated into a temporary.</td>
-</tr>
-<tr class="alt">
-<td></td>
-<td></td>
-<td>\code
-m1.noalias() += s1 * (m2 * m3); \endcode</td>
-<td>This is a special feature of Eigen. Here the product between a scalar
-    and a matrix product does not evaluate the matrix product but instead it
-    returns a matrix product expression tracking the scalar scaling factor. <br>
-    Without this optimization, the matrix product would be evaluated into a
-    temporary as in the next example.</td>
-</tr>
-<tr>
-<td>\code
-m1.noalias() += (m2 * m3).adjoint(); \endcode</td>
-<td>\code
-temp = m2 * m3;
-m1 += temp.adjoint(); \endcode</td>
-<td>\code
-m1.noalias() += m3.adjoint()
-              * m2.adjoint(); \endcode</td>
-<td>This is because the product expression has the EvalBeforeNesting bit which
-    enforces the evaluation of the product by the Transpose expression.</td>
-</tr>
-<tr class="alt">
-<td>\code
-m1 = m1 + m2 * m3; \endcode</td>
-<td>\code
-temp = m2 * m3;
-m1 = m1 + temp; \endcode</td>
-<td>\code m1.noalias() += m2 * m3; \endcode</td>
-<td>Here there is no way to detect at compile time that the two m1 are the same,
-    and so the matrix product will be immediately evaluated.</td>
-</tr>
-<tr>
-<td>\code
-m1.noalias() = m4 + m2 * m3; \endcode</td>
-<td>\code
-temp = m2 * m3;
-m1 = m4 + temp; \endcode</td>
-<td>\code
-m1 = m4;
-m1.noalias() += m2 * m3; \endcode</td>
-<td>First of all, here the .noalias() in the first expression is useless because
-    m2*m3 will be evaluated anyway. However, note how this expression can be rewritten
-    so that no temporary is required. (Tip: for very small fixed-size matrices
-    it is slightly better to rewrite it like this: m1.noalias() = m2 * m3; m1 += m4;)</td>
-</tr>
-<tr class="alt">
-<td>\code
-m1.noalias() += (s1*m2).block(..) * m3; \endcode</td>
-<td>\code
-temp = (s1*m2).block(..);
-m1 += temp * m3; \endcode</td>
-<td>\code
-m1.noalias() += s1 * m2.block(..) * m3; \endcode</td>
-<td>This is because our expression analyzer is currently not able to extract trivial
-    expressions nested in a Block expression. Therefore the nested scalar
-    multiple cannot be properly extracted.</td>
-</tr>
-</table>
-
-Of course, all these remarks hold for all other kinds of products involving triangular or selfadjoint matrices.
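-For instance, products like the following (with dense operands, as above) are handled by the corresponding dedicated kernels in the same way; a minimal sketch:
-\code
-m1.noalias() += m2.selfadjointView<Lower>() * m3;
-m1.noalias() += m2.triangularView<Upper>()  * m3;
-\endcode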
-
-*/
-
-}
diff --git a/resources/3rdparty/eigen/doc/I10_Assertions.dox b/resources/3rdparty/eigen/doc/I10_Assertions.dox
deleted file mode 100644
index e5bcbe536..000000000
--- a/resources/3rdparty/eigen/doc/I10_Assertions.dox
+++ /dev/null
@@ -1,114 +0,0 @@
-namespace Eigen {
-
-/** \page TopicAssertions Assertions
-
-\b Table \b of \b contents
-  - \ref PlainAssert
-    - \ref RedefineAssert
-    - \ref DisableAssert
-  - \ref StaticAssert
-    - \ref DerivedStaticAssert
-    - \ref DisableStaticAssert
-
-\section PlainAssert Assertions
-
-The macro eigen_assert is defined to be \c eigen_plain_assert by default. We use eigen_plain_assert instead of \c assert to work around a known bug for GCC <= 4.3. Basically, eigen_plain_assert \a is \c assert.
-
-\subsection RedefineAssert Redefining assertions
-
-Both eigen_assert and eigen_plain_assert are defined in Macros.h. Defining eigen_assert indirectly gives you a chance to change its behavior. You can redefine this macro if you want to do something else such as throwing an exception, and fall back to its default behavior with eigen_plain_assert. The code below tells Eigen to throw an std::runtime_error:
-
-\code
-#include <stdexcept>
-#undef eigen_assert
-#define eigen_assert(x) \
-  if (!x) { throw (std::runtime_error("Put your message here")); }
-\endcode
-
-\subsection DisableAssert Disabling assertions
-
-Assertions cost run time and can be turned off. You can suppress eigen_assert by defining \c EIGEN_NO_DEBUG \b before including Eigen headers. \c EIGEN_NO_DEBUG is undefined by default unless \c NDEBUG is defined.
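-For instance, a minimal sketch:
-\code
-// must appear before any Eigen header is included
-#define EIGEN_NO_DEBUG
-#include <Eigen/Dense>
-\endcode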
-
-\section StaticAssert Static assertions
-
-Static assertions were not standardized until C++11. However, in the Eigen library, there are many conditions that can and should be detected at compile time. For instance, we use static assertions to prevent the code below from compiling.
-
-\code
-Matrix3d()  + Matrix4d();   // adding matrices of different sizes
-Matrix4cd() * Vector3cd();  // invalid product known at compile time
-\endcode
-
-Static assertions are defined in StaticAssert.h. If native static_assert is available, we use it. Otherwise, we have implemented an assertion macro that can show a limited range of messages.
-
-One can easily come up with static assertions without messages, such as:
-
-\code
-#define STATIC_ASSERT(x) \
-  switch(0) { case 0: case x:; }
-\endcode
-
-However, the example above obviously cannot tell why the assertion failed. Therefore, we define a \c struct in namespace Eigen::internal to handle available messages.
-
-\code
-template<bool condition>
-struct static_assertion {};
-
-template<>
-struct static_assertion<true>
-{
-  enum {
-    YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX,
-    YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES,
-    // see StaticAssert.h for all enums.
-  };
-};
-\endcode
-
-We then define EIGEN_STATIC_ASSERT(CONDITION,MSG) to access Eigen::internal::static_assertion<bool(CONDITION)>::MSG. If the condition evaluates to \c false, your compiler displays a lot of messages explaining there is no MSG in static_assertion<false>. Nevertheless, this is \a not what we are interested in. As you can see, all members of static_assertion<true> are ALL_CAPS_AND_THEY_ARE_SHOUTING.
-
-\warning
-When using this macro, MSG should be a member of static_assertion<true>, or the static assertion \b always fails.
-Currently, it can only be used in function scope.
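-
-For instance, a minimal function-scope usage could look like the following sketch (the function itself is only illustrative):
-\code
-template<typename VectorType>
-void normalizeInPlace(VectorType& v)
-{
-  EIGEN_STATIC_ASSERT(VectorType::IsVectorAtCompileTime,
-                      YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX)
-  v /= v.norm();
-}
-\endcode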
-
-\subsection DerivedStaticAssert Derived static assertions
-
-There are other macros derived from EIGEN_STATIC_ASSERT to enhance readability. Their names are self-explanatory.
-
-- \b EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) - passes if \a TYPE is fixed size.
-- \b EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) - passes if \a TYPE is dynamic size.
-- \b EIGEN_STATIC_ASSERT_LVALUE(Derived) - fails if \a Derived is read-only.
-- \b EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) - passes if \a Derived is an array expression.
-- <b>EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2)</b> - fails if the two expressions are an array one and a matrix one.
-
-Because Eigen handles both fixed-size and dynamic-size expressions, some conditions cannot be clearly determined at compile time. We classify them into strict assertions and permissive assertions.
-
-\subsubsection StrictAssertions Strict assertions
-
-These assertions fail if the condition <b>may not</b> be met. For example, MatrixXd may not be a vector, so it fails EIGEN_STATIC_ASSERT_VECTOR_ONLY.
-
-- \b EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) - passes if \a TYPE must be a vector type.
-- <b>EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE)</b> - passes if \a TYPE must be a vector of the given size.
-- <b>EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS)</b> - passes if \a TYPE must be a matrix with given rows and columns.
-
-\subsubsection PermissiveAssertions Permissive assertions
-
-These assertions fail if the condition \b cannot be met. For example, MatrixXd and Matrix4d may have the same size, so they pass EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE.
-
-- \b EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) - fails if the two vector expression types must have different sizes.
-- \b EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) - fails if the two matrix expression types must have different sizes.
-- \b EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) - fails if \a TYPE cannot be an 1x1 expression.
-
-See StaticAssert.h for details such as what messages they throw.
-
-\subsection DisableStaticAssert Disabling static assertions
-
-If \c EIGEN_NO_STATIC_ASSERT is defined, static assertions turn into <tt>eigen_assert</tt>'s, working like:
-
-\code
-#define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);
-\endcode
-
-This saves compile time but consumes more run time. \c EIGEN_NO_STATIC_ASSERT is undefined by default.
-
-*/
-}
diff --git a/resources/3rdparty/eigen/scripts/eigen_gen_docs b/resources/3rdparty/eigen/scripts/eigen_gen_docs
deleted file mode 100644
index 921d600ed..000000000
--- a/resources/3rdparty/eigen/scripts/eigen_gen_docs
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-# configuration
-# You should call this script with USER set as you want; otherwise a default
-# will be used.
-USER=${USER:-'orzel'}
-
-#ulimit -v 1024000
-
-# step 1 : build
-mkdir build -p
-(cd build && cmake .. && make doc) || { echo "make failed"; exit 1; }
-
-#step 2 : upload
-# (the '/' at the end of path is very important, see rsync documentation)
-rsync -az --no-p --delete build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/dox-devel/ || { echo "upload failed"; exit 1; }
-
-#step 3 : fix the perm
-ssh $USER@ssh.tuxfamily.org 'chmod -R g+w /home/eigen/eigen.tuxfamily.org-web/htdocs/dox-devel' || { echo "perm failed"; exit 1; }
-
-echo "Uploaded successfully"
-
diff --git a/resources/3rdparty/eigen/test/CMakeLists.txt b/resources/3rdparty/eigen/test/CMakeLists.txt
deleted file mode 100644
index cbea4dd0a..000000000
--- a/resources/3rdparty/eigen/test/CMakeLists.txt
+++ /dev/null
@@ -1,246 +0,0 @@
-
-# generate split test header file
-message(STATUS ${CMAKE_CURRENT_BINARY_DIR})
-file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
-foreach(i RANGE 1 999)
-  file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h
-    "#ifdef EIGEN_TEST_PART_${i}\n"
-    "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n"
-    "#else\n"
-    "#define CALL_SUBTEST_${i}(FUNC)\n"
-    "#endif\n\n"
-    )
-endforeach()
-
-# configure blas/lapack (use Eigen's ones)
-set(BLAS_FOUND TRUE)
-set(LAPACK_FOUND TRUE)
-set(BLAS_LIBRARIES eigen_blas)
-set(LAPACK_LIBRARIES eigen_lapack)
-
-set(EIGEN_TEST_MATRIX_DIR "" CACHE STRING "Enable testing of real-world sparse matrices contained in the specified path")
-if(EIGEN_TEST_MATRIX_DIR)
-  if(NOT WIN32)
-    message(STATUS "Test realworld sparse matrices: ${EIGEN_TEST_MATRIX_DIR}")
-    add_definitions( -DTEST_REAL_CASES="${EIGEN_TEST_MATRIX_DIR}" )
-  else(NOT WIN32)
-    message(STATUS "REAL CASES CAN NOT BE CURRENTLY TESTED ON WIN32")
-  endif(NOT WIN32)
-endif(EIGEN_TEST_MATRIX_DIR)
-
-set(SPARSE_LIBS " ")
-
-find_package(Cholmod)
-if(CHOLMOD_FOUND AND BLAS_FOUND AND LAPACK_FOUND)
-  add_definitions("-DEIGEN_CHOLMOD_SUPPORT")
-  include_directories(${CHOLMOD_INCLUDES})
-  set(SPARSE_LIBS ${SPARSE_LIBS} ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES})
-  set(CHOLMOD_ALL_LIBS  ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES})
-  ei_add_property(EIGEN_TESTED_BACKENDS "Cholmod, ")
-else()
-  ei_add_property(EIGEN_MISSING_BACKENDS "Cholmod, ")
-endif()
-
-find_package(Umfpack)
-if(UMFPACK_FOUND AND BLAS_FOUND)
-  add_definitions("-DEIGEN_UMFPACK_SUPPORT")
-  include_directories(${UMFPACK_INCLUDES})
-  set(SPARSE_LIBS ${SPARSE_LIBS} ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES})
-  set(UMFPACK_ALL_LIBS ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES})
-  ei_add_property(EIGEN_TESTED_BACKENDS "UmfPack, ")
-else()
-  ei_add_property(EIGEN_MISSING_BACKENDS "UmfPack, ")
-endif()
-
-find_package(SuperLU)
-if(SUPERLU_FOUND AND BLAS_FOUND)
-  add_definitions("-DEIGEN_SUPERLU_SUPPORT")
-  include_directories(${SUPERLU_INCLUDES})
-  set(SPARSE_LIBS ${SPARSE_LIBS} ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES})
-  set(SUPERLU_ALL_LIBS ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES})
-  ei_add_property(EIGEN_TESTED_BACKENDS  "SuperLU, ")
-else()
-  ei_add_property(EIGEN_MISSING_BACKENDS  "SuperLU, ")
-endif()
-
-
-find_package(Pastix)
-find_package(Scotch)
-find_package(Metis)
-if(PASTIX_FOUND AND BLAS_FOUND)
-  add_definitions("-DEIGEN_PASTIX_SUPPORT")
-  include_directories(${PASTIX_INCLUDES})
-  if(SCOTCH_FOUND)
-    include_directories(${SCOTCH_INCLUDES})
-    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${SCOTCH_LIBRARIES})
-  elseif(METIS_FOUND)
-    include_directories(${METIS_INCLUDES})
-    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${METIS_LIBRARIES})
-  else(SCOTCH_FOUND)
-    ei_add_property(EIGEN_MISSING_BACKENDS  "PaStiX, ")
-  endif(SCOTCH_FOUND)
-  set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES} ${ORDERING_LIBRARIES} ${BLAS_LIBRARIES})
-  set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES} ${BLAS_LIBRARIES})
-  ei_add_property(EIGEN_TESTED_BACKENDS  "PaStiX, ")
-else()
-  ei_add_property(EIGEN_MISSING_BACKENDS  "PaStiX, ")
-endif()
-
-option(EIGEN_TEST_NOQT "Disable Qt support in unit tests" OFF)
-if(NOT EIGEN_TEST_NOQT)
-  find_package(Qt4)
-  if(QT4_FOUND)
-    include(${QT_USE_FILE})
-    ei_add_property(EIGEN_TESTED_BACKENDS  "Qt4 support, ")
-  else()
-    ei_add_property(EIGEN_MISSING_BACKENDS  "Qt4 support, ")
-  endif()
-endif(NOT EIGEN_TEST_NOQT)
-
-if(TEST_LIB)
-  add_definitions("-DEIGEN_EXTERN_INSTANTIATIONS=1")
-endif(TEST_LIB)
-
-ei_add_test(meta)
-ei_add_test(sizeof)
-ei_add_test(dynalloc)
-ei_add_test(nomalloc)
-ei_add_test(first_aligned)
-ei_add_test(mixingtypes)
-ei_add_test(packetmath)
-ei_add_test(unalignedassert)
-ei_add_test(vectorization_logic)
-ei_add_test(basicstuff)
-ei_add_test(linearstructure)
-ei_add_test(integer_types)
-ei_add_test(cwiseop)
-ei_add_test(unalignedcount)
-ei_add_test(exceptions)
-ei_add_test(redux)
-ei_add_test(visitor)
-ei_add_test(block)
-ei_add_test(corners)
-ei_add_test(product_small)
-ei_add_test(product_large)
-ei_add_test(product_extra)
-ei_add_test(diagonalmatrices)
-ei_add_test(adjoint)
-ei_add_test(diagonal)
-ei_add_test(miscmatrices)
-ei_add_test(commainitializer)
-ei_add_test(smallvectors)
-ei_add_test(map)
-ei_add_test(mapstride)
-ei_add_test(mapstaticmethods)
-ei_add_test(array)
-ei_add_test(array_for_matrix)
-ei_add_test(array_replicate)
-ei_add_test(array_reverse)
-ei_add_test(triangular)
-ei_add_test(selfadjoint)
-ei_add_test(product_selfadjoint)
-ei_add_test(product_symm)
-ei_add_test(product_syrk)
-ei_add_test(product_trmv)
-ei_add_test(product_trmm)
-ei_add_test(product_trsolve)
-ei_add_test(product_mmtr)
-ei_add_test(product_notemporary)
-ei_add_test(stable_norm)
-ei_add_test(bandmatrix)
-ei_add_test(cholesky)
-ei_add_test(lu)
-ei_add_test(determinant)
-ei_add_test(inverse)
-ei_add_test(qr)
-ei_add_test(qr_colpivoting)
-ei_add_test(qr_fullpivoting)
-ei_add_test(upperbidiagonalization)
-ei_add_test(hessenberg)
-ei_add_test(schur_real)
-ei_add_test(schur_complex)
-ei_add_test(eigensolver_selfadjoint)
-ei_add_test(eigensolver_generic)
-ei_add_test(eigensolver_complex)
-ei_add_test(real_qz)
-ei_add_test(eigensolver_generalized_real)
-ei_add_test(jacobi)
-ei_add_test(jacobisvd)
-ei_add_test(geo_orthomethods)
-ei_add_test(geo_homogeneous)
-ei_add_test(geo_quaternion)
-ei_add_test(geo_transformations)
-ei_add_test(geo_eulerangles)
-ei_add_test(geo_hyperplane)
-ei_add_test(geo_parametrizedline)
-ei_add_test(geo_alignedbox)
-ei_add_test(stdvector)
-ei_add_test(stdvector_overload)
-ei_add_test(stdlist)
-ei_add_test(stddeque)
-ei_add_test(resize)
-if(QT4_FOUND)
-  ei_add_test(qtvector "" "${QT_QTCORE_LIBRARY}")
-endif(QT4_FOUND)
-ei_add_test(sparse_vector)
-ei_add_test(sparse_basic)
-ei_add_test(sparse_product)
-ei_add_test(sparse_solvers)
-ei_add_test(umeyama)
-ei_add_test(householder)
-ei_add_test(swap)
-ei_add_test(conservative_resize)
-ei_add_test(permutationmatrices)
-ei_add_test(sparse_permutations)
-ei_add_test(eigen2support)
-ei_add_test(nullary)
-ei_add_test(nesting_ops "${CMAKE_CXX_FLAGS_DEBUG}")
-ei_add_test(zerosized)
-ei_add_test(dontalign)
-ei_add_test(evaluators)
-ei_add_test(sizeoverflow)
-ei_add_test(prec_inverse_4x4)
-ei_add_test(vectorwiseop)
-
-ei_add_test(simplicial_cholesky)
-ei_add_test(conjugate_gradient)
-ei_add_test(bicgstab)
-ei_add_test(sparselu)
-
-if(UMFPACK_FOUND)
-  ei_add_test(umfpack_support "" "${UMFPACK_ALL_LIBS}")
-endif()
-
-if(SUPERLU_FOUND)
-  ei_add_test(superlu_support "" "${SUPERLU_ALL_LIBS}")
-endif()
-
-if(CHOLMOD_FOUND)
-  ei_add_test(cholmod_support "" "${CHOLMOD_ALL_LIBS}")
-endif()
-
-if(PARDISO_FOUND)
-  ei_add_test(pardiso_support "" "${PARDISO_ALL_LIBS}")
-endif()
-
-if(PASTIX_FOUND AND (SCOTCH_FOUND OR METIS_FOUND))
-  ei_add_test(pastix_support "" "${PASTIX_ALL_LIBS}")
-endif()
-
-string(TOLOWER "${CMAKE_CXX_COMPILER}" cmake_cxx_compiler_tolower)
-if(cmake_cxx_compiler_tolower MATCHES "qcc")
-  set(CXX_IS_QCC "ON")
-endif()
-
-ei_add_property(EIGEN_TESTING_SUMMARY "CXX:               ${CMAKE_CXX_COMPILER}\n")
-if(CMAKE_COMPILER_IS_GNUCXX AND NOT CXX_IS_QCC)
-  execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version COMMAND head -n 1 OUTPUT_VARIABLE EIGEN_CXX_VERSION_STRING OUTPUT_STRIP_TRAILING_WHITESPACE)
-  ei_add_property(EIGEN_TESTING_SUMMARY "CXX_VERSION:       ${EIGEN_CXX_VERSION_STRING}\n")
-endif()
-ei_add_property(EIGEN_TESTING_SUMMARY "CXX_FLAGS:         ${CMAKE_CXX_FLAGS}\n")
-ei_add_property(EIGEN_TESTING_SUMMARY "Sparse lib flags:  ${SPARSE_LIBS}\n")
-
-option(EIGEN_TEST_EIGEN2 "Run whole Eigen2 test suite against EIGEN2_SUPPORT" OFF)
-if(EIGEN_TEST_EIGEN2)
-  add_subdirectory(eigen2)
-endif()
diff --git a/resources/3rdparty/eigen/test/cholesky.cpp b/resources/3rdparty/eigen/test/cholesky.cpp
deleted file mode 100644
index 49c79f9c8..000000000
--- a/resources/3rdparty/eigen/test/cholesky.cpp
+++ /dev/null
@@ -1,324 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_NO_ASSERTION_CHECKING
-#define EIGEN_NO_ASSERTION_CHECKING
-#endif
-
-static int nb_temporaries;
-
-#define EIGEN_DENSE_STORAGE_CTOR_PLUGIN { if(size!=0) nb_temporaries++; }
-
-#include "main.h"
-#include <Eigen/Cholesky>
-#include <Eigen/QR>
-
-#define VERIFY_EVALUATION_COUNT(XPR,N) {\
-    nb_temporaries = 0; \
-    XPR; \
-    if(nb_temporaries!=N) std::cerr << "nb_temporaries == " << nb_temporaries << "\n"; \
-    VERIFY( (#XPR) && nb_temporaries==N ); \
-  }
-
-template<typename MatrixType,template <typename,int> class CholType> void test_chol_update(const MatrixType& symm)
-{
-  typedef typename MatrixType::Scalar Scalar;
-  typedef typename MatrixType::RealScalar RealScalar;
-  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
-
-  MatrixType symmLo = symm.template triangularView<Lower>();
-  MatrixType symmUp = symm.template triangularView<Upper>();
-  MatrixType symmCpy = symm;
-
-  CholType<MatrixType,Lower> chollo(symmLo);
-  CholType<MatrixType,Upper> cholup(symmUp);
-
-  for (int k=0; k<10; ++k)
-  {
-    VectorType vec = VectorType::Random(symm.rows());
-    RealScalar sigma = internal::random<RealScalar>();
-    symmCpy += sigma * vec * vec.adjoint();
-
-    // we are doing some downdates, so it might be the case that the matrix is not SPD anymore
-    CholType<MatrixType,Lower> chol(symmCpy);
-    if(chol.info()!=Success)
-      break;
-
-    chollo.rankUpdate(vec, sigma);
-    VERIFY_IS_APPROX(symmCpy, chollo.reconstructedMatrix());
-
-    cholup.rankUpdate(vec, sigma);
-    VERIFY_IS_APPROX(symmCpy, cholup.reconstructedMatrix());
-  }
-}
-
-template<typename MatrixType> void cholesky(const MatrixType& m)
-{
-  typedef typename MatrixType::Index Index;
-  /* this test covers the following files:
-     LLT.h LDLT.h
-  */
-  Index rows = m.rows();
-  Index cols = m.cols();
-
-  typedef typename MatrixType::Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType;
-  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
-
-  MatrixType a0 = MatrixType::Random(rows,cols);
-  VectorType vecB = VectorType::Random(rows), vecX(rows);
-  MatrixType matB = MatrixType::Random(rows,cols), matX(rows,cols);
-  SquareMatrixType symm =  a0 * a0.adjoint();
-  // let's make sure the matrix is not singular or near singular
-  for (int k=0; k<3; ++k)
-  {
-    MatrixType a1 = MatrixType::Random(rows,cols);
-    symm += a1 * a1.adjoint();
-  }
-
-  SquareMatrixType symmUp = symm.template triangularView<Upper>();
-  SquareMatrixType symmLo = symm.template triangularView<Lower>();
-
-  // to test if really Cholesky only uses the upper triangular part, uncomment the following
-  // FIXME: currently that fails !!
-  //symm.template part<StrictlyLower>().setZero();
-
-  {
-    LLT<SquareMatrixType,Lower> chollo(symmLo);
-    VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix());
-    vecX = chollo.solve(vecB);
-    VERIFY_IS_APPROX(symm * vecX, vecB);
-    matX = chollo.solve(matB);
-    VERIFY_IS_APPROX(symm * matX, matB);
-
-    // test the upper mode
-    LLT<SquareMatrixType,Upper> cholup(symmUp);
-    VERIFY_IS_APPROX(symm, cholup.reconstructedMatrix());
-    vecX = cholup.solve(vecB);
-    VERIFY_IS_APPROX(symm * vecX, vecB);
-    matX = cholup.solve(matB);
-    VERIFY_IS_APPROX(symm * matX, matB);
-
-    MatrixType neg = -symmLo;
-    chollo.compute(neg);
-    VERIFY(chollo.info()==NumericalIssue);
-
-    VERIFY_IS_APPROX(MatrixType(chollo.matrixL().transpose().conjugate()), MatrixType(chollo.matrixU()));
-    VERIFY_IS_APPROX(MatrixType(chollo.matrixU().transpose().conjugate()), MatrixType(chollo.matrixL()));
-    VERIFY_IS_APPROX(MatrixType(cholup.matrixL().transpose().conjugate()), MatrixType(cholup.matrixU()));
-    VERIFY_IS_APPROX(MatrixType(cholup.matrixU().transpose().conjugate()), MatrixType(cholup.matrixL()));
-  }
-
-  // LDLT
-  {
-    int sign = internal::random<int>()%2 ? 1 : -1;
-
-    if(sign == -1)
-    {
-      symm = -symm; // test a negative matrix
-    }
-
-    SquareMatrixType symmUp = symm.template triangularView<Upper>();
-    SquareMatrixType symmLo = symm.template triangularView<Lower>();
-
-    LDLT<SquareMatrixType,Lower> ldltlo(symmLo);
-    VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix());
-    vecX = ldltlo.solve(vecB);
-    VERIFY_IS_APPROX(symm * vecX, vecB);
-    matX = ldltlo.solve(matB);
-    VERIFY_IS_APPROX(symm * matX, matB);
-
-    LDLT<SquareMatrixType,Upper> ldltup(symmUp);
-    VERIFY_IS_APPROX(symm, ldltup.reconstructedMatrix());
-    vecX = ldltup.solve(vecB);
-    VERIFY_IS_APPROX(symm * vecX, vecB);
-    matX = ldltup.solve(matB);
-    VERIFY_IS_APPROX(symm * matX, matB);
-
-    VERIFY_IS_APPROX(MatrixType(ldltlo.matrixL().transpose().conjugate()), MatrixType(ldltlo.matrixU()));
-    VERIFY_IS_APPROX(MatrixType(ldltlo.matrixU().transpose().conjugate()), MatrixType(ldltlo.matrixL()));
-    VERIFY_IS_APPROX(MatrixType(ldltup.matrixL().transpose().conjugate()), MatrixType(ldltup.matrixU()));
-    VERIFY_IS_APPROX(MatrixType(ldltup.matrixU().transpose().conjugate()), MatrixType(ldltup.matrixL()));
-
-    if(MatrixType::RowsAtCompileTime==Dynamic)
-    {
-      // Note: each in-place permutation requires a small temporary vector (mask).
-
-      // check in-place solve
-      matX = matB;
-      VERIFY_EVALUATION_COUNT(matX = ldltlo.solve(matX), 0);
-      VERIFY_IS_APPROX(matX, ldltlo.solve(matB).eval());
-
-
-      matX = matB;
-      VERIFY_EVALUATION_COUNT(matX = ldltup.solve(matX), 0);
-      VERIFY_IS_APPROX(matX, ldltup.solve(matB).eval());
-    }
-
-    // restore
-    if(sign == -1)
-      symm = -symm;
-  }
-
-  // test some special use cases of SelfCwiseBinaryOp:
-  MatrixType m1 = MatrixType::Random(rows,cols), m2(rows,cols);
-  m2 = m1;
-  m2 += symmLo.template selfadjointView<Lower>().llt().solve(matB);
-  VERIFY_IS_APPROX(m2, m1 + symmLo.template selfadjointView<Lower>().llt().solve(matB));
-  m2 = m1;
-  m2 -= symmLo.template selfadjointView<Lower>().llt().solve(matB);
-  VERIFY_IS_APPROX(m2, m1 - symmLo.template selfadjointView<Lower>().llt().solve(matB));
-  m2 = m1;
-  m2.noalias() += symmLo.template selfadjointView<Lower>().llt().solve(matB);
-  VERIFY_IS_APPROX(m2, m1 + symmLo.template selfadjointView<Lower>().llt().solve(matB));
-  m2 = m1;
-  m2.noalias() -= symmLo.template selfadjointView<Lower>().llt().solve(matB);
-  VERIFY_IS_APPROX(m2, m1 - symmLo.template selfadjointView<Lower>().llt().solve(matB));
-
-  // update/downdate
-  CALL_SUBTEST(( test_chol_update<SquareMatrixType,LLT>(symm)  ));
-  CALL_SUBTEST(( test_chol_update<SquareMatrixType,LDLT>(symm) ));
-}
-
-template<typename MatrixType> void cholesky_cplx(const MatrixType& m)
-{
-  // classic test
-  cholesky(m);
-
-  // test mixing real/scalar types
-
-  typedef typename MatrixType::Index Index;
-
-  Index rows = m.rows();
-  Index cols = m.cols();
-
-  typedef typename MatrixType::Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> RealMatrixType;
-  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
-
-  RealMatrixType a0 = RealMatrixType::Random(rows,cols);
-  VectorType vecB = VectorType::Random(rows), vecX(rows);
-  MatrixType matB = MatrixType::Random(rows,cols), matX(rows,cols);
-  RealMatrixType symm =  a0 * a0.adjoint();
-  // let's make sure the matrix is not singular or near singular
-  for (int k=0; k<3; ++k)
-  {
-    RealMatrixType a1 = RealMatrixType::Random(rows,cols);
-    symm += a1 * a1.adjoint();
-  }
-
-  {
-    RealMatrixType symmLo = symm.template triangularView<Lower>();
-
-    LLT<RealMatrixType,Lower> chollo(symmLo);
-    VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix());
-    vecX = chollo.solve(vecB);
-    VERIFY_IS_APPROX(symm * vecX, vecB);
-//     matX = chollo.solve(matB);
-//     VERIFY_IS_APPROX(symm * matX, matB);
-  }
-
-  // LDLT
-  {
-    int sign = internal::random<int>()%2 ? 1 : -1;
-
-    if(sign == -1)
-    {
-      symm = -symm; // test a negative matrix
-    }
-
-    RealMatrixType symmLo = symm.template triangularView<Lower>();
-
-    LDLT<RealMatrixType,Lower> ldltlo(symmLo);
-    VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix());
-    vecX = ldltlo.solve(vecB);
-    VERIFY_IS_APPROX(symm * vecX, vecB);
-//     matX = ldltlo.solve(matB);
-//     VERIFY_IS_APPROX(symm * matX, matB);
-  }
-}
-
-// regression test for bug 241
-template<typename MatrixType> void cholesky_bug241(const MatrixType& m)
-{
-  eigen_assert(m.rows() == 2 && m.cols() == 2);
-
-  typedef typename MatrixType::Scalar Scalar;
-  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
-
-  MatrixType matA;
-  matA << 1, 1, 1, 1;
-  VectorType vecB;
-  vecB << 1, 1;
-  VectorType vecX = matA.ldlt().solve(vecB);
-  VERIFY_IS_APPROX(matA * vecX, vecB);
-}
-
-// LDLT is not guaranteed to work for indefinite matrices, but it happens to work fine if the matrix is diagonal.
-// This test checks that LDLT correctly reports that the matrix is indefinite.
-// See http://forum.kde.org/viewtopic.php?f=74&t=106942
-template<typename MatrixType> void cholesky_indefinite(const MatrixType& m)
-{
-  eigen_assert(m.rows() == 2 && m.cols() == 2);
-  MatrixType mat;
-  mat << 1, 0, 0, -1;
-  LDLT<MatrixType> ldlt(mat);
-  VERIFY(!ldlt.isNegative());
-  VERIFY(!ldlt.isPositive());
-}
-
-template<typename MatrixType> void cholesky_verify_assert()
-{
-  MatrixType tmp;
-
-  LLT<MatrixType> llt;
-  VERIFY_RAISES_ASSERT(llt.matrixL())
-  VERIFY_RAISES_ASSERT(llt.matrixU())
-  VERIFY_RAISES_ASSERT(llt.solve(tmp))
-  VERIFY_RAISES_ASSERT(llt.solveInPlace(&tmp))
-
-  LDLT<MatrixType> ldlt;
-  VERIFY_RAISES_ASSERT(ldlt.matrixL())
-  VERIFY_RAISES_ASSERT(ldlt.permutationP())
-  VERIFY_RAISES_ASSERT(ldlt.vectorD())
-  VERIFY_RAISES_ASSERT(ldlt.isPositive())
-  VERIFY_RAISES_ASSERT(ldlt.isNegative())
-  VERIFY_RAISES_ASSERT(ldlt.solve(tmp))
-  VERIFY_RAISES_ASSERT(ldlt.solveInPlace(&tmp))
-}
-
-void test_cholesky()
-{
-  int s;
-  for(int i = 0; i < g_repeat; i++) {
-    CALL_SUBTEST_1( cholesky(Matrix<double,1,1>()) );
-    CALL_SUBTEST_3( cholesky(Matrix2d()) );
-    CALL_SUBTEST_3( cholesky_bug241(Matrix2d()) );
-    CALL_SUBTEST_3( cholesky_indefinite(Matrix2d()) );
-    CALL_SUBTEST_4( cholesky(Matrix3f()) );
-    CALL_SUBTEST_5( cholesky(Matrix4d()) );
-    s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);
-    CALL_SUBTEST_2( cholesky(MatrixXd(s,s)) );
-    s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2);
-    CALL_SUBTEST_6( cholesky_cplx(MatrixXcd(s,s)) );
-  }
-
-  CALL_SUBTEST_4( cholesky_verify_assert<Matrix3f>() );
-  CALL_SUBTEST_7( cholesky_verify_assert<Matrix3d>() );
-  CALL_SUBTEST_8( cholesky_verify_assert<MatrixXf>() );
-  CALL_SUBTEST_2( cholesky_verify_assert<MatrixXd>() );
-
-  // Test problem size constructors
-  CALL_SUBTEST_9( LLT<MatrixXf>(10) );
-  CALL_SUBTEST_9( LDLT<MatrixXf>(10) );
-  
-  EIGEN_UNUSED_VARIABLE(s)
-}
diff --git a/resources/3rdparty/eigen/test/diagonal.cpp b/resources/3rdparty/eigen/test/diagonal.cpp
deleted file mode 100644
index 0f09a9dfe..000000000
--- a/resources/3rdparty/eigen/test/diagonal.cpp
+++ /dev/null
@@ -1,80 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "main.h"
-
-template<typename MatrixType> void diagonal(const MatrixType& m)
-{
-  typedef typename MatrixType::Index Index;
-  typedef typename MatrixType::Scalar Scalar;
-  typedef typename MatrixType::RealScalar RealScalar;
-  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
-  typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType;
-
-  Index rows = m.rows();
-  Index cols = m.cols();
-
-  MatrixType m1 = MatrixType::Random(rows, cols),
-             m2 = MatrixType::Random(rows, cols);
-
-  //check diagonal()
-  VERIFY_IS_APPROX(m1.diagonal(), m1.transpose().diagonal());
-  m2.diagonal() = 2 * m1.diagonal();
-  m2.diagonal()[0] *= 3;
-
-  if (rows>2)
-  {
-    enum {
-      N1 = MatrixType::RowsAtCompileTime>2 ?  2 : 0,
-      N2 = MatrixType::RowsAtCompileTime>1 ? -1 : 0
-    };
-
-    // check sub/super diagonal
-    if(MatrixType::SizeAtCompileTime!=Dynamic)
-    {
-      VERIFY(m1.template diagonal<N1>().RowsAtCompileTime == m1.diagonal(N1).size());
-      VERIFY(m1.template diagonal<N2>().RowsAtCompileTime == m1.diagonal(N2).size());
-    }
-
-    m2.template diagonal<N1>() = 2 * m1.template diagonal<N1>();
-    VERIFY_IS_APPROX(m2.template diagonal<N1>(), static_cast<Scalar>(2) * m1.diagonal(N1));
-    m2.template diagonal<N1>()[0] *= 3;
-    VERIFY_IS_APPROX(m2.template diagonal<N1>()[0], static_cast<Scalar>(6) * m1.template diagonal<N1>()[0]);
-
-
-    m2.template diagonal<N2>() = 2 * m1.template diagonal<N2>();
-    m2.template diagonal<N2>()[0] *= 3;
-    VERIFY_IS_APPROX(m2.template diagonal<N2>()[0], static_cast<Scalar>(6) * m1.template diagonal<N2>()[0]);
-
-    m2.diagonal(N1) = 2 * m1.diagonal(N1);
-    VERIFY_IS_APPROX(m2.diagonal<N1>(), static_cast<Scalar>(2) * m1.diagonal(N1));
-    m2.diagonal(N1)[0] *= 3;
-    VERIFY_IS_APPROX(m2.diagonal(N1)[0], static_cast<Scalar>(6) * m1.diagonal(N1)[0]);
-
-    m2.diagonal(N2) = 2 * m1.diagonal(N2);
-    VERIFY_IS_APPROX(m2.diagonal<N2>(), static_cast<Scalar>(2) * m1.diagonal(N2));
-    m2.diagonal(N2)[0] *= 3;
-    VERIFY_IS_APPROX(m2.diagonal(N2)[0], static_cast<Scalar>(6) * m1.diagonal(N2)[0]);
-  }
-}
-
-void test_diagonal()
-{
-  for(int i = 0; i < g_repeat; i++) {
-    CALL_SUBTEST_1( diagonal(Matrix<float, 1, 1>()) );
-    CALL_SUBTEST_1( diagonal(Matrix<float, 4, 9>()) );
-    CALL_SUBTEST_1( diagonal(Matrix<float, 7, 3>()) );
-    CALL_SUBTEST_2( diagonal(Matrix4d()) );
-    CALL_SUBTEST_2( diagonal(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
-    CALL_SUBTEST_2( diagonal(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
-    CALL_SUBTEST_2( diagonal(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
-    CALL_SUBTEST_1( diagonal(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
-    CALL_SUBTEST_1( diagonal(Matrix<float,Dynamic,4>(3, 4)) );
-  }
-}
diff --git a/resources/3rdparty/eigen/test/eigensolver_complex.cpp b/resources/3rdparty/eigen/test/eigensolver_complex.cpp
deleted file mode 100644
index aef125739..000000000
--- a/resources/3rdparty/eigen/test/eigensolver_complex.cpp
+++ /dev/null
@@ -1,126 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "main.h"
-#include <limits>
-#include <Eigen/Eigenvalues>
-#include <Eigen/LU>
-
-/* Check that two column vectors are approximately equal up to a permutation of their entries,
-   by checking that the k-th power sums are equal for k = 1, ..., vec1.rows() */
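-/* (Why this works: for n entries, the first n power sums determine the elementary symmetric
-   polynomials via Newton's identities, and hence the multiset of entries; so equal power
-   sums imply equality up to a permutation.) */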
-template<typename VectorType>
-void verify_is_approx_upto_permutation(const VectorType& vec1, const VectorType& vec2)
-{
-  typedef typename NumTraits<typename VectorType::Scalar>::Real RealScalar;
-
-  VERIFY(vec1.cols() == 1);
-  VERIFY(vec2.cols() == 1);
-  VERIFY(vec1.rows() == vec2.rows());
-  for (int k = 1; k <= vec1.rows(); ++k)
-  {
-    VERIFY_IS_APPROX(vec1.array().pow(RealScalar(k)).sum(), vec2.array().pow(RealScalar(k)).sum());
-  }
-}
-
-
-template<typename MatrixType> void eigensolver(const MatrixType& m)
-{
-  typedef typename MatrixType::Index Index;
-  /* this test covers the following files:
-     ComplexEigenSolver.h, and indirectly ComplexSchur.h
-  */
-  Index rows = m.rows();
-  Index cols = m.cols();
-
-  typedef typename MatrixType::Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
-  typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealVectorType;
-  typedef typename std::complex<typename NumTraits<typename MatrixType::Scalar>::Real> Complex;
-
-  MatrixType a = MatrixType::Random(rows,cols);
-  MatrixType symmA =  a.adjoint() * a;
-
-  ComplexEigenSolver<MatrixType> ei0(symmA);
-  VERIFY_IS_EQUAL(ei0.info(), Success);
-  VERIFY_IS_APPROX(symmA * ei0.eigenvectors(), ei0.eigenvectors() * ei0.eigenvalues().asDiagonal());
-
-  ComplexEigenSolver<MatrixType> ei1(a);
-  VERIFY_IS_EQUAL(ei1.info(), Success);
-  VERIFY_IS_APPROX(a * ei1.eigenvectors(), ei1.eigenvectors() * ei1.eigenvalues().asDiagonal());
-  // Note: if MatrixType is real then a.eigenvalues() uses EigenSolver, i.e. a different
-  // algorithm, so the results may differ slightly.
-  verify_is_approx_upto_permutation(a.eigenvalues(), ei1.eigenvalues());
-
-  ComplexEigenSolver<MatrixType> ei2;
-  ei2.setMaxIterations(ComplexSchur<MatrixType>::m_maxIterationsPerRow * rows).compute(a);
-  VERIFY_IS_EQUAL(ei2.info(), Success);
-  VERIFY_IS_EQUAL(ei2.eigenvectors(), ei1.eigenvectors());
-  VERIFY_IS_EQUAL(ei2.eigenvalues(), ei1.eigenvalues());
-  if (rows > 2) {
-    ei2.setMaxIterations(1).compute(a);
-    VERIFY_IS_EQUAL(ei2.info(), NoConvergence);
-    VERIFY_IS_EQUAL(ei2.getMaxIterations(), 1);
-  }
-
-  ComplexEigenSolver<MatrixType> eiNoEivecs(a, false);
-  VERIFY_IS_EQUAL(eiNoEivecs.info(), Success);
-  VERIFY_IS_APPROX(ei1.eigenvalues(), eiNoEivecs.eigenvalues());
-
-  // Regression test for issue #66
-  MatrixType z = MatrixType::Zero(rows,cols);
-  ComplexEigenSolver<MatrixType> eiz(z);
-  VERIFY((eiz.eigenvalues().cwiseEqual(0)).all());
-
-  MatrixType id = MatrixType::Identity(rows, cols);
-  VERIFY_IS_APPROX(id.operatorNorm(), RealScalar(1));
-
-  if (rows > 1)
-  {
-    // Test matrix with NaN
-    a(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
-    ComplexEigenSolver<MatrixType> eiNaN(a);
-    VERIFY_IS_EQUAL(eiNaN.info(), NoConvergence);
-  }
-}
-
-template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m)
-{
-  ComplexEigenSolver<MatrixType> eig;
-  VERIFY_RAISES_ASSERT(eig.eigenvectors());
-  VERIFY_RAISES_ASSERT(eig.eigenvalues());
-
-  MatrixType a = MatrixType::Random(m.rows(),m.cols());
-  eig.compute(a, false);
-  VERIFY_RAISES_ASSERT(eig.eigenvectors());
-}
-
-void test_eigensolver_complex()
-{
-  int s;
-  for(int i = 0; i < g_repeat; i++) {
-    CALL_SUBTEST_1( eigensolver(Matrix4cf()) );
-    s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
-    CALL_SUBTEST_2( eigensolver(MatrixXcd(s,s)) );
-    CALL_SUBTEST_3( eigensolver(Matrix<std::complex<float>, 1, 1>()) );
-    CALL_SUBTEST_4( eigensolver(Matrix3f()) );
-  }
-
-  CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4cf()) );
-  s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
-  CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXcd(s,s)) );
-  CALL_SUBTEST_3( eigensolver_verify_assert(Matrix<std::complex<float>, 1, 1>()) );
-  CALL_SUBTEST_4( eigensolver_verify_assert(Matrix3f()) );
-
-  // Test problem size constructors
-  CALL_SUBTEST_5(ComplexEigenSolver<MatrixXf>(s));
-  
-  EIGEN_UNUSED_VARIABLE(s)
-}
diff --git a/resources/3rdparty/eigen/test/eigensolver_generic.cpp b/resources/3rdparty/eigen/test/eigensolver_generic.cpp
deleted file mode 100644
index ef499a989..000000000
--- a/resources/3rdparty/eigen/test/eigensolver_generic.cpp
+++ /dev/null
@@ -1,126 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "main.h"
-#include <limits>
-#include <Eigen/Eigenvalues>
-
-template<typename MatrixType> void eigensolver(const MatrixType& m)
-{
-  typedef typename MatrixType::Index Index;
-  /* this test covers the following files:
-     EigenSolver.h
-  */
-  Index rows = m.rows();
-  Index cols = m.cols();
-
-  typedef typename MatrixType::Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
-  typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealVectorType;
-  typedef typename std::complex<typename NumTraits<typename MatrixType::Scalar>::Real> Complex;
-
-  MatrixType a = MatrixType::Random(rows,cols);
-  MatrixType a1 = MatrixType::Random(rows,cols);
-  MatrixType symmA =  a.adjoint() * a + a1.adjoint() * a1;
-
-  EigenSolver<MatrixType> ei0(symmA);
-  VERIFY_IS_EQUAL(ei0.info(), Success);
-  VERIFY_IS_APPROX(symmA * ei0.pseudoEigenvectors(), ei0.pseudoEigenvectors() * ei0.pseudoEigenvalueMatrix());
-  VERIFY_IS_APPROX((symmA.template cast<Complex>()) * (ei0.pseudoEigenvectors().template cast<Complex>()),
-    (ei0.pseudoEigenvectors().template cast<Complex>()) * (ei0.eigenvalues().asDiagonal()));
-
-  EigenSolver<MatrixType> ei1(a);
-  VERIFY_IS_EQUAL(ei1.info(), Success);
-  VERIFY_IS_APPROX(a * ei1.pseudoEigenvectors(), ei1.pseudoEigenvectors() * ei1.pseudoEigenvalueMatrix());
-  VERIFY_IS_APPROX(a.template cast<Complex>() * ei1.eigenvectors(),
-                   ei1.eigenvectors() * ei1.eigenvalues().asDiagonal());
-  VERIFY_IS_APPROX(ei1.eigenvectors().colwise().norm(), RealVectorType::Ones(rows).transpose());
-  VERIFY_IS_APPROX(a.eigenvalues(), ei1.eigenvalues());
-
-  EigenSolver<MatrixType> ei2;
-  ei2.setMaxIterations(RealSchur<MatrixType>::m_maxIterationsPerRow * rows).compute(a);
-  VERIFY_IS_EQUAL(ei2.info(), Success);
-  VERIFY_IS_EQUAL(ei2.eigenvectors(), ei1.eigenvectors());
-  VERIFY_IS_EQUAL(ei2.eigenvalues(), ei1.eigenvalues());
-  if (rows > 2) {
-    ei2.setMaxIterations(1).compute(a);
-    VERIFY_IS_EQUAL(ei2.info(), NoConvergence);
-    VERIFY_IS_EQUAL(ei2.getMaxIterations(), 1);
-  }
-
-  EigenSolver<MatrixType> eiNoEivecs(a, false);
-  VERIFY_IS_EQUAL(eiNoEivecs.info(), Success);
-  VERIFY_IS_APPROX(ei1.eigenvalues(), eiNoEivecs.eigenvalues());
-  VERIFY_IS_APPROX(ei1.pseudoEigenvalueMatrix(), eiNoEivecs.pseudoEigenvalueMatrix());
-
-  MatrixType id = MatrixType::Identity(rows, cols);
-  VERIFY_IS_APPROX(id.operatorNorm(), RealScalar(1));
-
-  if (rows > 2)
-  {
-    // Test matrix with NaN
-    a(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
-    EigenSolver<MatrixType> eiNaN(a);
-    VERIFY_IS_EQUAL(eiNaN.info(), NoConvergence);
-  }
-}
-
-template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m)
-{
-  EigenSolver<MatrixType> eig;
-  VERIFY_RAISES_ASSERT(eig.eigenvectors());
-  VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors());
-  VERIFY_RAISES_ASSERT(eig.pseudoEigenvalueMatrix());
-  VERIFY_RAISES_ASSERT(eig.eigenvalues());
-
-  MatrixType a = MatrixType::Random(m.rows(),m.cols());
-  eig.compute(a, false);
-  VERIFY_RAISES_ASSERT(eig.eigenvectors());
-  VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors());
-}
-
-void test_eigensolver_generic()
-{
-  int s;
-  for(int i = 0; i < g_repeat; i++) {
-    CALL_SUBTEST_1( eigensolver(Matrix4f()) );
-    s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
-    CALL_SUBTEST_2( eigensolver(MatrixXd(s,s)) );
-
-    // some trivial but implementation-wise tricky cases
-    CALL_SUBTEST_2( eigensolver(MatrixXd(1,1)) );
-    CALL_SUBTEST_2( eigensolver(MatrixXd(2,2)) );
-    CALL_SUBTEST_3( eigensolver(Matrix<double,1,1>()) );
-    CALL_SUBTEST_4( eigensolver(Matrix2d()) );
-  }
-
-  CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4f()) );
-  s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
-  CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXd(s,s)) );
-  CALL_SUBTEST_3( eigensolver_verify_assert(Matrix<double,1,1>()) );
-  CALL_SUBTEST_4( eigensolver_verify_assert(Matrix2d()) );
-
-  // Test problem size constructors
-  CALL_SUBTEST_5(EigenSolver<MatrixXf>(s));
-
-  // regression test for bug 410
-  CALL_SUBTEST_2(
-  {
-     MatrixXd A(1,1);
-     A(0,0) = std::sqrt(-1.);
-     Eigen::EigenSolver<MatrixXd> solver(A);
-     MatrixXd V(1, 1);
-     V(0,0) = solver.eigenvectors()(0,0).real();
-  }
-  );
-  
-  EIGEN_UNUSED_VARIABLE(s)
-}
diff --git a/resources/3rdparty/eigen/test/schur_complex.cpp b/resources/3rdparty/eigen/test/schur_complex.cpp
deleted file mode 100644
index 5e869790f..000000000
--- a/resources/3rdparty/eigen/test/schur_complex.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "main.h"
-#include <limits>
-#include <Eigen/Eigenvalues>
-
-template<typename MatrixType> void schur(int size = MatrixType::ColsAtCompileTime)
-{
-  typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;
-  typedef typename ComplexSchur<MatrixType>::ComplexMatrixType ComplexMatrixType;
-
-  // Test basic functionality: T is triangular and A = U T U*
-  for(int counter = 0; counter < g_repeat; ++counter) {
-    MatrixType A = MatrixType::Random(size, size);
-    ComplexSchur<MatrixType> schurOfA(A);
-    VERIFY_IS_EQUAL(schurOfA.info(), Success);
-    ComplexMatrixType U = schurOfA.matrixU();
-    ComplexMatrixType T = schurOfA.matrixT();
-    for(int row = 1; row < size; ++row) {
-      for(int col = 0; col < row; ++col) {
-        VERIFY(T(row,col) == (typename MatrixType::Scalar)0);
-      }
-    }
-    VERIFY_IS_APPROX(A.template cast<ComplexScalar>(), U * T * U.adjoint());
-  }
-
-  // Test asserts when not initialized
-  ComplexSchur<MatrixType> csUninitialized;
-  VERIFY_RAISES_ASSERT(csUninitialized.matrixT());
-  VERIFY_RAISES_ASSERT(csUninitialized.matrixU());
-  VERIFY_RAISES_ASSERT(csUninitialized.info());
-  
-  // Test whether compute() and the constructor return the same result
-  MatrixType A = MatrixType::Random(size, size);
-  ComplexSchur<MatrixType> cs1;
-  cs1.compute(A);
-  ComplexSchur<MatrixType> cs2(A);
-  VERIFY_IS_EQUAL(cs1.info(), Success);
-  VERIFY_IS_EQUAL(cs2.info(), Success);
-  VERIFY_IS_EQUAL(cs1.matrixT(), cs2.matrixT());
-  VERIFY_IS_EQUAL(cs1.matrixU(), cs2.matrixU());
-
-  // Test maximum number of iterations
-  ComplexSchur<MatrixType> cs3;
-  cs3.setMaxIterations(ComplexSchur<MatrixType>::m_maxIterationsPerRow * size).compute(A);
-  VERIFY_IS_EQUAL(cs3.info(), Success);
-  VERIFY_IS_EQUAL(cs3.matrixT(), cs1.matrixT());
-  VERIFY_IS_EQUAL(cs3.matrixU(), cs1.matrixU());
-  cs3.setMaxIterations(1).compute(A);
-  VERIFY_IS_EQUAL(cs3.info(), size > 1 ? NoConvergence : Success);
-  VERIFY_IS_EQUAL(cs3.getMaxIterations(), 1);
-
-  MatrixType Atriangular = A;
-  Atriangular.template triangularView<StrictlyLower>().setZero(); 
-  cs3.setMaxIterations(1).compute(Atriangular); // triangular matrices do not need any iterations
-  VERIFY_IS_EQUAL(cs3.info(), Success);
-  VERIFY_IS_EQUAL(cs3.matrixT(), Atriangular.template cast<ComplexScalar>());
-  VERIFY_IS_EQUAL(cs3.matrixU(), ComplexMatrixType::Identity(size, size));
-
-  // Test computation of only T, not U
-  ComplexSchur<MatrixType> csOnlyT(A, false);
-  VERIFY_IS_EQUAL(csOnlyT.info(), Success);
-  VERIFY_IS_EQUAL(cs1.matrixT(), csOnlyT.matrixT());
-  VERIFY_RAISES_ASSERT(csOnlyT.matrixU());
-
-  if (size > 1)
-  {
-    // Test matrix with NaN
-    A(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
-    ComplexSchur<MatrixType> csNaN(A);
-    VERIFY_IS_EQUAL(csNaN.info(), NoConvergence);
-  }
-}
-
-void test_schur_complex()
-{
-  CALL_SUBTEST_1(( schur<Matrix4cd>() ));
-  CALL_SUBTEST_2(( schur<MatrixXcf>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4)) ));
-  CALL_SUBTEST_3(( schur<Matrix<std::complex<float>, 1, 1> >() ));
-  CALL_SUBTEST_4(( schur<Matrix<float, 3, 3, Eigen::RowMajor> >() ));
-
-  // Test problem size constructors
-  CALL_SUBTEST_5(ComplexSchur<MatrixXf>(10));
-}
diff --git a/resources/3rdparty/eigen/test/schur_real.cpp b/resources/3rdparty/eigen/test/schur_real.cpp
deleted file mode 100644
index 36b9c24d1..000000000
--- a/resources/3rdparty/eigen/test/schur_real.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "main.h"
-#include <limits>
-#include <Eigen/Eigenvalues>
-
-template<typename MatrixType> void verifyIsQuasiTriangular(const MatrixType& T)
-{
-  typedef typename MatrixType::Index Index;
-
-  const Index size = T.cols();
-  typedef typename MatrixType::Scalar Scalar;
-
-  // Check that T is upper Hessenberg (all entries below the first subdiagonal are zero)
-  for(int row = 2; row < size; ++row) {
-    for(int col = 0; col < row - 1; ++col) {
-      VERIFY(T(row,col) == Scalar(0));
-    }
-  }
-
-  // Check that any non-zero on the subdiagonal is followed by a zero and is
-  // part of a 2x2 diagonal block with complex (non-real) eigenvalues, i.e. a
-  // block whose discriminant tr^2 - 4*det is negative.
-  for(int row = 1; row < size; ++row) {
-    if (T(row,row-1) != Scalar(0)) {
-      VERIFY(row == size-1 || T(row+1,row) == 0);
-      Scalar tr = T(row-1,row-1) + T(row,row);
-      Scalar det = T(row-1,row-1) * T(row,row) - T(row-1,row) * T(row,row-1);
-      VERIFY(4 * det > tr * tr);
-    }
-  }
-}
-
-template<typename MatrixType> void schur(int size = MatrixType::ColsAtCompileTime)
-{
-  // Test basic functionality: T is quasi-triangular and A = U T U*
-  for(int counter = 0; counter < g_repeat; ++counter) {
-    MatrixType A = MatrixType::Random(size, size);
-    RealSchur<MatrixType> schurOfA(A);
-    VERIFY_IS_EQUAL(schurOfA.info(), Success);
-    MatrixType U = schurOfA.matrixU();
-    MatrixType T = schurOfA.matrixT();
-    verifyIsQuasiTriangular(T);
-    VERIFY_IS_APPROX(A, U * T * U.transpose());
-  }
-
-  // Test asserts when not initialized
-  RealSchur<MatrixType> rsUninitialized;
-  VERIFY_RAISES_ASSERT(rsUninitialized.matrixT());
-  VERIFY_RAISES_ASSERT(rsUninitialized.matrixU());
-  VERIFY_RAISES_ASSERT(rsUninitialized.info());
-  
-  // Test whether compute() and the constructor return the same result
-  MatrixType A = MatrixType::Random(size, size);
-  RealSchur<MatrixType> rs1;
-  rs1.compute(A);
-  RealSchur<MatrixType> rs2(A);
-  VERIFY_IS_EQUAL(rs1.info(), Success);
-  VERIFY_IS_EQUAL(rs2.info(), Success);
-  VERIFY_IS_EQUAL(rs1.matrixT(), rs2.matrixT());
-  VERIFY_IS_EQUAL(rs1.matrixU(), rs2.matrixU());
-
-  // Test maximum number of iterations
-  RealSchur<MatrixType> rs3;
-  rs3.setMaxIterations(RealSchur<MatrixType>::m_maxIterationsPerRow * size).compute(A);
-  VERIFY_IS_EQUAL(rs3.info(), Success);
-  VERIFY_IS_EQUAL(rs3.matrixT(), rs1.matrixT());
-  VERIFY_IS_EQUAL(rs3.matrixU(), rs1.matrixU());
-  if (size > 2) {
-    rs3.setMaxIterations(1).compute(A);
-    VERIFY_IS_EQUAL(rs3.info(), NoConvergence);
-    VERIFY_IS_EQUAL(rs3.getMaxIterations(), 1);
-  }
-
-  MatrixType Atriangular = A;
-  Atriangular.template triangularView<StrictlyLower>().setZero(); 
-  rs3.setMaxIterations(1).compute(Atriangular); // triangular matrices do not need any iterations
-  VERIFY_IS_EQUAL(rs3.info(), Success);
-  VERIFY_IS_EQUAL(rs3.matrixT(), Atriangular);
-  VERIFY_IS_EQUAL(rs3.matrixU(), MatrixType::Identity(size, size));
-
-  // Test computation of only T, not U
-  RealSchur<MatrixType> rsOnlyT(A, false);
-  VERIFY_IS_EQUAL(rsOnlyT.info(), Success);
-  VERIFY_IS_EQUAL(rs1.matrixT(), rsOnlyT.matrixT());
-  VERIFY_RAISES_ASSERT(rsOnlyT.matrixU());
-
-  if (size > 2)
-  {
-    // Test matrix with NaN
-    A(0,0) = std::numeric_limits<typename MatrixType::Scalar>::quiet_NaN();
-    RealSchur<MatrixType> rsNaN(A);
-    VERIFY_IS_EQUAL(rsNaN.info(), NoConvergence);
-  }
-}
-
-void test_schur_real()
-{
-  CALL_SUBTEST_1(( schur<Matrix4f>() ));
-  CALL_SUBTEST_2(( schur<MatrixXd>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4)) ));
-  CALL_SUBTEST_3(( schur<Matrix<float, 1, 1> >() ));
-  CALL_SUBTEST_4(( schur<Matrix<double, 3, 3, Eigen::RowMajor> >() ));
-
-  // Test problem size constructors
-  CALL_SUBTEST_5(RealSchur<MatrixXf>(10));
-}
diff --git a/resources/3rdparty/eigen/test/sparse_basic.cpp b/resources/3rdparty/eigen/test/sparse_basic.cpp
deleted file mode 100644
index 4566de9f2..000000000
--- a/resources/3rdparty/eigen/test/sparse_basic.cpp
+++ /dev/null
@@ -1,436 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2008 Daniel Gomez Ferro <dgomezferro@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "sparse.h"
-
-template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& ref)
-{
-  typedef typename SparseMatrixType::Index Index;
-
-  const Index rows = ref.rows();
-  const Index cols = ref.cols();
-  typedef typename SparseMatrixType::Scalar Scalar;
-  enum { Flags = SparseMatrixType::Flags };
-
-  double density = (std::max)(8./(rows*cols), 0.01);
-  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
-  typedef Matrix<Scalar,Dynamic,1> DenseVector;
-  Scalar eps = 1e-6;
-
-  SparseMatrixType m(rows, cols);
-  DenseMatrix refMat = DenseMatrix::Zero(rows, cols);
-  DenseVector vec1 = DenseVector::Random(rows);
-  Scalar s1 = internal::random<Scalar>();
-
-  std::vector<Vector2i> zeroCoords;
-  std::vector<Vector2i> nonzeroCoords;
-  initSparse<Scalar>(density, refMat, m, 0, &zeroCoords, &nonzeroCoords);
-
-  if (zeroCoords.size()==0 || nonzeroCoords.size()==0)
-    return;
-
-  // test coeff and coeffRef
-  for (int i=0; i<(int)zeroCoords.size(); ++i)
-  {
-    VERIFY_IS_MUCH_SMALLER_THAN( m.coeff(zeroCoords[i].x(),zeroCoords[i].y()), eps );
-    if(internal::is_same<SparseMatrixType,SparseMatrix<Scalar,Flags> >::value)
-      VERIFY_RAISES_ASSERT( m.coeffRef(zeroCoords[0].x(),zeroCoords[0].y()) = 5 );
-  }
-  VERIFY_IS_APPROX(m, refMat);
-
-  m.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5);
-  refMat.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5);
-
-  VERIFY_IS_APPROX(m, refMat);
-  /*
-  // test InnerIterators and Block expressions
-  for (int t=0; t<10; ++t)
-  {
-    int j = internal::random<int>(0,cols-1);
-    int i = internal::random<int>(0,rows-1);
-    int w = internal::random<int>(1,cols-j-1);
-    int h = internal::random<int>(1,rows-i-1);
-
-//     VERIFY_IS_APPROX(m.block(i,j,h,w), refMat.block(i,j,h,w));
-    for(int c=0; c<w; c++)
-    {
-      VERIFY_IS_APPROX(m.block(i,j,h,w).col(c), refMat.block(i,j,h,w).col(c));
-      for(int r=0; r<h; r++)
-      {
-//         VERIFY_IS_APPROX(m.block(i,j,h,w).col(c).coeff(r), refMat.block(i,j,h,w).col(c).coeff(r));
-      }
-    }
-//     for(int r=0; r<h; r++)
-//     {
-//       VERIFY_IS_APPROX(m.block(i,j,h,w).row(r), refMat.block(i,j,h,w).row(r));
-//       for(int c=0; c<w; c++)
-//       {
-//         VERIFY_IS_APPROX(m.block(i,j,h,w).row(r).coeff(c), refMat.block(i,j,h,w).row(r).coeff(c));
-//       }
-//     }
-  }
-
-  for(int c=0; c<cols; c++)
-  {
-    VERIFY_IS_APPROX(m.col(c) + m.col(c), (m + m).col(c));
-    VERIFY_IS_APPROX(m.col(c) + m.col(c), refMat.col(c) + refMat.col(c));
-  }
-
-  for(int r=0; r<rows; r++)
-  {
-    VERIFY_IS_APPROX(m.row(r) + m.row(r), (m + m).row(r));
-    VERIFY_IS_APPROX(m.row(r) + m.row(r), refMat.row(r) + refMat.row(r));
-  }
-  */
-
-    // test insert (inner random)
-    {
-      DenseMatrix m1(rows,cols);
-      m1.setZero();
-      SparseMatrixType m2(rows,cols);
-      if(internal::random<int>()%2)
-        m2.reserve(VectorXi::Constant(m2.outerSize(), 2));
-      for (int j=0; j<cols; ++j)
-      {
-        for (int k=0; k<rows/2; ++k)
-        {
-          int i = internal::random<int>(0,rows-1);
-          if (m1.coeff(i,j)==Scalar(0))
-            m2.insert(i,j) = m1(i,j) = internal::random<Scalar>();
-        }
-      }
-      m2.finalize();
-      VERIFY_IS_APPROX(m2,m1);
-    }
-
-    // test insert (fully random)
-    {
-      DenseMatrix m1(rows,cols);
-      m1.setZero();
-      SparseMatrixType m2(rows,cols);
-      if(internal::random<int>()%2)
-        m2.reserve(VectorXi::Constant(m2.outerSize(), 2));
-      for (int k=0; k<rows*cols; ++k)
-      {
-        int i = internal::random<int>(0,rows-1);
-        int j = internal::random<int>(0,cols-1);
-        if ((m1.coeff(i,j)==Scalar(0)) && (internal::random<int>()%2))
-          m2.insert(i,j) = m1(i,j) = internal::random<Scalar>();
-        else
-        {
-          Scalar v = internal::random<Scalar>();
-          m2.coeffRef(i,j) += v;
-          m1(i,j) += v;
-        }
-      }
-      VERIFY_IS_APPROX(m2,m1);
-    }
-    
-    // test insert (un-compressed)
-    for(int mode=0;mode<4;++mode)
-    {
-      DenseMatrix m1(rows,cols);
-      m1.setZero();
-      SparseMatrixType m2(rows,cols);
-      VectorXi r(VectorXi::Constant(m2.outerSize(), ((mode%2)==0) ? m2.innerSize() : std::max<int>(1,m2.innerSize()/8)));
-      m2.reserve(r);
-      for (int k=0; k<rows*cols; ++k)
-      {
-        int i = internal::random<int>(0,rows-1);
-        int j = internal::random<int>(0,cols-1);
-        if (m1.coeff(i,j)==Scalar(0))
-          m2.insert(i,j) = m1(i,j) = internal::random<Scalar>();
-        if(mode==3)
-          m2.reserve(r);
-      }
-      if(internal::random<int>()%2)
-        m2.makeCompressed();
-      VERIFY_IS_APPROX(m2,m1);
-    }
-
-  // test basic computations
-  {
-    DenseMatrix refM1 = DenseMatrix::Zero(rows, rows);
-    DenseMatrix refM2 = DenseMatrix::Zero(rows, rows);
-    DenseMatrix refM3 = DenseMatrix::Zero(rows, rows);
-    DenseMatrix refM4 = DenseMatrix::Zero(rows, rows);
-    SparseMatrixType m1(rows, rows);
-    SparseMatrixType m2(rows, rows);
-    SparseMatrixType m3(rows, rows);
-    SparseMatrixType m4(rows, rows);
-    initSparse<Scalar>(density, refM1, m1);
-    initSparse<Scalar>(density, refM2, m2);
-    initSparse<Scalar>(density, refM3, m3);
-    initSparse<Scalar>(density, refM4, m4);
-
-    VERIFY_IS_APPROX(m1+m2, refM1+refM2);
-    VERIFY_IS_APPROX(m1+m2+m3, refM1+refM2+refM3);
-    VERIFY_IS_APPROX(m3.cwiseProduct(m1+m2), refM3.cwiseProduct(refM1+refM2));
-    VERIFY_IS_APPROX(m1*s1-m2, refM1*s1-refM2);
-
-    VERIFY_IS_APPROX(m1*=s1, refM1*=s1);
-    VERIFY_IS_APPROX(m1/=s1, refM1/=s1);
-
-    VERIFY_IS_APPROX(m1+=m2, refM1+=refM2);
-    VERIFY_IS_APPROX(m1-=m2, refM1-=refM2);
-
-    if(SparseMatrixType::IsRowMajor)
-      VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.row(0).dot(refM2.row(0)));
-    else
-      VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.col(0).dot(refM2.row(0)));
-
-    VERIFY_IS_APPROX(m1.conjugate(), refM1.conjugate());
-    VERIFY_IS_APPROX(m1.real(), refM1.real());
-
-    refM4.setRandom();
-    // sparse cwise* dense
-    VERIFY_IS_APPROX(m3.cwiseProduct(refM4), refM3.cwiseProduct(refM4));
-//     VERIFY_IS_APPROX(m3.cwise()/refM4, refM3.cwise()/refM4);
-
-    // test aliasing
-    VERIFY_IS_APPROX((m1 = -m1), (refM1 = -refM1));
-    VERIFY_IS_APPROX((m1 = m1.transpose()), (refM1 = refM1.transpose().eval()));
-    VERIFY_IS_APPROX((m1 = -m1.transpose()), (refM1 = -refM1.transpose().eval()));
-    VERIFY_IS_APPROX((m1 += -m1), (refM1 += -refM1));
-  }
-
-  // test transpose
-  {
-    DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
-    SparseMatrixType m2(rows, rows);
-    initSparse<Scalar>(density, refMat2, m2);
-    VERIFY_IS_APPROX(m2.transpose().eval(), refMat2.transpose().eval());
-    VERIFY_IS_APPROX(m2.transpose(), refMat2.transpose());
-
-    VERIFY_IS_APPROX(SparseMatrixType(m2.adjoint()), refMat2.adjoint());
-  }
-
-  // test innerVector()
-  {
-    DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
-    SparseMatrixType m2(rows, rows);
-    initSparse<Scalar>(density, refMat2, m2);
-    int j0 = internal::random<int>(0,rows-1);
-    int j1 = internal::random<int>(0,rows-1);
-    if(SparseMatrixType::IsRowMajor)
-      VERIFY_IS_APPROX(m2.innerVector(j0), refMat2.row(j0));
-    else
-      VERIFY_IS_APPROX(m2.innerVector(j0), refMat2.col(j0));
-
-    if(SparseMatrixType::IsRowMajor)
-      VERIFY_IS_APPROX(m2.innerVector(j0)+m2.innerVector(j1), refMat2.row(j0)+refMat2.row(j1));
-    else
-      VERIFY_IS_APPROX(m2.innerVector(j0)+m2.innerVector(j1), refMat2.col(j0)+refMat2.col(j1));
-
-    SparseMatrixType m3(rows,rows);
-    m3.reserve(VectorXi::Constant(rows,rows/2));
-    for(int j=0; j<rows; ++j)
-      for(int k=0; k<j; ++k)
-        m3.insertByOuterInner(j,k) = k+1;
-    for(int j=0; j<rows; ++j)
-    {
-      VERIFY(j==internal::real(m3.innerVector(j).nonZeros()));
-      if(j>0)
-        VERIFY(j==internal::real(m3.innerVector(j).lastCoeff()));
-    }
-    m3.makeCompressed();
-    for(int j=0; j<rows; ++j)
-    {
-      VERIFY(j==internal::real(m3.innerVector(j).nonZeros()));
-      if(j>0)
-        VERIFY(j==internal::real(m3.innerVector(j).lastCoeff()));
-    }
-
-    //m2.innerVector(j0) = 2*m2.innerVector(j1);
-    //refMat2.col(j0) = 2*refMat2.col(j1);
-    //VERIFY_IS_APPROX(m2, refMat2);
-  }
-
-  // test innerVectors()
-  {
-    DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
-    SparseMatrixType m2(rows, rows);
-    initSparse<Scalar>(density, refMat2, m2);
-    int j0 = internal::random<int>(0,rows-2);
-    int j1 = internal::random<int>(0,rows-2);
-    int n0 = internal::random<int>(1,rows-(std::max)(j0,j1));
-    if(SparseMatrixType::IsRowMajor)
-      VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(j0,0,n0,cols));
-    else
-      VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(0,j0,rows,n0));
-    if(SparseMatrixType::IsRowMajor)
-      VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0),
-                      refMat2.block(j0,0,n0,cols)+refMat2.block(j1,0,n0,cols));
-    else
-      VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0),
-                      refMat2.block(0,j0,rows,n0)+refMat2.block(0,j1,rows,n0));
-    //m2.innerVectors(j0,n0) = m2.innerVectors(j0,n0) + m2.innerVectors(j1,n0);
-    //refMat2.block(0,j0,rows,n0) = refMat2.block(0,j0,rows,n0) + refMat2.block(0,j1,rows,n0);
-  }
-
-  // test prune
-  {
-    SparseMatrixType m2(rows, rows);
-    DenseMatrix refM2(rows, rows);
-    refM2.setZero();
-    int countFalseNonZero = 0;
-    int countTrueNonZero = 0;
-    for (int j=0; j<m2.outerSize(); ++j)
-    {
-      m2.startVec(j);
-      for (int i=0; i<m2.innerSize(); ++i)
-      {
-        float x = internal::random<float>(0,1);
-        if (x<0.1)
-        {
-          // do nothing
-        }
-        else if (x<0.5)
-        {
-          countFalseNonZero++;
-          m2.insertBackByOuterInner(j,i) = Scalar(0);
-        }
-        else
-        {
-          countTrueNonZero++;
-          m2.insertBackByOuterInner(j,i) = Scalar(1);
-          if(SparseMatrixType::IsRowMajor)
-            refM2(j,i) = Scalar(1);
-          else
-            refM2(i,j) = Scalar(1);
-        }
-      }
-    }
-    m2.finalize();
-    VERIFY(countFalseNonZero+countTrueNonZero == m2.nonZeros());
-    VERIFY_IS_APPROX(m2, refM2);
-    m2.prune(Scalar(1));
-    VERIFY(countTrueNonZero==m2.nonZeros());
-    VERIFY_IS_APPROX(m2, refM2);
-  }
-
-  // test setFromTriplets
-  {
-    typedef Triplet<Scalar,Index> TripletType;
-    std::vector<TripletType> triplets;
-    int ntriplets = rows*cols;
-    triplets.reserve(ntriplets);
-    DenseMatrix refMat(rows,cols);
-    refMat.setZero();
-    for(int i=0;i<ntriplets;++i)
-    {
-      int r = internal::random<int>(0,rows-1);
-      int c = internal::random<int>(0,cols-1);
-      Scalar v = internal::random<Scalar>();
-      triplets.push_back(TripletType(r,c,v));
-      refMat(r,c) += v;
-    }
-    SparseMatrixType m(rows,cols);
-    m.setFromTriplets(triplets.begin(), triplets.end());
-    VERIFY_IS_APPROX(m, refMat);
-  }
-
-  // test triangularView
-  {
-    DenseMatrix refMat2(rows, rows), refMat3(rows, rows);
-    SparseMatrixType m2(rows, rows), m3(rows, rows);
-    initSparse<Scalar>(density, refMat2, m2);
-    refMat3 = refMat2.template triangularView<Lower>();
-    m3 = m2.template triangularView<Lower>();
-    VERIFY_IS_APPROX(m3, refMat3);
-
-    refMat3 = refMat2.template triangularView<Upper>();
-    m3 = m2.template triangularView<Upper>();
-    VERIFY_IS_APPROX(m3, refMat3);
-
-    refMat3 = refMat2.template triangularView<UnitUpper>();
-    m3 = m2.template triangularView<UnitUpper>();
-    VERIFY_IS_APPROX(m3, refMat3);
-
-    refMat3 = refMat2.template triangularView<UnitLower>();
-    m3 = m2.template triangularView<UnitLower>();
-    VERIFY_IS_APPROX(m3, refMat3);
-  }
-  
-  // test selfadjointView
-  if(!SparseMatrixType::IsRowMajor)
-  {
-    DenseMatrix refMat2(rows, rows), refMat3(rows, rows);
-    SparseMatrixType m2(rows, rows), m3(rows, rows);
-    initSparse<Scalar>(density, refMat2, m2);
-    refMat3 = refMat2.template selfadjointView<Lower>();
-    m3 = m2.template selfadjointView<Lower>();
-    VERIFY_IS_APPROX(m3, refMat3);
-  }
-  
-  // test sparseView
-  {
-    DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
-    SparseMatrixType m2(rows, rows);
-    initSparse<Scalar>(density, refMat2, m2);
-    VERIFY_IS_APPROX(m2.eval(), refMat2.sparseView().eval());
-  }
-
-  // test diagonal
-  {
-    DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
-    SparseMatrixType m2(rows, rows);
-    initSparse<Scalar>(density, refMat2, m2);
-    VERIFY_IS_APPROX(m2.diagonal(), refMat2.diagonal().eval());
-  }
-  
-  // test conservative resize
-  {
-      std::vector< std::pair<int,int> > inc;
-      inc.push_back(std::pair<int,int>(-3,-2));
-      inc.push_back(std::pair<int,int>(0,0));
-      inc.push_back(std::pair<int,int>(3,2));
-      inc.push_back(std::pair<int,int>(3,0));
-      inc.push_back(std::pair<int,int>(0,3));
-      
-      for(size_t i = 0; i< inc.size(); i++) {
-        int incRows = inc[i].first;
-        int incCols = inc[i].second;
-        SparseMatrixType m1(rows, cols);
-        DenseMatrix refMat1 = DenseMatrix::Zero(rows, cols);
-        initSparse<Scalar>(density, refMat1, m1);
-        
-        m1.conservativeResize(rows+incRows, cols+incCols);
-        refMat1.conservativeResize(rows+incRows, cols+incCols);
-        if (incRows > 0) refMat1.bottomRows(incRows).setZero();
-        if (incCols > 0) refMat1.rightCols(incCols).setZero();
-        
-        VERIFY_IS_APPROX(m1, refMat1);
-        
-        // Insert new values
-        if (incRows > 0) 
-          m1.insert(refMat1.rows()-1, 0) = refMat1(refMat1.rows()-1, 0) = 1;
-        if (incCols > 0) 
-          m1.insert(0, refMat1.cols()-1) = refMat1(0, refMat1.cols()-1) = 1;
-
-        VERIFY_IS_APPROX(m1, refMat1);
-      }
-  }
-}
-
-void test_sparse_basic()
-{
-  for(int i = 0; i < g_repeat; i++) {
-    int s = Eigen::internal::random<int>(1,50);
-    CALL_SUBTEST_1(( sparse_basic(SparseMatrix<double>(8, 8)) ));
-    CALL_SUBTEST_2(( sparse_basic(SparseMatrix<std::complex<double>, ColMajor>(s, s)) ));
-    CALL_SUBTEST_2(( sparse_basic(SparseMatrix<std::complex<double>, RowMajor>(s, s)) ));
-    CALL_SUBTEST_1(( sparse_basic(SparseMatrix<double>(s, s)) ));
-    CALL_SUBTEST_1(( sparse_basic(SparseMatrix<double,ColMajor,long int>(s, s)) ));
-    CALL_SUBTEST_1(( sparse_basic(SparseMatrix<double,RowMajor,long int>(s, s)) ));
-  }
-}
diff --git a/resources/3rdparty/eigen/test/sparse_solver.h b/resources/3rdparty/eigen/test/sparse_solver.h
deleted file mode 100644
index 73d92874c..000000000
--- a/resources/3rdparty/eigen/test/sparse_solver.h
+++ /dev/null
@@ -1,309 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "sparse.h"
-#include <Eigen/SparseCore>
-
-template<typename Solver, typename Rhs, typename DenseMat, typename DenseRhs>
-void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A, const Rhs& b, const DenseMat& dA, const DenseRhs& db)
-{
-  typedef typename Solver::MatrixType Mat;
-  typedef typename Mat::Scalar Scalar;
-
-  DenseRhs refX = dA.lu().solve(db);
-
-  Rhs x(b.rows(), b.cols());
-  Rhs oldb = b;
-
-  solver.compute(A);
-  if (solver.info() != Success)
-  {
-    std::cerr << "sparse solver testing: factorization failed (check_sparse_solving)\n";
-    exit(0);
-    return;
-  }
-  x = solver.solve(b);
-  if (solver.info() != Success)
-  {
-    std::cerr << "sparse solver testing: solving failed\n";
-    return;
-  }
-  VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!");
-
-  VERIFY(x.isApprox(refX,test_precision<Scalar>()));
-  
-  x.setZero();
-  // test the analyze/factorize API
-  solver.analyzePattern(A);
-  solver.factorize(A);
-  if (solver.info() != Success)
-  {
-    std::cerr << "sparse solver testing: factorization failed (check_sparse_solving)\n";
-    exit(0);
-    return;
-  }
-  x = solver.solve(b);
-  if (solver.info() != Success)
-  {
-    std::cerr << "sparse solver testing: solving failed\n";
-    return;
-  }
-  VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!");
-
-  VERIFY(x.isApprox(refX,test_precision<Scalar>()));
-  
-  // test Block as the result and rhs:
-  {
-    DenseRhs x(db.rows(), db.cols());
-    DenseRhs b(db), oldb(db);
-    x.setZero();
-    x.block(0,0,x.rows(),x.cols()) = solver.solve(b.block(0,0,b.rows(),b.cols()));
-    VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!");
-    VERIFY(x.isApprox(refX,test_precision<Scalar>()));
-  }
-}
-
-template<typename Solver, typename Rhs>
-void check_sparse_solving_real_cases(Solver& solver, const typename Solver::MatrixType& A, const Rhs& b, const Rhs& refX)
-{
-  typedef typename Solver::MatrixType Mat;
-  typedef typename Mat::Scalar Scalar;
-  typedef typename Mat::RealScalar RealScalar;
-  
-  Rhs x(b.rows(), b.cols());
-  
-  solver.compute(A);
-  if (solver.info() != Success)
-  {
-    std::cerr << "sparse solver testing: factorization failed (check_sparse_solving_real_cases)\n";
-    exit(0);
-    return;
-  }
-  x = solver.solve(b);
-  if (solver.info() != Success)
-  {
-    std::cerr << "sparse solver testing: solving failed\n";
-    return;
-  }
-  
-  RealScalar res_error;
-  // Compute the norm of the relative error
-  if(refX.size() != 0)
-    res_error = (refX - x).norm()/refX.norm();
-  else
-  { 
-    // Compute the relative residual norm
-    res_error = (b - A * x).norm()/b.norm();
-  }
-  if (res_error > test_precision<Scalar>()) {
-    std::cerr << "Test " << g_test_stack.back() << " failed in " EI_PP_MAKE_STRING(__FILE__)
-              << " (" << EI_PP_MAKE_STRING(__LINE__) << ")" << std::endl << std::endl;
-    abort();
-  }
-  
-}
-template<typename Solver, typename DenseMat>
-void check_sparse_determinant(Solver& solver, const typename Solver::MatrixType& A, const DenseMat& dA)
-{
-  typedef typename Solver::MatrixType Mat;
-  typedef typename Mat::Scalar Scalar;
-  typedef typename Mat::RealScalar RealScalar;
-  
-  solver.compute(A);
-  if (solver.info() != Success)
-  {
-    std::cerr << "sparse solver testing: factorization failed (check_sparse_determinant)\n";
-    return;
-  }
-
-  Scalar refDet = dA.determinant();
-  VERIFY_IS_APPROX(refDet,solver.determinant());
-}
-
-
-template<typename Solver, typename DenseMat>
-int generate_sparse_spd_problem(Solver& , typename Solver::MatrixType& A, typename Solver::MatrixType& halfA, DenseMat& dA, int maxSize = 300)
-{
-  typedef typename Solver::MatrixType Mat;
-  typedef typename Mat::Scalar Scalar;
-  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
-
-  int size = internal::random<int>(1,maxSize);
-  double density = (std::max)(8./(size*size), 0.01);
-
-  Mat M(size, size);
-  DenseMatrix dM(size, size);
-
-  initSparse<Scalar>(density, dM, M, ForceNonZeroDiag);
-
-  A = M * M.adjoint();
-  dA = dM * dM.adjoint();
-  
-  halfA.resize(size,size);
-  halfA.template selfadjointView<Solver::UpLo>().rankUpdate(M);
-  
-  return size;
-}
-
-
-#ifdef TEST_REAL_CASES
-template<typename Scalar>
-inline std::string get_matrixfolder()
-{
-  std::string mat_folder = TEST_REAL_CASES; 
-  if( internal::is_same<Scalar, std::complex<float> >::value || internal::is_same<Scalar, std::complex<double> >::value )
-    mat_folder  = mat_folder + static_cast<std::string>("/complex/");
-  else
-    mat_folder = mat_folder + static_cast<std::string>("/real/");
-  return mat_folder;
-}
-#endif
-
-template<typename Solver> void check_sparse_spd_solving(Solver& solver)
-{
-  typedef typename Solver::MatrixType Mat;
-  typedef typename Mat::Scalar Scalar;
-  typedef typename Mat::Index Index; 
-  typedef SparseMatrix<Scalar,ColMajor> SpMat;
-  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
-  typedef Matrix<Scalar,Dynamic,1> DenseVector;
-
-  // generate the problem
-  Mat A, halfA;
-  DenseMatrix dA;
-  int size = generate_sparse_spd_problem(solver, A, halfA, dA);
-
-  // generate the right hand sides
-  int rhsCols = internal::random<int>(1,16);
-  double density = (std::max)(8./(size*rhsCols), 0.1);
-  SpMat B(size,rhsCols);
-  DenseVector b = DenseVector::Random(size);
-  DenseMatrix dB(size,rhsCols);
-  initSparse<Scalar>(density, dB, B, ForceNonZeroDiag);
-  
-  for (int i = 0; i < g_repeat; i++) {
-    check_sparse_solving(solver, A,     b,  dA, b);
-    check_sparse_solving(solver, halfA, b,  dA, b);
-    check_sparse_solving(solver, A,     dB, dA, dB);
-    check_sparse_solving(solver, halfA, dB, dA, dB);
-    check_sparse_solving(solver, A,     B,  dA, dB);
-    check_sparse_solving(solver, halfA, B,  dA, dB);
-  }
-
-  // First, get the folder 
-#ifdef TEST_REAL_CASES  
-  if (internal::is_same<Scalar, float>::value 
-      || internal::is_same<Scalar, std::complex<float> >::value)
-    return ;
-  
-  std::string mat_folder = get_matrixfolder<Scalar>();
-  MatrixMarketIterator<Scalar> it(mat_folder);
-  for (; it; ++it)
-  {
-    if (it.sym() == SPD){
-      Mat halfA;
-      PermutationMatrix<Dynamic, Dynamic, Index> pnull;
-      halfA.template selfadjointView<Solver::UpLo>() = it.matrix().template triangularView<Eigen::Lower>().twistedBy(pnull);
-      
-      std::cout<< " ==== SOLVING WITH MATRIX " << it.matname() << " ==== \n";
-      check_sparse_solving_real_cases(solver, it.matrix(), it.rhs(), it.refX());
-      check_sparse_solving_real_cases(solver, halfA, it.rhs(), it.refX());
-    }
-  }
-#endif
-}
-
-template<typename Solver> void check_sparse_spd_determinant(Solver& solver)
-{
-  typedef typename Solver::MatrixType Mat;
-  typedef typename Mat::Scalar Scalar;
-  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
-
-  // generate the problem
-  Mat A, halfA;
-  DenseMatrix dA;
-  generate_sparse_spd_problem(solver, A, halfA, dA, 30);
-  
-  for (int i = 0; i < g_repeat; i++) {
-    check_sparse_determinant(solver, A,     dA);
-    check_sparse_determinant(solver, halfA, dA );
-  }
-}
-
-template<typename Solver, typename DenseMat>
-int generate_sparse_square_problem(Solver&, typename Solver::MatrixType& A, DenseMat& dA, int maxSize = 300)
-{
-  typedef typename Solver::MatrixType Mat;
-  typedef typename Mat::Scalar Scalar;
-  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
-
-  int size = internal::random<int>(1,maxSize);
-  double density = (std::max)(8./(size*size), 0.01);
-  
-  A.resize(size,size);
-  dA.resize(size,size);
-
-  initSparse<Scalar>(density, dA, A, ForceNonZeroDiag);
-  
-  return size;
-}
-
-template<typename Solver> void check_sparse_square_solving(Solver& solver)
-{
-  typedef typename Solver::MatrixType Mat;
-  typedef typename Mat::Scalar Scalar;
-  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
-  typedef Matrix<Scalar,Dynamic,1> DenseVector;
-
-  int rhsCols = internal::random<int>(1,16);
-
-  Mat A;
-  DenseMatrix dA;
-  int size = generate_sparse_square_problem(solver, A, dA);
-
-  DenseVector b = DenseVector::Random(size);
-  DenseMatrix dB = DenseMatrix::Random(size,rhsCols);
-  A.makeCompressed();
-  for (int i = 0; i < g_repeat; i++) {
-    check_sparse_solving(solver, A, b,  dA, b);
-    check_sparse_solving(solver, A, dB, dA, dB);
-  }
-   
-  // First, get the folder 
-#ifdef TEST_REAL_CASES
-  if (internal::is_same<Scalar, float>::value 
-      || internal::is_same<Scalar, std::complex<float> >::value)
-    return ;
-  
-  std::string mat_folder = get_matrixfolder<Scalar>();
-  MatrixMarketIterator<Scalar> it(mat_folder);
-  for (; it; ++it)
-  {
-    std::cout<< " ==== SOLVING WITH MATRIX " << it.matname() << " ==== \n";
-    check_sparse_solving_real_cases(solver, it.matrix(), it.rhs(), it.refX());
-  }
-#endif
-
-}
-
-template<typename Solver> void check_sparse_square_determinant(Solver& solver)
-{
-  typedef typename Solver::MatrixType Mat;
-  typedef typename Mat::Scalar Scalar;
-  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
-
-  // generate the problem
-  Mat A;
-  DenseMatrix dA;
-  generate_sparse_square_problem(solver, A, dA, 30);
-  A.makeCompressed();
-  for (int i = 0; i < g_repeat; i++) {
-    check_sparse_determinant(solver, A, dA);
-  }
-}
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/IterativeSolvers b/resources/3rdparty/eigen/unsupported/Eigen/IterativeSolvers
deleted file mode 100644
index c3cc97cd2..000000000
--- a/resources/3rdparty/eigen/unsupported/Eigen/IterativeSolvers
+++ /dev/null
@@ -1,41 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_ITERATIVE_SOLVERS_MODULE_H
-#define EIGEN_ITERATIVE_SOLVERS_MODULE_H
-
-#include <Eigen/Sparse>
-
-/** \ingroup Unsupported_modules
-  * \defgroup IterativeSolvers_Module Iterative solvers module
-  * This module aims to provide various iterative linear and non-linear solver algorithms.
-  * It currently provides:
-  *  - a constrained conjugate gradient
-  *  - a Householder GMRES implementation
-  * \code
-  * #include <unsupported/Eigen/IterativeSolvers>
-  * \endcode
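-  *
-  * A minimal usage sketch (added as an illustration, not part of the original module
-  * documentation; it assumes a filled SparseMatrix<double> A and a right-hand side
-  * VectorXd b), showing the usual compute()/solve() pattern of Eigen's iterative solvers:
-  * \code
-  * #include <unsupported/Eigen/IterativeSolvers>
-  *
-  * Eigen::GMRES<Eigen::SparseMatrix<double> > gmres;  // default (diagonal) preconditioner
-  * gmres.setTolerance(1e-8);                          // optional: tighten the stopping criterion
-  * gmres.compute(A);                                  // set up the solver for A
-  * Eigen::VectorXd x = gmres.solve(b);                // iterate until convergence or maxIterations
-  * \endcode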
-  */
-//@{
-
-#include "../../Eigen/src/misc/Solve.h"
-#include "../../Eigen/src/misc/SparseSolve.h"
-
-#include "src/IterativeSolvers/IterationController.h"
-#include "src/IterativeSolvers/ConstrainedConjGrad.h"
-#include "src/IterativeSolvers/IncompleteLU.h"
-#include "../../Eigen/Jacobi"
-#include "../../Eigen/Householder"
-#include "src/IterativeSolvers/GMRES.h"
-#include "src/IterativeSolvers/IncompleteCholesky.h"
-//#include "src/IterativeSolvers/SSORPreconditioner.h"
-
-//@}
-
-#endif // EIGEN_ITERATIVE_SOLVERS_MODULE_H
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/MatrixFunctions b/resources/3rdparty/eigen/unsupported/Eigen/MatrixFunctions
deleted file mode 100644
index 1a4d42de0..000000000
--- a/resources/3rdparty/eigen/unsupported/Eigen/MatrixFunctions
+++ /dev/null
@@ -1,446 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>
-// Copyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MATRIX_FUNCTIONS
-#define EIGEN_MATRIX_FUNCTIONS
-
-#include <cfloat>
-#include <list>
-#include <functional>
-#include <iterator>
-
-#include <Eigen/Core>
-#include <Eigen/LU>
-#include <Eigen/Eigenvalues>
-
-/** \ingroup Unsupported_modules
-  * \defgroup MatrixFunctions_Module Matrix functions module
-  * \brief This module aims to provide various methods for the computation of
-  * matrix functions. 
-  *
-  * To use this module, add 
-  * \code
-  * #include <unsupported/Eigen/MatrixFunctions>
-  * \endcode
-  * at the start of your source file.
-  *
-  * This module defines the following MatrixBase methods.
-  *  - \ref matrixbase_cos "MatrixBase::cos()", for computing the matrix cosine
-  *  - \ref matrixbase_cosh "MatrixBase::cosh()", for computing the matrix hyperbolic cosine
-  *  - \ref matrixbase_exp "MatrixBase::exp()", for computing the matrix exponential
-  *  - \ref matrixbase_log "MatrixBase::log()", for computing the matrix logarithm
-  *  - \ref matrixbase_pow "MatrixBase::pow()", for computing the matrix power
-  *  - \ref matrixbase_matrixfunction "MatrixBase::matrixFunction()", for computing general matrix functions
-  *  - \ref matrixbase_sin "MatrixBase::sin()", for computing the matrix sine
-  *  - \ref matrixbase_sinh "MatrixBase::sinh()", for computing the matrix hyperbolic sine
-  *  - \ref matrixbase_sqrt "MatrixBase::sqrt()", for computing the matrix square root
-  *
-  * These methods are the main entry points to this module. 
-  *
-  * %Matrix functions are defined as follows.  Suppose that \f$ f \f$
-  * is an entire function (that is, a function on the complex plane
-  * that is everywhere complex differentiable).  Then its Taylor
-  * series
-  * \f[ f(0) + f'(0) x + \frac{f''(0)}{2} x^2 + \frac{f'''(0)}{3!} x^3 + \cdots \f]
-  * converges to \f$ f(x) \f$. In this case, we can define the matrix
-  * function by the same series:
-  * \f[ f(M) = f(0) + f'(0) M + \frac{f''(0)}{2} M^2 + \frac{f'''(0)}{3!} M^3 + \cdots \f]
-  *
-  */
-
-#include "src/MatrixFunctions/MatrixExponential.h"
-#include "src/MatrixFunctions/MatrixFunction.h"
-#include "src/MatrixFunctions/MatrixSquareRoot.h"
-#include "src/MatrixFunctions/MatrixLogarithm.h"
-#include "src/MatrixFunctions/MatrixPowerBase.h"
-#include "src/MatrixFunctions/MatrixPower.h"
-
-
-/** 
-\page matrixbaseextra MatrixBase methods defined in the MatrixFunctions module
-\ingroup MatrixFunctions_Module
-
-The remainder of the page documents the following MatrixBase methods
-which are defined in the MatrixFunctions module.
-
-
-
-\section matrixbase_cos MatrixBase::cos()
-
-Compute the matrix cosine.
-
-\code
-const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cos() const
-\endcode
-
-\param[in]  M  a square matrix.
-\returns  expression representing \f$ \cos(M) \f$.
-
-This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos().
-
-\sa \ref matrixbase_sin "sin()" for an example.
-
-
-
-\section matrixbase_cosh MatrixBase::cosh()
-
-Compute the matrix hyperbolic cosine.
-
-\code
-const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cosh() const
-\endcode
-
-\param[in]  M  a square matrix.
-\returns  expression representing \f$ \cosh(M) \f$
-
-This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cosh().
-
-\sa \ref matrixbase_sinh "sinh()" for an example.
-
-
-
-\section matrixbase_exp MatrixBase::exp()
-
-Compute the matrix exponential.
-
-\code
-const MatrixExponentialReturnValue<Derived> MatrixBase<Derived>::exp() const
-\endcode
-
-\param[in]  M  matrix whose exponential is to be computed.
-\returns    expression representing the matrix exponential of \p M.
-
-The matrix exponential of \f$ M \f$ is defined by
-\f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. \f]
-The matrix exponential can be used to solve linear ordinary
-differential equations: the solution of \f$ y' = My \f$ with the
-initial condition \f$ y(0) = y_0 \f$ is given by
-\f$ y(t) = \exp(Mt) y_0 \f$.
-
-The cost of the computation is approximately \f$ 20 n^3 \f$ for
-matrices of size \f$ n \f$. The number 20 depends weakly on the
-norm of the matrix.
-
-The matrix exponential is computed using the scaling-and-squaring
-method combined with Pad&eacute; approximation. The matrix is first
-rescaled, then the exponential of the reduced matrix is computed from a
-Pad&eacute; approximant, and then the rescaling is undone by repeated
-squaring. The degree of the Pad&eacute; approximant is chosen such
-that the approximation error is less than the round-off
-error. However, errors may accumulate during the squaring phase.
-
-Details of the algorithm can be found in: Nicholas J. Higham, "The
-scaling and squaring method for the matrix exponential revisited,"
-<em>SIAM J. %Matrix Anal. Applic.</em>, <b>26</b>:1179&ndash;1193,
-2005.
-
-Example: The following program checks that
-\f[ \exp \left[ \begin{array}{ccc}
-      0 & -\frac14\pi & 0 \\
-      \frac14\pi & 0 & 0 \\
-      0 & 0 & 0
-    \end{array} \right] = \left[ \begin{array}{ccc}
-      \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
-      \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
-      0 & 0 & 1
-    \end{array} \right]. \f]
-This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
-the z-axis.
-
-\include MatrixExponential.cpp
-Output: \verbinclude MatrixExponential.out
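-
-In code, the same computation can be sketched as follows (an illustrative
-snippet; the shipped MatrixExponential.cpp may differ in detail):
-\code
-#include <unsupported/Eigen/MatrixFunctions>
-#include <iostream>
-#include <cmath>
-int main()
-{
-  const double pi4 = std::acos(-1.0) / 4;  // pi/4
-  Eigen::MatrixXd A(3,3);
-  A <<    0, -pi4, 0,
-        pi4,    0, 0,
-          0,    0, 0;
-  Eigen::MatrixXd E = A.exp();  // rotation of pi/4 radians about the z-axis
-  std::cout << E << std::endl;
-}
-\endcode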
-
-\note \p M has to be a matrix of \c float, \c double, <tt>long double</tt>,
-\c complex<float>, \c complex<double>, or \c complex<long double> .
-
-
-\section matrixbase_log MatrixBase::log()
-
-Compute the matrix logarithm.
-
-\code
-const MatrixLogarithmReturnValue<Derived> MatrixBase<Derived>::log() const
-\endcode
-
-\param[in]  M  invertible matrix whose logarithm is to be computed.
-\returns    expression representing the matrix logarithm of \p M.
-
-The matrix logarithm of \f$ M \f$ is a matrix \f$ X \f$ such that 
-\f$ \exp(X) = M \f$ where exp denotes the matrix exponential. As for
-the scalar logarithm, the equation \f$ \exp(X) = M \f$ may have
-multiple solutions; this function returns a matrix whose eigenvalues
-have imaginary part in the interval \f$ (-\pi,\pi] \f$.
-
-In the real case, the matrix \f$ M \f$ should be invertible and
-it should have no eigenvalues which are real and negative (pairs of
-complex conjugate eigenvalues are allowed). In the complex case, it
-only needs to be invertible.
-
-This function computes the matrix logarithm using the Schur-Parlett
-algorithm as implemented by MatrixBase::matrixFunction(). The
-logarithm of an atomic block is computed by MatrixLogarithmAtomic,
-which uses direct computation for 1-by-1 and 2-by-2 blocks and an
-inverse scaling-and-squaring algorithm for bigger blocks, with the
-square roots computed by MatrixBase::sqrt().
-
-Details of the algorithm can be found in Section 11.6.2 of:
-Nicholas J. Higham,
-<em>Functions of Matrices: Theory and Computation</em>,
-SIAM 2008. ISBN 978-0-898716-46-7.
-
-Example: The following program checks that
-\f[ \log \left[ \begin{array}{ccc} 
-      \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
-      \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
-      0 & 0 & 1
-    \end{array} \right] = \left[ \begin{array}{ccc}
-      0 & -\frac14\pi & 0 \\
-      \frac14\pi & 0 & 0 \\
-      0 & 0 & 0 
-    \end{array} \right]. \f]
-This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
-the z-axis. This is the inverse of the example used in the
-documentation of \ref matrixbase_exp "exp()".
-
-\include MatrixLogarithm.cpp
-Output: \verbinclude MatrixLogarithm.out
-
-\note \p M has to be a matrix of \c float, \c double, <tt>long
-double</tt>, \c complex<float>, \c complex<double>, or \c complex<long
-double> .
-
-\sa MatrixBase::exp(), MatrixBase::matrixFunction(), 
-    class MatrixLogarithmAtomic, MatrixBase::sqrt().
-
-
-\section matrixbase_pow MatrixBase::pow()
-
-Compute the matrix raised to arbitrary real power.
-
-\code
-const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(RealScalar p) const
-\endcode
-
-\param[in]  M  base of the matrix power, should be a square matrix.
-\param[in]  p  exponent of the matrix power, should be real.
-
-The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$,
-where exp denotes the matrix exponential, and log denotes the matrix
-logarithm.
-
-The matrix \f$ M \f$ should meet the conditions to be an argument of
-matrix logarithm. If \p p is not of the real scalar type of \p M, it
-is cast to the real scalar type of \p M.
-
-This function computes the matrix power using the Schur-Pad&eacute;
-algorithm as implemented by class MatrixPower. The exponent is split
-into integral part and fractional part, where the fractional part is
-in the interval \f$ (-1, 1) \f$. The main diagonal and the first
-super-diagonal are computed directly.
-
-Details of the algorithm can be found in: Nicholas J. Higham and
-Lijing Lin, "A Schur-Pad&eacute; algorithm for fractional powers of a
-matrix," <em>SIAM J. %Matrix Anal. Applic.</em>,
-<b>32(3)</b>:1056&ndash;1078, 2011.
-
-Example: The following program checks that
-\f[ \left[ \begin{array}{ccc}
-      \cos1 & -\sin1 & 0 \\
-      \sin1 & \cos1 & 0 \\
-      0 & 0 & 1
-    \end{array} \right]^{\frac14\pi} = \left[ \begin{array}{ccc}
-      \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
-      \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
-      0 & 0 & 1
-    \end{array} \right]. \f]
-This corresponds to \f$ \frac14\pi \f$ rotations of 1 radian around
-the z-axis.
-
-\include MatrixPower.cpp
-Output: \verbinclude MatrixPower.out
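-
-Inline, the computation above can be sketched as follows (illustrative; the
-shipped MatrixPower.cpp may differ in detail):
-\code
-#include <unsupported/Eigen/MatrixFunctions>
-#include <iostream>
-#include <cmath>
-int main()
-{
-  const double pi = std::acos(-1.0);
-  Eigen::MatrixXd A(3,3);
-  A << std::cos(1.0), -std::sin(1.0), 0,
-       std::sin(1.0),  std::cos(1.0), 0,
-       0,              0,             1;
-  Eigen::MatrixXd B = A.pow(pi/4);  // rotation of pi/4 radians about the z-axis
-  std::cout << B << std::endl;
-}
-\endcode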
-
-MatrixBase::pow() is user-friendly. However, there are some
-circumstances under which you should use class MatrixPower directly.
-MatrixPower can save the result of Schur decomposition, so it's
-better for computing various powers for the same matrix.
-
-Example:
-\include MatrixPower_optimal.cpp
-Output: \verbinclude MatrixPower_optimal.out
-
-\note \p M has to be a matrix of \c float, \c double, <tt>long
-double</tt>, \c complex<float>, \c complex<double>, or \c complex<long
-double> .
-
-\sa MatrixBase::exp(), MatrixBase::log(), class MatrixPower.
-
-
-\section matrixbase_matrixfunction MatrixBase::matrixFunction()
-
-Compute a matrix function.
-
-\code
-const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::matrixFunction(typename internal::stem_function<typename internal::traits<Derived>::Scalar>::type f) const
-\endcode
-
-\param[in]  M  argument of matrix function, should be a square matrix.
-\param[in]  f  an entire function; \c f(x,n) should compute the n-th
-derivative of f at x.
-\returns  expression representing \p f applied to \p M.
-
-Suppose that \p M is a matrix whose entries have type \c Scalar. 
-Then, the second argument, \p f, should be a function with prototype
-\code 
-ComplexScalar f(ComplexScalar, int) 
-\endcode
-where \c ComplexScalar = \c std::complex<Scalar> if \c Scalar is
-real (e.g., \c float or \c double) and \c ComplexScalar =
-\c Scalar if \c Scalar is complex. The return value of \c f(x,n)
-should be \f$ f^{(n)}(x) \f$, the n-th derivative of f at x.
-
-This routine uses the algorithm described in:
-Philip Davies and Nicholas J. Higham, 
-"A Schur-Parlett algorithm for computing matrix functions", 
-<em>SIAM J. %Matrix Anal. Applic.</em>, <b>25</b>:464&ndash;485, 2003.
-
-The actual work is done by the MatrixFunction class.
-
-Example: The following program checks that
-\f[ \exp \left[ \begin{array}{ccc} 
-      0 & -\frac14\pi & 0 \\
-      \frac14\pi & 0 & 0 \\
-      0 & 0 & 0 
-    \end{array} \right] = \left[ \begin{array}{ccc}
-      \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
-      \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
-      0 & 0 & 1
-    \end{array} \right]. \f]
-This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
-the z-axis. This is the same example as used in the documentation
-of \ref matrixbase_exp "exp()".
-
-\include MatrixFunction.cpp
-Output: \verbinclude MatrixFunction.out
-
-Note that the function \c expfn is defined for complex numbers 
-\c x, even though the matrix \c A is over the reals. Instead of
-\c expfn, we could also have used StdStemFunctions::exp:
-\code
-B = A.matrixFunction(StdStemFunctions<std::complex<double> >::exp);
-\endcode
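-
-Put together, a self-contained sketch (equivalent in spirit to the shipped
-MatrixFunction.cpp, though not identical to it) reads:
-\code
-#include <unsupported/Eigen/MatrixFunctions>
-#include <complex>
-#include <iostream>
-// f(x,n) must return the n-th derivative of f at x; for exp all derivatives coincide.
-std::complex<double> expfn(std::complex<double> x, int)
-{
-  return std::exp(x);
-}
-int main()
-{
-  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
-  Eigen::MatrixXd B = A.matrixFunction(expfn);  // equals A.exp() up to round-off
-  std::cout << B << std::endl;
-}
-\endcode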
-
-
-
-\section matrixbase_sin MatrixBase::sin()
-
-Compute the matrix sine.
-
-\code
-const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sin() const
-\endcode
-
-\param[in]  M  a square matrix.
-\returns  expression representing \f$ \sin(M) \f$.
-
-This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin().
-
-Example: \include MatrixSine.cpp
-Output: \verbinclude MatrixSine.out
-
-
-
-\section matrixbase_sinh MatrixBase::sinh()
-
-Compute the matrix hyperbolic sine.
-
-\code
-MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sinh() const
-\endcode
-
-\param[in]  M  a square matrix.
-\returns  expression representing \f$ \sinh(M) \f$
-
-This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sinh().
-
-Example: \include MatrixSinh.cpp
-Output: \verbinclude MatrixSinh.out
-
-
-\section matrixbase_sqrt MatrixBase::sqrt()
-
-Compute the matrix square root.
-
-\code
-const MatrixSquareRootReturnValue<Derived> MatrixBase<Derived>::sqrt() const
-\endcode
-
-\param[in]  M  invertible matrix whose square root is to be computed.
-\returns    expression representing the matrix square root of \p M.
-
-The matrix square root of \f$ M \f$ is the matrix \f$ M^{1/2} \f$
-whose square is the original matrix; so if \f$ S = M^{1/2} \f$ then
-\f$ S^2 = M \f$. 
-
-In the <b>real case</b>, the matrix \f$ M \f$ should be invertible and
-it should have no eigenvalues which are real and negative (pairs of
-complex conjugate eigenvalues are allowed). In that case, the matrix
-has a square root which is also real, and this is the square root
-computed by this function. 
-
-The matrix square root is computed by first reducing the matrix to
-quasi-triangular form with the real Schur decomposition. The square
-root of the quasi-triangular matrix can then be computed directly. The
-cost is approximately \f$ 25 n^3 \f$ real flops for the real Schur
-decomposition and \f$ 3\frac13 n^3 \f$ real flops for the remainder
-(though the computation time in practice is likely more than this
-indicates).
-
-Details of the algorithm can be found in: Nicholas J. Higham,
-"Computing real square roots of a real matrix", <em>Linear Algebra
-Appl.</em>, 88/89:405&ndash;430, 1987.
-
-If the matrix is <b>positive-definite symmetric</b>, then the square
-root is also positive-definite symmetric. In this case, it is best to
-use SelfAdjointEigenSolver::operatorSqrt() to compute it.
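-
-For instance (a sketch, with \c M constructed to be symmetric positive-definite):
-\code
-#include <Eigen/Eigenvalues>
-Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
-Eigen::MatrixXd M = A.transpose() * A + Eigen::MatrixXd::Identity(3,3);
-Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(M);
-Eigen::MatrixXd S = es.operatorSqrt();  // S is SPD and S*S reproduces M
-\endcode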
-
-In the <b>complex case</b>, the matrix \f$ M \f$ should be invertible;
-this is a restriction of the algorithm. The square root computed by
-this algorithm is the one whose eigenvalues have an argument in the
-interval \f$ (-\frac12\pi, \frac12\pi] \f$. This is the usual branch
-cut.
-
-The computation is the same as in the real case, except that the
-complex Schur decomposition is used to reduce the matrix to a
-triangular matrix. The theoretical cost is the same. Details are in:
-&Aring;ke Bj&ouml;rck and Sven Hammarling, "A Schur method for the
-square root of a matrix", <em>Linear Algebra Appl.</em>,
-52/53:127&ndash;140, 1983.
-
-Example: The following program checks that the square root of
-\f[ \left[ \begin{array}{cc} 
-              \cos(\frac13\pi) & -\sin(\frac13\pi) \\
-              \sin(\frac13\pi) & \cos(\frac13\pi)
-    \end{array} \right], \f]
-corresponding to a rotation over 60 degrees, is a rotation over 30 degrees:
-\f[ \left[ \begin{array}{cc} 
-              \cos(\frac16\pi) & -\sin(\frac16\pi) \\
-              \sin(\frac16\pi) & \cos(\frac16\pi)
-    \end{array} \right]. \f]
-
-\include MatrixSquareRoot.cpp
-Output: \verbinclude MatrixSquareRoot.out
-
-\sa class RealSchur, class ComplexSchur, class MatrixSquareRoot,
-    SelfAdjointEigenSolver::operatorSqrt().
-
-*/
-
-#endif // EIGEN_MATRIX_FUNCTIONS
-
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
deleted file mode 100644
index 6825a7882..000000000
--- a/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
+++ /dev/null
@@ -1,451 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009, 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
-// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MATRIX_EXPONENTIAL
-#define EIGEN_MATRIX_EXPONENTIAL
-
-#include "StemFunction.h"
-
-namespace Eigen {
-
-/** \ingroup MatrixFunctions_Module
-  * \brief Class for computing the matrix exponential.
-  * \tparam MatrixType type of the argument of the exponential,
-  * expected to be an instantiation of the Matrix class template.
-  */
-template <typename MatrixType>
-class MatrixExponential {
-
-  public:
-
-    /** \brief Constructor.
-      * 
-      * The class stores a reference to \p M, so it should not be
-      * changed (or destroyed) before compute() is called.
-      *
-      * \param[in] M  matrix whose exponential is to be computed.
-      */
-    MatrixExponential(const MatrixType &M);
-
-    /** \brief Computes the matrix exponential.
-      *
-      * \param[out] result  the matrix exponential of \p M in the constructor.
-      */
-    template <typename ResultType> 
-    void compute(ResultType &result);
-
-  private:
-
-    // Prevent copying
-    MatrixExponential(const MatrixExponential&);
-    MatrixExponential& operator=(const MatrixExponential&);
-
-    /** \brief Compute the (3,3)-Pad&eacute; approximant to the exponential.
-     *
-     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
-     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
-     *
-     *  \param[in] A   Argument of matrix exponential
-     */
-    void pade3(const MatrixType &A);
-
-    /** \brief Compute the (5,5)-Pad&eacute; approximant to the exponential.
-     *
-     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
-     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
-     *
-     *  \param[in] A   Argument of matrix exponential
-     */
-    void pade5(const MatrixType &A);
-
-    /** \brief Compute the (7,7)-Pad&eacute; approximant to the exponential.
-     *
-     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
-     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
-     *
-     *  \param[in] A   Argument of matrix exponential
-     */
-    void pade7(const MatrixType &A);
-
-    /** \brief Compute the (9,9)-Pad&eacute; approximant to the exponential.
-     *
-     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
-     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
-     *
-     *  \param[in] A   Argument of matrix exponential
-     */
-    void pade9(const MatrixType &A);
-
-    /** \brief Compute the (13,13)-Pad&eacute; approximant to the exponential.
-     *
-     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
-     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
-     *
-     *  \param[in] A   Argument of matrix exponential
-     */
-    void pade13(const MatrixType &A);
-
-    /** \brief Compute the (17,17)-Pad&eacute; approximant to the exponential.
-     *
-     *  After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
-     *  approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
-     *
-     *  This function is used only if your long double has double-double or quadruple precision.
-     *
-     *  \param[in] A   Argument of matrix exponential
-     */
-    void pade17(const MatrixType &A);
-
-    /** \brief Compute Pad&eacute; approximant to the exponential.
-     *
-     * Computes \c m_U, \c m_V and \c m_squarings such that
-     * \f$ (V+U)(V-U)^{-1} \f$ is a Pad&eacute; of
-     * \f$ \exp(2^{-\mbox{squarings}}M) \f$ around \f$ M = 0 \f$. The
-     * degree of the Pad&eacute; approximant and the value of
-     * squarings are chosen such that the approximation error is no
-     * more than the round-off error.
-     *
-     * The argument of this function should have the real scalar type of
-     * the entries of \c m_M.  It is used only to select the correct
-     * implementation via overloading.
-     */
-    void computeUV(double);
-
-    /** \brief Compute Pad&eacute; approximant to the exponential.
-     *
-     *  \sa computeUV(double);
-     */
-    void computeUV(float);
-    
-    /** \brief Compute Pad&eacute; approximant to the exponential.
-     *
-     *  \sa computeUV(double);
-     */
-    void computeUV(long double);
-
-    typedef typename internal::traits<MatrixType>::Scalar Scalar;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-    typedef typename std::complex<RealScalar> ComplexScalar;
-
-    /** \brief Reference to matrix whose exponential is to be computed. */
-    typename internal::nested<MatrixType>::type m_M;
-
-    /** \brief Odd-degree terms in numerator of Pad&eacute; approximant. */
-    MatrixType m_U;
-
-    /** \brief Even-degree terms in numerator of Pad&eacute; approximant. */
-    MatrixType m_V;
-
-    /** \brief Used for temporary storage. */
-    MatrixType m_tmp1;
-
-    /** \brief Used for temporary storage. */
-    MatrixType m_tmp2;
-
-    /** \brief Identity matrix of the same size as \c m_M. */
-    MatrixType m_Id;
-
-    /** \brief Number of squarings required in the last step. */
-    int m_squarings;
-
-    /** \brief L1 norm of m_M. */
-    RealScalar m_l1norm;
-};
-
-template <typename MatrixType>
-MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M) :
-  m_M(M),
-  m_U(M.rows(),M.cols()),
-  m_V(M.rows(),M.cols()),
-  m_tmp1(M.rows(),M.cols()),
-  m_tmp2(M.rows(),M.cols()),
-  m_Id(MatrixType::Identity(M.rows(), M.cols())),
-  m_squarings(0),
-  m_l1norm(M.cwiseAbs().colwise().sum().maxCoeff())
-{
-  /* empty body */
-}
-
-template <typename MatrixType>
-template <typename ResultType> 
-void MatrixExponential<MatrixType>::compute(ResultType &result)
-{
-#if LDBL_MANT_DIG > 112 // rarely happens
-  if(sizeof(RealScalar) > 14) {
-    result = m_M.matrixFunction(StdStemFunctions<ComplexScalar>::exp);
-    return;
-  }
-#endif
-  computeUV(RealScalar());
-  m_tmp1 = m_U + m_V;   // numerator of Pade approximant
-  m_tmp2 = -m_U + m_V;  // denominator of Pade approximant
-  result = m_tmp2.partialPivLu().solve(m_tmp1);
-  for (int i=0; i<m_squarings; i++)
-    result *= result;   // undo scaling by repeated squaring
-}
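-
-// Illustrative usage of this internal helper (a sketch; user code normally goes
-// through MatrixBase::exp(), which wraps exactly this class):
-//
-//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
-//   Eigen::MatrixExponential<Eigen::MatrixXd> me(A);  // stores a reference to A
-//   Eigen::MatrixXd E;
-//   me.compute(E);                                    // E = exp(A)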
-
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade3(const MatrixType &A)
-{
-  const RealScalar b[] = {120., 60., 12., 1.};
-  m_tmp1.noalias() = A * A;
-  m_tmp2 = b[3]*m_tmp1 + b[1]*m_Id;
-  m_U.noalias() = A * m_tmp2;
-  m_V = b[2]*m_tmp1 + b[0]*m_Id;
-}
-
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade5(const MatrixType &A)
-{
-  const RealScalar b[] = {30240., 15120., 3360., 420., 30., 1.};
-  MatrixType A2 = A * A;
-  m_tmp1.noalias() = A2 * A2;
-  m_tmp2 = b[5]*m_tmp1 + b[3]*A2 + b[1]*m_Id;
-  m_U.noalias() = A * m_tmp2;
-  m_V = b[4]*m_tmp1 + b[2]*A2 + b[0]*m_Id;
-}
-
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade7(const MatrixType &A)
-{
-  const RealScalar b[] = {17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.};
-  MatrixType A2 = A * A;
-  MatrixType A4 = A2 * A2;
-  m_tmp1.noalias() = A4 * A2;
-  m_tmp2 = b[7]*m_tmp1 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
-  m_U.noalias() = A * m_tmp2;
-  m_V = b[6]*m_tmp1 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
-}
-
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade9(const MatrixType &A)
-{
-  const RealScalar b[] = {17643225600., 8821612800., 2075673600., 302702400., 30270240.,
-		      2162160., 110880., 3960., 90., 1.};
-  MatrixType A2 = A * A;
-  MatrixType A4 = A2 * A2;
-  MatrixType A6 = A4 * A2;
-  m_tmp1.noalias() = A6 * A2;
-  m_tmp2 = b[9]*m_tmp1 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
-  m_U.noalias() = A * m_tmp2;
-  m_V = b[8]*m_tmp1 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
-}
-
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade13(const MatrixType &A)
-{
-  const RealScalar b[] = {64764752532480000., 32382376266240000., 7771770303897600.,
-		      1187353796428800., 129060195264000., 10559470521600., 670442572800.,
-		      33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.};
-  MatrixType A2 = A * A;
-  MatrixType A4 = A2 * A2;
-  m_tmp1.noalias() = A4 * A2;
-  m_V = b[13]*m_tmp1 + b[11]*A4 + b[9]*A2; // used for temporary storage
-  m_tmp2.noalias() = m_tmp1 * m_V;
-  m_tmp2 += b[7]*m_tmp1 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
-  m_U.noalias() = A * m_tmp2;
-  m_tmp2 = b[12]*m_tmp1 + b[10]*A4 + b[8]*A2;
-  m_V.noalias() = m_tmp1 * m_tmp2;
-  m_V += b[6]*m_tmp1 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
-}
-
-#if LDBL_MANT_DIG > 64
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade17(const MatrixType &A)
-{
-  const RealScalar b[] = {830034394580628357120000.L, 415017197290314178560000.L,
-		      100610229646136770560000.L, 15720348382208870400000.L,
-		      1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L,
-		      595373117923584000.L, 27563570274240000.L, 1060137318240000.L,
-		      33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L,
-		      46512.L, 306.L, 1.L};
-  MatrixType A2 = A * A;
-  MatrixType A4 = A2 * A2;
-  MatrixType A6 = A4 * A2;
-  m_tmp1.noalias() = A4 * A4;
-  m_V = b[17]*m_tmp1 + b[15]*A6 + b[13]*A4 + b[11]*A2; // used for temporary storage
-  m_tmp2.noalias() = m_tmp1 * m_V;
-  m_tmp2 += b[9]*m_tmp1 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
-  m_U.noalias() = A * m_tmp2;
-  m_tmp2 = b[16]*m_tmp1 + b[14]*A6 + b[12]*A4 + b[10]*A2;
-  m_V.noalias() = m_tmp1 * m_tmp2;
-  m_V += b[8]*m_tmp1 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
-}
-#endif
-
-template <typename MatrixType>
-void MatrixExponential<MatrixType>::computeUV(float)
-{
-  using std::frexp;
-  using std::pow;
-  if (m_l1norm < 4.258730016922831e-001) {
-    pade3(m_M);
-  } else if (m_l1norm < 1.880152677804762e+000) {
-    pade5(m_M);
-  } else {
-    const float maxnorm = 3.925724783138660f;
-    frexp(m_l1norm / maxnorm, &m_squarings);
-    if (m_squarings < 0) m_squarings = 0;
-    MatrixType A = m_M / pow(Scalar(2), m_squarings);
-    pade7(A);
-  }
-}
-
-template <typename MatrixType>
-void MatrixExponential<MatrixType>::computeUV(double)
-{
-  using std::frexp;
-  using std::pow;
-  if (m_l1norm < 1.495585217958292e-002) {
-    pade3(m_M);
-  } else if (m_l1norm < 2.539398330063230e-001) {
-    pade5(m_M);
-  } else if (m_l1norm < 9.504178996162932e-001) {
-    pade7(m_M);
-  } else if (m_l1norm < 2.097847961257068e+000) {
-    pade9(m_M);
-  } else {
-    const double maxnorm = 5.371920351148152;
-    frexp(m_l1norm / maxnorm, &m_squarings);
-    if (m_squarings < 0) m_squarings = 0;
-    MatrixType A = m_M / pow(Scalar(2), m_squarings);
-    pade13(A);
-  }
-}
-
-template <typename MatrixType>
-void MatrixExponential<MatrixType>::computeUV(long double)
-{
-  using std::frexp;
-  using std::pow;
-#if   LDBL_MANT_DIG == 53   // double precision
-  computeUV(double());
-#elif LDBL_MANT_DIG <= 64   // extended precision
-  if (m_l1norm < 4.1968497232266989671e-003L) {
-    pade3(m_M);
-  } else if (m_l1norm < 1.1848116734693823091e-001L) {
-    pade5(m_M);
-  } else if (m_l1norm < 5.5170388480686700274e-001L) {
-    pade7(m_M);
-  } else if (m_l1norm < 1.3759868875587845383e+000L) {
-    pade9(m_M);
-  } else {
-    const long double maxnorm = 4.0246098906697353063L;
-    frexp(m_l1norm / maxnorm, &m_squarings);
-    if (m_squarings < 0) m_squarings = 0;
-    MatrixType A = m_M / pow(Scalar(2), m_squarings);
-    pade13(A);
-  }
-#elif LDBL_MANT_DIG <= 106  // double-double
-  if (m_l1norm < 3.2787892205607026992947488108213e-005L) {
-    pade3(m_M);
-  } else if (m_l1norm < 6.4467025060072760084130906076332e-003L) {
-    pade5(m_M);
-  } else if (m_l1norm < 6.8988028496595374751374122881143e-002L) {
-    pade7(m_M);
-  } else if (m_l1norm < 2.7339737518502231741495857201670e-001L) {
-    pade9(m_M);
-  } else if (m_l1norm < 1.3203382096514474905666448850278e+000L) {
-    pade13(m_M);
-  } else {
-    const long double maxnorm = 3.2579440895405400856599663723517L;
-    frexp(m_l1norm / maxnorm, &m_squarings);
-    if (m_squarings < 0) m_squarings = 0;
-    MatrixType A = m_M / pow(Scalar(2), m_squarings);
-    pade17(A);
-  }
-#elif LDBL_MANT_DIG <= 112  // quadruple precision
-  if (m_l1norm < 1.639394610288918690547467954466970e-005L) {
-    pade3(m_M);
-  } else if (m_l1norm < 4.253237712165275566025884344433009e-003L) {
-    pade5(m_M);
-  } else if (m_l1norm < 5.125804063165764409885122032933142e-002L) {
-    pade7(m_M);
-  } else if (m_l1norm < 2.170000765161155195453205651889853e-001L) {
-    pade9(m_M);
-  } else if (m_l1norm < 1.125358383453143065081397882891878e+000L) {
-    pade13(m_M);
-  } else {
-    const long double maxnorm = 2.884233277829519311757165057717815L;
-    frexp(m_l1norm / maxnorm, &m_squarings);
-    if (m_squarings < 0) m_squarings = 0;
-    MatrixType A = m_M / pow(Scalar(2), m_squarings);
-    pade17(A);
-  }
-#else
-  // this case should be handled in compute()
-  eigen_assert(false && "Bug in MatrixExponential"); 
-#endif  // LDBL_MANT_DIG
-}
-
-/** \ingroup MatrixFunctions_Module
-  *
-  * \brief Proxy for the matrix exponential of some matrix (expression).
-  *
-  * \tparam Derived  Type of the argument to the matrix exponential.
-  *
-  * This class holds the argument to the matrix exponential until it
-  * is assigned or evaluated for some other reason (so the argument
-  * should not be changed in the meantime). It is the return type of
-  * MatrixBase::exp() and most of the time this is the only way it is
-  * used.
-  */
-template<typename Derived> struct MatrixExponentialReturnValue
-: public ReturnByValue<MatrixExponentialReturnValue<Derived> >
-{
-    typedef typename Derived::Index Index;
-  public:
-    /** \brief Constructor.
-      *
-      * \param[in] src %Matrix (expression) forming the argument of the
-      * matrix exponential.
-      */
-    MatrixExponentialReturnValue(const Derived& src) : m_src(src) { }
-
-    /** \brief Compute the matrix exponential.
-      *
-      * \param[out] result the matrix exponential of \p src in the
-      * constructor.
-      */
-    template <typename ResultType>
-    inline void evalTo(ResultType& result) const
-    {
-      const typename Derived::PlainObject srcEvaluated = m_src.eval();
-      MatrixExponential<typename Derived::PlainObject> me(srcEvaluated);
-      me.compute(result);
-    }
-
-    Index rows() const { return m_src.rows(); }
-    Index cols() const { return m_src.cols(); }
-
-  protected:
-    const Derived& m_src;
-  private:
-    MatrixExponentialReturnValue& operator=(const MatrixExponentialReturnValue&);
-};
-
-namespace internal {
-template<typename Derived>
-struct traits<MatrixExponentialReturnValue<Derived> >
-{
-  typedef typename Derived::PlainObject ReturnType;
-};
-}
-
-template <typename Derived>
-const MatrixExponentialReturnValue<Derived> MatrixBase<Derived>::exp() const
-{
-  eigen_assert(rows() == cols());
-  return MatrixExponentialReturnValue<Derived>(derived());
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_MATRIX_EXPONENTIAL
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
deleted file mode 100644
index e87a28f6c..000000000
--- a/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
+++ /dev/null
@@ -1,590 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MATRIX_FUNCTION
-#define EIGEN_MATRIX_FUNCTION
-
-#include "StemFunction.h"
-#include "MatrixFunctionAtomic.h"
-
-
-namespace Eigen { 
-
-/** \ingroup MatrixFunctions_Module
-  * \brief Class for computing matrix functions.
-  * \tparam  MatrixType  type of the argument of the matrix function,
-  *                      expected to be an instantiation of the Matrix class template.
-  * \tparam  AtomicType  type for computing matrix function of atomic blocks.
-  * \tparam  IsComplex   used internally to select correct specialization.
-  *
-  * This class implements the Schur-Parlett algorithm for computing matrix functions. The spectrum of the
-  * matrix is divided into clusters of eigenvalues that lie close together. This class delegates the
-  * computation of the matrix function for every block corresponding to these clusters to an object of type
-  * \p AtomicType and uses these results to compute the matrix function of the whole matrix. The class
-  * \p AtomicType should have a \p compute() member function for computing the matrix function of a block.
-  *
-  * \sa class MatrixFunctionAtomic, class MatrixLogarithmAtomic
-  */
-template <typename MatrixType, 
-	  typename AtomicType,  
-          int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
-class MatrixFunction
-{  
-  public:
-
-    /** \brief Constructor. 
-      *
-      * \param[in]  A       argument of matrix function, should be a square matrix.
-      * \param[in]  atomic  class for computing matrix function of atomic blocks.
-      *
-      * The class stores references to \p A and \p atomic, so they should not be
-      * changed (or destroyed) before compute() is called.
-      */
-    MatrixFunction(const MatrixType& A, AtomicType& atomic);
-
-    /** \brief Compute the matrix function.
-      *
-      * \param[out] result  the function \p f applied to \p A, as
-      * specified in the constructor.
-      *
-      * See MatrixBase::matrixFunction() for details on how this computation
-      * is implemented.
-      */
-    template <typename ResultType> 
-    void compute(ResultType &result);    
-};
-
-
-/** \internal \ingroup MatrixFunctions_Module 
-  * \brief Partial specialization of MatrixFunction for real matrices
-  */
-template <typename MatrixType, typename AtomicType>
-class MatrixFunction<MatrixType, AtomicType, 0>
-{  
-  private:
-
-    typedef internal::traits<MatrixType> Traits;
-    typedef typename Traits::Scalar Scalar;
-    static const int Rows = Traits::RowsAtCompileTime;
-    static const int Cols = Traits::ColsAtCompileTime;
-    static const int Options = MatrixType::Options;
-    static const int MaxRows = Traits::MaxRowsAtCompileTime;
-    static const int MaxCols = Traits::MaxColsAtCompileTime;
-
-    typedef std::complex<Scalar> ComplexScalar;
-    typedef Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols> ComplexMatrix;
-
-  public:
-
-    /** \brief Constructor. 
-      *
-      * \param[in]  A       argument of matrix function, should be a square matrix.
-      * \param[in]  atomic  class for computing matrix function of atomic blocks.
-      */
-    MatrixFunction(const MatrixType& A, AtomicType& atomic) : m_A(A), m_atomic(atomic) { }
-
-    /** \brief Compute the matrix function.
-      *
-      * \param[out] result  the function \p f applied to \p A, as
-      * specified in the constructor.
-      *
-      * This function converts the real matrix \c A to a complex matrix,
-      * uses MatrixFunction<MatrixType,1> and then converts the result back to
-      * a real matrix.
-      */
-    template <typename ResultType>
-    void compute(ResultType& result) 
-    {
-      ComplexMatrix CA = m_A.template cast<ComplexScalar>();
-      ComplexMatrix Cresult;
-      MatrixFunction<ComplexMatrix, AtomicType> mf(CA, m_atomic);
-      mf.compute(Cresult);
-      result = Cresult.real();
-    }
-
-  private:
-    typename internal::nested<MatrixType>::type m_A; /**< \brief Reference to argument of matrix function. */
-    AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */
-
-    MatrixFunction& operator=(const MatrixFunction&);
-};
-
-      
-/** \internal \ingroup MatrixFunctions_Module 
-  * \brief Partial specialization of MatrixFunction for complex matrices
-  */
-template <typename MatrixType, typename AtomicType>
-class MatrixFunction<MatrixType, AtomicType, 1>
-{
-  private:
-
-    typedef internal::traits<MatrixType> Traits;
-    typedef typename MatrixType::Scalar Scalar;
-    typedef typename MatrixType::Index Index;
-    static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
-    static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
-    static const int Options = MatrixType::Options;
-    typedef typename NumTraits<Scalar>::Real RealScalar;
-    typedef Matrix<Scalar, Traits::RowsAtCompileTime, 1> VectorType;
-    typedef Matrix<Index, Traits::RowsAtCompileTime, 1> IntVectorType;
-    typedef Matrix<Index, Dynamic, 1> DynamicIntVectorType;
-    typedef std::list<Scalar> Cluster;
-    typedef std::list<Cluster> ListOfClusters;
-    typedef Matrix<Scalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
-
-  public:
-
-    MatrixFunction(const MatrixType& A, AtomicType& atomic);
-    template <typename ResultType> void compute(ResultType& result);
-
-  private:
-
-    void computeSchurDecomposition();
-    void partitionEigenvalues();
-    typename ListOfClusters::iterator findCluster(Scalar key);
-    void computeClusterSize();
-    void computeBlockStart();
-    void constructPermutation();
-    void permuteSchur();
-    void swapEntriesInSchur(Index index);
-    void computeBlockAtomic();
-    Block<MatrixType> block(MatrixType& A, Index i, Index j);
-    void computeOffDiagonal();
-    DynMatrixType solveTriangularSylvester(const DynMatrixType& A, const DynMatrixType& B, const DynMatrixType& C);
-
-    typename internal::nested<MatrixType>::type m_A; /**< \brief Reference to argument of matrix function. */
-    AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */
-    MatrixType m_T; /**< \brief Triangular part of Schur decomposition */
-    MatrixType m_U; /**< \brief Unitary part of Schur decomposition */
-    MatrixType m_fT; /**< \brief %Matrix function applied to #m_T */
-    ListOfClusters m_clusters; /**< \brief Partition of eigenvalues into clusters of ei'vals "close" to each other */
-    DynamicIntVectorType m_eivalToCluster; /**< \brief m_eivalToCluster[i] = j means i-th ei'val is in j-th cluster */
-    DynamicIntVectorType m_clusterSize; /**< \brief Number of eigenvalues in each cluster */
-    DynamicIntVectorType m_blockStart; /**< \brief Row index at which block corresponding to i-th cluster starts */
-    IntVectorType m_permutation; /**< \brief Permutation which groups ei'vals in the same cluster together */
-
-    /** \brief Maximum distance allowed between eigenvalues to be considered "close".
-      *
-      * This is morally a \c static \c const \c Scalar, but only
-      * integers can be static constant class members in C++. The
-      * separation constant is set to 0.1, a value taken from the
-      * paper by Davies and Higham. */
-    static const RealScalar separation() { return static_cast<RealScalar>(0.1); }
-
-    MatrixFunction& operator=(const MatrixFunction&);
-};
-
-/** \brief Constructor. 
- *
- * \param[in]  A       argument of matrix function, should be a square matrix.
- * \param[in]  atomic  class for computing matrix function of atomic blocks.
- */
-template <typename MatrixType, typename AtomicType>
-MatrixFunction<MatrixType,AtomicType,1>::MatrixFunction(const MatrixType& A, AtomicType& atomic)
-  : m_A(A), m_atomic(atomic)
-{
-  /* empty body */
-}
-
-/** \brief Compute the matrix function.
-  *
-  * \param[out] result  the function \p f applied to \p A, as
-  * specified in the constructor.
-  */
-template <typename MatrixType, typename AtomicType>
-template <typename ResultType>
-void MatrixFunction<MatrixType,AtomicType,1>::compute(ResultType& result) 
-{
-  computeSchurDecomposition();
-  partitionEigenvalues();
-  computeClusterSize();
-  computeBlockStart();
-  constructPermutation();
-  permuteSchur();
-  computeBlockAtomic();
-  computeOffDiagonal();
-  result = m_U * (m_fT.template triangularView<Upper>() * m_U.adjoint());
-}
-
-/** \brief Store the Schur decomposition of #m_A in #m_T and #m_U */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::computeSchurDecomposition()
-{
-  const ComplexSchur<MatrixType> schurOfA(m_A);  
-  m_T = schurOfA.matrixT();
-  m_U = schurOfA.matrixU();
-}
-
-/** \brief Partition eigenvalues in clusters of ei'vals close to each other
-  * 
-  * This function computes #m_clusters. This is a partition of the
-  * eigenvalues of #m_T in clusters, such that
-  * # Any eigenvalue in a certain cluster is at most separation() away
-  *   from another eigenvalue in the same cluster.
-  * # The distance between two eigenvalues in different clusters is
-  *   more than separation().
-  * The implementation follows Algorithm 4.1 in the paper of Davies
-  * and Higham. 
-  */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::partitionEigenvalues()
-{
-  const Index rows = m_T.rows();
-  VectorType diag = m_T.diagonal(); // contains eigenvalues of A
-
-  for (Index i=0; i<rows; ++i) {
-    // Find set containing diag(i), adding a new set if necessary
-    typename ListOfClusters::iterator qi = findCluster(diag(i));
-    if (qi == m_clusters.end()) {
-      Cluster l;
-      l.push_back(diag(i));
-      m_clusters.push_back(l);
-      qi = m_clusters.end();
-      --qi;
-    }
-
-    // Look for other element to add to the set
-    for (Index j=i+1; j<rows; ++j) {
-      if (internal::abs(diag(j) - diag(i)) <= separation() && std::find(qi->begin(), qi->end(), diag(j)) == qi->end()) {
-	typename ListOfClusters::iterator qj = findCluster(diag(j));
-	if (qj == m_clusters.end()) {
-	  qi->push_back(diag(j));
-	} else {
-	  qi->insert(qi->end(), qj->begin(), qj->end());
-	  m_clusters.erase(qj);
-	}
-      }
-    }
-  }
-}
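-
-// Worked example (illustrative): with separation() == 0.1, the eigenvalues
-// {1.00, 1.05, 1.12, 2.00} are partitioned into the clusters {1.00, 1.05, 1.12}
-// and {2.00}; 1.00 and 1.12 land in the same cluster because each is within
-// 0.1 of 1.05, even though they are more than 0.1 apart from each other.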
-
-/** \brief Find cluster in #m_clusters containing some value 
-  * \param[in] key Value to find
-  * \returns Iterator to cluster containing \c key, or
-  * \c m_clusters.end() if no cluster in m_clusters contains \c key.
-  */
-template <typename MatrixType, typename AtomicType>
-typename MatrixFunction<MatrixType,AtomicType,1>::ListOfClusters::iterator MatrixFunction<MatrixType,AtomicType,1>::findCluster(Scalar key)
-{
-  typename Cluster::iterator j;
-  for (typename ListOfClusters::iterator i = m_clusters.begin(); i != m_clusters.end(); ++i) {
-    j = std::find(i->begin(), i->end(), key);
-    if (j != i->end())
-      return i;
-  }
-  return m_clusters.end();
-}
-
-/** \brief Compute #m_clusterSize and #m_eivalToCluster using #m_clusters */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::computeClusterSize()
-{
-  const Index rows = m_T.rows();
-  VectorType diag = m_T.diagonal(); 
-  const Index numClusters = static_cast<Index>(m_clusters.size());
-
-  m_clusterSize.setZero(numClusters);
-  m_eivalToCluster.resize(rows);
-  Index clusterIndex = 0;
-  for (typename ListOfClusters::const_iterator cluster = m_clusters.begin(); cluster != m_clusters.end(); ++cluster) {
-    for (Index i = 0; i < diag.rows(); ++i) {
-      if (std::find(cluster->begin(), cluster->end(), diag(i)) != cluster->end()) {
-        ++m_clusterSize[clusterIndex];
-        m_eivalToCluster[i] = clusterIndex;
-      }
-    }
-    ++clusterIndex;
-  }
-}
-
-/** \brief Compute #m_blockStart using #m_clusterSize */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::computeBlockStart()
-{
-  m_blockStart.resize(m_clusterSize.rows());
-  m_blockStart(0) = 0;
-  for (Index i = 1; i < m_clusterSize.rows(); i++) {
-    m_blockStart(i) = m_blockStart(i-1) + m_clusterSize(i-1);
-  }
-}
-
-/** \brief Compute #m_permutation using #m_eivalToCluster and #m_blockStart */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::constructPermutation()
-{
-  DynamicIntVectorType indexNextEntry = m_blockStart;
-  m_permutation.resize(m_T.rows());
-  for (Index i = 0; i < m_T.rows(); i++) {
-    Index cluster = m_eivalToCluster[i];
-    m_permutation[i] = indexNextEntry[cluster];
-    ++indexNextEntry[cluster];
-  }
-}  
-
-/** \brief Permute Schur decomposition in #m_U and #m_T according to #m_permutation */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::permuteSchur()
-{
-  IntVectorType p = m_permutation;
-  for (Index i = 0; i < p.rows() - 1; i++) {
-    Index j;
-    for (j = i; j < p.rows(); j++) {
-      if (p(j) == i) break;
-    }
-    eigen_assert(p(j) == i);
-    for (Index k = j-1; k >= i; k--) {
-      swapEntriesInSchur(k);
-      std::swap(p.coeffRef(k), p.coeffRef(k+1));
-    }
-  }
-}
-
-/** \brief Swap rows \a index and \a index+1 in Schur decomposition in #m_U and #m_T */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::swapEntriesInSchur(Index index)
-{
-  JacobiRotation<Scalar> rotation;
-  rotation.makeGivens(m_T(index, index+1), m_T(index+1, index+1) - m_T(index, index));
-  m_T.applyOnTheLeft(index, index+1, rotation.adjoint());
-  m_T.applyOnTheRight(index, index+1, rotation);
-  m_U.applyOnTheRight(index, index+1, rotation);
-}  
-
-/** \brief Compute block diagonal part of #m_fT.
-  *
-  * This routine computes the matrix function applied to the block diagonal part of #m_T, with the blocking
-  * given by #m_blockStart. The matrix function of each diagonal block is computed by #m_atomic. The
-  * off-diagonal parts of #m_fT are set to zero.
-  */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::computeBlockAtomic()
-{ 
-  m_fT.resize(m_T.rows(), m_T.cols());
-  m_fT.setZero();
-  for (Index i = 0; i < m_clusterSize.rows(); ++i) {
-    block(m_fT, i, i) = m_atomic.compute(block(m_T, i, i));
-  }
-}
-
-/** \brief Return block of matrix according to blocking given by #m_blockStart */
-template <typename MatrixType, typename AtomicType>
-Block<MatrixType> MatrixFunction<MatrixType,AtomicType,1>::block(MatrixType& A, Index i, Index j)
-{
-  return A.block(m_blockStart(i), m_blockStart(j), m_clusterSize(i), m_clusterSize(j));
-}
-
-/** \brief Compute part of #m_fT above block diagonal.
-  *
-  * This routine assumes that the block diagonal part of #m_fT (which
-  * equals the matrix function applied to #m_T) has already been computed and computes
-  * the part above the block diagonal. The part below the diagonal is
-  * zero, because #m_T is upper triangular.
-  */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::computeOffDiagonal()
-{ 
-  for (Index diagIndex = 1; diagIndex < m_clusterSize.rows(); diagIndex++) {
-    for (Index blockIndex = 0; blockIndex < m_clusterSize.rows() - diagIndex; blockIndex++) {
-      // compute (blockIndex, blockIndex+diagIndex) block
-      DynMatrixType A = block(m_T, blockIndex, blockIndex);
-      DynMatrixType B = -block(m_T, blockIndex+diagIndex, blockIndex+diagIndex);
-      DynMatrixType C = block(m_fT, blockIndex, blockIndex) * block(m_T, blockIndex, blockIndex+diagIndex);
-      C -= block(m_T, blockIndex, blockIndex+diagIndex) * block(m_fT, blockIndex+diagIndex, blockIndex+diagIndex);
-      for (Index k = blockIndex + 1; k < blockIndex + diagIndex; k++) {
-	C += block(m_fT, blockIndex, k) * block(m_T, k, blockIndex+diagIndex);
-	C -= block(m_T, blockIndex, k) * block(m_fT, k, blockIndex+diagIndex);
-      }
-      block(m_fT, blockIndex, blockIndex+diagIndex) = solveTriangularSylvester(A, B, C);
-    }
-  }
-}
-
-/** \brief Solve a triangular Sylvester equation AX + XB = C 
-  *
-  * \param[in]  A  the matrix A; should be square and upper triangular
-  * \param[in]  B  the matrix B; should be square and upper triangular
-  * \param[in]  C  the matrix C; should have correct size.
-  *
-  * \returns the solution X.
-  *
-  * If A is m-by-m and B is n-by-n, then both C and X are m-by-n. 
-  * The (i,j)-th component of the Sylvester equation is
-  * \f[ 
-  *     \sum_{k=i}^m A_{ik} X_{kj} + \sum_{k=1}^j X_{ik} B_{kj} = C_{ij}. 
-  * \f]
-  * This can be re-arranged to yield:
-  * \f[ 
-  *     X_{ij} = \frac{1}{A_{ii} + B_{jj}} \Bigl( C_{ij}
-  *     - \sum_{k=i+1}^m A_{ik} X_{kj} - \sum_{k=1}^{j-1} X_{ik} B_{kj} \Bigr).
-  * \f]
-  * It is assumed that A and B are such that the denominator \f$ A_{ii} + B_{jj} \f$
-  * is never zero (otherwise the Sylvester equation does not have a unique
-  * solution). In that case, these equations can be evaluated in the
-  * order \f$ i=m,\ldots,1 \f$ and \f$ j=1,\ldots,n \f$.
-  */
-template <typename MatrixType, typename AtomicType>
-typename MatrixFunction<MatrixType,AtomicType,1>::DynMatrixType MatrixFunction<MatrixType,AtomicType,1>::solveTriangularSylvester(
-  const DynMatrixType& A, 
-  const DynMatrixType& B, 
-  const DynMatrixType& C)
-{
-  eigen_assert(A.rows() == A.cols());
-  eigen_assert(A.isUpperTriangular());
-  eigen_assert(B.rows() == B.cols());
-  eigen_assert(B.isUpperTriangular());
-  eigen_assert(C.rows() == A.rows());
-  eigen_assert(C.cols() == B.rows());
-
-  Index m = A.rows();
-  Index n = B.rows();
-  DynMatrixType X(m, n);
-
-  for (Index i = m - 1; i >= 0; --i) {
-    for (Index j = 0; j < n; ++j) {
-
-      // Compute AX = \sum_{k=i+1}^m A_{ik} X_{kj}
-      Scalar AX;
-      if (i == m - 1) {
-	AX = 0; 
-      } else {
-	Matrix<Scalar,1,1> AXmatrix = A.row(i).tail(m-1-i) * X.col(j).tail(m-1-i);
-	AX = AXmatrix(0,0);
-      }
-
-      // Compute XB = \sum_{k=1}^{j-1} X_{ik} B_{kj}
-      Scalar XB;
-      if (j == 0) {
-	XB = 0; 
-      } else {
-	Matrix<Scalar,1,1> XBmatrix = X.row(i).head(j) * B.col(j).head(j);
-	XB = XBmatrix(0,0);
-      }
-
-      X(i,j) = (C(i,j) - AX - XB) / (A(i,i) + B(j,j));
-    }
-  }
-  return X;
-}
-
-/** \ingroup MatrixFunctions_Module
-  *
-  * \brief Proxy for the matrix function of some matrix (expression).
-  *
-  * \tparam Derived  Type of the argument to the matrix function.
-  *
-  * This class holds the argument to the matrix function until it is
-  * assigned or evaluated for some other reason (so the argument
-  * should not be changed in the meantime). It is the return type of
-  * MatrixBase::matrixFunction() and related functions and most of the
-  * time this is the only way it is used.
-  */
-template<typename Derived> class MatrixFunctionReturnValue
-: public ReturnByValue<MatrixFunctionReturnValue<Derived> >
-{
-  public:
-
-    typedef typename Derived::Scalar Scalar;
-    typedef typename Derived::Index Index;
-    typedef typename internal::stem_function<Scalar>::type StemFunction;
-
-   /** \brief Constructor.
-      *
-      * \param[in] A  %Matrix (expression) forming the argument of the
-      * matrix function.
-      * \param[in] f  Stem function for matrix function under consideration.
-      */
-    MatrixFunctionReturnValue(const Derived& A, StemFunction f) : m_A(A), m_f(f) { }
-
-    /** \brief Compute the matrix function.
-      *
-      * \param[out] result \p f applied to \p A, where \p f and \p A
-      * are as in the constructor.
-      */
-    template <typename ResultType>
-    inline void evalTo(ResultType& result) const
-    {
-      typedef typename Derived::PlainObject PlainObject;
-      typedef internal::traits<PlainObject> Traits;
-      static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
-      static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
-      static const int Options = PlainObject::Options;
-      typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
-      typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
-      typedef MatrixFunctionAtomic<DynMatrixType> AtomicType;
-      AtomicType atomic(m_f);
-
-      const PlainObject Aevaluated = m_A.eval();
-      MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
-      mf.compute(result);
-    }
-
-    Index rows() const { return m_A.rows(); }
-    Index cols() const { return m_A.cols(); }
-
-  private:
-    typename internal::nested<Derived>::type m_A;
-    StemFunction *m_f;
-
-    MatrixFunctionReturnValue& operator=(const MatrixFunctionReturnValue&);
-};
-
-namespace internal {
-template<typename Derived>
-struct traits<MatrixFunctionReturnValue<Derived> >
-{
-  typedef typename Derived::PlainObject ReturnType;
-};
-}
-
-
-/********** MatrixBase methods **********/
-
-
-template <typename Derived>
-const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::matrixFunction(typename internal::stem_function<typename internal::traits<Derived>::Scalar>::type f) const
-{
-  eigen_assert(rows() == cols());
-  return MatrixFunctionReturnValue<Derived>(derived(), f);
-}
-
-template <typename Derived>
-const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sin() const
-{
-  eigen_assert(rows() == cols());
-  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
-  return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::sin);
-}
-
-template <typename Derived>
-const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cos() const
-{
-  eigen_assert(rows() == cols());
-  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
-  return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::cos);
-}
-
-template <typename Derived>
-const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sinh() const
-{
-  eigen_assert(rows() == cols());
-  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
-  return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::sinh);
-}
-
-template <typename Derived>
-const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cosh() const
-{
-  eigen_assert(rows() == cols());
-  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
-  return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::cosh);
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_MATRIX_FUNCTION
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
deleted file mode 100644
index e1e5b770c..000000000
--- a/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
+++ /dev/null
@@ -1,486 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
-// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MATRIX_LOGARITHM
-#define EIGEN_MATRIX_LOGARITHM
-
-#ifndef M_PI
-#define M_PI 3.141592653589793238462643383279503L
-#endif
-
-namespace Eigen { 
-
-/** \ingroup MatrixFunctions_Module
-  * \class MatrixLogarithmAtomic
-  * \brief Helper class for computing matrix logarithm of atomic matrices.
-  *
-  * \internal
-  * Here, an atomic matrix is a triangular matrix whose diagonal
-  * entries are close to each other.
-  *
-  * \sa class MatrixFunctionAtomic, MatrixBase::log()
-  */
-template <typename MatrixType>
-class MatrixLogarithmAtomic
-{
-public:
-
-  typedef typename MatrixType::Scalar Scalar;
-  // typedef typename MatrixType::Index Index;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  // typedef typename internal::stem_function<Scalar>::type StemFunction;
-  // typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
-
-  /** \brief Constructor. */
-  MatrixLogarithmAtomic() { }
-
-  /** \brief Compute matrix logarithm of atomic matrix
-    * \param[in]  A  argument of matrix logarithm, should be upper triangular and atomic
-    * \returns  The logarithm of \p A.
-    */
-  MatrixType compute(const MatrixType& A);
-
-private:
-
-  void compute2x2(const MatrixType& A, MatrixType& result);
-  void computeBig(const MatrixType& A, MatrixType& result);
-  int getPadeDegree(float normTminusI);
-  int getPadeDegree(double normTminusI);
-  int getPadeDegree(long double normTminusI);
-  void computePade(MatrixType& result, const MatrixType& T, int degree);
-  void computePade3(MatrixType& result, const MatrixType& T);
-  void computePade4(MatrixType& result, const MatrixType& T);
-  void computePade5(MatrixType& result, const MatrixType& T);
-  void computePade6(MatrixType& result, const MatrixType& T);
-  void computePade7(MatrixType& result, const MatrixType& T);
-  void computePade8(MatrixType& result, const MatrixType& T);
-  void computePade9(MatrixType& result, const MatrixType& T);
-  void computePade10(MatrixType& result, const MatrixType& T);
-  void computePade11(MatrixType& result, const MatrixType& T);
-
-  static const int minPadeDegree = 3;
-  static const int maxPadeDegree = std::numeric_limits<RealScalar>::digits<= 24?  5:  // single precision
-                                   std::numeric_limits<RealScalar>::digits<= 53?  7:  // double precision
-                                   std::numeric_limits<RealScalar>::digits<= 64?  8:  // extended precision
-                                   std::numeric_limits<RealScalar>::digits<=106? 10:  // double-double
-                                                                                 11;  // quadruple precision
-
-  // Prevent copying
-  MatrixLogarithmAtomic(const MatrixLogarithmAtomic&);
-  MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&);
-};
-
-/** \brief Compute logarithm of triangular matrix with clustered eigenvalues. */
-template <typename MatrixType>
-MatrixType MatrixLogarithmAtomic<MatrixType>::compute(const MatrixType& A)
-{
-  using std::log;
-  MatrixType result(A.rows(), A.rows());
-  if (A.rows() == 1)
-    result(0,0) = log(A(0,0));
-  else if (A.rows() == 2)
-    compute2x2(A, result);
-  else
-    computeBig(A, result);
-  return result;
-}
-
-/** \brief Compute logarithm of 2x2 triangular matrix. */
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixType& result)
-{
-  using std::abs;
-  using std::ceil;
-  using std::imag;
-  using std::log;
-
-  Scalar logA00 = log(A(0,0));
-  Scalar logA11 = log(A(1,1));
-
-  result(0,0) = logA00;
-  result(1,0) = Scalar(0);
-  result(1,1) = logA11;
-
-  if (A(0,0) == A(1,1)) {
-    result(0,1) = A(0,1) / A(0,0);
-  } else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) {
-    result(0,1) = A(0,1) * (logA11 - logA00) / (A(1,1) - A(0,0));
-  } else {
-    // computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
-    int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI)));
-    Scalar y = A(1,1) - A(0,0), x = A(1,1) + A(0,0);
-    result(0,1) = A(0,1) * (Scalar(2) * internal::atanh2(y,x) + Scalar(0,2*M_PI*unwindingNumber)) / y;
-  }
-}
-
-/** \brief Compute logarithm of triangular matrices with size > 2. 
-  * \details This uses an inverse scaling-and-squaring algorithm. */
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computeBig(const MatrixType& A, MatrixType& result)
-{
-  int numberOfSquareRoots = 0;
-  int numberOfExtraSquareRoots = 0;
-  int degree;
-  MatrixType T = A;
-  const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1:                     // single precision
-                                    maxPadeDegree<= 7? 2.6429608311114350e-1:                     // double precision
-                                    maxPadeDegree<= 8? 2.32777776523703892094e-1L:                // extended precision
-                                    maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L:    // double-double
-                                                       1.1880960220216759245467951592883642e-1L;  // quadruple precision
-
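-  // Inverse scaling and squaring: take square roots of T until it is close enough
-  // to the identity, apply a Pade approximant of log near the identity (computePade),
-  // and finally undo the k square roots via log(A) = 2^k * log(A^(1/2^k)).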
-  while (true) {
-    RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff();
-    if (normTminusI < maxNormForPade) {
-      degree = getPadeDegree(normTminusI);
-      int degree2 = getPadeDegree(normTminusI / RealScalar(2));
-      if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1)) 
-	break;
-      ++numberOfExtraSquareRoots;
-    }
-    MatrixType sqrtT;
-    MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
-    T = sqrtT;
-    ++numberOfSquareRoots;
-  }
-
-  computePade(result, T, degree);
-  result *= pow(RealScalar(2), numberOfSquareRoots);
-}
-
-/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */
-template <typename MatrixType>
-int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(float normTminusI)
-{
-  const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1,
-            5.3149729967117310e-1 };
-  int degree = 3;
-  for (; degree <= maxPadeDegree; ++degree) 
-    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
-      break;
-  return degree;
-}
-
-/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */
-template <typename MatrixType>
-int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(double normTminusI)
-{
-  const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
-            1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
-  int degree = 3;
-  for (; degree <= maxPadeDegree; ++degree)
-    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
-      break;
-  return degree;
-}
-
-/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double) */
-template <typename MatrixType>
-int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
-{
-#if   LDBL_MANT_DIG == 53         // double precision
-  const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L,
-            1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L };
-#elif LDBL_MANT_DIG <= 64         // extended precision
-  const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L,
-            5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L,
-            2.32777776523703892094e-1L };
-#elif LDBL_MANT_DIG <= 106        // double-double
-  const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */,
-            9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L,
-            1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L,
-            4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L,
-            1.05026503471351080481093652651105e-1L };
-#else                             // quadruple precision
-  const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */,
-            5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L,
-            8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L,
-            3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L,
-            8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L };
-#endif
-  int degree = 3;
-  for (; degree <= maxPadeDegree; ++degree)
-    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
-      break;
-  return degree;
-}
-
-/* \brief Compute Pade approximation to matrix logarithm */
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade(MatrixType& result, const MatrixType& T, int degree)
-{
-  switch (degree) {
-    case 3:  computePade3(result, T);  break;
-    case 4:  computePade4(result, T);  break;
-    case 5:  computePade5(result, T);  break;
-    case 6:  computePade6(result, T);  break;
-    case 7:  computePade7(result, T);  break;
-    case 8:  computePade8(result, T);  break;
-    case 9:  computePade9(result, T);  break;
-    case 10: computePade10(result, T); break;
-    case 11: computePade11(result, T); break;
-    default: assert(false); // should never happen
-  }
-} 
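-
-// Note: each computePadeN() below evaluates the degree-N diagonal Pade approximant
-// of log(I + X) at X = T - I in its partial-fraction form
-//   r_N(X) = sum_{k=1}^{N} w_k * (I + x_k*X)^{-1} * X,
-// where x_k and w_k are the nodes and weights of the N-point Gauss-Legendre
-// quadrature rule on [0,1], tabulated in each function.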
-
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade3(MatrixType& result, const MatrixType& T)
-{
-  const int degree = 3;
-  const RealScalar nodes[]   = { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L,
-            0.8872983346207416885179265399782400L };
-  const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,
-            0.2777777777777777777777777777777778L };
-  assert(degree <= maxPadeDegree);
-  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
-  result.setZero(T.rows(), T.rows());
-  for (int k = 0; k < degree; ++k)
-    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
-                           .template triangularView<Upper>().solve(TminusI);
-}
-
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade4(MatrixType& result, const MatrixType& T)
-{
-  const int degree = 4;
-  const RealScalar nodes[]   = { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L,
-            0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L };
-  const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,
-            0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L };
-  assert(degree <= maxPadeDegree);
-  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
-  result.setZero(T.rows(), T.rows());
-  for (int k = 0; k < degree; ++k)
-    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
-                           .template triangularView<Upper>().solve(TminusI);
-}
-
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade5(MatrixType& result, const MatrixType& T)
-{
-  const int degree = 5;
-  const RealScalar nodes[]   = { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L,
-            0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L,
-            0.9530899229693319963988134391496965L };
-  const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,
-            0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
-            0.1184634425280945437571320203599587L };
-  assert(degree <= maxPadeDegree);
-  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
-  result.setZero(T.rows(), T.rows());
-  for (int k = 0; k < degree; ++k)
-    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
-                           .template triangularView<Upper>().solve(TminusI);
-}
-
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade6(MatrixType& result, const MatrixType& T)
-{
-  const int degree = 6;
-  const RealScalar nodes[]   = { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L,
-            0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L,
-            0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L };
-  const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,
-            0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
-            0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
-  assert(degree <= maxPadeDegree);
-  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
-  result.setZero(T.rows(), T.rows());
-  for (int k = 0; k < degree; ++k)
-    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
-                           .template triangularView<Upper>().solve(TminusI);
-}
-
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade7(MatrixType& result, const MatrixType& T)
-{
-  const int degree = 7;
-  const RealScalar nodes[]   = { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L,
-            0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L,
-            0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L,
-            0.9745539561713792622630948420239256L };
-  const RealScalar weights[] = { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L,
-            0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
-            0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
-            0.0647424830844348466353057163395410L };
-  assert(degree <= maxPadeDegree);
-  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
-  result.setZero(T.rows(), T.rows());
-  for (int k = 0; k < degree; ++k)
-    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
-                           .template triangularView<Upper>().solve(TminusI);
-}
-
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade8(MatrixType& result, const MatrixType& T)
-{
-  const int degree = 8;
-  const RealScalar nodes[]   = { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L,
-            0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L,
-            0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L,
-            0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L };
-  const RealScalar weights[] = { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L,
-            0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
-            0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
-            0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L };
-  assert(degree <= maxPadeDegree);
-  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
-  result.setZero(T.rows(), T.rows());
-  for (int k = 0; k < degree; ++k)
-    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
-                           .template triangularView<Upper>().solve(TminusI);
-}
-
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade9(MatrixType& result, const MatrixType& T)
-{
-  const int degree = 9;
-  const RealScalar nodes[]   = { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L,
-            0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L,
-            0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L,
-            0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L,
-            0.9840801197538130449177881014518364L };
-  const RealScalar weights[] = { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L,
-            0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L,
-            0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
-            0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
-            0.0406371941807872059859460790552618L };
-  assert(degree <= maxPadeDegree);
-  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
-  result.setZero(T.rows(), T.rows());
-  for (int k = 0; k < degree; ++k)
-    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
-                           .template triangularView<Upper>().solve(TminusI);
-}
-
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade10(MatrixType& result, const MatrixType& T)
-{
-  const int degree = 10;
-  const RealScalar nodes[]   = { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L,
-            0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L,
-            0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L,
-            0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L,
-            0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L };
-  const RealScalar weights[] = { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L,
-            0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L,
-            0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
-            0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
-            0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L };
-  assert(degree <= maxPadeDegree);
-  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
-  result.setZero(T.rows(), T.rows());
-  for (int k = 0; k < degree; ++k)
-    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
-                           .template triangularView<Upper>().solve(TminusI);
-}
-
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const MatrixType& T)
-{
-  const int degree = 11;
-  const RealScalar nodes[]   = { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L,
-            0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L,
-            0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L,
-            0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L,
-            0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L,
-            0.9891143290730284964019690005614287L };
-  const RealScalar weights[] = { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L,
-            0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L,
-            0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L,
-            0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
-            0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
-            0.0278342835580868332413768602212743L };
-  assert(degree <= maxPadeDegree);
-  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
-  result.setZero(T.rows(), T.rows());
-  for (int k = 0; k < degree; ++k)
-    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
-                           .template triangularView<Upper>().solve(TminusI);
-}
-
-/** \ingroup MatrixFunctions_Module
-  *
-  * \brief Proxy for the matrix logarithm of some matrix (expression).
-  *
-  * \tparam Derived  Type of the argument to the matrix function.
-  *
-  * This class holds the argument to the matrix function until it is
-  * assigned or evaluated for some other reason (so the argument
-  * should not be changed in the meantime). It is the return type of
-  * MatrixBase::log() and most of the time this is the only way it
-  * is used.
-  */
-template<typename Derived> class MatrixLogarithmReturnValue
-: public ReturnByValue<MatrixLogarithmReturnValue<Derived> >
-{
-public:
-
-  typedef typename Derived::Scalar Scalar;
-  typedef typename Derived::Index Index;
-
-  /** \brief Constructor.
-    *
-    * \param[in]  A  %Matrix (expression) forming the argument of the matrix logarithm.
-    */
-  MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }
-  
-  /** \brief Compute the matrix logarithm.
-    *
-    * \param[out]  result  Logarithm of \p A, where \p A is as specified in the constructor.
-    */
-  template <typename ResultType>
-  inline void evalTo(ResultType& result) const
-  {
-    typedef typename Derived::PlainObject PlainObject;
-    typedef internal::traits<PlainObject> Traits;
-    static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
-    static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
-    static const int Options = PlainObject::Options;
-    typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
-    typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
-    typedef MatrixLogarithmAtomic<DynMatrixType> AtomicType;
-    AtomicType atomic;
-    
-    const PlainObject Aevaluated = m_A.eval();
-    MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
-    mf.compute(result);
-  }
-
-  Index rows() const { return m_A.rows(); }
-  Index cols() const { return m_A.cols(); }
-  
-private:
-  typename internal::nested<Derived>::type m_A;
-  
-  MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&);
-};
-
-namespace internal {
-  template<typename Derived>
-  struct traits<MatrixLogarithmReturnValue<Derived> >
-  {
-    typedef typename Derived::PlainObject ReturnType;
-  };
-}
-
-
-/********** MatrixBase method **********/
-
-
-template <typename Derived>
-const MatrixLogarithmReturnValue<Derived> MatrixBase<Derived>::log() const
-{
-  eigen_assert(rows() == cols());
-  return MatrixLogarithmReturnValue<Derived>(derived());
-}
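-
-// Usage sketch (illustrative): for an invertible square MatrixXd A whose eigenvalues
-// avoid the closed negative real axis,
-//   MatrixXd logA = A.log();   // the returned proxy is evaluated on assignment
-// and the matrix exponential of logA reproduces A up to rounding error.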
-
-} // end namespace Eigen
-
-#endif // EIGEN_MATRIX_LOGARITHM
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
deleted file mode 100644
index 3786510c0..000000000
--- a/resources/3rdparty/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
+++ /dev/null
@@ -1,484 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MATRIX_SQUARE_ROOT
-#define EIGEN_MATRIX_SQUARE_ROOT
-
-namespace Eigen { 
-
-/** \ingroup MatrixFunctions_Module
-  * \brief Class for computing matrix square roots of upper quasi-triangular matrices.
-  * \tparam  MatrixType  type of the argument of the matrix square root,
-  *                      expected to be an instantiation of the Matrix class template.
-  *
-  * This class computes the square root of the upper quasi-triangular
-  * matrix stored in the upper Hessenberg part of the matrix passed to
-  * the constructor.
-  *
-  * \sa MatrixSquareRoot, MatrixSquareRootTriangular
-  */
-template <typename MatrixType>
-class MatrixSquareRootQuasiTriangular
-{
-  public:
-
-    /** \brief Constructor. 
-      *
-      * \param[in]  A  upper quasi-triangular matrix whose square root 
-      *                is to be computed.
-      *
-      * The class stores a reference to \p A, so it should not be
-      * changed (or destroyed) before compute() is called.
-      */
-    MatrixSquareRootQuasiTriangular(const MatrixType& A) 
-      : m_A(A) 
-    {
-      eigen_assert(A.rows() == A.cols());
-    }
-    
-    /** \brief Compute the matrix square root
-      *
-      * \param[out] result  square root of \p A, as specified in the constructor.
-      *
-      * Only the upper Hessenberg part of \p result is updated, the
-      * rest is not touched.  See MatrixBase::sqrt() for details on
-      * how this computation is implemented.
-      */
-    template <typename ResultType> void compute(ResultType &result);    
-    
-  private:
-    typedef typename MatrixType::Index Index;
-    typedef typename MatrixType::Scalar Scalar;
-    
-    void computeDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T);
-    void computeOffDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T);
-    void compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i);
-    void compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
-				  typename MatrixType::Index i, typename MatrixType::Index j);
-    void compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
-				  typename MatrixType::Index i, typename MatrixType::Index j);
-    void compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
-				  typename MatrixType::Index i, typename MatrixType::Index j);
-    void compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
-				  typename MatrixType::Index i, typename MatrixType::Index j);
-  
-    template <typename SmallMatrixType>
-    static void solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A, 
-				     const SmallMatrixType& B, const SmallMatrixType& C);
-  
-    const MatrixType& m_A;
-};
-
-template <typename MatrixType>
-template <typename ResultType> 
-void MatrixSquareRootQuasiTriangular<MatrixType>::compute(ResultType &result)
-{
-  // Compute Schur decomposition of m_A
-  const RealSchur<MatrixType> schurOfA(m_A);  
-  const MatrixType& T = schurOfA.matrixT();
-  const MatrixType& U = schurOfA.matrixU();
-
-  // Compute square root of T
-  MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows());
-  computeDiagonalPartOfSqrt(sqrtT, T);
-  computeOffDiagonalPartOfSqrt(sqrtT, T);
-
-  // Compute square root of m_A
-  result = U * sqrtT * U.adjoint();
-}
-
-// pre:  T is quasi-upper-triangular and sqrtT is a zero matrix of the same size
-// post: the diagonal blocks of sqrtT are the square roots of the diagonal blocks of T
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>::computeDiagonalPartOfSqrt(MatrixType& sqrtT, 
-									  const MatrixType& T)
-{
-  const Index size = m_A.rows();
-  for (Index i = 0; i < size; i++) {
-    if (i == size - 1 || T.coeff(i+1, i) == 0) {
-      eigen_assert(T(i,i) > 0);
-      sqrtT.coeffRef(i,i) = internal::sqrt(T.coeff(i,i));
-    }
-    else {
-      compute2x2diagonalBlock(sqrtT, T, i);
-      ++i;
-    }
-  }
-}
-
-// pre:  T is quasi-upper-triangular and diagonal blocks of sqrtT are square root of diagonal blocks of T.
-// post: sqrtT is the square root of T.
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>::computeOffDiagonalPartOfSqrt(MatrixType& sqrtT, 
-									     const MatrixType& T)
-{
-  const Index size = m_A.rows();
-  for (Index j = 1; j < size; j++) {
-      if (T.coeff(j, j-1) != 0)  // if T(j-1:j, j-1:j) is a 2-by-2 block
-	continue;
-    for (Index i = j-1; i >= 0; i--) {
-      if (i > 0 && T.coeff(i, i-1) != 0)  // if T(i-1:i, i-1:i) is a 2-by-2 block
-	continue;
-      bool iBlockIs2x2 = (i < size - 1) && (T.coeff(i+1, i) != 0);
-      bool jBlockIs2x2 = (j < size - 1) && (T.coeff(j+1, j) != 0);
-      if (iBlockIs2x2 && jBlockIs2x2) 
-	compute2x2offDiagonalBlock(sqrtT, T, i, j);
-      else if (iBlockIs2x2 && !jBlockIs2x2) 
-	compute2x1offDiagonalBlock(sqrtT, T, i, j);
-      else if (!iBlockIs2x2 && jBlockIs2x2) 
-	compute1x2offDiagonalBlock(sqrtT, T, i, j);
-      else if (!iBlockIs2x2 && !jBlockIs2x2) 
-	compute1x1offDiagonalBlock(sqrtT, T, i, j);
-    }
-  }
-}
-
-// pre:  T.block(i,i,2,2) has complex conjugate eigenvalues
-// post: sqrtT.block(i,i,2,2) is square root of T.block(i,i,2,2)
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
-     ::compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i)
-{
-  // TODO: This case (2-by-2 blocks with complex conjugate eigenvalues) is probably hidden somewhere
-  //       in EigenSolver. If we expose it, we could call it directly from here.
-  Matrix<Scalar,2,2> block = T.template block<2,2>(i,i);
-  EigenSolver<Matrix<Scalar,2,2> > es(block);
-  sqrtT.template block<2,2>(i,i)
-    = (es.eigenvectors() * es.eigenvalues().cwiseSqrt().asDiagonal() * es.eigenvectors().inverse()).real();
-}
-
-// pre:  block structure of T is such that (i,j) is a 1x1 block,
-//       all blocks of sqrtT to left of and below (i,j) are correct
-// post: sqrtT(i,j) has the correct value
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
-     ::compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
-				  typename MatrixType::Index i, typename MatrixType::Index j)
-{
-  Scalar tmp = (sqrtT.row(i).segment(i+1,j-i-1) * sqrtT.col(j).segment(i+1,j-i-1)).value();
-  sqrtT.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (sqrtT.coeff(i,i) + sqrtT.coeff(j,j));
-}
-
-// similar to compute1x1offDiagonalBlock()
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
-     ::compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
-				  typename MatrixType::Index i, typename MatrixType::Index j)
-{
-  Matrix<Scalar,1,2> rhs = T.template block<1,2>(i,j);
-  if (j-i > 1)
-    rhs -= sqrtT.block(i, i+1, 1, j-i-1) * sqrtT.block(i+1, j, j-i-1, 2);
-  Matrix<Scalar,2,2> A = sqrtT.coeff(i,i) * Matrix<Scalar,2,2>::Identity();
-  A += sqrtT.template block<2,2>(j,j).transpose();
-  sqrtT.template block<1,2>(i,j).transpose() = A.fullPivLu().solve(rhs.transpose());
-}
-
-// similar to compute1x1offDiagonalBlock()
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
-     ::compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
-				  typename MatrixType::Index i, typename MatrixType::Index j)
-{
-  Matrix<Scalar,2,1> rhs = T.template block<2,1>(i,j);
-  if (j-i > 2)
-    rhs -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 1);
-  Matrix<Scalar,2,2> A = sqrtT.coeff(j,j) * Matrix<Scalar,2,2>::Identity();
-  A += sqrtT.template block<2,2>(i,i);
-  sqrtT.template block<2,1>(i,j) = A.fullPivLu().solve(rhs);
-}
-
-// similar to compute1x1offDiagonalBlock()
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
-     ::compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, 
-				  typename MatrixType::Index i, typename MatrixType::Index j)
-{
-  Matrix<Scalar,2,2> A = sqrtT.template block<2,2>(i,i);
-  Matrix<Scalar,2,2> B = sqrtT.template block<2,2>(j,j);
-  Matrix<Scalar,2,2> C = T.template block<2,2>(i,j);
-  if (j-i > 2)
-    C -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 2);
-  Matrix<Scalar,2,2> X;
-  solveAuxiliaryEquation(X, A, B, C);
-  sqrtT.template block<2,2>(i,j) = X;
-}
-
-// solves the equation A X + X B = C where all matrices are 2-by-2
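-// The 2x2 Sylvester equation is solved by vectorizing X and C row by row: with
-// x = [X(0,0), X(0,1), X(1,0), X(1,1)]^T the system reads
-//   (kron(A, I2) + kron(I2, B^T)) * x = [C(0,0), C(0,1), C(1,0), C(1,1)]^T,
-// whose 4x4 coefficient matrix is assembled entry by entry below.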
-template <typename MatrixType>
-template <typename SmallMatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
-     ::solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A,
-			      const SmallMatrixType& B, const SmallMatrixType& C)
-{
-  EIGEN_STATIC_ASSERT((internal::is_same<SmallMatrixType, Matrix<Scalar,2,2> >::value),
-		      EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT);
-
-  Matrix<Scalar,4,4> coeffMatrix = Matrix<Scalar,4,4>::Zero();
-  coeffMatrix.coeffRef(0,0) = A.coeff(0,0) + B.coeff(0,0);
-  coeffMatrix.coeffRef(1,1) = A.coeff(0,0) + B.coeff(1,1);
-  coeffMatrix.coeffRef(2,2) = A.coeff(1,1) + B.coeff(0,0);
-  coeffMatrix.coeffRef(3,3) = A.coeff(1,1) + B.coeff(1,1);
-  coeffMatrix.coeffRef(0,1) = B.coeff(1,0);
-  coeffMatrix.coeffRef(0,2) = A.coeff(0,1);
-  coeffMatrix.coeffRef(1,0) = B.coeff(0,1);
-  coeffMatrix.coeffRef(1,3) = A.coeff(0,1);
-  coeffMatrix.coeffRef(2,0) = A.coeff(1,0);
-  coeffMatrix.coeffRef(2,3) = B.coeff(1,0);
-  coeffMatrix.coeffRef(3,1) = A.coeff(1,0);
-  coeffMatrix.coeffRef(3,2) = B.coeff(0,1);
-  
-  Matrix<Scalar,4,1> rhs;
-  rhs.coeffRef(0) = C.coeff(0,0);
-  rhs.coeffRef(1) = C.coeff(0,1);
-  rhs.coeffRef(2) = C.coeff(1,0);
-  rhs.coeffRef(3) = C.coeff(1,1);
-  
-  Matrix<Scalar,4,1> result;
-  result = coeffMatrix.fullPivLu().solve(rhs);
-
-  X.coeffRef(0,0) = result.coeff(0);
-  X.coeffRef(0,1) = result.coeff(1);
-  X.coeffRef(1,0) = result.coeff(2);
-  X.coeffRef(1,1) = result.coeff(3);
-}
-
-
-/** \ingroup MatrixFunctions_Module
-  * \brief Class for computing matrix square roots of upper triangular matrices.
-  * \tparam  MatrixType  type of the argument of the matrix square root,
-  *                      expected to be an instantiation of the Matrix class template.
-  *
-  * This class computes the square root of the upper triangular matrix
-  * stored in the upper triangular part (including the diagonal) of
-  * the matrix passed to the constructor.
-  *
-  * \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular
-  */
-template <typename MatrixType>
-class MatrixSquareRootTriangular
-{
-  public:
-    MatrixSquareRootTriangular(const MatrixType& A) 
-      : m_A(A) 
-    {
-      eigen_assert(A.rows() == A.cols());
-    }
-
-    /** \brief Compute the matrix square root
-      *
-      * \param[out] result  square root of \p A, as specified in the constructor.
-      *
-      * Only the upper triangular part (including the diagonal) of 
-      * \p result is updated, the rest is not touched.  See
-      * MatrixBase::sqrt() for details on how this computation is
-      * implemented.
-      */
-    template <typename ResultType> void compute(ResultType &result);    
-
- private:
-    const MatrixType& m_A;
-};
-
-template <typename MatrixType>
-template <typename ResultType> 
-void MatrixSquareRootTriangular<MatrixType>::compute(ResultType &result)
-{
-  // Compute Schur decomposition of m_A
-  const ComplexSchur<MatrixType> schurOfA(m_A);  
-  const MatrixType& T = schurOfA.matrixT();
-  const MatrixType& U = schurOfA.matrixU();
-
-  // Compute square root of T and store it in upper triangular part of result
-  // This uses the fact that the square root of a triangular matrix can be computed directly.
-  result.resize(m_A.rows(), m_A.cols());
-  typedef typename MatrixType::Index Index;
-  for (Index i = 0; i < m_A.rows(); i++) {
-    result.coeffRef(i,i) = internal::sqrt(T.coeff(i,i));
-  }
-  for (Index j = 1; j < m_A.cols(); j++) {
-    for (Index i = j-1; i >= 0; i--) {
-      typedef typename MatrixType::Scalar Scalar;
-      // if i = j-1, then segment has length 0 so tmp = 0
-      Scalar tmp = (result.row(i).segment(i+1,j-i-1) * result.col(j).segment(i+1,j-i-1)).value();
-      // denominator may be zero if original matrix is singular
-      result.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j));
-    }
-  }
-
-  // Compute square root of m_A as U * result * U.adjoint()
-  MatrixType tmp;
-  tmp.noalias() = U * result.template triangularView<Upper>();
-  result.noalias() = tmp * U.adjoint();
-}
-
-
-/** \ingroup MatrixFunctions_Module
-  * \brief Class for computing matrix square roots of general matrices.
-  * \tparam  MatrixType  type of the argument of the matrix square root,
-  *                      expected to be an instantiation of the Matrix class template.
-  *
-  * \sa MatrixSquareRootTriangular, MatrixSquareRootQuasiTriangular, MatrixBase::sqrt()
-  */
-template <typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
-class MatrixSquareRoot
-{
-  public:
-
-    /** \brief Constructor. 
-      *
-      * \param[in]  A  matrix whose square root is to be computed.
-      *
-      * The class stores a reference to \p A, so it should not be
-      * changed (or destroyed) before compute() is called.
-      */
-    MatrixSquareRoot(const MatrixType& A); 
-    
-    /** \brief Compute the matrix square root
-      *
-      * \param[out] result  square root of \p A, as specified in the constructor.
-      *
-      * See MatrixBase::sqrt() for details on how this computation is
-      * implemented.
-      */
-    template <typename ResultType> void compute(ResultType &result);    
-};
-
-
-// ********** Partial specialization for real matrices **********
-
-template <typename MatrixType>
-class MatrixSquareRoot<MatrixType, 0>
-{
-  public:
-
-    MatrixSquareRoot(const MatrixType& A) 
-      : m_A(A) 
-    {  
-      eigen_assert(A.rows() == A.cols());
-    }
-  
-    template <typename ResultType> void compute(ResultType &result)
-    {
-      // Compute Schur decomposition of m_A
-      const RealSchur<MatrixType> schurOfA(m_A);  
-      const MatrixType& T = schurOfA.matrixT();
-      const MatrixType& U = schurOfA.matrixU();
-    
-      // Compute square root of T
-      MatrixSquareRootQuasiTriangular<MatrixType> tmp(T);
-      MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows());
-      tmp.compute(sqrtT);
-    
-      // Compute square root of m_A
-      result = U * sqrtT * U.adjoint();
-    }
-    
-  private:
-    const MatrixType& m_A;
-};
-
-
-// ********** Partial specialization for complex matrices **********
-
-template <typename MatrixType>
-class MatrixSquareRoot<MatrixType, 1>
-{
-  public:
-
-    MatrixSquareRoot(const MatrixType& A) 
-      : m_A(A) 
-    {  
-      eigen_assert(A.rows() == A.cols());
-    }
-  
-    template <typename ResultType> void compute(ResultType &result)
-    {
-      // Compute Schur decomposition of m_A
-      const ComplexSchur<MatrixType> schurOfA(m_A);  
-      const MatrixType& T = schurOfA.matrixT();
-      const MatrixType& U = schurOfA.matrixU();
-    
-      // Compute square root of T
-      MatrixSquareRootTriangular<MatrixType> tmp(T);
-      MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows());
-      tmp.compute(sqrtT);
-    
-      // Compute square root of m_A
-      result = U * sqrtT * U.adjoint();
-    }
-    
-  private:
-    const MatrixType& m_A;
-};
-
-
-/** \ingroup MatrixFunctions_Module
-  *
-  * \brief Proxy for the matrix square root of some matrix (expression).
-  *
-  * \tparam Derived  Type of the argument to the matrix square root.
-  *
-  * This class holds the argument to the matrix square root until it
-  * is assigned or evaluated for some other reason (so the argument
-  * should not be changed in the meantime). It is the return type of
-  * MatrixBase::sqrt() and most of the time this is the only way it is
-  * used.
-  */
-template<typename Derived> class MatrixSquareRootReturnValue
-: public ReturnByValue<MatrixSquareRootReturnValue<Derived> >
-{
-    typedef typename Derived::Index Index;
-  public:
-    /** \brief Constructor.
-      *
-      * \param[in]  src  %Matrix (expression) forming the argument of the
-      * matrix square root.
-      */
-    MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { }
-
-    /** \brief Compute the matrix square root.
-      *
-      * \param[out]  result  the matrix square root of \p src in the
-      * constructor.
-      */
-    template <typename ResultType>
-    inline void evalTo(ResultType& result) const
-    {
-      const typename Derived::PlainObject srcEvaluated = m_src.eval();
-      MatrixSquareRoot<typename Derived::PlainObject> me(srcEvaluated);
-      me.compute(result);
-    }
-
-    Index rows() const { return m_src.rows(); }
-    Index cols() const { return m_src.cols(); }
-
-  protected:
-    const Derived& m_src;
-  private:
-    MatrixSquareRootReturnValue& operator=(const MatrixSquareRootReturnValue&);
-};
-
-namespace internal {
-template<typename Derived>
-struct traits<MatrixSquareRootReturnValue<Derived> >
-{
-  typedef typename Derived::PlainObject ReturnType;
-};
-}
-
-template <typename Derived>
-const MatrixSquareRootReturnValue<Derived> MatrixBase<Derived>::sqrt() const
-{
-  eigen_assert(rows() == cols());
-  return MatrixSquareRootReturnValue<Derived>(derived());
-}
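-
-// Usage sketch (illustrative):
-//   MatrixXd A = MatrixXd::Random(4,4);
-//   A = A*A.transpose() + 4*MatrixXd::Identity(4,4);  // comfortably positive definite
-//   MatrixXd S = A.sqrt();                            // the proxy is evaluated on assignment
-//   // S*S reproduces A up to rounding error.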
-
-} // end namespace Eigen
-
-#endif // EIGEN_MATRIX_SQUARE_ROOT
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h b/resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
deleted file mode 100644
index bf13cf21f..000000000
--- a/resources/3rdparty/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
+++ /dev/null
@@ -1,232 +0,0 @@
-
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsa_wakam@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_BROWSE_MATRICES_H
-#define EIGEN_BROWSE_MATRICES_H
-
-namespace Eigen {
-
-enum {
-  SPD = 0x100,
-  NonSymmetric = 0x0
-}; 
-
-/** 
- * @brief Iterator to browse matrices from a specified folder
- * 
- * This is used to load all the matrices from a folder. 
- * The matrices should be in Matrix Market format.
- * It is assumed that the matrices are named as matname.mtx,
- * or matname_SPD.mtx if the matrix is symmetric and positive definite (or Hermitian).
- * The right hand side vectors are loaded as well, if they exist.
- * They should be named as matname_b.mtx.
- * Note that the right hand side for an SPD matrix is named as matname_SPD_b.mtx.
- * 
- * Sometimes a reference solution is available. In this case, it should be named as matname_x.mtx
- * 
- * Sample code
- * \code
- * 
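- *   // Illustrative sketch: 'folder' is a std::string path holding .mtx files.
- *   MatrixMarketIterator<double> it(folder);
- *   for ( ; it; ++it)
- *   {
- *     SparseMatrix<double> A = it.matrix();
- *     VectorXd b = it.rhs();
- *     // solve A x = b here, and compare x with it.refX() when it.hasrefX()
- *   }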
- * \endcode
- * 
- * \tparam Scalar The scalar type 
- */
-template <typename Scalar>
-class MatrixMarketIterator 
-{
-  public:
-    typedef Matrix<Scalar,Dynamic,1> VectorType; 
-    typedef SparseMatrix<Scalar,ColMajor> MatrixType; 
-  
-  public:
-    MatrixMarketIterator(const std::string folder):m_sym(0),m_isvalid(false),m_matIsLoaded(false),m_hasRhs(false),m_hasrefX(false),m_folder(folder)
-    {
-      m_folder_id = opendir(folder.c_str());
-      if (!m_folder_id){
-        m_isvalid = false;
-        std::cerr << "The provided Matrix folder could not be opened \n\n";
-        abort();
-      }
-      Getnextvalidmatrix();
-    }
-    
-    ~MatrixMarketIterator()
-    {
-      if (m_folder_id) closedir(m_folder_id); 
-    }
-    
-    inline MatrixMarketIterator& operator++()
-    {
-      m_matIsLoaded = false;
-      m_hasrefX = false;
-      m_hasRhs = false;
-      Getnextvalidmatrix();
-      return *this;
-    }
-    inline operator bool() const { return m_isvalid;}
-    
-    /** Return the sparse matrix corresponding to the current file */
-    inline MatrixType& matrix() 
-    { 
-      // Read the matrix
-      if (m_matIsLoaded) return m_mat;
-      
-      std::string matrix_file = m_folder + "/" + m_matname + ".mtx";
-      if ( !loadMarket(m_mat, matrix_file)) 
-      {
-        m_matIsLoaded = false;
-        return m_mat;
-      }
-      m_matIsLoaded = true; 
-      
-      if (m_sym != NonSymmetric) 
-      { // Only one triangular part of a symmetric matrix is stored in the file; expand it
-        // into the full selfadjoint matrix, as required by solvers for general (nonsymmetric) matrices.
-        MatrixType B; 
-        B = m_mat;
-        m_mat = B.template selfadjointView<Lower>();
-      }
-      return m_mat; 
-    }
-    
-    /** Return the right hand side corresponding to the current matrix. 
-     * If the rhs file is not provided, a random rhs is generated
-     */
-    inline VectorType& rhs() 
-    { 
-       // Get the right hand side
-      if (m_hasRhs) return m_rhs;
-      
-      std::string rhs_file;
-      rhs_file = m_folder + "/" + m_matname + "_b.mtx"; // The pattern is matname_b.mtx
-      m_hasRhs = Fileexists(rhs_file);
-      if (m_hasRhs)
-      {
-        m_rhs.resize(m_mat.cols());
-        m_hasRhs = loadMarketVector(m_rhs, rhs_file);
-      }
-      if (!m_hasRhs)
-      {
-        // Generate a random right hand side
-        if (!m_matIsLoaded) this->matrix(); 
-        m_refX.resize(m_mat.cols());
-        m_refX.setRandom();
-        m_rhs = m_mat * m_refX;
-        m_hasrefX = true;
-        m_hasRhs = true;
-      }
-      return m_rhs; 
-    }
-    
-    /** Return a reference solution
-     * If it is not provided and if the right hand side is not available
-     * then refX is randomly generated such that A*refX = b 
-     * where A and b are the matrix and the rhs. 
-     * Note that when a rhs is provided, refX is not available 
-     */
-    inline VectorType& refX() 
-    { 
-      // Check if a reference solution is provided
-      if (m_hasrefX) return m_refX;
-      
-      std::string lhs_file;
-      lhs_file = m_folder + "/" + m_matname + "_x.mtx"; 
-      m_hasrefX = Fileexists(lhs_file);
-      if (m_hasrefX)
-      {
-        m_refX.resize(m_mat.cols());
-        m_hasrefX = loadMarketVector(m_refX, lhs_file);
-      }
-      return m_refX; 
-    }
-    
-    inline std::string& matname() { return m_matname; }
-    
-    inline int sym() { return m_sym; }
-    
-    inline bool hasRhs() {return m_hasRhs; }
-    inline bool hasrefX() {return m_hasrefX; }
-    
-  protected:
-    
-    inline bool Fileexists(std::string file)
-    {
-      std::ifstream file_id(file.c_str());
-      if (!file_id.good() ) 
-      {
-        return false;
-      }
-      else 
-      {
-        file_id.close();
-        return true;
-      }
-    }
-    
-    void Getnextvalidmatrix( )
-    {
-      m_isvalid = false;
-      // Here, we return with the next valid matrix in the folder
-      while ( (m_curs_id = readdir(m_folder_id)) != NULL) {
-        m_isvalid = false;
-        std::string curfile;
-        curfile = m_folder + "/" + m_curs_id->d_name;
-        // Discard if it is a folder
-        if (m_curs_id->d_type == DT_DIR) continue; //FIXME This may not be available on non BSD systems
-//         struct stat st_buf; 
-//         stat (curfile.c_str(), &st_buf);
-//         if (S_ISDIR(st_buf.st_mode)) continue;
-        
-        // Determine from the header if it is a matrix or a right hand side 
-        bool isvector,iscomplex=false;
-        if(!getMarketHeader(curfile,m_sym,iscomplex,isvector)) continue;
-        if(isvector) continue;
-        if (!iscomplex)
-        {
-          if(internal::is_same<Scalar, std::complex<float> >::value || internal::is_same<Scalar, std::complex<double> >::value)
-            continue; 
-        }
-        if (iscomplex)
-        {
-          if(internal::is_same<Scalar, float>::value || internal::is_same<Scalar, double>::value)
-            continue; 
-        }
-        
-        
-        // Get the matrix name
-        std::string filename = m_curs_id->d_name;
-        m_matname = filename.substr(0, filename.length()-4); 
-        
-        // Find if the matrix is SPD 
-        size_t found = m_matname.find("SPD");
-        if( (found!=std::string::npos) && (m_sym != NonSymmetric) )
-          m_sym = SPD;
-       
-        m_isvalid = true;
-        break; 
-      }
-    }
-    int m_sym; // Symmetry of the matrix
-    MatrixType m_mat; // Current matrix  
-    VectorType m_rhs;  // Current vector
-    VectorType m_refX; // The reference solution, if exists
-    std::string m_matname; // Matrix Name
-    bool m_isvalid; 
-    bool m_matIsLoaded; // Determine if the matrix has already been loaded from the file
-    bool m_hasRhs; // The right hand side exists
-    bool m_hasrefX; // A reference solution is provided
-    std::string m_folder;
-    DIR * m_folder_id;
-    struct dirent *m_curs_id; 
-    
-};
-
-} // end namespace Eigen
-
-#endif
diff --git a/resources/3rdparty/eigen/unsupported/Eigen/src/Splines/Spline.h b/resources/3rdparty/eigen/unsupported/Eigen/src/Splines/Spline.h
deleted file mode 100644
index 74048834a..000000000
--- a/resources/3rdparty/eigen/unsupported/Eigen/src/Splines/Spline.h
+++ /dev/null
@@ -1,479 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2010-2011 Hauke Heibel <hauke.heibel@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPLINE_H
-#define EIGEN_SPLINE_H
-
-#include "SplineFwd.h"
-
-namespace Eigen
-{
-    /**
-     * \ingroup Splines_Module
-     * \class Spline
-     * \brief A class representing multi-dimensional spline curves.
-     *
-     * The class represents B-splines with non-uniform knot vectors. Each control
-     * point of the B-spline is associated with a basis function
-     * \f{align*}
-     *   C(u) & = \sum_{i=0}^{n}N_{i,p}(u)P_i
-     * \f}
-     *
-     * \tparam _Scalar The underlying data type (typically float or double)
-     * \tparam _Dim The curve dimension (e.g. 2 or 3)
-     * \tparam _Degree By default set to Dynamic; can be set to the actual desired
-     *                degree for optimization purposes (would result in stack allocation
-     *                of several temporary variables).
-     **/
-  template <typename _Scalar, int _Dim, int _Degree>
-  class Spline
-  {
-  public:
-    typedef _Scalar Scalar; /*!< The spline curve's scalar type. */
-    enum { Dimension = _Dim /*!< The spline curve's dimension. */ };
-    enum { Degree = _Degree /*!< The spline curve's degree. */ };
-
-    /** \brief The point type the spline is representing. */
-    typedef typename SplineTraits<Spline>::PointType PointType;
-    
-    /** \brief The data type used to store knot vectors. */
-    typedef typename SplineTraits<Spline>::KnotVectorType KnotVectorType;
-    
-    /** \brief The data type used to store non-zero basis functions. */
-    typedef typename SplineTraits<Spline>::BasisVectorType BasisVectorType;
-    
-    /** \brief The data type representing the spline's control points. */
-    typedef typename SplineTraits<Spline>::ControlPointVectorType ControlPointVectorType;
-    
-    /**
-    * \brief Creates a (constant) zero spline.
-    * For Splines with dynamic degree, the resulting degree will be 0.
-    **/
-    Spline() 
-    : m_knots(1, (Degree==Dynamic ? 2 : 2*Degree+2))
-    , m_ctrls(ControlPointVectorType::Zero(2,(Degree==Dynamic ? 1 : Degree+1))) 
-    {
-      // in theory this code can go to the initializer list but it will get pretty
-      // much unreadable ...
-      enum { MinDegree = (Degree==Dynamic ? 0 : Degree) };
-      m_knots.template segment<MinDegree+1>(0) = Array<Scalar,1,MinDegree+1>::Zero();
-      m_knots.template segment<MinDegree+1>(MinDegree+1) = Array<Scalar,1,MinDegree+1>::Ones();
-    }
-
-    /**
-    * \brief Creates a spline from a knot vector and control points.
-    * \param knots The spline's knot vector.
-    * \param ctrls The spline's control point vector.
-    **/
-    template <typename OtherVectorType, typename OtherArrayType>
-    Spline(const OtherVectorType& knots, const OtherArrayType& ctrls) : m_knots(knots), m_ctrls(ctrls) {}
-
-    /**
-    * \brief Copy constructor for splines.
-    * \param spline The input spline.
-    **/
-    template <int OtherDegree>
-    Spline(const Spline<Scalar, Dimension, OtherDegree>& spline) : 
-    m_knots(spline.knots()), m_ctrls(spline.ctrls()) {}
-
-    /**
-     * \brief Returns the knots of the underlying spline.
-     **/
-    const KnotVectorType& knots() const { return m_knots; }
-    
-    /**
-     * \brief Returns the control points of the underlying spline.
-     **/    
-    const ControlPointVectorType& ctrls() const { return m_ctrls; }
-
-    /**
-     * \brief Returns the spline value at a given site \f$u\f$.
-     *
-     * The function returns
-     * \f{align*}
-     *   C(u) & = \sum_{i=0}^{n}N_{i,p}(u)P_i
-     * \f}
-     *
-     * \param u Parameter \f$u \in [0;1]\f$ at which the spline is evaluated.
-     * \return The spline value at the given location \f$u\f$.
-     **/
-    PointType operator()(Scalar u) const;
-
-    /**
-     * \brief Evaluation of spline derivatives of up-to given order.
-     *
-     * The function returns
-     * \f{align*}
-     *   \frac{d^i}{du^i}C(u) & = \sum_{j=0}^{n} \frac{d^i}{du^i} N_{j,p}(u)P_j
-     * \f}
-     * for i ranging between 0 and order.
-     *
-     * \param u Parameter \f$u \in [0;1]\f$ at which the spline derivative is evaluated.
-     * \param order The order up to which the derivatives are computed.
-     **/
-    typename SplineTraits<Spline>::DerivativeType
-      derivatives(Scalar u, DenseIndex order) const;
-
-    /**
-     * \copydoc Spline::derivatives
-     * Using the template version of this function is more efficient since
-     * temporary objects are allocated on the stack whenever this is possible.
-     **/    
-    template <int DerivativeOrder>
-    typename SplineTraits<Spline,DerivativeOrder>::DerivativeType
-      derivatives(Scalar u, DenseIndex order = DerivativeOrder) const;
-
-    /**
-     * \brief Computes the non-zero basis functions at the given site.
-     *
-     * Splines have local support and a point from their image is defined
-     * by exactly \f$p+1\f$ control points \f$P_i\f$ where \f$p\f$ is the
-     * spline degree.
-     *
-     * This function computes the \f$p+1\f$ non-zero basis function values
-     * for a given parameter value \f$u\f$. It returns
-     * \f{align*}{
-     *   N_{i,p}(u), \hdots, N_{i+p,p}(u)
-     * \f}
-     *
-     * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis functions 
-     *          are computed.
-     **/
-    typename SplineTraits<Spline>::BasisVectorType
-      basisFunctions(Scalar u) const;
-
-    /**
-     * \brief Computes the non-zero spline basis function derivatives up to given order.
-     *
-     * The function computes
-     * \f{align*}{
-     *   \frac{d^i}{du^i} N_{i,p}(u), \hdots, \frac{d^i}{du^i} N_{i+p,p}(u)
-     * \f}
-     * with i ranging from 0 up to the specified order.
-     *
-     * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis function
-     *          derivatives are computed.
-     * \param order The order up to which the basis function derivatives are computed.
-     **/
-    typename SplineTraits<Spline>::BasisDerivativeType
-      basisFunctionDerivatives(Scalar u, DenseIndex order) const;
-
-    /**
-     * \copydoc Spline::basisFunctionDerivatives
-     * Using the template version of this function is more efficient since
-     * temporary objects are allocated on the stack whenever this is possible.
-     **/    
-    template <int DerivativeOrder>
-    typename SplineTraits<Spline,DerivativeOrder>::BasisDerivativeType
-      basisFunctionDerivatives(Scalar u, DenseIndex order = DerivativeOrder) const;
-
-    /**
-     * \brief Returns the spline degree.
-     **/ 
-    DenseIndex degree() const;
-
-    /** 
-     * \brief Returns the span within the knot vector in which u falls.
-     * \param u The site for which the span is determined.
-     **/
-    DenseIndex span(Scalar u) const;
-
-    /**
-     * \brief Computes the span within the provided knot vector in which u falls.
-     **/
-    static DenseIndex Span(typename SplineTraits<Spline>::Scalar u, DenseIndex degree, const typename SplineTraits<Spline>::KnotVectorType& knots);
-    
-    /**
-     * \brief Returns the spline's non-zero basis functions.
-     *
-     * The function computes and returns
-     * \f{align*}{
-     *   N_{i,p}(u), \hdots, N_{i+p,p}(u)
-     * \f}
-     *
-     * \param u The site at which the basis functions are computed.
-     * \param degree The degree of the underlying spline.
-     * \param knots The underlying spline's knot vector.
-     **/
-    static BasisVectorType BasisFunctions(Scalar u, DenseIndex degree, const KnotVectorType& knots);
-
-
-  private:
-    KnotVectorType m_knots; /*!< Knot vector. */
-    ControlPointVectorType  m_ctrls; /*!< Control points. */
-  };
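-
-  // Usage sketch (illustrative): a quadratic curve in the plane, built from a
-  // clamped knot vector and four control points, evaluated at u = 0.25.
-  //   Spline<double, 2>::KnotVectorType knots(7);
-  //   knots << 0, 0, 0, 0.5, 1, 1, 1;
-  //   Spline<double, 2>::ControlPointVectorType ctrls(2, 4);
-  //   ctrls << 0, 1, 2, 3,
-  //            0, 1, 0, 1;
-  //   Spline<double, 2> spline(knots, ctrls);
-  //   Spline<double, 2>::PointType pt = spline(0.25);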
-
-  template <typename _Scalar, int _Dim, int _Degree>
-  DenseIndex Spline<_Scalar, _Dim, _Degree>::Span(
-    typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::Scalar u,
-    DenseIndex degree,
-    const typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::KnotVectorType& knots)
-  {
-    // Piegl & Tiller, "The NURBS Book", A2.1 (p. 68)
-    if (u <= knots(0)) return degree;
-    const Scalar* pos = std::upper_bound(knots.data()+degree-1, knots.data()+knots.size()-degree-1, u);
-    return static_cast<DenseIndex>( std::distance(knots.data(), pos) - 1 );
-  }
-
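-  // Evaluates the p+1 non-zero basis functions at u with the Cox-de Boor style
-  // recursion; cf. Piegl & Tiller, "The NURBS Book", algorithm A2.2 (BasisFuns).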
-  template <typename _Scalar, int _Dim, int _Degree>
-  typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType
-    Spline<_Scalar, _Dim, _Degree>::BasisFunctions(
-    typename Spline<_Scalar, _Dim, _Degree>::Scalar u,
-    DenseIndex degree,
-    const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots)
-  {
-    typedef typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType BasisVectorType;
-
-    const DenseIndex p = degree;
-    const DenseIndex i = Spline::Span(u, degree, knots);
-
-    const KnotVectorType& U = knots;
-
-    BasisVectorType left(p+1); left(0) = Scalar(0);
-    BasisVectorType right(p+1); right(0) = Scalar(0);        
-
-    VectorBlock<BasisVectorType,Degree>(left,1,p) = u - VectorBlock<const KnotVectorType,Degree>(U,i+1-p,p).reverse();
-    VectorBlock<BasisVectorType,Degree>(right,1,p) = VectorBlock<const KnotVectorType,Degree>(U,i+1,p) - u;
-
-    BasisVectorType N(1,p+1);
-    N(0) = Scalar(1);
-    for (DenseIndex j=1; j<=p; ++j)
-    {
-      Scalar saved = Scalar(0);
-      for (DenseIndex r=0; r<j; r++)
-      {
-        const Scalar tmp = N(r)/(right(r+1)+left(j-r));
-        N[r] = saved + right(r+1)*tmp;
-        saved = left(j-r)*tmp;
-      }
-      N(j) = saved;
-    }
-    return N;
-  }
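Because the returned vector holds the p+1 non-vanishing basis functions at u, a cheap sanity check is that its entries sum to one (the partition-of-unity property). A hedged sketch, reusing the hypothetical Spline2d typedef and knots from the example above:

    // B-spline basis functions form a partition of unity at every parameter value.
    Eigen::SplineTraits<Spline2d>::BasisVectorType
      N = Spline2d::BasisFunctions(0.3, 3, knots);
    assert(std::abs(N.sum() - 1.0) < 1e-12);   // needs <cassert> and <cmath>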
-
-  template <typename _Scalar, int _Dim, int _Degree>
-  DenseIndex Spline<_Scalar, _Dim, _Degree>::degree() const
-  {
-    if (_Degree == Dynamic)
-      return m_knots.size() - m_ctrls.cols() - 1;
-    else
-      return _Degree;
-  }
-
-  template <typename _Scalar, int _Dim, int _Degree>
-  DenseIndex Spline<_Scalar, _Dim, _Degree>::span(Scalar u) const
-  {
-    return Spline::Span(u, degree(), knots());
-  }
-
-  template <typename _Scalar, int _Dim, int _Degree>
-  typename Spline<_Scalar, _Dim, _Degree>::PointType Spline<_Scalar, _Dim, _Degree>::operator()(Scalar u) const
-  {
-    enum { Order = SplineTraits<Spline>::OrderAtCompileTime };
-
-    const DenseIndex span = this->span(u);
-    const DenseIndex p = degree();
-    const BasisVectorType basis_funcs = basisFunctions(u);
-
-    const Replicate<BasisVectorType,Dimension,1> ctrl_weights(basis_funcs);
-    const Block<const ControlPointVectorType,Dimension,Order> ctrl_pts(ctrls(),0,span-p,Dimension,p+1);
-    return (ctrl_weights * ctrl_pts).rowwise().sum();
-  }
-
-  /* --------------------------------------------------------------------------------------------- */
-
-  template <typename SplineType, typename DerivativeType>
-  void derivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& der)
-  {    
-    enum { Dimension = SplineTraits<SplineType>::Dimension };
-    enum { Order = SplineTraits<SplineType>::OrderAtCompileTime };
-    enum { DerivativeOrder = DerivativeType::ColsAtCompileTime };
-
-    typedef typename SplineTraits<SplineType>::Scalar Scalar;
-
-    typedef typename SplineTraits<SplineType>::BasisVectorType BasisVectorType;
-    typedef typename SplineTraits<SplineType>::ControlPointVectorType ControlPointVectorType;
-
-    typedef typename SplineTraits<SplineType,DerivativeOrder>::BasisDerivativeType BasisDerivativeType;
-    typedef typename BasisDerivativeType::ConstRowXpr BasisDerivativeRowXpr;    
-
-    const DenseIndex p = spline.degree();
-    const DenseIndex span = spline.span(u);
-
-    const DenseIndex n = (std::min)(p, order);
-
-    der.resize(Dimension,n+1);
-
-    // Retrieve the basis function derivatives up to the desired order...    
-    const BasisDerivativeType basis_func_ders = spline.template basisFunctionDerivatives<DerivativeOrder>(u, n+1);
-
-    // ... and perform the linear combinations of the control points.
-    for (DenseIndex der_order=0; der_order<n+1; ++der_order)
-    {
-      const Replicate<BasisDerivativeRowXpr,Dimension,1> ctrl_weights( basis_func_ders.row(der_order) );
-      const Block<const ControlPointVectorType,Dimension,Order> ctrl_pts(spline.ctrls(),0,span-p,Dimension,p+1);
-      der.col(der_order) = (ctrl_weights * ctrl_pts).rowwise().sum();
-    }
-  }
-
-  template <typename _Scalar, int _Dim, int _Degree>
-  typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::DerivativeType
-    Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const
-  {
-    typename SplineTraits< Spline >::DerivativeType res;
-    derivativesImpl(*this, u, order, res);
-    return res;
-  }
-
-  template <typename _Scalar, int _Dim, int _Degree>
-  template <int DerivativeOrder>
-  typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::DerivativeType
-    Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const
-  {
-    typename SplineTraits< Spline, DerivativeOrder >::DerivativeType res;
-    derivativesImpl(*this, u, order, res);
-    return res;
-  }
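Both overloads pack the result column-wise: column 0 holds the curve point itself and column k the k-th derivative at u, with min(degree, order)+1 columns in total. A hedged continuation of the hypothetical Spline2d example:

    // First-order derivatives at u = 0.5; ders has Dimension rows (2x2 here).
    Eigen::SplineTraits<Spline2d>::DerivativeType ders = spline.derivatives(0.5, 1);
    // ders.col(0) equals spline(0.5); ders.col(1) is the tangent at u = 0.5.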
-
-  template <typename _Scalar, int _Dim, int _Degree>
-  typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisVectorType
-    Spline<_Scalar, _Dim, _Degree>::basisFunctions(Scalar u) const
-  {
-    return Spline::BasisFunctions(u, degree(), knots());
-  }
-
-  /* --------------------------------------------------------------------------------------------- */
-
-  template <typename SplineType, typename DerivativeType>
-  void basisFunctionDerivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& N_)
-  {
-    enum { Order = SplineTraits<SplineType>::OrderAtCompileTime };
-
-    typedef typename SplineTraits<SplineType>::Scalar Scalar;
-    typedef typename SplineTraits<SplineType>::BasisVectorType BasisVectorType;
-    typedef typename SplineTraits<SplineType>::KnotVectorType KnotVectorType;
-    typedef typename SplineTraits<SplineType>::ControlPointVectorType ControlPointVectorType;
-
-    const KnotVectorType& U = spline.knots();
-
-    const DenseIndex p = spline.degree();
-    const DenseIndex span = spline.span(u);
-
-    const DenseIndex n = (std::min)(p, order);
-
-    N_.resize(n+1, p+1);
-
-    BasisVectorType left = BasisVectorType::Zero(p+1);
-    BasisVectorType right = BasisVectorType::Zero(p+1);
-
-    Matrix<Scalar,Order,Order> ndu(p+1,p+1);
-
-    double saved, temp;
-
-    ndu(0,0) = 1.0;
-
-    DenseIndex j;
-    for (j=1; j<=p; ++j)
-    {
-      left[j] = u-U[span+1-j];
-      right[j] = U[span+j]-u;
-      saved = 0.0;
-
-      for (DenseIndex r=0; r<j; ++r)
-      {
-        /* Lower triangle */
-        ndu(j,r) = right[r+1]+left[j-r];
-        temp = ndu(r,j-1)/ndu(j,r);
-        /* Upper triangle */
-        ndu(r,j) = static_cast<Scalar>(saved+right[r+1] * temp);
-        saved = left[j-r] * temp;
-      }
-
-      ndu(j,j) = static_cast<Scalar>(saved);
-    }
-
-    for (j = p; j>=0; --j) 
-      N_(0,j) = ndu(j,p);
-
-    // Compute the derivatives
-    DerivativeType a(n+1,p+1);
-    DenseIndex r=0;
-    for (; r<=p; ++r)
-    {
-      DenseIndex s1,s2;
-      s1 = 0; s2 = 1; // alternate rows in array a
-      a(0,0) = 1.0;
-
-      // Compute the k-th derivative
-      for (DenseIndex k=1; k<=static_cast<DenseIndex>(n); ++k)
-      {
-        double d = 0.0;
-        DenseIndex rk,pk,j1,j2;
-        rk = r-k; pk = p-k;
-
-        if (r>=k)
-        {
-          a(s2,0) = a(s1,0)/ndu(pk+1,rk);
-          d = a(s2,0)*ndu(rk,pk);
-        }
-
-        if (rk>=-1) j1 = 1;
-        else        j1 = -rk;
-
-        if (r-1 <= pk) j2 = k-1;
-        else           j2 = p-r;
-
-        for (j=j1; j<=j2; ++j)
-        {
-          a(s2,j) = (a(s1,j)-a(s1,j-1))/ndu(pk+1,rk+j);
-          d += a(s2,j)*ndu(rk+j,pk);
-        }
-
-        if (r<=pk)
-        {
-          a(s2,k) = -a(s1,k-1)/ndu(pk+1,r);
-          d += a(s2,k)*ndu(r,pk);
-        }
-
-        N_(k,r) = static_cast<Scalar>(d);
-        j = s1; s1 = s2; s2 = j; // Switch rows
-      }
-    }
-
-    /* Multiply through by the correct factors */
-    /* (Eq. [2.9])                             */
-    r = p;
-    for (DenseIndex k=1; k<=static_cast<DenseIndex>(n); ++k)
-    {
-      for (DenseIndex j=p; j>=0; --j) N_(k,j) *= r;
-      r *= p-k;
-    }
-  }
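Row 0 of the matrix filled here holds the underived basis functions, so it must agree with basisFunctions(u); rows 1..n hold the higher derivatives. A hedged consistency check on the hypothetical spline from above:

    // Row k contains the k-th derivatives of the p+1 non-zero basis functions at u.
    Eigen::SplineTraits<Spline2d>::BasisDerivativeType
      bders = spline.basisFunctionDerivatives(0.5, 1);
    assert(bders.row(0).isApprox(spline.basisFunctions(0.5)));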
-
-  template <typename _Scalar, int _Dim, int _Degree>
-  typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType
-    Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
-  {
-    typename SplineTraits< Spline >::BasisDerivativeType der;
-    basisFunctionDerivativesImpl(*this, u, order, der);
-    return der;
-  }
-
-  template <typename _Scalar, int _Dim, int _Degree>
-  template <int DerivativeOrder>
-  typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType
-    Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
-  {
-    typename SplineTraits< Spline, DerivativeOrder >::BasisDerivativeType der;
-    basisFunctionDerivativesImpl(*this, u, order, der);
-    return der;
-  }
-}
-
-#endif // EIGEN_SPLINE_H
diff --git a/resources/3rdparty/eigen/unsupported/test/CMakeLists.txt b/resources/3rdparty/eigen/unsupported/test/CMakeLists.txt
deleted file mode 100644
index ff0137ec6..000000000
--- a/resources/3rdparty/eigen/unsupported/test/CMakeLists.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-
-include_directories(../../test ../../unsupported ../../Eigen 
-                    ${CMAKE_CURRENT_BINARY_DIR}/../../test)
-
-find_package(GoogleHash)
-if(GOOGLEHASH_FOUND)
-  add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT")
-  include_directories(${GOOGLEHASH_INCLUDES})
-  ei_add_property(EIGEN_TESTED_BACKENDS  "GoogleHash, ")
-else(GOOGLEHASH_FOUND)
-  ei_add_property(EIGEN_MISSING_BACKENDS  "GoogleHash, ")
-endif(GOOGLEHASH_FOUND)
-
-find_package(Adolc)
-if(ADOLC_FOUND)
-  include_directories(${ADOLC_INCLUDES})
-  ei_add_property(EIGEN_TESTED_BACKENDS "Adolc, ")
-  ei_add_test(forward_adolc "" ${ADOLC_LIBRARIES})
-else(ADOLC_FOUND)
-  ei_add_property(EIGEN_MISSING_BACKENDS "Adolc, ")
-endif(ADOLC_FOUND)
-
-# This test seems never to have been successful on x87, so it is considered to contain an FP-related bug.
-# see thread: "non-linear optimization test summary"
-#ei_add_test(NonLinearOptimization)
-
-ei_add_test(NumericalDiff)
-ei_add_test(autodiff)
-
-if (NOT CMAKE_CXX_COMPILER MATCHES "clang\\+\\+$")
-ei_add_test(BVH)
-endif()
-
-ei_add_test(matrix_exponential)
-ei_add_test(matrix_function)
-ei_add_test(matrix_power)
-ei_add_test(matrix_square_root)
-ei_add_test(alignedvector3)
-ei_add_test(FFT)
-
-find_package(MPFR 2.3.0)
-find_package(GMP)
-if(MPFR_FOUND)
-  include_directories(${MPFR_INCLUDES} ./mpreal)
-  ei_add_property(EIGEN_TESTED_BACKENDS "MPFR C++, ")
-  set(EIGEN_MPFR_TEST_LIBRARIES ${MPFR_LIBRARIES} ${GMP_LIBRARIES})
-  ei_add_test(mpreal_support "" "${EIGEN_MPFR_TEST_LIBRARIES}" )
-else()
-  ei_add_property(EIGEN_MISSING_BACKENDS "MPFR C++, ")
-endif()
-
-ei_add_test(sparse_extra   "" "")
-
-find_package(FFTW)
-if(FFTW_FOUND)
-  ei_add_property(EIGEN_TESTED_BACKENDS "fftw, ")
-  include_directories( ${FFTW_INCLUDES} )
-  if(FFTWL_LIB)
-    ei_add_test(FFTW  "-DEIGEN_FFTW_DEFAULT -DEIGEN_HAS_FFTWL" "${FFTW_LIBRARIES}" )
-  else()
-    ei_add_test(FFTW  "-DEIGEN_FFTW_DEFAULT" "${FFTW_LIBRARIES}" )
-  endif()
-else()
-  ei_add_property(EIGEN_MISSING_BACKENDS "fftw, ")
-endif()
-
-option(EIGEN_TEST_NO_OPENGL "Disable OpenGL support in unit tests" OFF)
-if(NOT EIGEN_TEST_NO_OPENGL)
-  find_package(OpenGL)
-  find_package(GLUT)
-  find_package(GLEW)
-  if(OPENGL_FOUND AND GLUT_FOUND AND GLEW_FOUND)
-    ei_add_property(EIGEN_TESTED_BACKENDS "OpenGL, ")
-    set(EIGEN_GL_LIB ${GLUT_LIBRARIES} ${GLEW_LIBRARIES})
-    ei_add_test(openglsupport  "" "${EIGEN_GL_LIB}" )
-  else()
-    ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ")
-  endif()
-else()
-    ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ")
-endif()
-
-ei_add_test(polynomialsolver)
-ei_add_test(polynomialutils)
-ei_add_test(kronecker_product)
-ei_add_test(splines)
-ei_add_test(gmres)
-
diff --git a/resources/3rdparty/eigen/unsupported/test/matrix_exponential.cpp b/resources/3rdparty/eigen/unsupported/test/matrix_exponential.cpp
deleted file mode 100644
index 50dec083d..000000000
--- a/resources/3rdparty/eigen/unsupported/test/matrix_exponential.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "matrix_functions.h"
-
-double binom(int n, int k)
-{
-  double res = 1;
-  for (int i=0; i<k; i++)
-    res = res * (n-k+i+1) / (i+1);
-  return res;
-}
-
-template <typename T>
-T expfn(T x, int)
-{
-  return std::exp(x);
-}
-
-template <typename T>
-void test2dRotation(double tol)
-{
-  Matrix<T,2,2> A, B, C;
-  T angle;
-
-  A << 0, 1, -1, 0;
-  for (int i=0; i<=20; i++)
-  {
-    angle = static_cast<T>(pow(10, i / 5. - 2));
-    B << std::cos(angle), std::sin(angle), -std::sin(angle), std::cos(angle);
-
-    C = (angle*A).matrixFunction(expfn);
-    std::cout << "test2dRotation: i = " << i << "   error funm = " << relerr(C, B);
-    VERIFY(C.isApprox(B, static_cast<T>(tol)));
-
-    C = (angle*A).exp();
-    std::cout << "   error expm = " << relerr(C, B) << "\n";
-    VERIFY(C.isApprox(B, static_cast<T>(tol)));
-  }
-}
-
-template <typename T>
-void test2dHyperbolicRotation(double tol)
-{
-  Matrix<std::complex<T>,2,2> A, B, C;
-  std::complex<T> imagUnit(0,1);
-  T angle, ch, sh;
-
-  for (int i=0; i<=20; i++)
-  {
-    angle = static_cast<T>((i-10) / 2.0);
-    ch = std::cosh(angle);
-    sh = std::sinh(angle);
-    A << 0, angle*imagUnit, -angle*imagUnit, 0;
-    B << ch, sh*imagUnit, -sh*imagUnit, ch;
-
-    C = A.matrixFunction(expfn);
-    std::cout << "test2dHyperbolicRotation: i = " << i << "   error funm = " << relerr(C, B);
-    VERIFY(C.isApprox(B, static_cast<T>(tol)));
-
-    C = A.exp();
-    std::cout << "   error expm = " << relerr(C, B) << "\n";
-    VERIFY(C.isApprox(B, static_cast<T>(tol)));
-  }
-}
-
-template <typename T>
-void testPascal(double tol)
-{
-  for (int size=1; size<20; size++)
-  {
-    Matrix<T,Dynamic,Dynamic> A(size,size), B(size,size), C(size,size);
-    A.setZero();
-    for (int i=0; i<size-1; i++)
-      A(i+1,i) = static_cast<T>(i+1);
-    B.setZero();
-    for (int i=0; i<size; i++)
-      for (int j=0; j<=i; j++)
-    B(i,j) = static_cast<T>(binom(i,j));
-
-    C = A.matrixFunction(expfn);
-    std::cout << "testPascal: size = " << size << "   error funm = " << relerr(C, B);
-    VERIFY(C.isApprox(B, static_cast<T>(tol)));
-
-    C = A.exp();
-    std::cout << "   error expm = " << relerr(C, B) << "\n";
-    VERIFY(C.isApprox(B, static_cast<T>(tol)));
-  }
-}
-
-template<typename MatrixType>
-void randomTest(const MatrixType& m, double tol)
-{
-  /* this test covers the following files:
-     Inverse.h
-  */
-  typename MatrixType::Index rows = m.rows();
-  typename MatrixType::Index cols = m.cols();
-  MatrixType m1(rows, cols), m2(rows, cols), identity = MatrixType::Identity(rows, cols);
-
-  typedef typename NumTraits<typename internal::traits<MatrixType>::Scalar>::Real RealScalar;
-
-  for(int i = 0; i < g_repeat; i++) {
-    m1 = MatrixType::Random(rows, cols);
-
-    m2 = m1.matrixFunction(expfn) * (-m1).matrixFunction(expfn);
-    std::cout << "randomTest: error funm = " << relerr(identity, m2);
-    VERIFY(identity.isApprox(m2, static_cast<RealScalar>(tol)));
-
-    m2 = m1.exp() * (-m1).exp();
-    std::cout << "   error expm = " << relerr(identity, m2) << "\n";
-    VERIFY(identity.isApprox(m2, static_cast<RealScalar>(tol)));
-  }
-}
-
-void test_matrix_exponential()
-{
-  CALL_SUBTEST_2(test2dRotation<double>(1e-13));
-  CALL_SUBTEST_1(test2dRotation<float>(2e-5));  // was 1e-5, relaxed for clang 2.8 / linux / x86-64
-  CALL_SUBTEST_8(test2dRotation<long double>(1e-13)); 
-  CALL_SUBTEST_2(test2dHyperbolicRotation<double>(1e-14));
-  CALL_SUBTEST_1(test2dHyperbolicRotation<float>(1e-5));
-  CALL_SUBTEST_8(test2dHyperbolicRotation<long double>(1e-14));
-  CALL_SUBTEST_6(testPascal<float>(1e-6));
-  CALL_SUBTEST_5(testPascal<double>(1e-15));
-  CALL_SUBTEST_2(randomTest(Matrix2d(), 1e-13));
-  CALL_SUBTEST_7(randomTest(Matrix<double,3,3,RowMajor>(), 1e-13));
-  CALL_SUBTEST_3(randomTest(Matrix4cd(), 1e-13));
-  CALL_SUBTEST_4(randomTest(MatrixXd(8,8), 1e-13));
-  CALL_SUBTEST_1(randomTest(Matrix2f(), 1e-4));
-  CALL_SUBTEST_5(randomTest(Matrix3cf(), 1e-4));
-  CALL_SUBTEST_1(randomTest(Matrix4f(), 1e-4));
-  CALL_SUBTEST_6(randomTest(MatrixXf(8,8), 1e-4));
-  CALL_SUBTEST_9(randomTest(Matrix<long double,Dynamic,Dynamic>(7,7), 1e-13));
-}
diff --git a/resources/3rdparty/eigen/unsupported/test/matrix_square_root.cpp b/resources/3rdparty/eigen/unsupported/test/matrix_square_root.cpp
deleted file mode 100644
index ea541e1ea..000000000
--- a/resources/3rdparty/eigen/unsupported/test/matrix_square_root.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "matrix_functions.h"
-
-template<typename MatrixType>
-void testMatrixSqrt(const MatrixType& m)
-{
-  MatrixType A;
-  generateTestMatrix<MatrixType>::run(A, m.rows());
-  MatrixType sqrtA = A.sqrt();
-  VERIFY_IS_APPROX(sqrtA * sqrtA, A);
-}
-
-void test_matrix_square_root()
-{
-  for (int i = 0; i < g_repeat; i++) {
-    CALL_SUBTEST_1(testMatrixSqrt(Matrix3cf()));
-    CALL_SUBTEST_2(testMatrixSqrt(MatrixXcd(12,12)));
-    CALL_SUBTEST_3(testMatrixSqrt(Matrix4f()));
-    CALL_SUBTEST_4(testMatrixSqrt(Matrix<double,Dynamic,Dynamic,RowMajor>(9, 9)));
-    CALL_SUBTEST_5(testMatrixSqrt(Matrix<float,1,1>()));
-    CALL_SUBTEST_5(testMatrixSqrt(Matrix<std::complex<float>,1,1>()));
-  }
-}