diff --git a/resources/3rdparty/eigen/Eigen/MetisSupport b/resources/3rdparty/eigen/Eigen/MetisSupport
new file mode 100644
index 000000000..a44086ad9
--- /dev/null
+++ b/resources/3rdparty/eigen/Eigen/MetisSupport
@@ -0,0 +1,26 @@
+#ifndef EIGEN_METISSUPPORT_MODULE_H
+#define EIGEN_METISSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+extern "C" {
+#include <metis.h>
+}
+
+
+/** \ingroup Support_modules
+  * \defgroup MetisSupport_Module MetisSupport module
+  *
+  * This module defines an interface to the METIS reordering package.
+  *
+  * \code
+  * #include <Eigen/MetisSupport>
+  * \endcode
+  */
+
+
+#include "src/MetisSupport/MetisSupport.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_METISSUPPORT_MODULE_H
diff --git a/resources/3rdparty/eigen/Eigen/SparseLU b/resources/3rdparty/eigen/Eigen/SparseLU
new file mode 100644
index 000000000..452bc9f83
--- /dev/null
+++ b/resources/3rdparty/eigen/Eigen/SparseLU
@@ -0,0 +1,17 @@
+#ifndef EIGEN_SPARSELU_MODULE_H
+#define EIGEN_SPARSELU_MODULE_H
+
+#include "SparseCore"
+
+
+/** \ingroup Sparse_modules
+  * \defgroup SparseLU_Module SparseLU module
+  *
+  * This module defines a sparse LU factorization for general sparse matrices.
+  */
+
+// Ordering interface
+#include "OrderingMethods"
+
+#include "src/SparseLU/SparseLU.h"
+
+#endif // EIGEN_SPARSELU_MODULE_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/AssignEvaluator.h b/resources/3rdparty/eigen/Eigen/src/Core/AssignEvaluator.h
new file mode 100644
index 000000000..5e134c83a
--- /dev/null
+++ b/resources/3rdparty/eigen/Eigen/src/Core/AssignEvaluator.h
@@ -0,0 +1,755 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ASSIGN_EVALUATOR_H
+#define EIGEN_ASSIGN_EVALUATOR_H
+
+namespace Eigen {
+
+// This implementation is based on Assign.h
+
+namespace internal {
+  
+/***************************************************************************
+* Part 1 : the logic deciding a strategy for traversal and unrolling       *
+***************************************************************************/
+
+// copy_using_evaluator_traits is based on assign_traits
+
+template <typename Derived, typename OtherDerived>
+struct copy_using_evaluator_traits
+{
+public:
+  enum {
+    DstIsAligned = Derived::Flags & AlignedBit,
+    DstHasDirectAccess = Derived::Flags & DirectAccessBit,
+    SrcIsAligned = OtherDerived::Flags & AlignedBit,
+    JointAlignment = bool(DstIsAligned) && bool(SrcIsAligned) ? Aligned : Unaligned,
+    SrcEvalBeforeAssign = (evaluator_traits<OtherDerived>::HasEvalTo == 1)
+  };
+
+private:
+  enum {
+    InnerSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::SizeAtCompileTime)
+              : int(Derived::Flags)&RowMajorBit ? int(Derived::ColsAtCompileTime)
+              : int(Derived::RowsAtCompileTime),
+    InnerMaxSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::MaxSizeAtCompileTime)
+              : int(Derived::Flags)&RowMajorBit ? int(Derived::MaxColsAtCompileTime)
+              : int(Derived::MaxRowsAtCompileTime),
+    MaxSizeAtCompileTime = Derived::SizeAtCompileTime,
+    PacketSize = packet_traits<typename Derived::Scalar>::size
+  };
+
+  enum {
+    StorageOrdersAgree = (int(Derived::IsRowMajor) == int(OtherDerived::IsRowMajor)),
+    MightVectorize = StorageOrdersAgree
+                  && (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit),
+    MayInnerVectorize  = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0
+                       && int(DstIsAligned) && int(SrcIsAligned),
+    MayLinearize = StorageOrdersAgree && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit),
+    MayLinearVectorize = MightVectorize && MayLinearize && DstHasDirectAccess
+                       && (DstIsAligned || MaxSizeAtCompileTime == Dynamic),
+      /* If the destination isn't aligned, we have to do runtime checks and we don't unroll,
+         so it's only good for large enough sizes. */
+    MaySliceVectorize  = MightVectorize && DstHasDirectAccess
+                       && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=3*PacketSize)
+      /* slice vectorization can be slow, so we only want it if the slices are big, which is
+         indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block
+         in a fixed-size matrix */
+  };
+
+public:
+  enum {
+    Traversal = int(SrcEvalBeforeAssign) ? int(AllAtOnceTraversal) 
+              : int(MayInnerVectorize)   ? int(InnerVectorizedTraversal)
+              : int(MayLinearVectorize)  ? int(LinearVectorizedTraversal)
+              : int(MaySliceVectorize)   ? int(SliceVectorizedTraversal)
+              : int(MayLinearize)        ? int(LinearTraversal)
+                                         : int(DefaultTraversal),
+    Vectorized = int(Traversal) == InnerVectorizedTraversal
+              || int(Traversal) == LinearVectorizedTraversal
+              || int(Traversal) == SliceVectorizedTraversal
+  };
+
+private:
+  enum {
+    UnrollingLimit      = EIGEN_UNROLLING_LIMIT * (Vectorized ? int(PacketSize) : 1),
+    MayUnrollCompletely = int(Derived::SizeAtCompileTime) != Dynamic
+                       && int(OtherDerived::CoeffReadCost) != Dynamic
+                       && int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit),
+    MayUnrollInner      = int(InnerSize) != Dynamic
+                       && int(OtherDerived::CoeffReadCost) != Dynamic
+                       && int(InnerSize) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit)
+  };
+
+public:
+  enum {
+    Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal))
+                ? (
+                    int(MayUnrollCompletely) ? int(CompleteUnrolling)
+                  : int(MayUnrollInner)      ? int(InnerUnrolling)
+                                             : int(NoUnrolling)
+                  )
+              : int(Traversal) == int(LinearVectorizedTraversal)
+                ? ( bool(MayUnrollCompletely) && bool(DstIsAligned) ? int(CompleteUnrolling) 
+                                                                    : int(NoUnrolling) )
+              : int(Traversal) == int(LinearTraversal)
+                ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) 
+                                              : int(NoUnrolling) )
+              : int(NoUnrolling)
+  };
+
+#ifdef EIGEN_DEBUG_ASSIGN
+  static void debug()
+  {
+    EIGEN_DEBUG_VAR(DstIsAligned)
+    EIGEN_DEBUG_VAR(SrcIsAligned)
+    EIGEN_DEBUG_VAR(JointAlignment)
+    EIGEN_DEBUG_VAR(InnerSize)
+    EIGEN_DEBUG_VAR(InnerMaxSize)
+    EIGEN_DEBUG_VAR(PacketSize)
+    EIGEN_DEBUG_VAR(StorageOrdersAgree)
+    EIGEN_DEBUG_VAR(MightVectorize)
+    EIGEN_DEBUG_VAR(MayLinearize)
+    EIGEN_DEBUG_VAR(MayInnerVectorize)
+    EIGEN_DEBUG_VAR(MayLinearVectorize)
+    EIGEN_DEBUG_VAR(MaySliceVectorize)
+    EIGEN_DEBUG_VAR(Traversal)
+    EIGEN_DEBUG_VAR(UnrollingLimit)
+    EIGEN_DEBUG_VAR(MayUnrollCompletely)
+    EIGEN_DEBUG_VAR(MayUnrollInner)
+    EIGEN_DEBUG_VAR(Unrolling)
+  }
+#endif
+};
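+
+// For example (an illustration, not part of the original source), assuming
+// 4-float packets such as SSE, a Matrix4f-to-Matrix4f copy resolves to:
+//
+//   typedef internal::copy_using_evaluator_traits<Matrix4f, Matrix4f> Traits;
+//   // InnerSize == PacketSize == 4 and both sides are aligned, so
+//   // Traits::Traversal == InnerVectorizedTraversal; the 16 coefficients
+//   // fit under the unrolling limit, so Traits::Unrolling == CompleteUnrolling.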
+
+/***************************************************************************
+* Part 2 : meta-unrollers
+***************************************************************************/
+
+/************************
+*** Default traversal ***
+************************/
+
+template<typename DstEvaluatorType, typename SrcEvaluatorType, int Index, int Stop>
+struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling
+{
+  typedef typename DstEvaluatorType::XprType DstXprType;
+  
+  enum {
+    outer = Index / DstXprType::InnerSizeAtCompileTime,
+    inner = Index % DstXprType::InnerSizeAtCompileTime
+  };
+
+  EIGEN_STRONG_INLINE static void run(DstEvaluatorType &dstEvaluator, 
+				      SrcEvaluatorType &srcEvaluator)
+  {
+    dstEvaluator.copyCoeffByOuterInner(outer, inner, srcEvaluator);
+    copy_using_evaluator_DefaultTraversal_CompleteUnrolling
+      <DstEvaluatorType, SrcEvaluatorType, Index+1, Stop>
+      ::run(dstEvaluator, srcEvaluator);
+  }
+};
+
+template<typename DstEvaluatorType, typename SrcEvaluatorType, int Stop>
+struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling<DstEvaluatorType, SrcEvaluatorType, Stop, Stop>
+{
+  EIGEN_STRONG_INLINE static void run(DstEvaluatorType&, SrcEvaluatorType&) { }
+};
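+
+// Sketch of how the recursion unrolls: for a 2x2 column-major destination,
+// InnerSizeAtCompileTime == 2 and Index runs from 0 to 3, so run() expands to
+//
+//   dstEvaluator.copyCoeffByOuterInner(0, 0, srcEvaluator);
+//   dstEvaluator.copyCoeffByOuterInner(0, 1, srcEvaluator);
+//   dstEvaluator.copyCoeffByOuterInner(1, 0, srcEvaluator);
+//   dstEvaluator.copyCoeffByOuterInner(1, 1, srcEvaluator);
+//
+// with the outer/inner pairs computed at compile time.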
+
+template<typename DstEvaluatorType, typename SrcEvaluatorType, int Index, int Stop>
+struct copy_using_evaluator_DefaultTraversal_InnerUnrolling
+{
+  EIGEN_STRONG_INLINE static void run(DstEvaluatorType &dstEvaluator, 
+				      SrcEvaluatorType &srcEvaluator, 
+				      int outer)
+  {
+    dstEvaluator.copyCoeffByOuterInner(outer, Index, srcEvaluator);
+    copy_using_evaluator_DefaultTraversal_InnerUnrolling
+      <DstEvaluatorType, SrcEvaluatorType, Index+1, Stop>
+      ::run(dstEvaluator, srcEvaluator, outer);
+  }
+};
+
+template<typename DstEvaluatorType, typename SrcEvaluatorType, int Stop>
+struct copy_using_evaluator_DefaultTraversal_InnerUnrolling<DstEvaluatorType, SrcEvaluatorType, Stop, Stop>
+{
+  EIGEN_STRONG_INLINE static void run(DstEvaluatorType&, SrcEvaluatorType&, int) { }
+};
+
+/***********************
+*** Linear traversal ***
+***********************/
+
+template<typename DstEvaluatorType, typename SrcEvaluatorType, int Index, int Stop>
+struct copy_using_evaluator_LinearTraversal_CompleteUnrolling
+{
+  EIGEN_STRONG_INLINE static void run(DstEvaluatorType &dstEvaluator, 
+				      SrcEvaluatorType &srcEvaluator)
+  {
+    dstEvaluator.copyCoeff(Index, srcEvaluator);
+    copy_using_evaluator_LinearTraversal_CompleteUnrolling
+      <DstEvaluatorType, SrcEvaluatorType, Index+1, Stop>
+      ::run(dstEvaluator, srcEvaluator);
+  }
+};
+
+template<typename DstEvaluatorType, typename SrcEvaluatorType, int Stop>
+struct copy_using_evaluator_LinearTraversal_CompleteUnrolling<DstEvaluatorType, SrcEvaluatorType, Stop, Stop>
+{
+  EIGEN_STRONG_INLINE static void run(DstEvaluatorType&, SrcEvaluatorType&) { }
+};
+
+/**************************
+*** Inner vectorization ***
+**************************/
+
+template<typename DstEvaluatorType, typename SrcEvaluatorType, int Index, int Stop>
+struct copy_using_evaluator_innervec_CompleteUnrolling
+{
+  typedef typename DstEvaluatorType::XprType DstXprType;
+  typedef typename SrcEvaluatorType::XprType SrcXprType;
+
+  enum {
+    outer = Index / DstXprType::InnerSizeAtCompileTime,
+    inner = Index % DstXprType::InnerSizeAtCompileTime,
+    JointAlignment = copy_using_evaluator_traits<DstXprType,SrcXprType>::JointAlignment
+  };
+
+  EIGEN_STRONG_INLINE static void run(DstEvaluatorType &dstEvaluator, 
+				      SrcEvaluatorType &srcEvaluator)
+  {
+    dstEvaluator.template copyPacketByOuterInner<Aligned, JointAlignment>(outer, inner, srcEvaluator);
+    enum { NextIndex = Index + packet_traits<typename DstXprType::Scalar>::size };
+    copy_using_evaluator_innervec_CompleteUnrolling
+      <DstEvaluatorType, SrcEvaluatorType, NextIndex, Stop>
+      ::run(dstEvaluator, srcEvaluator);
+  }
+};
+
+template<typename DstEvaluatorType, typename SrcEvaluatorType, int Stop>
+struct copy_using_evaluator_innervec_CompleteUnrolling<DstEvaluatorType, SrcEvaluatorType, Stop, Stop>
+{
+  EIGEN_STRONG_INLINE static void run(DstEvaluatorType&, SrcEvaluatorType&) { }
+};
+
+template<typename DstEvaluatorType, typename SrcEvaluatorType, int Index, int Stop>
+struct copy_using_evaluator_innervec_InnerUnrolling
+{
+  EIGEN_STRONG_INLINE static void run(DstEvaluatorType &dstEvaluator, 
+				      SrcEvaluatorType &srcEvaluator, 
+				      int outer)
+  {
+    dstEvaluator.template copyPacketByOuterInner<Aligned, Aligned>(outer, Index, srcEvaluator);
+    typedef typename DstEvaluatorType::XprType DstXprType;
+    enum { NextIndex = Index + packet_traits<typename DstXprType::Scalar>::size };
+    copy_using_evaluator_innervec_InnerUnrolling
+      <DstEvaluatorType, SrcEvaluatorType, NextIndex, Stop>
+      ::run(dstEvaluator, srcEvaluator, outer);
+  }
+};
+
+template<typename DstEvaluatorType, typename SrcEvaluatorType, int Stop>
+struct copy_using_evaluator_innervec_InnerUnrolling<DstEvaluatorType, SrcEvaluatorType, Stop, Stop>
+{
+  EIGEN_STRONG_INLINE static void run(DstEvaluatorType&, SrcEvaluatorType&, int) { }
+};
+
+/***************************************************************************
+* Part 3 : implementation of all cases
+***************************************************************************/
+
+// copy_using_evaluator_impl is based on assign_impl
+
+template<typename DstXprType, typename SrcXprType,
+         int Traversal = copy_using_evaluator_traits<DstXprType, SrcXprType>::Traversal,
+         int Unrolling = copy_using_evaluator_traits<DstXprType, SrcXprType>::Unrolling>
+struct copy_using_evaluator_impl;
+
+/************************
+*** Default traversal ***
+************************/
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, DefaultTraversal, NoUnrolling>
+{
+  static void run(DstXprType& dst, const SrcXprType& src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+    typedef typename DstXprType::Index Index;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    for(Index outer = 0; outer < dst.outerSize(); ++outer) {
+      for(Index inner = 0; inner < dst.innerSize(); ++inner) {
+	dstEvaluator.copyCoeffByOuterInner(outer, inner, srcEvaluator);
+      }
+    }
+  }
+};
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, DefaultTraversal, CompleteUnrolling>
+{
+  EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    copy_using_evaluator_DefaultTraversal_CompleteUnrolling
+      <DstEvaluatorType, SrcEvaluatorType, 0, DstXprType::SizeAtCompileTime>
+      ::run(dstEvaluator, srcEvaluator);
+  }
+};
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, DefaultTraversal, InnerUnrolling>
+{
+  typedef typename DstXprType::Index Index;
+  EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    const Index outerSize = dst.outerSize();
+    for(Index outer = 0; outer < outerSize; ++outer)
+      copy_using_evaluator_DefaultTraversal_InnerUnrolling
+	<DstEvaluatorType, SrcEvaluatorType, 0, DstXprType::InnerSizeAtCompileTime>
+        ::run(dstEvaluator, srcEvaluator, outer);
+  }
+};
+
+/***************************
+*** Linear vectorization ***
+***************************/
+
+template <bool IsAligned = false>
+struct unaligned_copy_using_evaluator_impl
+{
+  // If IsAligned is true, this primary template is selected and there is nothing to do.
+  template <typename DstEvaluatorType, typename SrcEvaluatorType>
+  static EIGEN_STRONG_INLINE void run(DstEvaluatorType&, const SrcEvaluatorType&,
+                                      typename DstEvaluatorType::Index, typename DstEvaluatorType::Index) {}
+};
+
+template <>
+struct unaligned_copy_using_evaluator_impl<false>
+{
+  // MSVC must not inline this function. If it does, it fails to optimize the
+  // packet access path.
+#ifdef _MSC_VER
+  template <typename DstEvaluatorType, typename SrcEvaluatorType>
+  static EIGEN_DONT_INLINE void run(DstEvaluatorType &dstEvaluator, 
+				    const SrcEvaluatorType &srcEvaluator, 
+				    typename DstEvaluatorType::Index start, 
+				    typename DstEvaluatorType::Index end)
+#else
+  template <typename DstEvaluatorType, typename SrcEvaluatorType>
+  static EIGEN_STRONG_INLINE void run(DstEvaluatorType &dstEvaluator, 
+				      const SrcEvaluatorType &srcEvaluator, 
+				      typename DstEvaluatorType::Index start, 
+				      typename DstEvaluatorType::Index end)
+#endif
+  {
+    for (typename DstEvaluatorType::Index index = start; index < end; ++index)
+      dstEvaluator.copyCoeff(index, srcEvaluator);
+  }
+};
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, LinearVectorizedTraversal, NoUnrolling>
+{
+  EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+    typedef typename DstXprType::Index Index;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    const Index size = dst.size();
+    typedef packet_traits<typename DstXprType::Scalar> PacketTraits;
+    enum {
+      packetSize = PacketTraits::size,
+      dstIsAligned = int(copy_using_evaluator_traits<DstXprType,SrcXprType>::DstIsAligned),
+      dstAlignment = PacketTraits::AlignedOnScalar ? Aligned : dstIsAligned,
+      srcAlignment = copy_using_evaluator_traits<DstXprType,SrcXprType>::JointAlignment
+    };
+    const Index alignedStart = dstIsAligned ? 0 : first_aligned(&dstEvaluator.coeffRef(0), size);
+    const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
+
+    unaligned_copy_using_evaluator_impl<dstIsAligned!=0>::run(dstEvaluator, srcEvaluator, 0, alignedStart);
+
+    for(Index index = alignedStart; index < alignedEnd; index += packetSize)
+    {
+      dstEvaluator.template copyPacket<dstAlignment, srcAlignment>(index, srcEvaluator);
+    }
+
+    unaligned_copy_using_evaluator_impl<>::run(dstEvaluator, srcEvaluator, alignedEnd, size);
+  }
+};
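+
+// Runtime sketch: with packetSize == 4 and first_aligned returning 2 for a
+// destination of size 11, alignedStart == 2 and alignedEnd == 10, so indices
+// [0,2) are copied one coefficient at a time, [2,10) with two aligned packet
+// stores, and [10,11) coefficient-wise again.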
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, LinearVectorizedTraversal, CompleteUnrolling>
+{
+  typedef typename DstXprType::Index Index;
+  EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    enum { size = DstXprType::SizeAtCompileTime,
+           packetSize = packet_traits<typename DstXprType::Scalar>::size,
+           alignedSize = (size/packetSize)*packetSize };
+
+    copy_using_evaluator_innervec_CompleteUnrolling
+      <DstEvaluatorType, SrcEvaluatorType, 0, alignedSize>
+      ::run(dstEvaluator, srcEvaluator);
+    copy_using_evaluator_DefaultTraversal_CompleteUnrolling
+      <DstEvaluatorType, SrcEvaluatorType, alignedSize, size>
+      ::run(dstEvaluator, srcEvaluator);
+  }
+};
+
+/**************************
+*** Inner vectorization ***
+**************************/
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, InnerVectorizedTraversal, NoUnrolling>
+{
+  inline static void run(DstXprType &dst, const SrcXprType &src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+    typedef typename DstXprType::Index Index;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    const Index innerSize = dst.innerSize();
+    const Index outerSize = dst.outerSize();
+    const Index packetSize = packet_traits<typename DstXprType::Scalar>::size;
+    for(Index outer = 0; outer < outerSize; ++outer)
+      for(Index inner = 0; inner < innerSize; inner+=packetSize) {
+	dstEvaluator.template copyPacketByOuterInner<Aligned, Aligned>(outer, inner, srcEvaluator);
+      }
+  }
+};
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, InnerVectorizedTraversal, CompleteUnrolling>
+{
+  EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    copy_using_evaluator_innervec_CompleteUnrolling
+      <DstEvaluatorType, SrcEvaluatorType, 0, DstXprType::SizeAtCompileTime>
+      ::run(dstEvaluator, srcEvaluator);
+  }
+};
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, InnerVectorizedTraversal, InnerUnrolling>
+{
+  typedef typename DstXprType::Index Index;
+  EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    const Index outerSize = dst.outerSize();
+    for(Index outer = 0; outer < outerSize; ++outer)
+      copy_using_evaluator_innervec_InnerUnrolling
+	<DstEvaluatorType, SrcEvaluatorType, 0, DstXprType::InnerSizeAtCompileTime>
+        ::run(dstEvaluator, srcEvaluator, outer);
+  }
+};
+
+/***********************
+*** Linear traversal ***
+***********************/
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, LinearTraversal, NoUnrolling>
+{
+  inline static void run(DstXprType &dst, const SrcXprType &src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+    typedef typename DstXprType::Index Index;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    const Index size = dst.size();
+    for(Index i = 0; i < size; ++i)
+      dstEvaluator.copyCoeff(i, srcEvaluator);
+  }
+};
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, LinearTraversal, CompleteUnrolling>
+{
+  EIGEN_STRONG_INLINE static void run(DstXprType &dst, const SrcXprType &src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    copy_using_evaluator_LinearTraversal_CompleteUnrolling
+      <DstEvaluatorType, SrcEvaluatorType, 0, DstXprType::SizeAtCompileTime>
+      ::run(dstEvaluator, srcEvaluator);
+  }
+};
+
+/**************************
+*** Slice vectorization ***
+**************************/
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, SliceVectorizedTraversal, NoUnrolling>
+{
+  inline static void run(DstXprType &dst, const SrcXprType &src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+    typedef typename DstXprType::Index Index;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    typedef packet_traits<typename DstXprType::Scalar> PacketTraits;
+    enum {
+      packetSize = PacketTraits::size,
+      alignable = PacketTraits::AlignedOnScalar,
+      dstAlignment = alignable ? Aligned : int(copy_using_evaluator_traits<DstXprType,SrcXprType>::DstIsAligned)
+    };
+    const Index packetAlignedMask = packetSize - 1;
+    const Index innerSize = dst.innerSize();
+    const Index outerSize = dst.outerSize();
+    const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
+    Index alignedStart = ((!alignable) || copy_using_evaluator_traits<DstXprType,SrcXprType>::DstIsAligned) ? 0
+                       : first_aligned(&dstEvaluator.coeffRef(0,0), innerSize);
+
+    for(Index outer = 0; outer < outerSize; ++outer)
+    {
+      const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
+      // do the non-vectorizable part of the assignment
+      for(Index inner = 0; inner<alignedStart ; ++inner) {
+        dstEvaluator.copyCoeffByOuterInner(outer, inner, srcEvaluator);
+      }
+
+      // do the vectorizable part of the assignment
+      for(Index inner = alignedStart; inner<alignedEnd; inner+=packetSize) {
+        dstEvaluator.template copyPacketByOuterInner<dstAlignment, Unaligned>(outer, inner, srcEvaluator);
+      }
+
+      // do the non-vectorizable part of the assignment
+      for(Index inner = alignedEnd; inner<innerSize ; ++inner) {
+        dstEvaluator.copyCoeffByOuterInner(outer, inner, srcEvaluator);
+      }
+
+      alignedStart = std::min<Index>((alignedStart+alignedStep)%packetSize, innerSize);
+    }
+  }
+};
+
+/****************************
+*** All-at-once traversal ***
+****************************/
+
+template<typename DstXprType, typename SrcXprType>
+struct copy_using_evaluator_impl<DstXprType, SrcXprType, AllAtOnceTraversal, NoUnrolling>
+{
+  inline static void run(DstXprType &dst, const SrcXprType &src)
+  {
+    typedef typename evaluator<DstXprType>::type DstEvaluatorType;
+    typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
+    typedef typename DstXprType::Index Index;
+
+    DstEvaluatorType dstEvaluator(dst);
+    SrcEvaluatorType srcEvaluator(src);
+
+    // Evaluate the rhs into a temporary to prevent aliasing problems in a = a * a;
+    // TODO: Do not pass the xpr object to evalTo()
+    srcEvaluator.evalTo(dstEvaluator, dst);
+  }
+};
+
+/***************************************************************************
+* Part 4 : Entry points
+***************************************************************************/
+
+// Based on DenseBase::lazyAssign()
+
+template<typename DstXprType, template <typename> class StorageBase, typename SrcXprType>
+EIGEN_STRONG_INLINE
+const DstXprType& copy_using_evaluator(const NoAlias<DstXprType, StorageBase>& dst, 
+				       const EigenBase<SrcXprType>& src)
+{
+  return noalias_copy_using_evaluator(dst.expression(), src.derived());
+}
+
+template<typename XprType, int AssumeAliasing = evaluator_traits<XprType>::AssumeAliasing>
+struct AddEvalIfAssumingAliasing;
+
+template<typename XprType>
+struct AddEvalIfAssumingAliasing<XprType, 0>
+{
+  static const XprType& run(const XprType& xpr) 
+  {
+    return xpr;
+  }
+};
+
+template<typename XprType>
+struct AddEvalIfAssumingAliasing<XprType, 1>
+{
+  static const EvalToTemp<XprType> run(const XprType& xpr)
+  {
+    return EvalToTemp<XprType>(xpr);
+  }
+};
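+
+// In other words: for expressions whose evaluators are safe to read during
+// assignment, run() forwards the expression unchanged; for aliasing-prone
+// ones it wraps them in EvalToTemp (see CoreEvaluators.h), so the expression
+// is first evaluated into a temporary and the copy below reads from that.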
+
+template<typename DstXprType, typename SrcXprType>
+EIGEN_STRONG_INLINE
+const DstXprType& copy_using_evaluator(const EigenBase<DstXprType>& dst, const EigenBase<SrcXprType>& src)
+{
+  return noalias_copy_using_evaluator(dst.const_cast_derived(), 
+				      AddEvalIfAssumingAliasing<SrcXprType>::run(src.derived()));
+}
+
+template<typename DstXprType, typename SrcXprType>
+EIGEN_STRONG_INLINE
+const DstXprType& noalias_copy_using_evaluator(const PlainObjectBase<DstXprType>& dst, const EigenBase<SrcXprType>& src)
+{
+#ifdef EIGEN_DEBUG_ASSIGN
+  internal::copy_using_evaluator_traits<DstXprType, SrcXprType>::debug();
+#endif
+#ifdef EIGEN_NO_AUTOMATIC_RESIZING
+  eigen_assert((dst.size()==0 || (DstXprType::IsVectorAtCompileTime ? (dst.size() == src.size())
+                                  : (dst.rows() == src.rows() && dst.cols() == src.cols())))
+               && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
+#else
+  dst.const_cast_derived().resizeLike(src.derived());
+#endif
+  return copy_using_evaluator_without_resizing(dst.const_cast_derived(), src.derived());
+}
+
+template<typename DstXprType, typename SrcXprType>
+EIGEN_STRONG_INLINE
+const DstXprType& noalias_copy_using_evaluator(const EigenBase<DstXprType>& dst, const EigenBase<SrcXprType>& src)
+{
+  return copy_using_evaluator_without_resizing(dst.const_cast_derived(), src.derived());
+}
+
+template<typename DstXprType, typename SrcXprType>
+const DstXprType& copy_using_evaluator_without_resizing(const DstXprType& dst, const SrcXprType& src)
+{
+#ifdef EIGEN_DEBUG_ASSIGN
+  internal::copy_using_evaluator_traits<DstXprType, SrcXprType>::debug();
+#endif
+  eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
+  copy_using_evaluator_impl<DstXprType, SrcXprType>::run(const_cast<DstXprType&>(dst), src);
+  return dst;
+}
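+
+// Illustrative usage (assuming float matrices):
+//
+//   MatrixXf a(2,2), b(2,2);
+//   b.setRandom();
+//   internal::copy_using_evaluator(a, b + b);  // plays the role of a = b + b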
+
+// Based on DenseBase::swap()
+// TODO: Check whether we need to do something special for swapping two
+//       Arrays or Matrices.
+
+template<typename DstXprType, typename SrcXprType>
+void swap_using_evaluator(const DstXprType& dst, const SrcXprType& src)
+{
+  copy_using_evaluator(SwapWrapper<DstXprType>(const_cast<DstXprType&>(dst)), src);
+}
+
+// Based on MatrixBase::operator+= (in CwiseBinaryOp.h)
+template<typename DstXprType, typename SrcXprType>
+void add_assign_using_evaluator(const MatrixBase<DstXprType>& dst, const MatrixBase<SrcXprType>& src)
+{
+  typedef typename DstXprType::Scalar Scalar;
+  SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
+  copy_using_evaluator(tmp, src.derived());
+}
+
+// Based on ArrayBase::operator+=
+template<typename DstXprType, typename SrcXprType>
+void add_assign_using_evaluator(const ArrayBase<DstXprType>& dst, const ArrayBase<SrcXprType>& src)
+{
+  typedef typename DstXprType::Scalar Scalar;
+  SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
+  copy_using_evaluator(tmp, src.derived());
+}
+
+// TODO: Add add_assign_using_evaluator for EigenBase ?
+
+template<typename DstXprType, typename SrcXprType>
+void subtract_assign_using_evaluator(const MatrixBase<DstXprType>& dst, const MatrixBase<SrcXprType>& src)
+{
+  typedef typename DstXprType::Scalar Scalar;
+  SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
+  copy_using_evaluator(tmp, src.derived());
+}
+
+template<typename DstXprType, typename SrcXprType>
+void subtract_assign_using_evaluator(const ArrayBase<DstXprType>& dst, const ArrayBase<SrcXprType>& src)
+{
+  typedef typename DstXprType::Scalar Scalar;
+  SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
+  copy_using_evaluator(tmp, src.derived());
+}
+
+template<typename DstXprType, typename SrcXprType>
+void multiply_assign_using_evaluator(const ArrayBase<DstXprType>& dst, const ArrayBase<SrcXprType>& src)
+{
+  typedef typename DstXprType::Scalar Scalar;
+  SelfCwiseBinaryOp<internal::scalar_product_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
+  copy_using_evaluator(tmp, src.derived());
+}
+
+template<typename DstXprType, typename SrcXprType>
+void divide_assign_using_evaluator(const ArrayBase<DstXprType>& dst, const ArrayBase<SrcXprType>& src)
+{
+  typedef typename DstXprType::Scalar Scalar;
+  SelfCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
+  copy_using_evaluator(tmp, src.derived());
+}
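+
+// Illustrative usage of the compound-assignment helpers; the Array overloads
+// exist because coefficient-wise *= and /= are only defined for arrays:
+//
+//   ArrayXXf a = ArrayXXf::Random(2,2), b = ArrayXXf::Random(2,2);
+//   internal::add_assign_using_evaluator(a, b);       // plays the role of a += b
+//   internal::multiply_assign_using_evaluator(a, b);  // plays the role of a *= b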
+
+
+} // namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_ASSIGN_EVALUATOR_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/CoreEvaluators.h b/resources/3rdparty/eigen/Eigen/src/Core/CoreEvaluators.h
new file mode 100644
index 000000000..cca01251c
--- /dev/null
+++ b/resources/3rdparty/eigen/Eigen/src/Core/CoreEvaluators.h
@@ -0,0 +1,1299 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_COREEVALUATORS_H
+#define EIGEN_COREEVALUATORS_H
+
+namespace Eigen {
+
+namespace internal {
+
+// evaluator_traits<T> contains traits for evaluator_impl<T> 
+
+template<typename T>
+struct evaluator_traits
+{
+  // 1 if evaluator_impl<T>::evalTo() exists
+  // 0 if evaluator_impl<T> allows coefficient-based access
+  static const int HasEvalTo = 0;
+
+  // 1 if assignment A = B assumes aliasing when B is of type T and thus B needs to be evaluated into a
+  // temporary; 0 if not.
+  static const int AssumeAliasing = 0;
+};
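+
+// A hypothetical specialization for an aliasing-prone expression that is
+// evaluated all at once (a matrix product, say) would look roughly like:
+//
+//   template<typename Lhs, typename Rhs>
+//   struct evaluator_traits<SomeProductXpr<Lhs, Rhs> >
+//   {
+//     static const int HasEvalTo = 1;       // provides evalTo(), no coeff access
+//     static const int AssumeAliasing = 1;  // a = a * a needs a temporary
+//   };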
+
+// expression class for evaluating nested expression to a temporary
+ 
+template<typename ArgType>
+class EvalToTemp;
+
+// evaluator<T>::type is type of evaluator for T
+// evaluator<T>::nestedType is type of evaluator if T is nested inside another evaluator
+ 
+template<typename T>
+struct evaluator_impl 
+{ };
+ 
+template<typename T, int Nested = evaluator_traits<T>::HasEvalTo>
+struct evaluator_nested_type;
+
+template<typename T>
+struct evaluator_nested_type<T, 0>
+{
+  typedef evaluator_impl<T> type;
+};
+
+template<typename T>
+struct evaluator_nested_type<T, 1>
+{
+  typedef evaluator_impl<EvalToTemp<T> > type;
+};
+
+template<typename T>
+struct evaluator
+{
+  typedef evaluator_impl<T> type;
+  typedef typename evaluator_nested_type<T>::type nestedType;
+};
+
+// TODO: Think about const-correctness
+
+template<typename T>
+struct evaluator<const T>
+  : evaluator<T>
+{ };
+
+// ---------- base class for all writable evaluators ----------
+
+template<typename ExpressionType>
+struct evaluator_impl_base
+{
+  typedef typename ExpressionType::Index Index;
+
+  template<typename OtherEvaluatorType>
+  void copyCoeff(Index row, Index col, const OtherEvaluatorType& other)
+  {
+    derived().coeffRef(row, col) = other.coeff(row, col);
+  }
+
+  template<typename OtherEvaluatorType>
+  void copyCoeffByOuterInner(Index outer, Index inner, const OtherEvaluatorType& other)
+  {
+    Index row = rowIndexByOuterInner(outer, inner); 
+    Index col = colIndexByOuterInner(outer, inner); 
+    derived().copyCoeff(row, col, other);
+  }
+
+  template<typename OtherEvaluatorType>
+  void copyCoeff(Index index, const OtherEvaluatorType& other)
+  {
+    derived().coeffRef(index) = other.coeff(index);
+  }
+
+  template<int StoreMode, int LoadMode, typename OtherEvaluatorType>
+  void copyPacket(Index row, Index col, const OtherEvaluatorType& other)
+  {
+    derived().template writePacket<StoreMode>(row, col, 
+      other.template packet<LoadMode>(row, col));
+  }
+
+  template<int StoreMode, int LoadMode, typename OtherEvaluatorType>
+  void copyPacketByOuterInner(Index outer, Index inner, const OtherEvaluatorType& other)
+  {
+    Index row = rowIndexByOuterInner(outer, inner); 
+    Index col = colIndexByOuterInner(outer, inner); 
+    derived().template copyPacket<StoreMode, LoadMode>(row, col, other);
+  }
+
+  template<int StoreMode, int LoadMode, typename OtherEvaluatorType>
+  void copyPacket(Index index, const OtherEvaluatorType& other)
+  {
+    derived().template writePacket<StoreMode>(index, 
+      other.template packet<LoadMode>(index));
+  }
+
+  Index rowIndexByOuterInner(Index outer, Index inner) const
+  {
+    return int(ExpressionType::RowsAtCompileTime) == 1 ? 0
+      : int(ExpressionType::ColsAtCompileTime) == 1 ? inner
+      : int(ExpressionType::Flags)&RowMajorBit ? outer
+      : inner;
+  }
+
+  Index colIndexByOuterInner(Index outer, Index inner) const
+  {
+    return int(ExpressionType::ColsAtCompileTime) == 1 ? 0
+      : int(ExpressionType::RowsAtCompileTime) == 1 ? inner
+      : int(ExpressionType::Flags)&RowMajorBit ? inner
+      : outer;
+  }
+
+  evaluator_impl<ExpressionType>& derived() 
+  {
+    return *static_cast<evaluator_impl<ExpressionType>*>(this); 
+  }
+};
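+
+// Example of the outer/inner mapping above: for a column-major matrix, outer
+// indexes columns and inner indexes rows, so rowIndexByOuterInner(2, 1) == 1
+// and colIndexByOuterInner(2, 1) == 2; for a row-major matrix the two are
+// swapped, and for vectors the single nontrivial index is just `inner`.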
+
+// -------------------- Matrix and Array --------------------
+//
+// evaluator_impl<PlainObjectBase> is a common base class for the
+// Matrix and Array evaluators.
+
+template<typename Derived>
+struct evaluator_impl<PlainObjectBase<Derived> >
+  : evaluator_impl_base<Derived>
+{
+  typedef PlainObjectBase<Derived> PlainObjectType;
+
+  enum {
+    IsRowMajor = PlainObjectType::IsRowMajor,
+    IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
+    RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
+    ColsAtCompileTime = PlainObjectType::ColsAtCompileTime
+  };
+
+  evaluator_impl(const PlainObjectType& m) 
+    : m_data(m.data()), m_outerStride(IsVectorAtCompileTime ? 0 : m.outerStride()) 
+  { }
+
+  typedef typename PlainObjectType::Index Index;
+  typedef typename PlainObjectType::Scalar Scalar;
+  typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;
+  typedef typename PlainObjectType::PacketScalar PacketScalar;
+  typedef typename PlainObjectType::PacketReturnType PacketReturnType;
+
+  CoeffReturnType coeff(Index row, Index col) const
+  {
+    if (IsRowMajor)
+      return m_data[row * m_outerStride.value() + col];
+    else
+      return m_data[row + col * m_outerStride.value()];
+  }
+
+  CoeffReturnType coeff(Index index) const
+  {
+    return m_data[index];
+  }
+
+  Scalar& coeffRef(Index row, Index col)
+  {
+    if (IsRowMajor)
+      return const_cast<Scalar*>(m_data)[row * m_outerStride.value() + col];
+    else
+      return const_cast<Scalar*>(m_data)[row + col * m_outerStride.value()];
+  }
+
+  Scalar& coeffRef(Index index)
+  {
+    return const_cast<Scalar*>(m_data)[index];
+  }
+
+  template<int LoadMode> 
+  PacketReturnType packet(Index row, Index col) const
+  {
+    if (IsRowMajor)
+      return ploadt<PacketScalar, LoadMode>(m_data + row * m_outerStride.value() + col);
+    else
+      return ploadt<PacketScalar, LoadMode>(m_data + row + col * m_outerStride.value());
+  }
+
+  template<int LoadMode> 
+  PacketReturnType packet(Index index) const
+  {
+    return ploadt<PacketScalar, LoadMode>(m_data + index);
+  }
+
+  template<int StoreMode> 
+  void writePacket(Index row, Index col, const PacketScalar& x)
+  {
+    if (IsRowMajor)
+      return pstoret<Scalar, PacketScalar, StoreMode>
+	            (const_cast<Scalar*>(m_data) + row * m_outerStride.value() + col, x);
+    else
+      return pstoret<Scalar, PacketScalar, StoreMode>
+                    (const_cast<Scalar*>(m_data) + row + col * m_outerStride.value(), x);
+  }
+
+  template<int StoreMode> 
+  void writePacket(Index index, const PacketScalar& x)
+  {
+    return pstoret<Scalar, PacketScalar, StoreMode>(const_cast<Scalar*>(m_data) + index, x);
+  }
+
+protected:
+  const Scalar *m_data;
+
+  // We do not need to know the outer stride for vectors
+  variable_if_dynamic<Index, IsVectorAtCompileTime ? 0 
+		             : int(IsRowMajor) ? ColsAtCompileTime 
+		             : RowsAtCompileTime> m_outerStride;
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+struct evaluator_impl<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
+  : evaluator_impl<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
+{
+  typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
+
+  evaluator_impl(const XprType& m) 
+    : evaluator_impl<PlainObjectBase<XprType> >(m) 
+  { }
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+struct evaluator_impl<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
+  : evaluator_impl<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
+{
+  typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
+
+  evaluator_impl(const XprType& m) 
+    : evaluator_impl<PlainObjectBase<XprType> >(m) 
+  { }
+};
+
+// -------------------- EvalToTemp --------------------
+
+template<typename ArgType>
+struct traits<EvalToTemp<ArgType> >
+  : public traits<ArgType>
+{ };
+
+template<typename ArgType>
+class EvalToTemp
+  : public dense_xpr_base<EvalToTemp<ArgType> >::type
+{
+ public:
+ 
+  typedef typename dense_xpr_base<EvalToTemp>::type Base;
+  EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)
+ 
+  EvalToTemp(const ArgType& arg)
+    : m_arg(arg)
+  { }
+ 
+  const ArgType& arg() const
+  {
+    return m_arg;
+  }
+
+  Index rows() const 
+  {
+    return m_arg.rows();
+  }
+
+  Index cols() const 
+  {
+    return m_arg.cols();
+  }
+
+ private:
+  const ArgType& m_arg;
+};
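+
+// Usage sketch: EvalToTemp is what AddEvalIfAssumingAliasing (see
+// AssignEvaluator.h) wraps around aliasing-prone expressions. Constructing
+// the wrapper costs nothing; constructing its evaluator (below) materializes
+// the argument into a PlainObject temporary:
+//
+//   EvalToTemp<SomeXpr> wrapped(xpr);                    // no work yet
+//   evaluator<EvalToTemp<SomeXpr> >::type eval(wrapped); // evaluates xpr here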
+ 
+template<typename ArgType>
+struct evaluator_impl<EvalToTemp<ArgType> >
+{
+  typedef EvalToTemp<ArgType> XprType;
+  typedef typename ArgType::PlainObject PlainObject;
+
+  evaluator_impl(const XprType& xpr) 
+    : m_result(xpr.rows(), xpr.cols()), m_resultImpl(m_result)
+  { 
+    copy_using_evaluator_without_resizing(m_result, xpr.arg());
+  }
+
+  // This constructor is used when nesting an EvalToTemp evaluator in another evaluator
+  evaluator_impl(const ArgType& arg) 
+    : m_result(arg.rows(), arg.cols()), m_resultImpl(m_result)
+  { 
+    copy_using_evaluator_without_resizing(m_result, arg);
+  }
+
+  typedef typename PlainObject::Index Index;
+  typedef typename PlainObject::Scalar Scalar;
+  typedef typename PlainObject::CoeffReturnType CoeffReturnType;
+  typedef typename PlainObject::PacketScalar PacketScalar;
+  typedef typename PlainObject::PacketReturnType PacketReturnType;
+
+  // All other functions are forwarded to m_resultImpl
+
+  CoeffReturnType coeff(Index row, Index col) const 
+  { 
+    return m_resultImpl.coeff(row, col); 
+  }
+  
+  CoeffReturnType coeff(Index index) const 
+  { 
+    return m_resultImpl.coeff(index); 
+  }
+  
+  Scalar& coeffRef(Index row, Index col) 
+  { 
+    return m_resultImpl.coeffRef(row, col); 
+  }
+  
+  Scalar& coeffRef(Index index) 
+  { 
+    return m_resultImpl.coeffRef(index); 
+  }
+
+  template<int LoadMode> 
+  PacketReturnType packet(Index row, Index col) const
+  {
+    return m_resultImpl.packet<LoadMode>(row, col);
+  }
+
+  template<int LoadMode> 
+  PacketReturnType packet(Index index) const
+  {
+    return m_resultImpl.packet<LoadMode>(index);
+  }
+
+  template<int StoreMode> 
+  void writePacket(Index row, Index col, const PacketScalar& x)
+  {
+    m_resultImpl.writePacket<StoreMode>(row, col, x);
+  }
+
+  template<int StoreMode> 
+  void writePacket(Index index, const PacketScalar& x)
+  {
+    m_resultImpl.writePacket<StoreMode>(index, x);
+  }
+
+protected:
+  PlainObject m_result;
+  typename evaluator<PlainObject>::nestedType m_resultImpl;
+};
+
+// -------------------- Transpose --------------------
+
+template<typename ArgType>
+struct evaluator_impl<Transpose<ArgType> >
+  : evaluator_impl_base<Transpose<ArgType> >
+{
+  typedef Transpose<ArgType> XprType;
+
+  evaluator_impl(const XprType& t) : m_argImpl(t.nestedExpression()) {}
+
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename XprType::PacketScalar PacketScalar;
+  typedef typename XprType::PacketReturnType PacketReturnType;
+
+  CoeffReturnType coeff(Index row, Index col) const
+  {
+    return m_argImpl.coeff(col, row);
+  }
+
+  CoeffReturnType coeff(Index index) const
+  {
+    return m_argImpl.coeff(index);
+  }
+
+  Scalar& coeffRef(Index row, Index col)
+  {
+    return m_argImpl.coeffRef(col, row);
+  }
+
+  Scalar& coeffRef(Index index)
+  {
+    return m_argImpl.coeffRef(index);
+  }
+
+  template<int LoadMode>
+  PacketReturnType packet(Index row, Index col) const
+  {
+    return m_argImpl.template packet<LoadMode>(col, row);
+  }
+
+  template<int LoadMode>
+  PacketReturnType packet(Index index) const
+  {
+    return m_argImpl.template packet<LoadMode>(index);
+  }
+
+  template<int StoreMode> 
+  void writePacket(Index row, Index col, const PacketScalar& x)
+  {
+    m_argImpl.template writePacket<StoreMode>(col, row, x);
+  }
+
+  template<int StoreMode> 
+  void writePacket(Index index, const PacketScalar& x)
+  {
+    m_argImpl.template writePacket<StoreMode>(index, x);
+  }
+
+protected:
+  typename evaluator<ArgType>::nestedType m_argImpl;
+};
+
+// -------------------- CwiseNullaryOp --------------------
+
+template<typename NullaryOp, typename PlainObjectType>
+struct evaluator_impl<CwiseNullaryOp<NullaryOp,PlainObjectType> >
+{
+  typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;
+
+  evaluator_impl(const XprType& n) 
+    : m_functor(n.functor()) 
+  { }
+
+  typedef typename XprType::Index Index;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename XprType::PacketScalar PacketScalar;
+
+  CoeffReturnType coeff(Index row, Index col) const
+  {
+    return m_functor(row, col);
+  }
+
+  CoeffReturnType coeff(Index index) const
+  {
+    return m_functor(index);
+  }
+
+  template<int LoadMode>
+  PacketScalar packet(Index row, Index col) const
+  {
+    return m_functor.packetOp(row, col);
+  }
+
+  template<int LoadMode>
+  PacketScalar packet(Index index) const
+  {
+    return m_functor.packetOp(index);
+  }
+
+protected:
+  const NullaryOp m_functor;
+};
+
+// -------------------- CwiseUnaryOp --------------------
+
+template<typename UnaryOp, typename ArgType>
+struct evaluator_impl<CwiseUnaryOp<UnaryOp, ArgType> >
+{
+  typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
+
+  evaluator_impl(const XprType& op) 
+    : m_functor(op.functor()), 
+      m_argImpl(op.nestedExpression()) 
+  { }
+
+  typedef typename XprType::Index Index;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename XprType::PacketScalar PacketScalar;
+
+  CoeffReturnType coeff(Index row, Index col) const
+  {
+    return m_functor(m_argImpl.coeff(row, col));
+  }
+
+  CoeffReturnType coeff(Index index) const
+  {
+    return m_functor(m_argImpl.coeff(index));
+  }
+
+  template<int LoadMode>
+  PacketScalar packet(Index row, Index col) const
+  {
+    return m_functor.packetOp(m_argImpl.template packet<LoadMode>(row, col));
+  }
+
+  template<int LoadMode>
+  PacketScalar packet(Index index) const
+  {
+    return m_functor.packetOp(m_argImpl.template packet<LoadMode>(index));
+  }
+
+protected:
+  const UnaryOp m_functor;
+  typename evaluator<ArgType>::nestedType m_argImpl;
+};
+
+// -------------------- CwiseBinaryOp --------------------
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct evaluator_impl<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+
+  evaluator_impl(const XprType& xpr) 
+    : m_functor(xpr.functor()),
+      m_lhsImpl(xpr.lhs()), 
+      m_rhsImpl(xpr.rhs())  
+  { }
+
+  typedef typename XprType::Index Index;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename XprType::PacketScalar PacketScalar;
+
+  CoeffReturnType coeff(Index row, Index col) const
+  {
+    return m_functor(m_lhsImpl.coeff(row, col), m_rhsImpl.coeff(row, col));
+  }
+
+  CoeffReturnType coeff(Index index) const
+  {
+    return m_functor(m_lhsImpl.coeff(index), m_rhsImpl.coeff(index));
+  }
+
+  template<int LoadMode>
+  PacketScalar packet(Index row, Index col) const
+  {
+    return m_functor.packetOp(m_lhsImpl.template packet<LoadMode>(row, col),
+			      m_rhsImpl.template packet<LoadMode>(row, col));
+  }
+
+  template<int LoadMode>
+  PacketScalar packet(Index index) const
+  {
+    return m_functor.packetOp(m_lhsImpl.template packet<LoadMode>(index),
+			      m_rhsImpl.template packet<LoadMode>(index));
+  }
+
+protected:
+  const BinaryOp m_functor;
+  typename evaluator<Lhs>::nestedType m_lhsImpl;
+  typename evaluator<Rhs>::nestedType m_rhsImpl;
+};
+
+// -------------------- CwiseUnaryView --------------------
+
+template<typename UnaryOp, typename ArgType>
+struct evaluator_impl<CwiseUnaryView<UnaryOp, ArgType> >
+  : evaluator_impl_base<CwiseUnaryView<UnaryOp, ArgType> >
+{
+  typedef CwiseUnaryView<UnaryOp, ArgType> XprType;
+
+  evaluator_impl(const XprType& op) 
+    : m_unaryOp(op.functor()), 
+      m_argImpl(op.nestedExpression()) 
+  { }
+
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+  CoeffReturnType coeff(Index row, Index col) const
+  {
+    return m_unaryOp(m_argImpl.coeff(row, col));
+  }
+
+  CoeffReturnType coeff(Index index) const
+  {
+    return m_unaryOp(m_argImpl.coeff(index));
+  }
+
+  Scalar& coeffRef(Index row, Index col)
+  {
+    return m_unaryOp(m_argImpl.coeffRef(row, col));
+  }
+
+  Scalar& coeffRef(Index index)
+  {
+    return m_unaryOp(m_argImpl.coeffRef(index));
+  }
+
+protected:
+  const UnaryOp m_unaryOp;
+  typename evaluator<ArgType>::nestedType m_argImpl;
+};
+
+// -------------------- Map --------------------
+
+template<typename Derived, int AccessorsType>
+struct evaluator_impl<MapBase<Derived, AccessorsType> >
+  : evaluator_impl_base<Derived>
+{
+  typedef MapBase<Derived, AccessorsType> MapType;
+  typedef Derived XprType;
+
+  typedef typename XprType::PointerType PointerType;
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename XprType::PacketScalar PacketScalar;
+  typedef typename XprType::PacketReturnType PacketReturnType;
+  
+  evaluator_impl(const XprType& map) 
+    : m_data(const_cast<PointerType>(map.data())),  
+      m_rowStride(map.rowStride()),
+      m_colStride(map.colStride())
+  { }
+ 
+  enum {
+    RowsAtCompileTime = XprType::RowsAtCompileTime
+  };
+ 
+  CoeffReturnType coeff(Index row, Index col) const 
+  { 
+    return m_data[col * m_colStride + row * m_rowStride];
+  }
+  
+  CoeffReturnType coeff(Index index) const 
+  { 
+    return coeff(RowsAtCompileTime == 1 ? 0 : index,
+		 RowsAtCompileTime == 1 ? index : 0);
+  }
+
+  Scalar& coeffRef(Index row, Index col) 
+  { 
+    return m_data[col * m_colStride + row * m_rowStride];
+  }
+  
+  Scalar& coeffRef(Index index) 
+  { 
+    return coeffRef(RowsAtCompileTime == 1 ? 0 : index,
+		    RowsAtCompileTime == 1 ? index : 0);
+  }
+ 
+  template<int LoadMode> 
+  PacketReturnType packet(Index row, Index col) const 
+  { 
+    PointerType ptr = m_data + row * m_rowStride + col * m_colStride;
+    return internal::ploadt<PacketScalar, LoadMode>(ptr);
+  }
+
+  template<int LoadMode> 
+  PacketReturnType packet(Index index) const 
+  { 
+    return packet<LoadMode>(RowsAtCompileTime == 1 ? 0 : index,
+			    RowsAtCompileTime == 1 ? index : 0);
+  }
+  
+  template<int StoreMode> 
+  void writePacket(Index row, Index col, const PacketScalar& x) 
+  { 
+    PointerType ptr = m_data + row * m_rowStride + col * m_colStride;
+    return internal::pstoret<Scalar, PacketScalar, StoreMode>(ptr, x);
+  }
+  
+  template<int StoreMode> 
+  void writePacket(Index index, const PacketScalar& x) 
+  { 
+    return writePacket<StoreMode>(RowsAtCompileTime == 1 ? 0 : index,
+				  RowsAtCompileTime == 1 ? index : 0,
+				  x);
+  }
+ 
+protected:
+  PointerType m_data;
+  Index m_rowStride;
+  Index m_colStride;
+};
+
+template<typename PlainObjectType, int MapOptions, typename StrideType> 
+struct evaluator_impl<Map<PlainObjectType, MapOptions, StrideType> >
+  : public evaluator_impl<MapBase<Map<PlainObjectType, MapOptions, StrideType> > >
+{
+  typedef Map<PlainObjectType, MapOptions, StrideType> XprType;
+
+  evaluator_impl(const XprType& map) 
+    : evaluator_impl<MapBase<XprType> >(map) 
+  { }
+};
+
+// -------------------- Block --------------------
+
+template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> 
+struct evaluator_impl<Block<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ false> >
+  : evaluator_impl_base<Block<ArgType, BlockRows, BlockCols, InnerPanel, false> >
+{
+  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel, false> XprType;
+
+  evaluator_impl(const XprType& block) 
+    : m_argImpl(block.nestedExpression()), 
+      m_startRow(block.startRow()), 
+      m_startCol(block.startCol()) 
+  { }
+ 
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename XprType::PacketScalar PacketScalar;
+  typedef typename XprType::PacketReturnType PacketReturnType;
+
+  enum {
+    RowsAtCompileTime = XprType::RowsAtCompileTime
+  };
+ 
+  CoeffReturnType coeff(Index row, Index col) const 
+  { 
+    return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col); 
+  }
+  
+  CoeffReturnType coeff(Index index) const 
+  { 
+    return coeff(RowsAtCompileTime == 1 ? 0 : index,
+		 RowsAtCompileTime == 1 ? index : 0);
+  }
+
+  Scalar& coeffRef(Index row, Index col) 
+  { 
+    return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col); 
+  }
+  
+  Scalar& coeffRef(Index index) 
+  { 
+    return coeffRef(RowsAtCompileTime == 1 ? 0 : index,
+		    RowsAtCompileTime == 1 ? index : 0);
+  }
+ 
+  template<int LoadMode> 
+  PacketReturnType packet(Index row, Index col) const 
+  { 
+    return m_argImpl.template packet<LoadMode>(m_startRow.value() + row, m_startCol.value() + col); 
+  }
+
+  template<int LoadMode> 
+  PacketReturnType packet(Index index) const 
+  { 
+    return packet<LoadMode>(RowsAtCompileTime == 1 ? 0 : index,
+			    RowsAtCompileTime == 1 ? index : 0);
+  }
+  
+  template<int StoreMode> 
+  void writePacket(Index row, Index col, const PacketScalar& x) 
+  { 
+    return m_argImpl.template writePacket<StoreMode>(m_startRow.value() + row, m_startCol.value() + col, x); 
+  }
+  
+  template<int StoreMode> 
+  void writePacket(Index index, const PacketScalar& x) 
+  { 
+    return writePacket<StoreMode>(RowsAtCompileTime == 1 ? 0 : index,
+				  RowsAtCompileTime == 1 ? index : 0,
+				  x);
+  }
+ 
+protected:
+  typename evaluator<ArgType>::nestedType m_argImpl;
+  const variable_if_dynamic<Index, ArgType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+  const variable_if_dynamic<Index, ArgType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+};
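+
+// Note on the members above: if the nested expression has a single row, the
+// block necessarily starts at row 0, so variable_if_dynamic makes m_startRow
+// a compile-time zero with no storage; likewise for m_startCol with a single
+// column.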
+
+// TODO: This evaluator does not actually use the child evaluator;
+// all accesses go through the data() pointer returned by the Block expression.
+
+template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> 
+struct evaluator_impl<Block<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true> >
+  : evaluator_impl<MapBase<Block<ArgType, BlockRows, BlockCols, InnerPanel, true> > >
+{
+  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel, true> XprType;
+
+  evaluator_impl(const XprType& block) 
+    : evaluator_impl<MapBase<XprType> >(block) 
+  { }
+};
+
+
+// -------------------- Select --------------------
+
+template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
+struct evaluator_impl<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
+{
+  typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;
+
+  evaluator_impl(const XprType& select) 
+    : m_conditionImpl(select.conditionMatrix()),
+      m_thenImpl(select.thenMatrix()),
+      m_elseImpl(select.elseMatrix())
+  { }
+ 
+  typedef typename XprType::Index Index;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+  CoeffReturnType coeff(Index row, Index col) const
+  {
+    if (m_conditionImpl.coeff(row, col))
+      return m_thenImpl.coeff(row, col);
+    else
+      return m_elseImpl.coeff(row, col);
+  }
+
+  CoeffReturnType coeff(Index index) const
+  {
+    if (m_conditionImpl.coeff(index))
+      return m_thenImpl.coeff(index);
+    else
+      return m_elseImpl.coeff(index);
+  }
+ 
+protected:
+  typename evaluator<ConditionMatrixType>::nestedType m_conditionImpl;
+  typename evaluator<ThenMatrixType>::nestedType m_thenImpl;
+  typename evaluator<ElseMatrixType>::nestedType m_elseImpl;
+};
+
+
+// -------------------- Replicate --------------------
+
+template<typename ArgType, int RowFactor, int ColFactor> 
+struct evaluator_impl<Replicate<ArgType, RowFactor, ColFactor> >
+{
+  typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
+
+  evaluator_impl(const XprType& replicate) 
+    : m_argImpl(replicate.nestedExpression()),
+      m_rows(replicate.nestedExpression().rows()),
+      m_cols(replicate.nestedExpression().cols())
+  { }
+ 
+  typedef typename XprType::Index Index;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename XprType::PacketReturnType PacketReturnType;
+
+  CoeffReturnType coeff(Index row, Index col) const
+  {
+    // try to avoid using modulo; this is a pure optimization strategy
+    const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
+                           : RowFactor==1 ? row
+                           : row % m_rows.value();
+    const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
+                           : ColFactor==1 ? col
+                           : col % m_cols.value();
+    
+    return m_argImpl.coeff(actual_row, actual_col);
+  }
+
+  template<int LoadMode>
+  PacketReturnType packet(Index row, Index col) const
+  {
+    const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
+                           : RowFactor==1 ? row
+                           : row % m_rows.value();
+    const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
+                           : ColFactor==1 ? col
+                           : col % m_cols.value();
+
+    return m_argImpl.template packet<LoadMode>(actual_row, actual_col);
+  }
+ 
+protected:
+  typename evaluator<ArgType>::nestedType m_argImpl;
+  const variable_if_dynamic<Index, XprType::RowsAtCompileTime> m_rows;
+  const variable_if_dynamic<Index, XprType::ColsAtCompileTime> m_cols;
+};
+
+
+// -------------------- PartialReduxExpr --------------------
+//
+// This is a wrapper around the expression object. 
+// TODO: Find out how to write a proper evaluator without duplicating
+//       the row() and col() member functions.
+
+template< typename ArgType, typename MemberOp, int Direction>
+struct evaluator_impl<PartialReduxExpr<ArgType, MemberOp, Direction> >
+{
+  typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType;
+
+  evaluator_impl(const XprType& expr)
+    : m_expr(expr)
+  { }
+
+  typedef typename XprType::Index Index;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+ 
+  CoeffReturnType coeff(Index row, Index col) const 
+  { 
+    return m_expr.coeff(row, col);
+  }
+  
+  CoeffReturnType coeff(Index index) const 
+  { 
+    return m_expr.coeff(index);
+  }
+
+protected:
+  const XprType m_expr;
+};
+
+
+// -------------------- MatrixWrapper and ArrayWrapper --------------------
+//
+// evaluator_impl_wrapper_base<T> is a common base class for the
+// MatrixWrapper and ArrayWrapper evaluators.
+
+template<typename XprType>
+struct evaluator_impl_wrapper_base
+  : evaluator_impl_base<XprType>
+{
+  typedef typename remove_all<typename XprType::NestedExpressionType>::type ArgType;
+
+  evaluator_impl_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
+
+  typedef typename ArgType::Index Index;
+  typedef typename ArgType::Scalar Scalar;
+  typedef typename ArgType::CoeffReturnType CoeffReturnType;
+  typedef typename ArgType::PacketScalar PacketScalar;
+  typedef typename ArgType::PacketReturnType PacketReturnType;
+
+  CoeffReturnType coeff(Index row, Index col) const
+  {
+    return m_argImpl.coeff(row, col);
+  }
+
+  CoeffReturnType coeff(Index index) const
+  {
+    return m_argImpl.coeff(index);
+  }
+
+  Scalar& coeffRef(Index row, Index col)
+  {
+    return m_argImpl.coeffRef(row, col);
+  }
+
+  Scalar& coeffRef(Index index)
+  {
+    return m_argImpl.coeffRef(index);
+  }
+
+  template<int LoadMode> 
+  PacketReturnType packet(Index row, Index col) const
+  {
+    return m_argImpl.template packet<LoadMode>(row, col);
+  }
+
+  template<int LoadMode> 
+  PacketReturnType packet(Index index) const
+  {
+    return m_argImpl.template packet<LoadMode>(index);
+  }
+
+  template<int StoreMode> 
+  void writePacket(Index row, Index col, const PacketScalar& x)
+  {
+    m_argImpl.template writePacket<StoreMode>(row, col, x);
+  }
+
+  template<int StoreMode> 
+  void writePacket(Index index, const PacketScalar& x)
+  {
+    m_argImpl.template writePacket<StoreMode>(index, x);
+  }
+
+protected:
+  typename evaluator<ArgType>::nestedType m_argImpl;
+};
+
+template<typename ArgType>
+struct evaluator_impl<MatrixWrapper<ArgType> >
+  : evaluator_impl_wrapper_base<MatrixWrapper<ArgType> >
+{
+  typedef MatrixWrapper<ArgType> XprType;
+
+  evaluator_impl(const XprType& wrapper) 
+    : evaluator_impl_wrapper_base<MatrixWrapper<ArgType> >(wrapper.nestedExpression())
+  { }
+};
+
+template<typename ArgType>
+struct evaluator_impl<ArrayWrapper<ArgType> >
+  : evaluator_impl_wrapper_base<ArrayWrapper<ArgType> >
+{
+  typedef ArrayWrapper<ArgType> XprType;
+
+  evaluator_impl(const XprType& wrapper) 
+    : evaluator_impl_wrapper_base<ArrayWrapper<ArgType> >(wrapper.nestedExpression())
+  { }
+};
+
+
+// -------------------- Reverse --------------------
+
+// defined in Reverse.h:
+template<typename PacketScalar, bool ReversePacket> struct reverse_packet_cond;
+
+template<typename ArgType, int Direction>
+struct evaluator_impl<Reverse<ArgType, Direction> >
+  : evaluator_impl_base<Reverse<ArgType, Direction> >
+{
+  typedef Reverse<ArgType, Direction> XprType;
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename XprType::PacketScalar PacketScalar;
+  typedef typename XprType::PacketReturnType PacketReturnType;
+
+  enum {
+    PacketSize = internal::packet_traits<Scalar>::size,
+    IsRowMajor = XprType::IsRowMajor,
+    IsColMajor = !IsRowMajor,
+    ReverseRow = (Direction == Vertical)   || (Direction == BothDirections),
+    ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
+    OffsetRow  = ReverseRow && IsColMajor ? PacketSize : 1,
+    OffsetCol  = ReverseCol && IsRowMajor ? PacketSize : 1,
+    ReversePacket = (Direction == BothDirections)
+                    || ((Direction == Vertical)   && IsColMajor)
+                    || ((Direction == Horizontal) && IsRowMajor)
+  };
+  typedef internal::reverse_packet_cond<PacketScalar,ReversePacket> reverse_packet;
+
+  evaluator_impl(const XprType& reverse) 
+    : m_argImpl(reverse.nestedExpression()),
+      m_rows(ReverseRow ? reverse.nestedExpression().rows() : 0),
+      m_cols(ReverseCol ? reverse.nestedExpression().cols() : 0)
+  { }
+ 
+  CoeffReturnType coeff(Index row, Index col) const
+  {
+    return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,
+			   ReverseCol ? m_cols.value() - col - 1 : col);
+  }
+
+  CoeffReturnType coeff(Index index) const
+  {
+    return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
+  }
+
+  Scalar& coeffRef(Index row, Index col)
+  {
+    return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row,
+			      ReverseCol ? m_cols.value() - col - 1 : col);
+  }
+
+  Scalar& coeffRef(Index index)
+  {
+    return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1);
+  }
+
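+  // When the packet direction itself is reversed (ReversePacket), a packet
+  // that logically starts at (row, col) in the reversed expression begins
+  // PacketSize elements earlier in the underlying expression (hence the
+  // OffsetRow/OffsetCol shift), and its elements are reversed via
+  // reverse_packet::run().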
+  template<int LoadMode>
+  PacketScalar packet(Index row, Index col) const
+  {
+    return reverse_packet::run(m_argImpl.template packet<LoadMode>(
+                                  ReverseRow ? m_rows.value() - row - OffsetRow : row,
+                                  ReverseCol ? m_cols.value() - col - OffsetCol : col));
+  }
+
+  template<int LoadMode>
+  PacketScalar packet(Index index) const
+  {
+    return preverse(m_argImpl.template packet<LoadMode>(m_rows.value() * m_cols.value() - index - PacketSize));
+  }
+
+  template<int LoadMode>
+  void writePacket(Index row, Index col, const PacketScalar& x)
+  {
+    m_argImpl.template writePacket<LoadMode>(
+                                  ReverseRow ? m_rows.value() - row - OffsetRow : row,
+                                  ReverseCol ? m_cols.value() - col - OffsetCol : col,
+                                  reverse_packet::run(x));
+  }
+
+  template<int LoadMode>
+  void writePacket(Index index, const PacketScalar& x)
+  {
+    m_argImpl.template writePacket<LoadMode>
+      (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x));
+  }
+ 
+protected:
+  typename evaluator<ArgType>::nestedType m_argImpl;
+
+  // If we do not reverse rows, then we do not need to know the number of rows; same for columns
+  const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 0> m_rows;
+  const variable_if_dynamic<Index, ReverseCol ? ArgType::ColsAtCompileTime : 0> m_cols;
+};
+
+
+// -------------------- Diagonal --------------------
+
+template<typename ArgType, int DiagIndex>
+struct evaluator_impl<Diagonal<ArgType, DiagIndex> >
+  : evaluator_impl_base<Diagonal<ArgType, DiagIndex> >
+{
+  typedef Diagonal<ArgType, DiagIndex> XprType;
+
+  evaluator_impl(const XprType& diagonal) 
+    : m_argImpl(diagonal.nestedExpression()),
+      m_index(diagonal.index())
+  { }
+ 
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+  CoeffReturnType coeff(Index row, Index) const
+  {
+    return m_argImpl.coeff(row + rowOffset(), row + colOffset());
+  }
+
+  CoeffReturnType coeff(Index index) const
+  {
+    return m_argImpl.coeff(index + rowOffset(), index + colOffset());
+  }
+
+  Scalar& coeffRef(Index row, Index)
+  {
+    return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());
+  }
+
+  Scalar& coeffRef(Index index)
+  {
+    return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());
+  }
+
+protected:
+  typename evaluator<ArgType>::nestedType m_argImpl;
+  const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;
+
+private:
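+  // For a super-diagonal (index > 0) the column index is shifted by the
+  // diagonal index; for a sub-diagonal (index < 0) the row index is shifted.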
+  EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
+  EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
+};
+
+
+// ---------- SwapWrapper ----------
+
+template<typename ArgType>
+struct evaluator_impl<SwapWrapper<ArgType> >
+  : evaluator_impl_base<SwapWrapper<ArgType> >
+{
+  typedef SwapWrapper<ArgType> XprType;
+
+  evaluator_impl(const XprType& swapWrapper) 
+    : m_argImpl(swapWrapper.expression())
+  { }
+ 
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::Packet Packet;
+
+  // This function and the next one are needed by assign to correctly align loads/stores
+  // TODO make Assign use .data()
+  Scalar& coeffRef(Index row, Index col)
+  {
+    return m_argImpl.coeffRef(row, col);
+  }
+  
+  inline Scalar& coeffRef(Index index)
+  {
+    return m_argImpl.coeffRef(index);
+  }
+
+  template<typename OtherEvaluatorType>
+  void copyCoeff(Index row, Index col, const OtherEvaluatorType& other)
+  {
+    OtherEvaluatorType& nonconst_other = const_cast<OtherEvaluatorType&>(other);
+    Scalar tmp = m_argImpl.coeff(row, col);
+    m_argImpl.coeffRef(row, col) = nonconst_other.coeff(row, col);
+    nonconst_other.coeffRef(row, col) = tmp;
+  }
+
+  template<typename OtherEvaluatorType>
+  void copyCoeff(Index index, const OtherEvaluatorType& other)
+  {
+    OtherEvaluatorType& nonconst_other = const_cast<OtherEvaluatorType&>(other);
+    Scalar tmp = m_argImpl.coeff(index);
+    m_argImpl.coeffRef(index) = nonconst_other.coeff(index);
+    nonconst_other.coeffRef(index) = tmp;
+  }
+
+  template<int StoreMode, int LoadMode, typename OtherEvaluatorType>
+  void copyPacket(Index row, Index col, const OtherEvaluatorType& other)
+  {
+    OtherEvaluatorType& nonconst_other = const_cast<OtherEvaluatorType&>(other);
+    Packet tmp = m_argImpl.template packet<StoreMode>(row, col);
+    m_argImpl.template writePacket<StoreMode>
+      (row, col, nonconst_other.template packet<LoadMode>(row, col));
+    nonconst_other.template writePacket<LoadMode>(row, col, tmp);
+  }
+
+  template<int StoreMode, int LoadMode, typename OtherEvaluatorType>
+  void copyPacket(Index index, const OtherEvaluatorType& other)
+  {
+    OtherEvaluatorType& nonconst_other = const_cast<OtherEvaluatorType&>(other);
+    Packet tmp = m_argImpl.template packet<StoreMode>(index);
+    m_argImpl.template writePacket<StoreMode>
+      (index, nonconst_other.template packet<LoadMode>(index));
+    nonconst_other.template writePacket<LoadMode>(index, tmp);
+  }
+
+protected:
+  typename evaluator<ArgType>::nestedType m_argImpl;
+};
+
+
+// ---------- SelfCwiseBinaryOp ----------
+
+template<typename BinaryOp, typename LhsXpr, typename RhsXpr>
+struct evaluator_impl<SelfCwiseBinaryOp<BinaryOp, LhsXpr, RhsXpr> >
+  : evaluator_impl_base<SelfCwiseBinaryOp<BinaryOp, LhsXpr, RhsXpr> >
+{
+  typedef SelfCwiseBinaryOp<BinaryOp, LhsXpr, RhsXpr> XprType;
+
+  evaluator_impl(const XprType& selfCwiseBinaryOp) 
+    : m_argImpl(selfCwiseBinaryOp.expression()),
+      m_functor(selfCwiseBinaryOp.functor())
+  { }
+ 
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::Packet Packet;
+
+  // This function and the next one are needed by assign to correctly align loads/stores
+  // TODO make Assign use .data()
+  Scalar& coeffRef(Index row, Index col)
+  {
+    return m_argImpl.coeffRef(row, col);
+  }
+  
+  inline Scalar& coeffRef(Index index)
+  {
+    return m_argImpl.coeffRef(index);
+  }
+
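+  // copyCoeff/copyPacket apply the binary functor in place:
+  // dst(i,j) = functor(dst(i,j), src(i,j)); this is how compound
+  // assignments such as m += n or m *= s are carried out.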
+  template<typename OtherEvaluatorType>
+  void copyCoeff(Index row, Index col, const OtherEvaluatorType& other)
+  {
+    Scalar& tmp = m_argImpl.coeffRef(row, col);
+    tmp = m_functor(tmp, other.coeff(row, col));
+  }
+
+  template<typename OtherEvaluatorType>
+  void copyCoeff(Index index, const OtherEvaluatorType& other)
+  {
+    Scalar& tmp = m_argImpl.coeffRef(index);
+    tmp = m_functor(tmp, other.coeff(index));
+  }
+
+  template<int StoreMode, int LoadMode, typename OtherEvaluatorType>
+  void copyPacket(Index row, Index col, const OtherEvaluatorType& other)
+  {
+    const Packet res = m_functor.packetOp(m_argImpl.template packet<StoreMode>(row, col),
+					  other.template packet<LoadMode>(row, col));
+    m_argImpl.template writePacket<StoreMode>(row, col, res);
+  }
+
+  template<int StoreMode, int LoadMode, typename OtherEvaluatorType>
+  void copyPacket(Index index, const OtherEvaluatorType& other)
+  {
+    const Packet res = m_functor.packetOp(m_argImpl.template packet<StoreMode>(index),
+					  other.template packet<LoadMode>(index));
+    m_argImpl.template writePacket<StoreMode>(index, res);
+  }
+
+protected:
+  typename evaluator<LhsXpr>::nestedType m_argImpl;
+  const BinaryOp m_functor;
+};
+
+
+} // namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COREEVALUATORS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/ProductEvaluators.h b/resources/3rdparty/eigen/Eigen/src/Core/ProductEvaluators.h
new file mode 100644
index 000000000..0c0570e44
--- /dev/null
+++ b/resources/3rdparty/eigen/Eigen/src/Core/ProductEvaluators.h
@@ -0,0 +1,411 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_PRODUCTEVALUATORS_H
+#define EIGEN_PRODUCTEVALUATORS_H
+
+namespace Eigen {
+  
+namespace internal {
+  
+// We can evaluate the product either all at once, like GeneralProduct and its evalTo() function, or
+// traverse the matrix coefficient by coefficient, like CoeffBasedProduct.  Use the existing logic
+// in ProductReturnType to decide.
+
+template<typename XprType, typename ProductType>
+struct product_evaluator_dispatcher;
+
+template<typename Lhs, typename Rhs>
+struct evaluator_impl<Product<Lhs, Rhs> >
+  : product_evaluator_dispatcher<Product<Lhs, Rhs>, typename ProductReturnType<Lhs, Rhs>::Type> 
+{
+  typedef Product<Lhs, Rhs> XprType;
+  typedef product_evaluator_dispatcher<XprType, typename ProductReturnType<Lhs, Rhs>::Type> Base;
+
+  evaluator_impl(const XprType& xpr) : Base(xpr) 
+  { }
+};
+
+template<typename XprType, typename ProductType>
+struct product_evaluator_traits_dispatcher;
+
+template<typename Lhs, typename Rhs>
+struct evaluator_traits<Product<Lhs, Rhs> >
+  : product_evaluator_traits_dispatcher<Product<Lhs, Rhs>, typename ProductReturnType<Lhs, Rhs>::Type> 
+{ 
+  static const int AssumeAliasing = 1;
+};
+
+// Case 1: Evaluate all at once
+//
+// We can view the GeneralProduct class as a part of the product evaluator. 
+// Four sub-cases: InnerProduct, OuterProduct, GemmProduct and GemvProduct.
+// InnerProduct is special because GeneralProduct does not have an evalTo() method in this case.
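+//
+// For instance (a sketch, not shipped code): for column vectors u and v, the
+// InnerProduct dispatcher below eagerly evaluates the 1x1 result in its
+// constructor as
+//   result(0,0) = (u.transpose().cwiseProduct(v)).sum();
+// and then exposes it through the plain-object evaluator base.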
+
+template<typename Lhs, typename Rhs>
+struct product_evaluator_traits_dispatcher<Product<Lhs, Rhs>, GeneralProduct<Lhs, Rhs, InnerProduct> > 
+{
+  static const int HasEvalTo = 0;
+};
+
+template<typename Lhs, typename Rhs>
+struct product_evaluator_dispatcher<Product<Lhs, Rhs>, GeneralProduct<Lhs, Rhs, InnerProduct> > 
+  : public evaluator<typename Product<Lhs, Rhs>::PlainObject>::type
+{
+  typedef Product<Lhs, Rhs> XprType;
+  typedef typename XprType::PlainObject PlainObject;
+  typedef typename evaluator<PlainObject>::type evaluator_base;
+
+  // TODO: Computation is too early (?)
+  product_evaluator_dispatcher(const XprType& xpr) : evaluator_base(m_result)
+  {
+    m_result.coeffRef(0,0) = (xpr.lhs().transpose().cwiseProduct(xpr.rhs())).sum();
+  }
+  
+protected:  
+  PlainObject m_result;
+};
+
+// For the other three subcases, simply call the evalTo() method of GeneralProduct
+// TODO: GeneralProduct should take evaluators, not expression objects.
+
+template<typename Lhs, typename Rhs, int ProductType>
+struct product_evaluator_traits_dispatcher<Product<Lhs, Rhs>, GeneralProduct<Lhs, Rhs, ProductType> > 
+{
+  static const int HasEvalTo = 1;
+};
+
+template<typename Lhs, typename Rhs, int ProductType>
+struct product_evaluator_dispatcher<Product<Lhs, Rhs>, GeneralProduct<Lhs, Rhs, ProductType> > 
+{
+  typedef Product<Lhs, Rhs> XprType;
+  typedef typename XprType::PlainObject PlainObject;
+  typedef typename evaluator<PlainObject>::type evaluator_base;
+  
+  product_evaluator_dispatcher(const XprType& xpr) : m_xpr(xpr)
+  { }
+  
+  template<typename DstEvaluatorType, typename DstXprType>
+  void evalTo(DstEvaluatorType /* not used */, DstXprType& dst)
+  {
+    dst.resize(m_xpr.rows(), m_xpr.cols());
+    GeneralProduct<Lhs, Rhs, ProductType>(m_xpr.lhs(), m_xpr.rhs()).evalTo(dst);
+  }
+  
+protected: 
+  const XprType& m_xpr;
+};
+
+// Case 2: Evaluate coeff by coeff
+//
+// This is mostly taken from CoeffBasedProduct.h
+// The main difference is that we add an extra argument to the etor_product_*_impl::run() function
+// for the inner dimension of the product, because evaluator objects do not know their size.
+
+template<int Traversal, int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct etor_product_coeff_impl;
+
+template<int StorageOrder, int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct etor_product_packet_impl;
+
+template<typename Lhs, typename Rhs, typename LhsNested, typename RhsNested, int Flags>
+struct product_evaluator_traits_dispatcher<Product<Lhs, Rhs>, CoeffBasedProduct<LhsNested, RhsNested, Flags> >
+{
+  static const int HasEvalTo = 0;
+};
+
+template<typename Lhs, typename Rhs, typename LhsNested, typename RhsNested, int Flags>
+struct product_evaluator_dispatcher<Product<Lhs, Rhs>, CoeffBasedProduct<LhsNested, RhsNested, Flags> >
+  : evaluator_impl_base<Product<Lhs, Rhs> >
+{
+  typedef Product<Lhs, Rhs> XprType;
+  typedef CoeffBasedProduct<LhsNested, RhsNested, Flags> CoeffBasedProductType;
+
+  product_evaluator_dispatcher(const XprType& xpr) 
+    : m_lhsImpl(xpr.lhs()), 
+      m_rhsImpl(xpr.rhs()),  
+      m_innerDim(xpr.lhs().cols())
+  { }
+
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename XprType::PacketScalar PacketScalar;
+  typedef typename XprType::PacketReturnType PacketReturnType;
+
+  // Everything below here is taken from CoeffBasedProduct.h
+
+  enum {
+    RowsAtCompileTime = traits<CoeffBasedProductType>::RowsAtCompileTime,
+    PacketSize = packet_traits<Scalar>::size,
+    InnerSize  = traits<CoeffBasedProductType>::InnerSize,
+    CoeffReadCost = traits<CoeffBasedProductType>::CoeffReadCost,
+    Unroll = CoeffReadCost != Dynamic && CoeffReadCost <= EIGEN_UNROLLING_LIMIT,
+    CanVectorizeInner = traits<CoeffBasedProductType>::CanVectorizeInner
+  };
+
+  typedef typename evaluator<Lhs>::type LhsEtorType;
+  typedef typename evaluator<Rhs>::type RhsEtorType;
+  typedef etor_product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
+                                  Unroll ? InnerSize-1 : Dynamic,
+                                  LhsEtorType, RhsEtorType, Scalar> CoeffImpl;
+
+  const CoeffReturnType coeff(Index row, Index col) const
+  {
+    Scalar res;
+    CoeffImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res);
+    return res;
+  }
+
+  /* Allow index-based non-packet access. It is impossible though to allow index-based packet access,
+   * which is why we don't set the LinearAccessBit.
+   */
+  const CoeffReturnType coeff(Index index) const
+  {
+    Scalar res;
+    const Index row = RowsAtCompileTime == 1 ? 0 : index;
+    const Index col = RowsAtCompileTime == 1 ? index : 0;
+    CoeffImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res);
+    return res;
+  }
+
+  template<int LoadMode>
+  const PacketReturnType packet(Index row, Index col) const
+  {
+    PacketScalar res;
+    typedef etor_product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
+				     Unroll ? InnerSize-1 : Dynamic,
+				     LhsEtorType, RhsEtorType, PacketScalar, LoadMode> PacketImpl;
+    PacketImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res);
+    return res;
+  }
+
+protected:
+  typename evaluator<Lhs>::type m_lhsImpl;
+  typename evaluator<Rhs>::type m_rhsImpl;
+
+  // TODO: Get rid of m_innerDim if known at compile time
+  Index m_innerDim;
+};
+
+/***************************************************************************
+* Normal product .coeff() implementation (with meta-unrolling)
+***************************************************************************/
+
+/**************************************
+*** Scalar path  - no vectorization ***
+**************************************/
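+// For example, with InnerSize == 3 and unrolling enabled, the recursion below
+// expands (conceptually) to:
+//   res  = lhs.coeff(row, 0) * rhs.coeff(0, col);
+//   res += lhs.coeff(row, 1) * rhs.coeff(1, col);
+//   res += lhs.coeff(row, 2) * rhs.coeff(2, col);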
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct etor_product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, RetScalar &res)
+  {
+    etor_product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, innerDim, res);
+    res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct etor_product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, RetScalar &res)
+  {
+    res = lhs.coeff(row, 0) * rhs.coeff(0, col);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct etor_product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, RetScalar& res)
+  {
+    eigen_assert(innerDim>0 && "you are using an uninitialized matrix");
+    res = lhs.coeff(row, 0) * rhs.coeff(0, col);
+    for(Index i = 1; i < innerDim; ++i)
+      res += lhs.coeff(row, i) * rhs.coeff(i, col);
+  }
+};
+
+/*******************************************
+*** Scalar path with inner vectorization ***
+*******************************************/
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet>
+struct etor_product_coeff_vectorized_unroller
+{
+  typedef typename Lhs::Index Index;
+  enum { PacketSize = packet_traits<typename Lhs::Scalar>::size };
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, typename Lhs::PacketScalar &pres)
+  {
+    etor_product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, innerDim, pres);
+    pres = padd(pres, pmul( lhs.template packet<Aligned>(row, UnrollingIndex) , rhs.template packet<Aligned>(UnrollingIndex, col) ));
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet>
+struct etor_product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::PacketScalar &pres)
+  {
+    pres = pmul(lhs.template packet<Aligned>(row, 0) , rhs.template packet<Aligned>(0, col));
+  }
+};
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct etor_product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::PacketScalar Packet;
+  typedef typename Lhs::Index Index;
+  enum { PacketSize = packet_traits<typename Lhs::Scalar>::size };
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, RetScalar &res)
+  {
+    Packet pres;
+    etor_product_coeff_vectorized_unroller<UnrollingIndex+1-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, innerDim, pres);
+    etor_product_coeff_impl<DefaultTraversal,UnrollingIndex,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, innerDim, res);
+    res = predux(pres);
+  }
+};
+
+template<typename Lhs, typename Rhs, int LhsRows = Lhs::RowsAtCompileTime, int RhsCols = Rhs::ColsAtCompileTime>
+struct etor_product_coeff_vectorized_dyn_selector
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res)
+  {
+    res = lhs.row(row).transpose().cwiseProduct(rhs.col(col)).sum();
+  }
+};
+
+// NOTE: the 3 following specializations exist because taking .col(0) on a vector is a bit slower
+// NOTE maybe they are now useless since we have a specialization for Block<Matrix>
+template<typename Lhs, typename Rhs, int RhsCols>
+struct etor_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,RhsCols>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res)
+  {
+    res = lhs.transpose().cwiseProduct(rhs.col(col)).sum();
+  }
+};
+
+template<typename Lhs, typename Rhs, int LhsRows>
+struct etor_product_coeff_vectorized_dyn_selector<Lhs,Rhs,LhsRows,1>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res)
+  {
+    res = lhs.row(row).transpose().cwiseProduct(rhs).sum();
+  }
+};
+
+template<typename Lhs, typename Rhs>
+struct etor_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,1>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index /*row*/, Index /*col*/, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res)
+  {
+    res = lhs.transpose().cwiseProduct(rhs).sum();
+  }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct etor_product_coeff_impl<InnerVectorizedTraversal, Dynamic, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, typename Lhs::Scalar &res)
+  {
+    etor_product_coeff_vectorized_dyn_selector<Lhs,Rhs>::run(row, col, lhs, rhs, innerDim, res);
+  }
+};
+
+/*******************
+*** Packet path  ***
+*******************/
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct etor_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
+  {
+    etor_product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);
+    res =  pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex)), rhs.template packet<LoadMode>(UnrollingIndex, col), res);
+  }
+};
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct etor_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
+  {
+    etor_product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);
+    res =  pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex), pset1<Packet>(rhs.coeff(UnrollingIndex, col)), res);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct etor_product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
+  {
+    res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct etor_product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
+  {
+    res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct etor_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
+  {
+    eigen_assert(innerDim>0 && "you are using an uninitialized matrix");
+    res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
+    for(Index i = 1; i < innerDim; ++i)
+      res =  pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct etor_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
+  {
+    eigen_assert(innerDim>0 && "you are using an uninitialized matrix");
+    res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
+    for(Index i = 1; i < innerDim; ++i)
+      res =  pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PRODUCTEVALUATORS_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Core/Ref.h b/resources/3rdparty/eigen/Eigen/src/Core/Ref.h
new file mode 100644
index 000000000..9c409eecf
--- /dev/null
+++ b/resources/3rdparty/eigen/Eigen/src/Core/Ref.h
@@ -0,0 +1,254 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REF_H
+#define EIGEN_REF_H
+
+namespace Eigen { 
+
+template<typename Derived> class RefBase;
+template<typename PlainObjectType, int Options = 0,
+         typename StrideType = typename internal::conditional<PlainObjectType::IsVectorAtCompileTime,InnerStride<1>,OuterStride<> >::type > class Ref;
+
+/** \class Ref
+  * \ingroup Core_Module
+  *
+  * \brief A matrix or vector expression mapping an existing expression
+  *
+  * \tparam PlainObjectType the equivalent matrix type of the mapped data
+  * \tparam Options specifies whether the pointer is \c #Aligned, or \c #Unaligned.
+  *                The default is \c #Unaligned.
+  * \tparam StrideType optionally specifies strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1),
+  *                   but accepts a variable outer stride (leading dimension).
+  *                   This can be overridden by specifying strides.
+  *                   The type passed here must be a specialization of the Stride template, see examples below.
+  *
+  * This class makes it possible to write non-template functions taking Eigen objects as parameters while limiting the number of copies.
+  * A Ref<> object can represent either a const expression or an l-value:
+  * \code
+  * // in-out argument:
+  * void foo1(Ref<VectorXf> x);
+  *
+  * // read-only const argument:
+  * void foo2(const Ref<const VectorXf>& x);
+  * \endcode
+  *
+  * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation error is triggered.
+  * By default, a Ref<VectorXf> can reference any dense vector expression of float having a contiguous memory layout.
+  * Likewise, a Ref<MatrixXf> can reference any column-major dense matrix expression of float whose columns' elements are contiguously stored, with
+  * the possibility of a constant gap between columns; i.e., the inner stride must be equal to 1, but the outer stride (or leading dimension)
+  * can be greater than the number of rows.
+  *
+  * In the const case, if the input expression does not match the above requirement, then it is evaluated into a temporary before being passed to the function.
+  * Here are some examples:
+  * \code
+  * MatrixXf A(10,10);
+  * VectorXf a(10);
+  * foo1(a.head(4));             // OK
+  * foo1(A.col(3));              // OK
+  * foo1(A.row(2));              // compilation error because here the inner stride is not 1
+  * foo2(A.row(2));              // The row is copied into a contiguous temporary
+  * foo2(2*a);                   // The expression is evaluated into a temporary
+  * foo2(A.col(1).segment(2,4)); // No temporary
+  * \endcode
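+  *
+  * A matrix variant behaves similarly; a sketch with a hypothetical function foo4:
+  * \code
+  * // in-out argument:
+  * void foo4(Ref<MatrixXf> M);
+  * MatrixXf A(8,6);
+  * foo4(A);                    // OK
+  * foo4(A.leftCols(3));        // OK: inner stride is 1, outer stride is A's leading dimension
+  * foo4(A.middleRows(1,4));    // OK: columns remain equidistant in memory
+  * foo4(A.transpose());        // compilation error: storage order does not match
+  * \endcode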
+  *
+  * The range of inputs that can be referenced without a temporary can be enlarged using the last two template parameters.
+  * Here is an example accepting an inner stride != 1:
+  * \code
+  * // in-out argument:
+  * void foo3(Ref<VectorXf,0,InnerStride<> > x);
+  * foo3(A.row(2));             // OK
+  * \endcode
+  * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involve more
+  * expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one can provide overloads that internally call a
+  * template function, e.g.:
+  * \code
+  * // in the .h:
+  * void foo(const Ref<MatrixXf>& A);
+  * void foo(const Ref<MatrixXf,0,Stride<> >& A);
+  *
+  * // in the .cpp:
+  * template<typename TypeOfA> void foo_impl(const TypeOfA& A) {
+  *     ... // crazy code goes here
+  * }
+  * void foo(const Ref<MatrixXf>& A) { foo_impl(A); }
+  * void foo(const Ref<MatrixXf,0,Stride<> >& A) { foo_impl(A); }
+  * \endcode
+  *
+  *
+  * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
+  */
+
+namespace internal {
+
+template<typename _PlainObjectType, int _Options, typename _StrideType>
+struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
+  : public traits<Map<_PlainObjectType, _Options, _StrideType> >
+{
+  typedef _PlainObjectType PlainObjectType;
+  typedef _StrideType StrideType;
+  enum {
+    Options = _Options
+  };
+
+  template<typename Derived> struct match {
+    enum {
+      HasDirectAccess = internal::has_direct_access<Derived>::ret,
+      StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
+      InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic)
+                      || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime)
+                      || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1),
+      OuterStrideMatch = Derived::IsVectorAtCompileTime
+                      || int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
+      AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits<Derived>::Flags&AlignedBit)==AlignedBit),
+      MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch
+    };
+    typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
+  };
+
+};
+
+template<typename Derived>
+struct traits<RefBase<Derived> > : public traits<Derived> {};
+
+}
+
+template<typename Derived> class RefBase
+ : public MapBase<Derived>
+{
+  typedef typename internal::traits<Derived>::PlainObjectType PlainObjectType;
+  typedef typename internal::traits<Derived>::StrideType StrideType;
+
+public:
+
+  typedef MapBase<Derived> Base;
+  EIGEN_DENSE_PUBLIC_INTERFACE(RefBase)
+
+  inline Index innerStride() const
+  {
+    return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
+  }
+
+  inline Index outerStride() const
+  {
+    return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
+         : IsVectorAtCompileTime ? this->size()
+         : int(Flags)&RowMajorBit ? this->cols()
+         : this->rows();
+  }
+
+  RefBase()
+    : Base(0,RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime),
+      // Stride<> does not allow default ctor for Dynamic strides, so let's initialize it with dummy values:
+      m_stride(StrideType::OuterStrideAtCompileTime==Dynamic?0:StrideType::OuterStrideAtCompileTime,
+               StrideType::InnerStrideAtCompileTime==Dynamic?0:StrideType::InnerStrideAtCompileTime)
+  {}
+
+protected:
+
+  typedef Stride<StrideType::OuterStrideAtCompileTime,StrideType::InnerStrideAtCompileTime> StrideBase;
+
+  template<typename Expression>
+  void construct(Expression& expr)
+  {
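+    // Placement new re-runs the MapBase constructor in place once the actual
+    // data pointer and the run-time sizes are known; the same trick is used
+    // below for m_stride, whose type cannot always be constructed with its
+    // final dynamic values up front.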
+    if(PlainObjectType::RowsAtCompileTime==1)
+    {
+      eigen_assert(expr.rows()==1 || expr.cols()==1);
+      ::new (static_cast<Base*>(this)) Base(expr.data(), 1, expr.size());
+    }
+    else if(PlainObjectType::ColsAtCompileTime==1)
+    {
+      eigen_assert(expr.rows()==1 || expr.cols()==1);
+      ::new (static_cast<Base*>(this)) Base(expr.data(), expr.size(), 1);
+    }
+    else
+      ::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols());
+    ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
+                                 StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());
+  }
+
+  StrideBase m_stride;
+};
+
+
+template<typename PlainObjectType, int Options, typename StrideType> class Ref
+  : public RefBase<Ref<PlainObjectType, Options, StrideType> >
+{
+    typedef internal::traits<Ref> Traits;
+  public:
+
+    typedef RefBase<Ref> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
+
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename Derived>
+    inline Ref(PlainObjectBase<Derived>& expr,
+               typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
+    {
+      Base::construct(expr);
+    }
+    template<typename Derived>
+    inline Ref(const DenseBase<Derived>& expr,
+               typename internal::enable_if<bool(internal::is_lvalue<Derived>::value&&bool(Traits::template match<Derived>::MatchAtCompileTime)),Derived>::type* = 0,
+               int = Derived::ThisConstantIsPrivateInPlainObjectBase)
+    #else
+    template<typename Derived>
+    inline Ref(DenseBase<Derived>& expr)
+    #endif
+    {
+      Base::construct(expr.const_cast_derived());
+    }
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref)
+
+};
+
+// this is the const ref version
+template<typename PlainObjectType, int Options, typename StrideType> class Ref<const PlainObjectType, Options, StrideType>
+  : public RefBase<Ref<const PlainObjectType, Options, StrideType> >
+{
+    typedef internal::traits<Ref> Traits;
+  public:
+
+    typedef RefBase<Ref> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
+
+    template<typename Derived>
+    inline Ref(const DenseBase<Derived>& expr)
+    {
+//      std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n";
+//      std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
+//      std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
+      construct(expr.derived(), typename Traits::template match<Derived>::type());
+    }
+
+  protected:
+
+    template<typename Expression>
+    void construct(const Expression& expr,internal::true_type)
+    {
+      Base::construct(expr);
+    }
+
+    template<typename Expression>
+    void construct(const Expression& expr, internal::false_type)
+    {
+//      std::cout << "Ref: copy\n";
+      m_object = expr;
+      Base::construct(m_object);
+    }
+
+  protected:
+    PlainObjectType m_object;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_REF_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/resources/3rdparty/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
new file mode 100644
index 000000000..0075880fe
--- /dev/null
+++ b/resources/3rdparty/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
@@ -0,0 +1,339 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERALIZEDEIGENSOLVER_H
+#define EIGEN_GENERALIZEDEIGENSOLVER_H
+
+#include "./RealQZ.h"
+
+namespace Eigen { 
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class GeneralizedEigenSolver
+  *
+  * \brief Computes the generalized eigenvalues and eigenvectors of a pair of general matrices
+  *
+  * \tparam _MatrixType the type of the matrices of which we are computing the
+  * eigen-decomposition; this is expected to be an instantiation of the Matrix
+  * class template. Currently, only real matrices are supported.
+  *
+  * The generalized eigenvalues and eigenvectors of a matrix pair \f$ A \f$ and \f$ B \f$ are scalars
+  * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda Bv \f$.  If
+  * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
+  * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
+  * B V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
+  * have \f$ A = B V D V^{-1} \f$. This is called the generalized eigen-decomposition.
+  *
+  * The generalized eigenvalues and eigenvectors of a matrix pair may be complex, even when the
+  * matrices are real. Moreover, the generalized eigenvalue might be infinite if the matrix B is
+  * singular. To work around this difficulty, the eigenvalues are provided as a pair of complex \f$ \alpha \f$
+  * and real \f$ \beta \f$ such that: \f$ \lambda_i = \alpha_i / \beta_i \f$. If \f$ \beta_i \f$ is (nearly) zero,
+  * then one can consider the well-defined left eigenvalue \f$ \mu = \beta_i / \alpha_i \f$ such that:
+  * \f$ \mu_i A v_i = B v_i \f$, or even \f$ \mu_i u_i^T A  = u_i^T B \f$ where \f$ u_i \f$ is
+  * called the left eigenvector.
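+  *
+  * For instance, a minimal sketch that recovers only the finite eigenvalues
+  * from a previously computed solver \c ges (a hypothetical instance):
+  * \code
+  * for (int i = 0; i < ges.alphas().size(); ++i)
+  *   if (ges.betas()(i) != 0)
+  *     std::cout << ges.alphas()(i) / ges.betas()(i) << "\n";
+  * \endcode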
+  *
+  * Call the function compute() to compute the generalized eigenvalues and eigenvectors of
+  * a given matrix pair. Alternatively, you can use the
+  * GeneralizedEigenSolver(const MatrixType&, const MatrixType&, bool) constructor which computes the
+  * eigenvalues and eigenvectors at construction time. Once the eigenvalue and
+  * eigenvectors are computed, they can be retrieved with the eigenvalues() and
+  * eigenvectors() functions.
+  *
+  * Here is a usage example of this class:
+  * Example: \include GeneralizedEigenSolver.cpp
+  * Output: \verbinclude GeneralizedEigenSolver.out
+  *
+  * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
+  */
+template<typename _MatrixType> class GeneralizedEigenSolver
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type #MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    /** \brief Complex scalar type for #MatrixType. 
+      *
+      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for the vector of real scalar values of the eigenvalues, as returned by betas().
+      *
+      * This is a column vector with entries of type #Scalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> VectorType;
+
+    /** \brief Type for the vector of complex scalar values of the eigenvalues, as returned by alphas().
+      *
+      * This is a column vector with entries of type #ComplexScalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ComplexVectorType;
+
+    /** \brief Expression type for the eigenvalues as returned by eigenvalues().
+      */
+    typedef CwiseBinaryOp<internal::scalar_quotient_op<ComplexScalar,Scalar>,ComplexVectorType,VectorType> EigenvalueType;
+
+    /** \brief Type for matrix of eigenvectors as returned by eigenvectors(). 
+      *
+      * This is a square matrix with entries of type #ComplexScalar. 
+      * The size is the same as the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
+
+    /** \brief Default constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via GeneralizedEigenSolver::compute(const MatrixType&, const MatrixType&, bool).
+      *
+      * \sa compute() for an example.
+      */
+    GeneralizedEigenSolver() : m_eivec(), m_alphas(), m_betas(), m_isInitialized(false), m_eigenvectorsOk(false), m_realQZ(), m_matS(), m_tmp() {}
+
+    /** \brief Default constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa GeneralizedEigenSolver()
+      */
+    GeneralizedEigenSolver(Index size)
+      : m_eivec(size, size),
+        m_alphas(size),
+        m_betas(size),
+        m_isInitialized(false),
+        m_eigenvectorsOk(false),
+        m_realQZ(size),
+        m_matS(size, size),
+        m_tmp(size)
+    {}
+
+    /** \brief Constructor; computes the generalized eigendecomposition of given matrix pair.
+      * 
+      * \param[in]  A  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  B  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are computed.
+      *
+      * This constructor calls compute() to compute the generalized eigenvalues
+      * and eigenvectors.
+      *
+      * \sa compute()
+      */
+    GeneralizedEigenSolver(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true)
+      : m_eivec(A.rows(), A.cols()),
+        m_alphas(A.cols()),
+        m_betas(A.cols()),
+        m_isInitialized(false),
+        m_eigenvectorsOk(false),
+        m_realQZ(A.cols()),
+        m_matS(A.rows(), A.cols()),
+        m_tmp(A.cols())
+    {
+      compute(A, B, computeEigenvectors);
+    }
+
+    /* \brief Returns the computed generalized eigenvectors.
+      *
+      * \returns  %Matrix whose columns are the (possibly complex) eigenvectors.
+      *
+      * \pre Either the constructor 
+      * GeneralizedEigenSolver(const MatrixType&,const MatrixType&, bool) or the member function
+      * compute(const MatrixType&, const MatrixType&, bool) has been called before, and
+      * \p computeEigenvectors was set to true (the default).
+      *
+      * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
+      * to eigenvalue number \f$ k \f$ as returned by eigenvalues().  The
+      * eigenvectors are normalized to have (Euclidean) norm equal to one. The
+      * matrix returned by this function is the matrix \f$ V \f$ in the
+      * generalized eigendecomposition \f$ A = B V D V^{-1} \f$, if it exists.
+      *
+      * \sa eigenvalues()
+      */
+//    EigenvectorsType eigenvectors() const;
+
+    /** \brief Returns an expression of the computed generalized eigenvalues.
+      *
+      * \returns An expression of the column vector containing the eigenvalues.
+      *
+      * It is a shortcut for \code this->alphas().cwiseQuotient(this->betas()); \endcode
+      * Note that betas might contain zeros. It is therefore not recommended to use this function,
+      * but rather to deal directly with the alphas and betas vectors.
+      *
+      * \pre Either the constructor 
+      * GeneralizedEigenSolver(const MatrixType&,const MatrixType&,bool) or the member function
+      * compute(const MatrixType&,const MatrixType&,bool) has been called before.
+      *
+      * The eigenvalues are repeated according to their algebraic multiplicity,
+      * so there are as many eigenvalues as rows in the matrix. The eigenvalues 
+      * are not sorted in any particular order.
+      *
+      * \sa alphas(), betas(), eigenvectors()
+      */
+    EigenvalueType eigenvalues() const
+    {
+      eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+      return EigenvalueType(m_alphas,m_betas);
+    }
+
+    /** \returns A const reference to the vectors containing the alpha values
+      *
+      * This vector permits the reconstruction of the j-th eigenvalue as alphas(j)/betas(j).
+      *
+      * \sa betas(), eigenvalues() */
+    ComplexVectorType alphas() const
+    {
+      eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+      return m_alphas;
+    }
+
+    /** \returns A const reference to the vectors containing the beta values
+      *
+      * This vector permits the reconstruction of the j-th eigenvalue as alphas(j)/betas(j).
+      *
+      * \sa alphas(), eigenvalues() */
+    VectorType betas() const
+    {
+      eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+      return m_betas;
+    }
+
+    /** \brief Computes generalized eigendecomposition of given matrix.
+      * 
+      * \param[in]  A  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  B  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed. 
+      * \returns    Reference to \c *this
+      *
+      * This function computes the eigenvalues of the real matrix pair \p A, \p B.
+      * The eigenvalues() function can be used to retrieve them.  If 
+      * \p computeEigenvectors is true, then the eigenvectors are also computed
+      * and can be retrieved by calling eigenvectors().
+      *
+      * The matrices are first reduced to real generalized Schur form using the RealQZ
+      * class. The generalized Schur decomposition is then used to compute the eigenvalues
+      * and eigenvectors.
+      *
+      * The cost of the computation is dominated by the cost of the
+      * generalized Schur decomposition.
+      *
+      * This method reuses the allocated data in the GeneralizedEigenSolver object.
+      */
+    GeneralizedEigenSolver& compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true);
+
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+      return m_realQZ.info();
+    }
+
+    /** Sets the maximal number of iterations allowed.
+    */
+    GeneralizedEigenSolver& setMaxIterations(Index maxIters)
+    {
+      m_realQZ.setMaxIterations(maxIters);
+      return *this;
+    }
+
+  protected:
+    MatrixType m_eivec;
+    ComplexVectorType m_alphas;
+    VectorType m_betas;
+    bool m_isInitialized;
+    bool m_eigenvectorsOk;
+    RealQZ<MatrixType> m_realQZ;
+    MatrixType m_matS;
+
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+    ColumnVectorType m_tmp;
+};
+
+//template<typename MatrixType>
+//typename GeneralizedEigenSolver<MatrixType>::EigenvectorsType GeneralizedEigenSolver<MatrixType>::eigenvectors() const
+//{
+//  eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+//  eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+//  Index n = m_eivec.cols();
+//  EigenvectorsType matV(n,n);
+//  // TODO
+//  return matV;
+//}
+
+template<typename MatrixType>
+GeneralizedEigenSolver<MatrixType>&
+GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
+{
+  eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());
+
+  // Reduce to generalized real Schur form:
+  // A = Q S Z and B = Q T Z
+  m_realQZ.compute(A, B, computeEigenvectors);
+
+  if (m_realQZ.info() == Success)
+  {
+    m_matS = m_realQZ.matrixS();
+    if (computeEigenvectors)
+      m_eivec = m_realQZ.matrixZ().transpose();
+  
+    // Compute eigenvalues from matS
+    m_alphas.resize(A.cols());
+    m_betas.resize(A.cols());
+    Index i = 0;
+    while (i < A.cols())
+    {
+      if (i == A.cols() - 1 || m_matS.coeff(i+1, i) == Scalar(0))
+      {
+        m_alphas.coeffRef(i) = m_matS.coeff(i, i);
+        m_betas.coeffRef(i)  = m_realQZ.matrixT().coeff(i,i);
+        ++i;
+      }
+      else
+      {
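+        // A 2x2 diagonal block of S yields a complex-conjugate pair of
+        // eigenvalues: with p the half-difference of the block's diagonal
+        // entries, the pair is (S(i+1,i+1) + p) +/- i*z where
+        // z^2 = |p^2 + S(i+1,i)*S(i,i+1)|.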
+        Scalar p = Scalar(0.5) * (m_matS.coeff(i, i) - m_matS.coeff(i+1, i+1));
+        Scalar z = internal::sqrt(internal::abs(p * p + m_matS.coeff(i+1, i) * m_matS.coeff(i, i+1)));
+        m_alphas.coeffRef(i)   = ComplexScalar(m_matS.coeff(i+1, i+1) + p, z);
+        m_alphas.coeffRef(i+1) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, -z);
+
+        m_betas.coeffRef(i)   = m_realQZ.matrixT().coeff(i,i);
+        m_betas.coeffRef(i+1) = m_realQZ.matrixT().coeff(i,i);
+        i += 2;
+      }
+    }
+  }
+
+  m_isInitialized = true;
+  m_eigenvectorsOk = false; // TODO: set to computeEigenvectors once eigenvectors() is implemented
+
+  return *this;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERALIZEDEIGENSOLVER_H
diff --git a/resources/3rdparty/eigen/Eigen/src/Eigenvalues/RealQZ.h b/resources/3rdparty/eigen/Eigen/src/Eigenvalues/RealQZ.h
new file mode 100644
index 000000000..fd6efdd56
--- /dev/null
+++ b/resources/3rdparty/eigen/Eigen/src/Eigenvalues/RealQZ.h
@@ -0,0 +1,618 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Alexey Korepanov <kaikaikai@yandex.ru>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REAL_QZ_H
+#define EIGEN_REAL_QZ_H
+
+namespace Eigen {
+
+  /** \eigenvalues_module \ingroup Eigenvalues_Module
+   *
+   *
+   * \class RealQZ
+   *
+   * \brief Performs a real QZ decomposition of a pair of square matrices
+   *
+   * \tparam _MatrixType the type of the matrix of which we are computing the
+   * real QZ decomposition; this is expected to be an instantiation of the
+   * Matrix class template.
+   *
+   * Given real square matrices A and B, this class computes the real QZ
+   * decomposition: \f$ A = Q S Z \f$, \f$ B = Q T Z \f$ where Q and Z are
+   * real orthogonal matrices, T is an upper-triangular matrix, and S is an
+   * upper quasi-triangular matrix. An orthogonal matrix is a matrix whose
+   * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
+   * matrix is a block-triangular matrix whose diagonal consists of 1-by-1
+   * blocks and 2-by-2 blocks where further reduction is impossible due to
+   * complex eigenvalues. 
+   *
+   * The eigenvalues of the pencil \f$ A - z B \f$ can be obtained from
+   * 1x1 and 2x2 blocks on the diagonals of S and T.
+   *
+   * Call the function compute() to compute the real QZ decomposition of a
+   * given pair of matrices. Alternatively, you can use the 
+   * RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ)
+   * constructor which computes the real QZ decomposition at construction
+   * time. Once the decomposition is computed, you can use the matrixS(),
+   * matrixT(), matrixQ() and matrixZ() functions to retrieve the matrices
+   * S, T, Q and Z in the decomposition. If computeQZ==false, some time
+   * is saved by not computing matrices Q and Z.
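+   *
+   * A minimal usage sketch (the \c MatrixXf type and the 4x4 size are
+   * illustrative assumptions only):
+   * \code
+   * MatrixXf A = MatrixXf::Random(4,4), B = MatrixXf::Random(4,4);
+   * RealQZ<MatrixXf> qz(A, B);
+   * if (qz.info() == Success) {
+   *   // up to rounding, A == qz.matrixQ() * qz.matrixS() * qz.matrixZ()
+   * }
+   * \endcode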
+   *
+   * Example: \include RealQZ_compute.cpp
+   * Output: \include RealQZ_compute.out
+   *
+   * \note The implementation is based on the algorithm in "Matrix Computations"
+   * by Gene H. Golub and Charles F. Van Loan, and the paper "An Algorithm for
+   * Generalized Matrix Eigenvalue Problems" by C. B. Moler and G. W. Stewart.
+   *
+   * \sa class RealSchur, class ComplexSchur, class EigenSolver, class ComplexEigenSolver
+   */
+
+  template<typename _MatrixType> class RealQZ
+  {
+    public:
+      typedef _MatrixType MatrixType;
+      enum {
+        RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+        ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+        Options = MatrixType::Options,
+        MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+        MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+      };
+      typedef typename MatrixType::Scalar Scalar;
+      typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+      typedef typename MatrixType::Index Index;
+
+      typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
+      typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+
+      /** \brief Default constructor.
+       *
+       * \param [in] size  Positive integer, size of the matrix whose QZ decomposition will be computed.
+       *
+       * The default constructor is useful in cases in which the user intends to
+       * perform decompositions via compute().  The \p size parameter is only
+       * used as a hint. It is not an error to give a wrong \p size, but it may
+       * impair performance.
+       *
+       * \sa compute() for an example.
+       */
+      RealQZ(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) : 
+        m_S(size, size),
+        m_T(size, size),
+        m_Q(size, size),
+        m_Z(size, size),
+        m_workspace(size*2),
+        m_maxIters(400),
+        m_isInitialized(false)
+        { }
+
+      /** \brief Constructor; computes real QZ decomposition of given matrices
+       * 
+       * \param[in]  A          Matrix A.
+       * \param[in]  B          Matrix B.
+       * \param[in]  computeQZ  If false, Q and Z are not computed.
+       *
+       * This constructor calls compute() to compute the QZ decomposition.
+       */
+      RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ = true) :
+        m_S(A.rows(),A.cols()),
+        m_T(A.rows(),A.cols()),
+        m_Q(A.rows(),A.cols()),
+        m_Z(A.rows(),A.cols()),
+        m_workspace(A.rows()*2),
+        m_maxIters(400),
+        m_isInitialized(false) {
+          compute(A, B, computeQZ);
+        }
+
+      /** \brief Returns matrix Q in the QZ decomposition. 
+       *
+       * \returns A const reference to the matrix Q.
+       */
+      const MatrixType& matrixQ() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
+        return m_Q;
+      }
+
+      /** \brief Returns matrix Z in the QZ decomposition. 
+       *
+       * \returns A const reference to the matrix Z.
+       */
+      const MatrixType& matrixZ() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
+        return m_Z;
+      }
+
+      /** \brief Returns matrix S in the QZ decomposition. 
+       *
+       * \returns A const reference to the matrix S.
+       */
+      const MatrixType& matrixS() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_S;
+      }
+
+      /** \brief Returns matrix T in the QZ decomposition. 
+       *
+       * \returns A const reference to the matrix T.
+       */
+      const MatrixType& matrixT() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_T;
+      }
+
+      /** \brief Computes QZ decomposition of given matrices. 
+       * 
+       * \param[in]  A          Matrix A.
+       * \param[in]  B          Matrix B.
+       * \param[in]  computeQZ  If false, Q and Z are not computed.
+       * \returns    Reference to \c *this
+       */
+      RealQZ& compute(const MatrixType& A, const MatrixType& B, bool computeQZ = true);
+
+      /** \brief Reports whether previous computation was successful.
+       *
+       * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+       */
+      ComputationInfo info() const
+      {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_info;
+      }
+
+      /** \brief Returns number of performed QR-like iterations.
+      */
+      Index iterations() const
+      {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_global_iter;
+      }
+
+      /** Sets the maximal number of iterations allowed to converge to one eigenvalue
+       * or decouple the problem.
+      */
+      RealQZ& setMaxIterations(Index maxIters)
+      {
+        m_maxIters = maxIters;
+        return *this;
+      }
+
+    private:
+
+      MatrixType m_S, m_T, m_Q, m_Z;
+      Matrix<Scalar,Dynamic,1> m_workspace;
+      ComputationInfo m_info;
+      Index m_maxIters;
+      bool m_isInitialized;
+      bool m_computeQZ;
+      Scalar m_normOfT, m_normOfS;
+      Index m_global_iter;
+
+      typedef Matrix<Scalar,3,1> Vector3s;
+      typedef Matrix<Scalar,2,1> Vector2s;
+      typedef Matrix<Scalar,2,2> Matrix2s;
+      typedef JacobiRotation<Scalar> JRs;
+
+      void hessenbergTriangular();
+      void computeNorms();
+      Index findSmallSubdiagEntry(Index iu);
+      Index findSmallDiagEntry(Index f, Index l);
+      void splitOffTwoRows(Index i);
+      void pushDownZero(Index z, Index f, Index l);
+      void step(Index f, Index l, Index iter);
+
+  }; // RealQZ
+
+  /** \internal Reduces S and T to upper Hessenberg-triangular form */
+  template<typename MatrixType>
+    void RealQZ<MatrixType>::hessenbergTriangular()
+    {
+
+      const Index dim = m_S.cols();
+
+      // perform QR decomposition of T, overwrite T with R, save Q
+      HouseholderQR<MatrixType> qrT(m_T);
+      m_T = qrT.matrixQR();
+      m_T.template triangularView<StrictlyLower>().setZero();
+      m_Q = qrT.householderQ();
+      // overwrite S with Q* S
+      m_S.applyOnTheLeft(m_Q.adjoint());
+      // init Z as Identity
+      if (m_computeQZ)
+        m_Z = MatrixType::Identity(dim,dim);
+      // reduce S to upper Hessenberg with Givens rotations
+      for (Index j=0; j<=dim-3; j++) {
+        for (Index i=dim-1; i>=j+2; i--) {
+          JRs G;
+          // kill S(i,j)
+          if(m_S.coeff(i,j) != 0)
+          {
+            G.makeGivens(m_S.coeff(i-1,j), m_S.coeff(i,j), &m_S.coeffRef(i-1, j));
+            m_S.coeffRef(i,j) = Scalar(0.0);
+            m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
+            m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
+          }
+          // update Q
+          if (m_computeQZ)
+            m_Q.applyOnTheRight(i-1,i,G);
+          // kill T(i,i-1)
+          if(m_T.coeff(i,i-1)!=Scalar(0))
+          {
+            G.makeGivens(m_T.coeff(i,i), m_T.coeff(i,i-1), &m_T.coeffRef(i,i));
+            m_T.coeffRef(i,i-1) = Scalar(0.0);
+            m_S.applyOnTheRight(i,i-1,G);
+            m_T.topRows(i).applyOnTheRight(i,i-1,G);
+          }
+          // update Z
+          if (m_computeQZ)
+            m_Z.applyOnTheLeft(i,i-1,G.adjoint());
+        }
+      }
+    }
+
+  /** \internal Computes vector L1 norms of S and T once they are in Hessenberg-triangular form */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::computeNorms()
+    {
+      const Index size = m_S.cols();
+      m_normOfS = Scalar(0.0);
+      m_normOfT = Scalar(0.0);
+      for (Index j = 0; j < size; ++j)
+      {
+        m_normOfS += m_S.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
+        m_normOfT += m_T.row(j).segment(j, size - j).cwiseAbs().sum();
+      }
+    }
+
+
+  /** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */
+  template<typename MatrixType>
+    inline typename MatrixType::Index RealQZ<MatrixType>::findSmallSubdiagEntry(Index iu)
+    {
+      Index res = iu;
+      while (res > 0)
+      {
+        Scalar s = internal::abs(m_S.coeff(res-1,res-1)) + internal::abs(m_S.coeff(res,res));
+        if (s == Scalar(0.0))
+          s = m_normOfS;
+        if (internal::abs(m_S.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
+          break;
+        res--;
+      }
+      return res;
+    }
+
+  /** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1)  */
+  template<typename MatrixType>
+    inline typename MatrixType::Index RealQZ<MatrixType>::findSmallDiagEntry(Index f, Index l)
+    {
+      Index res = l;
+      while (res >= f) {
+        if (internal::abs(m_T.coeff(res,res)) <= NumTraits<Scalar>::epsilon() * m_normOfT)
+          break;
+        res--;
+      }
+      return res;
+    }
+
+  /** \internal decouple 2x2 diagonal block in rows i, i+1 if eigenvalues are real */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::splitOffTwoRows(Index i)
+    {
+      const Index dim=m_S.cols();
+      if (internal::abs(m_S.coeff(i+1,i)) == Scalar(0))
+        return;
+      Index z = findSmallDiagEntry(i,i+1);
+      if (z==i-1)
+      {
+        // block of (S T^{-1})
+        Matrix2s STi = m_T.template block<2,2>(i,i).template triangularView<Upper>().
+          template solve<OnTheRight>(m_S.template block<2,2>(i,i));
+        Scalar p = Scalar(0.5)*(STi(0,0)-STi(1,1));
+        Scalar q = p*p + STi(1,0)*STi(0,1);
+        if (q>=0) {
+          Scalar root = internal::sqrt(q); // renamed to avoid shadowing the Index z above
+          // one QR-like iteration for ABi - lambda I
+          // is enough - when we know the exact eigenvalue in advance,
+          // convergence is immediate
+          JRs G;
+          if (p>=0)
+            G.makeGivens(p + root, STi(1,0));
+          else
+            G.makeGivens(p - root, STi(1,0));
+          m_S.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
+          m_T.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
+          // update Q
+          if (m_computeQZ)
+            m_Q.applyOnTheRight(i,i+1,G);
+
+          G.makeGivens(m_T.coeff(i+1,i+1), m_T.coeff(i+1,i));
+          m_S.topRows(i+2).applyOnTheRight(i+1,i,G);
+          m_T.topRows(i+2).applyOnTheRight(i+1,i,G);
+          // update Z
+          if (m_computeQZ)
+            m_Z.applyOnTheLeft(i+1,i,G.adjoint());
+
+          m_S.coeffRef(i+1,i) = Scalar(0.0);
+          m_T.coeffRef(i+1,i) = Scalar(0.0);
+        }
+      }
+      else
+      {
+        pushDownZero(z,i,i+1);
+      }
+    }
+
+  /** \internal use zero in T(z,z) to zero S(l,l-1), working in block f..l */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::pushDownZero(Index z, Index f, Index l)
+    {
+      JRs G;
+      const Index dim = m_S.cols();
+      for (Index zz=z; zz<l; zz++)
+      {
+        // push 0 down
+        Index firstColS = zz>f ? (zz-1) : zz;
+        G.makeGivens(m_T.coeff(zz, zz+1), m_T.coeff(zz+1, zz+1));
+        m_S.rightCols(dim-firstColS).applyOnTheLeft(zz,zz+1,G.adjoint());
+        m_T.rightCols(dim-zz).applyOnTheLeft(zz,zz+1,G.adjoint());
+        m_T.coeffRef(zz+1,zz+1) = Scalar(0.0);
+        // update Q
+        if (m_computeQZ)
+          m_Q.applyOnTheRight(zz,zz+1,G);
+        // kill S(zz+1, zz-1)
+        if (zz>f)
+        {
+          G.makeGivens(m_S.coeff(zz+1, zz), m_S.coeff(zz+1,zz-1));
+          m_S.topRows(zz+2).applyOnTheRight(zz, zz-1,G);
+          m_T.topRows(zz+1).applyOnTheRight(zz, zz-1,G);
+          m_S.coeffRef(zz+1,zz-1) = Scalar(0.0);
+          // update Z
+          if (m_computeQZ)
+            m_Z.applyOnTheLeft(zz,zz-1,G.adjoint());
+        }
+      }
+      // finally kill S(l,l-1)
+      G.makeGivens(m_S.coeff(l,l), m_S.coeff(l,l-1));
+      m_S.applyOnTheRight(l,l-1,G);
+      m_T.applyOnTheRight(l,l-1,G);
+      m_S.coeffRef(l,l-1)=Scalar(0.0);
+      // update Z
+      if (m_computeQZ)
+        m_Z.applyOnTheLeft(l,l-1,G.adjoint());
+    }
+
+  /** \internal QR-like iterative step for block f..l */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::step(Index f, Index l, Index iter) {
+      const Index dim = m_S.cols();
+
+      // x, y, z
+      Scalar x, y, z;
+      if (iter==10)
+      {
+        // Wilkinson ad hoc shift
+        const Scalar
+          a11=m_S.coeff(f+0,f+0), a12=m_S.coeff(f+0,f+1),
+          a21=m_S.coeff(f+1,f+0), a22=m_S.coeff(f+1,f+1), a32=m_S.coeff(f+2,f+1),
+          b12=m_T.coeff(f+0,f+1),
+          b11i=Scalar(1.0)/m_T.coeff(f+0,f+0),
+          b22i=Scalar(1.0)/m_T.coeff(f+1,f+1),
+          a87=m_S.coeff(l-1,l-2),
+          a98=m_S.coeff(l-0,l-1),
+          b77i=Scalar(1.0)/m_T.coeff(l-2,l-2),
+          b88i=Scalar(1.0)/m_T.coeff(l-1,l-1);
+        Scalar ss = internal::abs(a87*b77i) + internal::abs(a98*b88i),
+               lpl = Scalar(1.5)*ss,
+               ll = ss*ss;
+        x = ll + a11*a11*b11i*b11i - lpl*a11*b11i + a12*a21*b11i*b22i
+          - a11*a21*b12*b11i*b11i*b22i;
+        y = a11*a21*b11i*b11i - lpl*a21*b11i + a21*a22*b11i*b22i 
+          - a21*a21*b12*b11i*b11i*b22i;
+        z = a21*a32*b11i*b22i;
+      }
+      else if (iter==16)
+      {
+        // another exceptional shift
+        x = m_S.coeff(f,f)/m_T.coeff(f,f)-m_S.coeff(l,l)/m_T.coeff(l,l) + m_S.coeff(l,l-1)*m_T.coeff(l-1,l) /
+          (m_T.coeff(l-1,l-1)*m_T.coeff(l,l));
+        y = m_S.coeff(f+1,f)/m_T.coeff(f,f);
+        z = 0;
+      }
+      else if (iter>23 && !(iter%8))
+      {
+        // extremely exceptional shift
+        x = internal::random<Scalar>(-1.0,1.0);
+        y = internal::random<Scalar>(-1.0,1.0);
+        z = internal::random<Scalar>(-1.0,1.0);
+      }
+      else
+      {
+        // Compute the shifts: (x,y,z,0...) = (AB^-1 - l1 I) (AB^-1 - l2 I) e1
+        // where l1 and l2 are the eigenvalues of the 2x2 matrix C = U V^-1, and
+        // U and V are the 2x2 bottom-right submatrices of A and B. Thus:
+        //  = AB^-1AB^-1 + l1 l2 I - (l1+l2)(AB^-1)
+        //  = AB^-1AB^-1 + det(M) - tr(M)(AB^-1)
+        // Since we are only interested in having x, y, z with a correct ratio, we have:
+        const Scalar
+          a11 = m_S.coeff(f,f),     a12 = m_S.coeff(f,f+1),
+          a21 = m_S.coeff(f+1,f),   a22 = m_S.coeff(f+1,f+1),
+                                    a32 = m_S.coeff(f+2,f+1),
+
+          a88 = m_S.coeff(l-1,l-1), a89 = m_S.coeff(l-1,l),
+          a98 = m_S.coeff(l,l-1),   a99 = m_S.coeff(l,l),
+
+          b11 = m_T.coeff(f,f),     b12 = m_T.coeff(f,f+1),
+                                    b22 = m_T.coeff(f+1,f+1),
+
+          b88 = m_T.coeff(l-1,l-1), b89 = m_T.coeff(l-1,l),
+                                    b99 = m_T.coeff(l,l);
+
+        x = ( (a88/b88 - a11/b11)*(a99/b99 - a11/b11) - (a89/b99)*(a98/b88) + (a98/b88)*(b89/b99)*(a11/b11) ) * (b11/a21)
+          + a12/b22 - (a11/b11)*(b12/b22);
+        y = (a22/b22-a11/b11) - (a21/b11)*(b12/b22) - (a88/b88-a11/b11) - (a99/b99-a11/b11) + (a98/b88)*(b89/b99);
+        z = a32/b22;
+      }
+
+      JRs G;
+
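+      // Bulge chasing: each pass of Householder reflections and Givens
+      // rotations restores the Hessenberg-triangular form while implicitly
+      // applying the shift encoded in (x,y,z).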
+      for (Index k=f; k<=l-2; k++)
+      {
+        // variables for Householder reflections
+        Vector2s essential2;
+        Scalar tau, beta;
+
+        Vector3s hr(x,y,z);
+
+        // Q_k to annihilate S(k+1,k-1) and S(k+2,k-1)
+        hr.makeHouseholderInPlace(tau, beta);
+        essential2 = hr.template bottomRows<2>();
+        Index fc=(std::max)(k-1,Index(0));  // first col to update
+        m_S.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
+        m_T.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
+        if (m_computeQZ)
+          m_Q.template middleCols<3>(k).applyHouseholderOnTheRight(essential2, tau, m_workspace.data());
+        if (k>f)
+          m_S.coeffRef(k+2,k-1) = m_S.coeffRef(k+1,k-1) = Scalar(0.0);
+
+        // Z_{k1} to annihilate T(k+2,k+1) and T(k+2,k)
+        hr << m_T.coeff(k+2,k+2),m_T.coeff(k+2,k),m_T.coeff(k+2,k+1);
+        hr.makeHouseholderInPlace(tau, beta);
+        essential2 = hr.template bottomRows<2>();
+        {
+          Index lr = (std::min)(k+4,dim); // last row to update
+          Map<Matrix<Scalar,Dynamic,1> > tmp(m_workspace.data(),lr);
+          // S
+          tmp = m_S.template middleCols<2>(k).topRows(lr) * essential2;
+          tmp += m_S.col(k+2).head(lr);
+          m_S.col(k+2).head(lr) -= tau*tmp;
+          m_S.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
+          // T
+          tmp = m_T.template middleCols<2>(k).topRows(lr) * essential2;
+          tmp += m_T.col(k+2).head(lr);
+          m_T.col(k+2).head(lr) -= tau*tmp;
+          m_T.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
+        }
+        if (m_computeQZ)
+        {
+          // Z
+          Map<Matrix<Scalar,1,Dynamic> > tmp(m_workspace.data(),dim);
+          tmp = essential2.adjoint()*(m_Z.template middleRows<2>(k));
+          tmp += m_Z.row(k+2);
+          m_Z.row(k+2) -= tau*tmp;
+          m_Z.template middleRows<2>(k) -= essential2 * (tau*tmp);
+        }
+        m_T.coeffRef(k+2,k) = m_T.coeffRef(k+2,k+1) = Scalar(0.0);
+
+        // Z_{k2} to annihilate T(k+1,k)
+        G.makeGivens(m_T.coeff(k+1,k+1), m_T.coeff(k+1,k));
+        m_S.applyOnTheRight(k+1,k,G);
+        m_T.applyOnTheRight(k+1,k,G);
+        // update Z
+        if (m_computeQZ)
+          m_Z.applyOnTheLeft(k+1,k,G.adjoint());
+        m_T.coeffRef(k+1,k) = Scalar(0.0);
+
+        // update x,y,z
+        x = m_S.coeff(k+1,k);
+        y = m_S.coeff(k+2,k);
+        if (k < l-2)
+          z = m_S.coeff(k+3,k);
+      } // loop over k
+
+      // Q_{n-1} to annihilate y = S(l,l-2)
+      G.makeGivens(x,y);
+      m_S.applyOnTheLeft(l-1,l,G.adjoint());
+      m_T.applyOnTheLeft(l-1,l,G.adjoint());
+      if (m_computeQZ)
+        m_Q.applyOnTheRight(l-1,l,G);
+      m_S.coeffRef(l,l-2) = Scalar(0.0);
+
+      // Z_{n-1} to annihilate T(l,l-1)
+      G.makeGivens(m_T.coeff(l,l),m_T.coeff(l,l-1));
+      m_S.applyOnTheRight(l,l-1,G);
+      m_T.applyOnTheRight(l,l-1,G);
+      if (m_computeQZ)
+        m_Z.applyOnTheLeft(l,l-1,G.adjoint());
+      m_T.coeffRef(l,l-1) = Scalar(0.0);
+    }
+
+
+  template<typename MatrixType>
+    RealQZ<MatrixType>& RealQZ<MatrixType>::compute(const MatrixType& A_in, const MatrixType& B_in, bool computeQZ)
+    {
+
+      const Index dim = A_in.cols();
+
+      eigen_assert(A_in.rows()==dim && A_in.cols()==dim
+          && B_in.rows()==dim && B_in.cols()==dim
+          && "Need square matrices of the same dimension");
+
+      m_isInitialized = true;
+      m_computeQZ = computeQZ;
+      m_S = A_in; m_T = B_in;
+      m_workspace.resize(dim*2);
+      m_global_iter = 0;
+
+      // entrance point: hessenberg triangular decomposition
+      hessenbergTriangular();
+      // compute L1 vector norms of T, S into m_normOfS, m_normOfT
+      computeNorms();
+
+      Index l = dim-1, 
+            f, 
+            local_iter = 0;
+
+      while (l>0 && local_iter<m_maxIters)
+      {
+        f = findSmallSubdiagEntry(l);
+        // now rows and columns f..l (inclusive) decouple from the rest of the problem
+        if (f>0) m_S.coeffRef(f,f-1) = Scalar(0.0);
+        if (f == l) // One root found
+        {
+          l--;
+          local_iter = 0;
+        }
+        else if (f == l-1) // Two roots found
+        {
+          splitOffTwoRows(f);
+          l -= 2;
+          local_iter = 0;
+        }
+        else // No convergence yet
+        {
+          // if there's a zero on the diagonal of T, we can isolate an eigenvalue with Givens rotations
+          Index z = findSmallDiagEntry(f,l);
+          if (z>=f)
+          {
+            // zero found
+            pushDownZero(z,f,l);
+          }
+          else
+          {
+            // We are now sure that S.block(f,f, l-f+1,l-f+1) is unreduced upper-Hessenberg
+            // and T.block(f,f, l-f+1,l-f+1) is invertible upper-triangular, which allows us
+            // to apply a QR-like iteration to rows and columns f..l.
+            step(f,l, local_iter);
+            local_iter++;
+            m_global_iter++;
+          }
+        }
+      }
+      // check if we converged before reaching iterations limit
+      m_info = (local_iter<m_maxIters) ? Success : NoConvergence;
+      return *this;
+    } // end compute
+
+} // end namespace Eigen
+
+#endif // EIGEN_REAL_QZ_H
diff --git a/src/misc/const_templates.h b/src/misc/const_templates.h
new file mode 100644
index 000000000..a7877c6df
--- /dev/null
+++ b/src/misc/const_templates.h
@@ -0,0 +1,82 @@
+/*
+ * const_templates.h
+ *
+ *  Created on: 11.10.2012
+ *      Author: Thomas Heinemann
+ */
+
+#ifndef CONST_TEMPLATES_H_
+#define CONST_TEMPLATES_H_
+
+namespace mrmc {
+
+namespace misc {
+
+/*!
+ * Returns a constant value of 0 that fits the type it is being written to.
+ * As (at least) gcc cannot select the correct template instantiation from
+ * the return type alone, the function takes a pointer parameter from which
+ * the return type is inferred.
+ *
+ * <b>Parameter</b>
+ *
+ * The parameter is a pointer which is used to infer the return type (so, if
+ * you want the return value to be of type double, the parameter has to be of
+ * type double*). In most cases, it is a good choice to pass the address of
+ * the variable that is to be set.
+ */
+template<typename _Scalar>
+static inline _Scalar constGetZero(_Scalar*) {
+   return _Scalar(0);
+}
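+
+/* A hypothetical call-site sketch (the variable name is illustrative only):
+ *
+ *   double d;
+ *   d = mrmc::misc::constGetZero(&d);   // d == 0.0
+ */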
+
+/*! @cond TEMPLATE_SPECIALIZATION
+ * (exclude the specializations from the documentation) */
+template <>
+inline int_fast32_t constGetZero(int_fast32_t*) {
+   return 0;
+}
+
+/*! @internal
+ * Specialization of constGetZero for double
+ */
+template <>
+inline double constGetZero(double*) {
+   return 0.0;
+}
+/*! @endcond */
+
+/*!
+ * Returns a constant value of 1 that fits the type it is being written to.
+ * As (at least) gcc cannot select the correct template instantiation from
+ * the return type alone, the function takes a pointer parameter from which
+ * the return type is inferred.
+ *
+ * <b>Parameter</b>
+ *
+ * The parameter is a pointer which is used to infer the return type (so, if
+ * you want the return value to be of type double, the parameter has to be of
+ * type double*). In most cases, it is a good choice to pass the address of
+ * the variable that is to be set.
+ */
+template<typename _Scalar>
+static inline _Scalar constGetOne(_Scalar*) {
+   return _Scalar(1);
+}
+
+/*! @cond TEMPLATE_SPECIALIZATION
+ * (exclude the specializations from the documentation) */
+template<>
+inline int_fast32_t constGetOne(int_fast32_t*) {
+   return 1;
+}
+
+template<>
+inline double constGetOne(double*) {
+   return 1.0;
+}
+/*! @endcond */
+
+} //namespace misc
+} //namespace mrmc
+
+
+#endif /* CONST_TEMPLATES_H_ */
diff --git a/src/sparse/static_sparse_matrix.h b/src/sparse/static_sparse_matrix.h
index bc5abcb99..e919ccde0 100644
--- a/src/sparse/static_sparse_matrix.h
+++ b/src/sparse/static_sparse_matrix.h
@@ -11,6 +11,9 @@
 #include "src/exceptions/invalid_state.h"
 #include "src/exceptions/invalid_argument.h"
 #include "src/exceptions/out_of_range.h"
+#include "src/exceptions/file_IO_exception.h"
+
+#include "src/misc/const_templates.h"
 
 #include "Eigen/Sparse"
 
@@ -430,15 +433,58 @@ class StaticSparseMatrix {
 		uint_fast64_t row_start = row_indications[state - 1];
 		uint_fast64_t row_end = row_indications[state];
 
+
 		while (row_start < row_end) {
-			value_storage[row_start] = getConstZero();
+			value_storage[row_start] = mrmc::misc::constGetZero(value_storage);
 			row_start++;
 		}
 
-		diagonal_storage[state] = getConstOne();
+		diagonal_storage[state] = mrmc::misc::constGetOne(diagonal_storage);
 		return true;
 	}
 
+   /*!
+       Creates a DOT file which contains the graph of the transition structure
+       represented by the matrix.
+
+       @param  filename The name of the file to write to. If the file already
+                        exists, it will be overwritten.
+    */
+   void toDOTFile(const char* filename) {
+      FILE *P;
+
+      P = fopen(filename, "w");
+
+      if (P == NULL) {
+         pantheios::log_ERROR("File could not be opened.");
+         throw mrmc::exceptions::file_IO_exception();
+      }
+
+      fprintf(P, "digraph dtmc {\n");
+
+      uint_fast64_t row = 0;
+
+      for (uint_fast64_t i = 0; i < non_zero_entry_count; i++ ) {
+         //Check whether we have to switch to the new row
+         while (row_indications[row] <= i) {
+            ++row;
+            //write diagonal entry/self loop first
+            if (diagonal_storage[row] != 0) {
+               // cast explicitly: "%Lu" is not a standard conversion for uint_fast64_t
+               fprintf(P, "\t%llu -> %llu [label=%f]\n",
+                     static_cast<unsigned long long>(row),
+                     static_cast<unsigned long long>(row),
+                     static_cast<double>(diagonal_storage[row]));
+            }
+         }
+         fprintf(P, "\t%llu -> %llu [label=%f]\n",
+               static_cast<unsigned long long>(row),
+               static_cast<unsigned long long>(column_indications[i]),
+               static_cast<double>(value_storage[i]));
+      }
+
+      fprintf(P, "}\n");
+
+      fclose(P);
+   }
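+
+   /* For illustration, the emitted file has roughly this shape (the states
+    * and probabilities are made up):
+    *
+    *   digraph dtmc {
+    *       1 -> 1 [label=0.500000]
+    *       1 -> 2 [label=0.500000]
+    *   }
+    */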
+
+
  private:
 	uint_fast64_t current_size;
 
@@ -492,35 +538,6 @@ class StaticSparseMatrix {
 
 	//! 
 
-	template<typename _Scalar>
-	_Scalar constGetZero() const {
-		return _Scalar(0);
-	}
-
-	template <>
-	int_fast32_t constGetZero() const {
-		return 0;	
-	}
-
-	template <>
-	double constGetZero() const {
-		return 0.0;	
-	}
-
-	template<typename _Scalar>
-	_Scalar constGetOne() const {
-		return _Scalar(1);
-	}
-
-	template<>
-	int_fast32_t constGetOne() const {
-		return 1;
-	}
-
-	template<>
-	double constGetOne() const {
-		return 1.0;
-	}
 
 	template <typename _Scalar, typename _Index>
 	bool isEigenRowMajor(Eigen::SparseMatrix<_Scalar, Eigen::RowMajor, _Index>) {