From 1929cfaf7793ddae2c4d700e10f508812acb1ea9 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 29 Jul 2020 16:35:12 +0200
Subject: [PATCH 01/48] utility/vector: Added a few asserts in utility
 functions.

---
 src/storm/utility/vector.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/storm/utility/vector.h b/src/storm/utility/vector.h
index af6098038..b70453aaa 100644
--- a/src/storm/utility/vector.h
+++ b/src/storm/utility/vector.h
@@ -63,6 +63,7 @@ namespace storm {
              */
             template<class T>
             void setVectorValues(std::vector<T>& vector, storm::storage::BitVector const& positions, std::vector<T> const& values) {
+                STORM_LOG_ASSERT(positions.getNumberOfSetBits() <= values.size(), "The number of selected positions (" << positions.getNumberOfSetBits() << ") exceeds the size of the input vector (" << values.size() << ").");
                 uint_fast64_t oldPosition = 0;
                 for (auto position : positions) {
                     vector[position] = values[oldPosition++];
@@ -156,6 +157,8 @@ namespace storm {
              */
             template<class T>
             void selectVectorValues(std::vector<T>& vector, storm::storage::BitVector const& positions, std::vector<T> const& values) {
+                STORM_LOG_ASSERT(positions.getNumberOfSetBits() <= vector.size(), "The number of selected positions (" << positions.getNumberOfSetBits() << ") exceeds the size of the target vector (" << vector.size() << ").");
+                STORM_LOG_ASSERT(positions.size() == values.size(), "Size mismatch between the positions vector (" << positions.size() << ") and the values vector (" << values.size() << ").");
                 auto targetIt = vector.begin();
                 for (auto position : positions) {
                     *targetIt = values[position];
@@ -207,6 +210,8 @@ namespace storm {
              */
             template<class T>
             void selectVectorValues(std::vector<T>& vector, std::vector<uint_fast64_t> const& indexSequence, std::vector<T> const& values) {
+                STORM_LOG_ASSERT(indexSequence.size() <= vector.size(), "The number of selected positions (" << indexSequence.size() << ") exceeds the size of the target vector (" << vector.size() << ").");
+                
                 for (uint_fast64_t vectorIndex = 0; vectorIndex < vector.size(); ++vectorIndex){
                     vector[vectorIndex] = values[indexSequence[vectorIndex]];
                 }

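As an illustration of the contract these asserts enforce, here is a minimal caller sketch (hypothetical code, not part of the patch; exampleSetVectorValues is an invented name): setVectorValues writes one value per set bit, so the number of set bits must not exceed the size of the values vector.

    #include <vector>
    #include "storm/storage/BitVector.h"
    #include "storm/utility/vector.h"

    void exampleSetVectorValues() {
        std::vector<double> target(5, 0.0);
        storm::storage::BitVector positions(5);
        positions.set(1);
        positions.set(3);
        std::vector<double> values = {0.5, 0.25}; // one value per set bit
        // OK: 2 set bits, 2 values. target becomes {0, 0.5, 0, 0.25, 0}.
        storm::utility::vector::setVectorValues(target, positions, values);
        // Would trip the new assert in debug builds: 2 set bits, but only 1 value.
        // storm::utility::vector::setVectorValues(target, positions, std::vector<double>{0.5});
    }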
From d49210ac2e0df0755f9d51b3c845ebe9243c6b93 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 29 Jul 2020 16:51:35 +0200
Subject: [PATCH 02/48] Added DdType::None to encode that an explicit
 representation should be used.

---
 src/storm/storage/dd/DdType.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/storm/storage/dd/DdType.h b/src/storm/storage/dd/DdType.h
index d23933eed..712645b72 100644
--- a/src/storm/storage/dd/DdType.h
+++ b/src/storm/storage/dd/DdType.h
@@ -4,6 +4,7 @@
 namespace storm {
     namespace dd {
         enum class DdType {
+            None,
             CUDD,
             Sylvan
         };

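The new enum value acts as a compile-time tag: code that is templated on DdType can use None to select an explicit (sparse) representation, which is how the helpers introduced in the next patch use it. A minimal sketch (hypothetical, not part of the patch; usesExplicitRepresentation is an invented name):

    #include "storm/storage/dd/DdType.h"

    template<storm::dd::DdType Type>
    constexpr bool usesExplicitRepresentation() {
        // None encodes "no decision-diagram library", i.e., a sparse/explicit model.
        return Type == storm::dd::DdType::None;
    }

    static_assert(usesExplicitRepresentation<storm::dd::DdType::None>(), "explicit representation");
    static_assert(!usesExplicitRepresentation<storm::dd::DdType::Sylvan>(), "symbolic representation");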
From 486d62ff2c8670fa32acb46e570a4623630f6d5a Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 29 Jul 2020 16:54:45 +0200
Subject: [PATCH 03/48] First version of newly structured ModelCheckerHelpers
 (only MDP LRA properties, for now)

---
 .../helper/ModelCheckerHelper.cpp             |  47 ++
 .../modelchecker/helper/ModelCheckerHelper.h  |  72 +++
 .../helper/SingleValueModelCheckerHelper.cpp  |  83 +++
 .../helper/SingleValueModelCheckerHelper.h    | 100 +++
 ...eNondeterministicInfiniteHorizonHelper.cpp | 570 ++++++++++++++++++
 ...rseNondeterministicInfiniteHorizonHelper.h | 117 ++++
 6 files changed, 989 insertions(+)
 create mode 100644 src/storm/modelchecker/helper/ModelCheckerHelper.cpp
 create mode 100644 src/storm/modelchecker/helper/ModelCheckerHelper.h
 create mode 100644 src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
 create mode 100644 src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h

diff --git a/src/storm/modelchecker/helper/ModelCheckerHelper.cpp b/src/storm/modelchecker/helper/ModelCheckerHelper.cpp
new file mode 100644
index 000000000..f2c7e12f4
--- /dev/null
+++ b/src/storm/modelchecker/helper/ModelCheckerHelper.cpp
@@ -0,0 +1,47 @@
+#include "ModelCheckerHelper.h"
+
+#include "storm/utility/macros.h"
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            void ModelCheckerHelper<ValueType, DdType>::setRelevantStates(StateSet const& relevantStates) {
+                _relevantStates = relevantStates;
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            void ModelCheckerHelper<ValueType, DdType>::clearRelevantStates() {
+                _relevantStates = boost::none;
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            bool ModelCheckerHelper<ValueType, DdType>::hasRelevantStates() const {
+                return _relevantStates.is_initialized();
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            boost::optional<typename ModelCheckerHelper<ValueType, DdType>::StateSet> const& ModelCheckerHelper<ValueType, DdType>::getOptionalRelevantStates() const {
+                return _relevantStates;
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            typename ModelCheckerHelper<ValueType, DdType>::StateSet const& ModelCheckerHelper<ValueType, DdType>::getRelevantStates() const {
+                STORM_LOG_ASSERT(hasRelevantStates(), "Retrieving relevant states although none have been set.");
+                return _relevantStates.get();
+            }
+            
+            template class ModelCheckerHelper<double, storm::dd::DdType::None>;
+            template class ModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::None>;
+            template class ModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::None>;
+            
+            template class ModelCheckerHelper<double, storm::dd::DdType::Sylvan>;
+            template class ModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::Sylvan>;
+            template class ModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::Sylvan>;
+            
+            template class ModelCheckerHelper<double, storm::dd::DdType::CUDD>;
+            
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/storm/modelchecker/helper/ModelCheckerHelper.h b/src/storm/modelchecker/helper/ModelCheckerHelper.h
new file mode 100644
index 000000000..05169ae17
--- /dev/null
+++ b/src/storm/modelchecker/helper/ModelCheckerHelper.h
@@ -0,0 +1,72 @@
+#pragma once
+
+#include <type_traits>
+#include <boost/optional.hpp>
+
+#include "storm/storage/dd/DdType.h"
+#include "storm/storage/dd/Bdd.h"
+
+#include "storm/storage/BitVector.h"
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+            
+            /*!
+             * Helper class for solving a model checking query.
+             * @tparam ValueType The type of a single value.
+             * @tparam DdType The used library for Dds (or None in case of a sparse representation).
+             */
+            template <typename ValueType, storm::dd::DdType DdType = storm::dd::DdType::None>
+            class ModelCheckerHelper {
+            public:
+                ModelCheckerHelper() = default;
+                ~ModelCheckerHelper() = default;
+                
+                /*!
+                 * Identifies a subset of the model states
+                 */
+                using StateSet = typename std::conditional<DdType == storm::dd::DdType::None, storm::storage::BitVector, storm::dd::Bdd<DdType>>::type;
+                
+                /*!
+                 * Sets relevant states.
+                 * If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
+                 * In this case, an arbitrary result may be assigned to the non-relevant states.
+                 */
+                void setRelevantStates(StateSet const& relevantStates);
+                
+                /*!
+                 * Clears the relevant states.
+                 * If no relevant states are set, it is assumed that a result is required for all (initial and non-initial) states.
+                 */
+                void clearRelevantStates();
+                
+                /*!
+                 * @return true if there are relevant states set.
+                 * If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
+                 * In this case, an arbitrary result may be assigned to the non-relevant states.
+                 */
+                bool hasRelevantStates() const;
+                
+                /*!
+                 * @return relevant states (if there are any) or boost::none (otherwise).
+                 * If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
+                 * In this case, an arbitrary result may be assigned to the non-relevant states.
+                 */
+                boost::optional<StateSet> const& getOptionalRelevantStates() const;
+                
+                /*!
+                 * @pre Relevant states have to be set before calling this.
+                 * @return the relevant states.
+                 * If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
+                 * In this case, an arbitrary result may be assigned to the non-relevant states.
+                 */
+                StateSet const& getRelevantStates() const;
+
+            private:
+                boost::optional<StateSet> _relevantStates;
+            };
+        }
+    }
+}
\ No newline at end of file
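A sketch of what the std::conditional alias resolves to (hypothetical compile-time check, not part of the patch; SparseHelper and SylvanHelper are invented names):

    #include <type_traits>
    #include "storm/modelchecker/helper/ModelCheckerHelper.h"

    // With the default DdType::None, states are identified by an explicit BitVector;
    // with a Dd library such as Sylvan, they are identified by a Bdd.
    using SparseHelper = storm::modelchecker::helper::ModelCheckerHelper<double>;
    using SylvanHelper = storm::modelchecker::helper::ModelCheckerHelper<double, storm::dd::DdType::Sylvan>;

    static_assert(std::is_same<SparseHelper::StateSet, storm::storage::BitVector>::value, "sparse state sets");
    static_assert(std::is_same<SylvanHelper::StateSet, storm::dd::Bdd<storm::dd::DdType::Sylvan>>::value, "symbolic state sets");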
diff --git a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
new file mode 100644
index 000000000..845543f26
--- /dev/null
+++ b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
@@ -0,0 +1,83 @@
+#include "SingleValueModelCheckerHelper.h"
+
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            void SingleValueModelCheckerHelper<ValueType, DdType>::setOptimizationDirection(storm::solver::OptimizationDirection const& direction) {
+                _optimizationDirection = direction;
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            void SingleValueModelCheckerHelper<ValueType, DdType>::clearOptimizationDirection() {
+                _optimizationDirection = boost::none;
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            bool SingleValueModelCheckerHelper<ValueType, DdType>::isOptimizationDirectionSet() const {
+                return _optimizationDirection.is_initialized();
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            storm::solver::OptimizationDirection SingleValueModelCheckerHelper<ValueType, DdType>::getOptimizationDirection() const {
+                STORM_LOG_ASSERT(isOptimizationDirectionSet(), "Requested optimization direction but none was set.");
+                return _optimizationDirection.get();
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            bool SingleValueModelCheckerHelper<ValueType, DdType>::minimize() const {
+                return storm::solver::minimize(getOptimizationDirection());
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            bool SingleValueModelCheckerHelper<ValueType, DdType>::maximize() const {
+                return storm::solver::maximize(getOptimizationDirection());
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            boost::optional<storm::solver::OptimizationDirection> SingleValueModelCheckerHelper<ValueType, DdType>::getOptionalOptimizationDirection() const {
+                return _optimizationDirection;
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            void SingleValueModelCheckerHelper<ValueType, DdType>::setValueThreshold(storm::logic::ComparisonType const& comparisonType, ValueType const& threshold) {
+                _valueThreshold = std::make_pair(comparisonType, threshold);
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            void SingleValueModelCheckerHelper<ValueType, DdType>::clearValueThreshold() {
+                _valueThreshold = boost::none;
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            bool SingleValueModelCheckerHelper<ValueType, DdType>::isValueThresholdSet() const {
+                return _valueThreshold.is_initialized();
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            storm::logic::ComparisonType const& SingleValueModelCheckerHelper<ValueType, DdType>::getValueThresholdComparisonType() const {
+                STORM_LOG_ASSERT(isValueThresholdSet(), "Value threshold comparison type was requested but not set before.");
+                return _valueThreshold->first;
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            ValueType const& SingleValueModelCheckerHelper<ValueType, DdType>::getValueThresholdValue() const {
+                STORM_LOG_ASSERT(isValueThresholdSet(), "Value threshold value was requested but not set before.");
+                return _valueThreshold->second;
+            }
+ 
+            template class SingleValueModelCheckerHelper<double, storm::dd::DdType::None>;
+            template class SingleValueModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::None>;
+            template class SingleValueModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::None>;
+            
+            template class SingleValueModelCheckerHelper<double, storm::dd::DdType::Sylvan>;
+            template class SingleValueModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::Sylvan>;
+            template class SingleValueModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::Sylvan>;
+            
+            template class SingleValueModelCheckerHelper<double, storm::dd::DdType::CUDD>;
+            
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
new file mode 100644
index 000000000..67bb2b7df
--- /dev/null
+++ b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
@@ -0,0 +1,100 @@
+#pragma once
+
+#include "ModelCheckerHelper.h"
+
+#include "storm/solver/OptimizationDirection.h"
+#include "storm/logic/ComparisonType.h"
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+            
+            /*!
+             * Helper for model checking queries where we are interested in (optimizing) a single value per state.
+             * @tparam ValueType The type of a value
+             * @tparam DdType The used library for Dds (or None in case of a sparse representation)
+             */
+            template <typename ValueType, storm::dd::DdType DdType = storm::dd::DdType::None>
+            class SingleValueModelCheckerHelper : public ModelCheckerHelper<ValueType, DdType> {
+            public:
+                SingleValueModelCheckerHelper() = default;
+                ~SingleValueModelCheckerHelper() = default;
+                
+                /*!
+                 * Sets the optimization direction, i.e., whether we want to minimize or maximize the value for each state.
+                 * Has no effect for models without nondeterminism.
+                 * Has to be set if there is nondeterminism in the model.
+                 */
+                void setOptimizationDirection(storm::solver::OptimizationDirection const& direction);
+                
+                /*!
+                 * Clears the optimization direction if it was set before.
+                 */
+                void clearOptimizationDirection();
+                
+                /*!
+                 * @return true if there is an optimization direction set
+                 */
+                bool isOptimizationDirectionSet() const;
+                
+                /*!
+                 * @pre an optimization direction has to be set before calling this.
+                 * @return the optimization direction.
+                 */
+                storm::solver::OptimizationDirection getOptimizationDirection() const;
+                
+                /*!
+                 * @pre an optimization direction has to be set before calling this.
+                 * @return true iff the optimization goal is to minimize the value for each state
+                 */
+                bool minimize() const;
+                
+                /*!
+                 * @pre an optimization direction has to be set before calling this.
+                 * @return true iff the optimization goal is to maximize the value for each state
+                 */
+                bool maximize() const;
+                
+                /*!
+                 * @return The optimization direction (if it was set)
+                 */
+                boost::optional<storm::solver::OptimizationDirection> getOptionalOptimizationDirection() const;
+                
+                /*!
+                 * Sets a goal threshold for the value at each state. If such a threshold is set, it is assumed that we are only interested
+                 * in the satisfaction of the threshold. Setting this allows the helper to compute values only up to the precision
+                 * where satisfaction of the threshold can be decided.
+                 * @param comparisonType The relation used when comparing computed values (left hand side) with the given threshold value (right hand side).
+                 * @param thresholdValue The value used on the right hand side of the comparison relation.
+                 */
+                void setValueThreshold(storm::logic::ComparisonType const& comparisonType, ValueType const& thresholdValue);
+                
+                /*!
+                 * Clears the valueThreshold if it was set before.
+                 */
+                void clearValueThreshold();
+                
+                /*!
+                 * @return true, if a value threshold has been set.
+                 */
+                bool isValueThresholdSet() const;
+                
+                /*!
+                 * @pre A value threshold has to be set before calling this.
+                 * @return The relation used when comparing computed values (left hand side) with the specified threshold value (right hand side).
+                 */
+                storm::logic::ComparisonType const& getValueThresholdComparisonType() const;
+                
+                /*!
+                 * @pre A value threshold has to be set before calling this.
+                 * @return The value used on the right hand side of the comparison relation.
+                 */
+                ValueType const& getValueThresholdValue() const;
+                
+            private:
+                boost::optional<storm::solver::OptimizationDirection> _optimizationDirection;
+                boost::optional<std::pair<storm::logic::ComparisonType, ValueType>> _valueThreshold;
+            };
+        }
+    }
+}
\ No newline at end of file
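A typical configuration sequence for these setters (hypothetical driver code, not part of the patch; configure is an invented name, assuming double values and the sparse default):

    #include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"

    void configure(storm::modelchecker::helper::SingleValueModelCheckerHelper<double>& helper) {
        // The model has nondeterminism, so an optimization direction must be set.
        helper.setOptimizationDirection(storm::solver::OptimizationDirection::Maximize);
        // We only care whether the value exceeds 0.99, so the helper may stop
        // refining as soon as satisfaction of the threshold can be decided.
        helper.setValueThreshold(storm::logic::ComparisonType::Greater, 0.99);
    }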
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
new file mode 100644
index 000000000..40e8d2dcd
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
@@ -0,0 +1,570 @@
+#include "SparseNondeterministicInfiniteHorizonHelper.h"
+
+#include "storm/solver/MinMaxLinearEquationSolver.h"
+#include "storm/solver/Multiplier.h"
+#include "storm/solver/LpSolver.h"
+
+#include "storm/utility/graph.h"
+#include "storm/utility/SignalHandler.h"
+#include "storm/utility/solver.h"
+#include "storm/utility/vector.h"
+
+#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
+#include "storm/environment/solver/MinMaxSolverEnvironment.h"
+
+#include "storm/exceptions/NotImplementedException.h"
+#include "storm/exceptions/UnmetRequirementException.h"
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+        
+            template <typename ValueType>
+            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions) : _transitionMatrix(transitionMatrix), _backwardTransitions(backwardTransitions), _markovianStates(nullptr), _exitRates(nullptr), _produceScheduler(false) {
+                // Intentionally left empty.
+            }
+            
+            template <typename ValueType>
+            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(backwardTransitions), _markovianStates(&markovianStates), _exitRates(&exitRates) {
+                // Intentionally left empty.
+            }
+            
+            template <typename ValueType>
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
+                return computeLongRunAverageValues(env, [&psiStates] (uint64_t stateIndex, uint64_t) { return psiStates.get(stateIndex) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>();});
+            }
+            
+            
+            template <typename ValueType>
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
+                if (_markovianStates) {
+                    return computeLongRunAverageValues(env, [&] (uint64_t stateIndex, uint64_t globalChoiceIndex) {
+                        if (rewardModel.hasStateRewards() && _markovianStates->get(stateIndex)) {
+                            return rewardModel.getTotalStateActionReward(stateIndex, globalChoiceIndex, _transitionMatrix, (ValueType) (storm::utility::one<ValueType>() / (*_exitRates)[stateIndex]));
+                        } else {
+                            return rewardModel.getTotalStateActionReward(stateIndex, globalChoiceIndex, _transitionMatrix, storm::utility::zero<ValueType>());
+                        }
+                    });
+                } else {
+                    return computeLongRunAverageValues(env, [&] (uint64_t stateIndex, uint64_t globalChoiceIndex) {
+                        return rewardModel.getTotalStateActionReward(stateIndex, globalChoiceIndex, _transitionMatrix);
+                    });
+                }
+            }
+            
+            template <typename ValueType>
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const& combinedStateActionRewards) {
+                return computeLongRunAverageValues(env, [&combinedStateActionRewards] (uint64_t, uint64_t globalChoiceIndex) {
+                    return combinedStateActionRewards[globalChoiceIndex];
+                });
+            }
+            
+            template <typename ValueType>
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter) {
+                
+                // Prepare an environment for the underlying solvers
+                auto underlyingSolverEnvironment = env;
+                if (env.solver().isForceSoundness()) {
+                    // For sound computations, the error in the MECs plus the error in the remaining system must not exceed the user-defined precision.
+                    underlyingSolverEnvironment.solver().minMax().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
+                    underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
+                    underlyingSolverEnvironment.solver().lra().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
+                }
+                
+                // If requested, allocate memory for the choices made
+                if (isProduceSchedulerSet()) {
+                    if (!_producedOptimalChoices.is_initialized()) {
+                        _producedOptimalChoices.emplace();
+                    }
+                    _producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
+                }
+                
+                // Start by decomposing the model into its MECs.
+                storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(_transitionMatrix, _backwardTransitions);
+
+                // Compute the long-run average for all end components in isolation.
+                std::vector<ValueType> mecLraValues;
+                mecLraValues.reserve(mecDecomposition.size());
+                for (auto const& mec : mecDecomposition) {
+                    mecLraValues.push_back(computeLraForMec(underlyingSolverEnvironment, combinedStateActionRewardsGetter, mec));
+                }
+                
+                // Solve the resulting SSP where end components are collapsed into single auxiliary states
+                return buildAndSolveSsp(underlyingSolverEnvironment, mecDecomposition, mecLraValues);
+            }
+            
+            
+            template <typename ValueType>
+            void SparseNondeterministicInfiniteHorizonHelper<ValueType>::setProduceScheduler(bool value) {
+                _produceScheduler = value;
+            }
+            
+            template <typename ValueType>
+            bool SparseNondeterministicInfiniteHorizonHelper<ValueType>::isProduceSchedulerSet() const {
+                return _produceScheduler;
+            }
+            
+            template <typename ValueType>
+            std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const {
+                STORM_LOG_ASSERT(isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
+                STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
+                return _producedOptimalChoices.get();
+            }
+            
+            template <typename ValueType>
+            std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() {
+                STORM_LOG_ASSERT(isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
+                STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
+                return _producedOptimalChoices.get();
+            }
+            
+            template <typename ValueType>
+            storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::extractScheduler() const {
+                auto const& optimalChoices = getProducedOptimalChoices();
+                storm::storage::Scheduler<ValueType> scheduler(optimalChoices.size());
+                for (uint64_t state = 0; state < optimalChoices.size(); ++state) {
+                    scheduler.setChoice(optimalChoices[state], state);
+                }
+                return scheduler;
+            }
+            
+            template <typename ValueType>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMec(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+                
+                // FIXME: MA
+                // If the MEC only consists of a single state, we compute the LRA value directly
+                if (++mec.begin() == mec.end()) {
+                    uint64_t state = mec.begin()->first;
+                    auto choiceIt = mec.begin()->second.begin();
+                    ValueType result = combinedStateActionRewardsGetter(state, *choiceIt);
+                    uint64_t bestChoice = *choiceIt;
+                    for (++choiceIt; choiceIt != mec.begin()->second.end(); ++choiceIt) {
+                        ValueType choiceValue = combinedStateActionRewardsGetter(state, *choiceIt);
+                        if (this->minimize()) {
+                            if (result > choiceValue) {
+                                result = std::move(choiceValue);
+                                bestChoice = *choiceIt;
+                            }
+                        } else {
+                            if (result < choiceValue) {
+                                result = std::move(choiceValue);
+                                bestChoice = *choiceIt;
+                            }
+                        }
+                    }
+                    if (isProduceSchedulerSet()) {
+                        _producedOptimalChoices.get()[state] = bestChoice - _transitionMatrix.getRowGroupIndices()[state];
+                    }
+                    return result;
+                }
+                
+                // Solve MEC with the method specified in the settings
+                storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod();
+                if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::LinearProgramming) {
+                    STORM_LOG_INFO("Selecting 'LP' as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
+                    method = storm::solver::LraMethod::LinearProgramming;
+                } else if (env.solver().isForceSoundness() && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) {
+                    STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
+                    method = storm::solver::LraMethod::ValueIteration;
+                }
+                STORM_LOG_ERROR_COND(!isProduceSchedulerSet() || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
+                if (method == storm::solver::LraMethod::LinearProgramming) {
+                    return computeLraForMecLp(env, combinedStateActionRewardsGetter, mec);
+                } else if (method == storm::solver::LraMethod::ValueIteration) {
+                    return computeLraForMecVi(env, combinedStateActionRewardsGetter, mec);
+                } else {
+                    STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
+                }
+            }
+            
+            template <typename ValueType>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+                // Initialize data about the MEC
+                storm::storage::BitVector mecStates(_transitionMatrix.getRowGroupCount(), false);
+                storm::storage::BitVector mecChoices(_transitionMatrix.getRowCount(), false);
+                for (auto const& stateChoicesPair : mec) {
+                    mecStates.set(stateChoicesPair.first);
+                    for (auto const& choice : stateChoicesPair.second) {
+                        mecChoices.set(choice);
+                    }
+                }
+                
+                boost::container::flat_map<uint64_t, uint64_t> toSubModelStateMapping;
+                uint64_t currState = 0;
+                toSubModelStateMapping.reserve(mecStates.getNumberOfSetBits());
+                for (auto const& mecState : mecStates) {
+                    toSubModelStateMapping.insert(std::pair<uint64_t, uint64_t>(mecState, currState));
+                    ++currState;
+                }
+                
+                // Get a transition matrix that only considers the states and choices within the MEC
+                storm::storage::SparseMatrixBuilder<ValueType> mecTransitionBuilder(mecChoices.getNumberOfSetBits(), mecStates.getNumberOfSetBits(), 0, true, true, mecStates.getNumberOfSetBits());
+                std::vector<ValueType> choiceValues;
+                choiceValues.reserve(mecChoices.getNumberOfSetBits());
+                uint64_t currRow = 0;
+                ValueType selfLoopProb = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
+                ValueType scalingFactor = storm::utility::one<ValueType>() - selfLoopProb;
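+                // Adding a self-loop with probability selfLoopProb to every choice and scaling the
+                // original distribution by scalingFactor = 1 - selfLoopProb makes the MEC matrix
+                // aperiodic without changing the long-run average; the scaling is undone when the
+                // result of the iteration is returned below.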
+                for (auto const& mecState : mecStates) {
+                    mecTransitionBuilder.newRowGroup(currRow);
+                    uint64_t groupStart = _transitionMatrix.getRowGroupIndices()[mecState];
+                    uint64_t groupEnd = _transitionMatrix.getRowGroupIndices()[mecState + 1];
+                    for (uint64_t choice = mecChoices.getNextSetIndex(groupStart); choice < groupEnd; choice = mecChoices.getNextSetIndex(choice + 1)) {
+                        bool insertedDiagElement = false;
+                        for (auto const& entry : _transitionMatrix.getRow(choice)) {
+                            uint64_t column = toSubModelStateMapping[entry.getColumn()];
+                            if (!insertedDiagElement && entry.getColumn() > mecState) {
+                                mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
+                                insertedDiagElement = true;
+                            }
+                            if (!insertedDiagElement && entry.getColumn() == mecState) {
+                                mecTransitionBuilder.addNextValue(currRow, column, selfLoopProb + scalingFactor * entry.getValue());
+                                insertedDiagElement = true;
+                            } else {
+                                mecTransitionBuilder.addNextValue(currRow, column, scalingFactor * entry.getValue());
+                            }
+                        }
+                        if (!insertedDiagElement) {
+                            mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
+                        }
+                        
+                        // Compute the rewards obtained for this choice
+                        choiceValues.push_back(scalingFactor * combinedStateActionRewardsGetter(mecState, choice));
+                        
+                        ++currRow;
+                    }
+                }
+                auto mecTransitions = mecTransitionBuilder.build();
+                STORM_LOG_ASSERT(mecTransitions.isProbabilistic(), "The MEC matrix is not probabilistic.");
+                
+                // Start the iterations
+                ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()) / scalingFactor;
+                bool relative = env.solver().lra().getRelativeTerminationCriterion();
+                std::vector<ValueType> x(mecTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
+                std::vector<ValueType> xPrime = x;
+                auto dir = this->getOptimizationDirection();
+                
+                auto multiplier = storm::solver::MultiplierFactory<ValueType>().create(env, mecTransitions);
+                ValueType maxDiff, minDiff;
+                
+                uint64_t iter = 0;
+                boost::optional<uint64_t> maxIter;
+                if (env.solver().lra().isMaximalIterationCountSet()) {
+                    maxIter = env.solver().lra().getMaximalIterationCount();
+                }
+                while (!maxIter.is_initialized() || iter < maxIter.get()) {
+                    ++iter;
+                    // Compute the obtained values for the next step
+                    multiplier->multiplyAndReduce(env, dir, x, &choiceValues, x);
+                    
+                    // Update xPrime and check for convergence.
+                    // To avoid large (and numerically unstable) x-values, we subtract a reference value.
+                    auto xIt = x.begin();
+                    auto xPrimeIt = xPrime.begin();
+                    ValueType refVal = *xIt;
+                    maxDiff = *xIt - *xPrimeIt;
+                    minDiff = maxDiff;
+                    *xIt -= refVal;
+                    *xPrimeIt = *xIt;
+                    for (++xIt, ++xPrimeIt; xIt != x.end(); ++xIt, ++xPrimeIt) {
+                        ValueType diff = *xIt - *xPrimeIt;
+                        maxDiff = std::max(maxDiff, diff);
+                        minDiff = std::min(minDiff, diff);
+                        *xIt -= refVal;
+                        *xPrimeIt = *xIt;
+                    }
+
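+                    // The span (maxDiff - minDiff) of the per-state value differences bounds how far
+                    // the current gain estimate can still move, so we stop once it is small enough
+                    // (measured relative to minDiff if the relative criterion is requested).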
+                    if ((maxDiff - minDiff) <= (relative ? (precision * minDiff) : precision)) {
+                        break;
+                    }
+                    if (storm::utility::resources::isTerminate()) {
+                        break;
+                    }
+                }
+                if (maxIter.is_initialized() && iter == maxIter.get()) {
+                    STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
+                } else {
+                    STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
+                }
+                
+                if (isProduceSchedulerSet()) {
+                    std::vector<uint_fast64_t> localMecChoices(mecTransitions.getRowGroupCount(), 0);
+                    multiplier->multiplyAndReduce(env, dir, x, &choiceValues, x, &localMecChoices);
+                    auto localMecChoiceIt = localMecChoices.begin();
+                    for (auto const& mecState : mecStates) {
+                        // Get the choice index of the selected MEC choice with respect to the global transition matrix.
+                        uint_fast64_t globalChoice = mecChoices.getNextSetIndex(_transitionMatrix.getRowGroupIndices()[mecState]);
+                        for (uint_fast64_t i = 0; i < *localMecChoiceIt; ++i) {
+                            globalChoice = mecChoices.getNextSetIndex(globalChoice + 1);
+                        }
+                        STORM_LOG_ASSERT(globalChoice < _transitionMatrix.getRowGroupIndices()[mecState + 1], "Invalid global choice for MEC state.");
+                        _producedOptimalChoices.get()[mecState] = globalChoice - _transitionMatrix.getRowGroupIndices()[mecState];
+                        ++localMecChoiceIt;
+                    }
+                }
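+                // minDiff and maxDiff enclose the gain of the MEC, so we return their midpoint and
+                // divide by scalingFactor to undo the reward scaling applied above.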
+                return (maxDiff + minDiff) / (storm::utility::convertNumber<ValueType>(2.0) * scalingFactor);
+
+            }
+            
+            template <typename ValueType>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+                std::shared_ptr<storm::solver::LpSolver<ValueType>> solver = storm::utility::solver::getLpSolver<ValueType>("LRA for MEC");
+                solver->setOptimizationDirection(invert(this->getOptimizationDirection()));
+                
+                // First, we need to create the variables for the problem.
+                std::map<uint_fast64_t, storm::expressions::Variable> stateToVariableMap;
+                for (auto const& stateChoicesPair : mec) {
+                    std::string variableName = "h" + std::to_string(stateChoicesPair.first);
+                    stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName);
+                }
+                storm::expressions::Variable lambda = solver->addUnboundedContinuousVariable("L", 1);
+                solver->update();
+                
+                // Now we encode the problem as constraints.
+                for (auto const& stateChoicesPair : mec) {
+                    uint_fast64_t state = stateChoicesPair.first;
+                    
+                    // Now, based on the type of the state, create a suitable constraint.
+                    for (auto choice : stateChoicesPair.second) {
+                        storm::expressions::Expression constraint = -lambda;
+                        
+                        for (auto element : _transitionMatrix.getRow(choice)) {
+                            constraint = constraint + stateToVariableMap.at(element.getColumn()) * solver->getConstant(element.getValue());
+                        }
+                        constraint = solver->getConstant(combinedStateActionRewardsGetter(state, choice)) + constraint;
+                        
+                        if (this->minimize()) {
+                            constraint = stateToVariableMap.at(state) <= constraint;
+                        } else {
+                            constraint = stateToVariableMap.at(state) >= constraint;
+                        }
+                        solver->addConstraint("state" + std::to_string(state) + "," + std::to_string(choice), constraint);
+                    }
+                }
+                
+                solver->optimize();
+                return solver->getContinuousValue(lambda);
+            }
+            
+            /*!
+             * Auxiliary function that adds the entries of the Ssp Matrix for a single choice (i.e., row)
+             * Transitions that lead to a MEC state will be redirected to a new auxiliary state (there is one aux. state for each MEC).
+             * Transitions that don't lead to a MEC state are copied (taking a state index mapping into account).
+             */
+            template <typename ValueType>
+            void addSspMatrixChoice(uint64_t const& inputMatrixChoice, storm::storage::SparseMatrix<ValueType> const& inputTransitionMatrix, std::vector<uint64_t> const& inputToSspStateMap, uint64_t const& numberOfStatesNotInMecs, uint64_t const& currentSspChoice, storm::storage::SparseMatrixBuilder<ValueType>& sspMatrixBuilder) {
+            
+                // As there could be multiple transitions to the same MEC, we accumulate them in this map before adding them to the matrix builder.
+                std::map<uint64_t, ValueType> auxiliaryStateToProbabilityMap;
+                
+                for (auto transition : inputTransitionMatrix.getRow(inputMatrixChoice)) {
+                    if (!storm::utility::isZero(transition.getValue())) {
+                        auto const& sspTransitionTarget = inputToSspStateMap[transition.getColumn()];
+                        // Since the auxiliary MEC states are appended at the end of the matrix, we can use this check to
+                        // decide whether the transition leads to a MEC state or not
+                        if (sspTransitionTarget < numberOfStatesNotInMecs) {
+                            // If the target state is not contained in a MEC, we can copy over the entry.
+                            sspMatrixBuilder.addNextValue(currentSspChoice, sspTransitionTarget, transition.getValue());
+                        } else {
+                            // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
+                            // so that we are able to write the cumulative probability to the MEC into the matrix.
+                            auto insertionRes = auxiliaryStateToProbabilityMap.emplace(sspTransitionTarget, transition.getValue());
+                            if (!insertionRes.second) {
+                                // sspTransitionTarget already existed in the map, i.e., there already was a transition to that MEC.
+                                // Hence, we add up the probabilities.
+                                insertionRes.first->second += transition.getValue();
+                            }
+                        }
+                    }
+                }
+                
+                // Now insert all (cumulative) probability values that target a MEC.
+                for (auto const& mecToProbEntry : auxiliaryStateToProbabilityMap) {
+                    sspMatrixBuilder.addNextValue(currentSspChoice, mecToProbEntry.first, mecToProbEntry.second);
+                }
+            }
+            
+            template <typename ValueType>
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, storm::storage::MaximalEndComponentDecomposition<ValueType> const& mecDecomposition, std::vector<ValueType> const& mecLraValues) {
+                
+                // Let's improve readability a bit
+                uint64_t numberOfStates = _transitionMatrix.getRowGroupCount();
+                auto const& nondeterministicChoiceIndices = _transitionMatrix.getRowGroupIndices();
+                
+                // For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
+                // which redirects all transitions leading to a former MEC state to a new auxiliary state.
+                // There will be one auxiliary state for each MEC. These states will be appended to the end of the matrix.
+                
+                // First gather the states that are part of a MEC
+                // and create a mapping from states that lie in a MEC to the corresponding MEC index.
+                storm::storage::BitVector statesInMecs(numberOfStates);
+                std::vector<uint64_t> inputToSspStateMap(numberOfStates, std::numeric_limits<uint64_t>::max());
+                for (uint64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
+                    for (auto const& stateChoicesPair : mecDecomposition[currentMecIndex]) {
+                        statesInMecs.set(stateChoicesPair.first);
+                        inputToSspStateMap[stateChoicesPair.first] = currentMecIndex;
+                    }
+                }
+                // Now take care of the non-MEC states. Note that the order of these states will be preserved.
+                uint64_t numberOfStatesNotInMecs = 0;
+                storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs;
+                for (auto const& nonMecState : statesNotContainedInAnyMec) {
+                    inputToSspStateMap[nonMecState] = numberOfStatesNotInMecs;
+                    ++numberOfStatesNotInMecs;
+                }
+                // Finalize the mapping for the MEC states, which currently still maps MEC states to their MEC index.
+                // To make sure that they point to the auxiliary states (located at the end of the SSP matrix), we shift them by the
+                // number of states that are not in a MEC.
+                for (auto const& mecState : statesInMecs) {
+                    inputToSspStateMap[mecState] += numberOfStatesNotInMecs;
+                }
+                
+                // For scheduler extraction, we will need to create a mapping between choices at the auxiliary states and the
+                // corresponding choices in the original model.
+                std::vector<std::pair<uint_fast64_t, uint_fast64_t>> sspMecExitChoicesToOriginalMap;
+                
+                // The next step is to create the SSP matrix and the right-hand side of the SSP.
+                std::vector<ValueType> rhs;
+                uint64_t numberOfSspStates = numberOfStatesNotInMecs + mecDecomposition.size();
+                typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates, 0, false, true, numberOfSspStates);
+                // If the source state of a transition is not contained in any MEC, we copy its choices (and perform the necessary modifications).
+                uint64_t currentSspChoice = 0;
+                for (auto const& nonMecState : statesNotContainedInAnyMec) {
+                    sspMatrixBuilder.newRowGroup(currentSspChoice);
+                    
+                    for (uint64_t choice = nondeterministicChoiceIndices[nonMecState]; choice < nondeterministicChoiceIndices[nonMecState + 1]; ++choice, ++currentSspChoice) {
+                        rhs.push_back(storm::utility::zero<ValueType>());
+                        addSspMatrixChoice(choice, _transitionMatrix, inputToSspStateMap, numberOfStatesNotInMecs, currentSspChoice, sspMatrixBuilder);
+                    }
+                }
+                // Now we construct the choices for the auxiliary states which reflect former MEC states.
+                for (uint64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
+                    storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
+                    sspMatrixBuilder.newRowGroup(currentSspChoice);
+                    for (auto const& stateChoicesPair : mec) {
+                        uint64_t const& mecState = stateChoicesPair.first;
+                        auto const& choicesInMec = stateChoicesPair.second;
+                        for (uint64_t choice = nondeterministicChoiceIndices[mecState]; choice < nondeterministicChoiceIndices[mecState + 1]; ++choice) {
+                            // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state.
+                            if (choicesInMec.find(choice) == choicesInMec.end()) {
+                                rhs.push_back(storm::utility::zero<ValueType>());
+                                addSspMatrixChoice(choice, _transitionMatrix, inputToSspStateMap, numberOfStatesNotInMecs, currentSspChoice, sspMatrixBuilder);
+                                if (isProduceSchedulerSet()) {
+                                    // Later we need to be able to map this choice back to the original input model
+                                    sspMecExitChoicesToOriginalMap.emplace_back(mecState, choice - nondeterministicChoiceIndices[mecState]);
+                                }
+                                ++currentSspChoice;
+                            }
+                        }
+                    }
+                    // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
+                    rhs.push_back(mecLraValues[mecIndex]);
+                    if (isProduceSchedulerSet()) {
+                        // Insert some invalid values so we can later detect that this choice is not an exit choice
+                        sspMecExitChoicesToOriginalMap.emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
+                    }
+                    ++currentSspChoice;
+                }
+                storm::storage::SparseMatrix<ValueType> sspMatrix = sspMatrixBuilder.build(currentSspChoice, numberOfSspStates, numberOfSspStates);
+                
+                // Set-up a solver
+                storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
+                storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(env, true, true, this->getOptimizationDirection(), false, this->isProduceSchedulerSet());
+                requirements.clearBounds();
+                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
+                std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(env, sspMatrix);
+                solver->setHasUniqueSolution();
+                solver->setHasNoEndComponents();
+                solver->setTrackScheduler(isProduceSchedulerSet());
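+                // Every SSP state value is a weighted average of the reachable MEC values, so the
+                // smallest and largest MEC value are valid lower and upper bounds for the solver.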
+                auto lowerUpperBounds = std::minmax_element(mecLraValues.begin(), mecLraValues.end());
+                solver->setLowerBound(*lowerUpperBounds.first);
+                solver->setUpperBound(*lowerUpperBounds.second);
+                solver->setRequirementsChecked();
+                
+                // Solve the equation system
+                std::vector<ValueType> x(numberOfSspStates);
+                solver->solveEquations(env, this->getOptimizationDirection(), x, rhs);
+
+                // Prepare scheduler (if requested)
+                if (isProduceSchedulerSet() && solver->hasScheduler()) {
+                    // Translate the result for the SSP matrix back to the original model
+                    auto const& sspChoices = solver->getSchedulerChoices();
+                    // We first take care of the non-MEC states
+                    storm::utility::vector::setVectorValues(_producedOptimalChoices.get(), statesNotContainedInAnyMec, sspChoices);
+                    // Secondly, we consider MEC states. There are 3 cases for each MEC state:
+                    // 1. The SSP choices encode that we want to stay in the MEC
+                    // 2. The SSP choices encode that we want to leave the MEC and
+                    //      a) we take an exit (non-MEC) choice at the given state
+                    //      b) we have to take a MEC choice at the given state in a way that eventually an exit state of the MEC is reached
+                    uint64_t exitChoiceOffset = sspMatrix.getRowGroupIndices()[numberOfStatesNotInMecs];
+                    for (auto const& mec : mecDecomposition) {
+                        // Get the sspState of this MEC (using one representative MEC state)
+                        auto const& sspState = inputToSspStateMap[mec.begin()->first];
+                        uint64_t sspChoiceIndex = sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState];
+                        // Obtain the state and choice of the original model to which the selected choice corresponds.
+                        auto const& originalStateChoice = sspMecExitChoicesToOriginalMap[sspChoiceIndex - exitChoiceOffset];
+                        // Check if we are in Case 1 or 2
+                        if (originalStateChoice.first == std::numeric_limits<uint_fast64_t>::max()) {
+                            // The optimal choice is to stay in this MEC (Case 1)
+                            // In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call of computeLraForMec.
+                            STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC.");
+                        } else {
+                            // The best choice is to leave this MEC via the selected state and choice. (Case 2)
+                            // Set the exit choice (Case 2.a)
+                            _producedOptimalChoices.get()[originalStateChoice.first] = originalStateChoice.second;
+                            // The remaining states in this MEC need to reach the state with the exit choice with probability 1. (Case 2.b)
+                            // Perform a backwards search from the exit state, only using MEC choices
+                            // We start by assigning an invalid choice to all remaining MEC states (so that we can easily detect them as unprocessed)
+                            for (auto const& stateActions : mec) {
+                                if (stateActions.first != originalStateChoice.first) {
+                                    _producedOptimalChoices.get()[stateActions.first] = std::numeric_limits<uint64_t>::max();
+                                }
+                            }
+                            // Now start a backwards DFS
+                            std::vector<uint64_t> stack = {originalStateChoice.first};
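+                            // Each state is pushed at most once, since a predecessor is only pushed right after its choice has been set to a valid value.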
+                            while (!stack.empty()) {
+                                uint64_t currentState = stack.back();
+                                stack.pop_back();
+                                for (auto const& backwardsTransition : _backwardTransitions.getRowGroup(currentState)) {
+                                    uint64_t predecessorState = backwardsTransition.getColumn();
+                                    if (mec.containsState(predecessorState)) {
+                                        auto& selectedPredChoice = _producedOptimalChoices.get()[predecessorState];
+                                        if (selectedPredChoice == std::numeric_limits<uint64_t>::max()) {
+                                            // No choice has been assigned to this predecessor yet.
+                                            // We now need to check whether there is a *MEC* choice leading to currentState
+                                            for (auto const& predChoice : mec.getChoicesForState(predecessorState)) {
+                                                for (auto const& forwardTransition : _transitionMatrix.getRow(predChoice)) {
+                                                    if (forwardTransition.getColumn() == currentState && !storm::utility::isZero(forwardTransition.getValue())) {
+                                                        // Taking this choice (infinitely often) will lead to the current state (infinitely often)!
+                                                        selectedPredChoice = predChoice - nondeterministicChoiceIndices[predecessorState];
+                                                        stack.push_back(predecessorState);
+                                                        break;
+                                                    }
+                                                }
+                                                if (selectedPredChoice != std::numeric_limits<uint64_t>::max()) {
+                                                    break;
+                                                }
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                } else {
+                    STORM_LOG_ERROR_COND(!isProduceSchedulerSet(), "Requested to produce a scheduler, but no scheduler was generated.");
+                }
+                
+                // Prepare result vector.
+                // For efficiency reasons, we re-use the memory of our rhs for this!
+                std::vector<ValueType> result = std::move(rhs);
+                result.resize(numberOfStates);
+                result.shrink_to_fit();
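+                // inputToSspStateMap assigns each original state its SSP state (non-MEC states point to their copy, MEC states to the corresponding auxiliary state).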
+                storm::utility::vector::selectVectorValues(result, inputToSspStateMap, x);
+                return result;
+            }
+            
+            template class SparseNondeterministicInfiniteHorizonHelper<double>;
+            template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber>;
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
new file mode 100644
index 000000000..7ca921aee
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
@@ -0,0 +1,122 @@
+#pragma once
+#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
+
+#include "storm/storage/SparseMatrix.h"
+#include "storm/storage/MaximalEndComponentDecomposition.h"
+#include "storm/models/sparse/StandardRewardModel.h"
+
+namespace storm {
+    class Environment;
+    
+    namespace modelchecker {
+        namespace helper {
+        
+            /*!
+             * Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system.
+             */
+            template <typename ValueType>
+            class SparseNondeterministicInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType> {
+
+            public:
+                /*!
+                 * Initializes the helper for a discrete time model (i.e. an MDP)
+                 */
+                SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions);
+                
+                /*!
+                 * Initializes the helper for a continuous time model (i.e. a Markov automaton)
+                 */
+                SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates);
+                
+                /*!
+                 * Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
+                 * @return a value for each state
+                 */
+                std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates);
+                
+                /*!
+                 * Computes the long run average rewards, i.e., the average reward collected per time unit
+                 * @return a value for each state
+                 */
+                std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel);
+                
+                /*!
+                 * Computes the long run average value given the provided state-action rewards (as a vector with one entry per choice)
+                 * @return a value for each state
+                 */
+                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const& combinedStateActionRewards);
+                
+                /*!
+                 * Computes the long run average value given the provided state-action rewards (via a getter that yields the reward for a state and one of its global choice indices)
+                 * @return a value for each state
+                 */
+                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter);
+                
+                /*!
+                 * Sets whether an optimal scheduler shall be constructed during the computation
+                 */
+                void setProduceScheduler(bool value);
+                
+                /*!
+                 * @return whether an optimal scheduler shall be constructed during the computation
+                 */
+                bool isProduceSchedulerSet() const;
+                
+                /*!
+                 * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
+                 * @return the produced scheduler of the most recent call.
+                 */
+                std::vector<uint64_t> const& getProducedOptimalChoices() const;
+                
+                /*!
+                 * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
+                 * @return the produced scheduler of the most recent call.
+                 */
+                std::vector<uint64_t>& getProducedOptimalChoices();
+                
+                /*!
+                 * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
+                 * @return a new scheduler containing optimal choices for each state that yield the long run average values of the most recent call.
+                 */
+                storm::storage::Scheduler<ValueType> extractScheduler() const;
+
+            protected:
+                /*!
+                 * @pre if scheduler production is enabled, the _producedOptimalChoices vector should be initialized and sufficiently large
+                 * @return the (unique) optimal LRA value for the given MEC.
+                 * @post _producedOptimalChoices contains choices for the states of the given MEC which yield the returned LRA value.
+                 */
+                ValueType computeLraForMec(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec);
+                
+                /*!
+                 * As computeLraForMec, but uses value iteration as the solution method (regardless of what is set in env)
+                 */
+                ValueType computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec);
+                
+                /*!
+                 * As computeLraForMec, but uses linear programming as the solution method (regardless of what is set in env)
+                 */
+                ValueType computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec);
+                
+                /*!
+                 * Builds and solves the stochastic shortest path (SSP) problem that combines the previously computed MEC values.
+                 * @return the LRA value for each state of the input model
+                 */
+                std::vector<ValueType> buildAndSolveSsp(Environment const& env, storm::storage::MaximalEndComponentDecomposition<ValueType> const& mecDecomposition, std::vector<ValueType> const& mecLraValues);
+            
+            private:
+                storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
+                storm::storage::SparseMatrix<ValueType> const& _backwardTransitions;
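+                // The following two members are only set for continuous time models (i.e. Markov automata); they are nullptr in the discrete time case.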
+                storm::storage::BitVector const* _markovianStates;
+                std::vector<ValueType> const* _exitRates;
+                bool _produceScheduler;
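+                // Only initialized if scheduler production is enabled; stores one (local) choice index per state of the input model.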
+                boost::optional<std::vector<uint64_t>> _producedOptimalChoices;
+            };
+
+        }
+    }
+}
\ No newline at end of file

From 3c84e682168b8eb8063138f3aaf3f9ba1e3f375b Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 29 Jul 2020 16:55:13 +0200
Subject: [PATCH 04/48] Using the new helper for MDP LRA properties.

---
 .../prctl/SparseMdpPrctlModelChecker.cpp      | 23 ++++++++++++-------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
index f62035f73..46a2357c1 100644
--- a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
+++ b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
@@ -15,6 +15,7 @@
 #include "storm/models/sparse/StandardRewardModel.h"
 
 #include "storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h"
+#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
 
 #include "storm/modelchecker/prctl/helper/rewardbounded/QuantileHelper.h"
 #include "storm/modelchecker/multiobjective/multiObjectiveModelChecking.h"
@@ -224,10 +225,13 @@ namespace storm {
 			STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
 			std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
 			ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
-            auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(),  subResult.getTruthValuesVector(), checkTask.isProduceSchedulersSet());
-            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(ret.values)));
-            if (checkTask.isProduceSchedulersSet() && ret.scheduler) {
-                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::move(ret.scheduler));
+            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions());
+            helper.setOptimizationDirection(checkTask.getOptimizationDirection());
+            helper.setProduceScheduler(checkTask.isProduceSchedulersSet());
+            auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
+            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
+            if (checkTask.isProduceSchedulersSet()) {
+                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
             }
             return result;
 		}
@@ -236,10 +240,13 @@ namespace storm {
         std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
             STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
-            auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get(), checkTask.isProduceSchedulersSet());
-            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(ret.values)));
-            if (checkTask.isProduceSchedulersSet() && ret.scheduler) {
-                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::move(ret.scheduler));
+            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions());
+            helper.setOptimizationDirection(checkTask.getOptimizationDirection());
+            helper.setProduceScheduler(checkTask.isProduceSchedulersSet());
+            auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
+            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
+            if (checkTask.isProduceSchedulersSet()) {
+                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
             }
             return result;
         }

From d06a39eb79d52bd09e1c31c29e8b0fcb889b7971 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 29 Jul 2020 16:55:54 +0200
Subject: [PATCH 05/48] Dropping old MDP LRA code.

---
 .../prctl/helper/SparseMdpPrctlHelper.cpp     | 467 ------------------
 .../prctl/helper/SparseMdpPrctlHelper.h       |  13 -
 2 files changed, 480 deletions(-)

diff --git a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
index 3b69fa62d..5282f9e60 100644
--- a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
+++ b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
@@ -39,7 +39,6 @@
 #include "storm/transformer/EndComponentEliminator.h"
 
 #include "storm/environment/solver/MinMaxSolverEnvironment.h"
-#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
 
 #include "storm/exceptions/InvalidStateException.h"
 #include "storm/exceptions/InvalidPropertyException.h"
@@ -1208,464 +1207,6 @@ namespace storm {
                 return MDPSparseModelCheckingHelperReturnType<ValueType>(std::move(result), std::move(scheduler));
             }
             
-            template<typename ValueType>
-            MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler) {
-                
-                // If there are no goal states, we avoid the computation and directly return zero.
-                if (psiStates.empty()) {
-                    return std::vector<ValueType>(transitionMatrix.getRowGroupCount(), storm::utility::zero<ValueType>());
-                }
-                
-                // Likewise, if all bits are set, we can avoid the computation and set.
-                if (psiStates.full()) {
-                    return std::vector<ValueType>(transitionMatrix.getRowGroupCount(), storm::utility::one<ValueType>());
-                }
-                
-                // Reduce long run average probabilities to long run average rewards by
-                // building a reward model assigning one reward to every psi state
-                std::vector<ValueType> stateRewards(psiStates.size(), storm::utility::zero<ValueType>());
-                storm::utility::vector::setVectorValues(stateRewards, psiStates, storm::utility::one<ValueType>());
-                storm::models::sparse::StandardRewardModel<ValueType> rewardModel(std::move(stateRewards));
-                return computeLongRunAverageRewards(env, std::move(goal), transitionMatrix, backwardTransitions, rewardModel, produceScheduler);
-            }
-            
-            template<typename ValueType>
-            template<typename RewardModelType>
-            MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler) {
-                
-                uint64_t numberOfStates = transitionMatrix.getRowGroupCount();
-
-                std::unique_ptr<storm::storage::Scheduler<ValueType>> scheduler;
-                if (produceScheduler) {
-                    scheduler = std::make_unique<storm::storage::Scheduler<ValueType>>(numberOfStates);
-                }
-                
-                // Start by decomposing the MDP into its MECs.
-                storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(transitionMatrix, backwardTransitions);
-                
-                // Get some data members for convenience.
-                std::vector<uint_fast64_t> const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices();
-                ValueType zero = storm::utility::zero<ValueType>();
-                
-                //first calculate LRA for the Maximal End Components.
-                storm::storage::BitVector statesInMecs(numberOfStates);
-                std::vector<uint_fast64_t> stateToMecIndexMap(transitionMatrix.getColumnCount());
-                std::vector<ValueType> lraValuesForEndComponents(mecDecomposition.size(), zero);
-                
-                auto underlyingSolverEnvironment = env;
-                if (env.solver().isForceSoundness()) {
-                    // For sound computations, the error in the MECS plus the error in the remaining system should be less than the user defined precsion.
-                    underlyingSolverEnvironment.solver().lra().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
-                    underlyingSolverEnvironment.solver().minMax().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
-                    underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
-                }
-                
-                for (uint_fast64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
-                    storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex];
-                    
-                    lraValuesForEndComponents[currentMecIndex] = computeLraForMaximalEndComponent(underlyingSolverEnvironment, goal.direction(), transitionMatrix, rewardModel, mec, scheduler);
-                    
-                    // Gather information for later use.
-                    for (auto const& stateChoicesPair : mec) {
-                        statesInMecs.set(stateChoicesPair.first);
-                        stateToMecIndexMap[stateChoicesPair.first] = currentMecIndex;
-                    }
-                }
-                
-                // For fast transition rewriting, we build some auxiliary data structures.
-                storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs;
-                uint_fast64_t firstAuxiliaryStateIndex = statesNotContainedInAnyMec.getNumberOfSetBits();
-                uint_fast64_t lastStateNotInMecs = 0;
-                uint_fast64_t numberOfStatesNotInMecs = 0;
-                std::vector<uint_fast64_t> statesNotInMecsBeforeIndex;
-                statesNotInMecsBeforeIndex.reserve(numberOfStates);
-                for (auto state : statesNotContainedInAnyMec) {
-                    while (lastStateNotInMecs <= state) {
-                        statesNotInMecsBeforeIndex.push_back(numberOfStatesNotInMecs);
-                        ++lastStateNotInMecs;
-                    }
-                    ++numberOfStatesNotInMecs;
-                }
-                
-                // Finally, we are ready to create the SSP matrix and right-hand side of the SSP.
-                std::vector<ValueType> b;
-                uint64_t numberOfSspStates = numberOfStatesNotInMecs + mecDecomposition.size();
-
-                typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates, 0, false, true, numberOfSspStates);
-                
-                // If the source state is not contained in any MEC, we copy its choices (and perform the necessary modifications).
-                uint_fast64_t currentChoice = 0;
-                for (auto state : statesNotContainedInAnyMec) {
-                    sspMatrixBuilder.newRowGroup(currentChoice);
-                    
-                    for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice, ++currentChoice) {
-                        std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
-                        b.push_back(storm::utility::zero<ValueType>());
-                        
-                        for (auto element : transitionMatrix.getRow(choice)) {
-                            if (statesNotContainedInAnyMec.get(element.getColumn())) {
-                                // If the target state is not contained in an MEC, we can copy over the entry.
-                                sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
-                            } else {
-                                // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
-                                // so that we are able to write the cumulative probability to the MEC into the matrix.
-                                auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
-                            }
-                        }
-                        
-                        // Now insert all (cumulative) probability values that target an MEC.
-                        for (uint_fast64_t mecIndex = 0; mecIndex < auxiliaryStateToProbabilityMap.size(); ++mecIndex) {
-                            if (!storm::utility::isZero(auxiliaryStateToProbabilityMap[mecIndex])) {
-                                sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + mecIndex, auxiliaryStateToProbabilityMap[mecIndex]);
-                            }
-                        }
-                    }
-                }
-                
-                std::vector<std::pair<uint_fast64_t, uint_fast64_t>> sspMecChoicesToOriginalMap; // for scheduler extraction
-                
-                // Now we are ready to construct the choices for the auxiliary states.
-                for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
-                    storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
-                    sspMatrixBuilder.newRowGroup(currentChoice);
-                    
-                    for (auto const& stateChoicesPair : mec) {
-                        uint_fast64_t state = stateChoicesPair.first;
-                        storm::storage::FlatSet<uint_fast64_t> const& choicesInMec = stateChoicesPair.second;
-                        
-                        for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) {
-                            // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state.
-                            if (choicesInMec.find(choice) == choicesInMec.end()) {
-                                std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
-                                b.push_back(storm::utility::zero<ValueType>());
-                                
-                                for (auto element : transitionMatrix.getRow(choice)) {
-                                    if (statesNotContainedInAnyMec.get(element.getColumn())) {
-                                        // If the target state is not contained in an MEC, we can copy over the entry.
-                                        sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
-                                    } else {
-                                        // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
-                                        // so that we are able to write the cumulative probability to the MEC into the matrix.
-                                        auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
-                                    }
-                                }
-                                
-                                // Now insert all (cumulative) probability values that target an MEC.
-                                for (uint_fast64_t targetMecIndex = 0; targetMecIndex < auxiliaryStateToProbabilityMap.size(); ++targetMecIndex) {
-                                    if (!storm::utility::isZero(auxiliaryStateToProbabilityMap[targetMecIndex])) {
-                                        sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + targetMecIndex, auxiliaryStateToProbabilityMap[targetMecIndex]);
-                                    }
-                                }
-                                
-                                if (produceScheduler) {
-                                    sspMecChoicesToOriginalMap.emplace_back(state, choice - nondeterministicChoiceIndices[state]);
-                                }
-                                ++currentChoice;
-                            }
-                        }
-                    }
-                    
-                    // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
-                    ++currentChoice;
-                    b.push_back(lraValuesForEndComponents[mecIndex]);
-                    if (produceScheduler) {
-                        // Insert some invalid values
-                        sspMecChoicesToOriginalMap.emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
-                    }
-                }
-                
-                // Finalize the matrix and solve the corresponding system of equations.
-                storm::storage::SparseMatrix<ValueType> sspMatrix = sspMatrixBuilder.build(currentChoice, numberOfSspStates, numberOfSspStates);
-                
-                // Check for requirements of the solver.
-                storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
-                storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, goal.direction(), false, produceScheduler);
-                requirements.clearBounds();
-                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
-
-                
-                std::vector<ValueType> sspResult(numberOfSspStates);
-                goal.restrictRelevantValues(statesNotContainedInAnyMec);
-                std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = storm::solver::configureMinMaxLinearEquationSolver(underlyingSolverEnvironment, std::move(goal), minMaxLinearEquationSolverFactory, sspMatrix);
-                solver->setLowerBound(storm::utility::zero<ValueType>());
-                solver->setUpperBound(*std::max_element(lraValuesForEndComponents.begin(), lraValuesForEndComponents.end()));
-                solver->setHasUniqueSolution();
-                solver->setHasNoEndComponents();
-                solver->setTrackScheduler(produceScheduler);
-                solver->setRequirementsChecked();
-                solver->solveEquations(underlyingSolverEnvironment, sspResult, b);
-                
-                // Prepare result vector.
-                std::vector<ValueType> result(numberOfStates, zero);
-                
-                // Set the values for states not contained in MECs.
-                storm::utility::vector::setVectorValues(result, statesNotContainedInAnyMec, sspResult);
-                
-                // Set the values for all states in MECs.
-                for (auto state : statesInMecs) {
-                    result[state] = sspResult[firstAuxiliaryStateIndex + stateToMecIndexMap[state]];
-                }
-                
-                if (produceScheduler && solver->hasScheduler()) {
-                    // Translate result for ssp matrix to original model
-                    auto const& sspChoices = solver->getSchedulerChoices();
-                    uint64_t sspState = 0;
-                    for (auto state : statesNotContainedInAnyMec) {
-                        scheduler->setChoice(sspChoices[sspState], state);
-                        ++sspState;
-                    }
-                    // The other sspStates correspond to MECS in the original system.
-                    uint_fast64_t rowOffset = sspMatrix.getRowGroupIndices()[sspState];
-                    for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
-                        // Obtain the state and choice of the original model to which the selected choice corresponds.
-                        auto const& originalStateChoice = sspMecChoicesToOriginalMap[sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState] - rowOffset];
-                        // Check if the best choice is to stay in this MEC
-                        if (originalStateChoice.first == std::numeric_limits<uint_fast64_t>::max()) {
-                            STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC.");
-                            // In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call of computeLraForMaximalEndComponent.
-                        } else {
-                            // The best choice is to leave this MEC via the selected state and choice.
-                            scheduler->setChoice(originalStateChoice.second, originalStateChoice.first);
-                            // The remaining states in this MEC need to reach this state with probability 1.
-                            storm::storage::BitVector exitStateAsBitVector(transitionMatrix.getRowGroupCount(), false);
-                            exitStateAsBitVector.set(originalStateChoice.first, true);
-                            storm::storage::BitVector otherStatesAsBitVector(transitionMatrix.getRowGroupCount(), false);
-                            for (auto const& stateChoices : mecDecomposition[mecIndex]) {
-                                if (stateChoices.first != originalStateChoice.first) {
-                                    otherStatesAsBitVector.set(stateChoices.first, true);
-                                }
-                            }
-                            storm::utility::graph::computeSchedulerProb1E(otherStatesAsBitVector, transitionMatrix, backwardTransitions, otherStatesAsBitVector, exitStateAsBitVector, *scheduler);
-                        }
-                        ++sspState;
-                    }
-                    assert(sspState == sspMatrix.getRowGroupCount());
-                } else {
-                    STORM_LOG_ERROR_COND(!produceScheduler, "Requested to produce a scheduler, but no scheduler was generated.");
-                }
-                
-                return MDPSparseModelCheckingHelperReturnType<ValueType>(std::move(result), std::move(scheduler));
-            }
-            
-            template<typename ValueType>
-            template<typename RewardModelType>
-            ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler) {
-                
-                // If the mec only consists of a single state, we compute the LRA value directly
-                if (++mec.begin() == mec.end()) {
-                    uint64_t state = mec.begin()->first;
-                    auto choiceIt = mec.begin()->second.begin();
-                    ValueType result = rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix);
-                    uint_fast64_t bestChoice = *choiceIt;
-                    for (++choiceIt; choiceIt != mec.begin()->second.end(); ++choiceIt) {
-                        ValueType choiceValue = rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix);
-                        if (storm::solver::minimize(dir)) {
-                            if (result > choiceValue) {
-                                result = std::move(choiceValue);
-                                bestChoice = *choiceIt;
-                            }
-                        } else {
-                             if (result < choiceValue) {
-                                    result = std::move(choiceValue);
-                                    bestChoice = *choiceIt;
-                             }
-                        }
-                    }
-                    if (scheduler) {
-                        scheduler->setChoice(bestChoice - transitionMatrix.getRowGroupIndices()[state], state);
-                    }
-                    return result;
-                }
-                
-                // Solve MEC with the method specified in the settings
-                storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod();
-                if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::LinearProgramming) {
-                    STORM_LOG_INFO("Selecting 'LP' as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
-                    method = storm::solver::LraMethod::LinearProgramming;
-                } else if (env.solver().isForceSoundness() && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) {
-                    STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
-                    method = storm::solver::LraMethod::ValueIteration;
-                }
-                STORM_LOG_ERROR_COND(scheduler == nullptr || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
-                if (method == storm::solver::LraMethod::LinearProgramming) {
-                    return computeLraForMaximalEndComponentLP(env, dir, transitionMatrix, rewardModel, mec);
-                } else if (method == storm::solver::LraMethod::ValueIteration) {
-                    return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, rewardModel, mec, scheduler);
-                } else {
-                    STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
-                }
-            }
-            
-            template<typename ValueType>
-            template<typename RewardModelType>
-            ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler) {
-                
-                // Initialize data about the mec
-                storm::storage::BitVector mecStates(transitionMatrix.getRowGroupCount(), false);
-                storm::storage::BitVector mecChoices(transitionMatrix.getRowCount(), false);
-                for (auto const& stateChoicesPair : mec) {
-                    mecStates.set(stateChoicesPair.first);
-                    for (auto const& choice : stateChoicesPair.second) {
-                        mecChoices.set(choice);
-                    }
-                }
-                
-                boost::container::flat_map<uint64_t, uint64_t> toSubModelStateMapping;
-                uint64_t currState = 0;
-                toSubModelStateMapping.reserve(mecStates.getNumberOfSetBits());
-                for (auto const& mecState : mecStates) {
-                    toSubModelStateMapping.insert(std::pair<uint64_t, uint64_t>(mecState, currState));
-                    ++currState;
-                }
-                
-                // Get a transition matrix that only considers the states and choices within the MEC
-                storm::storage::SparseMatrixBuilder<ValueType> mecTransitionBuilder(mecChoices.getNumberOfSetBits(), mecStates.getNumberOfSetBits(), 0, true, true, mecStates.getNumberOfSetBits());
-                std::vector<ValueType> choiceRewards;
-                choiceRewards.reserve(mecChoices.getNumberOfSetBits());
-                uint64_t currRow = 0;
-                ValueType selfLoopProb = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
-                ValueType scalingFactor = storm::utility::one<ValueType>() - selfLoopProb;
-                for (auto const& mecState : mecStates) {
-                    mecTransitionBuilder.newRowGroup(currRow);
-                    uint64_t groupStart = transitionMatrix.getRowGroupIndices()[mecState];
-                    uint64_t groupEnd = transitionMatrix.getRowGroupIndices()[mecState + 1];
-                    for (uint64_t choice = mecChoices.getNextSetIndex(groupStart); choice < groupEnd; choice = mecChoices.getNextSetIndex(choice + 1)) {
-                        bool insertedDiagElement = false;
-                        for (auto const& entry : transitionMatrix.getRow(choice)) {
-                            uint64_t column = toSubModelStateMapping[entry.getColumn()];
-                            if (!insertedDiagElement && entry.getColumn() > mecState) {
-                                mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
-                                insertedDiagElement = true;
-                            }
-                            if (!insertedDiagElement && entry.getColumn() == mecState) {
-                                mecTransitionBuilder.addNextValue(currRow, column, selfLoopProb + scalingFactor * entry.getValue());
-                                insertedDiagElement = true;
-                            } else {
-                                mecTransitionBuilder.addNextValue(currRow, column,  scalingFactor * entry.getValue());
-                            }
-                        }
-                        if (!insertedDiagElement) {
-                            mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
-                        }
-                        
-                        // Compute the rewards obtained for this choice
-                        choiceRewards.push_back(scalingFactor * rewardModel.getTotalStateActionReward(mecState, choice, transitionMatrix));
-                        
-                        ++currRow;
-                    }
-                }
-                auto mecTransitions = mecTransitionBuilder.build();
-                STORM_LOG_ASSERT(mecTransitions.isProbabilistic(), "The MEC-Matrix is not probabilistic.");
-                
-                // start the iterations
-                ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()) / scalingFactor;
-                bool relative = env.solver().lra().getRelativeTerminationCriterion();
-                std::vector<ValueType> x(mecTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
-                std::vector<ValueType> xPrime = x;
-                
-                auto multiplier = storm::solver::MultiplierFactory<ValueType>().create(env, mecTransitions);
-                ValueType maxDiff, minDiff;
-                
-                uint64_t iter = 0;
-                boost::optional<uint64_t> maxIter;
-                if (env.solver().lra().isMaximalIterationCountSet()) {
-                    maxIter = env.solver().lra().getMaximalIterationCount();
-                }
-                while (!maxIter.is_initialized() || iter < maxIter.get()) {
-                    ++iter;
-                    // Compute the obtained rewards for the next step
-                    multiplier->multiplyAndReduce(env, dir, x, &choiceRewards, x);
-                    
-                    // update xPrime and check for convergence
-                    // to avoid large (and numerically unstable) x-values, we substract a reference value.
-                    auto xIt = x.begin();
-                    auto xPrimeIt = xPrime.begin();
-                    ValueType refVal = *xIt;
-                    maxDiff = *xIt - *xPrimeIt;
-                    minDiff = maxDiff;
-                    *xIt -= refVal;
-                    *xPrimeIt = *xIt;
-                    for (++xIt, ++xPrimeIt; xIt != x.end(); ++xIt, ++xPrimeIt) {
-                        ValueType diff = *xIt - *xPrimeIt;
-                        maxDiff = std::max(maxDiff, diff);
-                        minDiff = std::min(minDiff, diff);
-                        *xIt -= refVal;
-                        *xPrimeIt = *xIt;
-                    }
-
-                    if ((maxDiff - minDiff) <= (relative ? (precision * minDiff) : precision)) {
-                        break;
-                    }
-                    if (storm::utility::resources::isTerminate()) {
-                        break;
-                    }
-                }
-                if (maxIter.is_initialized() && iter == maxIter.get()) {
-                    STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
-                } else {
-                    STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
-                }
-                
-                if (scheduler) {
-                    std::vector<uint_fast64_t> localMecChoices(mecTransitions.getRowGroupCount(), 0);
-                    multiplier->multiplyAndReduce(env, dir, x, &choiceRewards, x, &localMecChoices);
-                    auto localMecChoiceIt = localMecChoices.begin();
-                    for (auto const& mecState : mecStates) {
-                        // Get the choice index of the selected mec choice with respect to the global transition matrix.
-                        uint_fast64_t globalChoice = mecChoices.getNextSetIndex(transitionMatrix.getRowGroupIndices()[mecState]);
-                        for (uint_fast64_t i = 0; i < *localMecChoiceIt; ++i) {
-                            globalChoice = mecChoices.getNextSetIndex(globalChoice + 1);
-                        }
-                        STORM_LOG_ASSERT(globalChoice < transitionMatrix.getRowGroupIndices()[mecState + 1], "Invalid global choice for mec state.");
-                        scheduler->setChoice(globalChoice - transitionMatrix.getRowGroupIndices()[mecState], mecState);
-                        ++localMecChoiceIt;
-                    }
-                }
-                return (maxDiff + minDiff) / (storm::utility::convertNumber<ValueType>(2.0) * scalingFactor);
-            }
-            
-            template<typename ValueType>
-            template<typename RewardModelType>
-            ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
-                std::shared_ptr<storm::solver::LpSolver<ValueType>> solver = storm::utility::solver::getLpSolver<ValueType>("LRA for MEC");
-                solver->setOptimizationDirection(invert(dir));
-                
-                // First, we need to create the variables for the problem.
-                std::map<uint_fast64_t, storm::expressions::Variable> stateToVariableMap;
-                for (auto const& stateChoicesPair : mec) {
-                    std::string variableName = "h" + std::to_string(stateChoicesPair.first);
-                    stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName);
-                }
-                storm::expressions::Variable lambda = solver->addUnboundedContinuousVariable("L", 1);
-                solver->update();
-                
-                // Now we encode the problem as constraints.
-                for (auto const& stateChoicesPair : mec) {
-                    uint_fast64_t state = stateChoicesPair.first;
-                    
-                    // Now, based on the type of the state, create a suitable constraint.
-                    for (auto choice : stateChoicesPair.second) {
-                        storm::expressions::Expression constraint = -lambda;
-                        
-                        for (auto element : transitionMatrix.getRow(choice)) {
-                            constraint = constraint + stateToVariableMap.at(element.getColumn()) * solver->getConstant(element.getValue());
-                        }
-                        typename RewardModelType::ValueType r = rewardModel.getTotalStateActionReward(state, choice, transitionMatrix);
-                        constraint = solver->getConstant(r) + constraint;
-                        
-                        if (dir == OptimizationDirection::Minimize) {
-                            constraint = stateToVariableMap.at(state) <= constraint;
-                        } else {
-                            constraint = stateToVariableMap.at(state) >= constraint;
-                        }
-                        solver->addConstraint("state" + std::to_string(state) + "," + std::to_string(choice), constraint);
-                    }
-                }
-                
-                solver->optimize();
-                return solver->getContinuousValue(lambda);
-            }
-            
             template<typename ValueType>
             std::unique_ptr<CheckResult> SparseMdpPrctlHelper<ValueType>::computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates) {
                 
@@ -1818,10 +1359,6 @@ namespace storm {
             template std::vector<double> SparseMdpPrctlHelper<double>::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, uint_fast64_t stepBound);
             template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
             template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
-            template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool produceScheduler);
-            template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<double>>& scheduler);
-            template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<double>>& scheduler);
-            template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
 
 #ifdef STORM_HAVE_CARL
             template class SparseMdpPrctlHelper<storm::RationalNumber>;
@@ -1829,10 +1366,6 @@ namespace storm {
             template std::vector<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, uint_fast64_t stepBound);
             template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
             template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
-            template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool produceScheduler);
-            template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<storm::RationalNumber>>& scheduler);
-            template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<storm::RationalNumber>>& scheduler);
-            template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
 #endif
         }
     }
diff --git a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h
index 441e6ce41..57cb1ad4d 100644
--- a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h
+++ b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h
@@ -67,24 +67,11 @@ namespace storm {
                 static std::vector<ValueType> computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::Interval> const& intervalRewardModel, bool lowerBoundOfIntervals, storm::storage::BitVector const& targetStates, bool qualitative);
 #endif
                 
-                static MDPSparseModelCheckingHelperReturnType<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler);
-
-                
-                template<typename RewardModelType>
-                static MDPSparseModelCheckingHelperReturnType<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler);
-
                 static std::unique_ptr<CheckResult> computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates);
                 
             private:
                 static MDPSparseModelCheckingHelperReturnType<ValueType> computeReachabilityRewardsHelper(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::function<std::vector<ValueType>(uint_fast64_t, storm::storage::SparseMatrix<ValueType> const&, storm::storage::BitVector const&)> const& totalStateRewardVectorGetter, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, std::function<storm::storage::BitVector()> const& zeroRewardStatesGetter, std::function<storm::storage::BitVector()> const& zeroRewardChoicesGetter, ModelCheckerHint const& hint = ModelCheckerHint());
 
-                template<typename RewardModelType>
-                static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler);
-                template<typename RewardModelType>
-                static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler);
-                template<typename RewardModelType>
-                static ValueType computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-
             };
             
         }

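For orientation: the LRA entry points removed above now live in the dedicated infinite-horizon helper used throughout the later patches in this series. A minimal usage sketch, assuming the MDP variant of the helper's constructor takes only the two matrices and that a setProduceScheduler setter exists alongside the extractScheduler method shown in a later patch (both assumptions, not confirmed by this diff):

    // Sketch: compute LRA rewards via the infinite-horizon helper instead of SparseMdpPrctlHelper.
    storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<double> helper(transitionMatrix, backwardTransitions);
    helper.setProduceScheduler(true);  // assumed setter; cf. extractScheduler() in the helper's header below
    std::vector<double> lraRewards = helper.computeLongRunAverageRewards(env, rewardModel);
    storm::storage::Scheduler<double> scheduler = helper.extractScheduler();
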
From 9d3de84122f5b23d6b7fa3d06b87218934a41f7a Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Fri, 31 Jul 2020 16:24:15 +0200
Subject: [PATCH 06/48] MaximalEndComponent: Added size() method.

---
 src/storm/storage/MaximalEndComponent.cpp | 4 ++++
 src/storm/storage/MaximalEndComponent.h   | 5 +++++
 2 files changed, 9 insertions(+)

diff --git a/src/storm/storage/MaximalEndComponent.cpp b/src/storm/storage/MaximalEndComponent.cpp
index d6e080583..68e8eda99 100644
--- a/src/storm/storage/MaximalEndComponent.cpp
+++ b/src/storm/storage/MaximalEndComponent.cpp
@@ -36,6 +36,10 @@ namespace storm {
             stateToChoicesMapping.emplace(state, std::move(choices));
         }
         
+        std::size_t MaximalEndComponent::size() const {
+            return stateToChoicesMapping.size();
+        }
+        
         MaximalEndComponent::set_type const& MaximalEndComponent::getChoicesForState(uint_fast64_t state) const {
             auto stateChoicePair = stateToChoicesMapping.find(state);
             
diff --git a/src/storm/storage/MaximalEndComponent.h b/src/storm/storage/MaximalEndComponent.h
index c46a4c0d6..b40da6455 100644
--- a/src/storm/storage/MaximalEndComponent.h
+++ b/src/storm/storage/MaximalEndComponent.h
@@ -68,6 +68,11 @@ namespace storm {
              */
             void addState(uint_fast64_t state, set_type&& choices);
             
+            /*!
+             * @return The number of states in this MEC.
+             */
+            std::size_t size() const;
+            
             /*!
              * Retrieves the choices for the given state that are contained in this MEC under the assumption that the
              * state is in the MEC.

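As a usage sketch, size() makes the singleton-MEC check explicit (mecDecomposition stands for any storm::storage::MaximalEndComponentDecomposition; the loop body is illustrative only):

    for (auto const& mec : mecDecomposition) {
        if (mec.size() == 1) {
            // A singleton MEC can be handled directly, cf. computeLraForMec in the patches below.
            continue;
        }
        // ... solve the nontrivial MEC, e.g., via value iteration or an LP ...
    }
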
From 31dd1d8f499ea2d78eb7dfa48e8cfffe09d70975 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Fri, 31 Jul 2020 16:25:01 +0200
Subject: [PATCH 07/48] sparse/StandardRewardModel: Added a method that only
 yields the action-based rewards (excl. state rewards)

---
 .../models/sparse/StandardRewardModel.cpp     | 30 ++++++++-----------
 src/storm/models/sparse/StandardRewardModel.h |  9 ++++++
 2 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/src/storm/models/sparse/StandardRewardModel.cpp b/src/storm/models/sparse/StandardRewardModel.cpp
index f73704b89..052a95af0 100644
--- a/src/storm/models/sparse/StandardRewardModel.cpp
+++ b/src/storm/models/sparse/StandardRewardModel.cpp
@@ -155,24 +155,20 @@ namespace storm {
             
             template<typename ValueType>
             template<typename MatrixValueType>
-            ValueType StandardRewardModel<ValueType>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, MatrixValueType const& stateRewardWeight, MatrixValueType const& actionRewardWeight) const {
-                ValueType result = this->hasStateRewards() ? (this->hasStateActionRewards() ? (ValueType) (this->getStateReward(stateIndex) * stateRewardWeight + this->getStateActionReward(choiceIndex) * actionRewardWeight)
-                                                                                            : (ValueType) (this->getStateReward(stateIndex) * stateRewardWeight))
-                                                           : (this->hasStateActionRewards() ? (ValueType) (this->getStateActionReward(choiceIndex) * actionRewardWeight)
-                                                                                            : storm::utility::zero<ValueType>());
+            ValueType StandardRewardModel<ValueType>::getStateActionAndTransitionReward(uint_fast64_t choiceIndex, storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix) const {
+                ValueType result = this->hasStateActionRewards() ? this->getStateActionReward(choiceIndex) : storm::utility::zero<ValueType>();
                 if (this->hasTransitionRewards()) {
-                    auto rewMatrixEntryIt = this->getTransitionRewardMatrix().begin(choiceIndex);
-                    for (auto const& transitionEntry : transitionMatrix.getRow(choiceIndex)) {
-                        assert(rewMatrixEntryIt != this->getTransitionRewardMatrix().end(choiceIndex));
-                        if (transitionEntry.getColumn() < rewMatrixEntryIt->getColumn()) {
-                            continue;
-                        } else {
-                            // We assume that the transition reward matrix is a submatrix of the given transition matrix. Hence, the following must hold
-                            assert(transitionEntry.getColumn() == rewMatrixEntryIt->getColumn());
-                            result += actionRewardWeight * rewMatrixEntryIt->getValue() * storm::utility::convertNumber<ValueType>(transitionEntry.getValue());
-                            ++rewMatrixEntryIt;
-                        }
-                    }
+                    result += transitionMatrix.getPointwiseProductRowSum(getTransitionRewardMatrix(), choiceIndex);
+                }
+                return result;
+            }
+            
+            template<typename ValueType>
+            template<typename MatrixValueType>
+            ValueType StandardRewardModel<ValueType>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, MatrixValueType const& stateRewardWeight, MatrixValueType const& actionRewardWeight) const {
+                ValueType result = actionRewardWeight * getStateActionAndTransitionReward(choiceIndex, transitionMatrix);
+                if (this->hasStateRewards()) {
+                    result += stateRewardWeight * this->getStateReward(stateIndex);
                 }
                 return result;
             }
diff --git a/src/storm/models/sparse/StandardRewardModel.h b/src/storm/models/sparse/StandardRewardModel.h
index e4b0c6636..e84faee19 100644
--- a/src/storm/models/sparse/StandardRewardModel.h
+++ b/src/storm/models/sparse/StandardRewardModel.h
@@ -167,6 +167,15 @@ namespace storm {
                  */
                 boost::optional<storm::storage::SparseMatrix<ValueType>> const& getOptionalTransitionRewardMatrix() const;
 
+                /*!
+                 * @param choiceIndex The index of the considered choice.
+                 * @param transitionMatrix The matrix that is used to weight the values of the transition reward matrix.
+                 * @return The sum of the action reward and the weighted transition rewards for the given choice, excluding potential state rewards.
+                 * @note Returns zero if there are neither action nor transition rewards.
+                 */
+                template<typename MatrixValueType>
+                ValueType getStateActionAndTransitionReward(uint_fast64_t choiceIndex, storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix) const;
+                
                 /*!
                  * Retrieves the total reward for the given state action pair (including (scaled) state rewards, action rewards and transition rewards
                  *

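This method becomes the building block of the action-value getters introduced in the next patch; a minimal sketch (rewardModel and transitionMatrix are assumed to be in scope):

    // Action-based rewards including weighted transition rewards, excluding state rewards.
    auto actionRewardsGetter = [&] (uint64_t globalChoiceIndex) {
        return rewardModel.getStateActionAndTransitionReward(globalChoiceIndex, transitionMatrix);
    };
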
From fc66e01ed5dcf19d0eaf06b0af1ba3fd26c2bd52 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Fri, 31 Jul 2020 16:53:50 +0200
Subject: [PATCH 08/48] Nondeterministic infinite horizon: Split value getters
 into StateValueGetter and ActionValueGetter. Made the VI code more general so
 that it can also be used for Markov Automata.

---
 ...eNondeterministicInfiniteHorizonHelper.cpp | 438 ++++++++++++------
 ...rseNondeterministicInfiniteHorizonHelper.h |  16 +-
 2 files changed, 313 insertions(+), 141 deletions(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
index 40e8d2dcd..0b8ba57cc 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
@@ -31,36 +31,56 @@ namespace storm {
             
             template <typename ValueType>
             std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
-                return computeLongRunAverageValues(env, [&psiStates] (uint64_t stateIndex, uint64_t) { return psiStates.get(stateIndex) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>();});
+                return computeLongRunAverageValues(env,
+                            [&psiStates] (uint64_t stateIndex) { return psiStates.get(stateIndex) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>(); },
+                            [] (uint64_t) { return storm::utility::zero<ValueType>(); }
+                    );
             }
             
             
             template <typename ValueType>
             std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
-                if (_markovianStates) {
-                    return computeLongRunAverageValues(env, [&] (uint64_t stateIndex, uint64_t globalChoiceIndex) {
-                        if (rewardModel.hasStateRewards() && _markovianStates->get(stateIndex)) {
-                            return rewardModel.getTotalStateActionReward(stateIndex, globalChoiceIndex, _transitionMatrix, (ValueType) (storm::utility::one<ValueType>() / (*_exitRates)[stateIndex]));
-                        } else {
-                            return rewardModel.getTotalStateActionReward(stateIndex, globalChoiceIndex, _transitionMatrix, storm::utility::zero<ValueType>());
-                        }
-                    });
+                std::function<ValueType(uint64_t stateIndex)> stateRewardsGetter;
+                if (rewardModel.hasStateRewards()) {
+                    stateRewardsGetter = [&rewardModel] (uint64_t stateIndex) { return rewardModel.getStateReward(stateIndex); };
+                } else {
+                    stateRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
+                }
+                std::function<ValueType(uint64_t globalChoiceIndex)> actionRewardsGetter;
+                if (rewardModel.hasStateActionRewards() || rewardModel.hasTransitionRewards()) {
+                    if (rewardModel.hasTransitionRewards()) {
+                        actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionAndTransitionReward(globalChoiceIndex, this->_transitionMatrix); };
+                    } else {
+                        actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionReward(globalChoiceIndex); };
+                    }
                 } else {
-                    return computeLongRunAverageValues(env, [&] (uint64_t stateIndex, uint64_t globalChoiceIndex) {
-                        return rewardModel.getTotalStateActionReward(stateIndex, globalChoiceIndex, _transitionMatrix);
-                    });
+                    actionRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
                 }
+                
+                return computeLongRunAverageValues(env, stateRewardsGetter, actionRewardsGetter);
             }
             
             template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const& combinedStateActionRewards) {
-                return computeLongRunAverageValues(env, [&combinedStateActionRewards] (uint64_t, uint64_t globalChoiceIndex) {
-                    return combinedStateActionRewards[globalChoiceIndex];
-                });
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) {
+                std::function<ValueType(uint64_t stateIndex)> stateValuesGetter;
+                if (stateValues) {
+                    stateValuesGetter = [&stateValues] (uint64_t stateIndex) { return (*stateValues)[stateIndex]; };
+                } else {
+                    stateValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
+                }
+                std::function<ValueType(uint64_t actionIndex)> actionValuesGetter;
+                if (actionValues) {
+                    actionValuesGetter = [&actionValues] (uint64_t globalChoiceIndex) { return (*actionValues)[globalChoiceIndex]; };
+                } else {
+                    actionValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
+                }
+                
+                return computeLongRunAverageValues(env, stateValuesGetter, actionValuesGetter);
+
             }
             
             template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter) {
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter) {
                 
                  // Prepare an environment for the underlying solvers
                 auto underlyingSolverEnvironment = env;
@@ -86,7 +106,7 @@ namespace storm {
                 std::vector<ValueType> mecLraValues;
                 mecLraValues.reserve(mecDecomposition.size());
                 for (auto const& mec : mecDecomposition) {
-                    mecLraValues.push_back(computeLraForMec(underlyingSolverEnvironment, combinedStateActionRewardsGetter, mec));
+                    mecLraValues.push_back(computeLraForMec(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, mec));
                 }
                 
                 // Solve the resulting SSP where end components are collapsed into single auxiliary states
@@ -129,36 +149,45 @@ namespace storm {
             }
             
             template <typename ValueType>
-            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMec(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+            bool SparseNondeterministicInfiniteHorizonHelper<ValueType>::isContinuousTime() const {
+                STORM_LOG_ASSERT((_markovianStates == nullptr) == (_exitRates == nullptr), "Inconsistent information given: Markovian states were provided without exit rates (or vice versa).");
+                return _markovianStates != nullptr;
+            }
+    
+            template <typename ValueType>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMec(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
                 
-                // FIXME: MA
                 // If the mec only consists of a single state, we compute the LRA value directly
-                if (++mec.begin() == mec.end()) {
+                if (mec.size() == 1) {
                     uint64_t state = mec.begin()->first;
                     auto choiceIt = mec.begin()->second.begin();
-                    ValueType result = combinedStateActionRewardsGetter(state, *choiceIt);
-                    uint64_t bestChoice = *choiceIt;
-                    for (++choiceIt; choiceIt != mec.begin()->second.end(); ++choiceIt) {
-                        ValueType choiceValue = combinedStateActionRewardsGetter(state, *choiceIt);
-                        if (this->minimize()) {
-                            if (result > choiceValue) {
-                                result = std::move(choiceValue);
+                    if (isContinuousTime()) {
+                        // A singleton MEC has to consist of a Markovian state because of the non-Zenoness assumption. Hence, there is exactly one possible choice.
+                        STORM_LOG_THROW(_markovianStates->get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
+                        STORM_LOG_ASSERT(mec.begin()->second.size() == 1, "Markovian state has nondeterministic behavior.");
+                        if (isProduceSchedulerSet()) {
+                            _producedOptimalChoices.get()[state] = 0;
+                        }
+                        return stateRewardsGetter(state) + (*_exitRates)[state] * actionRewardsGetter(*choiceIt);
+                    } else {
+                        // Find the choice with the highest/lowest reward
+                        ValueType bestValue = actionRewardsGetter(*choiceIt);
+                        uint64_t bestChoice = *choiceIt;
+                        for (++choiceIt; choiceIt != mec.begin()->second.end(); ++choiceIt) {
+                            ValueType currentValue = actionRewardsGetter(*choiceIt);
+                            if ((this->minimize() &&  currentValue < bestValue) || (this->maximize() && currentValue > bestValue)) {
+                                bestValue = std::move(currentValue);
                                 bestChoice = *choiceIt;
                             }
-                        } else {
-                             if (result < choiceValue) {
-                                    result = std::move(choiceValue);
-                                    bestChoice = *choiceIt;
-                             }
                         }
+                        if (isProduceSchedulerSet()) {
+                            _producedOptimalChoices.get()[state] = bestChoice - _transitionMatrix.getRowGroupIndices()[state];
+                        }
+                        return bestValue + stateRewardsGetter(state);
                     }
-                    if (isProduceSchedulerSet()) {
-                        _producedOptimalChoices.get()[state] = bestChoice - _transitionMatrix.getRowGroupIndices()[state];
-                    }
-                    return result;
                 }
                 
-                // Solve MEC with the method specified in the settings
+                // Solve nontrivial MEC with the method specified in the settings
                 storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod();
                 if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::LinearProgramming) {
                     STORM_LOG_INFO("Selecting 'LP' as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
@@ -169,144 +198,281 @@ namespace storm {
                 }
                 STORM_LOG_ERROR_COND(!isProduceSchedulerSet() || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
                 if (method == storm::solver::LraMethod::LinearProgramming) {
-                    return computeLraForMecLp(env, combinedStateActionRewardsGetter, mec);
+                    return computeLraForMecLp(env, stateRewardsGetter, actionRewardsGetter, mec);
                 } else if (method == storm::solver::LraMethod::ValueIteration) {
-                    return computeLraForMecVi(env, combinedStateActionRewardsGetter, mec);
+                    return computeLraForMecVi(env, stateRewardsGetter, actionRewardsGetter, mec);
                 } else {
                     STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
                 }
             }
-            
+    
+            /*!
+             * Abstract helper class that performs a single iteration of the value iteration method
+             */
             template <typename ValueType>
-            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
-                // Initialize data about the mec
-                storm::storage::BitVector mecStates(_transitionMatrix.getRowGroupCount(), false);
-                storm::storage::BitVector mecChoices(_transitionMatrix.getRowCount(), false);
-                for (auto const& stateChoicesPair : mec) {
-                    mecStates.set(stateChoicesPair.first);
-                    for (auto const& choice : stateChoicesPair.second) {
-                        mecChoices.set(choice);
+            class LraViHelper {
+            public:
+                LraViHelper(storm::storage::MaximalEndComponent const& mec, storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _mec(mec), _transitionMatrix(transitionMatrix) {
+                    // Intentionally left empty
+                }
+                virtual ~LraViHelper() = default;
+
+                /*!
+                 * Performs a single iteration step.
+                 * If a choices vector is given, the optimal choices will be inserted at the appropriate states.
+                 * Note that these choices are inserted w.r.t. the original model states/choices, i.e., the size of the vector should match the state count of the input model.
+                 * The current estimate of the LRA value can afterwards be obtained via checkConvergence.
+                 */
+                virtual void iterate(Environment const& env, storm::solver::OptimizationDirection const& dir, std::vector<uint64_t>* choices = nullptr) = 0;
+                
+                struct ConvergenceCheckResult {
+                    bool isPrecisionAchieved;
+                    ValueType currentValue;
+                };
+                
+                /*!
+                 * Checks whether the currently computed value achieves the desired precision.
+                 */
+                virtual ConvergenceCheckResult checkConvergence(bool relative, ValueType precision) = 0;
+                
+                /*!
+                 * Must be called between two calls of iterate.
+                 */
+                virtual void prepareNextIteration() = 0;
+                
+            protected:
+                
+                /*!
+                 *
+                 * @param xPrevious the 'old' values
+                 * @param xCurrent the 'new' values
+                 * @param threshold the convergence threshold
+                 * @param relative whether the relative difference should be considered
+                 * @return The first component is true if the (relative) difference between the maximal and the minimal entry-wise change of the two value vectors is below or equal to the provided threshold.
+                 *          In this case, the second component is the average of the maximal and the minimal change.
+                 *          If the threshold is exceeded, the computation is aborted early and the second component is only an approximation of the average.
+                 */
+                std::pair<bool, ValueType> checkMinMaxDiffBelowThreshold(std::vector<ValueType> const& xPrevious, std::vector<ValueType> const& xCurrent, ValueType const& threshold, bool relative) const {
+                    STORM_LOG_ASSERT(xPrevious.size() == xCurrent.size(), "Unexpected dimension mismatch.");
+                    STORM_LOG_ASSERT(threshold > storm::utility::zero<ValueType>(), "Did not expect a non-positive threshold.");
+                    auto x1It = xPrevious.begin();
+                    auto x1Ite = xPrevious.end();
+                    auto x2It = xCurrent.begin();
+                    ValueType maxDiff = (*x2It - *x1It);
+                    ValueType minDiff = maxDiff;
+                    bool result = true;
+                    // The difference between maxDiff and minDiff is zero at this point. Thus, it doesn't make sense to check the threshold now.
+                    for (++x1It, ++x2It; x1It != x1Ite; ++x1It, ++x2It) {
+                        ValueType diff = (*x2It - *x1It);
+                        // Potentially update maxDiff or minDiff
+                        bool skipCheck = false;
+                        if (maxDiff < diff) {
+                            maxDiff = diff;
+                        } else if (minDiff > diff) {
+                            minDiff = diff;
+                        } else {
+                            skipCheck = true;
+                        }
+                        // Check convergence
+                        if (!skipCheck && (maxDiff - minDiff) > (relative ? (threshold * minDiff) : threshold)) {
+                            result = false;
+                            break;
+                        }
                     }
+                    ValueType avgDiff = (maxDiff + minDiff) / (storm::utility::convertNumber<ValueType>(2.0));
+                    return {result, avgDiff};
                 }
                 
-                boost::container::flat_map<uint64_t, uint64_t> toSubModelStateMapping;
-                uint64_t currState = 0;
-                toSubModelStateMapping.reserve(mecStates.getNumberOfSetBits());
-                for (auto const& mecState : mecStates) {
-                    toSubModelStateMapping.insert(std::pair<uint64_t, uint64_t>(mecState, currState));
-                    ++currState;
-                }
-                
-                // Get a transition matrix that only considers the states and choices within the MEC
-                storm::storage::SparseMatrixBuilder<ValueType> mecTransitionBuilder(mecChoices.getNumberOfSetBits(), mecStates.getNumberOfSetBits(), 0, true, true, mecStates.getNumberOfSetBits());
-                std::vector<ValueType> choiceValues;
-                choiceValues.reserve(mecChoices.getNumberOfSetBits());
-                uint64_t currRow = 0;
-                ValueType selfLoopProb = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
-                ValueType scalingFactor = storm::utility::one<ValueType>() - selfLoopProb;
-                for (auto const& mecState : mecStates) {
-                    mecTransitionBuilder.newRowGroup(currRow);
-                    uint64_t groupStart = _transitionMatrix.getRowGroupIndices()[mecState];
-                    uint64_t groupEnd = _transitionMatrix.getRowGroupIndices()[mecState + 1];
-                    for (uint64_t choice = mecChoices.getNextSetIndex(groupStart); choice < groupEnd; choice = mecChoices.getNextSetIndex(choice + 1)) {
-                        bool insertedDiagElement = false;
-                        for (auto const& entry : _transitionMatrix.getRow(choice)) {
-                            uint64_t column = toSubModelStateMapping[entry.getColumn()];
-                            if (!insertedDiagElement && entry.getColumn() > mecState) {
-                                mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
-                                insertedDiagElement = true;
+                storm::storage::MaximalEndComponent const& _mec;
+                storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
+            };
+    
+            /*!
+             * Helper class that performs a single iteration of the value iteration method
+             * @see Ashok et al.: Value Iteration for Long-Run Average Reward in Markov Decision Processes (CAV'17), https://doi.org/10.1007/978-3-319-63387-9_10
+             */
+            template <typename ValueType>
+            class MdpLraViHelper : public LraViHelper<ValueType> {
+            public:
+                
+                MdpLraViHelper(storm::storage::MaximalEndComponent const& mec, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, ValueType const& aperiodicFactor) : LraViHelper<ValueType>(mec, transitionMatrix), _x1(mec.size(), storm::utility::zero<ValueType>()), _x2(_x1), _x1IsCurrent(true) {
+                    
+                    // We add a self-loop to each state (which is necessary for convergence).
+                    // Very roughly, this self-loop prevents the values from oscillating like this: [1, 0] -> [0, 1] -> [1, 0] -> ...
+                    ValueType selfLoopProb = aperiodicFactor;
+                    // Introducing the self-loop also requires the rewards to be scaled by the following factor.
+                    _scalingFactor = storm::utility::one<ValueType>() - selfLoopProb;
+                    
+                    uint64_t numMecStates = this->_mec.size();
+                    boost::container::flat_map<uint64_t, uint64_t> toSubModelStateMapping;
+                    toSubModelStateMapping.reserve(this->_mec.size());
+                    uint64_t currState = 0;
+                    uint64_t numMecChoices = 0;
+                    for (auto const& stateChoices : this->_mec) {
+                        toSubModelStateMapping.insert(std::pair<uint64_t, uint64_t>(stateChoices.first, currState));
+                        ++currState;
+                        numMecChoices += stateChoices.second.size();
+                    }
+                    STORM_LOG_ASSERT(currState == numMecStates, "Unexpected number of MEC states.");
+                    
+                    // Get a transition matrix that only considers the states and choices within the MEC
+                    storm::storage::SparseMatrixBuilder<ValueType> mecTransitionBuilder(numMecChoices, numMecStates, 0, true, true, numMecStates);
+                    _choiceValues.reserve(numMecChoices);
+                    uint64_t currRow = 0;
+                    for (auto const& stateChoices : this->_mec) {
+                        auto const& mecState = stateChoices.first;
+                        auto const& mecChoices = stateChoices.second;
+                        mecTransitionBuilder.newRowGroup(currRow);
+                        for (auto const& choice : mecChoices) {
+                            bool insertedDiagElement = false;
+                            for (auto const& entry : this->_transitionMatrix.getRow(choice)) {
+                                uint64_t column = toSubModelStateMapping[entry.getColumn()];
+                                if (!insertedDiagElement && entry.getColumn() > mecState) {
+                                    mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
+                                    insertedDiagElement = true;
+                                }
+                                if (!insertedDiagElement && entry.getColumn() == mecState) {
+                                    mecTransitionBuilder.addNextValue(currRow, column, selfLoopProb + _scalingFactor * entry.getValue());
+                                    insertedDiagElement = true;
+                                } else {
+                                    mecTransitionBuilder.addNextValue(currRow, column,  _scalingFactor * entry.getValue());
+                                }
                             }
-                            if (!insertedDiagElement && entry.getColumn() == mecState) {
-                                mecTransitionBuilder.addNextValue(currRow, column, selfLoopProb + scalingFactor * entry.getValue());
-                                insertedDiagElement = true;
-                            } else {
-                                mecTransitionBuilder.addNextValue(currRow, column,  scalingFactor * entry.getValue());
+                            if (!insertedDiagElement) {
+                                mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
                             }
+                            
+                            // Compute the rewards obtained for this choice
+                            _choiceValues.push_back(_scalingFactor * (stateRewardsGetter(mecState) + actionRewardsGetter(choice)));
+                            
+                            ++currRow;
                         }
-                        if (!insertedDiagElement) {
-                            mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
+                    }
+                    
+                    _mecTransitions = mecTransitionBuilder.build();
+                    
+                    STORM_LOG_ASSERT(_mecTransitions.isProbabilistic(), "The MEC-Matrix is not probabilistic.");
+                    STORM_LOG_ASSERT(_mecTransitions.getRowGroupCount() == _x1.size(), "Unexpected size mismatch for created matrix.");
+                    STORM_LOG_ASSERT(_x1.size() == _x2.size(), "Unexpected size mismatch of the value vectors.");
+                }
+                
+                virtual void iterate(Environment const& env, storm::solver::OptimizationDirection const& dir, std::vector<uint64_t>* choices = nullptr) override {
+                    // Initialize a multiplier if it does not exist yet.
+                    if (!_multiplier) {
+                        _multiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _mecTransitions);
+                    }
+                    
+                    if (choices == nullptr) {
+                        // Perform a simple matrix-vector multiplication
+                        _multiplier->multiplyAndReduce(env, dir, xCurrent(), &_choiceValues, xPrevious());
+                    } else {
+                        // Perform a simple matrix-vector multiplication but also keep track of the choices made within _mecTransitions.
+                        std::vector<uint64_t> mecChoices(_mecTransitions.getRowGroupCount());
+                        _multiplier->multiplyAndReduce(env, dir, xCurrent(), &_choiceValues, xPrevious(), &mecChoices);
+                        // Transform the local choices (within this mec) to global indices
+                        uint64_t mecState = 0;
+                        for (auto const& stateChoices : this->_mec) {
+                            uint64_t mecChoice = mecChoices[mecState];
+                            STORM_LOG_ASSERT(mecChoice < stateChoices.second.size(), "The selected choice does not seem to exist.");
+                            uint64_t globalChoiceIndex = *(stateChoices.second.begin() + mecChoice);
+                            (*choices)[stateChoices.first] = globalChoiceIndex - this->_transitionMatrix.getRowGroupIndices()[stateChoices.first];
+                            ++mecState;
                         }
-                        
-                        // Compute the rewards obtained for this choice
-                        choiceValues.push_back(scalingFactor * combinedStateActionRewardsGetter(mecState, choice));
-                        
-                        ++currRow;
                     }
+                    
+                    // Swap current and previous x vectors
+                    _x1IsCurrent = !_x1IsCurrent;
+                    
                 }
-                auto mecTransitions = mecTransitionBuilder.build();
-                STORM_LOG_ASSERT(mecTransitions.isProbabilistic(), "The MEC-Matrix is not probabilistic.");
                 
-                // start the iterations
-                ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()) / scalingFactor;
-                bool relative = env.solver().lra().getRelativeTerminationCriterion();
-                std::vector<ValueType> x(mecTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
-                std::vector<ValueType> xPrime = x;
-                auto dir = this->getOptimizationDirection();
+                virtual typename LraViHelper<ValueType>::ConvergenceCheckResult checkConvergence(bool relative, ValueType precision) override {
+                    typename LraViHelper<ValueType>::ConvergenceCheckResult res;
+                    std::tie(res.isPrecisionAchieved, res.currentValue) = this->checkMinMaxDiffBelowThreshold(xPrevious(), xCurrent(), precision, relative);
+                    res.currentValue /= _scalingFactor; // "Undo" the scaling of the rewards
+                    return res;
+                }
                 
-                auto multiplier = storm::solver::MultiplierFactory<ValueType>().create(env, mecTransitions);
-                ValueType maxDiff, minDiff;
+                virtual void prepareNextIteration() override {
+                    // To avoid large (and numerically unstable) x-values, we subtract a reference value.
+                    ValueType referenceValue = xCurrent().front();
+                    storm::utility::vector::applyPointwise<ValueType, ValueType>(xCurrent(), xCurrent(), [&referenceValue] (ValueType const& x_i) -> ValueType { return x_i - referenceValue; });
+                }
                 
-                uint64_t iter = 0;
+            private:
+                
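+                // _x1 and _x2 alternate between holding the current and the previous iterate; these helpers resolve which is which.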
+                std::vector<ValueType>& xCurrent() {
+                    return _x1IsCurrent ? _x1 : _x2;
+                }
+                
+                std::vector<ValueType>& xPrevious() {
+                    return _x1IsCurrent ? _x2 : _x1;
+                }
+                
+                storm::storage::SparseMatrix<ValueType> _mecTransitions;
+                std::vector<ValueType> _x1, _x2, _choiceValues;
+                bool _x1IsCurrent;
+                std::unique_ptr<storm::solver::Multiplier<ValueType>> _multiplier;
+                ValueType _scalingFactor;
+            };
+            
+            template <typename ValueType>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+
+                // Collect some parameters of the computation.
+                ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
+                ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()) / aperiodicFactor;
+                bool relative = env.solver().lra().getRelativeTerminationCriterion();
                 boost::optional<uint64_t> maxIter;
                 if (env.solver().lra().isMaximalIterationCountSet()) {
                     maxIter = env.solver().lra().getMaximalIterationCount();
                 }
+                auto dir = this->getOptimizationDirection();
+                
+                // Create an object for the iterations
+                std::shared_ptr<LraViHelper<ValueType>> iterationHelper;
+                if (isContinuousTime()) {
+                    // TODO
+                } else {
+                    iterationHelper = std::make_shared<MdpLraViHelper<ValueType>>(mec, _transitionMatrix, stateRewardsGetter, actionRewardsGetter, aperiodicFactor);
+                }
+                
+                // start the iterations
+                ValueType result = storm::utility::zero<ValueType>();
+                uint64_t iter = 0;
                 while (!maxIter.is_initialized() || iter < maxIter.get()) {
                     ++iter;
-                    // Compute the obtained values for the next step
-                    multiplier->multiplyAndReduce(env, dir, x, &choiceValues, x);
-                    
-                    // update xPrime and check for convergence
-                    // to avoid large (and numerically unstable) x-values, we substract a reference value.
-                    auto xIt = x.begin();
-                    auto xPrimeIt = xPrime.begin();
-                    ValueType refVal = *xIt;
-                    maxDiff = *xIt - *xPrimeIt;
-                    minDiff = maxDiff;
-                    *xIt -= refVal;
-                    *xPrimeIt = *xIt;
-                    for (++xIt, ++xPrimeIt; xIt != x.end(); ++xIt, ++xPrimeIt) {
-                        ValueType diff = *xIt - *xPrimeIt;
-                        maxDiff = std::max(maxDiff, diff);
-                        minDiff = std::min(minDiff, diff);
-                        *xIt -= refVal;
-                        *xPrimeIt = *xIt;
-                    }
-
-                    if ((maxDiff - minDiff) <= (relative ? (precision * minDiff) : precision)) {
+                    iterationHelper->iterate(env, dir);
+                    // Check if we are done
+                    auto convergenceCheckResult = iterationHelper->checkConvergence(relative, precision);
+                    result = convergenceCheckResult.currentValue;
+                    if (convergenceCheckResult.isPrecisionAchieved) {
                         break;
                     }
                     if (storm::utility::resources::isTerminate()) {
                         break;
                     }
+                    
+                    iterationHelper->prepareNextIteration();
+                    
                 }
                 if (maxIter.is_initialized() && iter == maxIter.get()) {
                     STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
+                } else if (storm::utility::resources::isTerminate()) {
+                    STORM_LOG_WARN("LRA computation aborted after " << iter << " iterations.");
                 } else {
                     STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
                 }
                 
                 if (isProduceSchedulerSet()) {
-                    std::vector<uint_fast64_t> localMecChoices(mecTransitions.getRowGroupCount(), 0);
-                    multiplier->multiplyAndReduce(env, dir, x, &choiceValues, x, &localMecChoices);
-                    auto localMecChoiceIt = localMecChoices.begin();
-                    for (auto const& mecState : mecStates) {
-                        // Get the choice index of the selected mec choice with respect to the global transition matrix.
-                        uint_fast64_t globalChoice = mecChoices.getNextSetIndex(_transitionMatrix.getRowGroupIndices()[mecState]);
-                        for (uint_fast64_t i = 0; i < *localMecChoiceIt; ++i) {
-                            globalChoice = mecChoices.getNextSetIndex(globalChoice + 1);
-                        }
-                        STORM_LOG_ASSERT(globalChoice < _transitionMatrix.getRowGroupIndices()[mecState + 1], "Invalid global choice for mec state.");
-                        _producedOptimalChoices.get()[mecState] = globalChoice - _transitionMatrix.getRowGroupIndices()[mecState];
-                        ++localMecChoiceIt;
-                    }
+                    // We perform one more iteration step, this time tracking the scheduler choices.
+                    iterationHelper->prepareNextIteration();
+                    iterationHelper->iterate(env, dir, &_producedOptimalChoices.get());
                 }
-                return (maxDiff + minDiff) / (storm::utility::convertNumber<ValueType>(2.0) * scalingFactor);
-
+                return result;
             }
             
             template <typename ValueType>
-            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
                 std::shared_ptr<storm::solver::LpSolver<ValueType>> solver = storm::utility::solver::getLpSolver<ValueType>("LRA for MEC");
                 solver->setOptimizationDirection(invert(this->getOptimizationDirection()));
                 
@@ -330,7 +496,7 @@ namespace storm {
                         for (auto element : _transitionMatrix.getRow(choice)) {
                             constraint = constraint + stateToVariableMap.at(element.getColumn()) * solver->getConstant(element.getValue());
                         }
-                        constraint = solver->getConstant(combinedStateActionRewardsGetter(state, choice)) + constraint;
+                        constraint = solver->getConstant(stateRewardsGetter(state) + actionRewardsGetter(choice)) + constraint;
                         
                         if (this->minimize()) {
                             constraint = stateToVariableMap.at(state) <= constraint;
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
index 7ca921aee..b2d30fc4d 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
@@ -44,13 +44,13 @@ namespace storm {
                  * Computes the long run average value given the provided action-based rewards
                  * @return a value for each state
                  */
-                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const& combinedStateActionRewards);
+                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues = nullptr, std::vector<ValueType> const* actionValues = nullptr);
                 
                 /*!
                  * Computes the long run average value given the provided state-action-based rewards
                  * @return a value for each state
                  */
-                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter);
+                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter);
                 
                 /*!
                  * Sets whether an optimal scheduler shall be constructed during the computation
@@ -81,21 +81,27 @@ namespace storm {
                 storm::storage::Scheduler<ValueType> extractScheduler() const;
 
             protected:
+                
+                /*!
+                 * @return true iff this is a computation on a continuous-time model (i.e., a Markov automaton)
+                 */
+                bool isContinuousTime() const;
+                
                 /*!
                  * @pre if scheduler production is enabled, the _producedOptimalChoices vector should be initialized and sufficiently large
                  * @return the (unique) optimal LRA value for the given mec.
                  * @post _producedOptimalChoices contains choices for the states of the given MEC which yield the returned LRA value.
                  */
-                ValueType computeLraForMec(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec);
+                ValueType computeLraForMec(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
                 
                 /*!
                  * As computeLraForMec but uses value iteration as a solution method (independent of what is set in env)
                  */
-                ValueType computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec);
+                ValueType computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
                 /*!
                  * As computeLraForMec but uses linear programming as a solution method (independent of what is set in env)
                  */
-                ValueType computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex, uint64_t globalChoiceIndex)> const& combinedStateActionRewardsGetter, storm::storage::MaximalEndComponent const& mec);
+                ValueType computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
                 
                 /*!
                  * @return Lra values for each state

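To illustrate the split interface from this patch: LRA probabilities can be expressed through the getter-based overload (helper is assumed to be an instance of SparseNondeterministicInfiniteHorizonHelper<double>, psiStates a storm::storage::BitVector of goal states):

    // State values are 1 on psi states and 0 elsewhere; action values are all zero.
    auto stateValuesGetter = [&psiStates] (uint64_t stateIndex) {
        return psiStates.get(stateIndex) ? storm::utility::one<double>() : storm::utility::zero<double>();
    };
    auto actionValuesGetter = [] (uint64_t) { return storm::utility::zero<double>(); };
    std::vector<double> lra = helper.computeLongRunAverageValues(env, stateValuesGetter, actionValuesGetter);
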
From 092873e99ab11436734d543d27d0b0041e9ea1c7 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 3 Aug 2020 14:53:25 +0200
Subject: [PATCH 09/48] LRA VI: Added helper for Markov Automata.

---
 ...eNondeterministicInfiniteHorizonHelper.cpp | 300 +++++++++++++++++-
 1 file changed, 287 insertions(+), 13 deletions(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
index 0b8ba57cc..979337f5f 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
@@ -12,7 +12,6 @@
 #include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
 #include "storm/environment/solver/MinMaxSolverEnvironment.h"
 
-#include "storm/exceptions/NotImplementedException.h"
 #include "storm/exceptions/UnmetRequirementException.h"
 
 namespace storm {
@@ -25,7 +24,7 @@ namespace storm {
             }
             
             template <typename ValueType>
-            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(backwardTransitions), _markovianStates(&markovianStates), _exitRates(&exitRates) {
+            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(backwardTransitions), _markovianStates(&markovianStates), _exitRates(&exitRates), _produceScheduler(false) {
                 // Intentionally left empty.
             }
             
@@ -37,7 +36,6 @@ namespace storm {
                     );
             }
             
-            
             template <typename ValueType>
             std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
                 std::function<ValueType(uint64_t stateIndex)> stateRewardsGetter;
@@ -238,7 +236,7 @@ namespace storm {
                 /*!
                  * Must be called between two calls of iterate.
                  */
-                virtual void prepareNextIteration() = 0;
+                virtual void prepareNextIteration(Environment const& env) = 0;
                 
             protected:
                 
@@ -288,7 +286,7 @@ namespace storm {
             };
     
             /*!
-             * Abstract helper class that performs a single iteration of the value iteration method
+             * Abstract helper class that performs a single iteration of the value iteration method for MDP
              * @see Ashok et al.: Value Iteration for Long-Run Average Reward in Markov Decision Processes (CAV'17), https://doi.org/10.1007/978-3-319-63387-9_10
              */
             template <typename ValueType>
@@ -305,11 +303,11 @@ namespace storm {
                     
                     uint64_t numMecStates = this->_mec.size();
                     boost::container::flat_map<uint64_t, uint64_t> toSubModelStateMapping;
-                    toSubModelStateMapping.reserve(this->_mec.size());
+                    toSubModelStateMapping.reserve(numMecStates);
                     uint64_t currState = 0;
                     uint64_t numMecChoices = 0;
                     for (auto const& stateChoices : this->_mec) {
-                        toSubModelStateMapping.insert(std::pair<uint64_t, uint64_t>(stateChoices.first, currState));
+                        toSubModelStateMapping.emplace(stateChoices.first, currState);
                         ++currState;
                         numMecChoices += stateChoices.second.size();
                     }
@@ -392,7 +390,7 @@ namespace storm {
                     return res;
                 }
                 
-                virtual void prepareNextIteration() override {
+                virtual void prepareNextIteration(Environment const&) override {
                     // To avoid large (and numerically unstable) x-values, we subtract a reference value.
                     ValueType referenceValue = xCurrent().front();
                     storm::utility::vector::applyPointwise<ValueType, ValueType>(xCurrent(), xCurrent(), [&referenceValue] (ValueType const& x_i) -> ValueType { return x_i - referenceValue; });
@@ -415,6 +413,282 @@ namespace storm {
                 ValueType _scalingFactor;
             };
             
+            /*!
+             * Abstract helper class that performs a single iteration of the value iteration method for MA
+             * @see Butkova, Wimmer, Hermanns: Long-Run Rewards for Markov Automata (TACAS'17), https://doi.org/10.1007/978-3-662-54580-5_11
+             */
+            template <typename ValueType>
+            class MaLraViHelper : public LraViHelper<ValueType> {
+            public:
+                
+                MaLraViHelper(storm::storage::MaximalEndComponent const& mec, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, ValueType const& aperiodicFactor) : LraViHelper<ValueType>(mec, transitionMatrix), _markovianStates(markovianStates), _Msx1IsCurrent(false) {
+                    
+                    // Run through the Mec and collect some data:
+                    // We consider two submodels, one consisting of the Markovian MEC states and one consisting of the probabilistic MEC states.
+                    // For this, we create a state index map that points from state indices of the input model to the indices within the corresponding submodel.
+                    boost::container::flat_map<uint64_t, uint64_t> toSubModelStateMapping;
+                    // We also obtain state and choices counts of the two submodels
+                    uint64_t numPsSubModelStates(0), numPsSubModelChoices(0);
+                    uint64_t numMsSubModelStates(0); // The number of choices coincides with the number of states
+                    // We will need to uniformize the Markovian MEC states by introducing a selfloop.
+                    // For this, we need to find a uniformization rate which will be a little higher (given by aperiodicFactor) than the maximum rate occurring in the MEC.
+                    _uniformizationRate = storm::utility::zero<ValueType>();
+                    // Now run over the MEC and collect the required data.
+                    for (auto const& stateChoices : this->_mec) {
+                        uint64_t const& mecState = stateChoices.first;
+                        if (_markovianStates.get(mecState)) {
+                            toSubModelStateMapping.emplace(mecState, numMsSubModelStates);
+                            ++numMsSubModelStates;
+                            STORM_LOG_ASSERT(stateChoices.second.size() == 1, "Markovian state has multiple MEC choices.");
+                            _uniformizationRate = std::max(_uniformizationRate, exitRates[mecState]);
+                        } else {
+                            toSubModelStateMapping.emplace(mecState, numPsSubModelStates);
+                            ++numPsSubModelStates;
+                            numPsSubModelChoices += stateChoices.second.size();
+                        }
+                    }
+                    assert(numPsSubModelStates + numMsSubModelStates == mec.size());
+                    STORM_LOG_THROW(numMsSubModelStates > 0, storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
+
+                    _hasProbabilisticStates = numPsSubModelStates > 0;
+                    
+                    // We make sure that every Markovian state gets a selfloop to make the model aperiodic
+                    _uniformizationRate *= storm::utility::one<ValueType>() + aperiodicFactor;
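+                    // For example, with a maximal exit rate of 3 and an aperiodicFactor of 0.1, the uniformization rate
+                    // becomes 3.3, so even the fastest state keeps a selfloop probability of 1 - 3/3.3 (roughly 0.09).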
+
+                    // Now build the Markovian and the Probabilistic submodels.
+                    // In addition, we also need the transitions between the two.
+                    storm::storage::SparseMatrixBuilder<ValueType> msTransitionsBuilder(numMsSubModelStates, numMsSubModelStates);
+                    _MsChoiceValues.reserve(numMsSubModelStates);
+                    storm::storage::SparseMatrixBuilder<ValueType> msToPsTransitionsBuilder, psTransitionsBuilder, psToMsTransitionsBuilder;
+                    if (_hasProbabilisticStates) {
+                        msToPsTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numMsSubModelStates, numPsSubModelStates);
+                        psTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numPsSubModelChoices, numPsSubModelStates, 0, true, true, numPsSubModelStates);
+                        psToMsTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numPsSubModelChoices, numMsSubModelStates, 0, true, true, numPsSubModelStates);
+                        _PsChoiceValues.reserve(numPsSubModelChoices);
+                    }
+                    uint64_t currMsRow = 0;
+                    uint64_t currPsRow = 0;
+                    for (auto const& stateChoices : this->_mec) {
+                        uint64_t const& mecState = stateChoices.first;
+                        auto const& mecChoices = stateChoices.second;
+                        if (!_hasProbabilisticStates || _markovianStates.get(mecState)) {
+                            // The currently processed state is Markovian.
+                            // We need to uniformize!
+                            ValueType uniformizationFactor = exitRates[mecState] / _uniformizationRate;
+                            ValueType selfLoopProb = storm::utility::one<ValueType>() - uniformizationFactor;
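+                            // Uniformization: the step probability to a different state s' is (E(s)/lambda) * P(s,s'),
+                            // while the diagonal becomes 1 - E(s)/lambda plus the uniformized original selfloop (if any).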
+                            STORM_LOG_ASSERT(mecChoices.size() == 1, "Unexpected number of choices at Markovian state.");
+                            for (auto const& mecChoice : mecChoices) {
+                                bool insertedDiagElement = false;
+                                for (auto const& entry : this->_transitionMatrix.getRow(mecChoice)) {
+                                    uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()];
+                                    if (!_hasProbabilisticStates || _markovianStates.get(entry.getColumn())) {
+                                        // We have a transition from a Markovian state to a Markovian state
+                                        STORM_LOG_ASSERT(subModelColumn < numMsSubModelStates, "Invalid state for Markovian submodel");
+                                        if (!insertedDiagElement && subModelColumn > currMsRow) {
+                                            // We passed the diagonal entry, so add it now before moving on to the next entry
+                                            msTransitionsBuilder.addNextValue(currMsRow, currMsRow, selfLoopProb);
+                                            insertedDiagElement = true;
+                                        }
+                                        if (!insertedDiagElement && subModelColumn == currMsRow) {
+                                            // The current entry is the diagonal (selfloop) entry
+                                            msTransitionsBuilder.addNextValue(currMsRow, subModelColumn, selfLoopProb + uniformizationFactor * entry.getValue());
+                                            insertedDiagElement = true;
+                                        } else {
+                                            // The diagonal element either has been inserted already or still lies in front
+                                            msTransitionsBuilder.addNextValue(currMsRow, subModelColumn,  uniformizationFactor * entry.getValue());
+                                        }
+                                    } else {
+                                        // We have a transition from a Markovian to a probabilistic state
+                                        STORM_LOG_ASSERT(subModelColumn < numPsSubModelStates, "Invalid state for probabilistic submodel");
+                                        msToPsTransitionsBuilder.addNextValue(currMsRow, subModelColumn, uniformizationFactor * entry.getValue());
+                                    }
+                                }
+                                // If the diagonal entry for the MS matrix still has not been set, we do that now
+                                if (!insertedDiagElement) {
+                                    msTransitionsBuilder.addNextValue(currMsRow, currMsRow, selfLoopProb);
+                                }
+                                // Compute the rewards obtained for this choice.
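+                                // State rewards are reward rates: one uniformization step has expected duration 1/lambda.
+                                // The action reward is only collected when the Markovian transition actually fires,
+                                // which happens with probability E(s)/lambda per uniformization step.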
+                                _MsChoiceValues.push_back(stateRewardsGetter(mecState) / _uniformizationRate + actionRewardsGetter(mecChoice) * exitRates[mecState] / _uniformizationRate);
+                                ++currMsRow;
+                            }
+                        } else {
+                            // The currently processed state is probabilistic
+                            psTransitionsBuilder.newRowGroup(currPsRow);
+                            psToMsTransitionsBuilder.newRowGroup(currPsRow);
+                            for (auto const& mecChoice : mecChoices) {
+                                for (auto const& entry : this->_transitionMatrix.getRow(mecChoice)) {
+                                    uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()];
+                                    if (_markovianStates.get(entry.getColumn())) {
+                                        // We have a transition from a probabilistic state to a Markovian state
+                                        STORM_LOG_ASSERT(subModelColumn < numMsSubModelStates, "Invalid state for Markovian submodel");
+                                        psToMsTransitionsBuilder.addNextValue(currPsRow, subModelColumn, entry.getValue());
+                                    } else {
+                                        // We have a transition from a probabilistic to a probabilistic state
+                                        STORM_LOG_ASSERT(subModelColumn < numPsSubModelStates, "Invalid state for probabilistic submodel");
+                                        psTransitionsBuilder.addNextValue(currPsRow, subModelColumn, entry.getValue());
+                                    }
+                                }
+                                // Compute the rewards obtained for this choice.
+                                // State rewards do not count here since no time passes in probabilistic states.
+                                _PsChoiceValues.push_back(actionRewardsGetter(mecChoice));
+                                ++currPsRow;
+                            }
+                        }
+                    }
+                    _MsTransitions = msTransitionsBuilder.build();
+                    if (_hasProbabilisticStates) {
+                        _MsToPsTransitions = msToPsTransitionsBuilder.build();
+                        _PsTransitions = psTransitionsBuilder.build();
+                        _PsToMsTransitions = psToMsTransitionsBuilder.build();
+                    }
+                }
+                
+                void initializeIterations(Environment const& env, storm::solver::OptimizationDirection const& dir) {
+                    _Msx1.resize(_MsTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
+                    _Msx2 = _Msx1;
+                    _MsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _MsTransitions);
+                    if (_hasProbabilisticStates) {
+                        if (_PsTransitions.getNonzeroEntryCount() > 0) {
+                            // Set-up a solver for transitions within PS states
+                            _PsSolverEnv = env;
+                            if (env.solver().isForceSoundness()) {
+                                // To get correct results, the inner equation systems are solved exactly.
+                                // TODO investigate how an error would propagate
+                                _PsSolverEnv.solver().setForceExact(true);
+                            }
+                            storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> factory;
+                            bool isAcyclic = !storm::utility::graph::hasCycle(_PsTransitions);
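+                            // Acyclic systems can be solved by a single (topological) pass, so prefer that method here.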
+                            if (isAcyclic) {
+                                STORM_LOG_INFO("Probabilistic transitions are acyclic.");
+                                _PsSolverEnv.solver().minMax().setMethod(storm::solver::MinMaxMethod::Acyclic);
+                            }
+                            _PsSolver = factory.create(_PsSolverEnv, _PsTransitions);
+                            _PsSolver->setHasUniqueSolution(true); // Assume non-zeno MA
+                            _PsSolver->setHasNoEndComponents(true); // Assume non-zeno MA
+                            _PsSolver->setCachingEnabled(true);
+                            _PsSolver->setRequirementsChecked(true);
+                            auto req = _PsSolver->getRequirements(_PsSolverEnv, dir);
+                            req.clearUniqueSolution();
+                            if (isAcyclic) {
+                                req.clearAcyclic();
+                            }
+                            // Computing a priori lower/upper bounds is not particularly easy, as there might be selfloops with high probabilities
+                            // which accumulate a lot of reward. Moreover, the right-hand side of the equation system changes dynamically.
+                            STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "The solver requirement " << req.getEnabledRequirementsAsString() << " has not been checked.");
+                        }
+                        
+                        // Set up multipliers for transitions connecting Markovian and probabilistic states
+                        _MsToPsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _MsToPsTransitions);
+                        _PsToMsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _PsToMsTransitions);
+
+                        // Set-up vectors for storing intermediate results for PS states.
+                        _Psx.resize(_PsTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
+                        _Psb = _PsChoiceValues;
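+                        // _Psb serves as the right-hand side of the equation system for the PS states; prepareNextIteration
+                        // refreshes it by adding the weighted Markovian state values to the choice rewards.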
+                    }
+                    
+                }
+                
+                void setInputModelChoices(std::vector<uint64_t>& choices, std::vector<uint64_t> const& localMecChoices, bool setChoiceZeroToMarkovianStates) {
+                    // Transform the local choices (within this mec) to choice indices for the input model
+                    uint64_t mecState = 0;
+                    for (auto const& stateChoices : this->_mec) {
+                        if (setChoiceZeroToMarkovianStates && _markovianStates.get(stateChoices.first)) {
+                            choices[stateChoices.first] = 0;
+                        } else {
+                            uint64_t mecChoice = localMecChoices[mecState];
+                            STORM_LOG_ASSERT(mecChoice < stateChoices.second.size(), "The selected choice does not seem to exist.");
+                            uint64_t globalChoiceIndex = *(stateChoices.second.begin() + mecChoice);
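+                            // Translate the global choice index into an offset within the state's row group of the input model.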
+                            choices[stateChoices.first] = globalChoiceIndex - this->_transitionMatrix.getRowGroupIndices()[stateChoices.first];
+                            ++mecState;
+                        }
+                    }
+                    STORM_LOG_ASSERT(mecState == localMecChoices.size(), "Did not traverse all mec states.");
+                }
+                
+                virtual void iterate(Environment const& env, storm::solver::OptimizationDirection const& dir, std::vector<uint64_t>* choices = nullptr) override {
+                    // Initialize value vectors, multipliers, and the solver if this has not been done yet
+                    if (!_MsMultiplier) {
+                        initializeIterations(env, dir);
+                    }
+                    
+                    // Compute new x values for the Markovian states
+                    // Flip what is current and what is previous
+                    _Msx1IsCurrent = !_Msx1IsCurrent;
+                    // At this point, xPrevious() points to what has been computed in the previous call of iterate (initially, this is the 0-vector).
+                    // The result of this computation will be stored in xCurrent().
+                    
+                    // Compute the values obtained by a single uniformization step between Markovian states only
+                    _MsMultiplier->multiply(env, xPrevious(), &_MsChoiceValues, xCurrent());
+                    if (_hasProbabilisticStates) {
+                        // Add the values obtained by taking a single uniformization step that leads to a probabilistic state, followed by arbitrarily many probabilistic steps.
+                        // First compute the total values when taking arbitrarily many probabilistic transitions (in no time)
+                        if (_PsSolver) {
+                            // We might need to track the optimal choices.
+                            if (choices == nullptr) {
+                                _PsSolver->solveEquations(_PsSolverEnv, dir, _Psx, _Psb);
+                            } else {
+                                _PsSolver->setTrackScheduler();
+                                _PsSolver->solveEquations(_PsSolverEnv, dir, _Psx, _Psb);
+                                setInputModelChoices(*choices, _PsSolver->getSchedulerChoices(), true);
+                            }
+                        } else {
+                            STORM_LOG_ASSERT(_PsTransitions.getNonzeroEntryCount() == 0, "If no solver was initialized, an empty matrix was expected.");
+                            if (choices == nullptr) {
+                                storm::utility::vector::reduceVectorMinOrMax(dir, _Psb, _Psx, _PsTransitions.getRowGroupIndices());
+                            } else {
+                                std::vector<uint64_t> psMecChoices(_PsTransitions.getRowGroupCount());
+                                storm::utility::vector::reduceVectorMinOrMax(dir, _Psb, _Psx, _PsTransitions.getRowGroupIndices(), &psMecChoices);
+                                setInputModelChoices(*choices, psMecChoices, true);
+                            }
+                        }
+                        // Now add the (weighted) values of the probabilistic states to the values of the Markovian states.
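+                        // Note that summand and target vector coincide here, i.e., we compute xCurrent += MsToPs * Psx;
+                        // the multiplier implementations have to support this aliasing (cf. the GmmxxMultiplier changes in a later patch).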
+                        _MsToPsMultiplier->multiply(env, _Psx, &xCurrent(), xCurrent());
+                    }
+                }
+                
+                virtual typename LraViHelper<ValueType>::ConvergenceCheckResult checkConvergence(bool relative, ValueType precision) override {
+                    typename LraViHelper<ValueType>::ConvergenceCheckResult res;
+                    // All values are scaled according to the uniformizationRate.
+                    // We need to 'revert' this scaling when computing the absolute precision.
+                    // However, for relative precision, the scaling cancels out.
+                    ValueType threshold = relative ? precision : ValueType(precision / _uniformizationRate);
+                    std::tie(res.isPrecisionAchieved, res.currentValue) = this->checkMinMaxDiffBelowThreshold(xPrevious(), xCurrent(), threshold, relative);
+                    res.currentValue *= _uniformizationRate; // "Undo" the scaling of the values
+                    return res;
+                }
+                
+                virtual void prepareNextIteration(Environment const& env) override {
+                    // To avoid large (and numerically unstable) x-values, we subtract a reference value.
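+                    // Shifting all values by the same constant is sound: only the differences between x-values matter here.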
+                    ValueType referenceValue = xCurrent().front();
+                    storm::utility::vector::applyPointwise<ValueType, ValueType>(xCurrent(), xCurrent(), [&referenceValue] (ValueType const& x_i) -> ValueType { return x_i - referenceValue; });
+                    if (_hasProbabilisticStates) {
+                        // Update the RHS of the equation system for the probabilistic states by taking the new values of Markovian states into account.
+                        _PsToMsMultiplier->multiply(env, xCurrent(), &_PsChoiceValues, _Psb);
+                    }
+                }
+                
+            private:
+                
+                std::vector<ValueType>& xCurrent() {
+                    return _Msx1IsCurrent ? _Msx1 : _Msx2;
+                }
+                
+                std::vector<ValueType>& xPrevious() {
+                    return _Msx1IsCurrent ? _Msx2 : _Msx1;
+                }
+                
+                storm::storage::BitVector const& _markovianStates;
+                bool _hasProbabilisticStates;
+                ValueType _uniformizationRate;
+                storm::storage::SparseMatrix<ValueType> _MsTransitions, _MsToPsTransitions, _PsTransitions, _PsToMsTransitions;
+                std::vector<ValueType> _Msx1, _Msx2, _MsChoiceValues;
+                bool _Msx1IsCurrent;
+                std::vector<ValueType> _Psx, _Psb, _PsChoiceValues;
+                std::unique_ptr<storm::solver::Multiplier<ValueType>> _MsMultiplier, _MsToPsMultiplier, _PsToMsMultiplier;
+                std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> _PsSolver;
+                Environment _PsSolverEnv;
+            };
+            
             template <typename ValueType>
             ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
 
@@ -431,7 +705,7 @@ namespace storm {
                 // Create an object for the iterations
                 std::shared_ptr<LraViHelper<ValueType>> iterationHelper;
                 if (isContinuousTime()) {
-                    // TODO
+                    iterationHelper = std::make_shared<MaLraViHelper<ValueType>>(mec, _transitionMatrix, *_markovianStates, *_exitRates, stateRewardsGetter, actionRewardsGetter, aperiodicFactor);
                 } else {
                     iterationHelper = std::make_shared<MdpLraViHelper<ValueType>>(mec, _transitionMatrix, stateRewardsGetter, actionRewardsGetter, aperiodicFactor);
                 }
@@ -452,7 +726,7 @@ namespace storm {
                         break;
                     }
                     
-                    iterationHelper->prepareNextIteration();
+                    iterationHelper->prepareNextIteration(env);
                     
                 }
                 if (maxIter.is_initialized() && iter == maxIter.get()) {
@@ -465,7 +739,7 @@ namespace storm {
                 
                 if (isProduceSchedulerSet()) {
                     // We will be doing one more iteration step and track scheduler choices this time.
-                    iterationHelper->prepareNextIteration();
+                    iterationHelper->prepareNextIteration(env);
                     iterationHelper->iterate(env, dir, &_producedOptimalChoices.get());
                 }
                 return result;
@@ -493,7 +767,7 @@ namespace storm {
                     for (auto choice : stateChoicesPair.second) {
                         storm::expressions::Expression constraint = -lambda;
                         
-                        for (auto element : _transitionMatrix.getRow(choice)) {
+                        for (auto const& element : _transitionMatrix.getRow(choice)) {
                             constraint = constraint + stateToVariableMap.at(element.getColumn()) * solver->getConstant(element.getValue());
                         }
                         constraint = solver->getConstant(stateRewardsGetter(state) + actionRewardsGetter(choice)) + constraint;
@@ -522,7 +796,7 @@ namespace storm {
                 // As there could be multiple transitions to the same MEC, we accumulate them in this map before adding them to the matrix builder.
                 std::map<uint64_t, ValueType> auxiliaryStateToProbabilityMap;
                 
-                for (auto transition : inputTransitionMatrix.getRow(inputMatrixChoice)) {
+                for (auto const& transition : inputTransitionMatrix.getRow(inputMatrixChoice)) {
                     if (!storm::utility::isZero(transition.getValue())) {
                         auto const& sspTransitionTarget = inputToSspStateMap[transition.getColumn()];
                         // Since the auxiliary MEC states are appended at the end of the matrix, we can use this check to

From b5bd7aa0c277599f61c2953f2d3ce36656115b90 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 3 Aug 2020 14:57:12 +0200
Subject: [PATCH 10/48] Introduced a utility function that sets information
 from a CheckTask to a helper.

---
 .../utility/SetInformationFromCheckTask.h     | 31 +++++++++++++++++++
 .../prctl/SparseMdpPrctlModelChecker.cpp      | 11 ++++---
 2 files changed, 37 insertions(+), 5 deletions(-)
 create mode 100644 src/storm/modelchecker/helper/utility/SetInformationFromCheckTask.h

diff --git a/src/storm/modelchecker/helper/utility/SetInformationFromCheckTask.h b/src/storm/modelchecker/helper/utility/SetInformationFromCheckTask.h
new file mode 100644
index 000000000..e1348cd03
--- /dev/null
+++ b/src/storm/modelchecker/helper/utility/SetInformationFromCheckTask.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#include "storm/modelchecker/CheckTask.h"
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+            
+            /*!
+             * Forwards relevant information stored in the given CheckTask to the given helper
+             */
+            template <typename HelperType, typename FormulaType, typename ModelType>
+            void setInformationFromCheckTaskNondeterministic(HelperType& helper, storm::modelchecker::CheckTask<FormulaType, typename ModelType::ValueType> const& checkTask, ModelType const& model) {
+                // Relevancy of initial states.
+                if (checkTask.isOnlyInitialStatesRelevantSet()) {
+                    helper.setRelevantStates(model.getInitialStates());
+                }
+                // Value threshold to which the result will be compared
+                if (checkTask.isBoundSet()) {
+                    helper.setValueThreshold(checkTask.getBoundComparisonType(), checkTask.getBoundThreshold());
+                }
+                // Optimization direction
+                if (checkTask.isOptimizationDirectionSet()) {
+                    helper.setOptimizationDirection(checkTask.getOptimizationDirection());
+                }
+                // Scheduler production
+                helper.setProduceScheduler(checkTask.isProduceSchedulersSet());
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
index 46a2357c1..bc0321317 100644
--- a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
+++ b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
@@ -16,6 +16,7 @@
 
 #include "storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h"
 #include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
+#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
 
 #include "storm/modelchecker/prctl/helper/rewardbounded/QuantileHelper.h"
 #include "storm/modelchecker/multiobjective/multiObjectiveModelChecking.h"
@@ -221,14 +222,15 @@ namespace storm {
 
 		template<typename SparseMdpModelType>
 		std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageProbabilities(Environment const& env, CheckTask<storm::logic::StateFormula, ValueType> const& checkTask) {
-            storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
+		    storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
 			STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
 			std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
 			ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
+			
 			storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions());
-			helper.setOptimizationDirection(checkTask.getOptimizationDirection());
-			helper.setProduceScheduler(checkTask.isProduceSchedulersSet());
+            storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
 			auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
+			
             std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
             if (checkTask.isProduceSchedulersSet()) {
                 result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
@@ -241,8 +243,7 @@ namespace storm {
             STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
             storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions());
-			helper.setOptimizationDirection(checkTask.getOptimizationDirection());
-			helper.setProduceScheduler(checkTask.isProduceSchedulersSet());
+            storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
 			auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
             std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
             if (checkTask.isProduceSchedulersSet()) {

From 32503594d5f32f1c0dedbbee8e9222b746590288 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 3 Aug 2020 14:57:38 +0200
Subject: [PATCH 11/48] Use new LRA helper for Markov automata.

---
 .../SparseMarkovAutomatonCslModelChecker.cpp  | 25 ++++++++++++++++---
 1 file changed, 21 insertions(+), 4 deletions(-)

diff --git a/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp b/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp
index eac2e12a8..f68db3db2 100644
--- a/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp
+++ b/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp
@@ -1,6 +1,8 @@
 #include "storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.h"
 
 #include "storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h"
+#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
+#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
 
 #include "storm/modelchecker/multiobjective/multiObjectiveModelChecking.h"
 
@@ -140,8 +142,15 @@ namespace storm {
             std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
             ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
 
-            std::vector<ValueType> result = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(env, checkTask.getOptimizationDirection(), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRates(), this->getModel().getMarkovianStates(), subResult.getTruthValuesVector());
-            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(result)));
+            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
+            storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
+            auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
+
+            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
+            if (checkTask.isProduceSchedulersSet()) {
+                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
+            }
+            return result;
         }
         
         template<typename SparseMarkovAutomatonModelType>
@@ -149,8 +158,16 @@ namespace storm {
             STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
             STORM_LOG_THROW(this->getModel().isClosed(), storm::exceptions::InvalidPropertyException, "Unable to compute long run average rewards in non-closed Markov automaton.");
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
-            std::vector<ValueType> result = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards<ValueType, RewardModelType>(env, checkTask.getOptimizationDirection(), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRates(), this->getModel().getMarkovianStates(), rewardModel.get());
-            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(result)));
+            
+            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
+            storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
+            auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
+
+            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
+            if (checkTask.isProduceSchedulersSet()) {
+                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
+            }
+            return result;
         }
         
         template<typename SparseMarkovAutomatonModelType>

From 9d2e5c219319bdb4a148d035f648ff673b3ee82f Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 3 Aug 2020 14:58:35 +0200
Subject: [PATCH 12/48] Relaxed precision requirements on an MA LRA test-case
 to correctly represent a relative precision criterion.

---
 .../modelchecker/csl/MarkovAutomatonCslModelCheckerTest.cpp     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/test/storm/modelchecker/csl/MarkovAutomatonCslModelCheckerTest.cpp b/src/test/storm/modelchecker/csl/MarkovAutomatonCslModelCheckerTest.cpp
index 03c54a27a..745c32f55 100755
--- a/src/test/storm/modelchecker/csl/MarkovAutomatonCslModelCheckerTest.cpp
+++ b/src/test/storm/modelchecker/csl/MarkovAutomatonCslModelCheckerTest.cpp
@@ -341,7 +341,7 @@ namespace {
             EXPECT_NEAR(this->parseNumber("0"), this->getQuantitativeResultAtInitialState(model, result), this->precision());
      
             result = checker->check(this->env(), tasks[8]);
-            EXPECT_NEAR(this->parseNumber("407"), this->getQuantitativeResultAtInitialState(model, result), this->precision());
+            EXPECT_NEAR(this->parseNumber("407"), this->getQuantitativeResultAtInitialState(model, result), this->precision() * this->parseNumber("407")); // use relative precision!
      
             result = checker->check(this->env(), tasks[9]);
             EXPECT_NEAR(this->parseNumber("27"), this->getQuantitativeResultAtInitialState(model, result), this->precision());

From 5917b020fc626c2929496ebef632bd13c01b7fa0 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 3 Aug 2020 15:09:48 +0200
Subject: [PATCH 13/48] GMM Multiplier: Support for y += A*x

---
 .../3rdparty/gmm-5.2/include/gmm/gmm_blas.h   | 25 +++++++++++++++++++
 src/storm/solver/GmmxxMultiplier.cpp          | 12 +++++++--
 2 files changed, 35 insertions(+), 2 deletions(-)

diff --git a/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h b/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h
index 4fbe070d3..9ee3f967a 100644
--- a/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h
+++ b/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h
@@ -1797,6 +1797,11 @@ namespace gmm {
   void mult_add_by_row_parallel(const L1& l1, const L2& l2, const L3& l3, L4& l4, abstract_dense) {
     tbb::parallel_for(tbb::blocked_range<unsigned long>(0, vect_size(l4), 10), TbbMultAddFunctor<L1, L2, L3, L4>(l1, l2, l3, l4));
   }
+  
+  template <typename L1, typename L2, typename L3>
+  void mult_add_by_row_parallel(const L1& l1, const L2& l2, L3& l3, abstract_dense) {
+    tbb::parallel_for(tbb::blocked_range<unsigned long>(0, vect_size(l3), 10), TbbMultAddFunctor<L1, L2, L3, L3>(l1, l2, l3, l3));
+  }
 #endif
     
   template <typename L1, typename L2, typename L3>
@@ -1949,6 +1954,22 @@ namespace gmm {
     }
   }
     
+  /** Multiply-accumulate. l3 += l1*l2; */
+  template <typename L1, typename L2, typename L3> inline
+  void mult_add_parallel(const L1& l1, const L2& l2, L3& l3) {
+    size_type m = mat_nrows(l1), n = mat_ncols(l1);
+    if (!m || !n) return;
+    GMM_ASSERT2(n==vect_size(l2) && m==vect_size(l3), "dimensions mismatch");
+    if (!same_origin(l2, l3)) {
+      mult_add_parallel_spec(l1, l2, l3, typename principal_orientation_type<typename linalg_traits<L1>::sub_orientation>::potype());
+    } else {
+      GMM_WARNING2("Warning, A temporary is used for mult\n");
+      typename temporary_vector<L3>::vector_type temp(vect_size(l2));
+      copy(l2, temp);
+      mult_add_parallel_spec(l1, temp, l3, typename principal_orientation_type<typename linalg_traits<L1>::sub_orientation>::potype());
+    }
+  }
+  
   /** Multiply-accumulate. l4 = l1*l2 + l3; */
   template <typename L1, typename L2, typename L3, typename L4> inline
   void mult_add_parallel(const L1& l1, const L2& l2, const L3& l3, L4& l4) {
@@ -2056,6 +2077,10 @@ namespace gmm {
   template <typename L1, typename L2, typename L3, typename L4> inline
   void mult_add_parallel_spec(const L1& l1, const L2& l2, const L3& l3, L4& l4, row_major)
   { mult_add_by_row_parallel(l1, l2, l3, l4, typename linalg_traits<L4>::storage_type()); }
+  
+  template <typename L1, typename L2, typename L3> inline
+  void mult_add_parallel_spec(const L1& l1, const L2& l2, L3& l3, row_major)
+  { mult_add_by_row_parallel(l1, l2, l3, typename linalg_traits<L4>::storage_type()); }
 #endif
 
   template <typename L1, typename L2, typename L3> inline
diff --git a/src/storm/solver/GmmxxMultiplier.cpp b/src/storm/solver/GmmxxMultiplier.cpp
index d8c4c2468..6e0f727b0 100644
--- a/src/storm/solver/GmmxxMultiplier.cpp
+++ b/src/storm/solver/GmmxxMultiplier.cpp
@@ -120,7 +120,11 @@ namespace storm {
         template<typename ValueType>
         void GmmxxMultiplier<ValueType>::multAdd(std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const {
             if (b) {
-                gmm::mult_add(gmmMatrix, x, *b, result);
+                if (b == &result) {
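+                    // b aliases the result vector, so use gmm's in-place overload computing result += A*x.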
+                    gmm::mult_add(gmmMatrix, x, result);
+                } else {
+                    gmm::mult_add(gmmMatrix, x, *b, result);
+                }
             } else {
                 gmm::mult(gmmMatrix, x, result);
             }
@@ -257,7 +261,11 @@ namespace storm {
         void GmmxxMultiplier<ValueType>::multAddParallel(std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const {
 #ifdef STORM_HAVE_INTELTBB
             if (b) {
-                gmm::mult_add_parallel(gmmMatrix, x, *b, result);
+                if (b == &result) {
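+                    // Same aliasing case as in multAdd: use the parallel in-place overload (result += A*x) added above.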
+                    gmm::mult_add_parallel(gmmMatrix, x, result);
+                } else {
+                    gmm::mult_add_parallel(gmmMatrix, x, *b, result);
+                }
             } else {
                 gmm::mult_parallel(gmmMatrix, x, result);
             }

From 51c8779e73e289509e4352fb951cf9df9c77a26e Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 3 Aug 2020 16:44:18 +0200
Subject: [PATCH 14/48] Added missing template instantiations.

---
 src/storm/models/sparse/StandardRewardModel.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/storm/models/sparse/StandardRewardModel.cpp b/src/storm/models/sparse/StandardRewardModel.cpp
index 052a95af0..8bceb6452 100644
--- a/src/storm/models/sparse/StandardRewardModel.cpp
+++ b/src/storm/models/sparse/StandardRewardModel.cpp
@@ -449,6 +449,7 @@ namespace storm {
             template storm::storage::BitVector StandardRewardModel<double>::getStatesWithFilter(storm::storage::SparseMatrix<double> const& transitionMatrix, std::function<bool(double const&)> const& filter) const;
             template storm::storage::BitVector StandardRewardModel<double>::getChoicesWithZeroReward(storm::storage::SparseMatrix<double> const& transitionMatrix) const;
             template storm::storage::BitVector StandardRewardModel<double>::getChoicesWithFilter(storm::storage::SparseMatrix<double> const& transitionMatrix, std::function<bool(double const&)> const& filter) const;
+            template double StandardRewardModel<double>::getStateActionAndTransitionReward(uint_fast64_t stateIndex, storm::storage::SparseMatrix<double> const& transitionMatrix) const;
             template double StandardRewardModel<double>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<double> const& transitionMatrix, double const& stateRewardWeight, double const& actionRewardWeight) const;
             template void StandardRewardModel<double>::clearRewardAtState(uint_fast64_t state, storm::storage::SparseMatrix<double> const& transitionMatrix);
             template void StandardRewardModel<double>::reduceToStateBasedRewards(storm::storage::SparseMatrix<double> const& transitionMatrix, bool reduceToStateRewards, std::vector<double> const* weights);
@@ -476,6 +477,7 @@ namespace storm {
             template storm::storage::BitVector StandardRewardModel<storm::RationalNumber>::getStatesWithFilter(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::function<bool(storm::RationalNumber const&)> const& filter) const;
             template storm::storage::BitVector StandardRewardModel<storm::RationalNumber>::getChoicesWithZeroReward(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix) const;
             template storm::storage::BitVector StandardRewardModel<storm::RationalNumber>::getChoicesWithFilter(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::function<bool(storm::RationalNumber const&)> const& filter) const;
+            template storm::RationalNumber StandardRewardModel<storm::RationalNumber>::getStateActionAndTransitionReward(uint_fast64_t stateIndex, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix) const;
             template storm::RationalNumber StandardRewardModel<storm::RationalNumber>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::RationalNumber const& stateRewardWeight, storm::RationalNumber const& actionRewardWeight) const;
             template void StandardRewardModel<storm::RationalNumber>::reduceToStateBasedRewards(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, bool reduceToStateRewards, std::vector<storm::RationalNumber> const* weights);
             template void StandardRewardModel<storm::RationalNumber>::clearRewardAtState(uint_fast64_t state, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix);
@@ -493,6 +495,7 @@ namespace storm {
             template storm::storage::BitVector StandardRewardModel<storm::RationalFunction>::getChoicesWithFilter(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, std::function<bool(storm::RationalFunction const&)> const& filter) const;
             template void StandardRewardModel<storm::RationalFunction>::clearRewardAtState(uint_fast64_t state, storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix);
             template std::vector<storm::RationalFunction> StandardRewardModel<storm::RationalFunction>::getTotalActionRewardVector(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix,  std::vector<storm::RationalFunction> const& stateRewardWeights) const;
+            template storm::RationalFunction StandardRewardModel<storm::RationalFunction>::getStateActionAndTransitionReward(uint_fast64_t stateIndex, storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix) const;
             template storm::RationalFunction StandardRewardModel<storm::RationalFunction>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, storm::RationalFunction const& stateRewardWeight, storm::RationalFunction const& actionRewardWeight) const;
             template void StandardRewardModel<storm::RationalFunction>::reduceToStateBasedRewards(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, bool reduceToStateRewards, std::vector<storm::RationalFunction> const* weights);
             template void StandardRewardModel<storm::RationalFunction>::setStateActionReward(uint_fast64_t choiceIndex, storm::RationalFunction const & newValue);

From a618147192347bca38035172eed8d83e1ab013cc Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 3 Aug 2020 16:45:03 +0200
Subject: [PATCH 15/48] gmm Multiplier: Added support for computing y += A*x in
 Parallel.

---
 resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h b/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h
index 9ee3f967a..5380fbbec 100644
--- a/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h
+++ b/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h
@@ -2080,7 +2080,7 @@ namespace gmm {
   
   template <typename L1, typename L2, typename L3> inline
   void mult_add_parallel_spec(const L1& l1, const L2& l2, L3& l3, row_major)
-  { mult_add_by_row_parallel(l1, l2, l3, typename linalg_traits<L4>::storage_type()); }
+  { mult_add_by_row_parallel(l1, l2, l3, typename linalg_traits<L3>::storage_type()); }
 #endif
 
   template <typename L1, typename L2, typename L3> inline

From 6e55dba8d4d11491ccf018bcc317c3f7517e361a Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 5 Aug 2020 11:39:03 +0200
Subject: [PATCH 16/48] Moved LraViHelper to a separate file. Merged MDP and MA
 implementation.

---
 .../infinitehorizon/internal/LraViHelper.cpp  | 517 ++++++++++++++++++
 .../infinitehorizon/internal/LraViHelper.h    | 151 +++++
 2 files changed, 668 insertions(+)
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h

diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
new file mode 100644
index 000000000..d4c3ba856
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
@@ -0,0 +1,517 @@
+#include "LraViHelper.h"
+
+#include "storm/solver/LinearEquationSolver.h"
+#include "storm/solver/MinMaxLinearEquationSolver.h"
+#include "storm/solver/Multiplier.h"
+
+#include "storm/storage/MaximalEndComponent.h"
+#include "storm/storage/StronglyConnectedComponent.h"
+
+#include "storm/utility/graph.h"
+#include "storm/utility/vector.h"
+#include "storm/utility/macros.h"
+#include "storm/utility/SignalHandler.h"
+
+#include "storm/environment/solver/SolverEnvironment.h"
+#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
+#include "storm/environment/solver/MinMaxSolverEnvironment.h"
+
+
+#include "storm/exceptions/UnmetRequirementException.h"
+
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+            namespace internal {
+                
+                /// Auxiliary functions that deal with the different kinds of components (MECs on potentially nondeterministic models and BSCCs on deterministic models)
+                // BSCCS:
+                uint64_t getComponentElementState(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return element; }
+                uint64_t getComponentElementChoiceCount(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return 1; } // Assumes deterministic model!
+                uint64_t const* getComponentChoicesBegin(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element; }
+                uint64_t const* getComponentChoicesEnd(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element + 1; }
+                // MECS:
+                uint64_t getComponentElementState(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.first; }
+                uint64_t getComponentElementChoiceCount(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.size(); }
+                typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentChoicesBegin(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.begin(); }
+                typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentChoicesEnd(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.end(); }
+                
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                LraViHelper<ValueType, ComponentType, TransitionsType>::LraViHelper(ComponentType const& component, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, ValueType const& aperiodicFactor, storm::storage::BitVector const* timedStates, std::vector<ValueType> const* exitRates) : _component(component), _transitionMatrix(transitionMatrix), _timedStates(timedStates), _hasInstantStates(TransitionsType == LraViTransitionsType::DetTsNondetIs || TransitionsType == LraViTransitionsType::DetTsDetIs) {
+                    // Run through the component and collect some data:
+                    // We create two submodels, one consisting of the timed states of the component and one consisting of the instant states of the component.
+                    // For this, we create a state index map that points from state indices of the input model to the corresponding indices within these submodels.
+                    boost::container::flat_map<uint64_t, uint64_t> toSubModelStateMapping;
+                    // We also obtain the state and choice counts of the two submodels.
+                    uint64_t numTsSubModelStates(0), numTsSubModelChoices(0);
+                    uint64_t numIsSubModelStates(0), numIsSubModelChoices(0);
+                    // We will need to uniformize the timed states by introducing a selfloop at each of them.
+                    // For this, we need a uniformization rate that is slightly larger (scaled by 1 + aperiodicFactor) than the maximum exit rate occurring in the component.
+                    _uniformizationRate = exitRates == nullptr ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>();
+                    // Now run over the component and collect the required data.
+                    for (auto const& element : _component) {
+                        uint64_t const& componentState = getComponentElementState(element);
+                        if (isTimedState(componentState)) {
+                            toSubModelStateMapping.emplace(componentState, numTsSubModelStates);
+                            ++numTsSubModelStates;
+                            numTsSubModelChoices += getComponentElementChoiceCount(element);
+                            STORM_LOG_ASSERT(nondetTs() || getComponentElementChoiceCount(element) == 1, "Timed state has multiple choices but only a single choice was expected.");
+                            if (exitRates) {
+                                _uniformizationRate = std::max(_uniformizationRate, (*exitRates)[componentState]);
+                            }
+                        } else {
+                            toSubModelStateMapping.emplace(componentState, numIsSubModelStates);
+                            ++numIsSubModelStates;
+                            numIsSubModelChoices += getComponentElementChoiceCount(element);
+                            STORM_LOG_ASSERT(nondetIs() || getComponentElementChoiceCount(element) == 1, "Instant state has multiple choices but only a single choice was expected.");
+                        }
+                    }
+                    STORM_LOG_ASSERT(numIsSubModelStates + numTsSubModelStates == _component.size(), "Unexpected number of submodel states.");
+                    STORM_LOG_ASSERT(_hasInstantStates || numIsSubModelStates == 0, "Found instant states even though the transition type does not allow for them.");
+                    STORM_LOG_ASSERT(nondetTs() || numTsSubModelStates == numTsSubModelChoices, "Unexpected choice count of deterministic timed submodel.");
+                    STORM_LOG_ASSERT(nondetIs() || numIsSubModelStates == numIsSubModelChoices, "Unexpected choice count of deterministic instant submodel.");
+                    _hasInstantStates = _hasInstantStates && numIsSubModelStates > 0;
+                    STORM_LOG_THROW(numTsSubModelStates > 0, storm::exceptions::InvalidOperationException, "Bottom component has no timed states. Computation of long run average values is not supported. Is this a Markov automaton with Zeno behavior?");
+
+                    // Increase the uniformization rate slightly so that every timed state gets a selfloop, which makes the model aperiodic.
+                    _uniformizationRate *= storm::utility::one<ValueType>() + aperiodicFactor;
+
+                    // Now build the timed and the instant submodels.
+                    // In addition, we also need the transitions between the two.
+                    storm::storage::SparseMatrixBuilder<ValueType> tsTransitionsBuilder(numTsSubModelChoices, numTsSubModelStates, 0, true, nondetTs(), nondetTs() ? numTsSubModelStates : 0);
+                    storm::storage::SparseMatrixBuilder<ValueType> tsToIsTransitionsBuilder, isTransitionsBuilder, isToTsTransitionsBuilder;
+                    if (_hasInstantStates) {
+                        tsToIsTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numTsSubModelChoices, numIsSubModelStates, 0, true, nondetTs(), nondetTs() ? numTsSubModelStates : 0);
+                        isTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numIsSubModelChoices, numIsSubModelStates, 0, true, nondetIs(), nondetIs() ? numIsSubModelStates : 0);
+                        isToTsTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numIsSubModelChoices, numTsSubModelStates, 0, true, nondetIs(), nondetIs() ? numIsSubModelStates : 0);
+                        _IsChoiceValues.reserve(numIsSubModelChoices);
+                    }
+                    ValueType uniformizationFactor = storm::utility::one<ValueType>() / _uniformizationRate;
+                    uint64_t currTsRow = 0;
+                    uint64_t currIsRow = 0;
+                    for (auto const& element : _component) {
+                        uint64_t const& componentState = getComponentElementState(element);
+                        if (isTimedState(componentState)) {
+                            // The currently processed state is timed.
+                            if (nondetTs()) {
+                                tsTransitionsBuilder.newRowGroup(currTsRow);
+                                tsToIsTransitionsBuilder.newRowGroup(currTsRow);
+                            }
+                            // We need to uniformize, which means that a diagonal entry for the selfloop will be inserted.
+                            // If there are exit rates, the uniformization factor needs to be updated.
+                            if (exitRates) {
+                                uniformizationFactor = (*exitRates)[componentState] / _uniformizationRate;
+                            }
+                            ValueType selfLoopProb = storm::utility::one<ValueType>() - uniformizationFactor;
+                            uint64_t selfLoopColumn = toSubModelStateMapping[componentState];
+                            for (auto componentChoiceIt = getComponentChoicesBegin(element); componentChoiceIt != getComponentChoicesEnd(element); ++componentChoiceIt) {
+                                bool insertedDiagElement = false;
+                                for (auto const& entry : this->_transitionMatrix.getRow(*componentChoiceIt)) {
+                                    uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()];
+                                    if (isTimedState(entry.getColumn())) {
+                                        // We have a transition from a timed state to a timed state
+                                        STORM_LOG_ASSERT(subModelColumn < numTsSubModelStates, "Invalid state for timed submodel");
+                                        if (!insertedDiagElement && subModelColumn > selfLoopColumn) {
+                                            // We passed the diagonal entry, so add it now before moving on to the next entry
+                                            tsTransitionsBuilder.addNextValue(currTsRow, selfLoopColumn, selfLoopProb);
+                                            insertedDiagElement = true;
+                                        }
+                                        if (!insertedDiagElement && subModelColumn == selfLoopColumn) {
+                                            // The current entry is the diagonal (selfloop) entry
+                                            tsTransitionsBuilder.addNextValue(currTsRow, selfLoopColumn, selfLoopProb + uniformizationFactor * entry.getValue());
+                                            insertedDiagElement = true;
+                                        } else {
+                                            // The diagonal element has either been inserted already or still lies ahead in this row
+                                            tsTransitionsBuilder.addNextValue(currTsRow, subModelColumn, uniformizationFactor * entry.getValue());
+                                        }
+                                    } else {
+                                        // We have a transition from a timed to an instant state
+                                        STORM_LOG_ASSERT(subModelColumn < numIsSubModelStates, "Invalid state for instant submodel");
+                                        tsToIsTransitionsBuilder.addNextValue(currTsRow, subModelColumn, uniformizationFactor * entry.getValue());
+                                    }
+                                }
+                                // If the diagonal entry of the timed submodel matrix still has not been set, we do that now
+                                if (!insertedDiagElement) {
+                                    tsTransitionsBuilder.addNextValue(currTsRow, selfLoopColumn, selfLoopProb);
+                                }
+                                ++currTsRow;
+                            }
+                        } else {
+                            // The currently processed state is instant
+                            if (nondetIs()) {
+                                isTransitionsBuilder.newRowGroup(currIsRow);
+                                isToTsTransitionsBuilder.newRowGroup(currIsRow);
+                            }
+                            for (auto componentChoiceIt = getComponentChoicesBegin(element); componentChoiceIt != getComponentChoicesEnd(element); ++componentChoiceIt) {
+                                for (auto const& entry : this->_transitionMatrix.getRow(*componentChoiceIt)) {
+                                    uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()];
+                                    if (isTimedState(entry.getColumn())) {
+                                        // We have a transition from an instant state to a timed state
+                                        STORM_LOG_ASSERT(subModelColumn < numTsSubModelStates, "Invalid state for timed submodel");
+                                        isToTsTransitionsBuilder.addNextValue(currIsRow, subModelColumn, entry.getValue());
+                                    } else {
+                                        // We have a transition from an instant to an instant state
+                                        STORM_LOG_ASSERT(subModelColumn < numIsSubModelStates, "Invalid state for instant submodel");
+                                        isTransitionsBuilder.addNextValue(currIsRow, subModelColumn, entry.getValue());
+                                    }
+                                }
+                                ++currIsRow;
+                            }
+                        }
+                    }
+                    _TsTransitions = tsTransitionsBuilder.build();
+                    if (_hasInstantStates) {
+                        _TsToIsTransitions = tsToIsTransitionsBuilder.build();
+                        _IsTransitions = isTransitionsBuilder.build();
+                        _IsToTsTransitions = isToTsTransitionsBuilder.build();
+                    }
+                }
+                
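+                // Editorial note (illustrative numbers, not part of the original patch): for a
+                // continuous-time component with exit rates E = {2, 3} and aperiodicFactor 0.1, the
+                // uniformization rate becomes 3 * 1.1 = 3.3. A state s with E(s) = 2 then gets a
+                // selfloop with probability 1 - 2/3.3 (roughly 0.394), and each of its original
+                // transition probabilities p is scaled to p * 2/3.3, so every row still sums to one
+                // and the resulting chain is aperiodic.
+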
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                ValueType LraViHelper<ValueType, ComponentType, TransitionsType>::performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices) {
+                    
+                    initializeNewValues(stateValueGetter, actionValueGetter, exitRates);
+                    
+                    ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision());
+                    bool relative = env.solver().lra().getRelativeTerminationCriterion();
+                    boost::optional<uint64_t> maxIter;
+                    if (env.solver().lra().isMaximalIterationCountSet()) {
+                        maxIter = env.solver().lra().getMaximalIterationCount();
+                    }
+                
+                    // start the iterations
+                    ValueType result = storm::utility::zero<ValueType>();
+                    bool converged = false;
+                    uint64_t iter = 0;
+                    while (!maxIter.is_initialized() || iter < maxIter.get()) {
+                        ++iter;
+                        performIterationStep(env, dir);
+                        
+                        // Check if we are done
+                        auto convergenceCheckResult = checkConvergence(relative, precision);
+                        result = convergenceCheckResult.currentValue;
+                        if (convergenceCheckResult.isPrecisionAchieved) {
+                            converged = true;
+                            break;
+                        }
+                        if (storm::utility::resources::isTerminate()) {
+                            break;
+                        }
+                        // If there will be a next iteration, we have to prepare it.
+                        prepareNextIteration(env);
+                        
+                    }
+                    if (!converged && maxIter.is_initialized() && iter == maxIter.get()) {
+                        STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
+                    } else if (storm::utility::resources::isTerminate()) {
+                        STORM_LOG_WARN("LRA computation aborted after " << iter << " iterations.");
+                    } else {
+                        STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
+                    }
+                    
+                    if (choices) {
+                        // We will be doing one more iteration step and track scheduler choices this time.
+                        prepareNextIteration(env);
+                        performIterationStep(env, dir, choices);
+                    }
+                    return result;
+                }
+                
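+                // Editorial sketch of a call site (hypothetical variable names, not part of the
+                // original patch), assuming an MDP MEC with reward vectors at hand:
+                //
+                //   LraViHelper<double, storm::storage::MaximalEndComponent, LraViTransitionsType::NondetTsNoIs> helper(mec, transitionMatrix, 0.1);
+                //   double lraValue = helper.performValueIteration(env,
+                //           [&stateRewards](uint64_t state) { return stateRewards[state]; },
+                //           [&actionRewards](uint64_t choice) { return actionRewards[choice]; },
+                //           nullptr, &dir);
+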
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                void LraViHelper<ValueType, ComponentType, TransitionsType>::initializeNewValues(ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates) {
+                    // clear potential old values and reserve enough space for new values
+                    _TsChoiceValues.clear();
+                    _TsChoiceValues.reserve(_TsTransitions.getRowCount());
+                    if (_hasInstantStates) {
+                        _IsChoiceValues.clear();
+                        _IsChoiceValues.reserve(_IsTransitions.getRowCount());
+                    }
+                    
+                    // Set the new choice-based values
+                    ValueType actionRewardScalingFactor = storm::utility::one<ValueType>() / _uniformizationRate;
+                    for (auto const& element : _component) {
+                        uint64_t const& componentState = getComponentElementState(element);
+                        if (isTimedState(componentState)) {
+                            if (exitRates) {
+                                actionRewardScalingFactor = (*exitRates)[componentState] / _uniformizationRate;
+                            }
+                            for (auto componentChoiceIt = getComponentChoicesBegin(element); componentChoiceIt != getComponentChoicesEnd(element); ++componentChoiceIt) {
+                                // Compute the values obtained for this choice.
+                                _TsChoiceValues.push_back(stateValueGetter(componentState) / _uniformizationRate + actionValueGetter(*componentChoiceIt) * actionRewardScalingFactor);
+                            }
+                        } else {
+                            for (auto componentChoiceIt = getComponentChoicesBegin(element); componentChoiceIt != getComponentChoicesEnd(element); ++componentChoiceIt) {
+                                // Compute the values obtained for this choice.
+                                // State values do not count here since no time passes in instant states.
+                                _IsChoiceValues.push_back(actionValueGetter(*componentChoiceIt));
+                            }
+                        }
+                    }
+
+                    // Set-up new iteration vectors for timed states
+                    _Tsx1.assign(_TsTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
+                    _Tsx2 = _Tsx1;
+                    
+                    if (_hasInstantStates) {
+                        // Set-up vectors for storing intermediate results for instant states.
+                        _Isx.resize(_IsTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
+                        _Isb = _IsChoiceValues;
+                    }
+                }
+                
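+                // Editorial note: per uniformization step, a timed state s thus collects
+                // stateValue(s) / unifRate (the reward accumulated during the expected sojourn time
+                // of one step) plus actionValue * E(s) / unifRate (weighted by the probability that
+                // the step corresponds to an actual jump). For discrete-time models
+                // (exitRates == nullptr), both scaling factors are simply 1 / unifRate.
+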
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                void LraViHelper<ValueType, ComponentType, TransitionsType>::prepareSolversAndMultipliers(Environment const& env, storm::solver::OptimizationDirection const* dir) {
+                    _TsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _TsTransitions);
+                    if (_hasInstantStates) {
+                        if (_IsTransitions.getNonzeroEntryCount() > 0) {
+                            // Set-up a solver for transitions within instant states
+                            _IsSolverEnv = std::make_unique<storm::Environment>(env);
+                            if (env.solver().isForceSoundness()) {
+                                // To get correct results, the inner equation systems are solved exactly.
+                                // TODO investigate how an error would propagate
+                                _IsSolverEnv->solver().setForceExact(true);
+                            }
+                            bool isAcyclic = !storm::utility::graph::hasCycle(_IsTransitions);
+                            if (isAcyclic) {
+                                STORM_LOG_INFO("Instant transitions are acyclic.");
+                                if (_IsSolverEnv->solver().minMax().isMethodSetFromDefault()) {
+                                    _IsSolverEnv->solver().minMax().setMethod(storm::solver::MinMaxMethod::Acyclic);
+                                }
+                                if (_IsSolverEnv->solver().isLinearEquationSolverTypeSetFromDefaultValue()) {
+                                   _IsSolverEnv->solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Acyclic);
+                                }
+                            }
+                            if (nondetIs()) {
+                                storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> factory;
+                                _NondetIsSolver = factory.create(*_IsSolverEnv, _IsTransitions);
+                                _NondetIsSolver->setHasUniqueSolution(true); // Assumes a non-Zeno MA
+                                _NondetIsSolver->setHasNoEndComponents(true); // Assumes a non-Zeno MA
+                                _NondetIsSolver->setCachingEnabled(true);
+                                auto req = _NondetIsSolver->getRequirements(*_IsSolverEnv, *dir);
+                                req.clearUniqueSolution();
+                                if (isAcyclic) {
+                                    req.clearAcyclic();
+                                }
+                                // Computing a priori lower/upper bounds is not particularly easy, as there might be selfloops with high probabilities
+                                // which accumulate a lot of reward. Moreover, the right-hand side of the equation system changes dynamically.
+                                STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "The solver requirement " << req.getEnabledRequirementsAsString() << " has not been cleared.");
+                                _NondetIsSolver->setRequirementsChecked(true);
+                            } else {
+                                storm::solver::GeneralLinearEquationSolverFactory<ValueType> factory;
+                                if (factory.getEquationProblemFormat(*_IsSolverEnv) != storm::solver::LinearEquationSolverProblemFormat::FixedPointSystem) {
+                                    // We need to convert the transition matrix connecting instant states
+                                    // TODO: This could have been done already during construction of the matrix.
+                                    // Insert diagonal entries.
+                                    storm::storage::SparseMatrix<ValueType> converted(_IsTransitions, true);
+                                    // Compute A' = I - A (where I is the identity matrix)
+                                    converted.convertToEquationSystem();
+                                    STORM_LOG_WARN("The selected equation solver requires creating a temporary " << converted.getDimensionsAsString());
+                                    // Note that the solver takes ownership of the converted matrix.
+                                    _DetIsSolver = factory.create(*_IsSolverEnv, std::move(converted));
+                                } else {
+                                    _DetIsSolver = factory.create(*_IsSolverEnv, _IsTransitions);
+                                }
+                                _DetIsSolver->setCachingEnabled(true);
+                                auto req = _DetIsSolver->getRequirements(*_IsSolverEnv);
+                                if (isAcyclic) {
+                                    req.clearAcyclic();
+                                }
+                                // A priori lower/upper bounds are hard to obtain (see the MinMax version of this above)
+                                STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "The solver requirement " << req.getEnabledRequirementsAsString() << " has not been cleared.");
+                            }
+                        }
+                        
+                        // Set up multipliers for transitions connecting timed and instant states
+                        _TsToIsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _TsToIsTransitions);
+                        _IsToTsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _IsToTsTransitions);
+                    }
+                }
+                
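+                // Editorial note: the solution of the instant-state subsystem feeds into every
+                // subsequent iteration step, so an inexact inner solve could let errors accumulate
+                // across iterations. Forcing exact inner solves under isForceSoundness() (see above,
+                // including the TODO on error propagation) avoids this at the cost of performance.
+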
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                void LraViHelper<ValueType, ComponentType, TransitionsType>::setInputModelChoices(std::vector<uint64_t>& choices, std::vector<uint64_t> const& localMecChoices, bool setChoiceZeroToTimedStates, bool setChoiceZeroToInstantStates) const {
+                    // Transform the local choices (within this MEC) to choice indices for the input model
+                    uint64_t localState = 0;
+                    for (auto const& element : _component) {
+                        uint64_t elementState = getComponentElementState(element);
+                        if ((setChoiceZeroToTimedStates && isTimedState(elementState)) || (setChoiceZeroToInstantStates && !isTimedState(elementState))) {
+                            choices[elementState] = 0;
+                        } else {
+                            uint64_t choice = localMecChoices[localState];
+                            STORM_LOG_ASSERT(choice < getComponentElementChoiceCount(element), "The selected choice does not seem to exist.");
+                            uint64_t globalChoiceIndex = *(getComponentChoicesBegin(element) + choice);
+                            choices[elementState] = globalChoiceIndex - _transitionMatrix.getRowGroupIndices()[elementState];
+                            ++localState;
+                        }
+                    }
+                    STORM_LOG_ASSERT(localState == localMecChoices.size(), "Did not traverse all component states.");
+                }
+                
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                void LraViHelper<ValueType, ComponentType, TransitionsType>::performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices) {
+                    STORM_LOG_ASSERT(!((nondetTs() || nondetIs()) && dir == nullptr), "No optimization direction provided for model with nondeterminism");
+                    // Initialize value vectors, multipliers, and solvers if this has not been done yet
+                    if (!_TsMultiplier) {
+                        prepareSolversAndMultipliers(env, dir);
+                    }
+                    
+                    // Compute new x values for the timed states
+                    // Flip what is new and what is old
+                    _Tsx1IsCurrent = !_Tsx1IsCurrent;
+                    // At this point, xOld() points to what has been computed in the most recent call of performIterationStep (initially, this is the 0-vector).
+                    // The result of this ongoing computation will be stored in xNew().
+                    
+                    // Compute the values obtained by a single uniformization step between timed states only
+                    if (nondetTs()) {
+                        if (choices == nullptr) {
+                            _TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew());
+                        } else {
+                            // Also keep track of the choices made.
+                            std::vector<uint64_t> tsChoices(_TsTransitions.getRowGroupCount());
+                            _TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew(), &tsChoices);
+                            // Note that nondeterminism within the timed states means that there cannot be instant states (we either have an MDP or an MA).
+                            // Hence, in this branch we do not have to handle choices at instant states.
+                            STORM_LOG_ASSERT(!_hasInstantStates, "Nondeterministic timed states are only supported if there are no instant states.");
+                            setInputModelChoices(*choices, tsChoices);
+                        }
+                    } else {
+                        _TsMultiplier->multiply(env, xOld(), &_TsChoiceValues, xNew());
+                    }
+                    if (_hasInstantStates) {
+                        // Add the values obtained by taking a single uniformization step that leads to an instant state followed by arbitrarily many instant steps.
+                        // First compute the total values when taking arbitrarily many instant transitions (in no time)
+                        if (_NondetIsSolver) {
+                            // We might need to track the optimal choices.
+                            if (choices == nullptr) {
+                                _NondetIsSolver->solveEquations(*_IsSolverEnv, *dir, _Isx, _Isb);
+                            } else {
+                                _NondetIsSolver->setTrackScheduler();
+                                _NondetIsSolver->solveEquations(*_IsSolverEnv, *dir, _Isx, _Isb);
+                                setInputModelChoices(*choices, _NondetIsSolver->getSchedulerChoices(), true);
+                            }
+                        } else if (_DetIsSolver) {
+                            _DetIsSolver->solveEquations(*_IsSolverEnv, _Isx, _Isb);
+                        } else {
+                            STORM_LOG_ASSERT(_IsTransitions.getNonzeroEntryCount() == 0, "If no solver was initialized, the matrix of instant transitions is expected to be empty.");
+                            if (nondetIs()) {
+                                if (choices == nullptr) {
+                                    storm::utility::vector::reduceVectorMinOrMax(*dir, _Isb, _Isx, _IsTransitions.getRowGroupIndices());
+                                } else {
+                                    std::vector<uint64_t> psChoices(_IsTransitions.getRowGroupCount());
+                                    storm::utility::vector::reduceVectorMinOrMax(*dir, _Isb, _Isx, _IsTransitions.getRowGroupIndices(), &psChoices);
+                                    setInputModelChoices(*choices, psChoices, true);
+                                }
+                            } else {
+                                // For deterministic instant states, there is nothing to reduce, i.e., we could just set _Isx = _Isb.
+                                // For efficiency reasons, we do a swap instead:
+                                _Isx.swap(_Isb);
+                                // Note that at this point we have changed the contents of _Isb, but they will be overwritten anyway.
+                                if (choices) {
+                                    // Set choice 0 for all states.
+                                    setInputModelChoices(*choices, {}, true, true);
+                                }
+                            }
+                        }
+                        // Now add the (weighted) values of the instant states to the values of the timed states.
+                        _TsToIsMultiplier->multiply(env, _Isx, &xNew(), xNew());
+                    }
+                }
+                
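+                // Editorial summary (a sketch, not part of the original patch) of one iteration
+                // step for the case of deterministic timed states with instant states:
+                //   xNew = P_TsToTs * xOld + b_Ts + P_TsToIs * y,
+                // where y is the total value collected along arbitrarily many instant transitions,
+                // i.e., the solution of y = P_IsToIs * y + b_Is (with b_Is refreshed from xNew in
+                // prepareNextIteration). One step thus combines a single uniformized move among
+                // timed states with the values of the instant states reachable from them.
+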
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                typename LraViHelper<ValueType, ComponentType, TransitionsType>::ConvergenceCheckResult LraViHelper<ValueType, ComponentType, TransitionsType>::checkConvergence(bool relative, ValueType precision) const {
+                    STORM_LOG_ASSERT(_TsMultiplier, "Tried to check for convergence without performing an iteration first.");
+                    // All values are scaled according to the uniformizationRate.
+                    // We need to 'revert' this scaling when computing the absolute precision.
+                    // However, for relative precision, the scaling cancels out.
+                    ValueType threshold = relative ? precision : ValueType(precision / _uniformizationRate);
+                    
+                    ConvergenceCheckResult res = { true, storm::utility::one<ValueType>() };
+                    // Now check whether the currently produced results are precise enough
+                    STORM_LOG_ASSERT(threshold > storm::utility::zero<ValueType>(), "Did not expect a non-positive threshold.");
+                    auto x1It = xOld().begin();
+                    auto x1Ite = xOld().end();
+                    auto x2It = xNew().begin();
+                    ValueType maxDiff = (*x2It - *x1It);
+                    ValueType minDiff = maxDiff;
+                    // The difference between maxDiff and minDiff is zero at this point. Thus, it doesn't make sense to check the threshold now.
+                    for (++x1It, ++x2It; x1It != x1Ite; ++x1It, ++x2It) {
+                        ValueType diff = (*x2It - *x1It);
+                        // Potentially update maxDiff or minDiff
+                        bool skipCheck = false;
+                        if (maxDiff < diff) {
+                            maxDiff = diff;
+                        } else if (minDiff > diff) {
+                            minDiff = diff;
+                        } else {
+                            skipCheck = true;
+                        }
+                        // Check convergence
+                        if (!skipCheck && (maxDiff - minDiff) > (relative ? (threshold * minDiff) : threshold)) {
+                            res.isPrecisionAchieved = false;
+                            break;
+                        }
+                    }
+                    
+                    // Compute the average of the maximal and the minimal difference.
+                    ValueType avgDiff = (maxDiff + minDiff) / (storm::utility::convertNumber<ValueType>(2.0));
+                    
+                    // "Undo" the scaling of the values
+                    res.currentValue = avgDiff * _uniformizationRate;
+                    return res;
+                }
+                
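+                // Editorial note: this is the stopping criterion of Ashok et al. (CAV'17), cited in
+                // LraViHelper.h. The sought (scaled) long run average value lies between minDiff and
+                // maxDiff, so once maxDiff - minDiff is below the threshold, the midpoint
+                // (maxDiff + minDiff) / 2 approximates it with an absolute error of at most
+                // (maxDiff - minDiff) / 2 (before rescaling with the uniformization rate).
+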
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                void LraViHelper<ValueType, ComponentType, TransitionsType>::prepareNextIteration(Environment const& env) {
+                    // To avoid large (and numerically unstable) x-values, we subtract a reference value.
+                    ValueType referenceValue = xNew().front();
+                    storm::utility::vector::applyPointwise<ValueType, ValueType>(xNew(), xNew(), [&referenceValue] (ValueType const& x_i) -> ValueType { return x_i - referenceValue; });
+                    if (_hasInstantStates) {
+                        // Update the RHS of the equation system for the instant states by taking the new values of timed states into account.
+                        STORM_LOG_ASSERT(!nondetTs(), "Nondeterministic timed states not expected when there are also instant states.");
+                        _IsToTsMultiplier->multiply(env, xNew(), &_IsChoiceValues, _Isb);
+                    }
+                }
+                
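+                // Editorial example: if xNew() holds {100.3, 100.1}, subtracting the reference
+                // value 100.3 yields {0.0, -0.2}. Since the transition matrices are stochastic,
+                // this constant shift cancels out in the entry-wise differences xNew() - xOld()
+                // that drive checkConvergence.
+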
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                bool LraViHelper<ValueType, ComponentType, TransitionsType>::isTimedState(uint64_t const& inputModelStateIndex) const {
+                    STORM_LOG_ASSERT(!_hasInstantStates || inputModelStateIndex < _timedStates->size(), "Unable to determine whether state " << inputModelStateIndex << " is timed.");
+                    return !_hasInstantStates || _timedStates->get(inputModelStateIndex);
+                }
+                
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                std::vector<ValueType>& LraViHelper<ValueType, ComponentType, TransitionsType>::xNew() {
+                    return _Tsx1IsCurrent ? _Tsx1 : _Tsx2;
+                }
+                
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                std::vector<ValueType> const& LraViHelper<ValueType, ComponentType, TransitionsType>::xNew() const {
+                    return _Tsx1IsCurrent ? _Tsx1 : _Tsx2;
+                }
+                
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                std::vector<ValueType>& LraViHelper<ValueType, ComponentType, TransitionsType>::xOld() {
+                    return _Tsx1IsCurrent ? _Tsx2 : _Tsx1;
+                }
+                
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                std::vector<ValueType> const& LraViHelper<ValueType, ComponentType, TransitionsType>::xOld() const {
+                    return _Tsx1IsCurrent ? _Tsx2 : _Tsx1;
+                }
+                
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                bool LraViHelper<ValueType, ComponentType, TransitionsType>::nondetTs() const {
+                    return TransitionsType == LraViTransitionsType::NondetTsNoIs;
+                }
+                
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                bool LraViHelper<ValueType, ComponentType, TransitionsType>::nondetIs() const {
+                    return TransitionsType == LraViTransitionsType::DetTsNondetIs;
+                }
+                
+                template class LraViHelper<double, storm::storage::MaximalEndComponent, LraViTransitionsType::NondetTsNoIs>;
+                template class LraViHelper<storm::RationalNumber, storm::storage::MaximalEndComponent, LraViTransitionsType::NondetTsNoIs>;
+                template class LraViHelper<double, storm::storage::MaximalEndComponent, LraViTransitionsType::DetTsNondetIs>;
+                template class LraViHelper<storm::RationalNumber, storm::storage::MaximalEndComponent, LraViTransitionsType::DetTsNondetIs>;
+                
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h
new file mode 100644
index 000000000..3fb5066c4
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h
@@ -0,0 +1,155 @@
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "storm/solver/OptimizationDirection.h"
+#include "storm/storage/SparseMatrix.h"
+
+namespace storm {
+    class Environment;
+    
+    namespace solver {
+        template<typename ValueType> class LinearEquationSolver;
+        template<typename ValueType> class MinMaxLinearEquationSolver;
+        template<typename ValueType> class Multiplier;
+    }
+    
+    namespace modelchecker {
+        namespace helper {
+            namespace internal {
+                
+                /*!
+                 * Specifies the different kinds of transition types with which this helper can be used.
+                 * Ts means timed states (cf. Markovian states in a Markov automaton) and Is means instant states (cf. probabilistic states in a Markov automaton).
+                 * The way to think about this is that time can only pass in a timed state, whereas transitions emerging from an instant state fire immediately.
+                 * In an MDP, all states are seen as timed.
+                 * In this enum, we also specify whether there can be a nondeterministic choice at the corresponding states or not.
+                 */
+                enum class LraViTransitionsType {
+                    DetTsNoIs, /// deterministic choice at timed states, no instant states (as in DTMCs and CTMCs)
+                    DetTsNondetIs, /// deterministic choice at timed states, nondeterministic choice at instant states (as in Markov Automata)
+                    DetTsDetIs, /// deterministic choice at timed states, deterministic choice at instant states (as in Markov Automata without any nondeterminism)
+                    NondetTsNoIs /// nondeterministic choice at timed states, no instant states (as in MDPs)
+                };
+            
+                /*!
+                 * Helper class that performs iterations of the value iteration method.
+                 * The template parameters ComponentType and TransitionsType are used to make this work for various model types.
+                 *
+                 * @see Ashok et al.: Value Iteration for Long-Run Average Reward in Markov Decision Processes (CAV'17), https://doi.org/10.1007/978-3-319-63387-9_10
+                 * @see Butkova, Wimmer, Hermanns: Long-Run Rewards for Markov Automata (TACAS'17), https://doi.org/10.1007/978-3-662-54580-5_11
+                 *
+                 * @tparam ValueType The type of a value
+                 * @tparam ComponentType The type of a 'bottom component' of the model (e.g. a BSCC for purely deterministic models or a MEC for models with potential nondeterminism).
+                 * @tparam TransitionsType The kind of transitions that occur.
+                 */
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                class LraViHelper {
+                public:
+                    /// Function mapping from indices to values
+                    typedef std::function<ValueType(uint64_t)> ValueGetter;
+                    
+                    /*!
+                     * Initializes a new VI helper for the provided MEC or BSCC
+                     * @param component the MEC or BSCC
+                     * @param transitionMatrix The transition matrix of the input model
+                     * @param aperiodicFactor a non-zero factor that is used for making the component aperiodic (by adding a selfloop to each timed state)
+                     * @param timedStates States in which time can pass (Markovian states in a Markov automaton). If nullptr, it is assumed that all states are timed states.
+                     * @param exitRates The exit rates of the timed states (relevant for continuous-time models). If nullptr, all rates are assumed to be 1 (which corresponds to a discrete-time model).
+                     * @note All indices and vectors must be w.r.t. the states as described by the provided transition matrix
+                     */
+                    LraViHelper(ComponentType const& component, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, ValueType const& aperiodicFactor, storm::storage::BitVector const* timedStates = nullptr, std::vector<ValueType> const* exitRates = nullptr);
+
+                    /*!
+                     * Performs value iteration with the given state- and action values.
+                     * @param env The environment, containing information on the precision of this computation.
+                     * @param stateValueGetter function that returns for each state index (w.r.t. the input transition matrix) the reward for staying in that state. Will only be called for timed states.
+                     * @param actionValueGetter function that returns for each global choice index (w.r.t. the input transition matrix) the reward for taking that choice.
+                     * @param exitRates (as in the constructor)
+                     * @param dir Optimization direction. Must not be nullptr if the model has nondeterminism.
+                     * @param choices if not nullptr, the optimal choices will be inserted in this vector. The vector's size must then be equal to the number of row groups of the input transition matrix.
+                     * @return The (optimal) long run average value of the specified component.
+                     * @note it is possible to call this method multiple times with different values. However, other changes to the environment or the optimization direction might not have the expected effect due to caching.
+                     */
+                    ValueType performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates = nullptr, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr);
+                    
+                private:
+                    
+                    /*!
+                     * Initializes the value iterations with the provided values.
+                     * Resets all information from potential previous calls.
+                     * Must be called before the first call to performIterationStep.
+                     * @param stateValueGetter Function that returns for each state index (w.r.t. the input transitions) the value (e.g. reward) for that state
+                     * @param actionValueGetter Function that returns for each global choice index (w.r.t. the input transitions) the value (e.g. reward) for that choice
+                     */
+                    void initializeNewValues(ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates = nullptr);
+                    
+                    /*!
+                     * Performs a single iteration step.
+                     * @param env The environment.
+                     * @param dir The optimization direction. Has to be given if there is nondeterminism (otherwise it will be ignored)
+                     * @param choices If given, the optimal choices will be inserted at the appropriate states.
+                     *                Note that these choices will be inserted w.r.t. the original model states/choices, i.e. the size of the vector should match the state-count of the input model
+                     * @pre when calling this the first time, initializeNewValues must have been called before. Moreover, prepareNextIteration must be called between two calls of this.
+                    */
+                    void performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr);
+                    
+                    struct ConvergenceCheckResult {
+                        bool isPrecisionAchieved;
+                        ValueType currentValue;
+                    };
+                
+                    /*!
+                     * Checks whether the currently computed value achieves the desired precision
+                     */
+                    ConvergenceCheckResult checkConvergence(bool relative, ValueType precision) const;
+                    
+                    /*!
+                     * Must be called between two calls of performIterationStep.
+                     */
+                    void prepareNextIteration(Environment const& env);
+                    
+                    /// Prepares the necessary solvers and multipliers for doing the iterations.
+                    void prepareSolversAndMultipliers(Environment const& env, storm::solver::OptimizationDirection const* dir = nullptr);
+                    
+                    void setInputModelChoices(std::vector<uint64_t>& choices, std::vector<uint64_t> const& localMecChoices, bool setChoiceZeroToTimedStates = false, bool setChoiceZeroToInstantStates = false) const;
+                    
+                    /// Returns true iff the given state is a timed state
+                    bool isTimedState(uint64_t const& inputModelStateIndex) const;
+                    
+                    /// The result for timed states of the most recent iteration
+                    std::vector<ValueType>& xNew();
+                    std::vector<ValueType> const& xNew() const;
+                    
+                    /// The result for timed states of the previous iteration
+                    std::vector<ValueType>& xOld();
+                    std::vector<ValueType> const& xOld() const;
+
+                    /// @return true iff there potentially is a nondeterministic choice at timed states
+                    bool nondetTs() const;
+                    
+                    /// @return true iff there potentially is a nondeterministic choice at instant states. Returns false if there are no instant states.
+                    bool nondetIs() const;
+
+                    
+                    
+                    ComponentType const& _component;
+                    storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
+                    storm::storage::BitVector const* _timedStates; // e.g. Markovian states of a Markov automaton.
+                    bool _hasInstantStates;
+                    ValueType _uniformizationRate;
+                    storm::storage::SparseMatrix<ValueType> _TsTransitions, _TsToIsTransitions, _IsTransitions, _IsToTsTransitions;
+                    std::vector<ValueType> _Tsx1, _Tsx2, _TsChoiceValues;
+                    bool _Tsx1IsCurrent;
+                    std::vector<ValueType> _Isx, _Isb, _IsChoiceValues;
+                    std::unique_ptr<storm::solver::Multiplier<ValueType>> _TsMultiplier, _TsToIsMultiplier, _IsToTsMultiplier;
+                    std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> _NondetIsSolver;
+                    std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> _DetIsSolver;
+                    std::unique_ptr<storm::Environment> _IsSolverEnv;
+                };
+            }
+        }
+    }
+}
\ No newline at end of file

From 7e65e797fab35bb698bebc0a50318e8a03a77f63 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 5 Aug 2020 11:40:43 +0200
Subject: [PATCH 17/48] SingleValueModelCheckerHelper: Fixed signature of
 getOptimizationDirection so that a const& is returned.

---
 src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp | 2 +-
 src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
index 845543f26..b884d6ee2 100644
--- a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
+++ b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
@@ -21,7 +21,7 @@ namespace storm {
             }
             
             template <typename ValueType, storm::dd::DdType DdType>
-            storm::solver::OptimizationDirection SingleValueModelCheckerHelper<ValueType, DdType>::getOptimizationDirection() const {
+            storm::solver::OptimizationDirection const& SingleValueModelCheckerHelper<ValueType, DdType>::getOptimizationDirection() const {
                 STORM_LOG_ASSERT(isOptimizationDirectionSet(), "Requested optimization direction but none was set.");
                 return _optimizationDirection.get();
             }
diff --git a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
index 67bb2b7df..83ea27ae0 100644
--- a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
+++ b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
@@ -41,7 +41,7 @@ namespace storm {
                  * @pre an optimization direction has to be set before calling this.
                  * @return the optimization direction.
                  */
-                storm::solver::OptimizationDirection getOptimizationDirection() const;
+                storm::solver::OptimizationDirection const& getOptimizationDirection() const;
                 
                 /*!
                  * @pre an optimization direction has to be set before calling this.

From f113ac7187afbfbe4539b73eda2435bdc657c830 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 5 Aug 2020 11:41:23 +0200
Subject: [PATCH 18/48] NondeterministicLraHelper: Removed old ViHelper code.

---
 ...eNondeterministicInfiniteHorizonHelper.cpp | 551 +-----------------
 1 file changed, 18 insertions(+), 533 deletions(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
index 979337f5f..7a9cb003e 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
@@ -1,10 +1,12 @@
 #include "SparseNondeterministicInfiniteHorizonHelper.h"
 
+#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
+
 #include "storm/solver/MinMaxLinearEquationSolver.h"
+#include "storm/solver/LinearEquationSolver.h"
 #include "storm/solver/Multiplier.h"
 #include "storm/solver/LpSolver.h"
 
-#include "storm/utility/graph.h"
 #include "storm/utility/SignalHandler.h"
 #include "storm/utility/solver.h"
 #include "storm/utility/vector.h"
@@ -84,9 +86,11 @@ namespace storm {
                 auto underlyingSolverEnvironment = env;
                 if (env.solver().isForceSoundness()) {
                     // For sound computations, the error in the MECs plus the error in the remaining system should not exceed the user-defined precision.
-                    underlyingSolverEnvironment.solver().minMax().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
+                    storm::RationalNumber newPrecision = env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2);
+                    underlyingSolverEnvironment.solver().minMax().setPrecision(newPrecision);
                     underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
-                    underlyingSolverEnvironment.solver().lra().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
+                    underlyingSolverEnvironment.solver().setLinearEquationSolverPrecision(newPrecision, env.solver().lra().getRelativeTerminationCriterion());
+                    underlyingSolverEnvironment.solver().lra().setPrecision(newPrecision);
                 }
                 
                 // If requested, allocate memory for the choices made
@@ -204,545 +208,26 @@ namespace storm {
                 }
             }
     
-            /*!
-             * Abstract helper class that performs a single iteration of the value iteration method
-             */
-            template <typename ValueType>
-            class LraViHelper {
-            public:
-                LraViHelper(storm::storage::MaximalEndComponent const& mec, storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _mec(mec), _transitionMatrix(transitionMatrix) {
-                    // Intentionally left empty
-                }
-                virtual ~LraViHelper() = default;
-
-                /*!
-                 * performs a single iteration step.
-                 * If a choices vector is given, the optimal choices will be inserted at the appropriate states.
-                 * Note that these choices will be inserted w.r.t. the original model states/choices, i.e. the size of the vector should match the state-count of the input model
-                 * @return the current estimate of the LRA value
-                 */
-                virtual void iterate(Environment const& env, storm::solver::OptimizationDirection const& dir, std::vector<uint64_t>* choices = nullptr) = 0;
-                
-                struct ConvergenceCheckResult {
-                    bool isPrecisionAchieved;
-                    ValueType currentValue;
-                };
-                
-                /*!
-                 * Checks whether the curently computed value achieves the desired precision
-                 */
-                virtual ConvergenceCheckResult checkConvergence(bool relative, ValueType precision) = 0;
-                
-                /*!
-                 * Must be called between two calls of iterate.
-                 */
-                virtual void prepareNextIteration(Environment const& env) = 0;
-                
-            protected:
-                
-                /*!
-                 *
-                 * @param xPrevious the 'old' values
-                 * @param xCurrent the 'new' values
-                 * @param threshold the threshold
-                 * @param relative whether the relative difference should be considered
-                 * @return The first component is true if the (relative) difference between the maximal and the minimal entry-wise change of the two value vectors is below or equal to the provided threshold.
-                 *          In this case, the second component is the average of the maximal and the minimal change.
-                 *          If the threshold is exceeded, the computation is aborted early and the second component is only an approximation of the averages.
-                 */
-                std::pair<bool, ValueType> checkMinMaxDiffBelowThreshold(std::vector<ValueType> const& xPrevious, std::vector<ValueType> const& xCurrent, ValueType const& threshold, bool relative) const {
-                    STORM_LOG_ASSERT(xPrevious.size() == xCurrent.size(), "Unexpected Dimension Mismatch");
-                    STORM_LOG_ASSERT(threshold > storm::utility::zero<ValueType>(), "Did not expect a non-positive threshold.");
-                    auto x1It = xPrevious.begin();
-                    auto x1Ite = xPrevious.end();
-                    auto x2It = xCurrent.begin();
-                    ValueType maxDiff = (*x2It - *x1It);
-                    ValueType minDiff = maxDiff;
-                    bool result = true;
-                    // The difference between maxDiff and minDiff is zero at this point. Thus, it doesn't make sense to check the threshold now.
-                    for (++x1It, ++x2It; x1It != x1Ite; ++x1It, ++x2It) {
-                        ValueType diff = (*x2It - *x1It);
-                        // Potentially update maxDiff or minDiff
-                        bool skipCheck = false;
-                        if (maxDiff < diff) {
-                            maxDiff = diff;
-                        } else if (minDiff > diff) {
-                            minDiff = diff;
-                        } else {
-                            skipCheck = true;
-                        }
-                        // Check convergence
-                        if (!skipCheck && (maxDiff - minDiff) > (relative ? (threshold * minDiff) : threshold)) {
-                            result = false;
-                            break;
-                        }
-                    }
-                    ValueType avgDiff = (maxDiff + minDiff) / (storm::utility::convertNumber<ValueType>(2.0));
-                    return {result, avgDiff};
-                }
-                
-                storm::storage::MaximalEndComponent const& _mec;
-                storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
-            };
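Aside: the span-based stopping criterion implemented by checkMinMaxDiffBelowThreshold above can be summarized in a few lines of plain C++ (a simplified sketch without the early abort; the names are illustrative and not part of Storm):

    #include <algorithm>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // Simplified sketch of the span-based convergence test: iteration stops
    // once all entry-wise changes between two consecutive value vectors lie
    // within a window of size 'threshold' (scaled by minDiff if relative).
    std::pair<bool, double> spanBelowThreshold(std::vector<double> const& xOld, std::vector<double> const& xNew, double threshold, bool relative) {
        double maxDiff = xNew[0] - xOld[0];
        double minDiff = maxDiff;
        for (std::size_t i = 1; i < xOld.size(); ++i) {
            double diff = xNew[i] - xOld[i];
            maxDiff = std::max(maxDiff, diff);
            minDiff = std::min(minDiff, diff);
        }
        bool converged = (maxDiff - minDiff) <= (relative ? threshold * minDiff : threshold);
        // The average of the extreme changes serves as the current value estimate.
        return {converged, (maxDiff + minDiff) / 2.0};
    }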
-    
-            /*!
-             * Helper class that performs the iterations of the value iteration method for MDPs.
-             * @see Ashok et al.: Value Iteration for Long-Run Average Reward in Markov Decision Processes (CAV'17), https://doi.org/10.1007/978-3-319-63387-9_10
-             */
-            template <typename ValueType>
-            class MdpLraViHelper : public LraViHelper<ValueType> {
-            public:
-                
-                MdpLraViHelper(storm::storage::MaximalEndComponent const& mec, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, ValueType const& aperiodicFactor) : LraViHelper<ValueType>(mec, transitionMatrix), _x1(mec.size(), storm::utility::zero<ValueType>()), _x2(_x1), _x1IsCurrent(true) {
-                    
-                    // We add a selfloop to each state (which is necessary for convergence).
-                    // Very roughly, this selfloop prevents the values from flipping around like this: [1, 0] -> [0, 1] -> [1, 0] -> ...
-                    ValueType selfLoopProb = aperiodicFactor;
-                    // Introducing the selfloop also requires the rewards to be scaled by the following factor.
-                    _scalingFactor = storm::utility::one<ValueType>() - selfLoopProb;
-                    
-                    uint64_t numMecStates = this->_mec.size();
-                    boost::container::flat_map<uint64_t, uint64_t> toSubModelStateMapping;
-                    toSubModelStateMapping.reserve(numMecStates);
-                    uint64_t currState = 0;
-                    uint64_t numMecChoices = 0;
-                    for (auto const& stateChoices : this->_mec) {
-                        toSubModelStateMapping.emplace(stateChoices.first, currState);
-                        ++currState;
-                        numMecChoices += stateChoices.second.size();
-                    }
-                    assert(currState == numMecStates);
-                    
-                    // Get a transition matrix that only considers the states and choices within the MEC
-                    storm::storage::SparseMatrixBuilder<ValueType> mecTransitionBuilder(numMecChoices, numMecStates, 0, true, true, numMecStates);
-                    _choiceValues.reserve(numMecChoices);
-                    uint64_t currRow = 0;
-                    for (auto const& stateChoices : this->_mec) {
-                        auto const& mecState = stateChoices.first;
-                        auto const& mecChoices = stateChoices.second;
-                        mecTransitionBuilder.newRowGroup(currRow);
-                        for (auto const& choice : mecChoices) {
-                            bool insertedDiagElement = false;
-                            for (auto const& entry : this->_transitionMatrix.getRow(choice)) {
-                                uint64_t column = toSubModelStateMapping[entry.getColumn()];
-                                if (!insertedDiagElement && entry.getColumn() > mecState) {
-                                    mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
-                                    insertedDiagElement = true;
-                                }
-                                if (!insertedDiagElement && entry.getColumn() == mecState) {
-                                    mecTransitionBuilder.addNextValue(currRow, column, selfLoopProb + _scalingFactor * entry.getValue());
-                                    insertedDiagElement = true;
-                                } else {
-                                    mecTransitionBuilder.addNextValue(currRow, column,  _scalingFactor * entry.getValue());
-                                }
-                            }
-                            if (!insertedDiagElement) {
-                                mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
-                            }
-                            
-                            // Compute the rewards obtained for this choice
-                            _choiceValues.push_back(_scalingFactor * (stateRewardsGetter(mecState) + actionRewardsGetter(choice)));
-                            
-                            ++currRow;
-                        }
-                    }
-                    
-                    _mecTransitions = mecTransitionBuilder.build();
-                    
-                    STORM_LOG_ASSERT(_mecTransitions.isProbabilistic(), "The MEC-Matrix is not probabilistic.");
-                    STORM_LOG_ASSERT(_mecTransitions.getRowGroupCount() == _x1.size(), "Unexpected size mismatch for created matrix.");
-                    STORM_LOG_ASSERT(_x1.size() == _x2.size(), "Unexpected size mismatch for created matrix.");
-                }
-                
-                virtual void iterate(Environment const& env, storm::solver::OptimizationDirection const& dir, std::vector<uint64_t>* choices = nullptr) override {
-                    // Initialize a multiplier if it does not exist yet.
-                    if (!_multiplier) {
-                        _multiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _mecTransitions);
-                    }
-                    
-                    if (choices == nullptr) {
-                        // Perform a simple matrix-vector multiplication
-                        _multiplier->multiplyAndReduce(env, dir, xCurrent(), &_choiceValues, xPrevious());
-                    } else {
-                        // Perform a simple matrix-vector multiplication but also keep track of the choices within the _mecTransitions
-                        std::vector<uint64_t> mecChoices(_mecTransitions.getRowGroupCount());
-                        _multiplier->multiplyAndReduce(env, dir, xCurrent(), &_choiceValues, xPrevious(), &mecChoices);
-                        // Transform the local choices (within this MEC) to choice indices for the input model.
-                        uint64_t mecState = 0;
-                        for (auto const& stateChoices : this->_mec) {
-                            uint64_t mecChoice = mecChoices[mecState];
-                            STORM_LOG_ASSERT(mecChoice < stateChoices.second.size(), "The selected choice does not seem to exist.");
-                            uint64_t globalChoiceIndex = *(stateChoices.second.begin() + mecChoice);
-                            (*choices)[stateChoices.first] = globalChoiceIndex - this->_transitionMatrix.getRowGroupIndices()[stateChoices.first];
-                            ++mecState;
-                        }
-                    }
-                    
-                    // Swap current and previous x vectors
-                    _x1IsCurrent = !_x1IsCurrent;
-                    
-                }
-                
-                virtual typename LraViHelper<ValueType>::ConvergenceCheckResult checkConvergence(bool relative, ValueType precision) override {
-                    typename LraViHelper<ValueType>::ConvergenceCheckResult res;
-                    std::tie(res.isPrecisionAchieved, res.currentValue) = this->checkMinMaxDiffBelowThreshold(xPrevious(), xCurrent(), precision, relative);
-                    res.currentValue /= _scalingFactor; // "Undo" the scaling of the rewards
-                    return res;
-                }
-                
-                virtual void prepareNextIteration(Environment const&) override {
-                    // To avoid large (and numerically unstable) x-values, we subtract a reference value.
-                    ValueType referenceValue = xCurrent().front();
-                    storm::utility::vector::applyPointwise<ValueType, ValueType>(xCurrent(), xCurrent(), [&referenceValue] (ValueType const& x_i) -> ValueType { return x_i - referenceValue; });
-                }
-                
-            private:
-                
-                std::vector<ValueType>& xCurrent() {
-                    return _x1IsCurrent ? _x1 : _x2;
-                }
-                
-                std::vector<ValueType>& xPrevious() {
-                    return _x1IsCurrent ? _x2 : _x1;
-                }
-                
-                storm::storage::SparseMatrix<ValueType> _mecTransitions;
-                std::vector<ValueType> _x1, _x2, _choiceValues;
-                bool _x1IsCurrent;
-                std::unique_ptr<storm::solver::Multiplier<ValueType>> _multiplier;
-                ValueType _scalingFactor;
-            };
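Aside: the aperiodicity transformation applied in the constructor above amounts to P' = a*I + (1-a)*P with rewards scaled by (1-a), where a is the selfloop probability. A minimal self-contained illustration (plain C++ with made-up numbers, not Storm code):

    #include <array>
    #include <cstdio>

    int main() {
        double const a = 0.1; // selfloop probability (the aperiodicFactor)
        // A periodic two-state chain whose iteration values would flip around forever.
        std::array<std::array<double, 2>, 2> P{{{0.0, 1.0}, {1.0, 0.0}}};
        for (int s = 0; s < 2; ++s) {
            for (int t = 0; t < 2; ++t) {
                // Blend in the selfloop: the diagonal gets a, everything else is scaled by (1 - a).
                double entry = (1.0 - a) * P[s][t] + (s == t ? a : 0.0);
                std::printf("P'[%d][%d] = %.2f\n", s, t, entry);
            }
        }
        // Rewards must be scaled by the same factor (1 - a); checkConvergence
        // divides the computed value by this _scalingFactor to undo it.
        return 0;
    }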
-            
-            /*!
-             * Helper class that performs the iterations of the value iteration method for Markov automata (MA).
-             * @see Butkova, Wimmer, Hermanns: Long-Run Rewards for Markov Automata (TACAS'17), https://doi.org/10.1007/978-3-662-54580-5_11
-             */
-            template <typename ValueType>
-            class MaLraViHelper : public LraViHelper<ValueType> {
-            public:
-                
-                MaLraViHelper(storm::storage::MaximalEndComponent const& mec, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, ValueType const& aperiodicFactor) : LraViHelper<ValueType>(mec, transitionMatrix), _markovianStates(markovianStates), _Msx1IsCurrent(false) {
-                    
-                    // Run through the MEC and collect some data:
-                    // We consider two submodels, one consisting of the Markovian MEC states and one consisting of the probabilistic MEC states.
-                    // For this, we create a state index map that points from state indices of the input model to indices within the corresponding submodel.
-                    boost::container::flat_map<uint64_t, uint64_t> toSubModelStateMapping;
-                    // We also obtain the state and choice counts of the two submodels.
-                    uint64_t numPsSubModelStates(0), numPsSubModelChoices(0);
-                    uint64_t numMsSubModelStates(0); // The number of choices coincides with the number of states.
-                    // We will need to uniformize the Markovian MEC states by introducing a selfloop.
-                    // For this, we need to find a uniformization rate which will be a little higher (given by aperiodicFactor) than the maximum rate occurring in the MEC.
-                    _uniformizationRate = storm::utility::zero<ValueType>();
-                    // Now run over the MEC and collect the required data.
-                    for (auto const& stateChoices : this->_mec) {
-                        uint64_t const& mecState = stateChoices.first;
-                        if (_markovianStates.get(mecState)) {
-                            toSubModelStateMapping.emplace(mecState, numMsSubModelStates);
-                            ++numMsSubModelStates;
-                            STORM_LOG_ASSERT(stateChoices.second.size() == 1, "Markovian state has multiple MEC choices.");
-                            _uniformizationRate = std::max(_uniformizationRate, exitRates[mecState]);
-                        } else {
-                            toSubModelStateMapping.emplace(mecState, numPsSubModelStates);
-                            ++numPsSubModelStates;
-                            numPsSubModelChoices += stateChoices.second.size();
-                        }
-                    }
-                    assert(numPsSubModelStates + numMsSubModelStates == mec.size());
-                    STORM_LOG_THROW(numMsSubModelStates > 0, storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
-
-                    _hasProbabilisticStates = numPsSubModelStates > 0;
-                    
-                    // We make sure that every Markovian state gets a selfloop to make the model aperiodic
-                    _uniformizationRate *= storm::utility::one<ValueType>() + aperiodicFactor;
-
-                    // Now build the Markovian and the Probabilistic submodels.
-                    // In addition, we also need the transitions between the two.
-                    storm::storage::SparseMatrixBuilder<ValueType> msTransitionsBuilder(numMsSubModelStates, numMsSubModelStates);
-                    _MsChoiceValues.reserve(numMsSubModelStates);
-                    storm::storage::SparseMatrixBuilder<ValueType> msToPsTransitionsBuilder, psTransitionsBuilder, psToMsTransitionsBuilder;
-                    if (_hasProbabilisticStates) {
-                        msToPsTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numMsSubModelStates, numPsSubModelStates);
-                        psTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numPsSubModelChoices, numPsSubModelStates, 0, true, true, numPsSubModelStates);
-                        psToMsTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numPsSubModelChoices, numMsSubModelStates, 0, true, true, numPsSubModelStates);
-                        _PsChoiceValues.reserve(numPsSubModelChoices);
-                    }
-                    uint64_t currMsRow = 0;
-                    uint64_t currPsRow = 0;
-                    for (auto const& stateChoices : this->_mec) {
-                        uint64_t const& mecState = stateChoices.first;
-                        auto const& mecChoices = stateChoices.second;
-                        if (!_hasProbabilisticStates || _markovianStates.get(mecState)) {
-                            // The currently processed state is Markovian.
-                            // We need to uniformize!
-                            ValueType uniformizationFactor = exitRates[mecState] / _uniformizationRate;
-                            ValueType selfLoopProb = storm::utility::one<ValueType>() - uniformizationFactor;
-                            STORM_LOG_ASSERT(mecChoices.size() == 1, "Unexpected number of choices at Markovian state.");
-                            for (auto const& mecChoice : mecChoices) {
-                                bool insertedDiagElement = false;
-                                for (auto const& entry : this->_transitionMatrix.getRow(mecChoice)) {
-                                    uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()];
-                                    if (!_hasProbabilisticStates || _markovianStates.get(entry.getColumn())) {
-                                        // We have a transition from a Markovian state to a Markovian state
-                                        STORM_LOG_ASSERT(subModelColumn < numMsSubModelStates, "Invalid state for Markovian submodel");
-                                        if (!insertedDiagElement && subModelColumn > currMsRow) {
-                                            // We passed the diagonal entry, so add it now before moving on to the next entry
-                                            msTransitionsBuilder.addNextValue(currMsRow, currMsRow, selfLoopProb);
-                                            insertedDiagElement = true;
-                                        }
-                                        if (!insertedDiagElement && subModelColumn == currMsRow) {
-                                            // The current entry is the diagonal (selfloop) entry
-                                            msTransitionsBuilder.addNextValue(currMsRow, subModelColumn, selfLoopProb + uniformizationFactor * entry.getValue());
-                                            insertedDiagElement = true;
-                                        } else {
-                                            // The diagonal element either has been inserted already or still lies in front
-                                            msTransitionsBuilder.addNextValue(currMsRow, subModelColumn,  uniformizationFactor * entry.getValue());
-                                        }
-                                    } else {
-                                        // We have a transition from a Markovian to a probabilistic state
-                                        STORM_LOG_ASSERT(subModelColumn < numPsSubModelStates, "Invalid state for probabilistic submodel");
-                                        msToPsTransitionsBuilder.addNextValue(currMsRow, subModelColumn, uniformizationFactor * entry.getValue());
-                                    }
-                                }
-                                // If the diagonal entry for the MS matrix still has not been set, we do that now
-                                if (!insertedDiagElement) {
-                                    msTransitionsBuilder.addNextValue(currMsRow, currMsRow, selfLoopProb);
-                                }
-                                // Compute the rewards obtained for this choice.
-                                _MsChoiceValues.push_back(stateRewardsGetter(mecState) / _uniformizationRate + actionRewardsGetter(mecChoice) * exitRates[mecState] / _uniformizationRate);
-                                ++currMsRow;
-                            }
-                        } else {
-                            // The currently processed state is probabilistic
-                            psTransitionsBuilder.newRowGroup(currPsRow);
-                            psToMsTransitionsBuilder.newRowGroup(currPsRow);
-                            for (auto const& mecChoice : mecChoices) {
-                                for (auto const& entry : this->_transitionMatrix.getRow(mecChoice)) {
-                                    uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()];
-                                    if (_markovianStates.get(entry.getColumn())) {
-                                        // We have a transition from a probabilistic state to a Markovian state
-                                        STORM_LOG_ASSERT(subModelColumn < numMsSubModelStates, "Invalid state for Markovian submodel");
-                                        psToMsTransitionsBuilder.addNextValue(currPsRow, subModelColumn, entry.getValue());
-                                    } else {
-                                        // We have a transition from a probabilistic to a probabilistic state
-                                        STORM_LOG_ASSERT(subModelColumn < numPsSubModelStates, "Invalid state for probabilistic submodel");
-                                        psTransitionsBuilder.addNextValue(currPsRow, subModelColumn, entry.getValue());
-                                    }
-                                }
-                                // Compute the rewards obtained for this choice.
-                                // State rewards do not count here since no time passes in probabilistic states.
-                                _PsChoiceValues.push_back(actionRewardsGetter(mecChoice));
-                                ++currPsRow;
-                            }
-                        }
-                    }
-                    _MsTransitions = msTransitionsBuilder.build();
-                    if (_hasProbabilisticStates) {
-                        _MsToPsTransitions = msToPsTransitionsBuilder.build();
-                        _PsTransitions = psTransitionsBuilder.build();
-                        _PsToMsTransitions = psToMsTransitionsBuilder.build();
-                    }
-                }
-                
-                void initializeIterations(Environment const& env, storm::solver::OptimizationDirection const& dir) {
-                    _Msx1.resize(_MsTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
-                    _Msx2 = _Msx1;
-                    _MsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _MsTransitions);
-                    if (_hasProbabilisticStates) {
-                        if (_PsTransitions.getNonzeroEntryCount() > 0) {
-                            // Set-up a solver for transitions within PS states
-                            _PsSolverEnv = env;
-                            if (env.solver().isForceSoundness()) {
-                                // To get correct results, the inner equation systems are solved exactly.
-                                // TODO investigate how an error would propagate
-                                _PsSolverEnv.solver().setForceExact(true);
-                            }
-                            storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> factory;
-                            bool isAcyclic = !storm::utility::graph::hasCycle(_PsTransitions);
-                            if (isAcyclic) {
-                                STORM_LOG_INFO("Probabilistic transitions are acyclic.");
-                                _PsSolverEnv.solver().minMax().setMethod(storm::solver::MinMaxMethod::Acyclic);
-                            }
-                            _PsSolver = factory.create(_PsSolverEnv, _PsTransitions);
-                            _PsSolver->setHasUniqueSolution(true); // Assume non-zeno MA
-                            _PsSolver->setHasNoEndComponents(true); // assume non-zeno MA
-                            _PsSolver->setCachingEnabled(true);
-                            _PsSolver->setRequirementsChecked(true);
-                            auto req = _PsSolver->getRequirements(_PsSolverEnv, dir);
-                            req.clearUniqueSolution();
-                            if (isAcyclic) {
-                                req.clearAcyclic();
-                            }
-                            // Computing a priori lower/upper bounds is not particularly easy, as there might be selfloops with high probabilities
-                            // which accumulate a lot of reward. Moreover, the right-hand side of the equation system changes dynamically.
-                            STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "The solver requirement " << req.getEnabledRequirementsAsString() << " has not been checked.");
-                        }
-                        
-                        // Set up multipliers for transitions connecting Markovian and probabilistic states
-                        _MsToPsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _MsToPsTransitions);
-                        _PsToMsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _PsToMsTransitions);
-
-                        // Set-up vectors for storing intermediate results for PS states.
-                        _Psx.resize(_PsTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
-                        _Psb = _PsChoiceValues;
-                    }
-                    
-                }
-                
-                void setInputModelChoices(std::vector<uint64_t>& choices, std::vector<uint64_t> const& localMecChoices, bool setChoiceZeroToMarkovianStates) {
-                    // Transform the local choices (within this mec) to choice indices for the input model
-                    uint64_t mecState = 0;
-                    for (auto const& stateChoices : this->_mec) {
-                        if (setChoiceZeroToMarkovianStates && _markovianStates.get(stateChoices.first)) {
-                            choices[stateChoices.first] = 0;
-                        } else {
-                            uint64_t mecChoice = localMecChoices[mecState];
-                            STORM_LOG_ASSERT(mecChoice < stateChoices.second.size(), "The selected choice does not seem to exist.");
-                            uint64_t globalChoiceIndex = *(stateChoices.second.begin() + mecChoice);
-                            choices[stateChoices.first] = globalChoiceIndex - this->_transitionMatrix.getRowGroupIndices()[stateChoices.first];
-                            ++mecState;
-                        }
-                    }
-                    STORM_LOG_ASSERT(mecState == localMecChoices.size(), "Did not traverse all mec states.");
-                }
-                
-                virtual void iterate(Environment const& env, storm::solver::OptimizationDirection const& dir, std::vector<uint64_t>* choices = nullptr) override {
-                    // Initialize value vectors, multipliers, and the solver if this has not been done yet.
-                    if (!_MsMultiplier) {
-                        initializeIterations(env, dir);
-                    }
-                    
-                    // Compute new x values for the Markovian states
-                    // Flip what is current and what is previous
-                    _Msx1IsCurrent = !_Msx1IsCurrent;
-                    // At this point, xPrevious() points to what has been computed in the previous call of iterate (initially, this is the 0-vector).
-                    // The result of this computation will be stored in xCurrent()
-                    
-                    // Compute the values obtained by a single uniformization step between Markovian states only
-                    _MsMultiplier->multiply(env, xPrevious(), &_MsChoiceValues, xCurrent());
-                    if (_hasProbabilisticStates) {
-                        // Add the values obtained by taking a single uniformization step that leads to a Probabilistic state followed by arbitrarily many probabilistic steps.
-                        // First compute the total values when taking arbitrarily many probabilistic transitions (in no time)
-                        if (_PsSolver) {
-                            // We might need to track the optimal choices.
-                            if (choices == nullptr) {
-                                _PsSolver->solveEquations(_PsSolverEnv, dir, _Psx, _Psb);
-                            } else {
-                                _PsSolver->setTrackScheduler();
-                                _PsSolver->solveEquations(_PsSolverEnv, dir, _Psx, _Psb);
-                                setInputModelChoices(*choices, _PsSolver->getSchedulerChoices(), true);
-                            }
-                        } else {
-                            STORM_LOG_ASSERT(_PsTransitions.getNonzeroEntryCount() == 0, "If no solver was initialized, an empty matrix would have been expected.");
-                            if (choices == nullptr) {
-                                storm::utility::vector::reduceVectorMinOrMax(dir, _Psb, _Psx, _PsTransitions.getRowGroupIndices());
-                            } else {
-                                std::vector<uint64_t> psMecChoices(_PsTransitions.getRowGroupCount());
-                                storm::utility::vector::reduceVectorMinOrMax(dir, _Psb, _Psx, _PsTransitions.getRowGroupIndices(), &psMecChoices);
-                                setInputModelChoices(*choices, psMecChoices, true); // Use the locally computed choices; _PsSolver is not initialized in this branch.
-                            }
-                        }
-                        // Now add the (weighted) values of the probabilistic states to the values of the Markovian states.
-                        _MsToPsMultiplier->multiply(env, _Psx, &xCurrent(), xCurrent());
-                    }
-                }
-                
-                virtual typename LraViHelper<ValueType>::ConvergenceCheckResult checkConvergence(bool relative, ValueType precision) override {
-                    typename LraViHelper<ValueType>::ConvergenceCheckResult res;
-                    // All values are scaled according to the uniformizationRate.
-                    // We need to 'revert' this scaling when computing the absolute precision.
-                    // However, for relative precision, the scaling cancels out.
-                    ValueType threshold = relative ? precision : ValueType(precision / _uniformizationRate);
-                    std::tie(res.isPrecisionAchieved, res.currentValue) = this->checkMinMaxDiffBelowThreshold(xPrevious(), xCurrent(), threshold, relative);
-                    res.currentValue *= _uniformizationRate; // "Undo" the scaling of the values
-                    return res;
-                }
-                
-                virtual void prepareNextIteration(Environment const& env) override {
-                    // To avoid large (and numerically unstable) x-values, we subtract a reference value.
-                    ValueType referenceValue = xCurrent().front();
-                    storm::utility::vector::applyPointwise<ValueType, ValueType>(xCurrent(), xCurrent(), [&referenceValue] (ValueType const& x_i) -> ValueType { return x_i - referenceValue; });
-                    if (_hasProbabilisticStates) {
-                        // Update the RHS of the equation system for the probabilistic states by taking the new values of Markovian states into account.
-                        _PsToMsMultiplier->multiply(env, xCurrent(), &_PsChoiceValues, _Psb);
-                    }
-                }
-                
-            private:
-                
-                std::vector<ValueType>& xCurrent() {
-                    return _Msx1IsCurrent ? _Msx1 : _Msx2;
-                }
-                
-                std::vector<ValueType>& xPrevious() {
-                    return _Msx1IsCurrent ? _Msx2 : _Msx1;
-                }
-                
-                storm::storage::BitVector const& _markovianStates;
-                bool _hasProbabilisticStates;
-                ValueType _uniformizationRate;
-                storm::storage::SparseMatrix<ValueType> _MsTransitions, _MsToPsTransitions, _PsTransitions, _PsToMsTransitions;
-                std::vector<ValueType> _Msx1, _Msx2, _MsChoiceValues;
-                bool _Msx1IsCurrent;
-                std::vector<ValueType> _Psx, _Psb, _PsChoiceValues;
-                std::unique_ptr<storm::solver::Multiplier<ValueType>> _MsMultiplier, _MsToPsMultiplier, _PsToMsMultiplier;
-                std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> _PsSolver;
-                Environment _PsSolverEnv;
-            };
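Aside: the uniformization performed for the Markovian states above can be condensed as follows: each Markovian state s is left with probability rate(s)/u per uniformization step and keeps the remaining mass as a selfloop, where u exceeds all exit rates by the aperiodicFactor. A short self-contained sketch (plain C++ with illustrative rates, not Storm code):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<double> exitRates{2.0, 5.0}; // illustrative rates of two Markovian states
        double const aperiodicFactor = 0.1;
        // The uniformization rate strictly exceeds every exit rate, so every
        // state keeps a positive selfloop probability (making the chain aperiodic).
        double u = *std::max_element(exitRates.begin(), exitRates.end()) * (1.0 + aperiodicFactor);
        for (std::size_t s = 0; s < exitRates.size(); ++s) {
            double factor = exitRates[s] / u;   // scales the outgoing probabilities
            double selfLoop = 1.0 - factor;     // remaining probability mass
            std::printf("state %zu: uniformization factor %.3f, selfloop %.3f\n", s, factor, selfLoop);
        }
        return 0;
    }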
-            
             template <typename ValueType>
             ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
 
-                // Collect some parameters of the computation.
+                // Collect some parameters of the computation
                 ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
-                ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()) / aperiodicFactor;
-                bool relative = env.solver().lra().getRelativeTerminationCriterion();
-                boost::optional<uint64_t> maxIter;
-                if (env.solver().lra().isMaximalIterationCountSet()) {
-                    maxIter = env.solver().lra().getMaximalIterationCount();
+                std::vector<uint64_t>* optimalChoices = nullptr;
+                if (isProduceSchedulerSet()) {
+                    optimalChoices = &_producedOptimalChoices.get();
                 }
-                auto dir = this->getOptimizationDirection();
                 
-                // Create an object for the iterations
-                std::shared_ptr<LraViHelper<ValueType>> iterationHelper;
+                // Now create a helper and perform the algorithm
                 if (isContinuousTime()) {
-                    iterationHelper = std::make_shared<MaLraViHelper<ValueType>>(mec, _transitionMatrix, *_markovianStates, *_exitRates, stateRewardsGetter, actionRewardsGetter, aperiodicFactor);
-                } else {
-                    iterationHelper = std::make_shared<MdpLraViHelper<ValueType>>(mec, _transitionMatrix, stateRewardsGetter, actionRewardsGetter, aperiodicFactor);
-                }
-                
-                // start the iterations
-                ValueType result = storm::utility::zero<ValueType>();
-                uint64_t iter = 0;
-                while (!maxIter.is_initialized() || iter < maxIter.get()) {
-                    ++iter;
-                    iterationHelper->iterate(env, dir);
-                    // Check if we are done
-                    auto convergenceCheckResult = iterationHelper->checkConvergence(relative, precision);
-                    result = convergenceCheckResult.currentValue;
-                    if (convergenceCheckResult.isPrecisionAchieved) {
-                        break;
-                    }
-                    if (storm::utility::resources::isTerminate()) {
-                        break;
-                    }
-                    
-                    iterationHelper->prepareNextIteration(env);
-                    
-                }
-                if (maxIter.is_initialized() && iter == maxIter.get()) {
-                    STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
-                } else if (storm::utility::resources::isTerminate()) {
-                    STORM_LOG_WARN("LRA computation aborted after " << iter << " iterations.");
+                    // We assume a Markov Automaton (with deterministic timed states and nondeterministic instant states)
+                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNondetIs> viHelper(mec, _transitionMatrix, aperiodicFactor, _markovianStates, _exitRates);
+                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, _exitRates, &this->getOptimizationDirection(), optimalChoices);
                 } else {
-                    STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
+                    // We assume an MDP (with nondeterministic timed states and no instant states)
+                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs> viHelper(mec, _transitionMatrix, aperiodicFactor);
+                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices);
                 }
-                
-                if (isProduceSchedulerSet()) {
-                    // We perform one more iteration step, this time tracking the scheduler choices.
-                    iterationHelper->prepareNextIteration(env);
-                    iterationHelper->iterate(env, dir, &_producedOptimalChoices.get());
-                }
-                return result;
             }
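The explicit iteration loop that used to live here has moved into LraViHelper::performValueIteration. Schematically, that driver is assumed to look as follows (illustrative C++ sketch of the pattern, not the actual Storm implementation):

    #include <cstdint>
    #include <optional>

    // Schematic value-iteration driver: iterate until the convergence check
    // succeeds or the (optional) iteration bound is hit.
    template<typename Helper, typename ValueType>
    ValueType runValueIteration(Helper& helper, bool relative, ValueType precision, std::optional<uint64_t> maxIter) {
        ValueType result{};
        for (uint64_t iter = 0; !maxIter || iter < *maxIter; ++iter) {
            helper.iterate();
            auto check = helper.checkConvergence(relative, precision);
            result = check.currentValue;
            if (check.isPrecisionAchieved) {
                break;
            }
            helper.prepareNextIteration();
        }
        return result;
    }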
             
             template <typename ValueType>

From 9fbb58788418c095fa0d6c1c006234286a48d28d Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 5 Aug 2020 11:46:19 +0200
Subject: [PATCH 19/48] LraViHelper: Fix for NondetTsNoIs

---
 .../helper/infinitehorizon/internal/LraViHelper.cpp           | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
index d4c3ba856..af4ab9dfd 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
@@ -96,7 +96,9 @@ namespace storm {
                             // The currently processed state is timed.
                             if (nondetTs()) {
                                 tsTransitionsBuilder.newRowGroup(currTsRow);
-                                tsToIsTransitionsBuilder.newRowGroup(currTsRow);
+                                if (_hasInstantStates) {
+                                    tsToIsTransitionsBuilder.newRowGroup(currTsRow);
+                                }
                             }
                             // We need to uniformize which means that a diagonal entry for the selfloop will be inserted.
                             // If there are exit rates, the uniformization factor needs to be updated.

From 68b4d8dbd2db3f97181d639c2e32550d2da66427 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Thu, 6 Aug 2020 10:13:59 +0200
Subject: [PATCH 20/48] Nondet Lra: Fixed LP implementation for Markov
 automata.

---
 ...eNondeterministicInfiniteHorizonHelper.cpp | 62 ++++++++++++++-----
 ...rseNondeterministicInfiniteHorizonHelper.h |  1 +
 2 files changed, 47 insertions(+), 16 deletions(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
index 7a9cb003e..6adbfc4d4 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
@@ -81,6 +81,8 @@ namespace storm {
             
             template <typename ValueType>
             std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter) {
+                // We will compute the long-run average value for each MEC individually and then set up a min-max equation system to compute the values at the non-MEC states as well.
+                // For a description of this approach see, e.g., Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
                 
                  // Prepare an environment for the underlying solvers
                 auto underlyingSolverEnvironment = env;
@@ -232,42 +234,70 @@ namespace storm {
             
             template <typename ValueType>
             ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
-                std::shared_ptr<storm::solver::LpSolver<ValueType>> solver = storm::utility::solver::getLpSolver<ValueType>("LRA for MEC");
+                // Create an LP solver
+                auto solver = storm::utility::solver::LpSolverFactory<ValueType>().create("LRA for MEC");
+                
+                // Now build the LP formulation as described in:
+                // Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
                 solver->setOptimizationDirection(invert(this->getOptimizationDirection()));
                 
-                // First, we need to create the variables for the problem.
+                // Create variables
+                // TODO: Investigate whether we can easily make the variables bounded
                 std::map<uint_fast64_t, storm::expressions::Variable> stateToVariableMap;
                 for (auto const& stateChoicesPair : mec) {
-                    std::string variableName = "h" + std::to_string(stateChoicesPair.first);
+                    std::string variableName = "x" + std::to_string(stateChoicesPair.first);
                     stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName);
                 }
-                storm::expressions::Variable lambda = solver->addUnboundedContinuousVariable("L", 1);
+                storm::expressions::Variable k = solver->addUnboundedContinuousVariable("k", storm::utility::one<ValueType>());
                 solver->update();
                 
-                // Now we encode the problem as constraints.
+                // Add constraints.
                 for (auto const& stateChoicesPair : mec) {
                     uint_fast64_t state = stateChoicesPair.first;
+                    bool stateIsMarkovian = _markovianStates && _markovianStates->get(state);
                     
-                    // Now, based on the type of the state, create a suitable constraint.
+                    // Now create a suitable constraint for each choice
+                    // x_s  {≤, ≥}  -k/rate(s) + sum_s' P(s,act,s') * x_s' + (value(s)/rate(s) + value(s,act))
                     for (auto choice : stateChoicesPair.second) {
-                        storm::expressions::Expression constraint = -lambda;
-                        
-                        for (auto const& element : _transitionMatrix.getRow(choice)) {
-                            constraint = constraint + stateToVariableMap.at(element.getColumn()) * solver->getConstant(element.getValue());
+                        std::vector<storm::expressions::Expression> summands;
+                        auto matrixRow = _transitionMatrix.getRow(choice);
+                        summands.reserve(matrixRow.getNumberOfEntries() + 2);
+                        // add -k/rate(s) (only if s is either a Markovian state or we have an MDP)
+                        if (stateIsMarkovian) {
+                            summands.push_back(-(k / solver->getManager().rational((*_exitRates)[state])));
+                        } else if (!isContinuousTime()) {
+                            summands.push_back(-k);
+                        }
+                        // add sum_s' P(s,act,s') * x_s'
+                        for (auto const& element : matrixRow) {
+                            summands.push_back(stateToVariableMap.at(element.getColumn()) * solver->getConstant(element.getValue()));
+                        }
+                        // add value for state and choice
+                        ValueType value;
+                        if (stateIsMarkovian) {
+                            // divide state reward with exit rate
+                            value = stateRewardsGetter(state) / (*_exitRates)[state] + actionRewardsGetter(choice);
+                        } else if (!isContinuousTime()) {
+                            // in discrete time models no scaling is needed
+                            value = stateRewardsGetter(state) + actionRewardsGetter(choice);
+                        } else {
+                            // state is a probabilistic state of a Markov automaton. The state reward is not collected
+                            value = actionRewardsGetter(choice);
                         }
-                        constraint = solver->getConstant(stateRewardsGetter(state) + actionRewardsGetter(choice)) + constraint;
-                        
+                        summands.push_back(solver->getConstant(value));
+                        storm::expressions::Expression constraint;
                         if (this->minimize()) {
-                            constraint = stateToVariableMap.at(state) <= constraint;
+                            constraint = stateToVariableMap.at(state) <= storm::expressions::sum(summands);
                         } else {
-                            constraint = stateToVariableMap.at(state) >= constraint;
+                            constraint = stateToVariableMap.at(state) >= storm::expressions::sum(summands);
                         }
-                        solver->addConstraint("state" + std::to_string(state) + "," + std::to_string(choice), constraint);
+                        solver->addConstraint("s" + std::to_string(state) + "," + std::to_string(choice), constraint);
                     }
                 }
                 
                 solver->optimize();
-                return solver->getContinuousValue(lambda);
+                STORM_LOG_THROW(!isProduceSchedulerSet(), storm::exceptions::NotImplementedException, "Scheduler extraction is not yet implemented for LP based LRA method.");
+                return solver->getContinuousValue(k);
             }
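For intuition, a worked instance of the constraint scheme above (illustrative numbers, not taken from the code): consider a MEC consisting of a single Markovian state s with rate(s) = 2, one choice with a selfloop P(s,act,s) = 1, state reward value(s) = 3, and no action reward. When maximizing, the constraint becomes

    x_s >= -k/2 + x_s + 3/2

which simplifies to k >= 3; since the solver minimizes k (the optimization direction is inverted above), the optimum is k = 3, matching the long-run average reward rate of remaining in s.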
             
             /*!
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
index b2d30fc4d..a58d91025 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
@@ -100,6 +100,7 @@ namespace storm {
                 ValueType computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
                 /*!
                  * As computeLraForMec but uses linear programming as a solution method (independent of what is set in env)
+                 * @see Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
                  */
                 ValueType computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
                 

From 626b7a819a681afbf4817b9541bfd385e4ba0a23 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Thu, 6 Aug 2020 14:50:16 +0200
Subject: [PATCH 21/48] InfiniteHorizon: Fixed storing backward transitions
 properly. Allowed to specify a MEC decomposition. Pushed the _produceScheduler
 flag to the SingleValueModelCheckerHelper.

---
 .../SparseMarkovAutomatonCslModelChecker.cpp  |  4 +-
 .../helper/SingleValueModelCheckerHelper.cpp  | 15 +++
 .../helper/SingleValueModelCheckerHelper.h    | 14 ++-
 ...eNondeterministicInfiniteHorizonHelper.cpp | 91 +++++++++++--------
 ...rseNondeterministicInfiniteHorizonHelper.h | 36 +++++---
 .../prctl/SparseMdpPrctlModelChecker.cpp      |  4 +-
 6 files changed, 106 insertions(+), 58 deletions(-)

diff --git a/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp b/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp
index f68db3db2..d3fa819b7 100644
--- a/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp
+++ b/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp
@@ -142,7 +142,7 @@ namespace storm {
             std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
             ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
 
-            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
+            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
             storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
 			auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
 
@@ -159,7 +159,7 @@ namespace storm {
             STORM_LOG_THROW(this->getModel().isClosed(), storm::exceptions::InvalidPropertyException, "Unable to compute long run average rewards in non-closed Markov automaton.");
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
             
-            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
+            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
             storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
             auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
 
diff --git a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
index b884d6ee2..0487cb673 100644
--- a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
+++ b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
@@ -5,6 +5,11 @@ namespace storm {
     namespace modelchecker {
         namespace helper {
             
+            template <typename ValueType, storm::dd::DdType DdType>
+            SingleValueModelCheckerHelper<ValueType, DdType>::SingleValueModelCheckerHelper() : _produceScheduler(false) {
+                // Intentionally left empty
+            }
+            
             template <typename ValueType, storm::dd::DdType DdType>
             void SingleValueModelCheckerHelper<ValueType, DdType>::setOptimizationDirection(storm::solver::OptimizationDirection const& direction) {
                 _optimizationDirection = direction;
@@ -67,6 +72,16 @@ namespace storm {
                 STORM_LOG_ASSERT(isValueThresholdSet(), "Value Threshold comparison type was requested but not set before.");
                 return _valueThreshold->second;
             }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            void SingleValueModelCheckerHelper<ValueType, DdType>::setProduceScheduler(bool value) {
+                _produceScheduler = value;
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            bool SingleValueModelCheckerHelper<ValueType, DdType>::isProduceSchedulerSet() const {
+                return _produceScheduler;
+            }
  
             template class SingleValueModelCheckerHelper<double, storm::dd::DdType::None>;
             template class SingleValueModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::None>;
diff --git a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
index 83ea27ae0..82184ad12 100644
--- a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
+++ b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
@@ -17,7 +17,8 @@ namespace storm {
             template <typename ValueType, storm::dd::DdType DdType = storm::dd::DdType::None>
             class SingleValueModelCheckerHelper : public ModelCheckerHelper<ValueType, DdType> {
             public:
-                SingleValueModelCheckerHelper() = default;
+
+                SingleValueModelCheckerHelper();
                 ~SingleValueModelCheckerHelper() = default;
                 
                 /*!
@@ -91,9 +92,20 @@ namespace storm {
                  */
                 ValueType const& getValueThresholdValue() const;
                 
+                /*!
+                 * Sets whether an optimal scheduler shall be constructed during the computation
+                 */
+                void setProduceScheduler(bool value);
+                
+                /*!
+                 * @return whether an optimal scheduler shall be constructed during the computation
+                 */
+                bool isProduceSchedulerSet() const;
+                
             private:
                 boost::optional<storm::solver::OptimizationDirection> _optimizationDirection;
                 boost::optional<std::pair<storm::logic::ComparisonType, ValueType>> _valueThreshold;
+                bool _produceScheduler;
             };
         }
     }
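A hedged usage sketch of the scheduler production flag introduced here (illustrative only; 'model', 'env', and 'rewardModel' are assumed to be in scope, and the calls mirror the interfaces shown in this patch series):

    // Request an optimal scheduler alongside the LRA values.
    storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<double> helper(model.getTransitionMatrix());
    helper.setOptimizationDirection(storm::solver::OptimizationDirection::Maximize);
    helper.setProduceScheduler(true);
    std::vector<double> values = helper.computeLongRunAverageRewards(env, rewardModel);
    // One choice index per state of the input model.
    std::vector<uint64_t> const& choices = helper.getProducedOptimalChoices();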
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
index 6adbfc4d4..cc68d9fb7 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
@@ -21,15 +21,27 @@ namespace storm {
         namespace helper {
         
             template <typename ValueType>
-            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions) : _transitionMatrix(transitionMatrix), _backwardTransitions(backwardTransitions), _markovianStates(nullptr), _exitRates(nullptr), _produceScheduler(false) {
+            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _mecDecomposition(nullptr), _markovianStates(nullptr), _exitRates(nullptr) {
                 // Intentionally left empty.
             }
             
             template <typename ValueType>
-            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(backwardTransitions), _markovianStates(&markovianStates), _exitRates(&exitRates), _produceScheduler(false) {
+            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _mecDecomposition(nullptr), _markovianStates(&markovianStates), _exitRates(&exitRates) {
                 // Intentionally left empty.
             }
             
+            template <typename ValueType>
+            void SparseNondeterministicInfiniteHorizonHelper<ValueType>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
+                STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or set before.");
+                _backwardTransitions = &backwardTransitions;
+            }
+            
+            template <typename ValueType>
+            void SparseNondeterministicInfiniteHorizonHelper<ValueType>::provideMaximalEndComponentDecomposition(storm::storage::MaximalEndComponentDecomposition<ValueType> const& mecDecomposition) {
+                STORM_LOG_WARN_COND(_mecDecomposition == nullptr, "A MEC decomposition was provided but it was already computed or set before.");
+                _mecDecomposition = &mecDecomposition;
+            }
+            
             template <typename ValueType>
             std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
                 return computeLongRunAverageValues(env,
@@ -96,7 +108,7 @@ namespace storm {
                 }
                 
                 // If requested, allocate memory for the choices made
-                if (isProduceSchedulerSet()) {
+                if (this->isProduceSchedulerSet()) {
                     if (!_producedOptimalChoices.is_initialized()) {
                         _producedOptimalChoices.emplace();
                     }
@@ -104,40 +116,37 @@ namespace storm {
                 }
                 
                 // Start by decomposing the Model into its MECs.
-                storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(_transitionMatrix, _backwardTransitions);
+                if (_mecDecomposition == nullptr) {
+                    // The decomposition has not been provided or computed yet.
+                    if (_backwardTransitions == nullptr) {
+                        _computedBackwardTransitions = _transitionMatrix.transpose(true);
+                        _backwardTransitions = &_computedBackwardTransitions;
+                    }
+                    _computedMecDecomposition = storm::storage::MaximalEndComponentDecomposition<ValueType>(_transitionMatrix, *_backwardTransitions);
+                    _mecDecomposition = &_computedMecDecomposition;
+                }
 
                 // Compute the long-run average for all end components in isolation.
                 std::vector<ValueType> mecLraValues;
-                mecLraValues.reserve(mecDecomposition.size());
-                for (auto const& mec : mecDecomposition) {
+                mecLraValues.reserve(_mecDecomposition->size());
+                for (auto const& mec : *_mecDecomposition) {
                     mecLraValues.push_back(computeLraForMec(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, mec));
                 }
                 
                 // Solve the resulting SSP where end components are collapsed into single auxiliary states
-                return buildAndSolveSsp(underlyingSolverEnvironment, mecDecomposition, mecLraValues);
-            }
-            
-            
-            template <typename ValueType>
-            void SparseNondeterministicInfiniteHorizonHelper<ValueType>::setProduceScheduler(bool value) {
-                _produceScheduler = value;
-            }
-            
-            template <typename ValueType>
-            bool SparseNondeterministicInfiniteHorizonHelper<ValueType>::isProduceSchedulerSet() const {
-                return _produceScheduler;
+                return buildAndSolveSsp(underlyingSolverEnvironment, mecLraValues);
             }
             
             template <typename ValueType>
             std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const {
-                STORM_LOG_ASSERT(isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
+                STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
                 STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
                 return _producedOptimalChoices.get();
             }
             
             template <typename ValueType>
             std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() {
-                STORM_LOG_ASSERT(isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
+                STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
                 STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
                 return _producedOptimalChoices.get();
             }
@@ -169,7 +178,7 @@ namespace storm {
                         // Singleton MECs have to consist of a Markovian state because of the non-Zenoness assumption. Then, there is just one possible choice.
                         STORM_LOG_THROW(_markovianStates->get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
                         STORM_LOG_ASSERT(mec.begin()->second.size() == 1, "Markovian state has Nondeterministic behavior.");
-                        if (isProduceSchedulerSet()) {
+                        if (this->isProduceSchedulerSet()) {
                             _producedOptimalChoices.get()[state] = 0;
                         }
                         return stateRewardsGetter(state) + (*_exitRates)[state] * actionRewardsGetter(*choiceIt);
@@ -184,7 +193,7 @@ namespace storm {
                                 bestChoice = *choiceIt;
                             }
                         }
-                        if (isProduceSchedulerSet()) {
+                        if (this->isProduceSchedulerSet()) {
                             _producedOptimalChoices.get()[state] = bestChoice - _transitionMatrix.getRowGroupIndices()[state];
                         }
                         return bestValue + stateRewardsGetter(state);
@@ -200,7 +209,7 @@ namespace storm {
                     STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
                     method = storm::solver::LraMethod::ValueIteration;
                 }
-                STORM_LOG_ERROR_COND(!isProduceSchedulerSet() || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
+                STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet() || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
                 if (method == storm::solver::LraMethod::LinearProgramming) {
                     return computeLraForMecLp(env, stateRewardsGetter, actionRewardsGetter, mec);
                 } else if (method == storm::solver::LraMethod::ValueIteration) {
@@ -216,7 +225,7 @@ namespace storm {
                 // Collect some parameters of the computation
                 ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
                 std::vector<uint64_t>* optimalChoices = nullptr;
-                if (isProduceSchedulerSet()) {
+                if (this->isProduceSchedulerSet()) {
                     optimalChoices = &_producedOptimalChoices.get();
                 }
                 
@@ -296,7 +305,7 @@ namespace storm {
                 }
                 
                 solver->optimize();
-                STORM_LOG_THROW(!isProduceSchedulerSet(), storm::exceptions::NotImplementedException, "Scheduler extraction is not yet implemented for LP based LRA method.");
+                STORM_LOG_THROW(!this->isProduceSchedulerSet(), storm::exceptions::NotImplementedException, "Scheduler extraction is not yet implemented for LP based LRA method.");
                 return solver->getContinuousValue(k);
             }
             
@@ -339,7 +348,8 @@ namespace storm {
             }
             
             template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, storm::storage::MaximalEndComponentDecomposition<ValueType> const& mecDecomposition, std::vector<ValueType> const& mecLraValues) {
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) {
+                STORM_LOG_ASSERT(_mecDecomposition != nullptr, "MEC decomposition has not been computed yet.");
                 
                 // Let's improve readability a bit
                 uint64_t numberOfStates = _transitionMatrix.getRowGroupCount();
@@ -353,8 +363,8 @@ namespace storm {
                 // and create a mapping from states that lie in a MEC to the corresponding MEC index.
                 storm::storage::BitVector statesInMecs(numberOfStates);
                 std::vector<uint64_t> inputToSspStateMap(numberOfStates, std::numeric_limits<uint64_t>::max());
-                for (uint64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
-                    for (auto const& stateChoicesPair : mecDecomposition[currentMecIndex]) {
+                for (uint64_t currentMecIndex = 0; currentMecIndex < _mecDecomposition->size(); ++currentMecIndex) {
+                    for (auto const& stateChoicesPair : (*_mecDecomposition)[currentMecIndex]) {
                         statesInMecs.set(stateChoicesPair.first);
                         inputToSspStateMap[stateChoicesPair.first] = currentMecIndex;
                     }
@@ -379,7 +389,7 @@ namespace storm {
                 
                 // The next step is to create the SSP matrix and the right-hand side of the SSP.
                 std::vector<ValueType> rhs;
-                uint64_t numberOfSspStates = numberOfStatesNotInMecs + mecDecomposition.size();
+                uint64_t numberOfSspStates = numberOfStatesNotInMecs + _mecDecomposition->size();
                 typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates , 0, false, true, numberOfSspStates);
                 // If the source state of a transition is not contained in any MEC, we copy its choices (and perform the necessary modifications).
                 uint64_t currentSspChoice = 0;
@@ -392,8 +402,8 @@ namespace storm {
                     }
                 }
                 // Now we construct the choices for the auxiliary states which reflect former MEC states.
-                for (uint64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
-                    storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
+                for (uint64_t mecIndex = 0; mecIndex < _mecDecomposition->size(); ++mecIndex) {
+                    storm::storage::MaximalEndComponent const& mec = (*_mecDecomposition)[mecIndex];
                     sspMatrixBuilder.newRowGroup(currentSspChoice);
                     for (auto const& stateChoicesPair : mec) {
                         uint64_t const& mecState = stateChoicesPair.first;
@@ -403,7 +413,7 @@ namespace storm {
                             if (choicesInMec.find(choice) == choicesInMec.end()) {
                                 rhs.push_back(storm::utility::zero<ValueType>());
                                 addSspMatrixChoice(choice, _transitionMatrix, inputToSspStateMap, numberOfStatesNotInMecs, currentSspChoice, sspMatrixBuilder);
-                                if (isProduceSchedulerSet()) {
+                                if (this->isProduceSchedulerSet()) {
                                     // Later we need to be able to map this choice back to the original input model
                                     sspMecExitChoicesToOriginalMap.emplace_back(mecState, choice - nondeterministicChoiceIndices[mecState]);
                                 }
@@ -413,7 +423,7 @@ namespace storm {
                     }
                     // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
                     rhs.push_back(mecLraValues[mecIndex]);
-                    if (isProduceSchedulerSet()) {
+                    if (this->isProduceSchedulerSet()) {
                         // Insert some invalid values so we can later detect that this choice is not an exit choice
                         sspMecExitChoicesToOriginalMap.emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
                     }
@@ -429,7 +439,7 @@ namespace storm {
                 std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(env, sspMatrix);
                 solver->setHasUniqueSolution();
                 solver->setHasNoEndComponents();
-                solver->setTrackScheduler(isProduceSchedulerSet());
+                solver->setTrackScheduler(this->isProduceSchedulerSet());
                 auto lowerUpperBounds = std::minmax_element(mecLraValues.begin(), mecLraValues.end());
                 solver->setLowerBound(*lowerUpperBounds.first);
                 solver->setUpperBound(*lowerUpperBounds.second);
@@ -440,7 +450,7 @@ namespace storm {
                 solver->solveEquations(env, this->getOptimizationDirection(), x, rhs);
 
                 // Prepare scheduler (if requested)
-                if (isProduceSchedulerSet() && solver->hasScheduler()) {
+                if (this->isProduceSchedulerSet() && solver->hasScheduler()) {
                     // Translate result for ssp matrix to original model
                     auto const& sspChoices = solver->getSchedulerChoices();
                     // We first take care of non-mec states
@@ -451,7 +461,7 @@ namespace storm {
                     //      a) we take an exit (non-MEC) choice at the given state
                     //      b) we have to take a MEC choice at the given state in a way that eventually an exit state of the MEC is reached
                     uint64_t exitChoiceOffset = sspMatrix.getRowGroupIndices()[numberOfStatesNotInMecs];
-                    for (auto const& mec : mecDecomposition) {
+                    for (auto const& mec : *_mecDecomposition) {
                         // Get the sspState of this MEC (using one representative mec state)
                         auto const& sspState = inputToSspStateMap[mec.begin()->first];
                         uint64_t sspChoiceIndex = sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState];
@@ -474,12 +484,17 @@ namespace storm {
                                     _producedOptimalChoices.get()[stateActions.first] = std::numeric_limits<uint64_t>::max();
                                 }
                             }
+                            // Ensure that backwards transitions are available
+                            if (_backwardTransitions == nullptr) {
+                                _computedBackwardTransitions = _transitionMatrix.transpose(true);
+                                _backwardTransitions = &_computedBackwardTransitions;
+                            }
                             // Now start a backwards DFS
                             std::vector<uint64_t> stack = {originalStateChoice.first};
                             while (!stack.empty()) {
                                 uint64_t currentState = stack.back();
                                 stack.pop_back();
-                                for (auto const& backwardsTransition : _backwardTransitions.getRowGroup(currentState)) {
+                                for (auto const& backwardsTransition : _backwardTransitions->getRowGroup(currentState)) {
                                     uint64_t predecessorState = backwardsTransition.getColumn();
                                     if (mec.containsState(predecessorState)) {
                                         auto& selectedPredChoice = _producedOptimalChoices.get()[predecessorState];
@@ -506,7 +521,7 @@ namespace storm {
                         }
                     }
                 } else {
-                    STORM_LOG_ERROR_COND(!isProduceSchedulerSet(), "Requested to produce a scheduler, but no scheduler was generated.");
+                    STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet(), "Requested to produce a scheduler, but no scheduler was generated.");
                 }
                 
                 // Prepare result vector.
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
index a58d91025..3a63e7946 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
@@ -21,12 +21,26 @@ namespace storm {
                 /*!
                  * Initializes the helper for a discrete time (i.e. MDP)
                  */
-                SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions);
+                SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix);
                 
                 /*!
                  * Initializes the helper for a continuous time (i.e. MA)
                  */
-                SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates);
+                SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates);
+                
+                /*!
+                 * Provides backward transitions that can be used during the computation.
+                 * Providing them is optional. If they are not provided, they will be computed internally.
+                 * Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the backward transitions remains valid.
+                 */
+                void provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions);
+                
+                /*!
+                 * Provides the maximal end component decomposition that can be used during the computation.
+                 * Providing the decomposition is optional. If it is not provided, it will be computed internally.
+                 * Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the decomposition remains valid.
+                 */
+                void provideMaximalEndComponentDecomposition(storm::storage::MaximalEndComponentDecomposition<ValueType> const& decomposition);
                 
                 /*!
                  * Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
@@ -52,16 +66,6 @@ namespace storm {
                  */
                 std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter);
                 
-                /*!
-                 * Sets whether an optimal scheduler shall be constructed during the computation
-                 */
-                void setProduceScheduler(bool value);
-                
-                /*!
-                 * @return whether an optimal scheduler shall be constructed during the computation
-                 */
-                bool isProduceSchedulerSet() const;
-                
                 /*!
                  * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
                  * @return the produced scheduler of the most recent call.
@@ -107,14 +111,16 @@ namespace storm {
                 /*!
                  * @return Lra values for each state
                  */
-                std::vector<ValueType> buildAndSolveSsp(Environment const& env, storm::storage::MaximalEndComponentDecomposition<ValueType> const& mecDecomposition, std::vector<ValueType> const& mecLraValues);
+                std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues);
             
             private:
                 storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
-                storm::storage::SparseMatrix<ValueType> const& _backwardTransitions;
+                storm::storage::SparseMatrix<ValueType> const* _backwardTransitions;
+                storm::storage::SparseMatrix<ValueType> _computedBackwardTransitions;
+                storm::storage::MaximalEndComponentDecomposition<ValueType> const* _mecDecomposition;
+                storm::storage::MaximalEndComponentDecomposition<ValueType> _computedMecDecomposition;
                 storm::storage::BitVector const* _markovianStates;
                 std::vector<ValueType> const* _exitRates;
-                bool _produceScheduler;
                 boost::optional<std::vector<uint64_t>> _producedOptimalChoices;
             };
 
diff --git a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
index bc0321317..3b9062ea3 100644
--- a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
+++ b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
@@ -227,7 +227,7 @@ namespace storm {
 			std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
 			ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
 			
-			storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions());
+			storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix());
             storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
 			auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
 			
@@ -242,7 +242,7 @@ namespace storm {
         std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
             STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
-            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions());
+            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix());
             storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
 			auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
             std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));

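For callers, the upshot of this patch: the sparse infinite horizon helper is now constructed from the transition matrix alone, while backward transitions and the MEC decomposition become optional inputs that are computed lazily on first use. A minimal usage sketch follows (not part of the patch; `mdp`, `env`, and `psiStates` are assumed to be a sparse MDP, an Environment, and a BitVector of target states):

    // Sketch: construct the helper and optionally hand in precomputed data.
    storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<double> helper(mdp.getTransitionMatrix());
    auto backwardTransitions = mdp.getBackwardTransitions();
    helper.provideBackwardTransitions(backwardTransitions); // optional; the helper does not take ownership
    helper.setOptimizationDirection(storm::solver::OptimizationDirection::Maximize);
    helper.setProduceScheduler(true);
    std::vector<double> lraValues = helper.computeLongRunAverageProbabilities(env, psiStates);

If neither the backward transitions nor the decomposition is provided, the helper computes and caches them internally (see the _computedBackwardTransitions/_computedMecDecomposition members above).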
From 7795ce5f35e28366d79e4f4681d0e0cc10965de1 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Thu, 6 Aug 2020 14:51:29 +0200
Subject: [PATCH 22/48] ModelCheckerHelper: Added utility function that copies
 model checking information from one helper to another.

---
 .../modelchecker/helper/ModelCheckerHelper.h  |  6 ++--
 .../utility/SetInformationFromOtherHelper.h   | 31 +++++++++++++++++++
 2 files changed, 35 insertions(+), 2 deletions(-)
 create mode 100644 src/storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h

diff --git a/src/storm/modelchecker/helper/ModelCheckerHelper.h b/src/storm/modelchecker/helper/ModelCheckerHelper.h
index 05169ae17..02ee35531 100644
--- a/src/storm/modelchecker/helper/ModelCheckerHelper.h
+++ b/src/storm/modelchecker/helper/ModelCheckerHelper.h
@@ -14,12 +14,14 @@ namespace storm {
             
             /*!
              * Helper class for solving a model checking query.
-             * @tparam ValueType The type of a single value.
+             * @tparam VT The type of a single value.
              * @tparam DdType The used library for Dds (or None in case of a sparse representation).
              */
-            template <typename ValueType, storm::dd::DdType DdType = storm::dd::DdType::None>
+            template <typename VT, storm::dd::DdType DdType = storm::dd::DdType::None>
             class ModelCheckerHelper {
             public:
+                typedef VT ValueType;
+
                 ModelCheckerHelper() = default;
                 ~ModelCheckerHelper() = default;
                 
diff --git a/src/storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h b/src/storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h
new file mode 100644
index 000000000..44dcc467c
--- /dev/null
+++ b/src/storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#include "storm/modelchecker/CheckTask.h"
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+            
+            /*!
+             * Forwards the model checking information stored in a source helper to the given target helper.
+             */
+            template <typename TargetHelperType, typename SourceHelperType>
+            void setInformationFromOtherHelperNondeterministic(TargetHelperType& targetHelper, SourceHelperType const& sourceHelper, std::function<typename TargetHelperType::StateSet(typename SourceHelperType::StateSet const&)> const& stateSetTransformer) {
+                // Relevant states
+                if (sourceHelper.hasRelevantStates()) {
+                    targetHelper.setRelevantStates(stateSetTransformer(sourceHelper.getRelevantStates()));
+                }
+                // Value threshold to which the result will be compared
+                if (sourceHelper.isValueThresholdSet()) {
+                    targetHelper.setValueThreshold(sourceHelper.getValueThresholdComparisonType(), storm::utility::convertNumber<typename TargetHelperType::ValueType>(sourceHelper.getValueThresholdValue()));
+                }
+                // Optimization direction
+                if (sourceHelper.isOptimizationDirectionSet()) {
+                    targetHelper.setOptimizationDirection(sourceHelper.getOptimizationDirection());
+                }
+                // Scheduler production
+                targetHelper.setProduceScheduler(sourceHelper.isProduceSchedulerSet());
+            }
+        }
+    }
+}
\ No newline at end of file

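A sketch of the intended call pattern for this utility, mirroring its use in the next patch (`sparseHelper`, `hybridHelper`, `odd`, and `DdType` are assumptions here): the state-set transformer bridges the two helpers' state-set types, e.g. by translating a symbolic Bdd into an explicit BitVector via an ODD.

    // Sketch: forward settings from a hybrid (symbolic) helper to a sparse one.
    storm::modelchecker::helper::setInformationFromOtherHelperNondeterministic(
        sparseHelper, hybridHelper,
        [&odd](storm::dd::Bdd<DdType> const& stateSet) { return stateSet.toVector(odd); });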
From aabe3ce77614043246fda69a998b2b24298c7898 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Thu, 6 Aug 2020 14:52:19 +0200
Subject: [PATCH 23/48] Added simple infinite horizon helper for the hybrid
 engine.

---
 .../HybridMarkovAutomatonCslModelChecker.cpp  | 25 +++--
 ...dNondeterministicInfiniteHorizonHelper.cpp | 99 +++++++++++++++++++
 ...ridNondeterministicInfiniteHorizonHelper.h | 64 ++++++++++++
 3 files changed, 178 insertions(+), 10 deletions(-)
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h

diff --git a/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp b/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp
index 8cec970c6..b2833ee78 100644
--- a/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp
+++ b/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp
@@ -5,6 +5,8 @@
 #include "storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h"
 #include "storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h"
 #include "storm/modelchecker/prctl/helper/HybridMdpPrctlHelper.h"
+#include "storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h"
+#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
 
 #include "storm/modelchecker/results/SymbolicQualitativeCheckResult.h"
 
@@ -100,20 +102,23 @@ namespace storm {
         
         template<typename ModelType>
         std::unique_ptr<CheckResult> HybridMarkovAutomatonCslModelChecker<ModelType>::computeLongRunAverageProbabilities(Environment const& env, CheckTask<storm::logic::StateFormula, ValueType> const& checkTask) {
-        storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
-        std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
-        SymbolicQualitativeCheckResult<DdType> const& subResult = subResultPointer->asSymbolicQualitativeCheckResult<DdType>();
-        STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
-
-        return storm::modelchecker::helper::HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector(), subResult.getTruthValuesVector());
-        
+            storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
+            std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
+            SymbolicQualitativeCheckResult<DdType> const& subResult = subResultPointer->asSymbolicQualitativeCheckResult<DdType>();
+            STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
+    
+            storm::modelchecker::helper::HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType> helper(this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector());
+            storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
+            return helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
         }
         
         template<typename ModelType>
         std::unique_ptr<CheckResult> HybridMarkovAutomatonCslModelChecker<ModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
-        STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
-        auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
-        return storm::modelchecker::helper::HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector(), rewardModel.get());
+            STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
+            auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
+            storm::modelchecker::helper::HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType> helper(this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector());
+            storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
+            return helper.computeLongRunAverageRewards(env, rewardModel.get());
         }
         
         // Explicitly instantiate the model checker.
diff --git a/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp
new file mode 100644
index 000000000..b9868f5a9
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp
@@ -0,0 +1,99 @@
+#include "HybridNondeterministicInfiniteHorizonHelper.h"
+
+#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
+#include "storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h"
+
+#include "storm/utility/macros.h"
+
+#include "storm/exceptions/NotSupportedException.h"
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType>::HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(nullptr) {
+                // Intentionally left empty.
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType>::HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(&markovianStates), _exitRates(&exitRateVector) {
+                // Intentionally left empty.
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType>::computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd<DdType> const& psiStates) {
+                // Convert this query to an instance for the sparse engine.
+                // Create ODD for the translation.
+                storm::dd::Odd odd = _model.getReachableStates().createOdd();
+                storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix = _transitionMatrix.toMatrix(_model.getNondeterminismVariables(), odd, odd);
+                std::unique_ptr<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>> sparseHelper;
+                std::vector<ValueType> explicitExitRateVector;
+                storm::storage::BitVector explicitMarkovianStates;
+                if (isContinuousTime()) {
+                    explicitExitRateVector = _exitRates->toVector(odd);
+                    explicitMarkovianStates = _markovianStates->toVector(odd);
+                    sparseHelper = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector);
+                } else {
+                    sparseHelper = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(explicitTransitionMatrix);
+                }
+                storm::modelchecker::helper::setInformationFromOtherHelperNondeterministic(*sparseHelper, *this, [&odd](storm::dd::Bdd<DdType> const& s){ return s.toVector(odd); });
+                STORM_LOG_WARN_COND(!this->isProduceSchedulerSet(), "Scheduler extraction is not supported by the hybrid engine.");
+                auto explicitResult = sparseHelper->computeLongRunAverageProbabilities(env, psiStates.toVector(odd));
+                return std::make_unique<HybridQuantitativeCheckResult<DdType, ValueType>>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero<ValueType>(), _model.getReachableStates(), std::move(odd), std::move(explicitResult));
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType>::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel) {
+                // Convert this query to an instance for the sparse engine.
+                // Create ODD for the translation.
+                storm::dd::Odd odd = _model.getReachableStates().createOdd();
+                
+                // Create matrix and reward vectors
+                storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix;
+                std::vector<ValueType> explicitStateRewards, explicitActionRewards;
+                if (rewardModel.hasStateRewards()) {
+                    explicitStateRewards = rewardModel.getStateRewardVector().toVector(odd);
+                }
+                if (rewardModel.hasStateActionRewards()) {
+                    // Matrix and action-based vector have to be produced at the same time to guarantee the correct order
+                    auto matrixRewards = _transitionMatrix.toMatrixVector(rewardModel.getStateActionRewardVector(), _model.getNondeterminismVariables(), odd, odd);
+                    explicitTransitionMatrix = std::move(matrixRewards.first);
+                    explicitActionRewards = std::move(matrixRewards.second);
+                } else {
+                    // Translate matrix only
+                    explicitTransitionMatrix = _transitionMatrix.toMatrix(_model.getNondeterminismVariables(), odd, odd);
+                }
+                STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are not supported in this engine.");
+                
+                // Create remaining components and helper
+                std::vector<ValueType> explicitExitRateVector;
+                storm::storage::BitVector explicitMarkovianStates;
+                std::unique_ptr<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>> sparseHelper;
+                if (isContinuousTime()) {
+                    explicitExitRateVector = _exitRates->toVector(odd);
+                    explicitMarkovianStates = _markovianStates->toVector(odd);
+                    sparseHelper = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector);
+                } else {
+                    sparseHelper = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(explicitTransitionMatrix);
+                }
+                storm::modelchecker::helper::setInformationFromOtherHelperNondeterministic(*sparseHelper, *this, [&odd](storm::dd::Bdd<DdType> const& s){ return s.toVector(odd); });
+
+                STORM_LOG_WARN_COND(!this->isProduceSchedulerSet(), "Scheduler extraction is not supported by the hybrid engine.");
+                auto explicitResult = sparseHelper->computeLongRunAverageValues(env, rewardModel.hasStateRewards() ? &explicitStateRewards : nullptr, rewardModel.hasStateActionRewards() ? &explicitActionRewards : nullptr);
+                return std::make_unique<HybridQuantitativeCheckResult<DdType, ValueType>>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero<ValueType>(), _model.getReachableStates(), std::move(odd), std::move(explicitResult));
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType>
+            bool HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType>::isContinuousTime() const {
+                STORM_LOG_ASSERT((_markovianStates == nullptr) == (_exitRates == nullptr), "Inconsistent information given: Markovian states were provided without exit rates (or vice versa).");
+                return _markovianStates != nullptr;
+            }
+            
+            template class HybridNondeterministicInfiniteHorizonHelper<double, storm::dd::DdType::CUDD>;
+            template class HybridNondeterministicInfiniteHorizonHelper<double, storm::dd::DdType::Sylvan>;
+            template class HybridNondeterministicInfiniteHorizonHelper<storm::RationalNumber, storm::dd::DdType::Sylvan>;
+            
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h
new file mode 100644
index 000000000..d6a67089b
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h
@@ -0,0 +1,64 @@
+#pragma once
+#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
+
+#include "storm/modelchecker/results/HybridQuantitativeCheckResult.h"
+
+#include "storm/models/symbolic/NondeterministicModel.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+#include "storm/storage/dd/DdManager.h"
+#include "storm/storage/dd/Add.h"
+#include "storm/storage/dd/Bdd.h"
+
+namespace storm {
+    class Environment;
+    
+    namespace modelchecker {
+        namespace helper {
+        
+            /*!
+             * Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system.
+             */
+            template <typename ValueType, storm::dd::DdType DdType>
+            class HybridNondeterministicInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType, DdType> {
+
+            public:
+                /*!
+                 * Initializes the helper for a discrete time (i.e. MDP)
+                 */
+                HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix);
+                
+                /*!
+                 * Initializes the helper for a continuous time (i.e. MA)
+                 */
+                HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector);
+                
+                /*!
+                 * Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
+                 * @return a value for each state
+                 */
+                std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd<DdType> const& psiStates);
+                
+                /*!
+                 * Computes the long run average rewards, i.e., the average reward collected per time unit
+                 * @return a value for each state
+                 */
+                std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel);
+                
+            protected:
+                
+                /*!
+                 * @return true iff this is a computation on a continuous time model (i.e. MA)
+                 */
+                bool isContinuousTime() const;
+
+
+            private:
+                storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& _model;
+                storm::dd::Add<DdType, ValueType> const& _transitionMatrix;
+                storm::dd::Bdd<DdType> const* _markovianStates;
+                storm::dd::Add<DdType, ValueType> const* _exitRates;
+            };
+        }
+    }
+}

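For reference, a minimal sketch of how the new hybrid helper is driven, following the model checker changes above (`model`, `env`, and `psiStates` are assumed to be a symbolic Markov automaton, an Environment, and a Bdd of target states): the helper translates the symbolic data to an explicit representation via an ODD and delegates to the sparse helper, so settings made on the hybrid helper carry over.

    // Sketch: long-run average probabilities on a symbolic MA via the hybrid helper.
    storm::modelchecker::helper::HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType> helper(
            model, model.getTransitionMatrix(), model.getMarkovianStates(), model.getExitRateVector());
    helper.setOptimizationDirection(storm::solver::OptimizationDirection::Minimize);
    auto result = helper.computeLongRunAverageProbabilities(env, psiStates);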
From fa6c47db64f0f997d2c950439a02fb96f5af0213 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Thu, 6 Aug 2020 14:53:18 +0200
Subject: [PATCH 24/48] Removed old LRA code for Markov automata.

---
 .../helper/HybridMarkovAutomatonCslHelper.cpp |  54 --
 .../helper/HybridMarkovAutomatonCslHelper.h   |   7 -
 .../helper/SparseMarkovAutomatonCslHelper.cpp | 475 ------------------
 .../helper/SparseMarkovAutomatonCslHelper.h   |  28 --
 4 files changed, 564 deletions(-)

diff --git a/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.cpp b/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.cpp
index d09a0e58a..4b27befa7 100644
--- a/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.cpp
+++ b/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.cpp
@@ -77,73 +77,19 @@ namespace storm {
 
             }
             
-            template<storm::dd::DdType DdType, class ValueType>
-            std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& psiStates) {
-                
-                // Convert this query to an instance for the sparse engine.
-                storm::utility::Stopwatch conversionWatch(true);
-                // Create ODD for the translation.
-                storm::dd::Odd odd = model.getReachableStates().createOdd();
-                storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix = transitionMatrix.toMatrix(model.getNondeterminismVariables(), odd, odd);
-                std::vector<ValueType> explicitExitRateVector = exitRateVector.toVector(odd);
-                conversionWatch.stop();
-                STORM_LOG_INFO("Converting symbolic matrix to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
-                
-                auto explicitResult = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(env, dir, explicitTransitionMatrix, explicitTransitionMatrix.transpose(true), explicitExitRateVector, markovianStates.toVector(odd), psiStates.toVector(odd));
-                return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(explicitResult)));
-                
-            }
-            
-            template<storm::dd::DdType DdType, class ValueType>
-            std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel) {
-                
-                // Convert this query to an instance for the sparse engine.
-                storm::utility::Stopwatch conversionWatch(true);
-                // Create ODD for the translation.
-                storm::dd::Odd odd = model.getReachableStates().createOdd();
-                std::vector<ValueType> explicitExitRateVector = exitRateVector.toVector(odd);
-                storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix;
-                boost::optional<std::vector<ValueType>> optionalStateRewards, optionalStateActionRewards;
-                if (rewardModel.hasStateRewards()) {
-                    optionalStateRewards = rewardModel.getStateRewardVector().toVector(odd);
-                }
-                if (rewardModel.hasStateActionRewards()) {
-                    auto matrixRewards = transitionMatrix.toMatrixVector(rewardModel.getStateActionRewardVector(), model.getNondeterminismVariables(), odd, odd);
-                    explicitTransitionMatrix = std::move(matrixRewards.first);
-                    optionalStateActionRewards = std::move(matrixRewards.second);
-                } else {
-                    explicitTransitionMatrix = transitionMatrix.toMatrix(model.getNondeterminismVariables(), odd, odd);
-                }
-                STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are not supported in this engine.");
-                storm::models::sparse::StandardRewardModel<ValueType> explicitRewardModel(optionalStateRewards, optionalStateActionRewards);
-                conversionWatch.stop();
-                STORM_LOG_INFO("Converting symbolic matrix to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
-                
-                auto explicitResult = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(env, dir, explicitTransitionMatrix, explicitTransitionMatrix.transpose(true), explicitExitRateVector, markovianStates.toVector(odd), explicitRewardModel);
-                return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(explicitResult)));
-                
-            }
- 
             // Explicit instantiations.
             
             // Cudd, double.
             template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::CUDD> const& markovianStates, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::CUDD> const& targetStates, bool qualitative);
             template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::CUDD> const& markovianStates, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& phiStates,  storm::dd::Bdd<storm::dd::DdType::CUDD> const& psiStates, bool qualitative, double lowerBound, double upperBound);
-            template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::CUDD> const& markovianStates, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& psiStates);
-            template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::CUDD> const& markovianStates, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel);
  
             // Sylvan, double.
             template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
             template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative, double lowerBound, double upperBound);
-            template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
-            template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel);
   
             // Sylvan, rational number.
             template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
             template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative, double lowerBound, double upperBound);
-            template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
-            template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel);
-
 
         }
     }
diff --git a/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h b/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h
index 45a82c0ff..092aeeb06 100644
--- a/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h
+++ b/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h
@@ -27,13 +27,6 @@ namespace storm {
                 template<storm::dd::DdType DdType, typename ValueType, typename std::enable_if<!storm::NumberTraits<ValueType>::SupportsExponential, int>::type = 0>
                 static std::unique_ptr<CheckResult> computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& phiStates, storm::dd::Bdd<DdType> const& psiStates, bool qualitative, double lowerBound, double upperBound);
                 
-                template<storm::dd::DdType DdType, typename ValueType>
-                static std::unique_ptr<CheckResult> computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& psiStates);
-                
-                template<storm::dd::DdType DdType, typename ValueType>
-                static std::unique_ptr<CheckResult> computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel);
-                
-
             };
             
         }
diff --git a/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.cpp b/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.cpp
index 6647d83a6..bdd1f544b 100644
--- a/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.cpp
+++ b/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.cpp
@@ -717,198 +717,6 @@ namespace storm {
                 return SparseMdpPrctlHelper<ValueType>::computeReachabilityRewards(env, dir, transitionMatrix, backwardTransitions, scaledRewardModel, psiStates, false, produceScheduler);
             }
             
-            template<typename ValueType>
-            std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates) {
-            
-                uint64_t numberOfStates = transitionMatrix.getRowGroupCount();
-
-                // If there are no goal states, we avoid the computation and directly return zero.
-                if (psiStates.empty()) {
-                    return std::vector<ValueType>(numberOfStates, storm::utility::zero<ValueType>());
-                }
-                
-                // Likewise, if all states are goal states, we can avoid the computation and directly return one.
-                if (psiStates.full()) {
-                    return std::vector<ValueType>(numberOfStates, storm::utility::one<ValueType>());
-                }
-                
-                // Otherwise, reduce the long run average probabilities to long run average rewards.
-                // Every Markovian goal state gets reward one.
-                std::vector<ValueType> stateRewards(transitionMatrix.getRowGroupCount(), storm::utility::zero<ValueType>());
-                storm::utility::vector::setVectorValues(stateRewards, markovianStates & psiStates, storm::utility::one<ValueType>());
-                storm::models::sparse::StandardRewardModel<ValueType> rewardModel(std::move(stateRewards));
-                
-                return computeLongRunAverageRewards(env, dir, transitionMatrix, backwardTransitions, exitRateVector, markovianStates, rewardModel);
-                
-            }
-            
-            template<typename ValueType, typename RewardModelType>
-            std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel) {
-                
-                uint64_t numberOfStates = transitionMatrix.getRowGroupCount();
-
-                // Start by decomposing the Markov automaton into its MECs.
-                storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(transitionMatrix, backwardTransitions);
-                
-                // Get some data members for convenience.
-                std::vector<uint64_t> const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices();
-                
-                // Now compute the long-run average for all end components in isolation.
-                std::vector<ValueType> lraValuesForEndComponents;
-                
-                // While doing so, we already gather some information for the following steps.
-                std::vector<uint64_t> stateToMecIndexMap(numberOfStates);
-                storm::storage::BitVector statesInMecs(numberOfStates);
-                
-                auto underlyingSolverEnvironment = env;
-                if (env.solver().isForceSoundness()) {
-                    // For sound computations, the error in the MECs plus the error in the remaining system should be less than the user-defined precision.
-                    underlyingSolverEnvironment.solver().minMax().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
-                    underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
-                    underlyingSolverEnvironment.solver().lra().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
-                }
-                
-                for (uint64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
-                    storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex];
-                    
-                    // Gather information for later use.
-                    for (auto const& stateChoicesPair : mec) {
-                        uint64_t state = stateChoicesPair.first;
-                        
-                        statesInMecs.set(state);
-                        stateToMecIndexMap[state] = currentMecIndex;
-                    }
-                    
-                    // Compute the LRA value for the current MEC.
-                    lraValuesForEndComponents.push_back(computeLraForMaximalEndComponent(underlyingSolverEnvironment, dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec));
-                }
-                
-                // For fast transition rewriting, we build some auxiliary data structures.
-                storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs;
-                uint64_t firstAuxiliaryStateIndex = statesNotContainedInAnyMec.getNumberOfSetBits();
-                uint64_t lastStateNotInMecs = 0;
-                uint64_t numberOfStatesNotInMecs = 0;
-                std::vector<uint64_t> statesNotInMecsBeforeIndex;
-                statesNotInMecsBeforeIndex.reserve(numberOfStates);
-                for (auto state : statesNotContainedInAnyMec) {
-                    while (lastStateNotInMecs <= state) {
-                        statesNotInMecsBeforeIndex.push_back(numberOfStatesNotInMecs);
-                        ++lastStateNotInMecs;
-                    }
-                    ++numberOfStatesNotInMecs;
-                }
-                uint64_t numberOfSspStates = numberOfStatesNotInMecs + mecDecomposition.size();
-                
-                // Finally, we are ready to create the SSP matrix and right-hand side of the SSP.
-                std::vector<ValueType> b;
-                typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates, 0, false, true, numberOfSspStates);
-                
-                // If the source state is not contained in any MEC, we copy its choices (and perform the necessary modifications).
-                uint64_t currentChoice = 0;
-                for (auto state : statesNotContainedInAnyMec) {
-                    sspMatrixBuilder.newRowGroup(currentChoice);
-                    
-                    for (uint64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice, ++currentChoice) {
-                        std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
-                        b.push_back(storm::utility::zero<ValueType>());
-                        
-                        for (auto element : transitionMatrix.getRow(choice)) {
-                            if (statesNotContainedInAnyMec.get(element.getColumn())) {
-                                // If the target state is not contained in an MEC, we can copy over the entry.
-                                sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
-                            } else {
-                                // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
-                                // so that we are able to write the cumulative probability to the MEC into the matrix.
-                                auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
-                            }
-                        }
-                        
-                        // Now insert all (cumulative) probability values that target an MEC.
-                        for (uint64_t mecIndex = 0; mecIndex < auxiliaryStateToProbabilityMap.size(); ++mecIndex) {
-                            if (auxiliaryStateToProbabilityMap[mecIndex] != 0) {
-                                sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + mecIndex, auxiliaryStateToProbabilityMap[mecIndex]);
-                            }
-                        }
-                    }
-                }
-                
-                // Now we are ready to construct the choices for the auxiliary states.
-                for (uint64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
-                    storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
-                    sspMatrixBuilder.newRowGroup(currentChoice);
-                    
-                    for (auto const& stateChoicesPair : mec) {
-                        uint64_t state = stateChoicesPair.first;
-                        storm::storage::FlatSet<uint64_t> const& choicesInMec = stateChoicesPair.second;
-                        
-                        for (uint64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) {
-                            
-                            // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state.
-                            if (choicesInMec.find(choice) == choicesInMec.end()) {
-                                std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
-                                b.push_back(storm::utility::zero<ValueType>());
-                                
-                                for (auto element : transitionMatrix.getRow(choice)) {
-                                    if (statesNotContainedInAnyMec.get(element.getColumn())) {
-                                        // If the target state is not contained in an MEC, we can copy over the entry.
-                                        sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
-                                    } else {
-                                        // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
-                                        // so that we are able to write the cumulative probability to the MEC into the matrix.
-                                        auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
-                                    }
-                                }
-                                
-                                // Now insert all (cumulative) probability values that target an MEC.
-                                for (uint64_t targetMecIndex = 0; targetMecIndex < auxiliaryStateToProbabilityMap.size(); ++targetMecIndex) {
-                                    if (auxiliaryStateToProbabilityMap[targetMecIndex] != 0) {
-                                        sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + targetMecIndex, auxiliaryStateToProbabilityMap[targetMecIndex]);
-                                    }
-                                }
-                                
-                                ++currentChoice;
-                            }
-                        }
-                    }
-                    
-                    // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
-                    ++currentChoice;
-                    b.push_back(lraValuesForEndComponents[mecIndex]);
-                }
-                
-                // Finalize the matrix and solve the corresponding system of equations.
-                storm::storage::SparseMatrix<ValueType> sspMatrix = sspMatrixBuilder.build(currentChoice, numberOfSspStates, numberOfSspStates);
-                
-                std::vector<ValueType> x(numberOfSspStates);
-                
-                // Check for requirements of the solver.
-                storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
-                storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, dir);
-                requirements.clearBounds();
-                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
-
-                std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(underlyingSolverEnvironment, sspMatrix);
-                solver->setHasUniqueSolution();
-                solver->setHasNoEndComponents();
-                solver->setLowerBound(storm::utility::zero<ValueType>());
-                solver->setUpperBound(*std::max_element(lraValuesForEndComponents.begin(), lraValuesForEndComponents.end()));
-                solver->setRequirementsChecked();
-                solver->solveEquations(underlyingSolverEnvironment, dir, x, b);
-                
-                // Prepare result vector.
-                std::vector<ValueType> result(numberOfStates);
-                
-                // Set the values for states not contained in MECs.
-                storm::utility::vector::setVectorValues(result, statesNotContainedInAnyMec, x);
-                
-                // Set the values for all states in MECs.
-                for (auto state : statesInMecs) {
-                    result[state] = x[firstAuxiliaryStateIndex + stateToMecIndexMap[state]];
-                }
-                
-                return result;
-            }
-            
             template <typename ValueType>
             MDPSparseModelCheckingHelperReturnType<ValueType> SparseMarkovAutomatonCslHelper::computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler) {
                 
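The removed computeLongRunAverageProbabilities is a thin reduction layer; written out, it computes (a reconstruction of the comments above, with MS denoting the set of Markovian states):

    \mathrm{LRA}^{\mathrm{opt}}(s \models \psi) \;=\; \mathrm{LRA}^{\mathrm{opt}}(s, r_\psi),
    \qquad r_\psi(s') = \begin{cases} 1 & \text{if } s' \in \mathit{MS} \cap \psi, \\ 0 & \text{otherwise,} \end{cases}

i.e. the long-run average probability of the goal set equals the long-run average reward in a model where exactly the Markovian goal states earn state reward one.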
@@ -922,268 +730,6 @@ namespace storm {
                 return SparseMdpPrctlHelper<ValueType>::computeReachabilityRewards(env, dir, transitionMatrix, backwardTransitions, rewardModel, psiStates, false, produceScheduler);
             }
 
-            template<typename ValueType, typename RewardModelType>
-            ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
-                
-                // If the MEC only consists of a single state, we compute the LRA value directly
-                if (++mec.begin() == mec.end()) {
-                    uint64_t state = mec.begin()->first;
-                    STORM_LOG_THROW(markovianStates.get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
-                    ValueType result = rewardModel.hasStateRewards() ? rewardModel.getStateReward(state) : storm::utility::zero<ValueType>();
-                    if (rewardModel.hasStateActionRewards() || rewardModel.hasTransitionRewards()) {
-                        STORM_LOG_ASSERT(mec.begin()->second.size() == 1, "Markovian state has nondeterministic behavior.");
-                        uint64_t choice = *mec.begin()->second.begin();
-                        result += exitRateVector[state] * rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, storm::utility::zero<ValueType>());
-                    }
-                    return result;
-                }
-                
-                // Solve MEC with the method specified in the settings
-                storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod();
-                if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::LinearProgramming) {
-                    STORM_LOG_INFO("Selecting 'LP' as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
-                    method = storm::solver::LraMethod::LinearProgramming;
-                } else if (env.solver().isForceSoundness() && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) {
-                    STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
-                    method = storm::solver::LraMethod::ValueIteration;
-                }
-                if (method == storm::solver::LraMethod::LinearProgramming) {
-                    return computeLraForMaximalEndComponentLP(env, dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec);
-                } else if (method == storm::solver::LraMethod::ValueIteration) {
-                    return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec);
-                } else {
-                    STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
-                }
-            }
-            
-            template<typename ValueType, typename RewardModelType>
-            ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
-                std::unique_ptr<storm::utility::solver::LpSolverFactory<ValueType>> lpSolverFactory(new storm::utility::solver::LpSolverFactory<ValueType>());
-                std::unique_ptr<storm::solver::LpSolver<ValueType>> solver = lpSolverFactory->create("LRA for MEC");
-                solver->setOptimizationDirection(invert(dir));
-                
-                // First, we need to create the variables for the problem.
-                std::map<uint64_t, storm::expressions::Variable> stateToVariableMap;
-                for (auto const& stateChoicesPair : mec) {
-                    std::string variableName = "x" + std::to_string(stateChoicesPair.first);
-                    stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName);
-                }
-                storm::expressions::Variable k = solver->addUnboundedContinuousVariable("k", storm::utility::one<ValueType>());
-                solver->update();
-                
-                // Now we encode the problem as constraints.
-                std::vector<uint64_t> const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices();
-                for (auto const& stateChoicesPair : mec) {
-                    uint64_t state = stateChoicesPair.first;
-                    
-                    // Now, based on the type of the state, create a suitable constraint.
-                    if (markovianStates.get(state)) {
-                        STORM_LOG_ASSERT(stateChoicesPair.second.size() == 1, "Markovian state " << state << " is not deterministic: It has " << stateChoicesPair.second.size() << " choices.");
-                        uint64_t choice = *stateChoicesPair.second.begin();
-                        
-                        storm::expressions::Expression constraint = stateToVariableMap.at(state);
-                        
-                        for (auto element : transitionMatrix.getRow(nondeterministicChoiceIndices[state])) {
-                            constraint = constraint - stateToVariableMap.at(element.getColumn()) * solver->getManager().rational((element.getValue()));
-                        }
-                        
-                        constraint = constraint + solver->getManager().rational(storm::utility::one<ValueType>() / exitRateVector[state]) * k;
-                        
-                        storm::expressions::Expression rightHandSide = solver->getManager().rational(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, (ValueType) (storm::utility::one<ValueType>() / exitRateVector[state])));
-                        if (dir == OptimizationDirection::Minimize) {
-                            constraint = constraint <= rightHandSide;
-                        } else {
-                            constraint = constraint >= rightHandSide;
-                        }
-                        solver->addConstraint("state" + std::to_string(state), constraint);
-                    } else {
-                        // For probabilistic states, we want to add the constraint x_s <= sum P(s, a, s') * x_s' where a is the current action
-                        // and the sum ranges over all states s'.
-                        for (auto choice : stateChoicesPair.second) {
-                            storm::expressions::Expression constraint = stateToVariableMap.at(state);
-                            
-                            for (auto element : transitionMatrix.getRow(choice)) {
-                                constraint = constraint - stateToVariableMap.at(element.getColumn()) * solver->getManager().rational(element.getValue());
-                            }
-
-                            storm::expressions::Expression rightHandSide = solver->getManager().rational(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, storm::utility::zero<ValueType>()));
-                            if (dir == OptimizationDirection::Minimize) {
-                                constraint = constraint <= rightHandSide;
-                            } else {
-                                constraint = constraint >= rightHandSide;
-                            }
-                            solver->addConstraint("state" + std::to_string(state), constraint);
-                        }
-                    }
-                }
-                
-                solver->optimize();
-                return solver->getContinuousValue(k);
-            }
-            
-            template<typename ValueType, typename RewardModelType>
-            ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
-                
-                // Initialize data about the MEC
-                
-                storm::storage::BitVector mecStates(transitionMatrix.getRowGroupCount(), false);
-                storm::storage::BitVector mecChoices(transitionMatrix.getRowCount(), false);
-                for (auto const& stateChoicesPair : mec) {
-                    mecStates.set(stateChoicesPair.first);
-                    for (auto const& choice : stateChoicesPair.second) {
-                        mecChoices.set(choice);
-                    }
-                }
-                storm::storage::BitVector markovianMecStates = mecStates & markovianStates;
-                storm::storage::BitVector probabilisticMecStates = mecStates & ~markovianStates;
-                storm::storage::BitVector probabilisticMecChoices = transitionMatrix.getRowFilter(probabilisticMecStates) & mecChoices;
-                STORM_LOG_THROW(!markovianMecStates.empty(), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
-                bool hasProbabilisticStates = !probabilisticMecStates.empty();
-                // Get the uniformization rate
-                
-                ValueType uniformizationRate = storm::utility::vector::max_if(exitRateVector, markovianMecStates);
-                // To ensure that the model is aperiodic, we need to make sure that every Markovian state gets a self loop.
-                // Hence, we increase the uniformization rate a little.
-                uniformizationRate *= (storm::utility::one<ValueType>() + storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor()));
-
-                // Get the transitions of the submodel, that is
-                // * a matrix aMarkovian with all (uniformized) transitions from Markovian mec states to all Markovian mec states.
-                // * a matrix aMarkovianToProbabilistic with all (uniformized) transitions from Markovian mec states to all probabilistic mec states.
-                // * a matrix aProbabilistic with all transitions from probabilistic mec states to other probabilistic mec states.
-                // * a matrix aProbabilisticToMarkovian with all transitions from probabilistic mec states to all Markovian mec states.
-                typename storm::storage::SparseMatrix<ValueType> aMarkovian = transitionMatrix.getSubmatrix(true, markovianMecStates, markovianMecStates, true);
-                typename storm::storage::SparseMatrix<ValueType> aMarkovianToProbabilistic, aProbabilistic, aProbabilisticToMarkovian;
-                if (hasProbabilisticStates) {
-                    aMarkovianToProbabilistic = transitionMatrix.getSubmatrix(true, markovianMecStates, probabilisticMecStates);
-                    aProbabilistic = transitionMatrix.getSubmatrix(false, probabilisticMecChoices, probabilisticMecStates);
-                    aProbabilisticToMarkovian = transitionMatrix.getSubmatrix(false, probabilisticMecChoices, markovianMecStates);
-                }
-                
-                // The matrices with transitions from Markovian states need to be uniformized.
-                uint64_t subState = 0;
-                for (auto state : markovianMecStates) {
-                    ValueType uniformizationFactor = exitRateVector[state] / uniformizationRate;
-                    if (hasProbabilisticStates) {
-                        for (auto& entry : aMarkovianToProbabilistic.getRow(subState)) {
-                            entry.setValue(entry.getValue() * uniformizationFactor);
-                        }
-                    }
-                    for (auto& entry : aMarkovian.getRow(subState)) {
-                        if (entry.getColumn() == subState) {
-                            entry.setValue(storm::utility::one<ValueType>() - uniformizationFactor * (storm::utility::one<ValueType>() - entry.getValue()));
-                        } else {
-                            entry.setValue(entry.getValue() * uniformizationFactor);
-                        }
-                    }
-                    ++subState;
-                }
-
-                // Compute the rewards obtained in a single uniformization step
-                
-                std::vector<ValueType> markovianChoiceRewards;
-                markovianChoiceRewards.reserve(aMarkovian.getRowCount());
-                for (auto const& state : markovianMecStates) {
-                    ValueType stateRewardScalingFactor = storm::utility::one<ValueType>() / uniformizationRate;
-                    ValueType actionRewardScalingFactor = exitRateVector[state] / uniformizationRate;
-                    STORM_LOG_ASSERT(transitionMatrix.getRowGroupSize(state) == 1, "Markovian state " << state << " has more than one choice.");
-                    uint64_t choice = transitionMatrix.getRowGroupIndices()[state];
-                    markovianChoiceRewards.push_back(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, stateRewardScalingFactor, actionRewardScalingFactor));
-                }
-                
-                std::vector<ValueType> probabilisticChoiceRewards;
-                if (hasProbabilisticStates) {
-                    probabilisticChoiceRewards.reserve(aProbabilistic.getRowCount());
-                    for (auto const& state : probabilisticMecStates) {
-                        uint64_t groupStart = transitionMatrix.getRowGroupIndices()[state];
-                        uint64_t groupEnd = transitionMatrix.getRowGroupIndices()[state + 1];
-                        for (uint64_t choice = probabilisticMecChoices.getNextSetIndex(groupStart); choice < groupEnd; choice = probabilisticMecChoices.getNextSetIndex(choice + 1)) {
-                            probabilisticChoiceRewards.push_back(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, storm::utility::zero<ValueType>()));
-                        }
-                    }
-                }
-                
-                // Start the iterations.
-                
-                ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()) / uniformizationRate;
-                bool relative = env.solver().lra().getRelativeTerminationCriterion();
-                std::vector<ValueType> v(aMarkovian.getRowCount(), storm::utility::zero<ValueType>());
-                std::vector<ValueType> w = v;
-                std::vector<ValueType> x, b;
-                auto solverEnv = env;
-                std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver;
-                if (hasProbabilisticStates) {
-                    if (env.solver().isForceSoundness()) {
-                        // To get correct results, the inner equation systems are solved exactly.
-                        // TODO investigate how an error would propagate
-                        solverEnv.solver().setForceExact(true);
-                    }
-                    
-                    x.resize(aProbabilistic.getRowGroupCount(), storm::utility::zero<ValueType>());
-                    b = probabilisticChoiceRewards;
-                    
-                    solver = setUpProbabilisticStatesSolver(solverEnv, dir, aProbabilistic);
-                }
-                
-                uint64_t iter = 0;
-                boost::optional<uint64_t> maxIter;
-                if (env.solver().lra().isMaximalIterationCountSet()) {
-                    maxIter = env.solver().lra().getMaximalIterationCount();
-                }
-                while (!maxIter.is_initialized() || iter < maxIter.get()) {
-                    ++iter;
-                    // Compute the expected total rewards for the probabilistic states
-                    if (hasProbabilisticStates) {
-                        if (solver) {
-                            solver->solveEquations(solverEnv, dir, x, b);
-                        } else {
-                            storm::utility::vector::reduceVectorMinOrMax(dir, b, x, aProbabilistic.getRowGroupIndices());
-                        }
-                    }
-                    // Now compute the values for the Markovian states. We also keep track of the maximal and minimal difference between two values (for convergence checking).
-                    auto vIt = v.begin();
-                    uint64_t row = 0;
-                    ValueType newValue = markovianChoiceRewards[row] + aMarkovian.multiplyRowWithVector(row, w);
-                    if (hasProbabilisticStates) {
-                        newValue += aMarkovianToProbabilistic.multiplyRowWithVector(row, x);
-                    }
-                    ValueType maxDiff = newValue - *vIt;
-                    ValueType minDiff = maxDiff;
-                    *vIt = newValue;
-                    for (++vIt, ++row; row < aMarkovian.getRowCount(); ++vIt, ++row) {
-                        newValue = markovianChoiceRewards[row] + aMarkovian.multiplyRowWithVector(row, w);
-                        if (hasProbabilisticStates) {
-                            newValue += aMarkovianToProbabilistic.multiplyRowWithVector(row, x);
-                        }
-                        ValueType diff = newValue - *vIt;
-                        maxDiff = std::max(maxDiff, diff);
-                        minDiff = std::min(minDiff, diff);
-                        *vIt = newValue;
-                    }
-
-                    // Check for convergence
-                    if ((maxDiff - minDiff) <= (relative ? (precision * (v.front() + minDiff)) : precision)) {
-                        break;
-                    }
-                    if (storm::utility::resources::isTerminate()) {
-                        break;
-                    }
-                    
-                    // Update the right-hand side of the MinMax equation system.
-                    ValueType referenceValue = v.front();
-                    storm::utility::vector::applyPointwise<ValueType, ValueType>(v, w, [&referenceValue] (ValueType const& v_i) -> ValueType { return v_i - referenceValue; });
-                    if (hasProbabilisticStates) {
-                        aProbabilisticToMarkovian.multiplyWithVector(w, b);
-                        storm::utility::vector::addVectors(b, probabilisticChoiceRewards, b);
-                    }
-                }
-                if (maxIter.is_initialized() && iter == maxIter.get()) {
-                    STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
-                } else {
-                    STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
-                }
-                return v.front() * uniformizationRate;
-            }
-            
             template std::vector<double> SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::pair<double, double> const& boundsPair);
                 
             template MDPSparseModelCheckingHelperReturnType<double> SparseMarkovAutomatonCslHelper::computeUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative, bool produceScheduler);
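For a single-state MEC, the removed shortcut evaluates to the reward rate directly: LRA = r_state(s) + E(s) * r_action(s, a) for the unique Markovian state s with choice a. For larger MECs, the removed computeLraForMaximalEndComponentLP encodes the following linear program (reconstructed from the constraints built above; E(s) is the exit rate, rew(s) the action reward of the unique Markovian choice plus the state reward weighted by 1/E(s), and rew(s,a) the action reward of choice a). For dir = Minimize the solver maximizes k with "<=" constraints; for dir = Maximize both are flipped:

    \begin{aligned}
    \text{maximize } & k \\
    \text{subject to } & x_s + \tfrac{k}{E(s)} \le \mathrm{rew}(s) + \textstyle\sum_{s'} P(s,s')\, x_{s'} && \text{for each Markovian state } s \text{ of the MEC}, \\
    & x_s \le \mathrm{rew}(s,a) + \textstyle\sum_{s'} P(s,a,s')\, x_{s'} && \text{for each probabilistic state } s \text{ and choice } a \text{ of the MEC}.
    \end{aligned}

The optimal value of k is the LRA value of the MEC; the x_s act as unbounded bias variables.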
@@ -1192,18 +738,8 @@ namespace storm {
 
             template MDPSparseModelCheckingHelperReturnType<double> SparseMarkovAutomatonCslHelper::computeTotalRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool produceScheduler);
 
-            template std::vector<double> SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates);
-                
-            template std::vector<double> SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel);
-            
             template MDPSparseModelCheckingHelperReturnType<double> SparseMarkovAutomatonCslHelper::computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler);
             
-            template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-                
-            template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-            
-            template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-            
             template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::pair<double, double> const& boundsPair);
                 
             template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative, bool produceScheduler);
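The removed value-iteration variant rests on uniformization: with rate u = (1 + aperiodicFactor) * max over Markovian MEC states of E(s), the Markovian rows are rescaled as in the loop above, i.e.

    \tilde{P}(s,s') = \tfrac{E(s)}{u}\, P(s,s') \;\text{ for } s' \ne s,
    \qquad \tilde{P}(s,s) = 1 - \tfrac{E(s)}{u}\bigl(1 - P(s,s)\bigr),

which gives every Markovian state a self-loop and thus makes the chain aperiodic. The iteration stops once the span of the value differences is small enough, i.e. (max_s d_s - min_s d_s) <= eps, with eps the LRA precision divided by u (scaled by v_0 + min_s d_s under the relative criterion), and v_0 * u is returned as the LRA value.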
@@ -1212,18 +748,7 @@ namespace storm {
 
             template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeTotalRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool produceScheduler);
 
-            template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates);
-            
-            template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel);
-            
             template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler);
-                
-            template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-            
-            template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-            
-            template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-                
         }
     }
 }
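Schematically, the SSP reduction behind the removed computeLongRunAverageRewards collapses every MEC i into one auxiliary state m_i and solves (a reconstruction of the construction above, not code from this patch):

    x(s) = \operatorname{opt}_{a}\Bigl(\sum_{s' \notin \mathrm{MECs}} P(s,a,s')\, x(s') + \sum_{i} P(s,a,\mathrm{MEC}_i)\, x(m_i)\Bigr) \quad \text{for } s \notin \mathrm{MECs},

where each m_i inherits all MEC-exiting choices and additionally offers a "stay" choice of value lambda_i, the LRA value computed for MEC i in isolation; every state inside MEC i then receives x(m_i) as its result.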
diff --git a/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h b/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h
index ed0dfddef..e52017aed 100644
--- a/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h
+++ b/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h
@@ -34,37 +34,9 @@ namespace storm {
                 template <typename ValueType, typename RewardModelType>
                 static MDPSparseModelCheckingHelperReturnType<ValueType> computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::BitVector const& psiStates, bool produceScheduler);
 
-                template <typename ValueType>
-                static std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates);
-                
-                template <typename ValueType, typename RewardModelType>
-                static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel);
-                
                 template <typename ValueType>
                 static MDPSparseModelCheckingHelperReturnType<ValueType> computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler);
                 
-            private:
-                /*!
-                 * Computes the long-run average value for the given maximal end component of a Markov automaton.
-                 *
-                 * Implementations are based on Linear Programming (LP) and Value Iteration (VI).
-                 *
-                 * @param dir Sets whether the long-run average is to be minimized or maximized.
-                 * @param transitionMatrix The transition matrix of the underlying Markov automaton.
-                 * @param markovianStates A bit vector storing all Markovian states.
-                 * @param exitRateVector A vector with exit rates for all states. Exit rates of probabilistic states are
-                 * assumed to be zero.
-                 * @param rewardModel The considered reward model. Action rewards are earned instantaneously;
-                 * state rewards accumulate over time.
-                 * @param mec The maximal end component to consider for computing the long-run average.
-                 * @return The long-run average value for the given MEC.
-                 */
-                template <typename ValueType, typename RewardModelType>
-                static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-                template <typename ValueType, typename RewardModelType>
-                static ValueType computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-                template <typename ValueType, typename RewardModelType>
-                static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
                 
             };
             

From 35c57fe980126beb5d1e93a1a4df7ea30736fcfc Mon Sep 17 00:00:00 2001
From: TimQu <tim.quatmann@cs.rwth-aachen.de>
Date: Fri, 7 Aug 2020 11:52:59 +0200
Subject: [PATCH 25/48] LraViHelper: Moved component utility functions into a
 separate file.

---
 .../internal/ComponentUtility.h               | 22 +++++++++++++++++++
 .../infinitehorizon/internal/LraViHelper.cpp  | 14 ++----------
 2 files changed, 24 insertions(+), 12 deletions(-)
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h

diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h b/src/storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h
new file mode 100644
index 000000000..221dc0430
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h
@@ -0,0 +1,22 @@
+#pragma once
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+            namespace internal {
+                
+                /// Auxiliary functions that deal with the different kinds of components (MECs on potentially nondeterministic models and BSCCs on deterministic models)
+                // BSCCs:
+                inline uint64_t getComponentElementState(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return element; }
+                inline uint64_t getComponentElementChoiceCount(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return 1; } // Assumes a deterministic model!
+                inline uint64_t const* getComponentChoicesBegin(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element; }
+                inline uint64_t const* getComponentChoicesEnd(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element + 1; }
+                // MECs:
+                inline uint64_t getComponentElementState(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.first; }
+                inline uint64_t getComponentElementChoiceCount(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.size(); }
+                inline typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentChoicesBegin(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.begin(); }
+                inline typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentChoicesEnd(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.end(); }
+            }
+        }
+    }
+}
\ No newline at end of file
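These overloads let component-generic code such as LraViHelper traverse states and choices without knowing whether the component is a BSCC (plain state indices) or a MEC (state-to-choice-set pairs). A minimal usage sketch; countComponentChoices is an illustrative name and not part of this patch:

    // Illustrative only: sums the number of choices over all elements of a
    // component, relying solely on the overloads above. Instantiates for both
    // StronglyConnectedComponent and MaximalEndComponent.
    template<typename ComponentType>
    uint64_t countComponentChoices(ComponentType const& component) {
        uint64_t numChoices = 0;
        for (auto const& element : component) {
            numChoices += getComponentElementChoiceCount(element);
        }
        return numChoices;
    }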
diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
index af4ab9dfd..3e4dfaa69 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
@@ -1,5 +1,7 @@
 #include "LraViHelper.h"
 
+#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
+
 #include "storm/solver/LinearEquationSolver.h"
 #include "storm/solver/MinMaxLinearEquationSolver.h"
 #include "storm/solver/Multiplier.h"
@@ -25,18 +27,6 @@ namespace storm {
         namespace helper {
             namespace internal {
                 
-                /// Auxiliary functions that deal with the different kinds of components (MECs on potentially nondeterministic models and BSCCs on deterministic models)
-                // BSCCS:
-                uint64_t getComponentElementState(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return element; }
-                uint64_t getComponentElementChoiceCount(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return 1; } // Assumes deterministic model!
-                uint64_t const* getComponentChoicesBegin(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element; }
-                uint64_t const* getComponentChoicesEnd(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element + 1; }
-                // MECS:
-                uint64_t getComponentElementState(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.first; }
-                uint64_t getComponentElementChoiceCount(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.size(); }
-                typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentChoicesBegin(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.begin(); }
-                typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentChoicesEnd(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.end(); }
-                
                 template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
                 LraViHelper<ValueType, ComponentType, TransitionsType>::LraViHelper(ComponentType const& component, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, ValueType const& aperiodicFactor, storm::storage::BitVector const* timedStates, std::vector<ValueType> const* exitRates) : _component(component), _transitionMatrix(transitionMatrix), _timedStates(timedStates), _hasInstantStates(TransitionsType == LraViTransitionsType::DetTsNondetIs || TransitionsType == LraViTransitionsType::DetTsDetIs) {
                     // Run through the component and collect some data:

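Note on the ComponentUtility overloads introduced above: they exist so that generic helper code can traverse a long-run component without knowing whether it is a BSCC (an element is just a state index) or a MEC (an element maps a state to its set of choices). A minimal sketch of the intended usage, assuming ComponentUtility.h is included; the forEachChoice helper and processChoice callback are illustrative, not part of the patch:

    // One generic loop covers both component kinds, because the
    // getComponentElement*/getComponentChoices* overloads resolve per element type.
    template<typename ComponentType, typename Callback>
    void forEachChoice(ComponentType const& component, Callback const& processChoice) {
        using namespace storm::modelchecker::helper::internal;
        for (auto const& element : component) {
            uint64_t state = getComponentElementState(element);
            for (auto choiceIt = getComponentChoicesBegin(element); choiceIt != getComponentChoicesEnd(element); ++choiceIt) {
                processChoice(state, *choiceIt); // *choiceIt is a global choice (row) index
            }
        }
    }
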
From 485d75f466a1c1ddb2b25135a5f273029fdd6ea9 Mon Sep 17 00:00:00 2001
From: TimQu <tim.quatmann@cs.rwth-aachen.de>
Date: Fri, 7 Aug 2020 11:53:41 +0200
Subject: [PATCH 26/48] Towards using the infinite horizon helpers also for
 deterministic models.

---
 ...eNondeterministicInfiniteHorizonHelper.cpp | 282 +++++++++++-------
 ...rseNondeterministicInfiniteHorizonHelper.h |  75 +++--
 2 files changed, 234 insertions(+), 123 deletions(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
index cc68d9fb7..5cb86d83e 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
@@ -1,7 +1,14 @@
 #include "SparseNondeterministicInfiniteHorizonHelper.h"
 
+#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
 #include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
 
+#include "storm/models/sparse/StandardRewardModel.h"
+
+#include "storm/storage/SparseMatrix.h"
+#include "storm/storage/MaximalEndComponentDecomposition.h"
+#include "storm/storage/StronglyConnectedComponentDecomposition.h"
+
 #include "storm/solver/MinMaxLinearEquationSolver.h"
 #include "storm/solver/LinearEquationSolver.h"
 #include "storm/solver/Multiplier.h"
@@ -20,38 +27,38 @@ namespace storm {
     namespace modelchecker {
         namespace helper {
         
-            template <typename ValueType>
-            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _mecDecomposition(nullptr), _markovianStates(nullptr), _exitRates(nullptr) {
+            template <typename ValueType, bool Nondeterministic>
+            SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr), _markovianStates(nullptr), _exitRates(nullptr) {
                 // Intentionally left empty.
             }
             
-            template <typename ValueType>
-            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _mecDecomposition(nullptr), _markovianStates(&markovianStates), _exitRates(&exitRates) {
+            template <typename ValueType, bool Nondeterministic>
+            SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr), _markovianStates(&markovianStates), _exitRates(&exitRates) {
                 // Intentionally left empty.
             }
             
-            template <typename ValueType>
-            void SparseNondeterministicInfiniteHorizonHelper<ValueType>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
-                STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or set before.");
+            template <typename ValueType, bool Nondeterministic>
+            void SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
+                STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or provided before.");
                 _backwardTransitions = &backwardTransitions;
             }
             
-            template <typename ValueType>
-            void SparseNondeterministicInfiniteHorizonHelper<ValueType>::provideMaximalEndComponentDecomposition(storm::storage::MaximalEndComponentDecomposition<ValueType> const& mecDecomposition) {
-                STORM_LOG_WARN_COND(_mecDecomposition == nullptr, "Backwards transitions were provided but they were already computed or set before.");
-                _mecDecomposition = &mecDecomposition;
+            template <typename ValueType, bool Nondeterministic>
+            void SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition) {
+                STORM_LOG_WARN_COND(_longRunComponentDecomposition == nullptr, "Long Run Component Decomposition was provided but it was already computed or provided before.");
+                _longRunComponentDecomposition = &decomposition;
             }
             
-            template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
+            template <typename ValueType, bool Nondeterministic>
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
                 return computeLongRunAverageValues(env,
                             [&psiStates] (uint64_t stateIndex) { return psiStates.get(stateIndex) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>(); },
                             [] (uint64_t) { return storm::utility::zero<ValueType>(); }
                     );
             }
             
-            template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
+            template <typename ValueType, bool Nondeterministic>
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
                 std::function<ValueType(uint64_t stateIndex)> stateRewardsGetter;
                 if (rewardModel.hasStateRewards()) {
                     stateRewardsGetter = [&rewardModel] (uint64_t stateIndex) { return rewardModel.getStateReward(stateIndex); };
@@ -72,8 +79,8 @@ namespace storm {
                 return computeLongRunAverageValues(env, stateRewardsGetter, actionRewardsGetter);
             }
             
-            template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) {
+            template <typename ValueType, bool Nondeterministic>
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) {
                 std::function<ValueType(uint64_t stateIndex)> stateValuesGetter;
                 if (stateValues) {
                     stateValuesGetter = [&stateValues] (uint64_t stateIndex) { return (*stateValues)[stateIndex]; };
@@ -91,8 +98,8 @@ namespace storm {
 
             }
             
-            template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter) {
+            template <typename ValueType, bool Nondeterministic>
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter) {
                // We will compute the long run average value for each MEC individually and then set up a MinMax equation system to compute the value also at non-MEC states.
                 // For a description of this approach see, e.g., Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
                 
@@ -108,51 +115,58 @@ namespace storm {
                 }
                 
                 // If requested, allocate memory for the choices made
-                if (this->isProduceSchedulerSet()) {
+                if (Nondeterministic && this->isProduceSchedulerSet()) {
                     if (!_producedOptimalChoices.is_initialized()) {
                         _producedOptimalChoices.emplace();
                     }
                     _producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
                 }
+                STORM_LOG_ASSERT(Nondeterministic || !this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
                 
-                // Start by decomposing the Model into its MECs.
+                // Start by decomposing the model into its long run components (MECs or BSCCs).
-                if (_mecDecomposition == nullptr) {
+                if (_longRunComponentDecomposition == nullptr) {
                     // The decomposition has not been provided or computed, yet.
-                    if (_backwardTransitions == nullptr) {
-                        _computedBackwardTransitions = _transitionMatrix.transpose(true);
-                        _backwardTransitions = &_computedBackwardTransitions;
+                    if constexpr (Nondeterministic) {
+                        if (_backwardTransitions == nullptr) {
+                            _computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(_transitionMatrix.transpose(true));
+                            _backwardTransitions = _computedBackwardTransitions.get();
+                        }
+                        _computedLongRunComponentDecomposition = std::make_unique<storm::storage::MaximalEndComponentDecomposition<ValueType>>(_transitionMatrix, *_backwardTransitions);
+                    } else {
+                        _computedLongRunComponentDecomposition = std::make_unique<storm::storage::StronglyConnectedComponentDecomposition<ValueType>>(_transitionMatrix, storm::storage::StronglyConnectedComponentDecompositionOptions().onlyBottomSccs());
                     }
-                    _computedMecDecomposition = storm::storage::MaximalEndComponentDecomposition<ValueType>(_transitionMatrix, *_backwardTransitions);
-                    _mecDecomposition = &_computedMecDecomposition;
+                    _longRunComponentDecomposition = _computedLongRunComponentDecomposition.get();
                 }
 
-                // Compute the long-run average for all end components in isolation.
-                std::vector<ValueType> mecLraValues;
-                mecLraValues.reserve(_mecDecomposition->size());
-                for (auto const& mec : *_mecDecomposition) {
-                    mecLraValues.push_back(computeLraForMec(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, mec));
+                // Compute the long-run average for all components in isolation.
+                std::vector<ValueType> componentLraValues;
+                componentLraValues.reserve(_longRunComponentDecomposition->size());
+                for (auto const& c : *_longRunComponentDecomposition) {
+                    componentLraValues.push_back(computeLraForComponent(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, c));
                 }
                 
                 // Solve the resulting SSP where end components are collapsed into single auxiliary states
-                return buildAndSolveSsp(underlyingSolverEnvironment, mecLraValues);
+                return buildAndSolveSsp(underlyingSolverEnvironment, componentLraValues);
             }
             
-            template <typename ValueType>
-            std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const {
+            template <typename ValueType, bool Nondeterministic>
+            std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::getProducedOptimalChoices() const {
+                STORM_LOG_WARN_COND(Nondeterministic, "Getting optimal choices for deterministic model.");
                 STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
                 STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
                 return _producedOptimalChoices.get();
             }
             
-            template <typename ValueType>
-            std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() {
+            template <typename ValueType, bool Nondeterministic>
+            std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::getProducedOptimalChoices() {
+                STORM_LOG_WARN_COND(Nondeterministic, "Getting optimal choices for deterministic model.");
                 STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
                 STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
                 return _producedOptimalChoices.get();
             }
             
-            template <typename ValueType>
-            storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::extractScheduler() const {
+            template <typename ValueType, bool Nondeterministic>
+            storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::extractScheduler() const {
                 auto const& optimalChoices = getProducedOptimalChoices();
                 storm::storage::Scheduler<ValueType> scheduler(optimalChoices.size());
                 for (uint64_t state = 0; state < optimalChoices.size(); ++state) {
@@ -161,43 +175,46 @@ namespace storm {
                 return scheduler;
             }
             
-            template <typename ValueType>
-            bool SparseNondeterministicInfiniteHorizonHelper<ValueType>::isContinuousTime() const {
-                STORM_LOG_ASSERT((_markovianStates == nullptr) == (_exitRates == nullptr), "Inconsistent information given: Have Markovian states but no exit rates (or vice versa)." );
-                return _markovianStates != nullptr;
+            template <typename ValueType, bool Nondeterministic>
+            bool SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::isContinuousTime() const {
+                STORM_LOG_ASSERT((_markovianStates == nullptr) || (_exitRates != nullptr), "Inconsistent information given: Have Markovian states but no exit rates." );
+                return _exitRates != nullptr;
             }
     
-            template <typename ValueType>
-            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMec(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+            template <typename ValueType, bool Nondeterministic>
+            template <bool N, typename std::enable_if<!N, int>::type>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
+                // For deterministic models, we compute the LRA for a BSCC
                 
-                // If the mec only consists of a single state, we compute the LRA value directly
-                if (mec.size() == 1) {
-                    uint64_t state = mec.begin()->first;
-                    auto choiceIt = mec.begin()->second.begin();
-                    if (isContinuousTime()) {
-                        // Singleton MECs have to consist of a Markovian state because of the non-Zenoness assumption. Then, there is just one possible choice.
-                        STORM_LOG_THROW(_markovianStates->get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
-                        STORM_LOG_ASSERT(mec.begin()->second.size() == 1, "Markovian state has Nondeterministic behavior.");
-                        if (this->isProduceSchedulerSet()) {
-                            _producedOptimalChoices.get()[state] = 0;
-                        }
-                        return stateRewardsGetter(state) + (*_exitRates)[state] * actionRewardsGetter(*choiceIt);
-                    } else {
-                        // Find the choice with the highest/lowest reward
-                        ValueType bestValue = actionRewardsGetter(*choiceIt);
-                        uint64_t bestChoice = *choiceIt;
-                        for (++choiceIt; choiceIt != mec.begin()->second.end(); ++choiceIt) {
-                            ValueType currentValue = actionRewardsGetter(*choiceIt);
-                            if ((this->minimize() &&  currentValue < bestValue) || (this->maximize() && currentValue > bestValue)) {
-                                bestValue = std::move(currentValue);
-                                bestChoice = *choiceIt;
-                            }
-                        }
-                        if (this->isProduceSchedulerSet()) {
-                            _producedOptimalChoices.get()[state] = bestChoice - _transitionMatrix.getRowGroupIndices()[state];
-                        }
-                        return bestValue + stateRewardsGetter(state);
+                STORM_LOG_ASSERT(!this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
+                
+                auto trivialResult = computeLraForTrivialComponent(env, stateRewardsGetter, actionRewardsGetter, component);
+                if (trivialResult.first) {
+                    return trivialResult.second;
+                }
+                
+                // Solve nontrivial BSCC with the method specified in the settings
+                
+                // TODO: Solving nontrivial BSCCs is not implemented, yet. Return a dummy value so the function is well-formed until then.
+                return storm::utility::zero<ValueType>();
+            }
+            
+            template <typename ValueType, bool Nondeterministic>
+            template <bool N, typename std::enable_if<N, int>::type>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
+                // For models with potential nondeterminism, we compute the LRA for a maximal end component (MEC)
+                
+                // Allocate memory for the nondeterministic choices.
+                if (this->isProduceSchedulerSet()) {
+                    if (!_producedOptimalChoices.is_initialized()) {
+                        _producedOptimalChoices.emplace();
                     }
+                    _producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
+                }
+                
+                auto trivialResult = computeLraForTrivialComponent(env, stateRewardsGetter, actionRewardsGetter, component);
+                if (trivialResult.first) {
+                    return trivialResult.second;
                 }
                 
                 // Solve nontrivial MEC with the method specified in the settings
@@ -219,8 +236,64 @@ namespace storm {
                 }
             }
     
-            template <typename ValueType>
-            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+            template <typename ValueType, bool Nondeterministic>
+            std::pair<bool, ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForTrivialComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
+                
+                // If the component only consists of a single state, we compute the LRA value directly
+                if (component.size() == 1) {
+                    auto const& element = *component.begin();
+                    uint64_t state = internal::getComponentElementState(element);
+                    auto choiceIt = internal::getComponentChoicesBegin(element);
+                    if (Nondeterministic && !isContinuousTime()) {
+                        // This is an MDP.
+                        // Find the choice with the highest/lowest reward
+                        ValueType bestValue = actionRewardsGetter(*choiceIt);
+                        uint64_t bestChoice = *choiceIt;
+                        for (++choiceIt; choiceIt != internal::getComponentChoicesEnd(element); ++choiceIt) {
+                            ValueType currentValue = actionRewardsGetter(*choiceIt);
+                            if ((this->minimize() &&  currentValue < bestValue) || (this->maximize() && currentValue > bestValue)) {
+                                bestValue = std::move(currentValue);
+                                bestChoice = *choiceIt;
+                            }
+                        }
+                        if (this->isProduceSchedulerSet()) {
+                            _producedOptimalChoices.get()[state] = bestChoice - _transitionMatrix.getRowGroupIndices()[state];
+                        }
+                        bestValue += stateRewardsGetter(state);
+                        return {true, bestValue};
+                    } else {
+                        // In a Markov Automaton, singleton components have to consist of a Markovian state because of the non-Zenoness assumption. Then, there is just one possible choice.
+                        STORM_LOG_THROW(!Nondeterministic || (_markovianStates != nullptr && _markovianStates->get(state)), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
+                        STORM_LOG_ASSERT(internal::getComponentElementChoiceCount(element) == 1, "Markovian state has Nondeterministic behavior.");
+                        if (Nondeterministic && this->isProduceSchedulerSet()) {
+                            _producedOptimalChoices.get()[state] = 0;
+                        }
+                        ValueType result = stateRewardsGetter(state) + (isContinuousTime() ? (*_exitRates)[state] * actionRewardsGetter(*choiceIt) : actionRewardsGetter(*choiceIt));
+                        return {true, result};
+                    }
+                } else if (!Nondeterministic) {
+                    // For deterministic models, we can also easily catch the case where all values are the same
+                    bool first = true;
+                    ValueType val = storm::utility::zero<ValueType>();
+                    for (auto const& element : component) {
+                        auto state = internal::getComponentElementState(element);
+                        STORM_LOG_ASSERT(state == *internal::getComponentChoicesBegin(element), "Unexpected choice index at state " << state << " of deterministic model.");
+                        ValueType curr = stateRewardsGetter(state) + (isContinuousTime() ? (*_exitRates)[state] * actionRewardsGetter(state) : actionRewardsGetter(state));
+                        if (first) {
+                            val = curr; first = false;
+                        } else if (val != curr) {
+                            return {false, storm::utility::zero<ValueType>()};
+                        }
+                    }
+                    // All values are the same
+                    return {true, val};
+                } else {
+                    return {false, storm::utility::zero<ValueType>()};
+                }
+            }
+            
+            template <typename ValueType, bool Nondeterministic>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& mec) {
 
                 // Collect some parameters of the computation
                 ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
@@ -241,8 +314,8 @@ namespace storm {
                 }
             }
             
-            template <typename ValueType>
-            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+            template <typename ValueType, bool Nondeterministic>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& mec) {
                 // Create an LP solver
                 auto solver = storm::utility::solver::LpSolverFactory<ValueType>().create("LRA for MEC");
                 
@@ -311,11 +384,11 @@ namespace storm {
             
             /*!
              * Auxiliary function that adds the entries of the Ssp Matrix for a single choice (i.e., row)
-             * Transitions that lead to a MEC state will be redirected to a new auxiliary state (there is one aux. state for each MEC).
-             * Transitions that don't lead to a MEC state are copied (taking a state index mapping into account).
+             * Transitions that lead to a component state will be redirected to a new auxiliary state (there is one auxiliary state for each component).
+             * Transitions that don't lead to a component state are copied (taking a state index mapping into account).
              */
             template <typename ValueType>
-            void addSspMatrixChoice(uint64_t const& inputMatrixChoice, storm::storage::SparseMatrix<ValueType> const& inputTransitionMatrix, std::vector<uint64_t> const& inputToSspStateMap, uint64_t const& numberOfStatesNotInMecs, uint64_t const& currentSspChoice, storm::storage::SparseMatrixBuilder<ValueType>& sspMatrixBuilder) {
+            void addSspMatrixChoice(uint64_t const& inputMatrixChoice, storm::storage::SparseMatrix<ValueType> const& inputTransitionMatrix, std::vector<uint64_t> const& inputToSspStateMap, uint64_t const& numberOfStatesNotInComponents, uint64_t const& currentSspChoice, storm::storage::SparseMatrixBuilder<ValueType>& sspMatrixBuilder) {
             
                 // As there could be multiple transitions to the same MEC, we accumulate them in this map before adding them to the matrix builder.
                 std::map<uint64_t, ValueType> auxiliaryStateToProbabilityMap;
@@ -323,17 +396,17 @@ namespace storm {
                 for (auto const& transition : inputTransitionMatrix.getRow(inputMatrixChoice)) {
                     if (!storm::utility::isZero(transition.getValue())) {
                         auto const& sspTransitionTarget = inputToSspStateMap[transition.getColumn()];
-                        // Since the auxiliary MEC states are appended at the end of the matrix, we can use this check to
-                        // decide whether the transition leads to a MEC state or not
+                        // Since the auxiliary component states are appended at the end of the matrix, we can use this check to
+                        // decide whether the transition leads to a component state or not
-                        if (sspTransitionTarget < numberOfStatesNotInMecs) {
+                        if (sspTransitionTarget < numberOfStatesNotInComponents) {
-                            // If the target state is not contained in a MEC, we can copy over the entry.
+                            // If the target state is not contained in a component, we can copy over the entry.
                             sspMatrixBuilder.addNextValue(currentSspChoice, sspTransitionTarget, transition.getValue());
                         } else {
-                            // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
-                            // so that we are able to write the cumulative probability to the MEC into the matrix.
+                            // If the target state is contained in component i, we need to add the probability to the corresponding field in the vector
+                            // so that we are able to write the cumulative probability to the component into the matrix later.
                             auto insertionRes = auxiliaryStateToProbabilityMap.emplace(sspTransitionTarget, transition.getValue());
                             if (!insertionRes.second) {
-                                // sspTransitionTarget already existed in the map, i.e., there already was a transition to that MEC.
+                                // sspTransitionTarget already existed in the map, i.e., there already was a transition to that component.
                                 // Hence, we add up the probabilities.
                                 insertionRes.first->second += transition.getValue();
                             }
@@ -341,30 +414,28 @@ namespace storm {
                     }
                 }
                 
-                // Now insert all (cumulative) probability values that target a MEC.
-                for (auto const& mecToProbEntry : auxiliaryStateToProbabilityMap) {
-                    sspMatrixBuilder.addNextValue(currentSspChoice, mecToProbEntry.first, mecToProbEntry.second);
+                // Now insert all (cumulative) probability values that target a component.
+                for (auto const& componentToProbEntry : auxiliaryStateToProbabilityMap) {
+                    sspMatrixBuilder.addNextValue(currentSspChoice, componentToProbEntry.first, componentToProbEntry.second);
                 }
             }
             
-            template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) {
-                STORM_LOG_ASSERT(_mecDecomposition != nullptr, "Decomposition not computed, yet.");
+            template <typename ValueType, bool Nondeterministic>
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) {
+                STORM_LOG_ASSERT(_longRunComponentDecomposition != nullptr, "Decomposition not computed, yet.");
                 
-                // Let's improve readability a bit
                 uint64_t numberOfStates = _transitionMatrix.getRowGroupCount();
-                auto const& nondeterministicChoiceIndices = _transitionMatrix.getRowGroupIndices();
                 
                 // For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
-                // which redirects all transitions leading to a former MEC state to a new auxiliary state.
-                // There will be one auxiliary state for each MEC. These states will be appended to the end of the matrix.
+                // which redirects all transitions leading to a former component state to a new auxiliary state.
+                // There will be one auxiliary state for each component. These states will be appended to the end of the matrix.
                 
-                // First gather the states that are part of a MEC
-                // and create a mapping from states that lie in a MEC to the corresponding MEC index.
+                // First gather the states that are part of a component
+                // and create a mapping from states that lie in a component to the corresponding component index.
                 storm::storage::BitVector statesInMecs(numberOfStates);
                 std::vector<uint64_t> inputToSspStateMap(numberOfStates, std::numeric_limits<uint64_t>::max());
-                for (uint64_t currentMecIndex = 0; currentMecIndex < _mecDecomposition->size(); ++currentMecIndex) {
-                    for (auto const& stateChoicesPair : (*_mecDecomposition)[currentMecIndex]) {
+                for (uint64_t currentMecIndex = 0; currentMecIndex < _longRunComponentDecomposition->size(); ++currentMecIndex) {
+                    for (auto const& stateChoicesPair : (*_longRunComponentDecomposition)[currentMecIndex]) {
                         statesInMecs.set(stateChoicesPair.first);
                         inputToSspStateMap[stateChoicesPair.first] = currentMecIndex;
                     }
@@ -389,7 +460,7 @@ namespace storm {
                 
                 // The next step is to create the SSP matrix and the right-hand side of the SSP.
                 std::vector<ValueType> rhs;
-                uint64_t numberOfSspStates = numberOfStatesNotInMecs + _mecDecomposition->size();
+                uint64_t numberOfSspStates = numberOfStatesNotInMecs + _longRunComponentDecomposition->size();
                 typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates , 0, false, true, numberOfSspStates);
                 // If the source state of a transition is not contained in any MEC, we copy its choices (and perform the necessary modifications).
                 uint64_t currentSspChoice = 0;
@@ -402,8 +473,8 @@ namespace storm {
                     }
                 }
                 // Now we construct the choices for the auxiliary states which reflect former MEC states.
-                for (uint64_t mecIndex = 0; mecIndex < _mecDecomposition->size(); ++mecIndex) {
-                    storm::storage::MaximalEndComponent const& mec = (*_mecDecomposition)[mecIndex];
+                for (uint64_t mecIndex = 0; mecIndex < _longRunComponentDecomposition->size(); ++mecIndex) {
+                    storm::storage::MaximalEndComponent const& mec = (*_longRunComponentDecomposition)[mecIndex];
                     sspMatrixBuilder.newRowGroup(currentSspChoice);
                     for (auto const& stateChoicesPair : mec) {
                         uint64_t const& mecState = stateChoicesPair.first;
@@ -461,7 +532,7 @@ namespace storm {
                     //      a) we take an exit (non-MEC) choice at the given state
                     //      b) we have to take a MEC choice at the given state in a way that eventually an exit state of the MEC is reached
                     uint64_t exitChoiceOffset = sspMatrix.getRowGroupIndices()[numberOfStatesNotInMecs];
-                    for (auto const& mec : *_mecDecomposition) {
+                    for (auto const& mec : *_longRunComponentDecomposition) {
                         // Get the sspState of this MEC (using one representative mec state)
                         auto const& sspState = inputToSspStateMap[mec.begin()->first];
                         uint64_t sspChoiceIndex = sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState];
@@ -533,8 +604,11 @@ namespace storm {
                 return result;
             }
             
-            template class SparseNondeterministicInfiniteHorizonHelper<double>;
-            template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber>;
+            template class SparseNondeterministicInfiniteHorizonHelper<double, false>;
+            template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber, false>;
+            
+            //template class SparseNondeterministicInfiniteHorizonHelper<double, true>;
+            //template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber, true>;
         }
     }
 }
\ No newline at end of file
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
index 3a63e7946..c11a87a90 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
@@ -1,23 +1,47 @@
 #pragma once
 #include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
 
-#include "storm/storage/SparseMatrix.h"
-#include "storm/storage/MaximalEndComponentDecomposition.h"
-#include "storm/models/sparse/StandardRewardModel.h"
+#include <functional>
+#include <type_traits>
 
 namespace storm {
     class Environment;
     
+    namespace models {
+        namespace sparse {
+            template <typename VT> class StandardRewardModel;
+        }
+    }
+    namespace storage {
+        template <typename C> class Decomposition;
+        class MaximalEndComponent;
+        template <typename VT> class SparseMatrix;
+        class StronglyConnectedComponent;
+    }
+    
     namespace modelchecker {
         namespace helper {
         
             /*!
              * Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system.
+             * @tparam ValueType the type a value can have
+             * @tparam Nondeterministic true if there is nondeterminism in the model (MDP or MA)
              */
-            template <typename ValueType>
+            template <typename ValueType, bool Nondeterministic>
             class SparseNondeterministicInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType> {
 
             public:
+                
+                /*!
+                 * The type of a component in which the system resides in the long run (BSCC for deterministic models, MEC for nondeterministic models)
+                 */
+                using LongRunComponentType = typename std::conditional<Nondeterministic, storm::storage::MaximalEndComponent, storm::storage::StronglyConnectedComponent>::type;
+                
+                /*!
+                 * Function mapping from indices to values
+                 */
+                typedef std::function<ValueType(uint64_t)> ValueGetter;
+                
                 /*!
                  * Initializes the helper for a discrete time (i.e. MDP)
                  */
@@ -36,11 +58,11 @@ namespace storm {
                 void provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardsTransitions);
     
                 /*!
-                 * Provides the maximal end component decomposition that can be used during the computation.
-                 * Providing the decomposition is optional. If they are not provided, they will be computed internally
+                 * Provides the decomposition into long run components (BSCCs/MECs) that can be used during the computation.
+                 * Providing the decomposition is optional. If it is not provided, it will be computed internally.
                  * Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the decomposition remains valid.
                  */
-                void provideMaximalEndComponentDecomposition(storm::storage::MaximalEndComponentDecomposition<ValueType> const& decomposition);
+                void provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition);
                 
                 /*!
                  * Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
@@ -55,16 +77,20 @@ namespace storm {
                 std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel);
                 
                 /*!
-                 * Computes the long run average value given the provided action-based rewards
+                 * Computes the long run average value given the provided state- and action-based rewards.
+                 * @param stateValues a vector containing a value for every state
+                 * @param actionValues a vector containing a value for every choice
                  * @return a value for each state
                  */
                 std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues = nullptr, std::vector<ValueType> const* actionValues = nullptr);
                 
                 /*!
-                 * Computes the long run average value given the provided state-action-based rewards
+                 * Computes the long run average value given the provided state- and action-based rewards
+                 * @param stateValuesGetter a function returning a value for a given state index
+                 * @param actionValuesGetter a function returning a value for a given (global) choice index
                  * @return a value for each state
                  */
-                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter);
+                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter);
                 
                 /*!
                  * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
@@ -84,6 +110,17 @@ namespace storm {
                  */
                 storm::storage::Scheduler<ValueType> extractScheduler() const;
 
+                /*!
+                 * @param stateValuesGetter a function returning a value for a given state index
+                 * @param actionValuesGetter a function returning a value for a given (global) choice index
+                 * @return the (unique) optimal LRA value for the given component.
+                 * @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
+                 */
+                template <bool N = Nondeterministic, typename std::enable_if<N, int>::type = 0>
+                ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, LongRunComponentType const& component);
+                template <bool N = Nondeterministic, typename std::enable_if<!N, int>::type = 0>
+                ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, LongRunComponentType const& component);
+                
             protected:
                 
                 /*!
@@ -92,21 +129,21 @@ namespace storm {
                 bool isContinuousTime() const;
                 
                 /*!
-                 * @pre if scheduler production is enabled, the _producedOptimalChoices vector should be initialized and sufficiently large
-                 * @return the (unique) optimal LRA value for the given mec.
-                 * @post _producedOptimalChoices contains choices for the states of the given MEC which yield the returned LRA value.
+                 * Checks if the component can trivially be solved without much overhead.
+                 * @return either true and the (unique) optimal LRA value for the given component or false and an arbitrary value
+                 * @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
                  */
-                ValueType computeLraForMec(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
+                std::pair<bool, ValueType> computeLraForTrivialComponent(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, LongRunComponentType const& component);
                 
                 /*!
-                 * As computeLraForMec but uses value iteration as a solution method (independent of what is set in env)
+                 * As computeLraForComponent but uses value iteration as a solution method (independent of what is set in env)
                  */
-                ValueType computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
+                ValueType computeLraForMecVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& mec);
                 /*!
-                 * As computeLraForMec but uses linear programming as a solution method (independent of what is set in env)
+                 * As computeLraForComponent but uses linear programming as a solution method (independent of what is set in env)
                  * @see Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
                  */
-                ValueType computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
+                ValueType computeLraForMecLp(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& mec);
                 
                 /*!
                  * @return Lra values for each state
@@ -116,9 +153,9 @@ namespace storm {
             private:
                 storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
                 storm::storage::SparseMatrix<ValueType> const* _backwardTransitions;
-                storm::storage::SparseMatrix<ValueType> _computedBackwardTransitions;
-                storm::storage::MaximalEndComponentDecomposition<ValueType> const* _mecDecomposition;
-                storm::storage::MaximalEndComponentDecomposition<ValueType> _computedMecDecomposition;
+                std::unique_ptr<storm::storage::SparseMatrix<ValueType>> _computedBackwardTransitions;
+                storm::storage::Decomposition<LongRunComponentType> const* _longRunComponentDecomposition;
+                std::unique_ptr<storm::storage::Decomposition<LongRunComponentType>> _computedLongRunComponentDecomposition;
                 storm::storage::BitVector const* _markovianStates;
                 std::vector<ValueType> const* _exitRates;
                 boost::optional<std::vector<uint64_t>> _producedOptimalChoices;

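Note on the computeLraForComponent overloads: both the declarations (header) and the out-of-class definitions (cpp) rely on SFINAE over a deferred copy of the class's Nondeterministic parameter; the enable_if condition has to depend on a parameter of the function template itself, otherwise the two overloads are ill-formed. A self-contained sketch of the pattern, with illustrative names rather than Storm code:

    #include <iostream>
    #include <type_traits>

    template <bool Nondeterministic>
    struct Helper {
        // Deferring the class parameter into the local parameter N makes the
        // enable_if condition dependent, so for each instantiation of Helper
        // exactly one of the two overloads survives substitution.
        template <bool N = Nondeterministic, typename std::enable_if<N, int>::type = 0>
        void compute() { std::cout << "MEC-based (nondeterministic) path\n"; }

        template <bool N = Nondeterministic, typename std::enable_if<!N, int>::type = 0>
        void compute() { std::cout << "BSCC-based (deterministic) path\n"; }
    };

    int main() {
        Helper<true>{}.compute();  // selects the MEC path
        Helper<false>{}.compute(); // selects the BSCC path
    }
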
From 05d2af2bfd3f247195f0869c27ac04b4ced87854 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 10 Aug 2020 18:34:00 +0200
Subject: [PATCH 27/48] Fixing destructors of model checker helpers.

---
 src/storm/modelchecker/helper/ModelCheckerHelper.h            | 2 +-
 src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/storm/modelchecker/helper/ModelCheckerHelper.h b/src/storm/modelchecker/helper/ModelCheckerHelper.h
index 02ee35531..bdf708d07 100644
--- a/src/storm/modelchecker/helper/ModelCheckerHelper.h
+++ b/src/storm/modelchecker/helper/ModelCheckerHelper.h
@@ -23,7 +23,7 @@ namespace storm {
                 typedef VT ValueType;
 
                 ModelCheckerHelper() = default;
-                ~ModelCheckerHelper() = default;
+                virtual ~ModelCheckerHelper() = default;
                 
                 /*!
                  * Identifies a subset of the model states
diff --git a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
index 82184ad12..2b7776c3d 100644
--- a/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
+++ b/src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
@@ -19,7 +19,6 @@ namespace storm {
             public:
 
                 SingleValueModelCheckerHelper();
-                ~SingleValueModelCheckerHelper() = default;
                 
                 /*!
                  * Sets the optimization direction, i.e., whether we want to minimize or maximize the value for each state

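Note on the destructor fix: the helpers are used polymorphically, and deleting a derived helper through a base-class pointer is undefined behavior unless the base destructor is virtual. A minimal self-contained illustration (names are illustrative, not Storm code):

    #include <memory>

    struct Base {
        virtual ~Base() = default; // without 'virtual', deleting Derived via Base* is undefined behavior
    };

    struct Derived : Base {
        ~Derived() { /* release resources owned by Derived */ }
    };

    int main() {
        // The deleter of p calls ~Base() virtually, which dispatches to ~Derived().
        std::unique_ptr<Base> p = std::make_unique<Derived>();
    }
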
From f316bb9d38f2667ae060d31c49d60acd79ce9950 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 10 Aug 2020 18:35:12 +0200
Subject: [PATCH 28/48] Added missing include.

---
 .../HybridNondeterministicInfiniteHorizonHelper.cpp             | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp
index b9868f5a9..fa28e486a 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp
@@ -3,6 +3,8 @@
 #include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
 #include "storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h"
 
+#include "storm/storage/SparseMatrix.h"
+
 #include "storm/utility/macros.h"
 
 #include "storm/exceptions/NotSupportedException.h"

From 572e7ace9de535cce6d251781296d6ef33697710 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 10 Aug 2020 18:35:58 +0200
Subject: [PATCH 29/48] Moving some includes to the header file

---
 .../helper/infinitehorizon/internal/LraViHelper.cpp | 13 +++++--------
 .../helper/infinitehorizon/internal/LraViHelper.h   |  8 +++-----
 2 files changed, 8 insertions(+), 13 deletions(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
index 3e4dfaa69..2b76f9eee 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
@@ -2,9 +2,6 @@
 
 #include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
 
-#include "storm/solver/LinearEquationSolver.h"
-#include "storm/solver/MinMaxLinearEquationSolver.h"
-#include "storm/solver/Multiplier.h"
 
 #include "storm/storage/MaximalEndComponent.h"
 #include "storm/storage/StronglyConnectedComponent.h"
@@ -97,7 +94,7 @@ namespace storm {
                             }
                             ValueType selfLoopProb = storm::utility::one<ValueType>() - uniformizationFactor;
                             uint64_t selfLoopColumn = toSubModelStateMapping[componentState];
-                            for (auto componentChoiceIt = getComponentChoicesBegin(element); componentChoiceIt != getComponentChoicesEnd(element); ++componentChoiceIt) {
+                            for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) {
                                 bool insertedDiagElement = false;
                                 for (auto const& entry : this->_transitionMatrix.getRow(*componentChoiceIt)) {
                                     uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()];
@@ -135,7 +132,7 @@ namespace storm {
                                 isTransitionsBuilder.newRowGroup(currIsRow);
                                 isToTsTransitionsBuilder.newRowGroup(currIsRow);
                             }
-                            for (auto componentChoiceIt = getComponentChoicesBegin(element); componentChoiceIt != getComponentChoicesEnd(element); ++componentChoiceIt) {
+                            for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) {
                                 for (auto const& entry : this->_transitionMatrix.getRow(*componentChoiceIt)) {
                                     uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()];
                                     if (isTimedState(entry.getColumn())) {
@@ -226,12 +223,12 @@ namespace storm {
                             if (exitRates) {
                                 actionRewardScalingFactor = (*exitRates)[componentState] / _uniformizationRate;
                             }
-                            for (auto componentChoiceIt = getComponentChoicesBegin(element); componentChoiceIt != getComponentChoicesEnd(element); ++componentChoiceIt) {
+                            for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) {
                                 // Compute the values obtained for this choice.
                                 _TsChoiceValues.push_back(stateValueGetter(componentState) / _uniformizationRate + actionValueGetter(*componentChoiceIt) * actionRewardScalingFactor);
                             }
                         } else {
-                            for (auto componentChoiceIt = getComponentChoicesBegin(element); componentChoiceIt != getComponentChoicesEnd(element); ++componentChoiceIt) {
+                            for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) {
                                 // Compute the values obtained for this choice.
                                 // State values do not count here since no time passes in instant states.
                                 _IsChoiceValues.push_back(actionValueGetter(*componentChoiceIt));
@@ -329,7 +326,7 @@ namespace storm {
                         } else {
                             uint64_t choice = localMecChoices[localState];
                             STORM_LOG_ASSERT(choice < getComponentElementChoiceCount(element), "The selected choice does not seem to exist.");
-                            uint64_t globalChoiceIndex = *(getComponentChoicesBegin(element) + choice);
+                            uint64_t globalChoiceIndex = *(getComponentElementChoicesBegin(element) + choice);
                             choices[elementState] = globalChoiceIndex - _transitionMatrix.getRowGroupIndices()[elementState];
                             ++localState;
                         }
diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h
index 3fb5066c4..7cb62e9a6 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h
@@ -2,15 +2,13 @@
 
 
 #include "storm/storage/SparseMatrix.h"
+#include "storm/solver/LinearEquationSolver.h"
+#include "storm/solver/MinMaxLinearEquationSolver.h"
+#include "storm/solver/Multiplier.h"
 
 namespace storm {
     class Environment;
     
-    namespace solver {
-        template<typename ValueType> class LinearEquationSolver;
-        template<typename ValueType> class MinMaxLinearEquationSolver;
-        template<typename ValueType> class Multiplier;
-    }
     
     namespace modelchecker {
         namespace helper {
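
For context on this move: the forward declarations were sufficient while the header referred to the solver types only through pointers and references; including the full definitions instead makes the complete types available to everything that includes LraViHelper.h. A minimal, self-contained sketch of that distinction (illustrative names, not Storm code):

    // Forward declaration: Solver is an incomplete type from here on.
    class Solver;

    struct Helper {
        Solver* solver = nullptr;                   // OK: pointer to an incomplete type
        void setSolver(Solver& s) { solver = &s; }  // OK: reference only
        // Solver byValue;                          // would not compile: needs the complete type
    };

    // The complete definition (normally pulled in via #include).
    class Solver {
    public:
        int solve() { return 42; }
    };

    int main() {
        Solver s;
        Helper h;
        h.setSolver(s);
        return h.solver->solve() == 42 ? 0 : 1;     // complete type available here
    }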

From f145aa2c948d07bc8c04d944d57300bc0e65c773 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 10 Aug 2020 18:37:45 +0200
Subject: [PATCH 30/48] Adding includes for component utility. Making functions
 inline.

---
 .../internal/ComponentUtility.h               | 21 ++++++++++++-------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h b/src/storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h
index 221dc0430..2a9d62415 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h
@@ -1,5 +1,8 @@
 #pragma once
 
+#include "storm/storage/StronglyConnectedComponent.h"
+#include "storm/storage/MaximalEndComponent.h"
+
 namespace storm {
     namespace modelchecker {
         namespace helper {
@@ -7,15 +10,17 @@ namespace storm {
                 
                 /// Auxiliary functions that deal with the different kinds of components (MECs on potentially nondeterministic models and BSCCs on deterministic models)
                 // BSCCS:
-                uint64_t inline getComponentElementState(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return element; }
-                uint64_t inline getComponentElementChoiceCount(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return 1; } // Assumes deterministic model!
-                uint64_t inline const* getComponentChoicesBegin(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element; }
-                uint64_t inline const* getComponentChoicesEnd(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element + 1; }
+                inline uint64_t getComponentElementState(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return element; }
+                inline uint64_t getComponentElementChoiceCount(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return 1; } // Assumes deterministic model!
+                inline uint64_t const* getComponentElementChoicesBegin(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element; }
+                inline uint64_t const* getComponentElementChoicesEnd(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element + 1; }
+                inline bool componentElementChoicesContains(typename storm::storage::StronglyConnectedComponent::value_type const& element, uint64_t choice) { return element == choice; }
                 // MECS:
-                uint64_t inline getComponentElementState(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.first; }
-                uint64_t inline getComponentElementChoiceCount(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.size(); }
-                typename storm::storage::MaximalEndComponent::set_type::const_iterator inline getComponentChoicesBegin(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.begin(); }
-                typename storm::storage::MaximalEndComponent::set_type::const_iterator inline getComponentChoicesEnd(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.end(); }
+                inline uint64_t getComponentElementState(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.first; }
+                inline uint64_t getComponentElementChoiceCount(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.size(); }
+                inline typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentElementChoicesBegin(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.begin(); }
+                inline typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentElementChoicesEnd(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.end(); }
+                inline bool componentElementChoicesContains(storm::storage::MaximalEndComponent::map_type::value_type const& element, uint64_t choice) { return element.second.contains(choice); }
             }
         }
     }
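
The renamed getComponentElement* overloads are what allow a single template (such as LraViHelper) to traverse the choices of a component element regardless of whether the element is a plain BSCC state or a MEC entry mapping a state to its choice set. A minimal, self-contained analogue of this overload pattern (illustrative types, not the Storm classes):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <set>
    #include <utility>

    // Stand-ins for StronglyConnectedComponent::value_type (a state index) and
    // MaximalEndComponent::map_type::value_type (state -> set of choices).
    using BsccElement = uint64_t;
    using MecElement = std::pair<const uint64_t, std::set<uint64_t>>;

    inline uint64_t getElementChoiceCount(BsccElement const&) { return 1; }
    inline uint64_t getElementChoiceCount(MecElement const& element) { return element.second.size(); }

    // One template serves both component kinds via overload resolution.
    template<typename Component>
    uint64_t totalChoices(Component const& component) {
        uint64_t count = 0;
        for (auto const& element : component) {
            count += getElementChoiceCount(element);
        }
        return count;
    }

    int main() {
        std::set<uint64_t> bscc = {0, 1, 2};                                  // 3 states, 1 choice each
        std::map<uint64_t, std::set<uint64_t>> mec = {{0, {0, 1}}, {1, {2}}}; // 2 states, 3 choices
        std::cout << totalChoices(bscc) << " " << totalChoices(mec) << "\n";  // prints: 3 3
    }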

From 6ecbf113b377f399b62c3fcfcef80225ea0a1635 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 10 Aug 2020 18:38:14 +0200
Subject: [PATCH 31/48] Adding template instantiation for deterministic LRA VI

---
 .../helper/infinitehorizon/internal/LraViHelper.cpp            | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
index 2b76f9eee..3f6825a15 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
@@ -500,6 +500,9 @@ namespace storm {
                 template class LraViHelper<double, storm::storage::MaximalEndComponent, LraViTransitionsType::DetTsNondetIs>;
                 template class LraViHelper<storm::RationalNumber, storm::storage::MaximalEndComponent, LraViTransitionsType::DetTsNondetIs>;
                 
+                template class LraViHelper<double, storm::storage::StronglyConnectedComponent, LraViTransitionsType::DetTsNoIs>;
+                template class LraViHelper<storm::RationalNumber, storm::storage::StronglyConnectedComponent, LraViTransitionsType::DetTsNoIs>;
+                
             }
         }
     }
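
These explicit instantiations are what make LraViHelper usable for BSCCs from other translation units even though all of its member definitions live in the .cpp file. A minimal sketch of the mechanism (illustrative names):

    // In the header: declaration only, member definitions live in the .cpp.
    template<typename ValueType>
    class Helper {
    public:
        ValueType twice(ValueType v);
    };

    // In the .cpp: the definition ...
    template<typename ValueType>
    ValueType Helper<ValueType>::twice(ValueType v) { return v + v; }

    // ... plus explicit instantiations, analogous to the lines added above.
    // Using Helper<float> elsewhere would compile but fail to link.
    template class Helper<double>;
    template class Helper<long>;

    int main() {
        Helper<double> h;
        return h.twice(1.5) == 3.0 ? 0 : 1;
    }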

From 0cc2b1c7490020daea19fbdb0709a1ab10b23d52 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Mon, 10 Aug 2020 18:38:45 +0200
Subject: [PATCH 32/48] First version of sparse infinite horizon helpers for
 deterministic and nondeterministic models.

---
 ...arseDeterministicInfiniteHorizonHelper.cpp | 209 +++++++
 ...SparseDeterministicInfiniteHorizonHelper.h |  65 ++
 .../SparseInfiniteHorizonHelper.cpp           | 161 +++++
 .../SparseInfiniteHorizonHelper.h             | 140 +++++
 ...eNondeterministicInfiniteHorizonHelper.cpp | 564 +++++++-----------
 ...rseNondeterministicInfiniteHorizonHelper.h | 116 +---
 6 files changed, 814 insertions(+), 441 deletions(-)
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h

diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
new file mode 100644
index 000000000..d6dc8eb3c
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
@@ -0,0 +1,209 @@
+#include "SparseDeterministicInfiniteHorizonHelper.h"
+
+#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
+#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
+
+
+#include "storm/storage/SparseMatrix.h"
+#include "storm/storage/StronglyConnectedComponentDecomposition.h"
+#include "storm/storage/Scheduler.h"
+
+#include "storm/solver/LinearEquationSolver.h"
+#include "storm/solver/Multiplier.h"
+#include "storm/solver/LpSolver.h"
+
+#include "storm/utility/SignalHandler.h"
+#include "storm/utility/solver.h"
+#include "storm/utility/vector.h"
+
+#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
+#include "storm/environment/solver/MinMaxSolverEnvironment.h"
+
+#include "storm/exceptions/UnmetRequirementException.h"
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+        
+            template <typename ValueType>
+            SparseDeterministicInfiniteHorizonHelper<ValueType>::SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : SparseInfiniteHorizonHelper<ValueType, false>(transitionMatrix) {
+                // Intentionally left empty.
+            }
+            
+            template <typename ValueType>
+            SparseDeterministicInfiniteHorizonHelper<ValueType>::SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates) : SparseInfiniteHorizonHelper<ValueType, false>(transitionMatrix, exitRates) {
+                // For the CTMC case we assert that the caller actually provided the probabilistic transitions
+                STORM_LOG_ASSERT(this->_transitionMatrix.isProbabilistic(), "Non-probabilistic transitions");
+            }
+            
+            template <typename ValueType>
+            void SparseDeterministicInfiniteHorizonHelper<ValueType>::createDecomposition() {
+                if (this->_longRunComponentDecomposition == nullptr) {
+                    // The decomposition has not been provided or computed yet.
+                    this->_computedLongRunComponentDecomposition = std::make_unique<storm::storage::StronglyConnectedComponentDecomposition<ValueType>>(this->_transitionMatrix, storm::storage::StronglyConnectedComponentDecompositionOptions().onlyBottomSccs());
+                    this->_longRunComponentDecomposition = this->_computedLongRunComponentDecomposition.get();
+                }
+            }
+
+            template <typename ValueType>
+            ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::StronglyConnectedComponent const& component) {
+                // For deterministic models, we compute the LRA for a BSCC
+                
+                STORM_LOG_ASSERT(!this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
+                
+                auto trivialResult = computeLraForTrivialBscc(env, stateRewardsGetter, actionRewardsGetter, component);
+                if (trivialResult.first) {
+                    return trivialResult.second;
+                }
+                
+                // Solve the nontrivial BSCC with the method specified in the settings.
+                storm::solver::LraMethod method = env.solver().lra().getDetLraMethod();
+                if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isDetLraMethodSetFromDefault() && method == storm::solver::LraMethod::ValueIteration) {
+                    method = storm::solver::LraMethod::GainBiasEquations;
+                    STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
+                } else if (env.solver().isForceSoundness() && env.solver().lra().isDetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) {
+                    method = storm::solver::LraMethod::ValueIteration;
+                    STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
+                }
+                STORM_LOG_TRACE("Computing LRA for BSCC of size " << component.size() << " using '" << storm::solver::toString(method) << "'.");
+                if (method == storm::solver::LraMethod::ValueIteration) {
+                    return computeLraForBsccVi(env, stateRewardsGetter, actionRewardsGetter, component);
+                }/* else if (method == storm::solver::LraMethod::LraDistributionEquations) {
+                    // We only need the first element of the pair as the lra distribution is not relevant at this point.
+                    return computeLongRunAveragesForBsccLraDistr<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;
+                }
+                STORM_LOG_WARN_COND(method == storm::solver::LraMethod::GainBiasEquations, "Unsupported lra method selected. Defaulting to " << storm::solver::toString(storm::solver::LraMethod::GainBiasEquations) << ".");
+                // We don't need the bias values
+                return computeLongRunAveragesForBsccGainBias<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;*/
+            }
+            
+            template <typename ValueType>
+            std::pair<bool, ValueType> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForTrivialBscc(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::StronglyConnectedComponent const& component) {
+                
+                // For deterministic models, we can catch the case where all values are the same. This includes the special case where the BSCC consists of just one state.
+                bool first = true;
+                ValueType val = storm::utility::zero<ValueType>();
+                for (auto const& element : component) {
+                    auto state = internal::getComponentElementState(element);
+                    STORM_LOG_ASSERT(state == *internal::getComponentElementChoicesBegin(element), "Unexpected choice index at state " << state << " of deterministic model.");
+                    ValueType curr = stateRewardsGetter(state) + (this->isContinuousTime() ? (*this->_exitRates)[state] * actionRewardsGetter(state) : actionRewardsGetter(state));
+                    if (first) {
+                        val = curr;
+                        first = false;
+                    } else if (val != curr) {
+                        return {false, storm::utility::zero<ValueType>()};
+                    }
+                }
+                // All values are the same
+                return {true, val};
+            }
+            
+    
+            template <typename ValueType>
+            ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccVi(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::StronglyConnectedComponent const& bscc) {
+
+                // Collect parameters of the computation
+                ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
+                
+                // Now create a helper and perform the algorithm
+                if (this->isContinuousTime()) {
+                    // We assume a CTMC (with deterministic timed states and no instant states)
+                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::StronglyConnectedComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNoIs> viHelper(bscc, this->_transitionMatrix, aperiodicFactor, this->_markovianStates, this->_exitRates);
+                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, this->_exitRates);
+                } else {
+                    // We assume a DTMC (with deterministic timed states and no instant states)
+                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::StronglyConnectedComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNoIs> viHelper(bscc, this->_transitionMatrix, aperiodicFactor);
+                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter);
+                }
+            }
+            
+            template <typename ValueType>
+            std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> SparseDeterministicInfiniteHorizonHelper<ValueType>::buildSspMatrixVector(std::vector<ValueType> const& bsccLraValues, std::vector<uint64_t> const& inputStateToBsccIndexMap, storm::storage::BitVector const& statesNotInComponent, bool asEquationSystem) {
+                
+                // Create SSP Matrix.
+                // In contrast to the version for nondeterministic models, we eliminate the auxiliary states representing each BSCC on the fly
+                
+                // Probability mass that would lead to a BSCC will be considered in the rhs of the equation system
+                auto sspMatrix = this->_transitionMatrix.getSubmatrix(false, statesNotInComponent, statesNotInComponent, asEquationSystem);
+                if (asEquationSystem) {
+                    sspMatrix.convertToEquationSystem();
+                }
+                
+                // Create the SSP right-hand-side
+                std::vector<ValueType> rhs;
+                rhs.reserve(sspMatrix.getRowCount());
+                for (auto const& state : statesNotInComponent) {
+                    ValueType stateValue = storm::utility::zero<ValueType>();
+                    for (auto const& transition : this->_transitionMatrix.getRow(state)) {
+                        if (!statesNotInComponent.get(transition.getColumn())) {
+                            // This transition leads to a BSCC!
+                            stateValue += transition.getValue() * bsccLraValues[inputStateToBsccIndexMap[transition.getColumn()]];
+                        }
+                    }
+                    rhs.push_back(std::move(stateValue));
+                }
+                
+                return std::make_pair(std::move(sspMatrix), std::move(rhs));
+            }
+            
+            template <typename ValueType>
+            std::vector<ValueType> SparseDeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) {
+                STORM_LOG_ASSERT(this->_longRunComponentDecomposition != nullptr, "Decomposition not computed yet.");
+                
+                // For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
+                // which redirects all transitions leading to a former BSCC state to a new (imaginary) auxiliary state.
+                // Each auxiliary state gets assigned the value of that BSCC and we compute expected rewards (aka stochastic shortest path, SSP) on that new system.
+                // For efficiency reasons, we actually build the system where the auxiliary states are already eliminated.
+                
+                // First gather the states that are part of a component
+                // and create a mapping from states that lie in a component to the corresponding component index.
+                storm::storage::BitVector statesInComponents(this->_transitionMatrix.getRowGroupCount());
+                std::vector<uint64_t> stateIndexMap(this->_transitionMatrix.getRowGroupCount(), std::numeric_limits<uint64_t>::max());
+                for (uint64_t currentComponentIndex = 0; currentComponentIndex < this->_longRunComponentDecomposition->size(); ++currentComponentIndex) {
+                    for (auto const& element : (*this->_longRunComponentDecomposition)[currentComponentIndex]) {
+                        uint64_t state = internal::getComponentElementState(element);
+                        statesInComponents.set(state);
+                        stateIndexMap[state] = currentComponentIndex;
+                    }
+                }
+                // Map the non-component states to their index in the SSP. Note that the order of these states will be preserved.
+                uint64_t numberOfNonComponentStates = 0;
+                storm::storage::BitVector statesNotInComponent = ~statesInComponents;
+                for (auto const& nonComponentState : statesNotInComponent) {
+                    stateIndexMap[nonComponentState] = numberOfNonComponentStates;
+                    ++numberOfNonComponentStates;
+                }
+                
+                // The next step is to create the equation system solving the SSP (unless the whole system consists of BSCCs)
+                std::vector<ValueType> sspValues;
+                if (numberOfNonComponentStates > 0) {
+                    storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
+                    bool isEqSysFormat = linearEquationSolverFactory.getEquationProblemFormat(env) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
+                    auto sspMatrixVector = buildSspMatrixVector(componentLraValues, stateIndexMap, statesNotInComponent, isEqSysFormat);
+                    std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> solver = linearEquationSolverFactory.create(env, sspMatrixVector.first);
+                    auto lowerUpperBounds = std::minmax_element(componentLraValues.begin(), componentLraValues.end());
+                    solver->setBounds(*lowerUpperBounds.first, *lowerUpperBounds.second);
+                    // Check solver requirements
+                    auto requirements = solver->getRequirements(env);
+                    STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
+                    sspValues.assign(sspMatrixVector.first.getRowCount(), (*lowerUpperBounds.first + *lowerUpperBounds.second) / storm::utility::convertNumber<ValueType,uint64_t>(2));
+                    solver->solveEquations(env, sspValues, sspMatrixVector.second);
+                }
+                
+                // Prepare result vector.
+                std::vector<ValueType> result(this->_transitionMatrix.getRowGroupCount());
+                for (uint64_t state = 0; state < stateIndexMap.size(); ++state) {
+                    if (statesNotInComponent.get(state)) {
+                        result[state] = sspValues[stateIndexMap[state]];
+                    } else {
+                        result[state] = componentLraValues[stateIndexMap[state]];
+                    }
+                }
+                return result;
+            }
+            
+            template class SparseDeterministicInfiniteHorizonHelper<double>;
+            template class SparseDeterministicInfiniteHorizonHelper<storm::RationalNumber>;
+            
+        }
+    }
+}
\ No newline at end of file
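
The effect of buildSspMatrixVector can be seen on a toy instance: transitions from transient states into a BSCC are folded into the right-hand side, weighted by that BSCC's LRA value, so the auxiliary BSCC states never enter the solved system. A self-contained numeric sketch of this elimination (illustrative data, plain C++ instead of Storm's solver API):

    #include <iostream>
    #include <vector>

    int main() {
        // Two transient states {0, 1} and two BSCCs with known LRA values.
        double pNN[2][2]      = {{0.0, 0.5}, {0.0, 0.0}};  // transitions among transient states
        double intoBscc[2][2] = {{0.5, 0.0}, {0.3, 0.7}};  // rows: states, columns: BSCCs
        double lra[2] = {1.0, 0.0};                        // LRA value of each BSCC

        // Right-hand side b[s] = sum_j P(s -> BSCC_j) * lra[j]:
        std::vector<double> b(2, 0.0);
        for (int s = 0; s < 2; ++s)
            for (int j = 0; j < 2; ++j)
                b[s] += intoBscc[s][j] * lra[j];

        // Solve (I - P_NN) x = b; this 2x2 instance is upper triangular:
        double x1 = b[1] / (1.0 - pNN[1][1]);
        double x0 = (b[0] + pNN[0][1] * x1) / (1.0 - pNN[0][0]);
        std::cout << x0 << " " << x1 << "\n";  // prints: 0.65 0.3
    }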
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h
new file mode 100644
index 000000000..5c22cc1af
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h
@@ -0,0 +1,65 @@
+#pragma once
+#include "storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h"
+
+
+namespace storm {
+    
+    namespace modelchecker {
+        namespace helper {
+        
+            /*!
+             * Helper class for model checking queries that depend on the long run behavior of the (deterministic) system.
+             * @tparam ValueType the type a value can have
+             */
+            template <typename ValueType>
+            class SparseDeterministicInfiniteHorizonHelper : public SparseInfiniteHorizonHelper<ValueType, false> {
+
+            public:
+                /*!
+                 * Function mapping from indices to values
+                 */
+                typedef typename SparseInfiniteHorizonHelper<ValueType, false>::ValueGetter ValueGetter;
+                
+                /*!
+                 * Initializes the helper for a discrete time model (i.e. DTMC)
+                 */
+                SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix);
+                
+                /*!
+                 * Initializes the helper for a continuous time model (i.e. CTMC)
+                 * @note The transition matrix shall be probabilistic (i.e. the rows sum up to one)
+                 */
+                SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates);
+                
+                /*!
+                 * @param stateValuesGetter a function returning a value for a given state index
+                 * @param actionValuesGetter a function returning a value for a given (global) choice index
+                 * @return the (unique) LRA value for the given component.
+                 * @note Scheduler production is not supported for deterministic models.
+                 */
+                virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& component) override;
+                
+            protected:
+                
+                virtual void createDecomposition() override;
+                
+                std::pair<bool, ValueType> computeLraForTrivialBscc(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
+                
+                /*!
+                 * As computeLraForComponent, but always uses value iteration as the solution method (independent of what is set in env)
+                 */
+                ValueType computeLraForBsccVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
+                
+                std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> buildSspMatrixVector(std::vector<ValueType> const& bsccLraValues, std::vector<uint64_t> const& inputStateToBsccIndexMap, storm::storage::BitVector const& statesNotInComponent, bool asEquationSystem);
+                
+                /*!
+                 * @return LRA values for each state
+                 */
+                virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) override;
+            
+            };
+
+        
+        }
+    }
+}
\ No newline at end of file
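
A minimal usage sketch of the new helper, assuming Storm's SparseMatrixBuilder, BitVector, and Environment APIs (not part of this patch):

    #include "storm/environment/Environment.h"
    #include "storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h"
    #include "storm/storage/BitVector.h"
    #include "storm/storage/SparseMatrix.h"

    // Long-run average probability of being in state 1 of a symmetric 2-state DTMC.
    std::vector<double> lraExample() {
        storm::storage::SparseMatrixBuilder<double> builder(2, 2, 4);
        builder.addNextValue(0, 0, 0.5);
        builder.addNextValue(0, 1, 0.5);
        builder.addNextValue(1, 0, 0.5);
        builder.addNextValue(1, 1, 0.5);
        auto matrix = builder.build();

        storm::storage::BitVector psiStates(2);
        psiStates.set(1);

        storm::Environment env;
        storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<double> helper(matrix);
        return helper.computeLongRunAverageProbabilities(env, psiStates);  // 0.5 for both states
    }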
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp
new file mode 100644
index 000000000..8eab0bca0
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp
@@ -0,0 +1,161 @@
+#include "SparseInfiniteHorizonHelper.h"
+
+#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
+#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
+
+#include "storm/models/sparse/StandardRewardModel.h"
+
+#include "storm/storage/SparseMatrix.h"
+#include "storm/storage/MaximalEndComponentDecomposition.h"
+#include "storm/storage/StronglyConnectedComponentDecomposition.h"
+
+#include "storm/solver/MinMaxLinearEquationSolver.h"
+#include "storm/solver/LinearEquationSolver.h"
+#include "storm/solver/Multiplier.h"
+#include "storm/solver/LpSolver.h"
+
+#include "storm/utility/SignalHandler.h"
+#include "storm/utility/solver.h"
+#include "storm/utility/vector.h"
+
+#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
+#include "storm/environment/solver/MinMaxSolverEnvironment.h"
+
+#include "storm/exceptions/UnmetRequirementException.h"
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+        
+            template <typename ValueType, bool Nondeterministic>
+            SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(nullptr), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) {
+                // Intentionally left empty.
+            }
+            
+            template <typename ValueType, bool Nondeterministic>
+            SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _markovianStates(&markovianStates), _exitRates(&exitRates), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) {
+                // Intentionally left empty.
+            }
+            
+            template <typename ValueType, bool Nondeterministic>
+            SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(&exitRates), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) {
+                // Intentionally left empty.
+            }
+            
+            template <typename ValueType, bool Nondeterministic>
+            void SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
+                STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or provided before.");
+                _backwardTransitions = &backwardTransitions;
+            }
+            
+            template <typename ValueType, bool Nondeterministic>
+            void SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition) {
+                STORM_LOG_WARN_COND(_longRunComponentDecomposition == nullptr, "Long Run Component Decomposition was provided but it was already computed or provided before.");
+                _longRunComponentDecomposition = &decomposition;
+            }
+            
+            template <typename ValueType, bool Nondeterministic>
+            std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
+                return computeLongRunAverageValues(env,
+                            [&psiStates] (uint64_t stateIndex) { return psiStates.get(stateIndex) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>(); },
+                            [] (uint64_t) { return storm::utility::zero<ValueType>(); }
+                    );
+            }
+            
+            template <typename ValueType, bool Nondeterministic>
+            std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
+                ValueGetter stateRewardsGetter;
+                if (rewardModel.hasStateRewards()) {
+                    stateRewardsGetter = [&rewardModel] (uint64_t stateIndex) { return rewardModel.getStateReward(stateIndex); };
+                } else {
+                    stateRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
+                }
+                ValueGetter actionRewardsGetter;
+                if (rewardModel.hasStateActionRewards() || rewardModel.hasTransitionRewards()) {
+                    if (rewardModel.hasTransitionRewards()) {
+                        actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionAndTransitionReward(globalChoiceIndex, this->_transitionMatrix); };
+                    } else {
+                        actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionReward(globalChoiceIndex); };
+                    }
+                } else {
+                    actionRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
+                }
+                
+                return computeLongRunAverageValues(env, stateRewardsGetter, actionRewardsGetter);
+            }
+            
+            template <typename ValueType, bool Nondeterministic>
+            std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) {
+                ValueGetter stateValuesGetter;
+                if (stateValues) {
+                    stateValuesGetter = [&stateValues] (uint64_t stateIndex) { return (*stateValues)[stateIndex]; };
+                } else {
+                    stateValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
+                }
+                ValueGetter actionValuesGetter;
+                if (actionValues) {
+                    actionValuesGetter = [&actionValues] (uint64_t globalChoiceIndex) { return (*actionValues)[globalChoiceIndex]; };
+                } else {
+                    actionValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
+                }
+                
+                return computeLongRunAverageValues(env, stateValuesGetter, actionValuesGetter);
+
+            }
+            
+            
+            template <typename ValueType, bool Nondeterministic>
+            std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter) {
+                // We will compute the long run average value for each component (MEC or BSCC) individually and then set up an equation system to compute the values at the remaining states.
+                // For a description of this approach see, e.g., Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
+                
+                // Prepare an environment for the underlying solvers
+                auto underlyingSolverEnvironment = env;
+                if (env.solver().isForceSoundness()) {
+                    // For sound computations, the error within the components plus the error in the remaining system should not exceed the user-defined precision.
+                    storm::RationalNumber newPrecision = env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2);
+                    underlyingSolverEnvironment.solver().minMax().setPrecision(newPrecision);
+                    underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
+                    underlyingSolverEnvironment.solver().setLinearEquationSolverPrecision(newPrecision, env.solver().lra().getRelativeTerminationCriterion());
+                    underlyingSolverEnvironment.solver().lra().setPrecision(newPrecision);
+                }
+                
+                // If requested, allocate memory for the choices made
+                if (Nondeterministic && this->isProduceSchedulerSet()) {
+                    if (!_producedOptimalChoices.is_initialized()) {
+                        _producedOptimalChoices.emplace();
+                    }
+                    _producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
+                }
+                STORM_LOG_ASSERT(Nondeterministic || !this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
+                
+                // Decompose the model into its bottom components (MECs or BSCCs)
+                createDecomposition();
+                
+                // Compute the long-run average for all components in isolation.
+                std::vector<ValueType> componentLraValues;
+                componentLraValues.reserve(_longRunComponentDecomposition->size());
+                for (auto const& c : *_longRunComponentDecomposition) {
+                    componentLraValues.push_back(computeLraForComponent(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, c));
+                }
+                
+                // Solve the resulting SSP where end components are collapsed into single auxiliary states
+                return buildAndSolveSsp(underlyingSolverEnvironment, componentLraValues);
+            }
+            
+            template <typename ValueType, bool Nondeterministic>
+            bool SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::isContinuousTime() const {
+                STORM_LOG_ASSERT((_markovianStates == nullptr) || (_exitRates != nullptr), "Inconsistent information given: Have Markovian states but no exit rates." );
+                return _exitRates != nullptr;
+            }
+            
+            template class SparseInfiniteHorizonHelper<double, true>;
+            template class SparseInfiniteHorizonHelper<storm::RationalNumber, true>;
+            template class SparseInfiniteHorizonHelper<storm::RationalFunction, true>;
+            
+            template class SparseInfiniteHorizonHelper<double, false>;
+            template class SparseInfiniteHorizonHelper<storm::RationalNumber, false>;
+            
+        }
+    }
+}
\ No newline at end of file
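
The ValueGetter plumbing above is a small adapter pattern: optional value vectors (and reward models) are wrapped into std::function getters, with a constant-zero getter as the fallback. A self-contained sketch (illustrative names):

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <vector>

    using ValueGetter = std::function<double(uint64_t)>;

    // Wrap an optional value vector; absent vectors contribute zero.
    ValueGetter makeGetter(std::vector<double> const* values) {
        if (values) {
            return [values](uint64_t index) { return (*values)[index]; };
        }
        return [](uint64_t) { return 0.0; };
    }

    int main() {
        std::vector<double> stateValues = {1.0, 2.0, 3.0};
        ValueGetter states = makeGetter(&stateValues);
        ValueGetter actions = makeGetter(nullptr);
        std::cout << states(1) + actions(1) << "\n";  // prints: 2
    }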
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h
new file mode 100644
index 000000000..b8ebcef0b
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h
@@ -0,0 +1,140 @@
+#pragma once
+#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
+
+#include "storm/storage/MaximalEndComponent.h"
+#include "storm/storage/StronglyConnectedComponent.h"
+#include "storm/storage/Decomposition.h"
+#include "storm/storage/SparseMatrix.h"
+
+namespace storm {
+    class Environment;
+    
+    namespace models {
+        namespace sparse {
+            template <typename VT> class StandardRewardModel;
+        }
+    }
+    
+    namespace modelchecker {
+        namespace helper {
+        
+            /*!
+             * Helper class for model checking queries that depend on the long run behavior of the (possibly nondeterministic) system.
+             * @tparam ValueType the type a value can have
+             * @tparam Nondeterministic true if there is nondeterminism in the Model (MDP or MA)
+             */
+            template <typename ValueType, bool Nondeterministic>
+            class SparseInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType> {
+
+            public:
+                
+                /*!
+                 * The type of a component in which the system resides in the long run (BSCC for deterministic models, MEC for nondeterministic models)
+                 */
+                using LongRunComponentType = typename std::conditional<Nondeterministic, storm::storage::MaximalEndComponent, storm::storage::StronglyConnectedComponent>::type;
+                
+                /*!
+                 * Function mapping from indices to values
+                 */
+                typedef std::function<ValueType(uint64_t)> ValueGetter;
+                
+                /*!
+                 * Initializes the helper for a discrete time model (i.e. DTMC, MDP)
+                 */
+                SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix);
+                
+                /*!
+                 * Initializes the helper for a continuous time model (i.e. MA)
+                 */
+                SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates);
+
+                /*!
+                 * Initializes the helper for a continuous time model (i.e. CTMC)
+                 */
+                SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates);
+                
+                /*!
+                 * Provides backward transitions that can be used during the computation.
+                 * Providing them is optional. If they are not provided, they will be computed internally.
+                 * Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the backward transitions remains valid.
+                 */
+                void provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions);
+    
+                /*!
+                 * Provides the decomposition into long run components (BSCCs/MECs) that can be used during the computation.
+                 * Providing the decomposition is optional. If it is not provided, it will be computed internally.
+                 * Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the decomposition remains valid.
+                 */
+                void provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition);
+                
+                /*!
+                 * Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
+                 * @return a value for each state
+                 */
+                std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates);
+                
+                /*!
+                 * Computes the long run average rewards, i.e., the average reward collected per time unit
+                 * @return a value for each state
+                 */
+                std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel);
+                
+                /*!
+                 * Computes the long run average value given the provided state- and action-based rewards.
+                 * @param stateValues a vector containing a value for every state
+                 * @param actionValues a vector containing a value for every choice
+                 * @return a value for each state
+                 */
+                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues = nullptr, std::vector<ValueType> const* actionValues = nullptr);
+                
+                /*!
+                 * Computes the long run average value given the provided state- and action-based rewards
+                 * @param stateValuesGetter a function returning a value for a given state index
+                 * @param actionValuesGetter a function returning a value for a given (global) choice index
+                 * @return a value for each state
+                 */
+                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter);
+                
+                /*!
+                 * @param stateValuesGetter a function returning a value for a given state index
+                 * @param actionValuesGetter a function returning a value for a given (global) choice index
+                 * @return the (unique) optimal LRA value for the given component.
+                 * @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
+                 */
+                virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, LongRunComponentType const& component) = 0;
+                
+            protected:
+                
+                /*!
+                 * @return true iff this is a computation on a continuous time model (i.e. CTMC, MA)
+                 */
+                bool isContinuousTime() const;
+                
+                /*!
+                 * @post _longRunComponentDecomposition points to a decomposition of the long run components (MECs, BSCCs)
+                 */
+                virtual void createDecomposition() = 0;
+                
+                /*!
+                 * @pre if scheduler production is enabled and Nondeterministic is true, a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components.
+                 * @return LRA values for each state
+                 * @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values.
+                 */
+                virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) = 0;
+            
+                storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
+                storm::storage::BitVector const* _markovianStates;
+                std::vector<ValueType> const* _exitRates;
+                
+                storm::storage::SparseMatrix<ValueType> const* _backwardTransitions;
+                storm::storage::Decomposition<LongRunComponentType> const* _longRunComponentDecomposition;
+                std::unique_ptr<storm::storage::SparseMatrix<ValueType>> _computedBackwardTransitions;
+                std::unique_ptr<storm::storage::Decomposition<LongRunComponentType>> _computedLongRunComponentDecomposition;
+                
+                boost::optional<std::vector<uint64_t>> _producedOptimalChoices;
+            };
+
+        
+        }
+    }
+}
\ No newline at end of file
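
The LongRunComponentType alias is the compile-time switch that lets this one base class serve both model kinds. A minimal, self-contained sketch of the std::conditional pattern (illustrative stand-in types):

    #include <cstdint>
    #include <map>
    #include <set>
    #include <type_traits>

    using Bscc = std::set<uint64_t>;                      // deterministic: states only
    using Mec  = std::map<uint64_t, std::set<uint64_t>>;  // nondeterministic: states -> choices

    template<bool Nondeterministic>
    struct HelperBase {
        // Pick the component representation from the compile-time flag.
        using LongRunComponentType = typename std::conditional<Nondeterministic, Mec, Bscc>::type;
    };

    static_assert(std::is_same<HelperBase<false>::LongRunComponentType, Bscc>::value, "BSCCs for deterministic models");
    static_assert(std::is_same<HelperBase<true>::LongRunComponentType, Mec>::value, "MECs for nondeterministic models");

    int main() { return 0; }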
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
index 5cb86d83e..4dcd8088f 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
@@ -3,18 +3,14 @@
 #include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
 #include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
 
-#include "storm/models/sparse/StandardRewardModel.h"
-
 #include "storm/storage/SparseMatrix.h"
 #include "storm/storage/MaximalEndComponentDecomposition.h"
-#include "storm/storage/StronglyConnectedComponentDecomposition.h"
+#include "storm/storage/Scheduler.h"
 
 #include "storm/solver/MinMaxLinearEquationSolver.h"
-#include "storm/solver/LinearEquationSolver.h"
 #include "storm/solver/Multiplier.h"
 #include "storm/solver/LpSolver.h"
 
-#include "storm/utility/SignalHandler.h"
 #include "storm/utility/solver.h"
 #include "storm/utility/vector.h"
 
@@ -27,146 +23,32 @@ namespace storm {
     namespace modelchecker {
         namespace helper {
         
-            template <typename ValueType, bool Nondeterministic>
-            SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr), _markovianStates(nullptr), _exitRates(nullptr) {
+            template <typename ValueType>
+            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : SparseInfiniteHorizonHelper<ValueType, true>(transitionMatrix) {
                 // Intentionally left empty.
             }
             
-            template <typename ValueType, bool Nondeterministic>
-            SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr), _markovianStates(&markovianStates), _exitRates(&exitRates) {
+            template <typename ValueType>
+            SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : SparseInfiniteHorizonHelper<ValueType, true>(transitionMatrix, markovianStates, exitRates) {
                 // Intentionally left empty.
             }
             
-            template <typename ValueType, bool Nondeterministic>
-            void SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
-                STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or provided before.");
-                _backwardTransitions = &backwardTransitions;
-            }
-            
-            template <typename ValueType, bool Nondeterministic>
-            void SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponent> const& decomposition) {
-                STORM_LOG_WARN_COND(_longRunComponentDecomposition == nullptr, "Long Run Component Decomposition was provided but it was already computed or provided before.");
-                _longRunComponentDecomposition = &decomposition;
-            }
-            
-            template <typename ValueType, bool Nondeterministic>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
-                return computeLongRunAverageValues(env,
-                            [&psiStates] (uint64_t stateIndex) { return psiStates.get(stateIndex) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>(); },
-                            [] (uint64_t) { return storm::utility::zero<ValueType>(); }
-                    );
-            }
-            
-            template <typename ValueType, bool Nondeterministic>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
-                std::function<ValueType(uint64_t stateIndex)> stateRewardsGetter;
-                if (rewardModel.hasStateRewards()) {
-                    stateRewardsGetter = [&rewardModel] (uint64_t stateIndex) { return rewardModel.getStateReward(stateIndex); };
-                } else {
-                    stateRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
-                }
-                std::function<ValueType(uint64_t globalChoiceIndex)> actionRewardsGetter;
-                if (rewardModel.hasStateActionRewards() || rewardModel.hasTransitionRewards()) {
-                    if (rewardModel.hasTransitionRewards()) {
-                        actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionAndTransitionReward(globalChoiceIndex, this->_transitionMatrix); };
-                    } else {
-                        actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionReward(globalChoiceIndex); };
-                    }
-                } else {
-                    stateRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
-                }
-                
-                return computeLongRunAverageValues(env, stateRewardsGetter, actionRewardsGetter);
-            }
-            
-            template <typename ValueType, bool Nondeterministic>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) {
-                std::function<ValueType(uint64_t stateIndex)> stateValuesGetter;
-                if (stateValues) {
-                    stateValuesGetter = [&stateValues] (uint64_t stateIndex) { return (*stateValues)[stateIndex]; };
-                } else {
-                    stateValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
-                }
-                std::function<ValueType(uint64_t actionIndex)> actionValuesGetter;
-                if (actionValues) {
-                    actionValuesGetter = [&actionValues] (uint64_t globalChoiceIndex) { return (*actionValues)[globalChoiceIndex]; };
-                } else {
-                    actionValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
-                }
-                
-                return computeLongRunAverageValues(env, stateValuesGetter, actionValuesGetter);
-
-            }
-            
-            template <typename ValueType, bool Nondeterministic>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter) {
-                // We will compute the long run average value for each MEC individually and then set-up a MinMax Equation system to compute the value also at non-mec states.
-                // For a description of this approach see, e.g., Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
-                
-                 // Prepare an environment for the underlying solvers
-                auto underlyingSolverEnvironment = env;
-                if (env.solver().isForceSoundness()) {
-                    // For sound computations, the error in the MECS plus the error in the remaining system should not exceed the user defined precsion.
-                    storm::RationalNumber newPrecision = env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2);
-                    underlyingSolverEnvironment.solver().minMax().setPrecision(newPrecision);
-                    underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
-                    underlyingSolverEnvironment.solver().setLinearEquationSolverPrecision(newPrecision, env.solver().lra().getRelativeTerminationCriterion());
-                    underlyingSolverEnvironment.solver().lra().setPrecision(newPrecision);
-                }
-                
-                // If requested, allocate memory for the choices made
-                if (Nondeterministic && this->isProduceSchedulerSet()) {
-                    if (!_producedOptimalChoices.is_initialized()) {
-                        _producedOptimalChoices.emplace();
-                    }
-                    _producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
-                }
-                STORM_LOG_ASSERT(Nondeterministic || !this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
-                
-                // Start by decomposing the Model into its MECs.
-                if (_longRunComponentDecomposition == nullptr) {
-                    // The decomposition has not been provided or computed, yet.
-                    if (Nondeterministic) {
-                        if (_backwardTransitions == nullptr) {
-                            _computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix>(_transitionMatrix.transpose(true));
-                            _backwardTransitions = _computedBackwardTransitions.get();
-                        }
-                        _computedLongRunComponentDecomposition = std::make_unique<storm::storage::MaximalEndComponentDecomposition<ValueType>(_transitionMatrix, *_backwardTransitions);
-                    } else {
-                        _computedLongRunComponentDecomposition = std::make_unique<storm::storage::StronglyConnectedComponentDecomposition<ValueType>(_transitionMatrix, storm::storage::StronglyConnectedComponentDecompositionOptions().onlyBottomSccs());
-                    }
-                    _longRunComponentDecomposition = _computedLongRunComponentDecomposition.get();
-                }
-
-                // Compute the long-run average for all components in isolation.
-                std::vector<ValueType> componentLraValues;
-                mecLraValues.reserve(_longRunComponentDecomposition->size());
-                for (auto const& c : *_longRunComponentDecomposition) {
-                    componentLraValues.push_back(computeLraForComponent(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, c));
-                }
-                
-                // Solve the resulting SSP where end components are collapsed into single auxiliary states
-                return buildAndSolveSsp(underlyingSolverEnvironment, componentLraValues);
-            }
-            
-            template <typename ValueType, bool Nondeterministic>
-            std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::getProducedOptimalChoices() const {
-                STORM_LOG_WARN_COND(Nondeterministic, "Getting optimal choices for deterministic model.");
+            template <typename ValueType>
+            std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const {
                 STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
-                STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
-                return _producedOptimalChoices.get();
+                STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
+                return this->_producedOptimalChoices.get();
             }
             
-            template <typename ValueType, bool Nondeterministic>
-            std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::getProducedOptimalChoices() {
-                STORM_LOG_WARN_COND(Nondeterministic, "Getting optimal choices for deterministic model.");
+            template <typename ValueType>
+            std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() {
                 STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
-                STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
-                return _producedOptimalChoices.get();
+                STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
+                return this->_producedOptimalChoices.get();
             }
             
-            template <typename ValueType, bool Nondeterministic>
-            storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::extractScheduler() const {
+            template <typename ValueType>
+            storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::extractScheduler() const {
                 auto const& optimalChoices = getProducedOptimalChoices();
                 storm::storage::Scheduler<ValueType> scheduler(optimalChoices.size());
                 for (uint64_t state = 0; state < optimalChoices.size(); ++state) {
@@ -175,44 +57,32 @@ namespace storm {
                 return scheduler;
             }
             
-            template <typename ValueType, bool Nondeterministic>
-            bool SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::isContinuousTime() const {
-                STORM_LOG_ASSERT((_markovianStates == nullptr) || (_exitRates != nullptr), "Inconsistent information given: Have Markovian states but no exit rates." );
-                return _exitRates != nullptr;
-            }
-    
-            template <typename ValueType, bool Nondeterministic>
-            template < typename = typename std::enable_if< !Nondeterministic >::type >
-            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
-                // For deterministic models, we compute the LRA for a BSCC
-                
-                STORM_LOG_ASSERT(!this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
-                
-                auto trivialResult = computeLraForTrivialComponent(env, stateReardsGetter, actionRewardsGetter, component);
-                if (trivialResult.first) {
-                    return trivialResult.second;
+            template <typename ValueType>
+            void SparseNondeterministicInfiniteHorizonHelper<ValueType>::createDecomposition() {
+                if (this->_longRunComponentDecomposition == nullptr) {
+                    // The decomposition has not been provided or computed, yet.
+                    if (this->_backwardTransitions == nullptr) {
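+                        // Backward transitions were not provided up front; compute them once here and keep ownership in this helper.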
+                        this->_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(this->_transitionMatrix.transpose(true));
+                        this->_backwardTransitions = this->_computedBackwardTransitions.get();
+                    }
+                    this->_computedLongRunComponentDecomposition = std::make_unique<storm::storage::MaximalEndComponentDecomposition<ValueType>>(this->_transitionMatrix, *this->_backwardTransitions);
+                    this->_longRunComponentDecomposition = this->_computedLongRunComponentDecomposition.get();
                 }
-                
-                // Solve nontrivial BSCC with the method specified  in the settings
-                
-                // TODO
-                
             }
-            
-            template <typename ValueType, bool Nondeterministic>
-            template < typename = typename std::enable_if< Nondeterministic >::type >
-            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
+
+            template <typename ValueType>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) {
                 // For models with potential nondeterminism, we compute the LRA for a maximal end component (MEC)
                 
                 // Allocate memory for the nondeterministic choices.
                 if (this->isProduceSchedulerSet()) {
-                    if (!_producedOptimalChoices.is_initialized()) {
-                        _producedOptimalChoices.emplace();
+                    if (!this->_producedOptimalChoices.is_initialized()) {
+                        this->_producedOptimalChoices.emplace();
                     }
-                    _producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
+                    this->_producedOptimalChoices->resize(this->_transitionMatrix.getRowGroupCount());
                 }
                 
-                auto trivialResult = computeLraForTrivialComponent(env, stateReardsGetter, actionRewardsGetter, component);
+                auto trivialResult = this->computeLraForTrivialMec(env, stateRewardsGetter, actionRewardsGetter, component);
                 if (trivialResult.first) {
                     return trivialResult.second;
                 }
@@ -228,28 +98,28 @@ namespace storm {
                 }
                 STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet() || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
                 if (method == storm::solver::LraMethod::LinearProgramming) {
-                    return computeLraForMecLp(env, stateRewardsGetter, actionRewardsGetter, mec);
+                    return computeLraForMecLp(env, stateRewardsGetter, actionRewardsGetter, component);
                 } else if (method == storm::solver::LraMethod::ValueIteration) {
-                    return computeLraForMecVi(env, stateRewardsGetter, actionRewardsGetter, mec);
+                    return computeLraForMecVi(env, stateRewardsGetter, actionRewardsGetter, component);
                 } else {
                     STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
                 }
             }
-    
-            template <typename ValueType, bool Nondeterministic>
-            std::pair<bool, ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForTrivialComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
+            
+            template <typename ValueType>
+            std::pair<bool, ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForTrivialMec(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) {
                 
                 // If the component only consists of a single state, we compute the LRA value directly
                 if (component.size() == 1) {
-                    auto element const& = *component.begin();
+                    auto const& element = *component.begin();
                     uint64_t state = internal::getComponentElementState(element);
-                    auto choiceIt = internal::getComponentChoicesBegin(element);
-                    if (Nondeterministic && !isContinuousTime()) {
+                    auto choiceIt = internal::getComponentElementChoicesBegin(element);
+                    if (!this->isContinuousTime()) {
                         // This is an MDP.
                         // Find the choice with the highest/lowest reward
                         ValueType bestValue = actionRewardsGetter(*choiceIt);
                         uint64_t bestChoice = *choiceIt;
-                        for (++choiceIt; choiceIt != getComponentChoicesEnd(element); ++choiceIt) {
+                        for (++choiceIt; choiceIt != internal::getComponentElementChoicesEnd(element); ++choiceIt) {
                             ValueType currentValue = actionRewardsGetter(*choiceIt);
                             if ((this->minimize() &&  currentValue < bestValue) || (this->maximize() && currentValue > bestValue)) {
                                 bestValue = std::move(currentValue);
@@ -257,65 +127,50 @@ namespace storm {
                             }
                         }
                         if (this->isProduceSchedulerSet()) {
-                            _producedOptimalChoices.get()[state] = bestChoice - _transitionMatrix.getRowGroupIndices()[state];
+                            this->_producedOptimalChoices.get()[state] = bestChoice - this->_transitionMatrix.getRowGroupIndices()[state];
                         }
                         bestValue += stateRewardsGetter(state);
                         return {true, bestValue};
                     } else {
                         // In a Markov Automaton, singleton components have to consist of a Markovian state because of the non-Zenoness assumption. Then, there is just one possible choice.
-                        STORM_LOG_THROW(!Nondeterministic || (_markovianStates != nullptr && _markovianStates->get(state)), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
+                        STORM_LOG_ASSERT(this->_markovianStates != nullptr, "Nondeterministic continuous-time model without Markovian states... Is this not a Markov Automaton?");
+                        STORM_LOG_THROW(this->_markovianStates->get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
                         STORM_LOG_ASSERT(internal::getComponentElementChoiceCount(element) == 1, "Markovian state has nondeterministic behavior.");
-                        if (Nondeterministic && this->isProduceSchedulerSet()) {
-                            _producedOptimalChoices.get()[state] = 0;
+                        if (this->isProduceSchedulerSet()) {
+                            this->_producedOptimalChoices.get()[state] = 0;
                         }
-                        ValueType result = stateRewardsGetter(state) + (isContinuousTime() ? (*_exitRates)[state] * actionRewardsGetter(*choiceIt) : actionRewardsGetter(*choiceIt));
+                        ValueType result = stateRewardsGetter(state) + (this->isContinuousTime() ? (*this->_exitRates)[state] * actionRewardsGetter(*choiceIt) : actionRewardsGetter(*choiceIt));
                         return {true, result};
                     }
-                } else if (!Nondeterministic) {
-                    // For deterministic models, we can also easily catch the case where all values are the same
-                    bool first = true;
-                    ValueType val = storm::utility::zero<ValueType>();
-                    for (auto const& element : component) {
-                        auto state = getComponentElementState(element);
-                        STORM_LOG_ASSERT(state == *getComponentChoicesBegin(element), "Unexpected choice index at state " << state << " of deterministic model.");
-                        ValueType curr = stateRewardsGetter(state) + (isContinuousTime() ? (*_exitRates)[state] * actionRewardsGetter(state) : actionRewardsGetter(state));
-                        if (first) {
-                            first = false;
-                        } else if (val != curr) {
-                            return {false, storm::utility::zero<ValueType>()};
-                        }
-                    }
-                    // All values are the same
-                    return {true, val};
-                } else {
-                    return {false, storm::utility::zero<ValueType>()};
                 }
+                return {false, storm::utility::zero<ValueType>()};
             }
             
-            template <typename ValueType, bool Nondeterministic>
-            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+    
+            template <typename ValueType>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
 
                 // Collect some parameters of the computation
                 ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
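+                // The aperiodic factor is forwarded to the value-iteration helper; roughly, it controls the weight of added self-loops that make the iteration aperiodic.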
                 std::vector<uint64_t>* optimalChoices = nullptr;
                 if (this->isProduceSchedulerSet()) {
-                    optimalChoices = &_producedOptimalChoices.get();
+                    optimalChoices = &this->_producedOptimalChoices.get();
                 }
                 
                 // Now create a helper and perform the algorithm
-                if (isContinuousTime()) {
+                if (this->isContinuousTime()) {
                     // We assume a Markov Automaton (with deterministic timed states and nondeterministic instant states)
-                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNondetIs> viHelper(mec, _transitionMatrix, aperiodicFactor, _markovianStates, _exitRates);
-                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, _exitRates, &this->getOptimizationDirection(), optimalChoices);
+                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNondetIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor, this->_markovianStates, this->_exitRates);
+                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, this->_exitRates, &this->getOptimizationDirection(), optimalChoices);
                 } else {
                     // We assume an MDP (with nondeterministic timed states and no instant states)
-                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs> viHelper(mec, _transitionMatrix, aperiodicFactor);
+                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor);
                     return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices);
                 }
             }
             
-            template <typename ValueType, bool Nondeterministic>
-            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter,  std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+            template <typename ValueType>
+            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
                 // Create an LP solver
                 auto solver = storm::utility::solver::LpSolverFactory<ValueType>().create("LRA for MEC");
                 
@@ -336,18 +191,18 @@ namespace storm {
                 // Add constraints.
                 for (auto const& stateChoicesPair : mec) {
                     uint_fast64_t state = stateChoicesPair.first;
-                    bool stateIsMarkovian = _markovianStates && _markovianStates->get(state);
+                    bool stateIsMarkovian = this->_markovianStates && this->_markovianStates->get(state);
                     
                     // Now create a suitable constraint for each choice
                     // x_s  {≤, ≥}  -k/rate(s) + sum_s' P(s,act,s') * x_s' + (value(s)/rate(s) + value(s,act))
                     for (auto choice : stateChoicesPair.second) {
                         std::vector<storm::expressions::Expression> summands;
-                        auto matrixRow = _transitionMatrix.getRow(choice);
+                        auto matrixRow = this->_transitionMatrix.getRow(choice);
                         summands.reserve(matrixRow.getNumberOfEntries() + 2);
                         // add -k/rate(s) (only if s is either a Markovian state or we have an MDP)
                         if (stateIsMarkovian) {
-                            summands.push_back(-(k / solver->getManager().rational((*_exitRates)[state])));
-                        } else if (!isContinuousTime()) {
+                            summands.push_back(-(k / solver->getManager().rational((*this->_exitRates)[state])));
+                        } else if (!this->isContinuousTime()) {
                             summands.push_back(-k);
                         }
                         // add sum_s' P(s,act,s') * x_s'
@@ -358,8 +213,8 @@ namespace storm {
                         ValueType value;
                         if (stateIsMarkovian) {
                             // divide state reward with exit rate
-                            value = stateRewardsGetter(state) / (*_exitRates)[state] + actionRewardsGetter(choice);
-                        } else if (!isContinuousTime()) {
+                            value = stateRewardsGetter(state) / (*this->_exitRates)[state] + actionRewardsGetter(choice);
+                        } else if (!this->isContinuousTime()) {
                             // in discrete time models no scaling is needed
                             value = stateRewardsGetter(state) + actionRewardsGetter(choice);
                         } else {
@@ -388,7 +243,7 @@ namespace storm {
              * Transitions that don't lead to a Component state are copied (taking a state index mapping into account).
              */
             template <typename ValueType>
-            void addSspMatrixChoice(uint64_t const& inputMatrixChoice, storm::storage::SparseMatrix<ValueType> const& inputTransitionMatrix, std::vector<uint64_t> const& inputToSspStateMap, uint64_t const& numberOfStatesNotInComponents, uint64_t const& currentSspChoice, storm::storage::SparseMatrixBuilder<ValueType>& sspMatrixBuilder) {
+            void addSspMatrixChoice(uint64_t const& inputMatrixChoice, storm::storage::SparseMatrix<ValueType> const& inputTransitionMatrix, std::vector<uint64_t> const& inputToSspStateMap, uint64_t const& numberOfNonComponentStates, uint64_t const& currentSspChoice, storm::storage::SparseMatrixBuilder<ValueType>& sspMatrixBuilder) {
             
                 // As there could be multiple transitions to the same MEC, we accumulate them in this map before adding them to the matrix builder.
                 std::map<uint64_t, ValueType> auxiliaryStateToProbabilityMap;
@@ -398,7 +253,7 @@ namespace storm {
                         auto const& sspTransitionTarget = inputToSspStateMap[transition.getColumn()];
                        // Since the auxiliary component states are appended at the end of the matrix, we can use this check to
                         // decide whether the transition leads to a component state or not
-                        if (sspTransitionTarget < numberOfStatesNotInMecs) {
+                        if (sspTransitionTarget < numberOfNonComponentStates) {
                             // If the target state is not contained in a component, we can copy over the entry.
                             sspMatrixBuilder.addNextValue(currentSspChoice, sspTransitionTarget, transition.getValue());
                         } else {
@@ -420,195 +275,204 @@ namespace storm {
                 }
             }
             
-            template <typename ValueType, bool Nondeterministic>
-            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) {
-                STORM_LOG_ASSERT(_longRunComponentDecomposition != nullptr, "Decomposition not computed, yet.");
-                
-                uint64_t numberOfStates = _transitionMatrix.getRowGroupCount();
-                
-                // For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
-                // which redirects all transitions leading to a former component state to a new auxiliary state.
-                // There will be one auxiliary state for each component. These states will be appended to the end of the matrix.
-                
-                // First gather the states that are part of a component
-                // and create a mapping from states that lie in a component to the corresponding component index.
-                storm::storage::BitVector statesInMecs(numberOfStates);
-                std::vector<uint64_t> inputToSspStateMap(numberOfStates, std::numeric_limits<uint64_t>::max());
-                for (uint64_t currentMecIndex = 0; currentMecIndex < _longRunComponentDecomposition->size(); ++currentMecIndex) {
-                    for (auto const& stateChoicesPair : (*_longRunComponentDecomposition)[currentMecIndex]) {
-                        statesInMecs.set(stateChoicesPair.first);
-                        inputToSspStateMap[stateChoicesPair.first] = currentMecIndex;
-                    }
-                }
-                // Now take care of the non-mec states. Note that the order of these states will be preserved.
-                uint64_t numberOfStatesNotInMecs = 0;
-                storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs;
-                for (auto const& nonMecState : statesNotContainedInAnyMec) {
-                    inputToSspStateMap[nonMecState] = numberOfStatesNotInMecs;
-                    ++numberOfStatesNotInMecs;
-                }
-                // Finalize the mapping for the mec states which now still assigns mec states to to their Mec index.
-                // To make sure that they point to the auxiliary states (located at the end of the SspMatrix), we need to shift them by the
-                // number of states that are not in a mec.
-                for (auto const& mecState : statesInMecs) {
-                    inputToSspStateMap[mecState] += numberOfStatesNotInMecs;
-                }
+            template <typename ValueType>
+            std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildSspMatrixVector(std::vector<ValueType> const& mecLraValues, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>>* sspComponentExitChoicesToOriginalMap) {
                 
-                // For scheduler extraction, we will need to create a mapping between choices at the auxiliary states and the
-                // corresponding choices in the original model.
-                std::vector<std::pair<uint_fast64_t, uint_fast64_t>> sspMecExitChoicesToOriginalMap;
+                auto const& choiceIndices = this->_transitionMatrix.getRowGroupIndices();
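+                // For each state s, its choices occupy the matrix rows choiceIndices[s], ..., choiceIndices[s + 1] - 1.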
                 
-                // The next step is to create the SSP matrix and the right-hand side of the SSP.
                 std::vector<ValueType> rhs;
-                uint64_t numberOfSspStates = numberOfStatesNotInMecs + _longRunComponentDecomposition->size();
-                typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates , 0, false, true, numberOfSspStates);
-                // If the source state of a transition is not contained in any MEC, we copy its choices (and perform the necessary modifications).
+                uint64_t numberOfSspStates = numberOfNonComponentStates + this->_longRunComponentDecomposition->size();
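+                // The SSP consists of all non-component states plus one auxiliary state per long-run component.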
+                storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates, 0, true, true, numberOfSspStates);
+                // If the source state of a transition is not contained in any component, we copy its choices (and perform the necessary modifications).
                 uint64_t currentSspChoice = 0;
-                for (auto const& nonMecState : statesNotContainedInAnyMec) {
+                for (auto const& nonComponentState : statesNotInComponent) {
                     sspMatrixBuilder.newRowGroup(currentSspChoice);
-                    
-                    for (uint64_t choice = nondeterministicChoiceIndices[nonMecState]; choice < nondeterministicChoiceIndices[nonMecState + 1]; ++choice, ++currentSspChoice) {
+                    for (uint64_t choice = choiceIndices[nonComponentState]; choice < choiceIndices[nonComponentState + 1]; ++choice, ++currentSspChoice) {
                         rhs.push_back(storm::utility::zero<ValueType>());
-                        addSspMatrixChoice(choice, _transitionMatrix, inputToSspStateMap, numberOfStatesNotInMecs, currentSspChoice, sspMatrixBuilder);
+                        addSspMatrixChoice(choice, this->_transitionMatrix, inputToSspStateMap, numberOfNonComponentStates, currentSspChoice, sspMatrixBuilder);
                     }
                 }
-                // Now we construct the choices for the auxiliary states which reflect former MEC states.
-                for (uint64_t mecIndex = 0; mecIndex < _longRunComponentDecomposition->size(); ++mecIndex) {
-                    storm::storage::MaximalEndComponent const& mec = (*_longRunComponentDecomposition)[mecIndex];
+                // Now we construct the choices for the auxiliary states which reflect former component states.
+                for (uint64_t componentIndex = 0; componentIndex < this->_longRunComponentDecomposition->size(); ++componentIndex) {
+                    auto const& component = (*this->_longRunComponentDecomposition)[componentIndex];
                     sspMatrixBuilder.newRowGroup(currentSspChoice);
-                    for (auto const& stateChoicesPair : mec) {
-                        uint64_t const& mecState = stateChoicesPair.first;
-                        auto const& choicesInMec = stateChoicesPair.second;
-                        for (uint64_t choice = nondeterministicChoiceIndices[mecState]; choice < nondeterministicChoiceIndices[mecState + 1]; ++choice) {
-                            // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state.
-                            if (choicesInMec.find(choice) == choicesInMec.end()) {
+                    // For nondeterministic models it might still be possible to leave the component again. This needs to be reflected in the SSP
+                    // by adding the "exiting" choices of the MEC to the auxiliary states.
+                    for (auto const& element : component) {
+                        uint64_t componentState = internal::getComponentElementState(element);
+                        for (uint64_t choice = choiceIndices[componentState]; choice < choiceIndices[componentState + 1]; ++choice) {
+                            // If the choice is not contained in the component itself, we have to add a similar distribution to the auxiliary state.
+                            if (!internal::componentElementChoicesContains(element, choice)) {
                                 rhs.push_back(storm::utility::zero<ValueType>());
-                                addSspMatrixChoice(choice, _transitionMatrix, inputToSspStateMap, numberOfStatesNotInMecs, currentSspChoice, sspMatrixBuilder);
-                                if (this->isProduceSchedulerSet()) {
+                                addSspMatrixChoice(choice, this->_transitionMatrix, inputToSspStateMap, numberOfNonComponentStates, currentSspChoice, sspMatrixBuilder);
+                                if (sspComponentExitChoicesToOriginalMap) {
                                     // Later we need to be able to map this choice back to the original input model
-                                    sspMecExitChoicesToOriginalMap.emplace_back(mecState, choice - nondeterministicChoiceIndices[mecState]);
+                                    sspComponentExitChoicesToOriginalMap->emplace_back(componentState, choice - choiceIndices[componentState]);
                                 }
                                 ++currentSspChoice;
                             }
                         }
                     }
-                    // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
-                    rhs.push_back(mecLraValues[mecIndex]);
-                    if (this->isProduceSchedulerSet()) {
+                    // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the component.
+                    rhs.push_back(mecLraValues[componentIndex]);
+                    if (sspComponentExitChoicesToOriginalMap) {
                         // Insert some invalid values so we can later detect that this choice is not an exit choice
-                        sspMecExitChoicesToOriginalMap.emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
+                        sspComponentExitChoicesToOriginalMap->emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
                     }
                     ++currentSspChoice;
                 }
-                storm::storage::SparseMatrix<ValueType> sspMatrix = sspMatrixBuilder.build(currentSspChoice, numberOfSspStates, numberOfSspStates);
+                return std::make_pair(sspMatrixBuilder.build(currentSspChoice, numberOfSspStates, numberOfSspStates), std::move(rhs));
+            }
+            
+            template <typename ValueType>
+            void SparseNondeterministicInfiniteHorizonHelper<ValueType>::constructOptimalChoices(std::vector<uint64_t> const& sspChoices, storm::storage::SparseMatrix<ValueType> const& sspMatrix, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>> const& sspComponentExitChoicesToOriginalMap) {
+                // We first take care of non-MEC states
+                storm::utility::vector::setVectorValues(this->_producedOptimalChoices.get(), statesNotInComponent, sspChoices);
+                // Secondly, we consider MEC states. There are 3 cases for each MEC state:
+                // 1. The SSP choices encode that we want to stay in the MEC
+                // 2. The SSP choices encode that we want to leave the MEC and
+                //      a) we take an exit (non-MEC) choice at the given state
+                //      b) we have to take a MEC choice at the given state in a way that eventually an exit state of the MEC is reached
+                uint64_t exitChoiceOffset = sspMatrix.getRowGroupIndices()[numberOfNonComponentStates];
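+                // Rows with index >= exitChoiceOffset belong to the auxiliary (component) states of the SSP.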
+                for (auto const& mec : *this->_longRunComponentDecomposition) {
+                    // Get the sspState of this MEC (using one representative MEC state)
+                    auto const& sspState = inputToSspStateMap[mec.begin()->first];
+                    uint64_t sspChoiceIndex = sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState];
+                    // Obtain the state and choice of the original model to which the selected choice corresponds.
+                    auto const& originalStateChoice = sspComponentExitChoicesToOriginalMap[sspChoiceIndex - exitChoiceOffset];
+                    // Check if we are in Case 1 or 2
+                    if (originalStateChoice.first == std::numeric_limits<uint_fast64_t>::max()) {
+                        // The optimal choice is to stay in this MEC (Case 1)
+                        // In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call of computeLraForMec.
+                        STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC.");
+                    } else {
+                        // The best choice is to leave this MEC via the selected state and choice. (Case 2)
+                        // Set the exit choice (Case 2.a)
+                        this->_producedOptimalChoices.get()[originalStateChoice.first] = originalStateChoice.second;
+                        // The remaining states in this MEC need to reach the state with the exit choice with probability 1. (Case 2.b)
+                        // Perform a backwards search from the exit state, only using MEC choices
+                        // We start by assigning an invalid choice to all remaining MEC states (so that we can easily detect them as unprocessed)
+                        for (auto const& stateActions : mec) {
+                            if (stateActions.first != originalStateChoice.first) {
+                                this->_producedOptimalChoices.get()[stateActions.first] = std::numeric_limits<uint64_t>::max();
+                            }
+                        }
+                        // Ensure that backwards transitions are available
+                        if (this->_backwardTransitions == nullptr) {
+                            this->_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(this->_transitionMatrix.transpose(true));
+                            this->_backwardTransitions = this->_computedBackwardTransitions.get();
+                        }
+                        // Now start a backwards DFS
+                        std::vector<uint64_t> stack = {originalStateChoice.first};
+                        while (!stack.empty()) {
+                            uint64_t currentState = stack.back();
+                            stack.pop_back();
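+                            // Each entry in the backward row group of currentState corresponds to a predecessor of currentState in the original matrix.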
+                            for (auto const& backwardsTransition : this->_backwardTransitions->getRowGroup(currentState)) {
+                                uint64_t predecessorState = backwardsTransition.getColumn();
+                                if (mec.containsState(predecessorState)) {
+                                    auto& selectedPredChoice = this->_producedOptimalChoices.get()[predecessorState];
+                                    if (selectedPredChoice == std::numeric_limits<uint64_t>::max()) {
+                                        // We don't already have a choice for this predecessor.
+                                        // We now need to check whether there is a *MEC* choice leading to currentState
+                                        for (auto const& predChoice : mec.getChoicesForState(predecessorState)) {
+                                            for (auto const& forwardTransition : this->_transitionMatrix.getRow(predChoice)) {
+                                                if (forwardTransition.getColumn() == currentState && !storm::utility::isZero(forwardTransition.getValue())) {
+                                                    // Playing this choice (infinitely often) will lead to the current state (infinitely often)!
+                                                    selectedPredChoice = predChoice - this->_transitionMatrix.getRowGroupIndices()[predecessorState];
+                                                    stack.push_back(predecessorState);
+                                                    break;
+                                                }
+                                            }
+                                            if (selectedPredChoice != std::numeric_limits<uint64_t>::max()) {
+                                                break;
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+
+            template <typename ValueType>
+            std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) {
+                STORM_LOG_ASSERT(this->_longRunComponentDecomposition != nullptr, "Decomposition not computed, yet.");
+                
+                // For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
+                // which redirects all transitions leading to a former component state to a new auxiliary state.
+                // There will be one auxiliary state for each component. These states will be appended to the end of the matrix.
+                
+                // First gather the states that are part of a component
+                // and create a mapping from states that lie in a component to the corresponding component index.
+                storm::storage::BitVector statesInComponents(this->_transitionMatrix.getRowGroupCount());
+                std::vector<uint64_t> inputToSspStateMap(this->_transitionMatrix.getRowGroupCount(), std::numeric_limits<uint64_t>::max());
+                for (uint64_t currentComponentIndex = 0; currentComponentIndex < this->_longRunComponentDecomposition->size(); ++currentComponentIndex) {
+                    for (auto const& element : (*this->_longRunComponentDecomposition)[currentComponentIndex]) {
+                        uint64_t state = internal::getComponentElementState(element);
+                        statesInComponents.set(state);
+                        inputToSspStateMap[state] = currentComponentIndex;
+                    }
+                }
+                // Now take care of the non-component states. Note that the order of these states will be preserved.
+                uint64_t numberOfNonComponentStates = 0;
+                storm::storage::BitVector statesNotInComponent = ~statesInComponents;
+                for (auto const& nonComponentState : statesNotInComponent) {
+                    inputToSspStateMap[nonComponentState] = numberOfNonComponentStates;
+                    ++numberOfNonComponentStates;
+                }
+                // Finalize the mapping for the component states, which still assigns component states to their component index.
+                // To make sure that they point to the auxiliary states (located at the end of the SspMatrix), we need to shift them by the
+                // number of states that are not in a component.
+                for (auto const& componentState : statesInComponents) {
+                    inputToSspStateMap[componentState] += numberOfNonComponentStates;
+                }
+                
+                // For scheduler extraction, we will need to create a mapping between choices at the auxiliary states and the
+                // corresponding choices in the original model.
+                std::vector<std::pair<uint_fast64_t, uint_fast64_t>> sspComponentExitChoicesToOriginalMap;
+                
+                // The next step is to create the SSP matrix and the right-hand side of the SSP.
+                auto sspMatrixVector = buildSspMatrixVector(componentLraValues, inputToSspStateMap, statesNotInComponent, numberOfNonComponentStates, this->isProduceSchedulerSet() ? &sspComponentExitChoicesToOriginalMap : nullptr);
                 
                 // Set-up a solver
                 storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
                 storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(env, true, true, this->getOptimizationDirection(), false, this->isProduceSchedulerSet());
                 requirements.clearBounds();
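+                // The bound requirements can be cleared here because lower and upper bounds are set explicitly below.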
                 STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
-                std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(env, sspMatrix);
+                std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(env, sspMatrixVector.first);
                 solver->setHasUniqueSolution();
                 solver->setHasNoEndComponents();
                 solver->setTrackScheduler(this->isProduceSchedulerSet());
-                auto lowerUpperBounds = std::minmax_element(mecLraValues.begin(), mecLraValues.end());
+                auto lowerUpperBounds = std::minmax_element(componentLraValues.begin(), componentLraValues.end());
                 solver->setLowerBound(*lowerUpperBounds.first);
                 solver->setUpperBound(*lowerUpperBounds.second);
                 solver->setRequirementsChecked();
                 
                 // Solve the equation system
-                std::vector<ValueType> x(numberOfSspStates);
-                solver->solveEquations(env, this->getOptimizationDirection(), x, rhs);
+                std::vector<ValueType> x(sspMatrixVector.first.getRowGroupCount());
+                solver->solveEquations(env, this->getOptimizationDirection(), x, sspMatrixVector.second);
 
                 // Prepare scheduler (if requested)
                 if (this->isProduceSchedulerSet() && solver->hasScheduler()) {
                     // Translate result for ssp matrix to original model
-                    auto const& sspChoices = solver->getSchedulerChoices();
-                    // We first take care of non-mec states
-                    storm::utility::vector::setVectorValues(_producedOptimalChoices.get(), statesNotContainedInAnyMec, sspChoices);
-                    // Secondly, we consider MEC states. There are 3 cases for each MEC state:
-                    // 1. The SSP choices encode that we want to stay in the MEC
-                    // 2. The SSP choices encode that we want to leave the MEC and
-                    //      a) we take an exit (non-MEC) choice at the given state
-                    //      b) we have to take a MEC choice at the given state in a way that eventually an exit state of the MEC is reached
-                    uint64_t exitChoiceOffset = sspMatrix.getRowGroupIndices()[numberOfStatesNotInMecs];
-                    for (auto const& mec : *_longRunComponentDecomposition) {
-                        // Get the sspState of this MEC (using one representative mec state)
-                        auto const& sspState = inputToSspStateMap[mec.begin()->first];
-                        uint64_t sspChoiceIndex = sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState];
-                        // Obtain the state and choice of the original model to which the selected choice corresponds.
-                        auto const& originalStateChoice = sspMecExitChoicesToOriginalMap[sspChoiceIndex - exitChoiceOffset];
-                        // Check if we are in Case 1 or 2
-                        if (originalStateChoice.first == std::numeric_limits<uint_fast64_t>::max()) {
-                            // The optimal choice is to stay in this mec (Case 1)
-                            // In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call of computeLraForMec.
-                            STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC.");
-                        } else {
-                            // The best choice is to leave this MEC via the selected state and choice. (Case 2)
-                            // Set the exit choice (Case 2.a)
-                            _producedOptimalChoices.get()[originalStateChoice.first] = originalStateChoice.second;
-                            // The remaining states in this MEC need to reach the state with the exit choice with probability 1. (Case 2.b)
-                            // Perform a backwards search from the exit state, only using MEC choices
-                            // We start by setting an invalid choice to all remaining mec states (so that we can easily detect them as unprocessed)
-                            for (auto const& stateActions : mec) {
-                                if (stateActions.first != originalStateChoice.first) {
-                                    _producedOptimalChoices.get()[stateActions.first] = std::numeric_limits<uint64_t>::max();
-                                }
-                            }
-                            // Ensure that backwards transitions are available
-                            if (_backwardTransitions == nullptr) {
-                                _computedBackwardTransitions = _transitionMatrix.transpose(true);
-                                _backwardTransitions = &_computedBackwardTransitions;
-                            }
-                            // Now start a backwards DFS
-                            std::vector<uint64_t> stack = {originalStateChoice.first};
-                            while (!stack.empty()) {
-                                uint64_t currentState = stack.back();
-                                stack.pop_back();
-                                for (auto const& backwardsTransition : _backwardTransitions->getRowGroup(currentState)) {
-                                    uint64_t predecessorState = backwardsTransition.getColumn();
-                                    if (mec.containsState(predecessorState)) {
-                                        auto& selectedPredChoice = _producedOptimalChoices.get()[predecessorState];
-                                        if (selectedPredChoice == std::numeric_limits<uint64_t>::max()) {
-                                            // We don't already have a choice for this predecessor.
-                                            // We now need to check whether there is a *MEC* choice leading to currentState
-                                            for (auto const& predChoice : mec.getChoicesForState(predecessorState)) {
-                                                for (auto const& forwardTransition : _transitionMatrix.getRow(predChoice)) {
-                                                    if (forwardTransition.getColumn() == currentState && !storm::utility::isZero(forwardTransition.getValue())) {
-                                                        // Playing this choice (infinitely often) will lead to current state (infinitely often)!
-                                                        selectedPredChoice = predChoice - nondeterministicChoiceIndices[predecessorState];
-                                                        stack.push_back(predecessorState);
-                                                        break;
-                                                    }
-                                                }
-                                                if (selectedPredChoice != std::numeric_limits<uint64_t>::max()) {
-                                                    break;
-                                                }
-                                            }
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
+                    constructOptimalChoices(solver->getSchedulerChoices(), sspMatrixVector.first, inputToSspStateMap, statesNotInComponent, numberOfNonComponentStates, sspComponentExitChoicesToOriginalMap);
                 } else {
                     STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet(), "Requested to produce a scheduler, but no scheduler was generated.");
                 }
                 
                 // Prepare result vector.
                 // For efficiency reasons, we re-use the memory of our rhs for this!
-                std::vector<ValueType> result = std::move(rhs);
-                result.resize(numberOfStates);
+                std::vector<ValueType> result = std::move(sspMatrixVector.second);
+                result.resize(this->_transitionMatrix.getRowGroupCount());
                 result.shrink_to_fit();
                 storm::utility::vector::selectVectorValues(result, inputToSspStateMap, x);
                 return result;
             }
             
-            template class SparseNondeterministicInfiniteHorizonHelper<double, false>;
-            template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber, false>;
+            template class SparseNondeterministicInfiniteHorizonHelper<double>;
+            template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber>;
             
-            //template class SparseNondeterministicInfiniteHorizonHelper<double, true>;
-            //template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber, true>;
         }
     }
 }
\ No newline at end of file
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
index c11a87a90..5286dfdfc 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
@@ -1,20 +1,11 @@
 #pragma once
-#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
+#include "storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h"
 
 
 namespace storm {
-    class Environment;
     
-    namespace models {
-        namespace sparse {
-            template <VT> class StandardRewardModel;
-        }
-    }
     namespace storage {
-        template <typename C> class Decomposition<C>;
-        class MaximalEndComponent;
-        template <typename VT> class SparseMatrix;
-        class StronglyConnectedComponent;
+        template <typename VT> class Scheduler;
     }
     
     namespace modelchecker {
@@ -23,75 +14,26 @@ namespace storm {
             /*!
              * Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system.
              * @tparam ValueType the type a value can have
-             * @tparam Nondeterministic true if there is nondeterminism in the Model (MDP or MA)
              */
-            template <typename ValueType, bool Nondeterministic>
-            class SparseNondeterministicInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType> {
+            template <typename ValueType>
+            class SparseNondeterministicInfiniteHorizonHelper : public SparseInfiniteHorizonHelper<ValueType, true> {
 
             public:
-                
-                /*!
-                 * The type of a component in which the system resides in the long run (BSCC for deterministic models, MEC for nondeterministic models)
-                 */
-                using LongRunComponentType = typename std::conditional<Nondeterministic, storm::storage::MaximalEndComponent, storm::storage::StronglyConnectedComponent>::type;
-                
                 /*!
                  * Function mapping from indices to values
                  */
-                typedef std::function<ValueType(uint64_t)> ValueGetter;
+                typedef typename SparseInfiniteHorizonHelper<ValueType, true>::ValueGetter ValueGetter;
                 
                 /*!
-                 * Initializes the helper for a discrete time (i.e. MDP)
+                 * Initializes the helper for a discrete time model (i.e. MDP)
                  */
                 SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix);
                 
                 /*!
-                 * Initializes the helper for a continuous time (i.e. MA)
+                 * Initializes the helper for a continuous time model (i.e. MA)
                  */
                 SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates);
                 
-                /*!
-                 * Provides backward transitions that can be used during the computation.
-                 * Providing them is optional. If they are not provided, they will be computed internally
-                 * Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the backwardstransitions remains valid.
-                 */
-                void provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardsTransitions);
-    
-                /*!
-                 * Provides the decomposition into long run components (BSCCs/MECs) that can be used during the computation.
-                 * Providing the decomposition is optional. If it is not provided, they will be computed internally.
-                 * Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the decomposition remains valid.
-                 */
-                void provideLongRunComponentDecomposition(storm::storage::Decomposition<ComponentType> const& decomposition);
-                
-                /*!
-                 * Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
-                 * @return a value for each state
-                 */
-                std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates);
-                
-                /*!
-                 * Computes the long run average rewards, i.e., the average reward collected per time unit
-                 * @return a value for each state
-                 */
-                std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel);
-                
-                /*!
-                 * Computes the long run average value given the provided state and action-based rewards.
-                 * @param stateValues a vector containing a value for every state
-                 * @param actionValues a vector containing a value for every choice
-                 * @return a value for each state
-                 */
-                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues = nullptr, std::vector<ValueType> const* actionValues = nullptr);
-                
-                /*!
-                 * Computes the long run average value given the provided state and action based rewards
-                 * @param stateValuesGetter a function returning a value for a given state index
-                 * @param actionValuesGetter a function returning a value for a given (global) choice index
-                 * @return a value for each state
-                 */
-                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, ValueGetter const& stateValuesGetter,  sValueGetter const& actionValuesGetter);
-                
                 /*!
                  * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
                  * @return the produced scheduler of the most recent call.
@@ -116,49 +58,41 @@ namespace storm {
                  * @return the (unique) optimal LRA value for the given component.
                  * @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
                  */
-                template < typename = typename std::enable_if< true >::type >
-                ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, LongRunComponentType const& component);
-                template < typename = typename std::enable_if< false >::type >
-                ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, LongRunComponentType const& component);
+                virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& component) override;
                 
             protected:
                 
-                /*!
-                 * @return true iff this is a computation on a continuous time model (i.e. MA)
-                 */
-                bool isContinuousTime() const;
+                virtual void createDecomposition() override;
                 
-                /*!
-                 * Checks if the component can trivially be solved without much overhead.
-                 * @return either true and the (unique) optimal LRA value for the given component or false and an arbitrary value
-                 * @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
-                 */
-                std::pair<bool, ValueType> computeLraForTrivialComponent(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, LongRunComponentType const& component);
+                std::pair<bool, ValueType> computeLraForTrivialMec(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
                 
                 /*!
                  * As computeLraForMec but uses value iteration as a solution method (independent of what is set in env)
                  */
-                ValueType computeLraForMecVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& mec);
+                ValueType computeLraForMecVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
+                
                 /*!
                  * As computeLraForMec but uses linear programming as a solution method (independent of what is set in env)
                  * @see Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
                  */
-                ValueType computeLraForMecLp(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& mec);
+                ValueType computeLraForMecLp(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent  const& mec);
+                
+                std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> buildSspMatrixVector(std::vector<ValueType> const& mecLraValues, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>>* sspComponentExitChoicesToOriginalMap);
+                
+                /*!
+                 * Translates the optimal choices for the MECs and the SSP back to the original model.
+                 * @pre a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components.
+                 * @post getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values.
+                 */
+                void constructOptimalChoices(std::vector<uint64_t> const& sspChoices, storm::storage::SparseMatrix<ValueType> const& sspMatrix, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>> const& sspComponentExitChoicesToOriginalMap);
                 
                 /*!
+                 * @pre if scheduler production is enabled, a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components.
                  * @return Lra values for each state
+                 * @post if scheduler production is enabled, getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values.
                  */
-                std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues);
+                virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) override;
             
-            private:
-                storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
-                storm::storage::SparseMatrix<ValueType> const* _backwardTransitions;
-                std::unique_ptr<storm::storage::SparseMatrix<ValueType>> _computedBackwardTransitions;
-                storm::storage::Decomposition<LongRunComponentType> const* _longRunComponentDecomposition;
-                std::unique_ptr<storm::storage::Decomposition<LongRunComponentType>> _computedLongRunComponentDecomposition;
-                storm::storage::BitVector const* _markovianStates;
-                std::vector<ValueType> const* _exitRates;
-                boost::optional<std::vector<uint64_t>> _producedOptimalChoices;
             };
 
         

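[Editor's note: a minimal usage sketch of the refactored nondeterministic helper, for
orientation only. The surroundings are assumptions, not part of the patch: `mdp` is a
sparse MDP, `env` a solver environment, and "rew" a reward model name.]

    // Hypothetical caller: maximal LRA rewards on an MDP, with scheduler extraction.
    storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<double> helper(mdp.getTransitionMatrix());
    helper.setOptimizationDirection(storm::solver::OptimizationDirection::Maximize);
    helper.setProduceScheduler(true);
    std::vector<double> lraValues = helper.computeLongRunAverageRewards(env, mdp.getRewardModel("rew"));
    storm::storage::Scheduler<double> scheduler = helper.extractScheduler();
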
From c674de5893b5d7d19c667e4079c573490d005afc Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Tue, 11 Aug 2020 16:53:05 +0200
Subject: [PATCH 33/48] Deterministic infinite horizon: Added gain-bias and
 lra-distribution based solution methods

---
 ...arseDeterministicInfiniteHorizonHelper.cpp | 260 +++++++++++++++++-
 ...SparseDeterministicInfiniteHorizonHelper.h |  12 +-
 2 files changed, 259 insertions(+), 13 deletions(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
index d6dc8eb3c..f7327d8c0 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
@@ -17,9 +17,10 @@
 #include "storm/utility/vector.h"
 
 #include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
-#include "storm/environment/solver/MinMaxSolverEnvironment.h"
+#include "storm/environment/solver/TopologicalSolverEnvironment.h"
 
 #include "storm/exceptions/UnmetRequirementException.h"
+#include "storm/exceptions/NotSupportedException.h"
 
 namespace storm {
     namespace modelchecker {
@@ -46,12 +47,12 @@ namespace storm {
             }
 
             template <typename ValueType>
-            ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::StronglyConnectedComponent const& component) {
+            ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& component) {
                 // For deterministic models, we compute the LRA for a BSCC
                 
                 STORM_LOG_ASSERT(!this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
                 
-                auto trivialResult = computeLraForTrivialBscc(env, stateRewardsGetter, actionRewardsGetter, component);
+                auto trivialResult = computeLraForTrivialBscc(env, stateValueGetter, actionValueGetter, component);
                 if (trivialResult.first) {
                     return trivialResult.second;
                 }
@@ -67,18 +68,18 @@ namespace storm {
                 }
                 STORM_LOG_TRACE("Computing LRA for BSCC of size " << component.size() << " using '" << storm::solver::toString(method) << "'.");
                 if (method == storm::solver::LraMethod::ValueIteration) {
-                    return computeLraForBsccVi(env, stateRewardsGetter, actionRewardsGetter, component);
-                }/* else if (method == storm::solver::LraMethod::LraDistributionEquations) {
+                    return computeLraForBsccVi(env, stateValueGetter, actionValueGetter, component);
+                } else if (method == storm::solver::LraMethod::LraDistributionEquations) {
                     // We only need the first element of the pair as the lra distribution is not relevant at this point.
-                    return computeLongRunAveragesForBsccLraDistr<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;
+                    return computeLraForBsccLraDistr(env, stateValueGetter, actionValueGetter, component).first;
                 }
                 STORM_LOG_WARN_COND(method == storm::solver::LraMethod::GainBiasEquations, "Unsupported lra method selected. Defaulting to " << storm::solver::toString(storm::solver::LraMethod::GainBiasEquations) << ".");
                 // We don't need the bias values
-                return computeLongRunAveragesForBsccGainBias<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;*/
+                return computeLraForBsccGainBias(env, stateValueGetter, actionValueGetter, component).first;
             }
             
             template <typename ValueType>
-            std::pair<bool, ValueType> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForTrivialBscc(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::StronglyConnectedComponent const& component) {
+            std::pair<bool, ValueType> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForTrivialBscc(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& component) {
                 
                 // For deterministic models, we can catch the case where all values are the same. This includes the special case where the BSCC consist only of just one state.
                 bool first = true;
@@ -86,8 +87,9 @@ namespace storm {
                 for (auto const& element : component) {
                     auto state = internal::getComponentElementState(element);
                     STORM_LOG_ASSERT(state == *internal::getComponentElementChoicesBegin(element), "Unexpected choice index at state " << state << " of deterministic model.");
-                    ValueType curr = stateRewardsGetter(state) + (this->isContinuousTime() ? (*this->_exitRates)[state] * actionRewardsGetter(state) : actionRewardsGetter(state));
+                    ValueType curr = stateValueGetter(state) + (this->isContinuousTime() ? (*this->_exitRates)[state] * actionValueGetter(state) : actionValueGetter(state));
                     if (first) {
+                        val = curr;
                         first = false;
                     } else if (val != curr) {
                         return {false, storm::utility::zero<ValueType>()};
@@ -98,8 +100,12 @@ namespace storm {
             }
             
     
+            template <>
+            storm::RationalFunction SparseDeterministicInfiniteHorizonHelper<storm::RationalFunction>::computeLraForBsccVi(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& bscc) {
+                STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "The requested method for LRA computation is not supported for parametric models.");
+            }
             template <typename ValueType>
-            ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccVi(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::StronglyConnectedComponent const& bscc) {
+            ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccVi(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& bscc) {
 
                 // Collect parameters of the computation
                 ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
@@ -108,14 +114,243 @@ namespace storm {
                 if (this->isContinuousTime()) {
                     // We assume a CTMC (with deterministic timed states and no instant states)
                     storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::StronglyConnectedComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNoIs> viHelper(bscc, this->_transitionMatrix, aperiodicFactor, this->_markovianStates, this->_exitRates);
-                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, this->_exitRates);
+                    return viHelper.performValueIteration(env, stateValueGetter, actionValueGetter, this->_exitRates);
                 } else {
                     // We assume a DTMC (with deterministic timed states and no instant states)
                     storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::StronglyConnectedComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNoIs> viHelper(bscc, this->_transitionMatrix, aperiodicFactor);
-                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter);
+                    return viHelper.performValueIteration(env, stateValueGetter, actionValueGetter);
                 }
             }
             
+            template <typename ValueType>
+            std::pair<ValueType, std::vector<ValueType>> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccGainBias(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc) {
+                // We build the equation system as in Line 3 of Algorithm 3 from
+                // Kretinsky, Meggendorfer: Efficient Strategy Iteration for Mean Payoff in Markov Decision Processes (ATVA 2017)
+                // https://doi.org/10.1007/978-3-319-68167-2_25
+                // The first variable corresponds to the gain of the BSCC, whereas the subsequent variables yield the bias for each state s_1, s_2, ....
+                // No bias variable for s_0 is needed since its bias is always set to zero, yielding an nxn equation system matrix.
+                // To make this work for CTMCs, we could uniformize the model. This preserves the LRA and ensures that we can compute the
+                // LRA as for a DTMC (the sojourn time in each state is the same). If we then multiply the equations with the uniformization rate,
+                // the uniformization rate cancels out. Hence, we obtain the equation system below.
+                
+                // Get a mapping from global state indices to local ones.
+                std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
+                uint64_t localIndex = 0;
+                for (auto const& globalIndex : bscc) {
+                    toLocalIndexMap[globalIndex] = localIndex;
+                    ++localIndex;
+                }
+                
+                // Prepare an environment for the underlying equation solver
+                auto subEnv = env;
+                if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
+                    // Topological solver does not make any sense since the BSCC is connected.
+                    subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
+                }
+                subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());
+                
+                // Build the equation system matrix and vector.
+                storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
+                bool isEquationSystemFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
+                storm::storage::SparseMatrixBuilder<ValueType> builder(bscc.size(), bscc.size());
+                std::vector<ValueType> eqSysVector;
+                eqSysVector.reserve(bscc.size());
+                // The first row asserts that the weighted bias variables and the reward at s_0 sum up to the gain
+                uint64_t row = 0;
+                ValueType entryValue;
+                for (auto const& globalState : bscc) {
+                    ValueType rateAtState = this->_exitRates ? (*this->_exitRates)[globalState] : storm::utility::one<ValueType>();
+                    // Coefficient for the gain variable
+                    if (isEquationSystemFormat) {
+                        // '1-0' in row 0 and -(-1) in other rows
+                        builder.addNextValue(row, 0, storm::utility::one<ValueType>());
+                    } else if (row > 0) {
+                        // No coefficient in row 0; otherwise, subtract the gain.
+                        builder.addNextValue(row, 0, -storm::utility::one<ValueType>());
+                    }
+                    // Compute the weighted sum over the successor states. As this is a BSCC, each successor state is again in the BSCC.
+                    auto diagonalValue = storm::utility::zero<ValueType>();
+                    if (row > 0) {
+                        if (isEquationSystemFormat) {
+                            diagonalValue = rateAtState;
+                        } else {
+                            diagonalValue = storm::utility::one<ValueType>() - rateAtState;
+                        }
+                    }
+                    bool needDiagonalEntry = !storm::utility::isZero(diagonalValue);
+                    for (auto const& entry : this->_transitionMatrix.getRow(globalState)) {
+                        uint64_t col = toLocalIndexMap[entry.getColumn()];
+                        if (col == 0) {
+                            // Skip the transition to s_0. This corresponds to setting the bias of s_0 to zero.
+                            continue;
+                        }
+                        entryValue = entry.getValue() * rateAtState;
+                        if (isEquationSystemFormat) {
+                            entryValue = -entryValue;
+                        }
+                        if (needDiagonalEntry && col >= row) {
+                            if (col == row) {
+                                entryValue += diagonalValue;
+                            } else { // col > row
+                                builder.addNextValue(row, row, diagonalValue);
+                            }
+                            needDiagonalEntry = false;
+                        }
+                        builder.addNextValue(row, col, entryValue);
+                    }
+                    if (needDiagonalEntry) {
+                        builder.addNextValue(row, row, diagonalValue);
+                    }
+
+                    eqSysVector.push_back(stateValuesGetter(globalState) + rateAtState * actionValuesGetter(globalState));
+                    ++row;
+                }
+
+                // Create a linear equation solver
+                auto solver = linearEquationSolverFactory.create(subEnv, builder.build());
+                // Check solver requirements.
+                auto requirements = solver->getRequirements(subEnv);
+                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
+                // Todo: Find bounds on the bias variables. Just inserting the maximal value from the vector probably does not work.
+                
+                std::vector<ValueType> eqSysSol(bscc.size(), storm::utility::zero<ValueType>());
+                // Take the mean of the rewards as an initial guess for the gain
+                //eqSysSol.front() = std::accumulate(eqSysVector.begin(), eqSysVector.end(), storm::utility::zero<ValueType>()) / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size());
+                solver->solveEquations(subEnv, eqSysSol, eqSysVector);
+                
+                ValueType gain = eqSysSol.front();
+                // insert bias value for state 0
+                eqSysSol.front() = storm::utility::zero<ValueType>();
+                // Return the gain and the bias values
+                return std::pair<ValueType, std::vector<ValueType>>(std::move(gain), std::move(eqSysSol));
+            }
+            
+            template <typename ValueType>
+            std::pair<ValueType, std::vector<ValueType>> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccLraDistr(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc) {
+
+                // Let A be an auxiliary matrix with A[s,s] = R(s,s) - r(s) and A[s,s'] = R(s,s') for s,s' in the BSCC with s != s'.
+                // We build and solve the equation system
+                // x*A=0 & x_0+...+x_n=1   <=>   A^t*x=0 & x_0+...+x_n=1   <=>   (1+A^t)*x=x & 1-x_0-...-x_{n-1}=x_n.
+                // Then, x[i] will be the fraction of the time we are in state i.
+                
+                // The construction below assumes that the BSCC consists of more than one state, so we catch the single-state case first.
+                if (bscc.size() == 1) {
+                    ValueType lraValue = stateValuesGetter(*bscc.begin()) + (this->isContinuousTime() ? (*this->_exitRates)[*bscc.begin()] * actionValuesGetter(*bscc.begin()) : actionValuesGetter(*bscc.begin()));
+                    return { lraValue, {storm::utility::one<ValueType>()} };
+                }
+                
+                // Prepare an environment for the underlying linear equation solver
+                auto subEnv = env;
+                if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
+                    // Topological solver does not make any sense since the BSCC is connected.
+                    subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
+                }
+                subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());
+                
+                // Get a mapping from global state indices to local ones as well as a bitvector containing states within the BSCC.
+                std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
+                storm::storage::BitVector bsccStates(this->_transitionMatrix.getRowCount(), false);
+                uint64_t localIndex = 0;
+                for (auto const& globalIndex : bscc) {
+                    bsccStates.set(globalIndex, true);
+                    toLocalIndexMap[globalIndex] = localIndex;
+                    ++localIndex;
+                }
+                
+                // Build the auxiliary Matrix A.
+                auto auxMatrix = this->_transitionMatrix.getSubmatrix(false, bsccStates, bsccStates, true); // add diagonal entries!
+                uint64_t row = 0;
+                for (auto const& globalIndex : bscc) {
+                    ValueType rateAtState = this->_exitRates ? (*this->_exitRates)[globalIndex] : storm::utility::one<ValueType>();
+                    for (auto& entry : auxMatrix.getRow(row)) {
+                        if (entry.getColumn() == row) {
+                            // This value is non-zero since we have a BSCC with more than one state
+                            entry.setValue(rateAtState * (entry.getValue() - storm::utility::one<ValueType>()));
+                        } else if (this->isContinuousTime()) {
+                            entry.setValue(entry.getValue() * rateAtState);
+                        }
+                    }
+                    ++row;
+                }
+                assert(row == auxMatrix.getRowCount());
+                
+                // We need to consider A^t. This will not delete diagonal entries since they are non-zero.
+                auxMatrix = auxMatrix.transpose();
+                
+                // Check whether we need the fixpoint characterization
+                storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
+                bool isFixpointFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::FixedPointSystem;
+                if (isFixpointFormat) {
+                    // Add a 1 on the diagonal
+                    for (row = 0; row < auxMatrix.getRowCount(); ++row) {
+                        for (auto& entry : auxMatrix.getRow(row)) {
+                            if (entry.getColumn() == row) {
+                                entry.setValue(storm::utility::one<ValueType>() + entry.getValue());
+                            }
+                        }
+                    }
+                }
+                
+                // We now build the equation system matrix.
+                // We can drop the last row of A and add ones in this row instead to assert that the variables sum up to one.
+                // Phase 1: replace the existing entries of the last row with ones.
+                uint64_t col = 0;
+                uint64_t lastRow = auxMatrix.getRowCount() - 1;
+                for (auto& entry : auxMatrix.getRow(lastRow)) {
+                    entry.setColumn(col);
+                    if (isFixpointFormat) {
+                        if (col == lastRow) {
+                            entry.setValue(storm::utility::zero<ValueType>());
+                        } else {
+                            entry.setValue(-storm::utility::one<ValueType>());
+                        }
+                    } else {
+                        entry.setValue(storm::utility::one<ValueType>());
+                    }
+                    ++col;
+                }
+                storm::storage::SparseMatrixBuilder<ValueType> builder(std::move(auxMatrix));
+                for (; col <= lastRow; ++col) {
+                    if (isFixpointFormat) {
+                        if (col != lastRow) {
+                            builder.addNextValue(lastRow, col, -storm::utility::one<ValueType>());
+                        }
+                    } else {
+                        builder.addNextValue(lastRow, col, storm::utility::one<ValueType>());
+                    }
+                }
+                
+                std::vector<ValueType> bsccEquationSystemRightSide(bscc.size(), storm::utility::zero<ValueType>());
+                bsccEquationSystemRightSide.back() = storm::utility::one<ValueType>();
+                
+                // Create a linear equation solver
+                auto solver = linearEquationSolverFactory.create(subEnv,  builder.build());
+                solver->setBounds(storm::utility::zero<ValueType>(), storm::utility::one<ValueType>());
+                // Check solver requirements.
+                auto requirements = solver->getRequirements(subEnv);
+                requirements.clearLowerBounds();
+                requirements.clearUpperBounds();
+                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
+                
+                std::vector<ValueType> lraDistr(bscc.size(), storm::utility::one<ValueType>() / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size()));
+                solver->solveEquations(subEnv, lraDistr, bsccEquationSystemRightSide);
+                
+                // Calculate final LRA Value
+                ValueType result = storm::utility::zero<ValueType>();
+                auto solIt = lraDistr.begin();
+                for (auto const& globalState : bscc) {
+                    if (this->isContinuousTime()) {
+                        result += (*solIt) * (stateValuesGetter(globalState) + (*this->_exitRates)[globalState] * actionValuesGetter(globalState));
+                    } else {
+                        result += (*solIt) * (stateValuesGetter(globalState) + actionValuesGetter(globalState));
+                    }
+                    ++solIt;
+                }
+                assert(solIt == lraDistr.end());
+
+                return std::pair<ValueType, std::vector<ValueType>>(std::move(result), std::move(lraDistr));
+            }
+
             template <typename ValueType>
             std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> SparseDeterministicInfiniteHorizonHelper<ValueType>::buildSspMatrixVector(std::vector<ValueType> const& bsccLraValues, std::vector<uint64_t> const& inputStateToBsccIndexMap, storm::storage::BitVector const& statesNotInComponent, bool asEquationSystem) {
                 
@@ -203,6 +438,7 @@ namespace storm {
             
             template class SparseDeterministicInfiniteHorizonHelper<double>;
             template class SparseDeterministicInfiniteHorizonHelper<storm::RationalNumber>;
+            template class SparseDeterministicInfiniteHorizonHelper<storm::RationalFunction>;
             
         }
     }
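[Editor's note: a worked instance of the gain-bias system implemented above, under
assumed data. Take a two-state DTMC BSCC that alternates deterministically between
s_0 and s_1, with rewards r(s_0) = 0 and r(s_1) = 2. Fixing the bias of s_0 to zero
(b_0 = 0), the equations g + b(s) = r(s) + sum_s' P(s,s')*b(s') become

    g       = r(s_0) + b_1 = b_1
    g + b_1 = r(s_1) + b_0 = 2

so g = 1 and b_1 = 1; the gain equals the mean payoff (0 + 2) / 2, as expected.]
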
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h
index 5c22cc1af..b5a78fad1 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h
@@ -46,10 +46,20 @@ namespace storm {
                 std::pair<bool, ValueType> computeLraForTrivialBscc(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
                 
                 /*!
-                 * As computeLraForMec but uses value iteration as a solution method (independent of what is set in env)
+                 * As computeLraForComponent but uses value iteration as a solution method (independent of what is set in env)
                  */
                 ValueType computeLraForBsccVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
                 
+                /*!
+                 * As computeLraForComponent but solves a linear equation system encoding gain and bias (independent of what is set in env)
+                 * @see Kretinsky, Meggendorfer: Efficient Strategy Iteration for Mean Payoff in Markov Decision Processes (ATVA 2017), https://doi.org/10.1007/978-3-319-68167-2_25
+                 */
+                std::pair<ValueType, std::vector<ValueType>> computeLraForBsccGainBias(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
+                /*!
+                 * As computeLraForComponent but solves a linear equation system encoding the long run average (steady state) distribution (independent of what is set in env)
+                 */
+                std::pair<ValueType, std::vector<ValueType>> computeLraForBsccLraDistr(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
+                
                 std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> buildSspMatrixVector(std::vector<ValueType> const& bsccLraValues, std::vector<uint64_t> const& inputStateToBsccIndexMap, storm::storage::BitVector const& statesNotInComponent, bool asEquationSystem);
                 
                 /*!

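[Editor's note: the same two-state chain cross-checks the LRA-distribution method
declared above. With P(s_0,s_1) = P(s_1,s_0) = 1 and A = P - I, solving x*A = 0
together with x_0 + x_1 = 1 gives

    x_0 = x_1 = 1/2
    LRA = x_0 * r(s_0) + x_1 * r(s_1) = 1/2 * 0 + 1/2 * 2 = 1

which agrees with the gain obtained from the gain-bias equations.]
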
From ef2448410b84a2b542cd89d785453437ea765092 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Tue, 11 Aug 2020 16:53:41 +0200
Subject: [PATCH 34/48] Fixed selecting wrong reward kind

---
 .../helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp
index 8eab0bca0..3ff9611da 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp
@@ -78,7 +78,7 @@ namespace storm {
                         actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionReward(globalChoiceIndex); };
                     }
                 } else {
-                    stateRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
+                    actionRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
                 }
                 
                 return computeLongRunAverageValues(env, stateRewardsGetter, actionRewardsGetter);
@@ -109,7 +109,7 @@ namespace storm {
                 // We will compute the long run average value for each MEC individually and then set-up an Equation system to compute the value also at non-mec states.
                 // For a description of this approach see, e.g., Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
                 
-                 // Prepare an environment for the underlying solvers
+                 // Prepare an environment for the underlying solvers.
                 auto underlyingSolverEnvironment = env;
                 if (env.solver().isForceSoundness()) {
                     // For sound computations, the error in the MECS plus the error in the remaining system should not exceed the user defined precsion.
@@ -151,10 +151,10 @@ namespace storm {
             
             template class SparseInfiniteHorizonHelper<double, true>;
             template class SparseInfiniteHorizonHelper<storm::RationalNumber, true>;
-            template class SparseInfiniteHorizonHelper<storm::RationalFunction, true>;
             
             template class SparseInfiniteHorizonHelper<double, false>;
             template class SparseInfiniteHorizonHelper<storm::RationalNumber, false>;
+            template class SparseInfiniteHorizonHelper<storm::RationalFunction, false>;
             
         }
     }

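[Editor's note: the reason this one-line fix matters. Before it, the else-branch
re-assigned stateRewardsGetter and left actionRewardsGetter default-constructed,
i.e., empty; invoking an empty std::function throws at the call site. A
self-contained illustration (standard C++ semantics, not Storm code):]

    #include <cstdint>
    #include <functional>
    #include <iostream>

    int main() {
        // Mirrors the buggy path: the getter is never assigned.
        std::function<double(uint64_t)> actionRewardsGetter;
        try {
            actionRewardsGetter(0);
        } catch (std::bad_function_call const&) {
            std::cout << "calling an empty std::function throws std::bad_function_call\n";
        }
        return 0;
    }
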
From d92905a7c37f3a5b0b66d936cef2a4a0cd553f7c Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Tue, 11 Aug 2020 16:54:04 +0200
Subject: [PATCH 35/48] LraVi: Fixed uninitialized bool member.

---
 .../helper/infinitehorizon/internal/LraViHelper.cpp            | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
index 3f6825a15..c92618735 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
@@ -25,7 +25,7 @@ namespace storm {
             namespace internal {
                 
                 template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
-                LraViHelper<ValueType, ComponentType, TransitionsType>::LraViHelper(ComponentType const& component, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, ValueType const& aperiodicFactor, storm::storage::BitVector const* timedStates, std::vector<ValueType> const* exitRates) : _component(component), _transitionMatrix(transitionMatrix), _timedStates(timedStates), _hasInstantStates(TransitionsType == LraViTransitionsType::DetTsNondetIs || TransitionsType == LraViTransitionsType::DetTsDetIs) {
+                LraViHelper<ValueType, ComponentType, TransitionsType>::LraViHelper(ComponentType const& component, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, ValueType const& aperiodicFactor, storm::storage::BitVector const* timedStates, std::vector<ValueType> const* exitRates) : _component(component), _transitionMatrix(transitionMatrix), _timedStates(timedStates), _hasInstantStates(TransitionsType == LraViTransitionsType::DetTsNondetIs || TransitionsType == LraViTransitionsType::DetTsDetIs), _Tsx1IsCurrent(false) {
                     // Run through the component and collect some data:
                     // We create two submodels, one consisting of the timed states of the component and one consisting of the instant states of the component.
                     // For this, we create a state index map that point from state indices of the input model to indices of the corresponding submodel of that state.
@@ -461,6 +461,7 @@ namespace storm {
                 
                 template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
                 bool LraViHelper<ValueType, ComponentType, TransitionsType>::isTimedState(uint64_t const& inputModelStateIndex) const {
+                    STORM_LOG_ASSERT(!_hasInstantStates || _timedStates != nullptr, "Model has instant states but no partition into timed and instant states is given.");
                     STORM_LOG_ASSERT(!_hasInstantStates || inputModelStateIndex < _timedStates->size(), "Unable to determine whether state " << inputModelStateIndex << " is timed.");
                     return !_hasInstantStates || _timedStates->get(inputModelStateIndex);
                 }

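[Editor's note: a minimal illustration of the defect fixed here, outside of Storm.
A bool member that appears in no constructor initializer list holds an indeterminate
value, and reading it is undefined behavior, so iteration bookkeeping that branches
on it may differ between runs or build configurations:]

    struct Helper {
        bool _Tsx1IsCurrent; // pre-patch: never initialized
        Helper() {}          // fix: add _Tsx1IsCurrent(false) to the initializer list
    };
    // After `Helper h;`, reading h._Tsx1IsCurrent is undefined behavior;
    // any branch on it is unreliable until the member is initialized.
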
From 462ce5dce37fe0273e1420cd5f4c2b24ffb49e60 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Tue, 11 Aug 2020 16:54:43 +0200
Subject: [PATCH 36/48] Ctmc: added a method that returns the probabilistic
 transition matrix.

---
 src/storm/models/sparse/Ctmc.cpp | 12 ++++++++++++
 src/storm/models/sparse/Ctmc.h   |  6 ++++++
 2 files changed, 18 insertions(+)

diff --git a/src/storm/models/sparse/Ctmc.cpp b/src/storm/models/sparse/Ctmc.cpp
index 9b36ded21..584b85847 100644
--- a/src/storm/models/sparse/Ctmc.cpp
+++ b/src/storm/models/sparse/Ctmc.cpp
@@ -79,6 +79,18 @@ namespace storm {
                 }
             }
             
+            template<typename ValueType, typename RewardModelType>
+            storm::storage::SparseMatrix<ValueType> Ctmc<ValueType, RewardModelType>::computeProbabilityMatrix() const {
+                // Turn the rates into probabilities by dividing each row by the exit rate of the corresponding state.
+                storm::storage::SparseMatrix<ValueType> result(this->getTransitionMatrix());
+                for (uint_fast64_t row = 0; row < result.getRowCount(); ++row) {
+                    for (auto& entry : result.getRow(row)) {
+                        entry.setValue(entry.getValue() / exitRates[row]);
+                    }
+                }
+                return result;
+            }
+            
             template class Ctmc<double>;
 
 #ifdef STORM_HAVE_CARL
diff --git a/src/storm/models/sparse/Ctmc.h b/src/storm/models/sparse/Ctmc.h
index ed5a46329..58859d5dd 100644
--- a/src/storm/models/sparse/Ctmc.h
+++ b/src/storm/models/sparse/Ctmc.h
@@ -64,6 +64,12 @@ namespace storm {
 
                 virtual void reduceToStateBasedRewards() override;
                 
+                /*!
+                 * @return the probabilistic transition matrix P
+                 * @note getTransitionMatrix() retrieves the transition rate matrix R, where R(s,s') = r(s) * P(s,s')
+                 */
+                storm::storage::SparseMatrix<ValueType> computeProbabilityMatrix() const;
+                
             private:
                 /*!
                  * Computes the exit rate vector based on the given rate matrix.

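[Editor's note: a small numeric check of computeProbabilityMatrix(). If state s has
the rate entries R(s,s_1) = 3 and R(s,s_2) = 1, then

    r(s)     = 3 + 1 = 4
    P(s,s_1) = R(s,s_1) / r(s) = 3/4
    P(s,s_2) = R(s,s_2) / r(s) = 1/4

so each row of the returned matrix sums to one, as required for the embedded DTMC.]
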
From 959e035153f1543c44cfa174fb4cc206fbbbcedf Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Tue, 11 Aug 2020 16:56:01 +0200
Subject: [PATCH 37/48] Use the new infinite horizon helper for sparse ctmc and
 dtmc.

---
 .../csl/SparseCtmcCslModelChecker.cpp         | 17 +++++++++++----
 .../utility/SetInformationFromCheckTask.h     | 15 +++++++++++++
 .../prctl/SparseDtmcPrctlModelChecker.cpp     | 21 +++++++++++++------
 3 files changed, 43 insertions(+), 10 deletions(-)

diff --git a/src/storm/modelchecker/csl/SparseCtmcCslModelChecker.cpp b/src/storm/modelchecker/csl/SparseCtmcCslModelChecker.cpp
index 10f1600bc..11530673b 100644
--- a/src/storm/modelchecker/csl/SparseCtmcCslModelChecker.cpp
+++ b/src/storm/modelchecker/csl/SparseCtmcCslModelChecker.cpp
@@ -2,6 +2,8 @@
 
 #include "storm/modelchecker/csl/helper/SparseCtmcCslHelper.h"
 #include "storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h"
+#include "storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h"
+#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
 
 #include "storm/models/sparse/StandardRewardModel.h"
 
@@ -127,15 +129,22 @@ namespace storm {
             std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
             ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
             
-            std::vector<ValueType> numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), subResult.getTruthValuesVector(), &this->getModel().getExitRateVector());
-            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(numericResult)));
+            auto probabilisticTransitions = this->getModel().computeProbabilityMatrix();
+            storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType> helper(probabilisticTransitions, this->getModel().getExitRateVector());
+            storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
+            auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
+            
+            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
         }
 
         template <typename SparseCtmcModelType>
         std::unique_ptr<CheckResult> SparseCtmcCslModelChecker<SparseCtmcModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
-            std::vector<ValueType> numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), rewardModel.get(), &this->getModel().getExitRateVector());
-            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(numericResult)));
+            auto probabilisticTransitions = this->getModel().computeProbabilityMatrix();
+            storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType> helper(probabilisticTransitions, this->getModel().getExitRateVector());
+            storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
+            auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
+            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
         }
         
         template <typename SparseCtmcModelType>
diff --git a/src/storm/modelchecker/helper/utility/SetInformationFromCheckTask.h b/src/storm/modelchecker/helper/utility/SetInformationFromCheckTask.h
index e1348cd03..49b46db26 100644
--- a/src/storm/modelchecker/helper/utility/SetInformationFromCheckTask.h
+++ b/src/storm/modelchecker/helper/utility/SetInformationFromCheckTask.h
@@ -26,6 +26,21 @@ namespace storm {
                 // Scheduler Production
 			    helper.setProduceScheduler(checkTask.isProduceSchedulersSet());
             }
+            
+            /*!
+             * Forwards relevant information stored in the given CheckTask to the given helper
+             */
+            template <typename HelperType, typename FormulaType, typename ModelType>
+            void setInformationFromCheckTaskDeterministic(HelperType& helper, storm::modelchecker::CheckTask<FormulaType, typename ModelType::ValueType> const& checkTask, ModelType const& model) {
+                // Relevancy of initial states.
+                if (checkTask.isOnlyInitialStatesRelevantSet()) {
+                    helper.setRelevantStates(model.getInitialStates());
+                }
+                // Value threshold to which the result will be compared
+                if (checkTask.isBoundSet()) {
+                    helper.setValueThreshold(checkTask.getBoundComparisonType(), checkTask.getBoundThreshold());
+                }
+            }
         }
     }
 }
\ No newline at end of file
diff --git a/src/storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp
index 541c51502..ad0f95ef6 100644
--- a/src/storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp
+++ b/src/storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp
@@ -13,6 +13,8 @@
 #include "storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h"
 #include "storm/modelchecker/csl/helper/SparseCtmcCslHelper.h"
 #include "storm/modelchecker/prctl/helper/rewardbounded/QuantileHelper.h"
+#include "storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h"
+#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
 
 #include "storm/logic/FragmentSpecification.h"
 
@@ -166,18 +168,25 @@ namespace storm {
 
         template<typename SparseDtmcModelType>
         std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeLongRunAverageProbabilities(Environment const& env, CheckTask<storm::logic::StateFormula, ValueType> const& checkTask) {
+
             storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
             std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
             ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
-            std::vector<ValueType> numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageProbabilities<ValueType>(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), subResult.getTruthValuesVector(), nullptr);
-            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(numericResult)));
+
+            storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix());
+            storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
+            auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
+
+            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
         }
         
         template<typename SparseDtmcModelType>
         std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
-            std::vector<ValueType> numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageRewards<ValueType>(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), rewardModel.get(), nullptr);
-            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(numericResult)));
+            storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix());
+            storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
+            auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
+            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
         }
         
         template<typename SparseDtmcModelType>

From 19f6552b05ad5b7a7363ee92cb1e18d0ddf46eee Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Tue, 11 Aug 2020 16:56:48 +0200
Subject: [PATCH 38/48] Fixed insufficient precision in CTMC LRA test

---
 .../storm/modelchecker/csl/LraCtmcCslModelCheckerTest.cpp  | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/test/storm/modelchecker/csl/LraCtmcCslModelCheckerTest.cpp b/src/test/storm/modelchecker/csl/LraCtmcCslModelCheckerTest.cpp
index b20bf7f6b..75e41cdf2 100755
--- a/src/test/storm/modelchecker/csl/LraCtmcCslModelCheckerTest.cpp
+++ b/src/test/storm/modelchecker/csl/LraCtmcCslModelCheckerTest.cpp
@@ -43,6 +43,7 @@ namespace {
             env.solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Gmmxx);
             env.solver().gmmxx().setMethod(storm::solver::GmmxxLinearEquationSolverMethod::Gmres);
             env.solver().gmmxx().setPreconditioner(storm::solver::GmmxxLinearEquationSolverPreconditioner::Ilu);
+            env.solver().gmmxx().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
             env.solver().lra().setDetLraMethod(storm::solver::LraMethod::GainBiasEquations);
             env.solver().lra().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
             return env;
@@ -61,6 +62,7 @@ namespace {
             env.solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Gmmxx);
             env.solver().gmmxx().setMethod(storm::solver::GmmxxLinearEquationSolverMethod::Gmres);
             env.solver().gmmxx().setPreconditioner(storm::solver::GmmxxLinearEquationSolverPreconditioner::Ilu);
+            env.solver().gmmxx().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
             env.solver().lra().setDetLraMethod(storm::solver::LraMethod::GainBiasEquations);
             env.solver().lra().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
             return env;
@@ -78,6 +80,8 @@ namespace {
             storm::Environment env;
             env.solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Gmmxx);
             env.solver().gmmxx().setMethod(storm::solver::GmmxxLinearEquationSolverMethod::Gmres);
+            env.solver().gmmxx().setPreconditioner(storm::solver::GmmxxLinearEquationSolverPreconditioner::Ilu);
+            env.solver().gmmxx().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
             env.solver().lra().setDetLraMethod(storm::solver::LraMethod::GainBiasEquations);
             env.solver().lra().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
             return env;
@@ -95,6 +99,8 @@ namespace {
             storm::Environment env;
             env.solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Gmmxx);
             env.solver().gmmxx().setMethod(storm::solver::GmmxxLinearEquationSolverMethod::Gmres);
+            env.solver().gmmxx().setPreconditioner(storm::solver::GmmxxLinearEquationSolverPreconditioner::Ilu);
+            env.solver().gmmxx().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
             env.solver().lra().setDetLraMethod(storm::solver::LraMethod::GainBiasEquations);
             env.solver().lra().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
             return env;
@@ -164,6 +170,7 @@ namespace {
             env.solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Gmmxx);
             env.solver().gmmxx().setMethod(storm::solver::GmmxxLinearEquationSolverMethod::Gmres);
             env.solver().gmmxx().setPreconditioner(storm::solver::GmmxxLinearEquationSolverPreconditioner::Ilu);
+            env.solver().gmmxx().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
             env.solver().lra().setDetLraMethod(storm::solver::LraMethod::LraDistributionEquations);
             env.solver().lra().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
             return env;
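
For reference, the fixture pattern these hunks converge on: every GMRES-based LRA test environment now pins the gmmxx linear solver precision in addition to the LRA precision. A minimal sketch of the shared setup (the function name and include path are assumptions; the calls themselves are taken verbatim from the hunks above):

    #include "storm/environment/Environment.h"

    // Hypothetical consolidation of the repeated fixture code: tighten both
    // the linear-solver and the LRA tolerance to 1e-8, since the equation
    // system approach otherwise yields results outside the default precision.
    storm::Environment makeGmresLraEnvironment() {
        storm::Environment env;
        env.solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Gmmxx);
        env.solver().gmmxx().setMethod(storm::solver::GmmxxLinearEquationSolverMethod::Gmres);
        env.solver().gmmxx().setPreconditioner(storm::solver::GmmxxLinearEquationSolverPreconditioner::Ilu);
        env.solver().gmmxx().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8));
        env.solver().lra().setDetLraMethod(storm::solver::LraMethod::GainBiasEquations);
        env.solver().lra().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8));
        return env;
    }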

From 9c82fe7b0d1b403d16ee27e71ce72f51a139f69d Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 12 Aug 2020 10:30:45 +0200
Subject: [PATCH 39/48] Making hybrid infinite horizon helper ready for
 deterministic models.

---
 .../HybridInfiniteHorizonHelper.cpp           | 143 ++++++++++++++++++
 .../HybridInfiniteHorizonHelper.h             |  87 +++++++++++
 ...dNondeterministicInfiniteHorizonHelper.cpp | 101 -------------
 ...ridNondeterministicInfiniteHorizonHelper.h |  64 --------
 4 files changed, 230 insertions(+), 165 deletions(-)
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp
 create mode 100644 src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h
 delete mode 100644 src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp
 delete mode 100644 src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h

diff --git a/src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp
new file mode 100644
index 000000000..36231a419
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp
@@ -0,0 +1,143 @@
+#include "HybridInfiniteHorizonHelper.h"
+
+#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
+#include "storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h"
+#include "storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h"
+
+#include "storm/models/symbolic/NondeterministicModel.h"
+
+#include "storm/storage/SparseMatrix.h"
+
+#include "storm/utility/macros.h"
+
+#include "storm/exceptions/NotSupportedException.h"
+
+namespace storm {
+    namespace modelchecker {
+        namespace helper {
+            
+            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
+            HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(nullptr) {
+                // Intentionally left empty.
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
+            HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(&markovianStates), _exitRates(&exitRateVector) {
+                // Intentionally left empty.
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
+            HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(&exitRateVector) {
+                // Intentionally left empty.
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
+            std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd<DdType> const& psiStates) {
+                // Convert this query to an instance for the sparse engine.
+                // Create ODD for the translation.
+                storm::dd::Odd odd = _model.getReachableStates().createOdd();
+                // Translate all required components
+                storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix;
+                if (_model.isNondeterministicModel()) {
+                    explicitTransitionMatrix = _transitionMatrix.toMatrix(dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
+                } else {
+                    explicitTransitionMatrix = _transitionMatrix.toMatrix(odd, odd);
+                }
+                std::vector<ValueType> explicitExitRateVector;
+                storm::storage::BitVector explicitMarkovianStates;
+                if (isContinuousTime()) {
+                    explicitExitRateVector = _exitRates->toVector(odd);
+                    if (_markovianStates) {
+                        explicitMarkovianStates = _markovianStates->toVector(odd);
+                    }
+                }
+                auto sparseHelper = createSparseHelper(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector, odd);
+                auto explicitResult = sparseHelper->computeLongRunAverageProbabilities(env, psiStates.toVector(odd));
+                return std::make_unique<HybridQuantitativeCheckResult<DdType, ValueType>>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero<ValueType>(), _model.getReachableStates(), std::move(odd), std::move(explicitResult));
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
+            std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel) {
+                // Convert this query to an instance for the sparse engine.
+                // Create ODD for the translation.
+                storm::dd::Odd odd = _model.getReachableStates().createOdd();
+                
+                // Translate all required components
+                // Transitions and rewards
+                storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix;
+                std::vector<ValueType> explicitStateRewards, explicitActionRewards;
+                if (rewardModel.hasStateRewards()) {
+                    explicitStateRewards = rewardModel.getStateRewardVector().toVector(odd);
+                }
+                if (_model.isNondeterministicModel() && rewardModel.hasStateActionRewards()) {
+                    // Matrix and action-based vector have to be produced at the same time to guarantee the correct order
+                    auto matrixRewards = _transitionMatrix.toMatrixVector(rewardModel.getStateActionRewardVector(), dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
+                    explicitTransitionMatrix = std::move(matrixRewards.first);
+                    explicitActionRewards = std::move(matrixRewards.second);
+                } else {
+                    // Translate matrix only
+                    explicitTransitionMatrix = _transitionMatrix.toMatrix(dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
+                    if (rewardModel.hasStateActionRewards()) {
+                        // For deterministic models we can translate the action rewards easily
+                        explicitActionRewards = rewardModel.getStateActionRewardVector().toVector(odd);
+                    }
+                }
+                STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are not supported in this engine.");
+                // Continuous time information
+                std::vector<ValueType> explicitExitRateVector;
+                storm::storage::BitVector explicitMarkovianStates;
+                if (isContinuousTime()) {
+                    explicitExitRateVector = _exitRates->toVector(odd);
+                    if (_markovianStates) {
+                        explicitMarkovianStates = _markovianStates->toVector(odd);
+                    }
+                }
+                auto sparseHelper = createSparseHelper(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector, odd);
+                auto explicitResult = sparseHelper->computeLongRunAverageValues(env, rewardModel.hasStateRewards() ? &explicitStateRewards : nullptr, rewardModel.hasStateActionRewards() ? &explicitActionRewards : nullptr);
+                return std::make_unique<HybridQuantitativeCheckResult<DdType, ValueType>>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero<ValueType>(), _model.getReachableStates(), std::move(odd), std::move(explicitResult));
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
+            bool HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::isContinuousTime() const {
+                STORM_LOG_ASSERT((_markovianStates == nullptr) || (_exitRates != nullptr), "Inconsistent information given: have Markovian states but no exit rates.");
+                return _exitRates != nullptr;
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
+            template <bool N, std::enable_if_t<N, int>>
+            std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const {
+                std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> result;
+                if (isContinuousTime()) {
+                    result = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix, markovianStates, exitRates);
+                } else {
+                    result = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix);
+                }
+                storm::modelchecker::helper::setInformationFromOtherHelperNondeterministic(*result, *this, [&odd](storm::dd::Bdd<DdType> const& s){ return s.toVector(odd); });
+                STORM_LOG_WARN_COND(!this->isProduceSchedulerSet(), "Scheduler extraction not supported in Hybrid engine.");
+                return result;
+            }
+            
+            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
+            template <bool N, std::enable_if_t<!N, int>>
+            std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const {
+                std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> result;
+                if (isContinuousTime()) {
+                    result = std::make_unique<storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix, exitRates);
+                } else {
+                    result = std::make_unique<storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix);
+                }
+                storm::modelchecker::helper::setInformationFromOtherHelperDeterministic(*result, *this, [&odd](storm::dd::Bdd<DdType> const& s){ return s.toVector(odd); });
+                return result;
+            }
+            
+            template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::CUDD, false>;
+            template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::CUDD, true>;
+            template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::Sylvan, false>;
+            template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::Sylvan, true>;
+            template class HybridInfiniteHorizonHelper<storm::RationalNumber, storm::dd::DdType::Sylvan, false>;
+            template class HybridInfiniteHorizonHelper<storm::RationalNumber, storm::dd::DdType::Sylvan, true>;
+            template class HybridInfiniteHorizonHelper<storm::RationalFunction, storm::dd::DdType::Sylvan, false>;
+            
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h
new file mode 100644
index 000000000..09035c6f3
--- /dev/null
+++ b/src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h
@@ -0,0 +1,87 @@
+#pragma once
+#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
+
+#include "storm/modelchecker/results/HybridQuantitativeCheckResult.h"
+
+#include "storm/models/symbolic/Model.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+#include "storm/storage/dd/DdManager.h"
+#include "storm/storage/dd/Add.h"
+#include "storm/storage/dd/Bdd.h"
+
+namespace storm {
+    class Environment;
+    
+    namespace storage {
+        template <typename ValueType> class SparseMatrix;
+        class BitVector;
+    }
+    
+    namespace modelchecker {
+        namespace helper {
+        
+            template <typename ValueType, bool Nondeterministic> class SparseInfiniteHorizonHelper;
+            
+            /*!
+             * Helper class for model checking queries that depend on the long run behavior of a (possibly nondeterministic) system.
+             */
+            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
+            class HybridInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType, DdType> {
+
+            public:
+                /*!
+                 * Initializes the helper for a discrete time model (MDP or DTMC)
+                 */
+                HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix);
+                
+                /*!
+                 * Initializes the helper for a Markov Automaton
+                 */
+                HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector);
+                
+                /*!
+                 * Initializes the helper for a CTMC
+                 * @note The transition matrix must be probabilistic
+                 */
+                HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector);
+                
+                /*!
+                 * Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
+                 * @return a value for each state
+                 */
+                std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd<DdType> const& psiStates);
+                
+                /*!
+                 * Computes the long run average rewards, i.e., the average reward collected per time unit
+                 * @return a value for each state
+                 */
+                std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel);
+                
+            protected:
+                
+                /*!
+                 * @return true iff this is a computation on a continuous time model (i.e., MA or CTMC)
+                 */
+                bool isContinuousTime() const;
+                
+                /*!
+                 * @return a sparse infinite horizon helper with the provided explicit model information.
+                 * @param exitRates exit rates (ignored for discrete time models)
+                 * @param markovianStates Markovian states (ignored for non-MA models)
+                 */
+                template <bool N = Nondeterministic, std::enable_if_t<N, int> = 0>
+                std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const;
+                template <bool N = Nondeterministic, std::enable_if_t<!N, int> = 0>
+                std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const;
+                
+
+            private:
+                storm::models::symbolic::Model<DdType, ValueType> const& _model;
+                storm::dd::Add<DdType, ValueType> const& _transitionMatrix;
+                storm::dd::Bdd<DdType> const* _markovianStates;
+                storm::dd::Add<DdType, ValueType> const* _exitRates;
+            };
+        }
+    }
+}
diff --git a/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp
deleted file mode 100644
index fa28e486a..000000000
--- a/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp
+++ /dev/null
@@ -1,101 +0,0 @@
-#include "HybridNondeterministicInfiniteHorizonHelper.h"
-
-#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
-#include "storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h"
-
-#include "storm/storage/SparseMatrix.h"
-
-#include "storm/utility/macros.h"
-
-#include "storm/exceptions/NotSupportedException.h"
-
-namespace storm {
-    namespace modelchecker {
-        namespace helper {
-            
-            template <typename ValueType, storm::dd::DdType DdType>
-            HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType>::HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(nullptr) {
-                // Intentionally left empty.
-            }
-            
-            template <typename ValueType, storm::dd::DdType DdType>
-            HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType>::HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(&markovianStates), _exitRates(&exitRateVector) {
-                // Intentionally left empty.
-            }
-            
-            template <typename ValueType, storm::dd::DdType DdType>
-            std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType>::computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd<DdType> const& psiStates) {
-                // Convert this query to an instance for the sparse engine.
-                // Create ODD for the translation.
-                storm::dd::Odd odd = _model.getReachableStates().createOdd();
-                storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix = _transitionMatrix.toMatrix(_model.getNondeterminismVariables(), odd, odd);
-                std::unique_ptr<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>> sparseHelper;
-                std::vector<ValueType> explicitExitRateVector;
-                storm::storage::BitVector explicitMarkovianStates;
-                if (isContinuousTime()) {
-                    explicitExitRateVector = _exitRates->toVector(odd);
-                    explicitMarkovianStates = _markovianStates->toVector(odd);
-                    sparseHelper = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector);
-                } else {
-                    sparseHelper = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(explicitTransitionMatrix);
-                }
-                storm::modelchecker::helper::setInformationFromOtherHelperNondeterministic(*sparseHelper, *this, [&odd](storm::dd::Bdd<DdType> const& s){ return s.toVector(odd); });
-                STORM_LOG_WARN_COND(!this->isProduceSchedulerSet(), "Scheduler extraction not supported in Hybrid engine.");
-                auto explicitResult = sparseHelper->computeLongRunAverageProbabilities(env, psiStates.toVector(odd));
-                return std::make_unique<HybridQuantitativeCheckResult<DdType, ValueType>>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero<ValueType>(), _model.getReachableStates(), std::move(odd), std::move(explicitResult));
-            }
-            
-            template <typename ValueType, storm::dd::DdType DdType>
-            std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType>::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel) {
-                // Convert this query to an instance for the sparse engine.
-                // Create ODD for the translation.
-                storm::dd::Odd odd = _model.getReachableStates().createOdd();
-                
-                // Create matrix and reward vectors
-                storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix;
-                std::vector<ValueType> explicitStateRewards, explicitActionRewards;
-                if (rewardModel.hasStateRewards()) {
-                    explicitStateRewards = rewardModel.getStateRewardVector().toVector(odd);
-                }
-                if (rewardModel.hasStateActionRewards()) {
-                    // Matrix and action-based vector have to be produced at the same time to guarantee the correct order
-                    auto matrixRewards = _transitionMatrix.toMatrixVector(rewardModel.getStateActionRewardVector(), _model.getNondeterminismVariables(), odd, odd);
-                    explicitTransitionMatrix = std::move(matrixRewards.first);
-                    explicitActionRewards = std::move(matrixRewards.second);
-                } else {
-                    // Translate matrix only
-                    explicitTransitionMatrix = _transitionMatrix.toMatrix(_model.getNondeterminismVariables(), odd, odd);
-                }
-                STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are not supported in this engine.");
-                
-                // Create remaining components and helper
-                std::vector<ValueType> explicitExitRateVector;
-                storm::storage::BitVector explicitMarkovianStates;
-                std::unique_ptr<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>> sparseHelper;
-                if (isContinuousTime()) {
-                    explicitExitRateVector = _exitRates->toVector(odd);
-                    explicitMarkovianStates = _markovianStates->toVector(odd);
-                    sparseHelper = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector);
-                } else {
-                    sparseHelper = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(explicitTransitionMatrix);
-                }
-                storm::modelchecker::helper::setInformationFromOtherHelperNondeterministic(*sparseHelper, *this, [&odd](storm::dd::Bdd<DdType> const& s){ return s.toVector(odd); });
-
-                STORM_LOG_WARN_COND(!this->isProduceSchedulerSet(), "Scheduler extraction not supported in Hybrid engine.");
-                auto explicitResult = sparseHelper->computeLongRunAverageValues(env, rewardModel.hasStateRewards() ? &explicitStateRewards : nullptr, rewardModel.hasStateActionRewards() ? &explicitActionRewards : nullptr);
-                return std::make_unique<HybridQuantitativeCheckResult<DdType, ValueType>>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero<ValueType>(), _model.getReachableStates(), std::move(odd), std::move(explicitResult));
-            }
-            
-            template <typename ValueType, storm::dd::DdType DdType>
-            bool HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType>::isContinuousTime() const {
-                STORM_LOG_ASSERT((_markovianStates == nullptr) == (_exitRates == nullptr), "Inconsistent information given: Have Markovian states but no exit rates (or vice versa)." );
-                return _markovianStates != nullptr;
-            }
-            
-            template class HybridNondeterministicInfiniteHorizonHelper<double, storm::dd::DdType::CUDD>;
-            template class HybridNondeterministicInfiniteHorizonHelper<double, storm::dd::DdType::Sylvan>;
-            template class HybridNondeterministicInfiniteHorizonHelper<storm::RationalNumber, storm::dd::DdType::Sylvan>;
-            
-        }
-    }
-}
\ No newline at end of file
diff --git a/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h
deleted file mode 100644
index d6a67089b..000000000
--- a/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#pragma once
-#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
-
-#include "storm/modelchecker/results/HybridQuantitativeCheckResult.h"
-
-#include "storm/models/symbolic/NondeterministicModel.h"
-#include "storm/models/symbolic/StandardRewardModel.h"
-
-#include "storm/storage/dd/DdManager.h"
-#include "storm/storage/dd/Add.h"
-#include "storm/storage/dd/Bdd.h"
-
-namespace storm {
-    class Environment;
-    
-    namespace modelchecker {
-        namespace helper {
-        
-            /*!
-             * Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system.
-             */
-            template <typename ValueType, storm::dd::DdType DdType>
-            class HybridNondeterministicInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType, DdType> {
-
-            public:
-                /*!
-                 * Initializes the helper for a discrete time (i.e. MDP)
-                 */
-                HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix);
-                
-                /*!
-                 * Initializes the helper for a continuous time (i.e. MA)
-                 */
-                HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& _exitRates);
-                
-                /*!
-                 * Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
-                 * @return a value for each state
-                 */
-                std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd<DdType> const& psiStates);
-                
-                /*!
-                 * Computes the long run average rewards, i.e., the average reward collected per time unit
-                 * @return a value for each state
-                 */
-                std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel);
-                
-            protected:
-                
-                /*!
-                 * @return true iff this is a computation on a continuous time model (i.e. MA)
-                 */
-                bool isContinuousTime() const;
-
-
-            private:
-                storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& _model;
-                storm::dd::Add<DdType, ValueType> const& _transitionMatrix;
-                storm::dd::Bdd<DdType> const* _markovianStates;
-                storm::dd::Add<DdType, ValueType> const* _exitRates;
-            };
-        }
-    }
-}
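
A note on the two createSparseHelper overloads introduced above: they use enable_if-based tag dispatch on the Nondeterministic template parameter, so the right sparse helper type (deterministic or nondeterministic) is chosen at compile time behind a single call site. A self-contained sketch of the pattern with toy types (not Storm's classes):

    #include <iostream>
    #include <type_traits>

    template <bool Nondeterministic>
    struct Helper {
        // Selected at compile time when Nondeterministic == true.
        template <bool N = Nondeterministic, std::enable_if_t<N, int> = 0>
        const char* createSparseHelper() const { return "nondeterministic sparse helper"; }

        // Selected at compile time when Nondeterministic == false.
        template <bool N = Nondeterministic, std::enable_if_t<!N, int> = 0>
        const char* createSparseHelper() const { return "deterministic sparse helper"; }
    };

    int main() {
        std::cout << Helper<true>{}.createSparseHelper() << "\n";   // nondeterministic sparse helper
        std::cout << Helper<false>{}.createSparseHelper() << "\n";  // deterministic sparse helper
    }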

From 64b9c176e76a2df824f824b045790caa0510311d Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 12 Aug 2020 10:31:31 +0200
Subject: [PATCH 40/48] symbolic Ctmc: Added method to compute the probability
 matrix.

---
 src/storm/models/symbolic/Ctmc.cpp | 4 ++++
 src/storm/models/symbolic/Ctmc.h   | 6 ++++++
 2 files changed, 10 insertions(+)

diff --git a/src/storm/models/symbolic/Ctmc.cpp b/src/storm/models/symbolic/Ctmc.cpp
index d54b3156d..26e20c3bf 100644
--- a/src/storm/models/symbolic/Ctmc.cpp
+++ b/src/storm/models/symbolic/Ctmc.cpp
@@ -94,6 +94,10 @@ namespace storm {
                 }
             }
             
+            template<storm::dd::DdType Type, typename ValueType>
+            storm::dd::Add<Type, ValueType> Ctmc<Type, ValueType>::computeProbabilityMatrix() const {
+                return this->getTransitionMatrix() / this->getExitRateVector();
+            }
             
             template<storm::dd::DdType Type, typename ValueType>
             template<typename NewValueType>
diff --git a/src/storm/models/symbolic/Ctmc.h b/src/storm/models/symbolic/Ctmc.h
index c73340946..34d601f4a 100644
--- a/src/storm/models/symbolic/Ctmc.h
+++ b/src/storm/models/symbolic/Ctmc.h
@@ -142,6 +142,12 @@ namespace storm {
                 storm::dd::Add<Type, ValueType> const& getExitRateVector() const;
                 
                 virtual void reduceToStateBasedRewards() override;
+                
+                /*!
+                 * @return the probabilistic transition matrix P
+                 * @note getTransitionMatrix() retrieves the rate matrix R, where R(s,s') = r(s) * P(s,s') and r(s) is the exit rate of state s
+                 */
+                storm::dd::Add<Type, ValueType> computeProbabilityMatrix() const;
 
                 template<typename NewValueType>
                 std::shared_ptr<Ctmc<Type, NewValueType>> toValueType() const;
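
The embedding behind computeProbabilityMatrix is entrywise: P(s,s') = R(s,s') / r(s), which is exactly the symbolic division in the .cpp hunk above. A dense, explicit-state sketch of the same computation (hypothetical standalone function, not the ADD code):

    #include <cstddef>
    #include <vector>

    // Divide every row of the rate matrix R by that state's exit rate r(s)
    // to obtain the embedded probability matrix P.
    std::vector<std::vector<double>> computeProbabilityMatrix(
            std::vector<std::vector<double>> const& rateMatrix,
            std::vector<double> const& exitRates) {
        auto probabilities = rateMatrix;
        for (std::size_t state = 0; state < probabilities.size(); ++state) {
            for (auto& entry : probabilities[state]) {
                entry /= exitRates[state];
            }
        }
        return probabilities;
    }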

From 5eb2bcc71f3e2585a50d09b73a48de9fe8887c99 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 12 Aug 2020 10:41:15 +0200
Subject: [PATCH 41/48] Fixing bad cast exception

---
 .../HybridInfiniteHorizonHelper.cpp              | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp
index 36231a419..910687adc 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp
@@ -18,17 +18,17 @@ namespace storm {
             
             template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
             HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(nullptr) {
-                // Intentionally left empty.
+                STORM_LOG_ASSERT(model.isNondeterministicModel() == Nondeterministic, "Template parameter does not match model type.");
             }
             
             template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
             HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(&markovianStates), _exitRates(&exitRateVector) {
-                // Intentionally left empty.
+                STORM_LOG_ASSERT(model.isNondeterministicModel() == Nondeterministic, "Template parameter does not match model type.");
             }
             
             template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
             HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(&exitRateVector) {
-                // Intentionally left empty.
+                STORM_LOG_ASSERT(model.isNondeterministicModel() == Nondeterministic, "Template parameter does not match model type.");
             }
             
             template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
@@ -38,7 +38,7 @@ namespace storm {
                 storm::dd::Odd odd = _model.getReachableStates().createOdd();
                 // Translate all required components
                 storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix;
-                if (_model.isNondeterministicModel()) {
+                if (Nondeterministic) {
                     explicitTransitionMatrix = _transitionMatrix.toMatrix(dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
                 } else {
                     explicitTransitionMatrix = _transitionMatrix.toMatrix(odd, odd);
@@ -69,14 +69,18 @@ namespace storm {
                 if (rewardModel.hasStateRewards()) {
                     explicitStateRewards = rewardModel.getStateRewardVector().toVector(odd);
                 }
-                if (_model.isNondeterministicModel() && rewardModel.hasStateActionRewards()) {
+                if (Nondeterministic && rewardModel.hasStateActionRewards()) {
                     // Matrix and action-based vector have to be produced at the same time to guarantee the correct order
                     auto matrixRewards = _transitionMatrix.toMatrixVector(rewardModel.getStateActionRewardVector(), dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
                     explicitTransitionMatrix = std::move(matrixRewards.first);
                     explicitActionRewards = std::move(matrixRewards.second);
                 } else {
                     // Translate matrix only
-                    explicitTransitionMatrix = _transitionMatrix.toMatrix(dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
+                    if (Nondeterministic) {
+                        explicitTransitionMatrix = _transitionMatrix.toMatrix(dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
+                    } else {
+                        explicitTransitionMatrix = _transitionMatrix.toMatrix(odd, odd);
+                    }
                     if (rewardModel.hasStateActionRewards()) {
                         // For deterministic models we can translate the action rewards easily
                         explicitActionRewards = rewardModel.getStateActionRewardVector().toVector(odd);
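
The bad cast fixed here: the old reward path unconditionally applied dynamic_cast to NondeterministicModel when translating the matrix, which throws std::bad_cast once the helper is instantiated for a deterministic model. Branching on the compile-time Nondeterministic flag (which the new constructor asserts matches the model) keeps the cast off the deterministic path. A toy reproduction of the failure mode (hypothetical types, not Storm's):

    #include <iostream>
    #include <typeinfo>

    struct Model { virtual ~Model() = default; };
    struct NondeterministicModel : Model {};

    int main() {
        Model deterministic;
        try {
            // Mirrors the old code path: casting a deterministic model throws.
            dynamic_cast<NondeterministicModel&>(deterministic);
        } catch (std::bad_cast const& e) {
            std::cout << "caught: " << e.what() << "\n";
        }
    }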

From f8d4fd8862a15529a35adecfcf9d01584b5fa8b7 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 12 Aug 2020 10:42:26 +0200
Subject: [PATCH 42/48] New utility function for exchanging information
 between deterministic helpers.

---
 .../utility/SetInformationFromOtherHelper.h       | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/src/storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h b/src/storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h
index 44dcc467c..9ca78aa4d 100644
--- a/src/storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h
+++ b/src/storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h
@@ -26,6 +26,21 @@ namespace storm {
                 // Scheduler Production
 			    targetHelper.setProduceScheduler(sourceHelperType.isProduceSchedulerSet());
             }
+            
+            /*!
+             * Forwards relevant information stored in another helper to the given helper
+             */
+            template <typename TargetHelperType, typename SourceHelperType>
+            void setInformationFromOtherHelperDeterministic(TargetHelperType& targetHelper, SourceHelperType const& sourceHelperType, std::function<typename TargetHelperType::StateSet(typename SourceHelperType::StateSet const&)> const& stateSetTransformer) {
+                // Relevancy of initial states.
+                if (sourceHelperType.hasRelevantStates()) {
+                    targetHelper.setRelevantStates(stateSetTransformer(sourceHelperType.getRelevantStates()));
+                }
+                // Value threshold to which the result will be compared
+                if (sourceHelperType.isValueThresholdSet()) {
+                    targetHelper.setValueThreshold(sourceHelperType.getValueThresholdComparisonType(), storm::utility::convertNumber<typename TargetHelperType::ValueType>(sourceHelperType.getValueThresholdValue()));
+                }
+            }
         }
     }
 }
\ No newline at end of file
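
Call-site shape for the new function, quoted from HybridInfiniteHorizonHelper::createSparseHelper in patch 39 above; the lambda is the stateSetTransformer, converting symbolic state sets (Bdd) into explicit ones (BitVector) through the ODD:

    storm::modelchecker::helper::setInformationFromOtherHelperDeterministic(*result, *this, [&odd](storm::dd::Bdd<DdType> const& s){ return s.toVector(odd); });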

From 38af6357d74de9c37ba6d8ecb3404b3484ec97a6 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 12 Aug 2020 10:43:49 +0200
Subject: [PATCH 43/48] Using the new hybrid infinite horizon helper in the
 model checkers

---
 .../modelchecker/csl/HybridCtmcCslModelChecker.cpp   | 12 ++++++++++--
 .../csl/HybridMarkovAutomatonCslModelChecker.cpp     |  6 +++---
 .../prctl/HybridDtmcPrctlModelChecker.cpp            | 12 +++++++++---
 3 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/src/storm/modelchecker/csl/HybridCtmcCslModelChecker.cpp b/src/storm/modelchecker/csl/HybridCtmcCslModelChecker.cpp
index c4102efab..78ec476a4 100644
--- a/src/storm/modelchecker/csl/HybridCtmcCslModelChecker.cpp
+++ b/src/storm/modelchecker/csl/HybridCtmcCslModelChecker.cpp
@@ -4,6 +4,8 @@
 
 #include "storm/modelchecker/csl/helper/SparseCtmcCslHelper.h"
 #include "storm/modelchecker/csl/helper/HybridCtmcCslHelper.h"
+#include "storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h"
+#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
 
 #include "storm/modelchecker/results/SymbolicQualitativeCheckResult.h"
 
@@ -123,13 +125,19 @@ namespace storm {
             std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
             SymbolicQualitativeCheckResult<DdType> const& subResult = subResultPointer->asSymbolicQualitativeCheckResult<DdType>();
             
-            return storm::modelchecker::helper::HybridCtmcCslHelper::computeLongRunAverageProbabilities<DdType, ValueType>(env, this->getModel(), checkTask.isOnlyInitialStatesRelevantSet(), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), subResult.getTruthValuesVector());
+            auto probabilisticTransitions = this->getModel().computeProbabilityMatrix();
+            storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, false> helper(this->getModel(), probabilisticTransitions, this->getModel().getExitRateVector());
+            storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
+            return helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
         }
         
         template<typename ModelType>
         std::unique_ptr<CheckResult> HybridCtmcCslModelChecker<ModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
-            return storm::modelchecker::helper::HybridCtmcCslHelper::computeLongRunAverageRewards<DdType, ValueType>(env, this->getModel(), checkTask.isOnlyInitialStatesRelevantSet(), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), rewardModel.get());
+            auto probabilisticTransitions = this->getModel().computeProbabilityMatrix();
+            storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, false> helper(this->getModel(), probabilisticTransitions, this->getModel().getExitRateVector());
+            storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
+            return helper.computeLongRunAverageRewards(env, rewardModel.get());
         }
         
         // Explicitly instantiate the model checker.
diff --git a/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp b/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp
index b2833ee78..c8dff0c8f 100644
--- a/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp
+++ b/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp
@@ -5,7 +5,7 @@
 #include "storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h"
 #include "storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h"
 #include "storm/modelchecker/prctl/helper/HybridMdpPrctlHelper.h"
-#include "storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h"
+#include "storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h"
 #include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
 
 #include "storm/modelchecker/results/SymbolicQualitativeCheckResult.h"
@@ -107,7 +107,7 @@ namespace storm {
             SymbolicQualitativeCheckResult<DdType> const& subResult = subResultPointer->asSymbolicQualitativeCheckResult<DdType>();
             STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
     
-            storm::modelchecker::helper::HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType> helper(this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector());
+            storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, true> helper(this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector());
             storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
             return helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
         }
@@ -116,7 +116,7 @@ namespace storm {
         std::unique_ptr<CheckResult> HybridMarkovAutomatonCslModelChecker<ModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
             STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
-            storm::modelchecker::helper::HybridNondeterministicInfiniteHorizonHelper<ValueType, DdType> helper(this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector());
+            storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, true> helper(this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector());
             storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
             return helper.computeLongRunAverageRewards(env, rewardModel.get());
         }
diff --git a/src/storm/modelchecker/prctl/HybridDtmcPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/HybridDtmcPrctlModelChecker.cpp
index 266363662..c861d0ea9 100644
--- a/src/storm/modelchecker/prctl/HybridDtmcPrctlModelChecker.cpp
+++ b/src/storm/modelchecker/prctl/HybridDtmcPrctlModelChecker.cpp
@@ -2,6 +2,8 @@
 
 #include "storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h"
 #include "storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h"
+#include "storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h"
+#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
 
 #include "storm/storage/dd/Odd.h"
 #include "storm/storage/dd/DdManager.h"
@@ -113,20 +115,24 @@ namespace storm {
             return storm::modelchecker::helper::HybridDtmcPrctlHelper<DdType, ValueType>::computeReachabilityTimes(env, this->getModel(), this->getModel().getTransitionMatrix(), subResult.getTruthValuesVector(), checkTask.isQualitativeSet());
         }
 
-
         template<typename ModelType>
         std::unique_ptr<CheckResult> HybridDtmcPrctlModelChecker<ModelType>::computeLongRunAverageProbabilities(Environment const& env, CheckTask<storm::logic::StateFormula, ValueType> const& checkTask) {
             storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
             std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
             SymbolicQualitativeCheckResult<DdType> const& subResult = subResultPointer->asSymbolicQualitativeCheckResult<DdType>();
             
-            return storm::modelchecker::helper::HybridDtmcPrctlHelper<DdType, ValueType>::computeLongRunAverageProbabilities(env, this->getModel(), this->getModel().getTransitionMatrix(), subResult.getTruthValuesVector());
+            storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, false> helper(this->getModel(), this->getModel().getTransitionMatrix());
+            storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
+            return helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
         }
         
         template<typename ModelType>
         std::unique_ptr<CheckResult> HybridDtmcPrctlModelChecker<ModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
-            return storm::modelchecker::helper::HybridDtmcPrctlHelper<DdType, ValueType>::computeLongRunAverageRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), rewardModel.get());
+
+            storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, false> helper(this->getModel(), this->getModel().getTransitionMatrix());
+            storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
+            return helper.computeLongRunAverageRewards(env, rewardModel.get());
         }
         
         template class HybridDtmcPrctlModelChecker<storm::models::symbolic::Dtmc<storm::dd::DdType::CUDD, double>>;

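All rewritten checkers follow the same delegation pattern: construct the infinite-horizon helper for the model at hand, forward the information from the CheckTask, and delegate the actual computation. A condensed sketch of the DTMC case from the hunk above (the boolean template argument distinguishes the nondeterministic (true, as used for MAs/MDPs) from the deterministic (false) specialization):

    template<typename ModelType>
    std::unique_ptr<CheckResult> HybridDtmcPrctlModelChecker<ModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
        // Restrict the reward model to the rewards relevant for the given task.
        auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
        // 'false': the underlying model is deterministic; the MA/MDP checkers instantiate the helper with 'true'.
        storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, false> helper(this->getModel(), this->getModel().getTransitionMatrix());
        // Forward the relevant information from the check task to the helper.
        storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
        return helper.computeLongRunAverageRewards(env, rewardModel.get());
    }
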
From 8627cde4dc22ccc7544a881486153a8d17785adf Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 12 Aug 2020 10:50:31 +0200
Subject: [PATCH 44/48] Removing old LRA code in old helpers.

---
 .../csl/helper/HybridCtmcCslHelper.cpp        |  55 --
 .../csl/helper/HybridCtmcCslHelper.h          |   6 -
 .../csl/helper/SparseCtmcCslHelper.cpp        | 562 ------------------
 .../csl/helper/SparseCtmcCslHelper.h          |  21 -
 .../prctl/helper/HybridDtmcPrctlHelper.cpp    |  26 -
 .../prctl/helper/HybridDtmcPrctlHelper.h      |   4 -
 .../prctl/helper/SparseDtmcPrctlHelper.cpp    |  15 -
 .../prctl/helper/SparseDtmcPrctlHelper.h      |   6 -
 8 files changed, 695 deletions(-)

diff --git a/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.cpp b/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.cpp
index 9d3972179..db076ae16 100644
--- a/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.cpp
+++ b/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.cpp
@@ -411,53 +411,6 @@ namespace storm {
                 STORM_LOG_THROW(false, storm::exceptions::InvalidOperationException, "Computing cumulative rewards is unsupported for this value type.");
             }
             
-            template<storm::dd::DdType DdType, class ValueType>
-            std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& psiStates) {
-                
-                storm::utility::Stopwatch conversionWatch(true);
-                
-                // Create ODD for the translation.
-                storm::dd::Odd odd = model.getReachableStates().createOdd();
-                
-                storm::storage::SparseMatrix<ValueType> explicitRateMatrix = rateMatrix.toMatrix(odd, odd);
-                std::vector<ValueType> explicitExitRateVector = exitRateVector.toVector(odd);
-                storm::solver::SolveGoal<ValueType> goal;
-                if (onlyInitialStatesRelevant) {
-                    goal.setRelevantValues(model.getInitialStates().toVector(odd));
-                }
-                conversionWatch.stop();
-                STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
-
-                std::vector<ValueType> result = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageProbabilities(env, std::move(goal), explicitRateMatrix, psiStates.toVector(odd), &explicitExitRateVector);
-
-                return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
-            }
-            
-            template<storm::dd::DdType DdType, class ValueType>
-            std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel) {
-                
-                STORM_LOG_THROW(!rewardModel.empty(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula.");
-                storm::dd::Add<DdType, ValueType> probabilityMatrix = computeProbabilityMatrix(rateMatrix, exitRateVector);
-                
-                storm::utility::Stopwatch conversionWatch(true);
-                
-                // Create ODD for the translation.
-                storm::dd::Odd odd = model.getReachableStates().createOdd();
-                
-                storm::storage::SparseMatrix<ValueType> explicitRateMatrix = rateMatrix.toMatrix(odd, odd);
-                std::vector<ValueType> explicitExitRateVector = exitRateVector.toVector(odd);
-                storm::solver::SolveGoal<ValueType> goal;
-                if (onlyInitialStatesRelevant) {
-                    goal.setRelevantValues(model.getInitialStates().toVector(odd));
-                }
-                conversionWatch.stop();
-                STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
-
-                std::vector<ValueType> result = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageRewards(env, std::move(goal), explicitRateMatrix, rewardModel.getTotalRewardVector(probabilityMatrix, model.getColumnVariables(), exitRateVector, true).toVector(odd), &explicitExitRateVector);
-                
-                return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
-            }
-            
             template<storm::dd::DdType DdType, class ValueType>
             storm::dd::Add<DdType, ValueType> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& maybeStates, ValueType uniformizationRate) {
                 STORM_LOG_DEBUG("Computing uniformized matrix using uniformization rate " << uniformizationRate << ".");
@@ -489,9 +442,7 @@ namespace storm {
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel, double timeBound);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& phiStates, storm::dd::Bdd<storm::dd::DdType::CUDD> const& psiStates, bool qualitative);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::CUDD> const& targetStates, bool qualitative);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& psiStates);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& nextStates);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel);
             template storm::dd::Add<storm::dd::DdType::CUDD, double> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector);
             template storm::dd::Add<storm::dd::DdType::CUDD, double> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& maybeStates, double uniformizationRate);
 
@@ -501,9 +452,7 @@ namespace storm {
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel, double timeBound);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& nextStates);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel);
             template storm::dd::Add<storm::dd::DdType::Sylvan, double> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector);
             template storm::dd::Add<storm::dd::DdType::Sylvan, double> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& maybeStates, double uniformizationRate);
 
@@ -513,9 +462,7 @@ namespace storm {
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel, double timeBound);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& nextStates);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel);
             template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector);
             template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& maybeStates, storm::RationalNumber uniformizationRate);
 
@@ -525,9 +472,7 @@ namespace storm {
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalFunction>::RewardModelType const& rewardModel, double timeBound);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalFunction>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& nextStates);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalFunction>::RewardModelType const& rewardModel);
             template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector);
             template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& maybeStates, storm::RationalFunction uniformizationRate);
 
diff --git a/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.h b/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.h
index 73c5e3c46..17cf48054 100644
--- a/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.h
+++ b/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.h
@@ -44,15 +44,9 @@ namespace storm {
                 template<storm::dd::DdType DdType, typename ValueType>
                 static std::unique_ptr<CheckResult> computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel, storm::dd::Bdd<DdType> const& targetStates, bool qualitative);
                 
-                template<storm::dd::DdType DdType, typename ValueType>
-                static std::unique_ptr<CheckResult> computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& psiStates);
-                
                 template<storm::dd::DdType DdType, typename ValueType>
                 static std::unique_ptr<CheckResult> computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& nextStates);
 
-                template<storm::dd::DdType DdType, typename ValueType>
-                static std::unique_ptr<CheckResult> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel);
-
                 /*!
                  * Converts the given rate-matrix into a time-abstract probability matrix.
                  *
diff --git a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
index ab8912c5d..58cdd4b15 100644
--- a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
+++ b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
@@ -482,555 +482,6 @@ namespace storm {
                 return storm::modelchecker::helper::SparseDtmcPrctlHelper<ValueType>::computeTotalRewards(env, std::move(goal), probabilityMatrix, backwardTransitions, dtmcRewardModel, qualitative);
             }
             
-            template <typename ValueType>
-            std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<ValueType> const* exitRateVector) {
-                
-                // If there are no goal states, we avoid the computation and directly return zero.
-                uint_fast64_t numberOfStates = rateMatrix.getRowCount();
-                if (psiStates.empty()) {
-                    return std::vector<ValueType>(numberOfStates, storm::utility::zero<ValueType>());
-                }
-                
-                // Likewise, if all bits are set, we can avoid the computation.
-                if (psiStates.full()) {
-                    return std::vector<ValueType>(numberOfStates, storm::utility::one<ValueType>());
-                }
-                
-                ValueType zero = storm::utility::zero<ValueType>();
-                ValueType one = storm::utility::one<ValueType>();
-                
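-                // The LRA probability of a state is the long-run fraction of time spent in psi-states. Hence, we can treat
-                // this as an LRA reward computation in which each psi-state yields value one and all other states yield zero.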
-                return computeLongRunAverages<ValueType>(env, std::move(goal), rateMatrix,
-                                              [&zero, &one, &psiStates] (storm::storage::sparse::state_type const& state) -> ValueType {
-                                                  if (psiStates.get(state)) {
-                                                      return one;
-                                                  }
-                                                  return zero;
-                                              },
-                                              exitRateVector);
-            }
-            
-            template <typename ValueType, typename RewardModelType>
-            std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, RewardModelType const& rewardModel, std::vector<ValueType> const* exitRateVector) {
-                // Only compute the result if the model has a state-based reward model.
-                STORM_LOG_THROW(!rewardModel.empty(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula.");
-
-                return computeLongRunAverages<ValueType>(env, std::move(goal), rateMatrix,
-                        [&] (storm::storage::sparse::state_type const& state) -> ValueType {
-                            ValueType result = rewardModel.hasStateRewards() ? rewardModel.getStateReward(state) : storm::utility::zero<ValueType>();
-                            if (rewardModel.hasStateActionRewards()) {
-                                // State-action rewards are multiplied by the exit rate r(s). Multiplying the reward by the expected time we stay at s (i.e., 1/r(s)) then yields the original state reward.
-                                if (exitRateVector) {
-                                    result += rewardModel.getStateActionReward(state) * (*exitRateVector)[state];
-                                } else {
-                                    result += rewardModel.getStateActionReward(state);
-                                }
-                            }
-                            if (rewardModel.hasTransitionRewards()) {
-                                // Transition rewards are already multiplied by the rates.
-                                result += rateMatrix.getPointwiseProductRowSum(rewardModel.getTransitionRewardMatrix(), state);
-                            }
-                            return result;
-                        },
-                        exitRateVector);
-            }
-            
-            template <typename ValueType>
-            std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::vector<ValueType> const& stateRewardVector, std::vector<ValueType> const* exitRateVector) {
-                return computeLongRunAverages<ValueType>(env, std::move(goal), rateMatrix,
-                                                         [&stateRewardVector] (storm::storage::sparse::state_type const& state) -> ValueType {
-                                                             return stateRewardVector[state];
-                                                         },
-                                                         exitRateVector);
-            }
-            
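-            // The functions above only differ in the state-value getter they pass on; the actual work happens in
-            // computeLongRunAverages below.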
-            template <typename ValueType>
-            std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverages(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector){
-                storm::storage::SparseMatrix<ValueType> probabilityMatrix;
-                if (exitRateVector) {
-                    probabilityMatrix = computeProbabilityMatrix(rateMatrix, *exitRateVector);
-                } else {
-                    probabilityMatrix = rateMatrix;
-                }
-                uint_fast64_t numberOfStates = rateMatrix.getRowCount();
-            
-                // Start by decomposing the CTMC into its BSCCs.
-                storm::storage::StronglyConnectedComponentDecomposition<ValueType> bsccDecomposition(rateMatrix, storm::storage::StronglyConnectedComponentDecompositionOptions().onlyBottomSccs());
-                
-                STORM_LOG_DEBUG("Found " << bsccDecomposition.size() << " BSCCs.");
-
-                // Prepare the vector holding the LRA values for each of the BSCCs.
-                std::vector<ValueType> bsccLra;
-                bsccLra.reserve(bsccDecomposition.size());
-                
-                auto underlyingSolverEnvironment = env;
-                auto precision = env.solver().lra().getPrecision();
-                if (env.solver().isForceSoundness()) {
-                    // For sound computations, the error in the BSCCs plus the error in the remaining system should be less than the user-defined precision.
-                    precision /= storm::utility::convertNumber<storm::RationalNumber>(2);
-                    underlyingSolverEnvironment.solver().lra().setPrecision(precision);
-                }
-                underlyingSolverEnvironment.solver().setLinearEquationSolverPrecision(precision, env.solver().lra().getRelativeTerminationCriterion());
-                
-                // Keep track of the maximal and minimal value occurring in one of the BSCCs.
-                ValueType maxValue, minValue;
-                storm::storage::BitVector statesInBsccs(numberOfStates);
-                for (auto const& bscc : bsccDecomposition) {
-                    for (auto const& state : bscc) {
-                        statesInBsccs.set(state);
-                    }
-                    bsccLra.push_back(computeLongRunAveragesForBscc<ValueType>(underlyingSolverEnvironment, bscc, rateMatrix, valueGetter, exitRateVector));
-                    if (bsccLra.size() == 1) {
-                        maxValue = bsccLra.back();
-                        minValue = bsccLra.back();
-                    } else {
-                        maxValue = std::max(bsccLra.back(), maxValue);
-                        minValue = std::min(bsccLra.back(), minValue);
-                    }
-                }
-                
-                storm::storage::BitVector statesNotInBsccs = ~statesInBsccs;
-                STORM_LOG_DEBUG("Found " << statesInBsccs.getNumberOfSetBits() << " states in BSCCs.");
-                
-                std::vector<uint64_t> stateToBsccMap(statesInBsccs.size(), -1);
-                for (uint64_t bsccIndex = 0; bsccIndex < bsccDecomposition.size(); ++bsccIndex) {
-                    for (auto const& state : bsccDecomposition[bsccIndex]) {
-                        stateToBsccMap[state] = bsccIndex;
-                    }
-                }
-                
-                std::vector<ValueType> rewardSolution;
-                if (!statesNotInBsccs.empty()) {
-                    // Calculate the LRA for states not in BSCCs as expected reachability rewards:
-                    // target states are the states in BSCCs, and the transition reward for each transition into a BSCC is the
-                    // LRA of that BSCC (and 0 otherwise). This corresponds to the sum of the BSCC LRAs, weighted by the
-                    // probability of reaching the respective BSCC.
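-                    // That is, for a state s outside of all BSCCs, LRA(s) = sum over all BSCCs B of Pr_s(reach B) * lra(B).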
-                    
-                    std::vector<ValueType> rewardRightSide;
-                    rewardRightSide.reserve(statesNotInBsccs.getNumberOfSetBits());
-                    
-                    for (auto state : statesNotInBsccs) {
-                        ValueType reward = storm::utility::zero<ValueType>();
-                        for (auto entry : rateMatrix.getRow(state)) {
-                            if (statesInBsccs.get(entry.getColumn())) {
-                                if (exitRateVector) {
-                                    reward += (entry.getValue() / (*exitRateVector)[state]) * bsccLra[stateToBsccMap[entry.getColumn()]];
-                                } else {
-                                    reward += entry.getValue() * bsccLra[stateToBsccMap[entry.getColumn()]];
-                                }
-                            }
-                        }
-                        rewardRightSide.push_back(reward);
-                    }
-                    
-                    // Compute reachability rewards
-                    storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
-                    bool isEqSysFormat = linearEquationSolverFactory.getEquationProblemFormat(underlyingSolverEnvironment) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
-                    storm::storage::SparseMatrix<ValueType> rewardEquationSystemMatrix = rateMatrix.getSubmatrix(false, statesNotInBsccs, statesNotInBsccs, isEqSysFormat);
-                    if (exitRateVector) {
-                        uint64_t localRow = 0;
-                        for (auto const& globalRow : statesNotInBsccs) {
-                            for (auto& entry : rewardEquationSystemMatrix.getRow(localRow)) {
-                                entry.setValue(entry.getValue() / (*exitRateVector)[globalRow]);
-                            }
-                            ++localRow;
-                        }
-                    }
-                    if (isEqSysFormat) {
-                        rewardEquationSystemMatrix.convertToEquationSystem();
-                    }
-                    rewardSolution = std::vector<ValueType>(rewardEquationSystemMatrix.getColumnCount(), (maxValue + minValue) / storm::utility::convertNumber<ValueType,uint64_t>(2));
-                    std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> solver = linearEquationSolverFactory.create(underlyingSolverEnvironment, std::move(rewardEquationSystemMatrix));
-                    solver->setBounds(minValue, maxValue);
-                    // Check solver requirements
-                    auto requirements = solver->getRequirements(underlyingSolverEnvironment);
-                    STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
-                    solver->solveEquations(underlyingSolverEnvironment, rewardSolution, rewardRightSide);
-                }
-                
-                // Fill the result vector.
-                std::vector<ValueType> result(numberOfStates);
-                auto rewardSolutionIter = rewardSolution.begin();
-                
-                for (uint_fast64_t bsccIndex = 0; bsccIndex < bsccDecomposition.size(); ++bsccIndex) {
-                    storm::storage::StronglyConnectedComponent const& bscc = bsccDecomposition[bsccIndex];
-                    
-                    for (auto const& state : bscc) {
-                        result[state] = bsccLra[bsccIndex];
-                    }
-                }
-                for (auto state : statesNotInBsccs) {
-                    STORM_LOG_ASSERT(rewardSolutionIter != rewardSolution.end(), "Too few elements in solution.");
-                    // Take the value from the reward computation. Since the n-th state not in any BSCC is the n-th
-                    // entry in rewardSolution, we can just take the next value from the iterator.
-                    result[state] = *rewardSolutionIter;
-                    ++rewardSolutionIter;
-                }
-                
-                return result;
-            }
-
-            template <typename ValueType>
-            ValueType SparseCtmcCslHelper::computeLongRunAveragesForBscc(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
-
-                // Catch the case where all values are the same (this includes the special case where the BSCC has size 1).
-                auto it = bscc.begin();
-                ValueType val = valueGetter(*it);
-                for (++it; it != bscc.end(); ++it) {
-                    if (valueGetter(*it) != val) {
-                        break;
-                    }
-                }
-                if (it == bscc.end()) {
-                    // All entries have the same LRA
-                    return val;
-                }
-                
-                storm::solver::LraMethod method = env.solver().lra().getDetLraMethod();
-                if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isDetLraMethodSetFromDefault() && method == storm::solver::LraMethod::ValueIteration) {
-                    method = storm::solver::LraMethod::GainBiasEquations;
-                    STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
-                } else if (env.solver().isForceSoundness() && env.solver().lra().isDetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) {
-                    method = storm::solver::LraMethod::ValueIteration;
-                    STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
-                }
-                STORM_LOG_TRACE("Computing LRA for BSCC of size " << bscc.size() << " using '" << storm::solver::toString(method) << "'.");
-                if (method == storm::solver::LraMethod::ValueIteration) {
-                    return computeLongRunAveragesForBsccVi<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector);
-                } else if (method == storm::solver::LraMethod::LraDistributionEquations) {
-                    // We only need the first element of the pair, as the LRA distribution is not relevant at this point.
-                    return computeLongRunAveragesForBsccLraDistr<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;
-                }
-                STORM_LOG_WARN_COND(method == storm::solver::LraMethod::GainBiasEquations, "Unsupported lra method selected. Defaulting to " << storm::solver::toString(storm::solver::LraMethod::GainBiasEquations) << ".");
-                // We don't need the bias values
-                return computeLongRunAveragesForBsccGainBias<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;
-            }
-            
-            template <>
-            storm::RationalFunction SparseCtmcCslHelper::computeLongRunAveragesForBsccVi<storm::RationalFunction>(Environment const&, storm::storage::StronglyConnectedComponent const&, storm::storage::SparseMatrix<storm::RationalFunction> const&, std::function<storm::RationalFunction (storm::storage::sparse::state_type const& state)> const&, std::vector<storm::RationalFunction> const*) {
-                STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "The requested method for LRA computation is not supported for parametric models.");
-            }
-                
-            template <typename ValueType>
-            ValueType SparseCtmcCslHelper::computeLongRunAveragesForBsccVi(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
-                
-                // Initialize data about the bscc
-                storm::storage::BitVector bsccStates(rateMatrix.getRowGroupCount(), false);
-                for (auto const& state : bscc) {
-                    bsccStates.set(state);
-                }
-                
-                // Get the uniformization rate
-                ValueType uniformizationRate = storm::utility::one<ValueType>();
-                if (exitRateVector) {
-                    uniformizationRate = storm::utility::vector::max_if(*exitRateVector, bsccStates);
-                }
-                // To ensure that the uniformized model is aperiodic, we make sure that every state gets a self-loop.
-                // Hence, we increase the uniformization rate a little.
-                uniformizationRate *= (storm::utility::one<ValueType>() + storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor()));
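-                // With uniformization rate q, the uniformized DTMC has transition probabilities P(s,s') = R(s,s')/q for
-                // s != s' and P(s,s) = 1 + (R(s,s) - r(s))/q (with r(s) = 1 if no exit rates are given). Choosing q larger
-                // than all exit rates guarantees a positive self-loop probability; the loop below computes these entries.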
-
-                // Get the transitions of the submodel
-                typename storm::storage::SparseMatrix<ValueType> bsccMatrix = rateMatrix.getSubmatrix(true, bsccStates, bsccStates, true);
-                
-                // Uniformize the transitions
-                uint64_t subState = 0;
-                for (auto state : bsccStates) {
-                    for (auto& entry : bsccMatrix.getRow(subState)) {
-                        if (entry.getColumn() == subState) {
-                            if (exitRateVector) {
-                                entry.setValue(storm::utility::one<ValueType>() + (entry.getValue() - (*exitRateVector)[state]) / uniformizationRate);
-                            } else {
-                                entry.setValue(storm::utility::one<ValueType>() + (entry.getValue() - storm::utility::one<ValueType>()) / uniformizationRate);
-                            }
-                        } else {
-                            entry.setValue(entry.getValue() / uniformizationRate);
-                        }
-                    }
-                    ++subState;
-                }
-
-                // Compute the rewards obtained in a single uniformization step
-                std::vector<ValueType> markovianRewards;
-                markovianRewards.reserve(bsccMatrix.getRowCount());
-                for (auto const& state : bsccStates) {
-                    markovianRewards.push_back(valueGetter(state) / uniformizationRate);
-                }
-                
-                // start the iterations
-                ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision());
-                bool relative = env.solver().lra().getRelativeTerminationCriterion();
-                if (!relative) {
-                    // The result is scaled by the uniformization rate afterwards, so the absolute precision has to be scaled down accordingly.
-                    precision /= uniformizationRate;
-                }
-                std::vector<ValueType> x(bsccMatrix.getRowCount(), storm::utility::zero<ValueType>());
-                std::vector<ValueType> xPrime(x.size());
-                
-                auto multiplier = storm::solver::MultiplierFactory<ValueType>().create(env, bsccMatrix);
-                ValueType maxDiff, minDiff;
-                uint64_t iter = 0;
-                boost::optional<uint64_t> maxIter;
-                if (env.solver().lra().isMaximalIterationCountSet()) {
-                    maxIter = env.solver().lra().getMaximalIterationCount();
-                }
-                while (!maxIter.is_initialized() || iter < maxIter.get()) {
-                    ++iter;
-                    // Compute the next values. We also keep track of the maximal and minimal difference between two values (for convergence checking).
-                    multiplier->multiply(env, x, &markovianRewards, xPrime);
-                    
-                    // Update xPrime and check for convergence.
-                    // To avoid large (and numerically unstable) x-values, we subtract a reference value.
-                    auto xIt = x.begin();
-                    auto xPrimeIt = xPrime.begin();
-                    ValueType refVal = *xPrimeIt;
-                    maxDiff = *xPrimeIt - *xIt;
-                    minDiff = maxDiff;
-                    *xPrimeIt -= refVal;
-                    *xIt = *xPrimeIt;
-                    for (++xIt, ++xPrimeIt; xIt != x.end(); ++xIt, ++xPrimeIt) {
-                        ValueType diff = *xPrimeIt - *xIt;
-                        maxDiff = std::max(maxDiff, diff);
-                        minDiff = std::min(minDiff, diff);
-                        *xPrimeIt -= refVal;
-                        *xIt = *xPrimeIt;
-                    }
-                    
-                    // Check for convergence. The uniformization rate is already incorporated into the precision parameter
-                    if ((maxDiff - minDiff) <= (relative ? (precision * minDiff) : precision)) {
-                        break;
-                    }
-                    if (storm::utility::resources::isTerminate()) {
-                        break;
-                    }
-                }
-                if (maxIter.is_initialized() && iter == maxIter.get()) {
-                    STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
-                } else {
-                    STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
-                }
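-                // The per-step differences x'(s) - x(s) approach the gain of the uniformized DTMC from above (maxDiff) and
-                // below (minDiff). Averaging the two bounds and scaling by the uniformization rate yields the LRA value.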
-                return (maxDiff + minDiff) * uniformizationRate / (storm::utility::convertNumber<ValueType>(2.0));
-            }
-            
-            template <typename ValueType>
-            std::pair<ValueType, std::vector<ValueType>> SparseCtmcCslHelper::computeLongRunAveragesForBsccGainBias(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
-                // We build the equation system as in Line 3 of Algorithm 3 from
-                // Kretinsky, Meggendorfer: Efficient Strategy Iteration for Mean Payoff in Markov Decision Processes (ATVA 2017).
-                // The first variable corresponds to the gain of the BSCC, whereas the subsequent variables yield the bias for each state s_1, s_2, ....
-                // No bias variable for s_0 is needed since it is always set to zero, yielding an n x n equation system matrix.
-                // To make this work for CTMCs, we could uniformize the model. This preserves the LRA and ensures that we can compute the
-                // LRA as for a DTMC (the sojourn time in each state is the same). If we then multiply the equations by the uniformization rate,
-                // the uniformization rate cancels out. Hence, we obtain the equation system below.
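-                // For reference: for a DTMC, the system reads gain + bias(s) = reward(s) + sum_{s'} P(s,s') * bias(s') with
-                // bias(s_0) = 0. After multiplying by the uniformization rate, the entries of the rate matrix R and the exit
-                // rates r(s) appear in place of P, and the solved gain is directly the continuous-time LRA value.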
-                
-                // Get a mapping from global state indices to local ones.
-                std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
-                uint64_t localIndex = 0;
-                for (auto const& globalIndex : bscc) {
-                    toLocalIndexMap[globalIndex] = localIndex;
-                    ++localIndex;
-                }
-                
-                // Prepare an environment for the underlying equation solver
-                auto subEnv = env;
-                if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
-                    // Topological solver does not make any sense since the BSCC is connected.
-                    subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
-                }
-                subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());
-                
-                // Build the equation system matrix and vector.
-                storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
-                bool isEquationSystemFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
-                storm::storage::SparseMatrixBuilder<ValueType> builder(bscc.size(), bscc.size());
-                std::vector<ValueType> eqSysVector;
-                eqSysVector.reserve(bscc.size());
-                // The first row asserts that the weighted bias variables and the reward at s_0 sum up to the gain
-                uint64_t row = 0;
-                ValueType entryValue;
-                for (auto const& globalState : bscc) {
-                    // Coefficient for the gain variable
-                    if (isEquationSystemFormat) {
-                        // '1-0' in row 0 and -(-1) in other rows
-                        builder.addNextValue(row, 0, storm::utility::one<ValueType>());
-                    } else if (row > 0) {
-                        // No coefficient in row 0; otherwise, subtract the gain.
-                        builder.addNextValue(row, 0, -storm::utility::one<ValueType>());
-                    }
-                    // Compute the weighted sum over the successor states. As this is a BSCC, each successor state will again be in the BSCC.
-                    auto diagonalValue = storm::utility::zero<ValueType>();
-                    if (row > 0) {
-                        if (isEquationSystemFormat) {
-                            diagonalValue = exitRateVector ? (*exitRateVector)[globalState] : storm::utility::one<ValueType>();
-                        } else {
-                            diagonalValue = storm::utility::one<ValueType>() - (exitRateVector ? (*exitRateVector)[globalState] : storm::utility::one<ValueType>());
-                        }
-                    }
-                    bool needDiagonalEntry = !storm::utility::isZero(diagonalValue);
-                    for (auto const& entry : rateMatrix.getRow(globalState)) {
-                        uint64_t col = toLocalIndexMap[entry.getColumn()];
-                        if (col == 0) {
-                            // Skip transitions to state_0. This corresponds to setting the bias of state_0 to zero.
-                            continue;
-                        }
-                        entryValue = entry.getValue();
-                        if (isEquationSystemFormat) {
-                            entryValue = -entryValue;
-                        }
-                        if (needDiagonalEntry && col >= row) {
-                            if (col == row) {
-                                entryValue += diagonalValue;
-                            } else { // col > row
-                                builder.addNextValue(row, row, diagonalValue);
-                            }
-                            needDiagonalEntry = false;
-                        }
-                        builder.addNextValue(row, col, entryValue);
-                    }
-                    if (needDiagonalEntry) {
-                        builder.addNextValue(row, row, diagonalValue);
-                    }
-                    eqSysVector.push_back(valueGetter(globalState));
-                    ++row;
-                }
-
-                // Create a linear equation solver
-                auto solver = linearEquationSolverFactory.create(subEnv, builder.build());
-                // Check solver requirements.
-                auto requirements = solver->getRequirements(subEnv);
-                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
-                // TODO: Find bounds on the bias variables. Just inserting the maximal value from the vector probably does not work.
-                
-                std::vector<ValueType> eqSysSol(bscc.size(), storm::utility::zero<ValueType>());
-                // Take the mean of the rewards as an initial guess for the gain
-                //eqSysSol.front() = std::accumulate(eqSysVector.begin(), eqSysVector.end(), storm::utility::zero<ValueType>()) / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size());
-                solver->solveEquations(subEnv, eqSysSol, eqSysVector);
-                
-                ValueType gain = eqSysSol.front();
-                // insert bias value for state 0
-                eqSysSol.front() = storm::utility::zero<ValueType>();
-                // Return the gain and the bias values
-                return std::pair<ValueType, std::vector<ValueType>>(std::move(gain), std::move(eqSysSol));
-            }
-            
-            template <typename ValueType>
-            std::pair<ValueType, std::vector<ValueType>> SparseCtmcCslHelper::computeLongRunAveragesForBsccLraDistr(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
-
-                // Let A be an auxiliary matrix with A[s,s] = R(s,s) - r(s) and A[s,s'] = R(s,s') for s,s' in the BSCC, s != s'.
-                // We build and solve the equation system for
-                // x*A = 0 and x_0+...+x_n = 1, i.e., A^t*x = 0 and x_0+...+x_n = 1, or in fixpoint form
-                // (I+A^t)*x = x and x_n = 1-x_0-...-x_{n-1}.
-                // Then, x[i] will be the fraction of the time we are in state i.
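-                // (A is the generator matrix of the CTMC restricted to the BSCC, so this amounts to solving the
-                // steady-state equations pi * Q = 0 together with the normalization sum_i pi_i = 1.)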
-                
-                // This method assumes that the BSCC consists of more than one state, so we treat the singleton case separately.
-                if (bscc.size() == 1) {
-                    return { valueGetter(*bscc.begin()), {storm::utility::one<ValueType>()} };
-                }
-                
-                // Prepare an environment for the underlying linear equation solver
-                auto subEnv = env;
-                if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
-                    // Topological solver does not make any sense since the BSCC is connected.
-                    subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
-                }
-                subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());
-                
-                // Get a mapping from global state indices to local ones as well as a bitvector containing states within the BSCC.
-                std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
-                storm::storage::BitVector bsccStates(rateMatrix.getRowCount(), false);
-                uint64_t localIndex = 0;
-                for (auto const& globalIndex : bscc) {
-                    bsccStates.set(globalIndex, true);
-                    toLocalIndexMap[globalIndex] = localIndex;
-                    ++localIndex;
-                }
-                
-                // Build the auxiliary Matrix A.
-                auto auxMatrix = rateMatrix.getSubmatrix(false, bsccStates, bsccStates, true); // add diagonal entries!
-                uint64_t row = 0;
-                for (auto const& globalIndex : bscc) {
-                    for (auto& entry : auxMatrix.getRow(row)) {
-                        if (entry.getColumn() == row) {
-                            // This value is non-zero since we have a BSCC with more than one state
-                            if (exitRateVector) {
-                                entry.setValue(entry.getValue() - (*exitRateVector)[globalIndex]);
-                            } else {
-                                entry.setValue(entry.getValue() - storm::utility::one<ValueType>());
-                            }
-                        }
-                    }
-                    ++row;
-                }
-                assert(row == auxMatrix.getRowCount());
-                
-                // We need to consider A^t. This will not delete diagonal entries since they are non-zero.
-                auxMatrix = auxMatrix.transpose();
-                
-                // Check whether we need the fixpoint characterization
-                storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
-                bool isFixpointFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::FixedPointSystem;
-                if (isFixpointFormat) {
-                    // Add a 1 on the diagonal
-                    for (row = 0; row < auxMatrix.getRowCount(); ++row) {
-                        for (auto& entry : auxMatrix.getRow(row)) {
-                            if (entry.getColumn() == row) {
-                                entry.setValue(storm::utility::one<ValueType>() + entry.getValue());
-                            }
-                        }
-                    }
-                }
-                
-                // We now build the equation system matrix.
-                // We can drop the last row of A and add ones in this row instead to assert that the variables sum up to one
-                // Phase 1: replace the existing entries of the last row with ones
-                uint64_t col = 0;
-                uint64_t lastRow = auxMatrix.getRowCount() - 1;
-                for (auto& entry : auxMatrix.getRow(lastRow)) {
-                    entry.setColumn(col);
-                    if (isFixpointFormat) {
-                        if (col == lastRow) {
-                            entry.setValue(storm::utility::zero<ValueType>());
-                        } else {
-                            entry.setValue(-storm::utility::one<ValueType>());
-                        }
-                    } else {
-                        entry.setValue(storm::utility::one<ValueType>());
-                    }
-                    ++col;
-                }
-                storm::storage::SparseMatrixBuilder<ValueType> builder(std::move(auxMatrix));
-                for (; col <= lastRow; ++col) {
-                    if (isFixpointFormat) {
-                        if (col != lastRow) {
-                            builder.addNextValue(lastRow, col, -storm::utility::one<ValueType>());
-                        }
-                    } else {
-                        builder.addNextValue(lastRow, col, storm::utility::one<ValueType>());
-                    }
-                }
-                
-                std::vector<ValueType> bsccEquationSystemRightSide(bscc.size(), storm::utility::zero<ValueType>());
-                bsccEquationSystemRightSide.back() = storm::utility::one<ValueType>();
-                
-                // Create a linear equation solver
-                auto solver = linearEquationSolverFactory.create(subEnv,  builder.build());
-                solver->setBounds(storm::utility::zero<ValueType>(), storm::utility::one<ValueType>());
-                // Check solver requirements.
-                auto requirements = solver->getRequirements(subEnv);
-                requirements.clearLowerBounds();
-                requirements.clearUpperBounds();
-                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
-                
-                std::vector<ValueType> lraDistr(bscc.size(), storm::utility::one<ValueType>() / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size()));
-                solver->solveEquations(subEnv, lraDistr, bsccEquationSystemRightSide);
-                
-                // Calculate final LRA Value
-                ValueType result = storm::utility::zero<ValueType>();
-                auto solIt = lraDistr.begin();
-                for (auto const& globalState : bscc) {
-                    result += valueGetter(globalState) * (*solIt);
-                    ++solIt;
-                }
-                assert(solIt == lraDistr.end());
-
-                return std::pair<ValueType, std::vector<ValueType>>(std::move(result), std::move(lraDistr));
-            }
-            
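For reference, the steady-state system that the removed helper above solved, restated in LaTeX from its ASCII comment (x is the stationary distribution over the n+1 BSCC states, A the auxiliary matrix defined there; this is a restatement of the deleted comment, not new functionality):

    x A = 0 \;\wedge\; \textstyle\sum_{i=0}^{n} x_i = 1
    \iff A^{\top} x = 0 \;\wedge\; \textstyle\sum_{i=0}^{n} x_i = 1
    \iff (I + A^{\top})\, x = x \;\wedge\; x_n = 1 - \textstyle\sum_{i=0}^{n-1} x_i

Solving it yields x_i as the long-run fraction of time spent in state i, which is then weighted with the state values to obtain the LRA value.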
             template <typename ValueType, typename std::enable_if<storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
             std::vector<ValueType> SparseCtmcCslHelper::computeAllTransientProbabilities(Environment const& env, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::BitVector const& initialStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::vector<ValueType> const& exitRates, double timeBound) {
 
@@ -1253,10 +704,6 @@ namespace storm {
             
             template std::vector<double> SparseCtmcCslHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool qualitative);
             
-            template std::vector<double> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<double> const* exitRateVector);
-            template std::vector<double> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, std::vector<double> const* exitRateVector);
-            template std::vector<double> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, std::vector<double> const& stateRewardVector, std::vector<double> const* exitRateVector);
-            
             template std::vector<double> SparseCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, std::vector<double> const& exitRateVector, storm::models::sparse::StandardRewardModel<double> const& rewardModel, double timeBound);
 
             template std::vector<double> SparseCtmcCslHelper::computeAllTransientProbabilities(Environment const& env, storm::storage::SparseMatrix<double> const& rateMatrix, storm::storage::BitVector const& initialStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::vector<double> const& exitRates, double timeBound);
@@ -1290,15 +737,6 @@ namespace storm {
             template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool qualitative);
             template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, storm::storage::SparseMatrix<storm::RationalFunction> const& backwardTransitions, std::vector<storm::RationalFunction> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalFunction> const& rewardModel, bool qualitative);
 
-            template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<storm::RationalNumber> const* exitRateVector);
-            template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<storm::RationalFunction> const* exitRateVector);
-            
-            template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, storm::models::sparse::StandardRewardModel<RationalNumber> const& rewardModel, std::vector<storm::RationalNumber> const* exitRateVector);
-            template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, storm::models::sparse::StandardRewardModel<RationalFunction> const& rewardModel, std::vector<storm::RationalFunction> const* exitRateVector);
-
-            template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, std::vector<storm::RationalNumber> const& stateRewardVector, std::vector<storm::RationalNumber> const* exitRateVector);
-            template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, std::vector<storm::RationalFunction> const& stateRewardVector, std::vector<storm::RationalFunction> const* exitRateVector);
-
             template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, double timeBound);
             template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, std::vector<storm::RationalFunction> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalFunction> const& rewardModel, double timeBound);
 
diff --git a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.h b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.h
index 697eca743..13470bd8d 100644
--- a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.h
+++ b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.h
@@ -55,15 +55,6 @@ namespace storm {
                 template <typename ValueType, typename RewardModelType>
                 static std::vector<ValueType> computeTotalRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, RewardModelType const& rewardModel, bool qualitative);
 
-                template <typename ValueType>
-                static std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<ValueType> const* exitRateVector);
-
-                template <typename ValueType, typename RewardModelType>
-                static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, RewardModelType const& rewardModel, std::vector<ValueType> const* exitRateVector);
-
-                template <typename ValueType>
-                static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::vector<ValueType> const& stateRewardVector, std::vector<ValueType> const* exitRateVector);
-
                 template <typename ValueType>
                 static std::vector<ValueType> computeReachabilityTimes(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& targetStates, bool qualitative);
 
@@ -131,18 +122,6 @@ namespace storm {
                 template <typename ValueType>
                 static bool checkAndUpdateTransientProbabilityEpsilon(storm::Environment const& env, ValueType& epsilon, std::vector<ValueType> const& resultVector, storm::storage::BitVector const& relevantPositions);
                 
-            private:
-                template <typename ValueType>
-                static std::vector<ValueType> computeLongRunAverages(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
-                template <typename ValueType>
-                static ValueType computeLongRunAveragesForBscc(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
-                template <typename ValueType>
-                static ValueType computeLongRunAveragesForBsccVi(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
-                template <typename ValueType>
-                static std::pair<ValueType, std::vector<ValueType>> computeLongRunAveragesForBsccGainBias(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
-                template <typename ValueType>
-                static std::pair<ValueType, std::vector<ValueType>> computeLongRunAveragesForBsccLraDistr(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
-                
             };
         }
     }
diff --git a/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.cpp b/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.cpp
index ce5bf4467..e0c4fd143 100644
--- a/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.cpp
+++ b/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.cpp
@@ -344,32 +344,6 @@ namespace storm {
                 return computeReachabilityRewards(env, model, transitionMatrix, rewardModel, targetStates, qualitative);
             }
             
-            template<storm::dd::DdType DdType, typename ValueType>
-            std::unique_ptr<CheckResult> HybridDtmcPrctlHelper<DdType, ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& targetStates) {
-                // Create ODD for the translation.
-                storm::utility::Stopwatch conversionWatch(true);
-                storm::dd::Odd odd = model.getReachableStates().createOdd();
-                storm::storage::SparseMatrix<ValueType> explicitProbabilityMatrix = model.getTransitionMatrix().toMatrix(odd, odd);
-                conversionWatch.stop();
-                STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
-
-                std::vector<ValueType> result = storm::modelchecker::helper::SparseDtmcPrctlHelper<ValueType>::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(), explicitProbabilityMatrix, targetStates.toVector(odd));
-                return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
-            }
-
-            template<storm::dd::DdType DdType, typename ValueType>
-            std::unique_ptr<CheckResult> HybridDtmcPrctlHelper<DdType, ValueType>::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, RewardModelType const& rewardModel) {
-                // Create ODD for the translation.
-                storm::utility::Stopwatch conversionWatch(true);
-                storm::dd::Odd odd = model.getReachableStates().createOdd();
-                storm::storage::SparseMatrix<ValueType> explicitProbabilityMatrix = model.getTransitionMatrix().toMatrix(odd, odd);
-                conversionWatch.stop();
-                STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
-
-                std::vector<ValueType> result = storm::modelchecker::helper::SparseDtmcPrctlHelper<ValueType>::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(), explicitProbabilityMatrix, rewardModel.getTotalRewardVector(model.getTransitionMatrix(), model.getColumnVariables()).toVector(odd));
-                return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
-            }
-            
             template class HybridDtmcPrctlHelper<storm::dd::DdType::CUDD, double>;
             template class HybridDtmcPrctlHelper<storm::dd::DdType::Sylvan, double>;
 
diff --git a/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h b/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h
index 9e20c5e2c..f4d5b950e 100644
--- a/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h
+++ b/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h
@@ -39,10 +39,6 @@ namespace storm {
 
                 static std::unique_ptr<CheckResult> computeReachabilityTimes(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& targetStates, bool qualitative);
 
-                static std::unique_ptr<CheckResult> computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& targetStates);
-
-                static std::unique_ptr<CheckResult> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, RewardModelType const& rewardModel);
-
             };
             
         }
diff --git a/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.cpp b/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.cpp
index bf9c9b9ad..97983ee54 100644
--- a/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.cpp
+++ b/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.cpp
@@ -531,21 +531,6 @@ namespace storm {
                 return result;
             }
             
-            template<typename ValueType, typename RewardModelType>
-            std::vector<ValueType> SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& psiStates) {
-                return SparseCtmcCslHelper::computeLongRunAverageProbabilities<ValueType>(env, std::move(goal), transitionMatrix, psiStates, nullptr);
-            }
-            
-            template<typename ValueType, typename RewardModelType>
-            std::vector<ValueType> SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel) {
-                return SparseCtmcCslHelper::computeLongRunAverageRewards<ValueType, RewardModelType>(env, std::move(goal), transitionMatrix, rewardModel, nullptr);
-            }
-            
-            template<typename ValueType, typename RewardModelType>
-            std::vector<ValueType> SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& stateRewards) {
-                return SparseCtmcCslHelper::computeLongRunAverageRewards<ValueType>(env, std::move(goal), transitionMatrix, stateRewards, nullptr);
-            }
-            
             template<typename ValueType, typename RewardModelType>
             typename SparseDtmcPrctlHelper<ValueType, RewardModelType>::BaierTransformedModel SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeBaierTransformation(Environment const& env, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates, boost::optional<std::vector<ValueType>> const& stateRewards) {
 
diff --git a/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h b/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h
index e22e4bfc4..f892f3a7d 100644
--- a/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h
+++ b/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h
@@ -52,12 +52,6 @@ namespace storm {
                 
                 static std::vector<ValueType> computeReachabilityTimes(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, bool qualitative, ModelCheckerHint const& hint = ModelCheckerHint());
 
-                static std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& psiStates);
-
-                static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal,  storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel);
-
-                static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& stateRewards);
-
                 static std::vector<ValueType> computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates, bool qualitative);
                 
                 static std::vector<ValueType> computeConditionalRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates, bool qualitative);

From 6f59c4f3eb96d42a2859a393e1b9a3409c8b44b2 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 12 Aug 2020 15:58:41 +0200
Subject: [PATCH 45/48] SparseMatrixBuilder: Added a function to easily add
 diagonal entries.

---
 src/storm/storage/SparseMatrix.cpp          | 86 +++++++++++++++++----
 src/storm/storage/SparseMatrix.h            | 13 +++-
 src/test/storm/storage/SparseMatrixTest.cpp | 73 +++++++++++++++++
 3 files changed, 158 insertions(+), 14 deletions(-)
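
The new call in action, as a minimal usage sketch (the values are made up; only the builder API added below is assumed):

    storm::storage::SparseMatrixBuilder<double> builder(2, 2, 3);
    builder.addDiagonalEntry(0, 0.5);  // registers the (0,0) entry without materializing it yet
    builder.addNextValue(0, 1, 0.5);   // flushes the pending diagonal first, keeping columns sorted
    builder.addDiagonalEntry(1, 1.0);  // row 1 consists of the diagonal entry only
    auto matrix = builder.build();     // build() flushes the last pending diagonal
    // matrix now holds rows (0.5, 0.5) and (0, 1).

The design choice visible in the implementation: the diagonal value is kept pending in a boost::optional and only written once its position in the column-sorted entry stream is known, so callers no longer have to interleave it by hand.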

diff --git a/src/storm/storage/SparseMatrix.cpp b/src/storm/storage/SparseMatrix.cpp
index 26e1c0dc7..65ab0f101 100644
--- a/src/storm/storage/SparseMatrix.cpp
+++ b/src/storm/storage/SparseMatrix.cpp
@@ -123,22 +123,40 @@ namespace storm {
         void SparseMatrixBuilder<ValueType>::addNextValue(index_type row, index_type column, ValueType const& value) {
             // Check that we did not move backwards wrt. the row.
             STORM_LOG_THROW(row >= lastRow, storm::exceptions::InvalidArgumentException, "Adding an element in row " << row << ", but an element in row " << lastRow << " has already been added.");
+            STORM_LOG_ASSERT(columnsAndValues.size() == currentEntryCount, "Unexpected size of columnsAndValues vector.");
+            
+            // Check whether a pending diagonal entry needs to be inserted first
+            if (pendingDiagonalEntry) {
+                index_type diagColumn = hasCustomRowGrouping ? currentRowGroupCount - 1 : lastRow;
+                if (row > lastRow || column >= diagColumn) {
+                    ValueType diagValue = std::move(pendingDiagonalEntry.get());
+                    pendingDiagonalEntry = boost::none;
+                    // Add the pending diagonal value now
+                    if (row == lastRow && column == diagColumn) {
+                        // The currently added value coincides with the diagonal entry!
+                        // We add up the values and repeat this call.
+                        addNextValue(row, column, diagValue + value);
+                        // We return here because the above call already did all the work.
+                        return;
+                    } else {
+                        addNextValue(lastRow, diagColumn, diagValue);
+                    }
+                }
+            }
             
             // If the element is in the same row, but was not inserted in the correct order, we need to fix the row after
             // the insertion.
             bool fixCurrentRow = row == lastRow && column < lastColumn;
-            
-            // If the element is in the same row and column as the previous entry, we add them up.
-            if (row == lastRow && column == lastColumn && !columnsAndValues.empty()) {
+            // If the element is in the same row and column as the previous entry, we add them up...
+            // unless there is no entry in this row yet, which might happen either for the very first entry or when only a diagonal value has been added
+            if (row == lastRow && column == lastColumn && rowIndications.back() < currentEntryCount) {
                 columnsAndValues.back().setValue(columnsAndValues.back().getValue() + value);
             } else {
                 // If we switched to another row, we have to adjust the missing entries in the row indices vector.
                 if (row != lastRow) {
                     // Otherwise, we need to push the correct values to the vectors, which might trigger reallocations.
-                    for (index_type i = lastRow + 1; i <= row; ++i) {
-                        rowIndications.push_back(currentEntryCount);
-                    }
-                    
+                    assert(rowIndications.size() == lastRow + 1);
+                    rowIndications.resize(row + 1, currentEntryCount);
                     lastRow = row;
                 }
                 
@@ -183,15 +201,24 @@ namespace storm {
         void SparseMatrixBuilder<ValueType>::newRowGroup(index_type startingRow) {
             STORM_LOG_THROW(hasCustomRowGrouping, storm::exceptions::InvalidStateException, "Matrix was not created to have a custom row grouping.");
             STORM_LOG_THROW(startingRow >= lastRow, storm::exceptions::InvalidStateException, "Illegal row group with negative size.");
-            rowGroupIndices.get().push_back(startingRow);
-            ++currentRowGroupCount;
             
-            // Close all rows from the most recent one to the starting row.
-            for (index_type i = lastRow + 1; i < startingRow; ++i) {
-                rowIndications.push_back(currentEntryCount);
+            // If there still is a pending diagonal entry, we need to add it now (otherwise, the correct diagonal column will be unclear)
+            if (pendingDiagonalEntry) {
+                STORM_LOG_ASSERT(currentRowGroupCount > 0, "Diagonal entry was set before opening the first row group.");
+                index_type diagColumn = currentRowGroupCount - 1;
+                ValueType diagValue = std::move(pendingDiagonalEntry.get());
+                pendingDiagonalEntry = boost::none; // clear now, so addNextValue works properly
+                addNextValue(lastRow, diagColumn, diagValue);
             }
             
+            rowGroupIndices.get().push_back(startingRow);
+            ++currentRowGroupCount;
+            
+            // Handle the case where the previous row group ends with one or more empty rows
             if (lastRow + 1 < startingRow) {
+                // Close all rows from the most recent one to the starting row.
+                assert(rowIndications.size() == lastRow + 1);
+                rowIndications.resize(startingRow, currentEntryCount);
                 // Reset the most recently seen row/column to allow for proper insertion of the following elements.
                 lastRow = startingRow - 1;
                 lastColumn = 0;
@@ -201,6 +228,14 @@ namespace storm {
         template<typename ValueType>
         SparseMatrix<ValueType> SparseMatrixBuilder<ValueType>::build(index_type overriddenRowCount, index_type overriddenColumnCount, index_type overriddenRowGroupCount) {
             
+            // If there still is a pending diagonal entry, we need to add it now
+            if (pendingDiagonalEntry) {
+                index_type diagColumn = hasCustomRowGrouping ? currentRowGroupCount - 1 : lastRow;
+                ValueType diagValue = std::move(pendingDiagonalEntry.get());
+                pendingDiagonalEntry = boost::none; // clear now, so addNextValue works properly
+                addNextValue(lastRow, diagColumn, diagValue);
+            }
+            
             bool hasEntries = currentEntryCount != 0;
             
             uint_fast64_t rowCount = hasEntries ? lastRow + 1 : 0;
@@ -332,9 +367,34 @@ namespace storm {
             }
             
             highestColumn = maxColumn;
-            lastColumn = columnsAndValues.empty() ? 0 : columnsAndValues[columnsAndValues.size() - 1].getColumn();
+            lastColumn = columnsAndValues.empty() ? 0 : columnsAndValues.back().getColumn();
         }
         
+        template<typename ValueType>
+        void SparseMatrixBuilder<ValueType>::addDiagonalEntry(index_type row, ValueType const& value) {
+            STORM_LOG_THROW(row >= lastRow, storm::exceptions::InvalidArgumentException, "Adding a diagonal element in row " << row << ", but an element in row " << lastRow << " has already been added.");
+            if (pendingDiagonalEntry) {
+                if (row == lastRow) {
+                    // Add the two diagonal entries, nothing else to be done.
+                    pendingDiagonalEntry.get() += value;
+                    return;
+                } else {
+                    // add the pending entry
+                    index_type column = hasCustomRowGrouping ? currentRowGroupCount - 1 : lastRow;
+                    ValueType diagValue = std::move(pendingDiagonalEntry.get());
+                    pendingDiagonalEntry = boost::none; // clear now, so addNextValue works properly
+                    addNextValue(lastRow, column, diagValue);
+                }
+            }
+            pendingDiagonalEntry = value;
+            if (lastRow != row) {
+                assert(rowIndications.size() == lastRow + 1);
+                rowIndications.resize(row + 1, currentEntryCount);
+                lastRow = row;
+                lastColumn = 0;
+            }
+        }
+
         template<typename ValueType>
         SparseMatrix<ValueType>::rows::rows(iterator begin, index_type entryCount) : beginIterator(begin), entryCount(entryCount) {
             // Intentionally left empty.
diff --git a/src/storm/storage/SparseMatrix.h b/src/storm/storage/SparseMatrix.h
index ad9c00ecd..dc175c0b8 100644
--- a/src/storm/storage/SparseMatrix.h
+++ b/src/storm/storage/SparseMatrix.h
@@ -243,7 +243,16 @@ namespace storm {
              * @param offset Offset to add to each id in vector index.
              */
             void replaceColumns(std::vector<index_type> const& replacements, index_type offset);
-                        
+            
+            /*!
+             * Makes sure that a diagonal entry will be inserted at the given row.
+             * All other entries of this row must be set immediately after calling this (without setting values at other rows in between).
+             * The provided row must not be smaller than the row of the most recent insertion.
+             * If there is a row grouping, the column of the diagonal entry will correspond to the current row group.
+             * If addNextValue is called on the given row and the diagonal column, the two values provided to addDiagonalEntry and addNextValue are summed up.
+             */
+            void addDiagonalEntry(index_type row, ValueType const& value);
+            
         private:
             // A flag indicating whether a row count was set upon construction.
             bool initialRowCountSet;
@@ -305,6 +314,8 @@ namespace storm {
             // Stores the currently active row group. This is used for correctly constructing the row grouping of the
             // matrix.
             index_type currentRowGroupCount;
+            
+            boost::optional<ValueType> pendingDiagonalEntry;
         };
         
         /*!
diff --git a/src/test/storm/storage/SparseMatrixTest.cpp b/src/test/storm/storage/SparseMatrixTest.cpp
index 38566112d..4984e6607 100644
--- a/src/test/storm/storage/SparseMatrixTest.cpp
+++ b/src/test/storm/storage/SparseMatrixTest.cpp
@@ -148,6 +148,79 @@ TEST(SparseMatrix, Build) {
     ASSERT_EQ(5ul, matrix5.getEntryCount());
 }
 
+TEST(SparseMatrix, DiagonalEntries) {
+    {
+        // No row groupings
+        storm::storage::SparseMatrixBuilder<double> builder(4, 4, 7);
+        storm::storage::SparseMatrixBuilder<double> builderCmp(4, 4, 7);
+        for (uint64_t i = 0; i < 4; ++i) {
+            ASSERT_NO_THROW(builder.addDiagonalEntry(i, i));
+            ASSERT_NO_THROW(builder.addNextValue(i, 2, 100.0 + i));
+            if (i < 2) {
+                ASSERT_NO_THROW(builderCmp.addNextValue(i, i, i));
+                ASSERT_NO_THROW(builderCmp.addNextValue(i, 2, 100.0 + i));
+            } else {
+                ASSERT_NO_THROW(builderCmp.addNextValue(i, 2, 100.0 + i));
+                ASSERT_NO_THROW(builderCmp.addNextValue(i, i, i));
+            }
+        }
+        auto matrix = builder.build();
+        auto matrixCmp = builderCmp.build();
+        EXPECT_EQ(matrix, matrixCmp);
+    }
+    {
+        // With row groupings (each row group has 3 rows)
+        storm::storage::SparseMatrixBuilder<double> builder(12, 4, 21, true, true, 4);
+        storm::storage::SparseMatrixBuilder<double> builderCmp(12, 4, 21, true, true, 4);
+        for (uint64_t i = 0; i < 4; ++i) {
+            uint64_t row = 3*i;
+            builder.newRowGroup(row);
+            builderCmp.newRowGroup(row);
+            for (; row < 3*(i+1); ++row) {
+                ASSERT_NO_THROW(builder.addDiagonalEntry(row, row));
+                ASSERT_NO_THROW(builder.addNextValue(row, 2, 100.0 + row));
+                if (i < 2) {
+                    ASSERT_NO_THROW(builderCmp.addNextValue(row, i, row));
+                    ASSERT_NO_THROW(builderCmp.addNextValue(row, 2, 100.0 + row));
+                } else {
+                    ASSERT_NO_THROW(builderCmp.addNextValue(row, 2, 100.0 + row));
+                    ASSERT_NO_THROW(builderCmp.addNextValue(row, i, row));
+                }
+            }
+        }
+        auto matrix = builder.build();
+        auto matrixCmp = builderCmp.build();
+        EXPECT_EQ(matrix, matrixCmp);
+    }
+    {
+        // With row groupings (every second row is empty)
+        storm::storage::SparseMatrixBuilder<double> builder(12, 4, 10, true, true, 4);
+        storm::storage::SparseMatrixBuilder<double> builderCmp(12, 4, 10, true, true, 4);
+        for (uint64_t i = 0; i < 4; ++i) {
+            uint64_t row = 3*i;
+            builder.newRowGroup(row);
+            builderCmp.newRowGroup(row);
+            for (; row < 3*(i+1); ++row) {
+                if (row % 2 == 1) {
+                    continue;
+                }
+                ASSERT_NO_THROW(builder.addDiagonalEntry(row, row));
+                ASSERT_NO_THROW(builder.addNextValue(row, 2, 100.0 + row));
+                if (i < 2) {
+                    ASSERT_NO_THROW(builderCmp.addNextValue(row, i, row));
+                    ASSERT_NO_THROW(builderCmp.addNextValue(row, 2, 100.0 + row));
+                } else {
+                    ASSERT_NO_THROW(builderCmp.addNextValue(row, 2, 100.0 + row));
+                    ASSERT_NO_THROW(builderCmp.addNextValue(row, i, row));
+                }
+            }
+        }
+        auto matrix = builder.build();
+        auto matrixCmp = builderCmp.build();
+        EXPECT_EQ(matrix, matrixCmp);
+    }
+}
+
 TEST(SparseMatrix, CreationWithMovingContents) {
     std::vector<storm::storage::MatrixEntry<uint_fast64_t, double>> columnsAndValues;
     columnsAndValues.emplace_back(1, 1.0);

From b7883a8ef18b2ab1b6bd2ef7cd238282f88e95b3 Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 12 Aug 2020 15:59:50 +0200
Subject: [PATCH 46/48] Used the new SparseMatrixBuilder::addDiagonalEntry to
 simplify some of the LRA code.

---
 ...arseDeterministicInfiniteHorizonHelper.cpp | 20 +++-------------
 .../infinitehorizon/internal/LraViHelper.cpp  | 23 +++----------------
 2 files changed, 6 insertions(+), 37 deletions(-)
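
In miniature, the pattern this commit deletes and its replacement (builder, inputRow, r, and diagVal are placeholder names, not the actual identifiers):

    // Before (sketch): merge the diagonal into the column-sorted stream by hand.
    bool inserted = false;
    for (auto const& entry : inputRow) {
        if (!inserted && entry.getColumn() >= r) {
            builder.addNextValue(r, r, entry.getColumn() == r ? diagVal + entry.getValue() : diagVal);
            inserted = true;
            if (entry.getColumn() == r) continue;  // diagonal and entry already merged
        }
        builder.addNextValue(r, entry.getColumn(), entry.getValue());
    }
    if (!inserted) builder.addNextValue(r, r, diagVal);  // diagonal lies past all entries

    // After (sketch): let the builder do the bookkeeping.
    builder.addDiagonalEntry(r, diagVal);
    for (auto const& entry : inputRow) {
        builder.addNextValue(r, entry.getColumn(), entry.getValue());
    }

Both variants produce the same matrix: addDiagonalEntry sums with an addNextValue call that hits the diagonal column, exactly as the manual merge did.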

diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
index f7327d8c0..511a67787 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
@@ -169,15 +169,13 @@ namespace storm {
                         builder.addNextValue(row, 0, -storm::utility::one<ValueType>());
                     }
                     // Compute weighted sum over successor state. As this is a BSCC, each successor state will again be in the BSCC.
-                    auto diagonalValue = storm::utility::zero<ValueType>();
                     if (row > 0) {
                         if (isEquationSystemFormat) {
-                            diagonalValue = rateAtState;
-                        } else {
-                            diagonalValue = storm::utility::one<ValueType>() - rateAtState;
+                            builder.addDiagonalEntry(row, rateAtState);
+                        } else if (!storm::utility::isOne(rateAtState)) {
+                            builder.addDiagonalEntry(row, storm::utility::one<ValueType>() - rateAtState);
                         }
                     }
-                    bool needDiagonalEntry = !storm::utility::isZero(diagonalValue);
                     for (auto const& entry : this->_transitionMatrix.getRow(globalState)) {
                         uint64_t col = toLocalIndexMap[entry.getColumn()];
                         if (col == 0) {
@@ -188,20 +186,8 @@ namespace storm {
                         if (isEquationSystemFormat) {
                             entryValue = -entryValue;
                         }
-                        if (needDiagonalEntry && col >= row) {
-                            if (col == row) {
-                                entryValue += diagonalValue;
-                            } else { // col > row
-                                builder.addNextValue(row, row, diagonalValue);
-                            }
-                            needDiagonalEntry = false;
-                        }
                         builder.addNextValue(row, col, entryValue);
                     }
-                    if (needDiagonalEntry) {
-                        builder.addNextValue(row, row, diagonalValue);
-                    }
-
                     eqSysVector.push_back(stateValuesGetter(globalState) + rateAtState * actionValuesGetter(globalState));
                     ++row;
                 }
diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
index c92618735..e5ec57c6f 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
@@ -87,43 +87,26 @@ namespace storm {
                                     tsToIsTransitionsBuilder.newRowGroup(currTsRow);
                                 }
                             }
-                            // We need to uniformize which means that a diagonal entry for the selfloop will be inserted.
                             // If there are exit rates, the uniformization factor needs to be updated.
                             if (exitRates) {
                                 uniformizationFactor = (*exitRates)[componentState] / _uniformizationRate;
                             }
+                            // We need to uniformize which means that a diagonal entry for the selfloop will be inserted.
                             ValueType selfLoopProb = storm::utility::one<ValueType>() - uniformizationFactor;
-                            uint64_t selfLoopColumn = toSubModelStateMapping[componentState];
                             for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) {
-                                bool insertedDiagElement = false;
+                                tsTransitionsBuilder.addDiagonalEntry(currTsRow, selfLoopProb);
                                 for (auto const& entry : this->_transitionMatrix.getRow(*componentChoiceIt)) {
                                     uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()];
                                     if (isTimedState(entry.getColumn())) {
                                         // We have a transition from a timed state to a timed state
                                         STORM_LOG_ASSERT(subModelColumn < numTsSubModelStates, "Invalid state for timed submodel");
-                                        if (!insertedDiagElement && subModelColumn > selfLoopColumn) {
-                                            // We passed the diagonal entry, so add it now before moving on to the next entry
-                                            tsTransitionsBuilder.addNextValue(currTsRow, selfLoopColumn, selfLoopProb);
-                                            insertedDiagElement = true;
-                                        }
-                                        if (!insertedDiagElement && subModelColumn == selfLoopColumn) {
-                                            // The current entry is the diagonal (selfloop) entry
-                                            tsTransitionsBuilder.addNextValue(currTsRow, selfLoopColumn, selfLoopProb + uniformizationFactor * entry.getValue());
-                                            insertedDiagElement = true;
-                                        } else {
-                                            // The diagonal element either has been inserted already or still lies in front
-                                            tsTransitionsBuilder.addNextValue(currTsRow, subModelColumn,  uniformizationFactor * entry.getValue());
-                                        }
+                                        tsTransitionsBuilder.addNextValue(currTsRow, subModelColumn, uniformizationFactor * entry.getValue());
                                     } else {
                                         // We have a transition from a timed to an instant state
                                         STORM_LOG_ASSERT(subModelColumn < numIsSubModelStates, "Invalid state for instant submodel");
                                         tsToIsTransitionsBuilder.addNextValue(currTsRow, subModelColumn, uniformizationFactor * entry.getValue());
                                     }
                                 }
-                                // If the diagonal entry for the MS matrix still has not been set, we do that now
-                                if (!insertedDiagElement) {
-                                    tsTransitionsBuilder.addNextValue(currTsRow, selfLoopColumn, selfLoopProb);
-                                }
                                 ++currTsRow;
                             }
                         } else {

From 7e18fbf3c21f6cb811b52caa80a8b97871f08eba Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 12 Aug 2020 17:10:14 +0200
Subject: [PATCH 47/48] Fixed an error when invoking Storm with the portfolio
 engine and no input model.

---
 src/storm-cli-utilities/model-handling.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
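
The cause, as the hunk below suggests: the builder type was derived from mpi.engine unconditionally, before checking whether an input model exists at all. The fix defers the lookup to its single use site; condensed, the resulting code reads as follows (a sketch of the post-fix state, not further changes):

    if (output.model && output.model.get().isJaniModel()) {
        // Only now is the engine-dependent builder type queried.
        storm::jani::ModelFeatures supportedFeatures =
            storm::api::getSupportedJaniFeatures(storm::utility::getBuilderType(mpi.engine));
        storm::api::simplifyJaniModel(output.model.get().asJaniModel(), output.properties, supportedFeatures);
    }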

diff --git a/src/storm-cli-utilities/model-handling.h b/src/storm-cli-utilities/model-handling.h
index 31e287fed..fb02acf91 100644
--- a/src/storm-cli-utilities/model-handling.h
+++ b/src/storm-cli-utilities/model-handling.h
@@ -361,7 +361,6 @@ namespace storm {
             auto transformedJani = std::make_shared<SymbolicInput>();
             ModelProcessingInformation mpi = getModelProcessingInformation(output, transformedJani);
 
-            auto builderType = storm::utility::getBuilderType(mpi.engine);
             
             // Check whether conversion for PRISM to JANI is requested or necessary.
             if (output.model && output.model.get().isPrismProgram()) {
@@ -383,7 +382,7 @@ namespace storm {
             }
             
             if (output.model && output.model.get().isJaniModel()) {
-                storm::jani::ModelFeatures supportedFeatures = storm::api::getSupportedJaniFeatures(builderType);
+                storm::jani::ModelFeatures supportedFeatures = storm::api::getSupportedJaniFeatures(storm::utility::getBuilderType(mpi.engine));
                 storm::api::simplifyJaniModel(output.model.get().asJaniModel(), output.properties, supportedFeatures);
             }
 

From eb02d56b691936e512a9a821afe7daae9ecf250c Mon Sep 17 00:00:00 2001
From: Tim Quatmann <tim.quatmann@cs.rwth-aachen.de>
Date: Wed, 19 Aug 2020 12:01:03 +0200
Subject: [PATCH 48/48] LraViHelper: Changed type of toSubModelStateMapping to
 std::map, which is faster in this scenario. Also fixed some awkward const&'s.

---
 .../helper/infinitehorizon/internal/LraViHelper.cpp    | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
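
On the container swap: the commit message only states that std::map is faster here; one plausible reading (my gloss, not stated in the diff) is that the mapping is filled with one emplace per component state, where std::map pays O(log n) each while boost::container::flat_map may shift its contiguous storage on inserts. A self-contained sketch of the access pattern, with hypothetical names:

    #include <cstdint>
    #include <map>
    #include <vector>

    int main() {
        // Stand-in for iterating a component's states.
        std::vector<uint64_t> componentStates = {7, 3, 11, 5};
        std::map<uint64_t, uint64_t> toSubModelStateMapping;
        uint64_t nextLocalIndex = 0;
        for (uint64_t state : componentStates) {
            toSubModelStateMapping.emplace(state, nextLocalIndex++);  // O(log n) insert
        }
        // Later: lookups while the submodel matrices are built.
        uint64_t localColumn = toSubModelStateMapping.at(11);
        (void)localColumn;
    }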

diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
index e5ec57c6f..e1302665f 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
@@ -29,7 +29,7 @@ namespace storm {
                     // Run through the component and collect some data:
                     // We create two submodels, one consisting of the timed states of the component and one consisting of the instant states of the component.
                     // For this, we create a state index map that points from state indices of the input model to indices within the corresponding submodel.
-                    boost::container::flat_map<uint64_t, uint64_t> toSubModelStateMapping;
+                    std::map<uint64_t, uint64_t> toSubModelStateMapping;
                     // We also obtain state and choices counts of the two submodels
                     uint64_t numTsSubModelStates(0), numTsSubModelChoices(0);
                     uint64_t numIsSubModelStates(0), numIsSubModelChoices(0);
@@ -38,7 +38,7 @@ namespace storm {
                     _uniformizationRate = exitRates == nullptr ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>();
                     // Now run over the MEC and collect the required data.
                     for (auto const& element : _component) {
-                        uint64_t const& componentState = getComponentElementState(element);
+                        uint64_t componentState = getComponentElementState(element);
                         if (isTimedState(componentState)) {
                             toSubModelStateMapping.emplace(componentState, numTsSubModelStates);
                             ++numTsSubModelStates;
@@ -78,7 +78,7 @@ namespace storm {
                     uint64_t currTsRow = 0;
                     uint64_t currIsRow = 0;
                     for (auto const& element : _component) {
-                        uint64_t const& componentState = getComponentElementState(element);
+                        uint64_t componentState = getComponentElementState(element);
                         if (isTimedState(componentState)) {
                             // The currently processed state is timed.
                             if (nondetTs()) {
@@ -142,9 +142,7 @@ namespace storm {
                 
                 template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
                 ValueType LraViHelper<ValueType, ComponentType, TransitionsType>::performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices) {
-                    
                     initializeNewValues(stateValueGetter, actionValueGetter, exitRates);
-                    
                     ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision());
                     bool relative = env.solver().lra().getRelativeTerminationCriterion();
                     boost::optional<uint64_t> maxIter;
@@ -201,7 +199,7 @@ namespace storm {
                     // Set the new choice-based values
                     ValueType actionRewardScalingFactor = storm::utility::one<ValueType>() / _uniformizationRate;
                     for (auto const& element : _component) {
-                        uint64_t const& componentState = getComponentElementState(element);
+                        uint64_t componentState = getComponentElementState(element);
                         if (isTimedState(componentState)) {
                             if (exitRates) {
                                 actionRewardScalingFactor = (*exitRates)[componentState] / _uniformizationRate;