diff --git a/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.cpp b/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.cpp
index 9d3972179..db076ae16 100644
--- a/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.cpp
+++ b/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.cpp
@@ -411,53 +411,6 @@ namespace storm {
                 STORM_LOG_THROW(false, storm::exceptions::InvalidOperationException, "Computing cumulative rewards is unsupported for this value type.");
             }
             
-            template<storm::dd::DdType DdType, class ValueType>
-            std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& psiStates) {
-                
-                storm::utility::Stopwatch conversionWatch(true);
-                
-                // Create ODD for the translation.
-                storm::dd::Odd odd = model.getReachableStates().createOdd();
-                
-                storm::storage::SparseMatrix<ValueType> explicitRateMatrix = rateMatrix.toMatrix(odd, odd);
-                std::vector<ValueType> explicitExitRateVector = exitRateVector.toVector(odd);
-                storm::solver::SolveGoal<ValueType> goal;
-                if (onlyInitialStatesRelevant) {
-                    goal.setRelevantValues(model.getInitialStates().toVector(odd));
-                }
-                conversionWatch.stop();
-                STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
-
-                std::vector<ValueType> result = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageProbabilities(env, std::move(goal), explicitRateMatrix, psiStates.toVector(odd), &explicitExitRateVector);
-
-                return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
-            }
-            
-            template<storm::dd::DdType DdType, class ValueType>
-            std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel) {
-                
-                STORM_LOG_THROW(!rewardModel.empty(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula.");
-                storm::dd::Add<DdType, ValueType> probabilityMatrix = computeProbabilityMatrix(rateMatrix, exitRateVector);
-                
-                storm::utility::Stopwatch conversionWatch(true);
-                
-                // Create ODD for the translation.
-                storm::dd::Odd odd = model.getReachableStates().createOdd();
-                
-                storm::storage::SparseMatrix<ValueType> explicitRateMatrix = rateMatrix.toMatrix(odd, odd);
-                std::vector<ValueType> explicitExitRateVector = exitRateVector.toVector(odd);
-                storm::solver::SolveGoal<ValueType> goal;
-                if (onlyInitialStatesRelevant) {
-                    goal.setRelevantValues(model.getInitialStates().toVector(odd));
-                }
-                conversionWatch.stop();
-                STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
-
-                std::vector<ValueType> result = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageRewards(env, std::move(goal), explicitRateMatrix, rewardModel.getTotalRewardVector(probabilityMatrix, model.getColumnVariables(), exitRateVector, true).toVector(odd), &explicitExitRateVector);
-                
-                return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
-            }
-            
             template<storm::dd::DdType DdType, class ValueType>
             storm::dd::Add<DdType, ValueType> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& maybeStates, ValueType uniformizationRate) {
                 STORM_LOG_DEBUG("Computing uniformized matrix using uniformization rate " << uniformizationRate << ".");
@@ -489,9 +442,7 @@ namespace storm {
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel, double timeBound);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& phiStates, storm::dd::Bdd<storm::dd::DdType::CUDD> const& psiStates, bool qualitative);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::CUDD> const& targetStates, bool qualitative);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& psiStates);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& nextStates);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel);
             template storm::dd::Add<storm::dd::DdType::CUDD, double> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector);
             template storm::dd::Add<storm::dd::DdType::CUDD, double> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& maybeStates, double uniformizationRate);
 
@@ -501,9 +452,7 @@ namespace storm {
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel, double timeBound);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& nextStates);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel);
             template storm::dd::Add<storm::dd::DdType::Sylvan, double> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector);
             template storm::dd::Add<storm::dd::DdType::Sylvan, double> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& maybeStates, double uniformizationRate);
 
@@ -513,9 +462,7 @@ namespace storm {
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel, double timeBound);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& nextStates);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel);
             template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector);
             template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& maybeStates, storm::RationalNumber uniformizationRate);
 
@@ -525,9 +472,7 @@ namespace storm {
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalFunction>::RewardModelType const& rewardModel, double timeBound);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalFunction>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
             template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& nextStates);
-            template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalFunction>::RewardModelType const& rewardModel);
             template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector);
             template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& maybeStates, storm::RationalFunction uniformizationRate);
 
diff --git a/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.h b/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.h
index 73c5e3c46..17cf48054 100644
--- a/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.h
+++ b/src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.h
@@ -44,15 +44,9 @@ namespace storm {
                 template<storm::dd::DdType DdType, typename ValueType>
                 static std::unique_ptr<CheckResult> computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel, storm::dd::Bdd<DdType> const& targetStates, bool qualitative);
                 
-                template<storm::dd::DdType DdType, typename ValueType>
-                static std::unique_ptr<CheckResult> computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& psiStates);
-                
                 template<storm::dd::DdType DdType, typename ValueType>
                 static std::unique_ptr<CheckResult> computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& nextStates);
 
-                template<storm::dd::DdType DdType, typename ValueType>
-                static std::unique_ptr<CheckResult> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel);
-
                 /*!
                  * Converts the given rate-matrix into a time-abstract probability matrix.
                  *
diff --git a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
index ab8912c5d..58cdd4b15 100644
--- a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
+++ b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
@@ -482,555 +482,6 @@ namespace storm {
                 return storm::modelchecker::helper::SparseDtmcPrctlHelper<ValueType>::computeTotalRewards(env, std::move(goal), probabilityMatrix, backwardTransitions, dtmcRewardModel, qualitative);
             }
             
-            template <typename ValueType>
-            std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<ValueType> const* exitRateVector) {
-                
-                // If there are no goal states, we avoid the computation and directly return zero.
-                uint_fast64_t numberOfStates = rateMatrix.getRowCount();
-                if (psiStates.empty()) {
-                    return std::vector<ValueType>(numberOfStates, storm::utility::zero<ValueType>());
-                }
-                
-                // Likewise, if all bits are set, we can avoid the computation.
-                if (psiStates.full()) {
-                    return std::vector<ValueType>(numberOfStates, storm::utility::one<ValueType>());
-                }
-                
-                ValueType zero = storm::utility::zero<ValueType>();
-                ValueType one = storm::utility::one<ValueType>();
-                
-                return computeLongRunAverages<ValueType>(env, std::move(goal), rateMatrix,
-                                              [&zero, &one, &psiStates] (storm::storage::sparse::state_type const& state) -> ValueType {
-                                                  if (psiStates.get(state)) {
-                                                      return one;
-                                                  }
-                                                  return zero;
-                                              },
-                                              exitRateVector);
-            }
-            
-            template <typename ValueType, typename RewardModelType>
-            std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, RewardModelType const& rewardModel, std::vector<ValueType> const* exitRateVector) {
-                // Only compute the result if the model has a state-based reward model.
-                STORM_LOG_THROW(!rewardModel.empty(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula.");
-
-                return computeLongRunAverages<ValueType>(env, std::move(goal), rateMatrix,
-                        [&] (storm::storage::sparse::state_type const& state) -> ValueType {
-                            ValueType result = rewardModel.hasStateRewards() ? rewardModel.getStateReward(state) : storm::utility::zero<ValueType>();
-                            if (rewardModel.hasStateActionRewards()) {
-                                // State-action rewards are multiplied by the exit rate r(s). Multiplying the reward by the expected time spent in s (i.e., 1/r(s)) then recovers the original state reward.
-                                if (exitRateVector) {
-                                    result += rewardModel.getStateActionReward(state) * (*exitRateVector)[state];
-                                } else {
-                                    result += rewardModel.getStateActionReward(state);
-                                }
-                            }
-                            if (rewardModel.hasTransitionRewards()) {
-                                // Transition rewards are already multiplied with the rates
-                                result += rateMatrix.getPointwiseProductRowSum(rewardModel.getTransitionRewardMatrix(), state);
-                            }
-                            return result;
-                        },
-                        exitRateVector);
-            }
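As a concrete (hypothetical) instance of the scaling in the lambda above: a per-jump state-action reward of 0.5 in a state with exit rate r(s) = 4 is turned into the reward rate 0.5 * 4 = 2; since the expected sojourn time in that state is 1/4, the reward gathered per visit is again 2 * (1/4) = 0.5, i.e., the original state-action reward.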
-            
-            template <typename ValueType>
-            std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::vector<ValueType> const& stateRewardVector, std::vector<ValueType> const* exitRateVector) {
-                return computeLongRunAverages<ValueType>(env, std::move(goal), rateMatrix,
-                                                         [&stateRewardVector] (storm::storage::sparse::state_type const& state) -> ValueType {
-                                                             return stateRewardVector[state];
-                                                         },
-                                                         exitRateVector);
-            }
-            
-            template <typename ValueType>
-            std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverages(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector){
-                storm::storage::SparseMatrix<ValueType> probabilityMatrix;
-                if (exitRateVector) {
-                    probabilityMatrix = computeProbabilityMatrix(rateMatrix, *exitRateVector);
-                } else {
-                    probabilityMatrix = rateMatrix;
-                }
-                uint_fast64_t numberOfStates = rateMatrix.getRowCount();
-            
-                // Start by decomposing the CTMC into its BSCCs.
-                storm::storage::StronglyConnectedComponentDecomposition<ValueType> bsccDecomposition(rateMatrix, storm::storage::StronglyConnectedComponentDecompositionOptions().onlyBottomSccs());
-                
-                STORM_LOG_DEBUG("Found " << bsccDecomposition.size() << " BSCCs.");
-
-                // Prepare the vector holding the LRA values for each of the BSCCs.
-                std::vector<ValueType> bsccLra;
-                bsccLra.reserve(bsccDecomposition.size());
-                
-                auto underlyingSolverEnvironment = env;
-                auto precision = env.solver().lra().getPrecision();
-                if (env.solver().isForceSoundness()) {
-                    // For sound computations, the error in the MECs plus the error in the remaining system should be less than the user-defined precision.
-                    precision /= storm::utility::convertNumber<storm::RationalNumber>(2);
-                    underlyingSolverEnvironment.solver().lra().setPrecision(precision);
-                }
-                underlyingSolverEnvironment.solver().setLinearEquationSolverPrecision(precision, env.solver().lra().getRelativeTerminationCriterion());
-                
-                // Keep track of the maximal and minimal value occurring in one of the BSCCs.
-                ValueType maxValue, minValue;
-                storm::storage::BitVector statesInBsccs(numberOfStates);
-                for (auto const& bscc : bsccDecomposition) {
-                    for (auto const& state : bscc) {
-                        statesInBsccs.set(state);
-                    }
-                    bsccLra.push_back(computeLongRunAveragesForBscc<ValueType>(underlyingSolverEnvironment, bscc, rateMatrix, valueGetter, exitRateVector));
-                    if (bsccLra.size() == 1) {
-                        maxValue = bsccLra.back();
-                        minValue = bsccLra.back();
-                    } else {
-                        maxValue = std::max(bsccLra.back(), maxValue);
-                        minValue = std::min(bsccLra.back(), minValue);
-                    }
-                }
-                
-                storm::storage::BitVector statesNotInBsccs = ~statesInBsccs;
-                STORM_LOG_DEBUG("Found " << statesInBsccs.getNumberOfSetBits() << " states in BSCCs.");
-                
-                std::vector<uint64_t> stateToBsccMap(statesInBsccs.size(), -1);
-                for (uint64_t bsccIndex = 0; bsccIndex < bsccDecomposition.size(); ++bsccIndex) {
-                    for (auto const& state : bsccDecomposition[bsccIndex]) {
-                        stateToBsccMap[state] = bsccIndex;
-                    }
-                }
-                
-                std::vector<ValueType> rewardSolution;
-                if (!statesNotInBsccs.empty()) {
-                    // Calculate the LRA for states not in BSCCs as expected reachability rewards:
-                    // the target states are the states in BSCCs, and the transition reward is the LRA of the target BSCC
-                    // for each transition into a BSCC (and 0 otherwise). This corresponds to the sum of the BSCC LRAs,
-                    // each weighted by the probability of reaching that BSCC.
-                    
-                    std::vector<ValueType> rewardRightSide;
-                    rewardRightSide.reserve(statesNotInBsccs.getNumberOfSetBits());
-                    
-                    for (auto state : statesNotInBsccs) {
-                        ValueType reward = storm::utility::zero<ValueType>();
-                        for (auto entry : rateMatrix.getRow(state)) {
-                            if (statesInBsccs.get(entry.getColumn())) {
-                                if (exitRateVector) {
-                                    reward += (entry.getValue() / (*exitRateVector)[state]) * bsccLra[stateToBsccMap[entry.getColumn()]];
-                                } else {
-                                    reward += entry.getValue() * bsccLra[stateToBsccMap[entry.getColumn()]];
-                                }
-                            }
-                        }
-                        rewardRightSide.push_back(reward);
-                    }
-                    
-                    // Compute reachability rewards
-                    storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
-                    bool isEqSysFormat = linearEquationSolverFactory.getEquationProblemFormat(underlyingSolverEnvironment) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
-                    storm::storage::SparseMatrix<ValueType> rewardEquationSystemMatrix = rateMatrix.getSubmatrix(false, statesNotInBsccs, statesNotInBsccs, isEqSysFormat);
-                    if (exitRateVector) {
-                        uint64_t localRow = 0;
-                        for (auto const& globalRow : statesNotInBsccs) {
-                            for (auto& entry : rewardEquationSystemMatrix.getRow(localRow)) {
-                                entry.setValue(entry.getValue() / (*exitRateVector)[globalRow]);
-                            }
-                            ++localRow;
-                        }
-                    }
-                    if (isEqSysFormat) {
-                        rewardEquationSystemMatrix.convertToEquationSystem();
-                    }
-                    rewardSolution = std::vector<ValueType>(rewardEquationSystemMatrix.getColumnCount(), (maxValue + minValue) / storm::utility::convertNumber<ValueType,uint64_t>(2));
-                    std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> solver = linearEquationSolverFactory.create(underlyingSolverEnvironment, std::move(rewardEquationSystemMatrix));
-                    solver->setBounds(minValue, maxValue);
-                    // Check solver requirements
-                    auto requirements = solver->getRequirements(underlyingSolverEnvironment);
-                    STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
-                    solver->solveEquations(underlyingSolverEnvironment, rewardSolution, rewardRightSide);
-                }
-                
-                // Fill the result vector.
-                std::vector<ValueType> result(numberOfStates);
-                auto rewardSolutionIter = rewardSolution.begin();
-                
-                for (uint_fast64_t bsccIndex = 0; bsccIndex < bsccDecomposition.size(); ++bsccIndex) {
-                    storm::storage::StronglyConnectedComponent const& bscc = bsccDecomposition[bsccIndex];
-                    
-                    for (auto const& state : bscc) {
-                        result[state] = bsccLra[bsccIndex];
-                    }
-                }
-                for (auto state : statesNotInBsccs) {
-                    STORM_LOG_ASSERT(rewardSolutionIter != rewardSolution.end(), "Too few elements in solution.");
-                    // Take the value from the reward computation. Since the n-th state not in any bscc is the n-th
-                    // entry in rewardSolution we can just take the next value from the iterator.
-                    result[state] = *rewardSolutionIter;
-                    ++rewardSolutionIter;
-                }
-                
-                return result;
-            }
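As a concrete (hypothetical) instance of this weighting: a transient state with exit rate 3 whose only transitions lead into a BSCC with LRA 0.5 at rate 2 and into a BSCC with LRA 1 at rate 1 obtains the right-hand side (2/3) * 0.5 + (1/3) * 1 = 2/3, which is then also its LRA value; for transient states that can also reach other transient states, solving the equation system distributes these contributions according to the reachability probabilities.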
-
-            template <typename ValueType>
-            ValueType SparseCtmcCslHelper::computeLongRunAveragesForBscc(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
-
-                // Catch the case where all values are the same (this includes the special case where the bscc is of size 1)
-                auto it = bscc.begin();
-                ValueType val = valueGetter(*it);
-                for (++it; it != bscc.end(); ++it) {
-                    if (valueGetter(*it) != val) {
-                        break;
-                    }
-                }
-                if (it == bscc.end()) {
-                    // All entries have the same LRA
-                    return val;
-                }
-                
-                storm::solver::LraMethod method = env.solver().lra().getDetLraMethod();
-                if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isDetLraMethodSetFromDefault() && method == storm::solver::LraMethod::ValueIteration) {
-                    method = storm::solver::LraMethod::GainBiasEquations;
-                    STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
-                } else if (env.solver().isForceSoundness() && env.solver().lra().isDetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) {
-                    method = storm::solver::LraMethod::ValueIteration;
-                    STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
-                }
-                STORM_LOG_TRACE("Computing LRA for BSCC of size " << bscc.size() << " using '" << storm::solver::toString(method) << "'.");
-                if (method == storm::solver::LraMethod::ValueIteration) {
-                    return computeLongRunAveragesForBsccVi<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector);
-                } else if (method == storm::solver::LraMethod::LraDistributionEquations) {
-                    // We only need the first element of the pair as the lra distribution is not relevant at this point.
-                    return computeLongRunAveragesForBsccLraDistr<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;
-                }
-                STORM_LOG_WARN_COND(method == storm::solver::LraMethod::GainBiasEquations, "Unsupported lra method selected. Defaulting to " << storm::solver::toString(storm::solver::LraMethod::GainBiasEquations) << ".");
-                // We don't need the bias values
-                return computeLongRunAveragesForBsccGainBias<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;
-            }
-            
-            template <>
-            storm::RationalFunction SparseCtmcCslHelper::computeLongRunAveragesForBsccVi<storm::RationalFunction>(Environment const&, storm::storage::StronglyConnectedComponent const&, storm::storage::SparseMatrix<storm::RationalFunction> const&, std::function<storm::RationalFunction (storm::storage::sparse::state_type const& state)> const&, std::vector<storm::RationalFunction> const*) {
-                STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "The requested Method for LRA computation is not supported for parametric models.");
-            }
-                
-            template <typename ValueType>
-            ValueType SparseCtmcCslHelper::computeLongRunAveragesForBsccVi(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
-                
-                // Initialize data about the bscc
-                storm::storage::BitVector bsccStates(rateMatrix.getRowGroupCount(), false);
-                for (auto const& state : bscc) {
-                    bsccStates.set(state);
-                }
-                
-                // Get the uniformization rate
-                ValueType uniformizationRate = storm::utility::one<ValueType>();
-                if (exitRateVector) {
-                    uniformizationRate = storm::utility::vector::max_if(*exitRateVector, bsccStates);
-                }
-                // To ensure that the model is aperiodic, we need to make sure that every Markovian state gets a self-loop.
-                // Hence, we increase the uniformization rate a little.
-                uniformizationRate *= (storm::utility::one<ValueType>() + storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor()));
-
-                // Get the transitions of the submodel
-                typename storm::storage::SparseMatrix<ValueType> bsccMatrix = rateMatrix.getSubmatrix(true, bsccStates, bsccStates, true);
-                
-                // Uniformize the transitions
-                uint64_t subState = 0;
-                for (auto state : bsccStates) {
-                    for (auto& entry : bsccMatrix.getRow(subState)) {
-                        if (entry.getColumn() == subState) {
-                            if (exitRateVector) {
-                                entry.setValue(storm::utility::one<ValueType>() + (entry.getValue() - (*exitRateVector)[state]) / uniformizationRate);
-                            } else {
-                                entry.setValue(storm::utility::one<ValueType>() + (entry.getValue() - storm::utility::one<ValueType>()) / uniformizationRate);
-                            }
-                        } else {
-                            entry.setValue(entry.getValue() / uniformizationRate);
-                        }
-                    }
-                    ++subState;
-                }
-
-                // Compute the rewards obtained in a single uniformization step
-                std::vector<ValueType> markovianRewards;
-                markovianRewards.reserve(bsccMatrix.getRowCount());
-                for (auto const& state : bsccStates) {
-                    markovianRewards.push_back(valueGetter(state) / uniformizationRate);
-                }
-                
-                // start the iterations
-                ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()) / uniformizationRate;
-                bool relative = env.solver().lra().getRelativeTerminationCriterion();
-                if (!relative) {
-                    precision /= uniformizationRate;
-                }
-                std::vector<ValueType> x(bsccMatrix.getRowCount(), storm::utility::zero<ValueType>());
-                std::vector<ValueType> xPrime(x.size());
-                
-                auto multiplier = storm::solver::MultiplierFactory<ValueType>().create(env, bsccMatrix);
-                ValueType maxDiff, minDiff;
-                uint64_t iter = 0;
-                boost::optional<uint64_t> maxIter;
-                if (env.solver().lra().isMaximalIterationCountSet()) {
-                    maxIter = env.solver().lra().getMaximalIterationCount();
-                }
-                while (!maxIter.is_initialized() || iter < maxIter.get()) {
-                    ++iter;
-                    // Compute the values for the markovian states. We also keep track of the maximal and minimal difference between two values (for convergence checking)
-                    multiplier->multiply(env, x, &markovianRewards, xPrime);
-                    
-                    // update xPrime and check for convergence
-                    // To avoid large (and numerically unstable) x-values, we subtract a reference value.
-                    auto xIt = x.begin();
-                    auto xPrimeIt = xPrime.begin();
-                    ValueType refVal = *xPrimeIt;
-                    maxDiff = *xPrimeIt - *xIt;
-                    minDiff = maxDiff;
-                    *xPrimeIt -= refVal;
-                    *xIt = *xPrimeIt;
-                    for (++xIt, ++xPrimeIt; xIt != x.end(); ++xIt, ++xPrimeIt) {
-                        ValueType diff = *xPrimeIt - *xIt;
-                        maxDiff = std::max(maxDiff, diff);
-                        minDiff = std::min(minDiff, diff);
-                        *xPrimeIt -= refVal;
-                        *xIt = *xPrimeIt;
-                    }
-                    
-                    // Check for convergence. The uniformization rate is already incorporated into the precision parameter
-                    if ((maxDiff - minDiff) <= (relative ? (precision * minDiff) : precision)) {
-                        break;
-                    }
-                    if (storm::utility::resources::isTerminate()) {
-                        break;
-                    }
-                }
-                if (maxIter.is_initialized() && iter == maxIter.get()) {
-                    STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
-                } else {
-                    STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
-                }
-                return (maxDiff + minDiff) * uniformizationRate / (storm::utility::convertNumber<ValueType>(2.0));
-            }
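For readers who want the numerical recipe in isolation, the following is a minimal, self-contained sketch of the uniformize-and-iterate scheme used above, written in plain C++ without Storm's matrices, environment, or multiplier; the two-state example, the rates, and the fixed tolerance are purely illustrative, not part of the patch.

    // Illustrative only: the same uniformize-then-iterate scheme on a hard-coded two-state CTMC BSCC.
    // Rates: R(0,1)=3, R(1,0)=1; reward rates: rew(0)=1, rew(1)=0.
    // The stationary distribution is (1/4, 3/4), so the exact LRA reward rate is 0.25.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<std::vector<double>> rate = {{0.0, 3.0}, {1.0, 0.0}};
        std::vector<double> exitRate = {3.0, 1.0};
        std::vector<double> rewardRate = {1.0, 0.0};
        std::size_t n = rate.size();

        // Uniformize with a rate slightly above the maximal exit rate so every state keeps a
        // self-loop (this is what the aperiodicity factor achieves above).
        double unifRate = 1.02 * *std::max_element(exitRate.begin(), exitRate.end());
        std::vector<std::vector<double>> P(n, std::vector<double>(n, 0.0));
        std::vector<double> stepReward(n);
        for (std::size_t s = 0; s < n; ++s) {
            for (std::size_t t = 0; t < n; ++t) {
                P[s][t] = rate[s][t] / unifRate;
            }
            P[s][s] += 1.0 - exitRate[s] / unifRate;  // remaining probability mass stays in s
            stepReward[s] = rewardRate[s] / unifRate; // reward collected per uniformization step
        }

        // Relative value iteration: iterate x' = stepReward + P*x, track the span of x' - x,
        // and re-center by a reference value to keep the iterates bounded.
        std::vector<double> x(n, 0.0), xPrime(n, 0.0);
        double maxDiff = 0.0, minDiff = 0.0;
        for (int iter = 0; iter < 100000; ++iter) {
            for (std::size_t s = 0; s < n; ++s) {
                xPrime[s] = stepReward[s];
                for (std::size_t t = 0; t < n; ++t) {
                    xPrime[s] += P[s][t] * x[t];
                }
            }
            maxDiff = minDiff = xPrime[0] - x[0];
            for (std::size_t s = 1; s < n; ++s) {
                maxDiff = std::max(maxDiff, xPrime[s] - x[s]);
                minDiff = std::min(minDiff, xPrime[s] - x[s]);
            }
            double refVal = xPrime[0];
            for (std::size_t s = 0; s < n; ++s) {
                x[s] = xPrime[s] - refVal;
            }
            if (maxDiff - minDiff <= 1e-10) {
                break;
            }
        }
        // The average of the extremal differences is the per-step gain; scaling by the
        // uniformization rate converts it back into the reward rate of the CTMC.
        std::printf("LRA ~ %.6f (exact: 0.25)\n", (maxDiff + minDiff) / 2.0 * unifRate);
        return 0;
    }

The re-centering step mirrors the refVal subtraction above; without it the iterates would grow linearly with the gain and eventually lose precision.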
-            
-            template <typename ValueType>
-            std::pair<ValueType, std::vector<ValueType>> SparseCtmcCslHelper::computeLongRunAveragesForBsccGainBias(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
-                // We build the equation system as in Line 3 of Algorithm 3 from
-                // Kretinsky, Meggendorfer: Efficient Strategy Iteration for Mean Payoff in Markov Decision Processes (ATVA 2017)
-                // The first variable corresponds to the gain of the BSCC, whereas the subsequent variables yield the bias for the states s_1, s_2, ....
-                // No bias variable for s_0 is needed since its bias is always set to zero, yielding an n x n equation system matrix.
-                // To make this work for CTMCs, we could uniformize the model. This preserves the LRA and ensures that we can compute the
-                // LRA as for a DTMC (the sojourn time in each state is the same). If we then multiply the equations by the uniformization rate,
-                // the uniformization rate cancels out. Hence, we obtain the equation system below.
-                
-                // Get a mapping from global state indices to local ones.
-                std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
-                uint64_t localIndex = 0;
-                for (auto const& globalIndex : bscc) {
-                    toLocalIndexMap[globalIndex] = localIndex;
-                    ++localIndex;
-                }
-                
-                // Prepare an environment for the underlying equation solver
-                auto subEnv = env;
-                if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
-                    // Topological solver does not make any sense since the BSCC is connected.
-                    subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
-                }
-                subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());
-                
-                // Build the equation system matrix and vector.
-                storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
-                bool isEquationSystemFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
-                storm::storage::SparseMatrixBuilder<ValueType> builder(bscc.size(), bscc.size());
-                std::vector<ValueType> eqSysVector;
-                eqSysVector.reserve(bscc.size());
-                // The first row asserts that the weighted bias variables and the reward at s_0 sum up to the gain
-                uint64_t row = 0;
-                ValueType entryValue;
-                for (auto const& globalState : bscc) {
-                    // Coefficient for the gain variable
-                    if (isEquationSystemFormat) {
-                        // '1-0' in row 0 and -(-1) in other rows
-                        builder.addNextValue(row, 0, storm::utility::one<ValueType>());
-                    } else if (row > 0) {
-                        // No coefficient in row 0, otherwise subtract the gain.
-                        builder.addNextValue(row, 0, -storm::utility::one<ValueType>());
-                    }
-                    // Compute the weighted sum over the successor states. As this is a BSCC, each successor state will again be in the BSCC.
-                    auto diagonalValue = storm::utility::zero<ValueType>();
-                    if (row > 0) {
-                        if (isEquationSystemFormat) {
-                            diagonalValue = exitRateVector ? (*exitRateVector)[globalState] : storm::utility::one<ValueType>();
-                        } else {
-                            diagonalValue = storm::utility::one<ValueType>() - (exitRateVector ? (*exitRateVector)[globalState] : storm::utility::one<ValueType>());
-                        }
-                    }
-                    bool needDiagonalEntry = !storm::utility::isZero(diagonalValue);
-                    for (auto const& entry : rateMatrix.getRow(globalState)) {
-                        uint64_t col = toLocalIndexMap[entry.getColumn()];
-                        if (col == 0) {
-                            //Skip transition to state_0. This corresponds to setting the bias of state_0 to zero
-                            continue;
-                        }
-                        entryValue = entry.getValue();
-                        if (isEquationSystemFormat) {
-                            entryValue = -entryValue;
-                        }
-                        if (needDiagonalEntry && col >= row) {
-                            if (col == row) {
-                                entryValue += diagonalValue;
-                            } else { // col > row
-                                builder.addNextValue(row, row, diagonalValue);
-                            }
-                            needDiagonalEntry = false;
-                        }
-                        builder.addNextValue(row, col, entryValue);
-                    }
-                    if (needDiagonalEntry) {
-                        builder.addNextValue(row, row, diagonalValue);
-                    }
-                    eqSysVector.push_back(valueGetter(globalState));
-                    ++row;
-                }
-
-                // Create a linear equation solver
-                auto solver = linearEquationSolverFactory.create(subEnv, builder.build());
-                // Check solver requirements.
-                auto requirements = solver->getRequirements(subEnv);
-                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
-                // Todo: Find bounds on the bias variables. Just inserting the maximal value from the vector probably does not work.
-                
-                std::vector<ValueType> eqSysSol(bscc.size(), storm::utility::zero<ValueType>());
-                // Take the mean of the rewards as an initial guess for the gain
-                //eqSysSol.front() = std::accumulate(eqSysVector.begin(), eqSysVector.end(), storm::utility::zero<ValueType>()) / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size());
-                solver->solveEquations(subEnv, eqSysSol, eqSysVector);
-                
-                ValueType gain = eqSysSol.front();
-                // insert bias value for state 0
-                eqSysSol.front() = storm::utility::zero<ValueType>();
-                // Return the gain and the bias values
-                return std::pair<ValueType, std::vector<ValueType>>(std::move(gain), std::move(eqSysSol));
-            }
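Written out, the equation system assembled by this loop is the following (recovered from the comments and the entry values, so take it as a reading aid rather than a separate specification). For every state s of the BSCC with exit rate E(s), transition rates R(s,s') and state value rew(s) supplied by the valueGetter:

    g + E(s)*h(s) - sum_{s'} R(s,s')*h(s') = rew(s),    with h(s_0) = 0,

where g is the gain (the LRA value that is returned) and h is the bias vector. Fixing h(s_0) = 0 removes the remaining degree of freedom, which is why no bias variable is created for s_0. In the DTMC case (no exit rate vector) E(s) is 1, giving the classic gain/bias equations g + h(s) = rew(s) + sum_{s'} P(s,s')*h(s').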
-            
-            template <typename ValueType>
-            std::pair<ValueType, std::vector<ValueType>> SparseCtmcCslHelper::computeLongRunAveragesForBsccLraDistr(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
-
-                // Let A be an auxiliary matrix with A[s,s] = R(s,s) - r(s) and A[s,s'] = R(s,s') for s,s' in the BSCC and s != s'.
-                // We build and solve the equation system for
-                // x*A = 0 & x_0+...+x_n = 1  <=>  A^t*x = 0 = x-x & x_0+...+x_n = 1  <=>  (1+A^t)*x = x & 1-x_0-...-x_{n-1} = x_n
-                // Then, x[i] will be the fraction of the time we are in state i.
-                
-                // This method assumes that this BSCC consists of more than one state.
-                if (bscc.size() == 1) {
-                    return { valueGetter(*bscc.begin()), {storm::utility::one<ValueType>()} };
-                }
-                
-                // Prepare an environment for the underlying linear equation solver
-                auto subEnv = env;
-                if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
-                    // Topological solver does not make any sense since the BSCC is connected.
-                    subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
-                }
-                subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());
-                
-                // Get a mapping from global state indices to local ones as well as a bitvector containing states within the BSCC.
-                std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
-                storm::storage::BitVector bsccStates(rateMatrix.getRowCount(), false);
-                uint64_t localIndex = 0;
-                for (auto const& globalIndex : bscc) {
-                    bsccStates.set(globalIndex, true);
-                    toLocalIndexMap[globalIndex] = localIndex;
-                    ++localIndex;
-                }
-                
-                // Build the auxiliary Matrix A.
-                auto auxMatrix = rateMatrix.getSubmatrix(false, bsccStates, bsccStates, true); // add diagonal entries!
-                uint64_t row = 0;
-                for (auto const& globalIndex : bscc) {
-                    for (auto& entry : auxMatrix.getRow(row)) {
-                        if (entry.getColumn() == row) {
-                            // This value is non-zero since we have a BSCC with more than one state
-                            if (exitRateVector) {
-                                entry.setValue(entry.getValue() - (*exitRateVector)[globalIndex]);
-                            } else {
-                                entry.setValue(entry.getValue() - storm::utility::one<ValueType>());
-                            }
-                        }
-                    }
-                    ++row;
-                }
-                assert(row == auxMatrix.getRowCount());
-                
-                // We need to consider A^t. This will not delete diagonal entries since they are non-zero.
-                auxMatrix = auxMatrix.transpose();
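-                // Transposing turns the left-vector equation x*A = 0 into the column form A^t*x = 0 that the linear equation solvers operate on.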
-                
-                // Check whether we need the fixpoint characterization
-                storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
-                bool isFixpointFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::FixedPointSystem;
-                if (isFixpointFormat) {
-                    // Add a 1 on the diagonal
-                    for (row = 0; row < auxMatrix.getRowCount(); ++row) {
-                        for (auto& entry : auxMatrix.getRow(row)) {
-                            if (entry.getColumn() == row) {
-                                entry.setValue(storm::utility::one<ValueType>() + entry.getValue());
-                            }
-                        }
-                    }
-                }
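-                // Solvers using the fixed point format expect an equation of the form x = B*x (+ b), hence the diagonal shift by one above (cf. (I+A^t)*x = x).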
-                
-                // We now build the equation system matrix.
-                // We can drop the last row of A and instead fill it with ones to enforce that the variables sum up to one.
-                // Phase 1: replace the existing entries of the last row with ones
-                uint64_t col = 0;
-                uint64_t lastRow = auxMatrix.getRowCount() - 1;
-                for (auto& entry : auxMatrix.getRow(lastRow)) {
-                    entry.setColumn(col);
-                    if (isFixpointFormat) {
-                        if (col == lastRow) {
-                            entry.setValue(storm::utility::zero<ValueType>());
-                        } else {
-                            entry.setValue(-storm::utility::one<ValueType>());
-                        }
-                    } else {
-                        entry.setValue(storm::utility::one<ValueType>());
-                    }
-                    ++col;
-                }
-                storm::storage::SparseMatrixBuilder<ValueType> builder(std::move(auxMatrix));
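-                // Phase 2: add the remaining entries of the last row (ones, respectively -1 off the diagonal in the fixpoint format).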
-                for (; col <= lastRow; ++col) {
-                    if (isFixpointFormat) {
-                        if (col != lastRow) {
-                            builder.addNextValue(lastRow, col, -storm::utility::one<ValueType>());
-                        }
-                    } else {
-                        builder.addNextValue(lastRow, col, storm::utility::one<ValueType>());
-                    }
-                }
-                
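-                // The right-hand side is zero everywhere except for the last entry, which encodes the normalization x_0+...+x_n = 1.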
-                std::vector<ValueType> bsccEquationSystemRightSide(bscc.size(), storm::utility::zero<ValueType>());
-                bsccEquationSystemRightSide.back() = storm::utility::one<ValueType>();
-                
-                // Create a linear equation solver
-                auto solver = linearEquationSolverFactory.create(subEnv,  builder.build());
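-                // The solution is a probability distribution over the BSCC states, so zero and one are valid lower and upper bounds.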
-                solver->setBounds(storm::utility::zero<ValueType>(), storm::utility::one<ValueType>());
-                // Check solver requirements.
-                auto requirements = solver->getRequirements(subEnv);
-                requirements.clearLowerBounds();
-                requirements.clearUpperBounds();
-                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
-                
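-                // Start from the uniform distribution over the BSCC states as the initial guess.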
-                std::vector<ValueType> lraDistr(bscc.size(), storm::utility::one<ValueType>() / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size()));
-                solver->solveEquations(subEnv, lraDistr, bsccEquationSystemRightSide);
-                
-                // Calculate the final LRA value.
-                ValueType result = storm::utility::zero<ValueType>();
-                auto solIt = lraDistr.begin();
-                for (auto const& globalState : bscc) {
-                    result += valueGetter(globalState) * (*solIt);
-                    ++solIt;
-                }
-                assert(solIt == lraDistr.end());
-
-                return std::pair<ValueType, std::vector<ValueType>>(std::move(result), std::move(lraDistr));
-            }
-            
             template <typename ValueType, typename std::enable_if<storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
             std::vector<ValueType> SparseCtmcCslHelper::computeAllTransientProbabilities(Environment const& env, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::BitVector const& initialStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::vector<ValueType> const& exitRates, double timeBound) {
 
@@ -1253,10 +704,6 @@ namespace storm {
             
             template std::vector<double> SparseCtmcCslHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool qualitative);
             
-            template std::vector<double> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<double> const* exitRateVector);
-            template std::vector<double> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, std::vector<double> const* exitRateVector);
-            template std::vector<double> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, std::vector<double> const& stateRewardVector, std::vector<double> const* exitRateVector);
-            
             template std::vector<double> SparseCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, std::vector<double> const& exitRateVector, storm::models::sparse::StandardRewardModel<double> const& rewardModel, double timeBound);
 
             template std::vector<double> SparseCtmcCslHelper::computeAllTransientProbabilities(Environment const& env, storm::storage::SparseMatrix<double> const& rateMatrix, storm::storage::BitVector const& initialStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::vector<double> const& exitRates, double timeBound);
@@ -1290,15 +737,6 @@ namespace storm {
             template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool qualitative);
             template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, storm::storage::SparseMatrix<storm::RationalFunction> const& backwardTransitions, std::vector<storm::RationalFunction> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalFunction> const& rewardModel, bool qualitative);
 
-            template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<storm::RationalNumber> const* exitRateVector);
-            template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<storm::RationalFunction> const* exitRateVector);
-            
-            template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, storm::models::sparse::StandardRewardModel<RationalNumber> const& rewardModel, std::vector<storm::RationalNumber> const* exitRateVector);
-            template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, storm::models::sparse::StandardRewardModel<RationalFunction> const& rewardModel, std::vector<storm::RationalFunction> const* exitRateVector);
-
-            template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, std::vector<storm::RationalNumber> const& stateRewardVector, std::vector<storm::RationalNumber> const* exitRateVector);
-            template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, std::vector<storm::RationalFunction> const& stateRewardVector, std::vector<storm::RationalFunction> const* exitRateVector);
-
             template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, double timeBound);
             template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, std::vector<storm::RationalFunction> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalFunction> const& rewardModel, double timeBound);
 
diff --git a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.h b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.h
index 697eca743..13470bd8d 100644
--- a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.h
+++ b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.h
@@ -55,15 +55,6 @@ namespace storm {
                 template <typename ValueType, typename RewardModelType>
                 static std::vector<ValueType> computeTotalRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, RewardModelType const& rewardModel, bool qualitative);
 
-                template <typename ValueType>
-                static std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<ValueType> const* exitRateVector);
-
-                template <typename ValueType, typename RewardModelType>
-                static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, RewardModelType const& rewardModel, std::vector<ValueType> const* exitRateVector);
-
-                template <typename ValueType>
-                static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::vector<ValueType> const& stateRewardVector, std::vector<ValueType> const* exitRateVector);
-
                 template <typename ValueType>
                 static std::vector<ValueType> computeReachabilityTimes(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& targetStates, bool qualitative);
 
@@ -131,18 +122,6 @@ namespace storm {
                 template <typename ValueType>
                 static bool checkAndUpdateTransientProbabilityEpsilon(storm::Environment const& env, ValueType& epsilon, std::vector<ValueType> const& resultVector, storm::storage::BitVector const& relevantPositions);
                 
-            private:
-                template <typename ValueType>
-                static std::vector<ValueType> computeLongRunAverages(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
-                template <typename ValueType>
-                static ValueType computeLongRunAveragesForBscc(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
-                template <typename ValueType>
-                static ValueType computeLongRunAveragesForBsccVi(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
-                template <typename ValueType>
-                static std::pair<ValueType, std::vector<ValueType>> computeLongRunAveragesForBsccGainBias(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
-                template <typename ValueType>
-                static std::pair<ValueType, std::vector<ValueType>> computeLongRunAveragesForBsccLraDistr(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
-                
             };
         }
     }
diff --git a/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.cpp b/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.cpp
index ce5bf4467..e0c4fd143 100644
--- a/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.cpp
+++ b/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.cpp
@@ -344,32 +344,6 @@ namespace storm {
                 return computeReachabilityRewards(env, model, transitionMatrix, rewardModel, targetStates, qualitative);
             }
             
-            template<storm::dd::DdType DdType, typename ValueType>
-            std::unique_ptr<CheckResult> HybridDtmcPrctlHelper<DdType, ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& targetStates) {
-                // Create ODD for the translation.
-                storm::utility::Stopwatch conversionWatch(true);
-                storm::dd::Odd odd = model.getReachableStates().createOdd();
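-                // The ODD links the symbolic state encoding to explicit state indices; the same ODD is used for both row and column variables.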
-                storm::storage::SparseMatrix<ValueType> explicitProbabilityMatrix = model.getTransitionMatrix().toMatrix(odd, odd);
-                conversionWatch.stop();
-                STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
-
-                std::vector<ValueType> result = storm::modelchecker::helper::SparseDtmcPrctlHelper<ValueType>::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(), explicitProbabilityMatrix, targetStates.toVector(odd));
-                return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
-            }
-
-            template<storm::dd::DdType DdType, typename ValueType>
-            std::unique_ptr<CheckResult> HybridDtmcPrctlHelper<DdType, ValueType>::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, RewardModelType const& rewardModel) {
-                // Create ODD for the translation.
-                storm::utility::Stopwatch conversionWatch(true);
-                storm::dd::Odd odd = model.getReachableStates().createOdd();
-                storm::storage::SparseMatrix<ValueType> explicitProbabilityMatrix = model.getTransitionMatrix().toMatrix(odd, odd);
-                conversionWatch.stop();
-                STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
-
-                std::vector<ValueType> result = storm::modelchecker::helper::SparseDtmcPrctlHelper<ValueType>::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(), explicitProbabilityMatrix, rewardModel.getTotalRewardVector(model.getTransitionMatrix(), model.getColumnVariables()).toVector(odd));
-                return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
-            }
-            
             template class HybridDtmcPrctlHelper<storm::dd::DdType::CUDD, double>;
             template class HybridDtmcPrctlHelper<storm::dd::DdType::Sylvan, double>;
 
diff --git a/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h b/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h
index 9e20c5e2c..f4d5b950e 100644
--- a/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h
+++ b/src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h
@@ -39,10 +39,6 @@ namespace storm {
 
                 static std::unique_ptr<CheckResult> computeReachabilityTimes(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& targetStates, bool qualitative);
 
-                static std::unique_ptr<CheckResult> computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& targetStates);
-
-                static std::unique_ptr<CheckResult> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, RewardModelType const& rewardModel);
-
             };
             
         }
diff --git a/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.cpp b/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.cpp
index bf9c9b9ad..97983ee54 100644
--- a/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.cpp
+++ b/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.cpp
@@ -531,21 +531,6 @@ namespace storm {
                 return result;
             }
             
-            template<typename ValueType, typename RewardModelType>
-            std::vector<ValueType> SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& psiStates) {
-                return SparseCtmcCslHelper::computeLongRunAverageProbabilities<ValueType>(env, std::move(goal), transitionMatrix, psiStates, nullptr);
-            }
-            
-            template<typename ValueType, typename RewardModelType>
-            std::vector<ValueType> SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel) {
-                return SparseCtmcCslHelper::computeLongRunAverageRewards<ValueType, RewardModelType>(env, std::move(goal), transitionMatrix, rewardModel, nullptr);
-            }
-            
-            template<typename ValueType, typename RewardModelType>
-            std::vector<ValueType> SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& stateRewards) {
-                return SparseCtmcCslHelper::computeLongRunAverageRewards<ValueType>(env, std::move(goal), transitionMatrix, stateRewards, nullptr);
-            }
-            
             template<typename ValueType, typename RewardModelType>
             typename SparseDtmcPrctlHelper<ValueType, RewardModelType>::BaierTransformedModel SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeBaierTransformation(Environment const& env, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates, boost::optional<std::vector<ValueType>> const& stateRewards) {
 
diff --git a/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h b/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h
index e22e4bfc4..f892f3a7d 100644
--- a/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h
+++ b/src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h
@@ -52,12 +52,6 @@ namespace storm {
                 
                 static std::vector<ValueType> computeReachabilityTimes(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, bool qualitative, ModelCheckerHint const& hint = ModelCheckerHint());
 
-                static std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& psiStates);
-
-                static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal,  storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel);
-
-                static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& stateRewards);
-
                 static std::vector<ValueType> computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates, bool qualitative);
                 
                 static std::vector<ValueType> computeConditionalRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates, bool qualitative);