diff --git a/src/storm-parsers/parser/PrismParser.cpp b/src/storm-parsers/parser/PrismParser.cpp
index 624dda819..23a3b3464 100644
--- a/src/storm-parsers/parser/PrismParser.cpp
+++ b/src/storm-parsers/parser/PrismParser.cpp
@@ -816,7 +816,6 @@ namespace storm {
                         STORM_LOG_THROW(false, storm::exceptions::WrongFormatException, "Parsing error in " << this->getFilename() << " for player " << playerName << ": No action named '" << actionName << "' present.");
                     }
                 }
-                STORM_LOG_DEBUG("PLAYER created:" << playerName);
                 return storm::prism::Player(playerName, controlledModuleIndices, controlledActionIndices);
             } else {
                 return storm::prism::Player();
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.cpp
index 03538c2e9..d48211af6 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.cpp
@@ -3,6 +3,7 @@
 #include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
 
 #include "storm/storage/SparseMatrix.h"
+#include "storm/storage/MaximalEndComponentDecomposition.h"
 #include "storm/storage/Scheduler.h"
 
 #include "storm/solver/MinMaxLinearEquationSolver.h"
@@ -26,25 +27,99 @@ namespace storm {
                 // Intentionally left empty.
             }
 
+            template <typename ValueType>
+            std::vector<uint64_t> const& SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const {
+                STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
+                STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
+                return this->_producedOptimalChoices.get();
+            }
+
+            template <typename ValueType>
+            std::vector<uint64_t>& SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() {
+                STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
+                STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
+                return this->_producedOptimalChoices.get();
+            }
+
+            template <typename ValueType>
+            storm::storage::Scheduler<ValueType> SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::extractScheduler() const {
+                auto const& optimalChoices = getProducedOptimalChoices();
+                storm::storage::Scheduler<ValueType> scheduler(optimalChoices.size());
+                for (uint64_t state = 0; state < optimalChoices.size(); ++state) {
+                        scheduler.setChoice(optimalChoices[state], state);
+                }
+                return scheduler;
+            }
+
+
             template <typename ValueType>
             void SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::createDecomposition() {
-                STORM_LOG_THROW(false, storm::exceptions::InternalException, "Creating Decompositions of SMGs is currently not possible.");
+                // TODO This needs to be changed to return the whole model as one component as long as there is no overridden version of MaximalEndComponentDecomposition for SMGs.
+                if (this->_longRunComponentDecomposition == nullptr) {
+                    // The decomposition has not been provided or computed yet.
+                    if (this->_backwardTransitions == nullptr) {
+                        this->_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(this->_transitionMatrix.transpose(true));
+                        this->_backwardTransitions = this->_computedBackwardTransitions.get();
+                    }
+                    this->_computedLongRunComponentDecomposition = std::make_unique<storm::storage::MaximalEndComponentDecomposition<ValueType>>(this->_transitionMatrix, *this->_backwardTransitions);
+                    this->_longRunComponentDecomposition = this->_computedLongRunComponentDecomposition.get();
+                }
             }
 
             template <typename ValueType>
-            ValueType SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) {
+            std::vector<ValueType> SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter) {
+                auto underlyingSolverEnvironment = env;
+                std::vector<ValueType> componentLraValues;
+                createDecomposition();
+                componentLraValues.reserve(this->_longRunComponentDecomposition->size());
+                for (auto const& c : *(this->_longRunComponentDecomposition)) {
+                    componentLraValues.push_back(computeLraForComponent(underlyingSolverEnvironment, stateValuesGetter, actionValuesGetter, c));
+                }
+                return componentLraValues;
+            }
 
-                STORM_LOG_THROW(false, storm::exceptions::InternalException, "Computing values for LRA for SMGs components is currently not possible.");
+            template <typename ValueType>
+            ValueType SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) {
+                // Allocate memory for the nondeterministic choices.
+                if (this->isProduceSchedulerSet()) {
+                    if (!this->_producedOptimalChoices.is_initialized()) {
+                        this->_producedOptimalChoices.emplace();
+                    }
+                    this->_producedOptimalChoices->resize(this->_transitionMatrix.getRowGroupCount());
+                }
+
+                storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod();
+                if (method == storm::solver::LraMethod::LinearProgramming) {
+                    STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
+                } else if (method == storm::solver::LraMethod::ValueIteration) {
+                    return computeLraVi(env, stateRewardsGetter, actionRewardsGetter, component);
+                } else {
+                    STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
+                }
             }
 
             template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) {
-                STORM_LOG_THROW(false, storm::exceptions::InternalException, "buildAndSolveSsp not available for SMGs");
+            ValueType SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::computeLraVi(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+
+                // Collect some parameters of the computation
+                ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
+                std::vector<uint64_t>* optimalChoices = nullptr;
+                if (this->isProduceSchedulerSet()) {
+                    optimalChoices = &this->_producedOptimalChoices.get();
+                }
+
+                // Now create a helper and perform the algorithm
+                if (this->isContinuousTime()) {
+                    STORM_LOG_THROW(false, storm::exceptions::InternalException, "We cannot handle continuous time games.");
+                } else {
+                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::GameNondetTsNoIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor);
+                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices);
+                }
             }
 
             template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter) {
-                STORM_LOG_THROW(false, storm::exceptions::InternalException, "computeLongRunAverageValues not possible yet.");
+            std::vector<ValueType> SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) {
+                STORM_LOG_THROW(false, storm::exceptions::InternalException, "We do not create compositions for LRA for SMGs, solving a stochastic shortest path problem is not available.");
             }
 
 
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.h
index fd872b807..f6075dc57 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.h
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.h
@@ -1,4 +1,5 @@
 #pragma once
+
 #include "storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h"
 
 namespace storm {
@@ -29,31 +30,37 @@ namespace storm {
                  */
                 SparseNondeterministicGameInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<std::pair<std::string, uint_fast64_t>> const& player);
 
-                /*!
-                 * TODO
+                /*!
+                 * TODO Computes the long run average value given the provided state and action based rewards
+                 * @param stateValuesGetter a function returning a value for a given state index
+                 * @param actionValuesGetter a function returning a value for a given (global) choice index
+                 * @return a value for each state
                  */
-                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter);
+                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter) override;
 
                 /*!
                  * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
                  * @return the produced scheduler of the most recent call.
                  */
-                //std::vector<uint64_t> const& getProducedOptimalChoices() const;
+                std::vector<uint64_t> const& getProducedOptimalChoices() const;
 
                 /*!
                  * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
                  * @return the produced scheduler of the most recent call.
                  */
-                //std::vector<uint64_t>& getProducedOptimalChoices();
+                std::vector<uint64_t>& getProducedOptimalChoices();
 
                 /*!
                  * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
                  * @return a new scheduler containing optimal choices for each state that yield the long run average values of the most recent call.
                  */
-                //storm::storage::Scheduler<ValueType> extractScheduler() const;
+                storm::storage::Scheduler<ValueType> extractScheduler() const;
 
-                void createDecomposition();
                 ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter,  ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& component);
+
+                ValueType computeLraVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
+
+                void createDecomposition();
                 std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues);
 
             private: