diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.cpp
index 03538c2e9..d48211af6 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.cpp
@@ -3,6 +3,7 @@
 #include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
 
 #include "storm/storage/SparseMatrix.h"
+#include "storm/storage/MaximalEndComponentDecomposition.h"
 #include "storm/storage/Scheduler.h"
 
 #include "storm/solver/MinMaxLinearEquationSolver.h"
@@ -26,25 +27,99 @@ namespace storm {
                 // Intentionally left empty.
             }
 
+            template <typename ValueType>
+            std::vector<uint64_t> const& SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const {
+                STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
+                STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
+                return this->_producedOptimalChoices.get();
+            }
+
+            template <typename ValueType>
+            std::vector<uint64_t>& SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() {
+                STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
+                STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
+                return this->_producedOptimalChoices.get();
+            }
+
+            template <typename ValueType>
+            storm::storage::Scheduler<ValueType> SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::extractScheduler() const {
+                auto const& optimalChoices = getProducedOptimalChoices();
+                storm::storage::Scheduler<ValueType> scheduler(optimalChoices.size());
+                for (uint64_t state = 0; state < optimalChoices.size(); ++state) {
+                    scheduler.setChoice(optimalChoices[state], state);
+                }
+                return scheduler;
+            }
+
             template <typename ValueType>
             void SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::createDecomposition() {
-                STORM_LOG_THROW(false, storm::exceptions::InternalException, "Creating Decompositions of SMGs is currently not possible.");
+                // TODO: As long as there is no overridden version of MaximalEndComponentDecomposition for SMGs, this needs to be changed to return the whole model as one component.
+                if (this->_longRunComponentDecomposition == nullptr) {
+                    // The decomposition has not been provided or computed yet.
+                    if (this->_backwardTransitions == nullptr) {
+                        this->_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(this->_transitionMatrix.transpose(true));
+                        this->_backwardTransitions = this->_computedBackwardTransitions.get();
+                    }
+                    this->_computedLongRunComponentDecomposition = std::make_unique<storm::storage::MaximalEndComponentDecomposition<ValueType>>(this->_transitionMatrix, *this->_backwardTransitions);
+                    this->_longRunComponentDecomposition = this->_computedLongRunComponentDecomposition.get();
+                }
             }
 
             template <typename ValueType>
-            ValueType SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) {
+            std::vector<ValueType> SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter) {
+                auto underlyingSolverEnvironment = env;
+                std::vector<ValueType> componentLraValues;
+                createDecomposition();
+                componentLraValues.reserve(this->_longRunComponentDecomposition->size());
+                for (auto const& c : *(this->_longRunComponentDecomposition)) {
+                    componentLraValues.push_back(computeLraForComponent(underlyingSolverEnvironment, stateValuesGetter, actionValuesGetter, c));
+                }
+                return componentLraValues;
+            }
 
-                STORM_LOG_THROW(false, storm::exceptions::InternalException, "Computing values for LRA for SMGs components is currently not possible.");
+            template <typename ValueType>
+            ValueType SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) {
+                // Allocate memory for the nondeterministic choices.
+                if (this->isProduceSchedulerSet()) {
+                    if (!this->_producedOptimalChoices.is_initialized()) {
+                        this->_producedOptimalChoices.emplace();
+                    }
+                    this->_producedOptimalChoices->resize(this->_transitionMatrix.getRowGroupCount());
+                }
+
+                storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod();
+                if (method == storm::solver::LraMethod::LinearProgramming) {
+                    STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
+                } else if (method == storm::solver::LraMethod::ValueIteration) {
+                    return computeLraVi(env, stateRewardsGetter, actionRewardsGetter, component);
+                } else {
+                    STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
+                }
             }
 
             template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) {
-                STORM_LOG_THROW(false, storm::exceptions::InternalException, "buildAndSolveSsp not available for SMGs");
+            ValueType SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::computeLraVi(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
+                // Collect some parameters of the computation.
+                ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
+                std::vector<uint64_t>* optimalChoices = nullptr;
+                if (this->isProduceSchedulerSet()) {
+                    optimalChoices = &this->_producedOptimalChoices.get();
+                }
+
+                // Now create a helper and perform the algorithm.
+                if (this->isContinuousTime()) {
+                    STORM_LOG_THROW(false, storm::exceptions::InternalException, "We cannot handle continuous-time games.");
+                } else {
+                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor);
+                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices);
+                }
             }
 
             template <typename ValueType>
-            std::vector<ValueType> SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter) {
-                STORM_LOG_THROW(false, storm::exceptions::InternalException, "computeLongRunAverageValues not possible yet.");
+            std::vector<ValueType> SparseNondeterministicGameInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) {
+                STORM_LOG_THROW(false, storm::exceptions::InternalException, "Solving a stochastic shortest path problem to combine the component values is not available for SMGs.");
             }
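A note on the `ValueGetter` arguments threaded through the new code paths above: in the base class `SparseInfiniteHorizonHelper` they are functions mapping a state index (respectively a global choice index) to a value. The following is a minimal, hypothetical sketch of how a caller might build the two getters from a `StandardRewardModel`; the `makeValueGetters` helper and the `rewardModel` input are assumptions for illustration and are not part of this patch:

```cpp
#include <cstdint>
#include <functional>
#include <utility>

#include "storm/models/sparse/StandardRewardModel.h"

// Hypothetical helper: derive the state and action value getters from a reward model.
// The lambdas capture 'rewardModel' by reference, so it must outlive the getters.
std::pair<std::function<double(uint64_t)>, std::function<double(uint64_t)>>
makeValueGetters(storm::models::sparse::StandardRewardModel<double> const& rewardModel) {
    // One value per state index.
    auto stateValuesGetter = [&rewardModel](uint64_t state) {
        return rewardModel.hasStateRewards() ? rewardModel.getStateReward(state) : 0.0;
    };
    // One value per (global) choice index, matching the row indexing of the transition matrix.
    auto actionValuesGetter = [&rewardModel](uint64_t globalChoiceIndex) {
        return rewardModel.hasStateActionRewards() ? rewardModel.getStateActionReward(globalChoiceIndex) : 0.0;
    };
    return {stateValuesGetter, actionValuesGetter};
}
```

The corresponding header changes follow.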
diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.h
index fd872b807..f6075dc57 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.h
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.h
@@ -1,4 +1,5 @@
 #pragma once
+
 #include "storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h"
 
 namespace storm {
@@ -29,31 +30,37 @@ namespace storm {
                  */
                 SparseNondeterministicGameInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<std::pair<std::string, uint_fast64_t>> const& player);
 
-                /*!
-                 * TODO
+                /*! TODO
+                 * Computes the long run average value given the provided state- and action-based rewards.
+                 * @param stateValuesGetter a function returning a value for a given state index
+                 * @param actionValuesGetter a function returning a value for a given (global) choice index
+                 * @return a value for each state
                  */
-                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter);
+                std::vector<ValueType> computeLongRunAverageValues(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter) override;
 
                 /*!
                  * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
                  * @return the produced scheduler of the most recent call.
                  */
-                //std::vector<uint64_t> const& getProducedOptimalChoices() const;
+                std::vector<uint64_t> const& getProducedOptimalChoices() const;
 
                 /*!
                  * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
                  * @return the produced scheduler of the most recent call.
                  */
-                //std::vector<uint64_t>& getProducedOptimalChoices();
+                std::vector<uint64_t>& getProducedOptimalChoices();
 
                 /*!
                  * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
                  * @return a new scheduler containing optimal choices for each state that yield the long run average values of the most recent call.
                  */
-                //storm::storage::Scheduler<ValueType> extractScheduler() const;
+                storm::storage::Scheduler<ValueType> extractScheduler() const;
 
-                void createDecomposition();
                 ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& component);
+
+                ValueType computeLraVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
+
+                void createDecomposition();
                 std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues);
 
             private:
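Taken end to end, the public API this patch enables could be driven roughly as follows. This is a hedged usage sketch, not code from the patch: the function `solveLraExample` and its parameters are assumptions, and the scheduler/direction setters are the ones inherited from the generic model checker helper base classes.

```cpp
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

#include "storm/environment/Environment.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicGameInfiniteHorizonHelper.h"
#include "storm/storage/Scheduler.h"
#include "storm/storage/SparseMatrix.h"

// Hypothetical driver; all parameters are assumed inputs from the calling context.
storm::storage::Scheduler<double> solveLraExample(storm::Environment const& env,
                                                  storm::storage::SparseMatrix<double> const& transitionMatrix,
                                                  std::vector<std::pair<std::string, uint_fast64_t>> const& playerInfo,
                                                  std::function<double(uint64_t)> const& stateValuesGetter,
                                                  std::function<double(uint64_t)> const& actionValuesGetter) {
    storm::modelchecker::helper::SparseNondeterministicGameInfiniteHorizonHelper<double> helper(transitionMatrix, playerInfo);
    helper.setOptimizationDirection(storm::solver::OptimizationDirection::Maximize);
    helper.setProduceScheduler(true);  // required for getProducedOptimalChoices()/extractScheduler()

    // Decomposes the model into maximal end components and runs value iteration on each;
    // lraValues holds one long-run average value per computed component.
    std::vector<double> lraValues = helper.computeLongRunAverageValues(env, stateValuesGetter, actionValuesGetter);

    // The optimal choices were recorded during the computation call above.
    return helper.extractScheduler();
}
```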