@@ -2,6 +2,8 @@

#include <vector>
#include <memory>
#include <algorithm>

#include <boost/variant/get.hpp>

#include "storm/utility/macros.h"
#include "storm/utility/FilteredRewardModel.h"
@@ -12,6 +14,10 @@
#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"

#include "storm/logic/FragmentSpecification.h"
#include "storm/logic/Coalition.h"

#include "storm/storage/BitVector.h"

#include "storm/environment/solver/MultiplierEnvironment.h"

#include "storm/models/sparse/StandardRewardModel.h"
@@ -27,6 +33,8 @@ namespace storm {
            // Intentionally left empty.
        }

        template<typename SparseSmgModelType>
        bool SparseSmgRpatlModelChecker<SparseSmgModelType>::canHandleStatic(CheckTask<storm::logic::Formula, ValueType> const& checkTask, bool* requiresSingleInitialState) {
            storm::logic::Formula const& formula = checkTask.getFormula();
@@ -44,14 +52,46 @@
            }
        }

        template<typename SparseSmgModelType>
        std::unique_ptr<CheckResult> SparseSmgRpatlModelChecker<SparseSmgModelType>::checkGameFormula(Environment const& env, CheckTask<storm::logic::GameFormula, ValueType> const& checkTask) {
            storm::logic::GameFormula const& gameFormula = checkTask.getFormula();
            storm::logic::Formula const& subFormula = gameFormula.getSubformula();
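            // Dispatch on the type of the subformula under the game operator.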
            if (subFormula.isOperatorFormula()) {
                return this->checkStateFormula(env, checkTask.substituteFormula(subFormula.asStateFormula()).setPlayerCoalition(gameFormula.getCoalition()));
            }
            if (subFormula.isRewardOperatorFormula()) {
                return this->checkRewardOperatorFormula(env, checkTask.substituteFormula(subFormula.asRewardOperatorFormula()));
            } else if (subFormula.isLongRunAverageOperatorFormula()) {
                return this->checkLongRunAverageOperatorFormula(env, checkTask.substituteFormula(subFormula.asLongRunAverageOperatorFormula()));
            }
            STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Cannot check this property (yet).");
        }

        template<typename ModelType>
        std::unique_ptr<CheckResult> SparseSmgRpatlModelChecker<ModelType>::checkRewardOperatorFormula(Environment const& env, CheckTask<storm::logic::RewardOperatorFormula, ValueType> const& checkTask) {
            storm::logic::RewardOperatorFormula const& formula = checkTask.getFormula();
            std::unique_ptr<CheckResult> result = this->computeRewards(env, formula.getMeasureType(), checkTask.substituteFormula(formula.getSubformula()));

            return result;
        }
        STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Only game formulas with Operatorformulas as subformula are supported.");

        template<typename ModelType>
        std::unique_ptr<CheckResult> SparseSmgRpatlModelChecker<ModelType>::checkLongRunAverageOperatorFormula(Environment const& env, CheckTask<storm::logic::LongRunAverageOperatorFormula, ValueType> const& checkTask) {
            storm::logic::LongRunAverageOperatorFormula const& formula = checkTask.getFormula();
            std::unique_ptr<CheckResult> result = this->computeLongRunAverageProbabilities(env, checkTask.substituteFormula(formula.getSubformula().asStateFormula()));

            return result;
        }

        template<typename ModelType>
        std::unique_ptr<CheckResult> SparseSmgRpatlModelChecker<ModelType>::computeRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::Formula, ValueType> const& checkTask) {
            storm::logic::Formula const& rewardFormula = checkTask.getFormula();
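            // Only long-run average reward formulas are handled so far; everything else is rejected below.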
            if (rewardFormula.isLongRunAverageRewardFormula()) {
                return this->computeLongRunAverageRewards(env, rewardMeasureType, checkTask.substituteFormula(rewardFormula.asLongRunAverageRewardFormula()));
            }
            STORM_LOG_THROW(false, storm::exceptions::InvalidArgumentException, "The given formula '" << rewardFormula << "' cannot (yet) be handled.");
        }

        template<typename SparseSmgModelType>
@@ -62,10 +102,15 @@
        template<typename SparseSmgModelType>
        std::unique_ptr<CheckResult> SparseSmgRpatlModelChecker<SparseSmgModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
            auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
            STORM_LOG_THROW(checkTask.isPlayerCoalitionSet(), storm::exceptions::InvalidPropertyException, "No player coalition was set.");
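            // Determine the set of states controlled by the player coalition of the check task.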
            auto coalitionStates = this->getModel().computeStatesOfCoalition(checkTask.getPlayerCoalition());
            std::cout << "Found " << coalitionStates.getNumberOfSetBits() << " states in coalition." << std::endl;
            STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Not implemented.");
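            // The actual computation is delegated to the infinite-horizon helper for nondeterministic games.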
            storm::modelchecker::helper::SparseNondeterministicGameInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getPlayerActionIndices());
            storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
            auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());

            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
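            // If requested, attach the scheduler extracted from the helper to the result.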
            if (checkTask.isProduceSchedulersSet()) {
                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
            }
            return result;
        }

        template class SparseSmgRpatlModelChecker<storm::models::sparse::Smg<double>>;