
sparse qualitative solving of menu games

Branch: main
Author: dehnert (7 years ago)
Commit: 9665f4fa30
  1. src/storm/abstraction/ExplicitQualitativeGameResult.cpp (16 changed lines)
  2. src/storm/abstraction/ExplicitQualitativeGameResult.h (20 changed lines)
  3. src/storm/abstraction/ExplicitQualitativeGameResultMinMax.cpp (25 changed lines)
  4. src/storm/abstraction/ExplicitQualitativeGameResultMinMax.h (33 changed lines)
  5. src/storm/abstraction/MenuGameRefiner.cpp (8 changed lines)
  6. src/storm/abstraction/MenuGameRefiner.h (8 changed lines)
  7. src/storm/abstraction/SymbolicQualitativeGameResult.h (1 changed line)
  8. src/storm/abstraction/SymbolicQualitativeGameResultMinMax.cpp (4 changed lines)
  9. src/storm/abstraction/SymbolicQualitativeGameResultMinMax.h (9 changed lines)
 10. src/storm/abstraction/SymbolicQualitativeMdpResult.h (1 changed line)
 11. src/storm/modelchecker/abstraction/AbstractAbstractionRefinementModelChecker.cpp (48 changed lines)
 12. src/storm/modelchecker/abstraction/AbstractAbstractionRefinementModelChecker.h (6 changed lines)
 13. src/storm/modelchecker/abstraction/GameBasedMdpModelChecker.cpp (176 changed lines)
 14. src/storm/modelchecker/abstraction/GameBasedMdpModelChecker.h (28 changed lines)
 15. src/storm/settings/modules/AbstractionSettings.cpp (7 changed lines)
 16. src/storm/settings/modules/AbstractionSettings.h (8 changed lines)
 17. src/storm/storage/SparseMatrix.cpp (10 changed lines)
 18. src/storm/storage/SparseMatrix.h (6 changed lines)
 19. src/storm/utility/graph.cpp (270 changed lines)
 20. src/storm/utility/graph.h (79 changed lines)

src/storm/abstraction/ExplicitQualitativeGameResult.cpp (16 changed lines)

@@ -0,0 +1,16 @@
#include "storm/abstraction/ExplicitQualitativeGameResult.h"
namespace storm {
namespace abstraction {
ExplicitQualitativeGameResult::ExplicitQualitativeGameResult(storm::utility::graph::ExplicitGameProb01Result const& prob01Result) : storm::utility::graph::ExplicitGameProb01Result(prob01Result) {
// Intentionally left empty.
}
storm::storage::BitVector const& ExplicitQualitativeGameResult::getStates() const {
return this->getPlayer1States();
}
}
}

src/storm/abstraction/ExplicitQualitativeGameResult.h (20 changed lines)

@@ -0,0 +1,20 @@
#pragma once
#include "storm/utility/graph.h"
#include "storm/abstraction/ExplicitQualitativeResult.h"
namespace storm {
namespace abstraction {
class ExplicitQualitativeGameResult : public storm::utility::graph::ExplicitGameProb01Result, public ExplicitQualitativeResult {
public:
ExplicitQualitativeGameResult() = default;
ExplicitQualitativeGameResult(storm::utility::graph::ExplicitGameProb01Result const& prob01Result);
virtual storm::storage::BitVector const& getStates() const override;
};
}
}

src/storm/abstraction/ExplicitQualitativeGameResultMinMax.cpp (25 changed lines)

@@ -0,0 +1,25 @@
#include "storm/abstraction/ExplicitQualitativeGameResultMinMax.h"
namespace storm {
namespace abstraction {
ExplicitQualitativeGameResult const& ExplicitQualitativeGameResultMinMax::getProb0(storm::OptimizationDirection const& dir) const {
if (dir == storm::OptimizationDirection::Minimize) {
return prob0Min;
} else {
return prob0Max;
}
}
ExplicitQualitativeGameResult const& ExplicitQualitativeGameResultMinMax::getProb1(storm::OptimizationDirection const& dir) const {
if (dir == storm::OptimizationDirection::Minimize) {
return prob1Min;
} else {
return prob1Max;
}
}
}
}

src/storm/abstraction/ExplicitQualitativeGameResultMinMax.h (33 changed lines)

@@ -1,37 +1,22 @@
#pragma once
#include "storm/storage/dd/DdType.h"
#include "storm/abstraction/SymbolicQualitativeResultMinMax.h"
#include "storm/abstraction/QualitativeGameResult.h"
#include "storm/abstraction/ExplicitQualitativeResultMinMax.h"
#include "storm/abstraction/ExplicitQualitativeGameResult.h"
namespace storm {
namespace abstraction {
class ExplicitQualitativeGameResultMinMax : public QualitativeResultMinMax {
class ExplicitQualitativeGameResultMinMax : public ExplicitQualitativeResultMinMax {
public:
ExplicitQualitativeGameResultMinMax() = default;
virtual QualitativeResult<Type> const& getProb0(storm::OptimizationDirection const& dir) const override {
if (dir == storm::OptimizationDirection::Minimize) {
return prob0Min;
} else {
return prob0Max;
}
}
virtual QualitativeResult<Type> const& getProb1(storm::OptimizationDirection const& dir) const override {
if (dir == storm::OptimizationDirection::Minimize) {
return prob1Min;
} else {
return prob1Max;
}
}
virtual ExplicitQualitativeGameResult const& getProb0(storm::OptimizationDirection const& dir) const override;
virtual ExplicitQualitativeGameResult const& getProb1(storm::OptimizationDirection const& dir) const override;
QualitativeGameResult<Type> prob0Min;
QualitativeGameResult<Type> prob1Min;
QualitativeGameResult<Type> prob0Max;
QualitativeGameResult<Type> prob1Max;
ExplicitQualitativeGameResult prob0Min;
ExplicitQualitativeGameResult prob1Min;
ExplicitQualitativeGameResult prob0Max;
ExplicitQualitativeGameResult prob1Max;
};
}
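
The explicit result classes above mirror the existing symbolic ones: ExplicitQualitativeGameResult wraps the ExplicitGameProb01Result produced by the graph analysis, and the MinMax bundle dispatches on the player 2 optimization direction. A minimal usage sketch, assuming only the accessors introduced in this commit (the helper function and its name are hypothetical):

// Illustrative sketch, not part of the patch: querying the bundled qualitative results.
#include "storm/abstraction/ExplicitQualitativeGameResultMinMax.h"

storm::storage::BitVector getYesStatesUnderMax(storm::abstraction::ExplicitQualitativeGameResultMinMax const& qualitativeResult) {
    // getProb1 selects the result computed for the requested player 2 direction ...
    storm::abstraction::ExplicitQualitativeGameResult const& prob1Max = qualitativeResult.getProb1(storm::OptimizationDirection::Maximize);
    // ... and getStates() forwards to the player 1 states of the underlying ExplicitGameProb01Result.
    return prob1Max.getStates();
}

In the model checker below, the members prob0Min/prob1Min/prob0Max/prob1Max are filled directly by storm::utility::graph::performProb0 and performProb1.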

src/storm/abstraction/MenuGameRefiner.cpp (8 changed lines)

@@ -3,6 +3,8 @@
#include "storm/abstraction/AbstractionInformation.h"
#include "storm/abstraction/MenuGameAbstractor.h"
#include "storm/storage/BitVector.h"
#include "storm/storage/dd/DdManager.h"
#include "storm/utility/dd.h"
#include "storm/utility/solver.h"
@@ -122,7 +124,7 @@ namespace storm {
}
template<storm::dd::DdType Type, typename ValueType>
PivotStateResult<Type, ValueType> pickPivotState(AbstractionSettings::PivotSelectionHeuristic const& heuristic, storm::abstraction::MenuGame<Type, ValueType> const& game, PivotStateCandidatesResult<Type> const& pivotStateCandidateResult, boost::optional<QualitativeGameResultMinMax<Type>> const& qualitativeResult, boost::optional<QuantitativeGameResultMinMax<Type, ValueType>> const& quantitativeResult) {
PivotStateResult<Type, ValueType> pickPivotState(AbstractionSettings::PivotSelectionHeuristic const& heuristic, storm::abstraction::MenuGame<Type, ValueType> const& game, PivotStateCandidatesResult<Type> const& pivotStateCandidateResult, boost::optional<SymbolicQualitativeGameResultMinMax<Type>> const& qualitativeResult, boost::optional<SymbolicQuantitativeGameResultMinMax<Type, ValueType>> const& quantitativeResult) {
// Get easy access to strategies.
storm::dd::Bdd<Type> minPlayer1Strategy;
@@ -587,7 +589,7 @@ namespace storm {
}
template<storm::dd::DdType Type, typename ValueType>
bool MenuGameRefiner<Type, ValueType>::refine(storm::abstraction::MenuGame<Type, ValueType> const& game, storm::dd::Bdd<Type> const& transitionMatrixBdd, QualitativeGameResultMinMax<Type> const& qualitativeResult) const {
bool MenuGameRefiner<Type, ValueType>::refine(storm::abstraction::MenuGame<Type, ValueType> const& game, storm::dd::Bdd<Type> const& transitionMatrixBdd, SymbolicQualitativeGameResultMinMax<Type> const& qualitativeResult) const {
STORM_LOG_TRACE("Trying refinement after qualitative check.");
// Get all relevant strategies.
storm::dd::Bdd<Type> minPlayer1Strategy = qualitativeResult.prob0Min.getPlayer1Strategy();
@@ -675,7 +677,7 @@ namespace storm {
}
template<storm::dd::DdType Type, typename ValueType>
bool MenuGameRefiner<Type, ValueType>::refine(storm::abstraction::MenuGame<Type, ValueType> const& game, storm::dd::Bdd<Type> const& transitionMatrixBdd, QuantitativeGameResultMinMax<Type, ValueType> const& quantitativeResult) const {
bool MenuGameRefiner<Type, ValueType>::refine(storm::abstraction::MenuGame<Type, ValueType> const& game, storm::dd::Bdd<Type> const& transitionMatrixBdd, SymbolicQuantitativeGameResultMinMax<Type, ValueType> const& quantitativeResult) const {
STORM_LOG_TRACE("Refining after quantitative check.");
// Get all relevant strategies.
storm::dd::Bdd<Type> minPlayer1Strategy = quantitativeResult.min.getPlayer1Strategy();

src/storm/abstraction/MenuGameRefiner.h (8 changed lines)

@@ -7,8 +7,8 @@
#include <boost/optional.hpp>
#include "storm/abstraction/RefinementCommand.h"
#include "storm/abstraction/QualitativeGameResultMinMax.h"
#include "storm/abstraction/QuantitativeGameResultMinMax.h"
#include "storm/abstraction/SymbolicQualitativeGameResultMinMax.h"
#include "storm/abstraction/SymbolicQuantitativeGameResultMinMax.h"
#include "storm/storage/expressions/Expression.h"
#include "storm/storage/expressions/FullPredicateSplitter.h"
@@ -85,14 +85,14 @@ namespace storm {
*
* @return True if predicates for refinement could be derived, false otherwise.
*/
bool refine(storm::abstraction::MenuGame<Type, ValueType> const& game, storm::dd::Bdd<Type> const& transitionMatrixBdd, QualitativeGameResultMinMax<Type> const& qualitativeResult) const;
bool refine(storm::abstraction::MenuGame<Type, ValueType> const& game, storm::dd::Bdd<Type> const& transitionMatrixBdd, SymbolicQualitativeGameResultMinMax<Type> const& qualitativeResult) const;
/*!
* Refines the abstractor based on the quantitative result by trying to derive suitable predicates.
*
* @return True if predicates for refinement could be derived, false otherwise.
*/
bool refine(storm::abstraction::MenuGame<Type, ValueType> const& game, storm::dd::Bdd<Type> const& transitionMatrixBdd, QuantitativeGameResultMinMax<Type, ValueType> const& quantitativeResult) const;
bool refine(storm::abstraction::MenuGame<Type, ValueType> const& game, storm::dd::Bdd<Type> const& transitionMatrixBdd, SymbolicQuantitativeGameResultMinMax<Type, ValueType> const& quantitativeResult) const;
/*!
* Retrieves whether all guards were added.

src/storm/abstraction/SymbolicQualitativeGameResult.h (1 changed line)

@@ -8,6 +8,7 @@ namespace storm {
template <storm::dd::DdType Type>
class SymbolicQualitativeGameResult : public storm::utility::graph::SymbolicGameProb01Result<Type>, public SymbolicQualitativeResult<Type> {
public:
SymbolicQualitativeGameResult() = default;
SymbolicQualitativeGameResult(storm::utility::graph::SymbolicGameProb01Result<Type> const& prob01Result);

src/storm/abstraction/SymbolicQualitativeGameResultMinMax.cpp (4 changed lines)

@@ -21,8 +21,8 @@ namespace storm {
}
}
template class SymbolicQualitativeResultMinMax<storm::dd::DdType::CUDD>;
template class SymbolicQualitativeResultMinMax<storm::dd::DdType::Sylvan>;
template class SymbolicQualitativeGameResultMinMax<storm::dd::DdType::CUDD>;
template class SymbolicQualitativeGameResultMinMax<storm::dd::DdType::Sylvan>;
}
}

src/storm/abstraction/SymbolicQualitativeGameResultMinMax.h (9 changed lines)

@@ -3,6 +3,7 @@
#include "storm/storage/dd/DdType.h"
#include "storm/abstraction/SymbolicQualitativeResultMinMax.h"
#include "storm/abstraction/SymbolicQualitativeGameResult.h"
namespace storm {
namespace abstraction {
@@ -15,10 +16,10 @@ namespace storm {
virtual SymbolicQualitativeResult<Type> const& getProb0(storm::OptimizationDirection const& dir) const override;
virtual SymbolicQualitativeResult<Type> const& getProb1(storm::OptimizationDirection const& dir) const override;
SymbolicQualitativeResult<Type> prob0Min;
SymbolicQualitativeResult<Type> prob1Min;
SymbolicQualitativeResult<Type> prob0Max;
SymbolicQualitativeResult<Type> prob1Max;
SymbolicQualitativeGameResult<Type> prob0Min;
SymbolicQualitativeGameResult<Type> prob1Min;
SymbolicQualitativeGameResult<Type> prob0Max;
SymbolicQualitativeGameResult<Type> prob1Max;
};
}

src/storm/abstraction/SymbolicQualitativeMdpResult.h (1 changed line)

@@ -9,6 +9,7 @@ namespace storm {
template <storm::dd::DdType Type>
class SymbolicQualitativeMdpResult : public SymbolicQualitativeResult<Type> {
public:
SymbolicQualitativeMdpResult() = default;
SymbolicQualitativeMdpResult(storm::dd::Bdd<Type> const& states);

src/storm/modelchecker/abstraction/AbstractAbstractionRefinementModelChecker.cpp (48 changed lines)

@@ -25,9 +25,9 @@
#include "storm/abstraction/StateSet.h"
#include "storm/abstraction/SymbolicStateSet.h"
#include "storm/abstraction/QualitativeResultMinMax.h"
#include "storm/abstraction/QualitativeMdpResult.h"
#include "storm/abstraction/QualitativeMdpResultMinMax.h"
#include "storm/abstraction/QualitativeGameResultMinMax.h"
#include "storm/abstraction/SymbolicQualitativeMdpResult.h"
#include "storm/abstraction/SymbolicQualitativeMdpResultMinMax.h"
#include "storm/abstraction/SymbolicQualitativeGameResultMinMax.h"
#include "storm/settings/SettingsManager.h"
#include "storm/settings/modules/AbstractionSettings.h"
@@ -470,18 +470,18 @@ namespace storm {
template<typename ModelType>
std::unique_ptr<storm::abstraction::QualitativeResultMinMax> AbstractAbstractionRefinementModelChecker<ModelType>::computeQualitativeResult(Environment const& env, storm::models::symbolic::Dtmc<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates) {
STORM_LOG_DEBUG("Computing qualitative solution for DTMC.");
std::unique_ptr<storm::abstraction::QualitativeMdpResultMinMax<DdType>> result = std::make_unique<storm::abstraction::QualitativeMdpResultMinMax<DdType>>();
std::unique_ptr<storm::abstraction::SymbolicQualitativeMdpResultMinMax<DdType>> result = std::make_unique<storm::abstraction::SymbolicQualitativeMdpResultMinMax<DdType>>();
auto start = std::chrono::high_resolution_clock::now();
bool isRewardFormula = checkTask->getFormula().isEventuallyFormula() && checkTask->getFormula().asEventuallyFormula().getContext() == storm::logic::FormulaContext::Reward;
storm::dd::Bdd<DdType> transitionMatrixBdd = abstractModel.getTransitionMatrix().notZero();
if (isRewardFormula) {
auto prob1 = storm::utility::graph::performProb1(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
result->prob1Min = result->prob1Max = storm::abstraction::QualitativeMdpResult<DdType>(prob1);
result->prob1Min = result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob1);
} else {
auto prob01 = storm::utility::graph::performProb01(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
result->prob0Min = result->prob0Max = storm::abstraction::QualitativeMdpResult<DdType>(prob01.first);
result->prob1Min = result->prob1Max = storm::abstraction::QualitativeMdpResult<DdType>(prob01.second);
result->prob0Min = result->prob0Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.first);
result->prob1Min = result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.second);
}
auto end = std::chrono::high_resolution_clock::now();
@@ -494,7 +494,7 @@ namespace storm {
template<typename ModelType>
std::unique_ptr<storm::abstraction::QualitativeResultMinMax> AbstractAbstractionRefinementModelChecker<ModelType>::computeQualitativeResult(Environment const& env, storm::models::symbolic::Mdp<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates) {
STORM_LOG_DEBUG("Computing qualitative solution for MDP.");
std::unique_ptr<storm::abstraction::QualitativeMdpResultMinMax<DdType>> result = std::make_unique<storm::abstraction::QualitativeMdpResultMinMax<DdType>>();
std::unique_ptr<storm::abstraction::SymbolicQualitativeMdpResultMinMax<DdType>> result = std::make_unique<storm::abstraction::SymbolicQualitativeMdpResultMinMax<DdType>>();
auto start = std::chrono::high_resolution_clock::now();
bool isRewardFormula = checkTask->getFormula().isEventuallyFormula() && checkTask->getFormula().asEventuallyFormula().getContext() == storm::logic::FormulaContext::Reward;
@@ -505,13 +505,13 @@ namespace storm {
bool computedMin = false;
if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Minimize) {
auto states = storm::utility::graph::performProb1E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), lastQualitativeResults ? lastQualitativeResults->asSymbolicQualitativeResultMinMax<DdType>().getProb1Min().getStates() : storm::utility::graph::performProbGreater0E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
result->prob1Min = storm::abstraction::QualitativeMdpResult<DdType>(states);
result->prob1Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);
computedMin = true;
}
if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Maximize) {
auto states = storm::utility::graph::performProb1A(abstractModel, transitionMatrixBdd, targetStates.getStates(), storm::utility::graph::performProbGreater0A(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
result->prob1Max = storm::abstraction::QualitativeMdpResult<DdType>(states);
result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);
if (!computedMin) {
result->prob1Min = result->prob1Max;
}
@@ -523,18 +523,18 @@ namespace storm {
bool computedMax = false;
if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Maximize) {
auto states = storm::utility::graph::performProb0A(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
result->prob0Max = storm::abstraction::QualitativeMdpResult<DdType>(states);
result->prob0Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);
states = storm::utility::graph::performProb1E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), lastQualitativeResults ? lastQualitativeResults->asSymbolicQualitativeResultMinMax<DdType>().getProb1Min().getStates() : storm::utility::graph::performProbGreater0E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
result->prob1Max = storm::abstraction::QualitativeMdpResult<DdType>(states);
result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);
computedMax = true;
}
if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Minimize) {
auto states = storm::utility::graph::performProb1A(abstractModel, transitionMatrixBdd, lastQualitativeResults ? lastQualitativeResults->asSymbolicQualitativeResultMinMax<DdType>().getProb1Min().getStates() : targetStates.getStates(), storm::utility::graph::performProbGreater0A(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
result->prob1Min = storm::abstraction::QualitativeMdpResult<DdType>(states);
result->prob1Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);
states = storm::utility::graph::performProb0E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
result->prob0Min = storm::abstraction::QualitativeMdpResult<DdType>(states);
result->prob0Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);
if (!computedMax) {
result->prob0Max = result->prob0Min;
@@ -550,13 +550,13 @@ namespace storm {
bool computedMin = false;
if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Minimize) {
auto prob1 = storm::utility::graph::performProb1E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), storm::utility::graph::performProbGreater0E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
result->prob1Min = storm::abstraction::QualitativeMdpResult<DdType>(prob1);
result->prob1Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob1);
computedMin = true;
}
if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Maximize) {
auto prob1 = storm::utility::graph::performProb1A(abstractModel, transitionMatrixBdd, targetStates.getStates(), storm::utility::graph::performProbGreater0A(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
result->prob1Max = storm::abstraction::QualitativeMdpResult<DdType>(prob1);
result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob1);
if (!computedMin) {
result->prob1Min = result->prob1Max;
}
@@ -567,15 +567,15 @@ namespace storm {
bool computedMin = false;
if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Minimize) {
auto prob01 = storm::utility::graph::performProb01Min(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
result->prob0Min = storm::abstraction::QualitativeMdpResult<DdType>(prob01.first);
result->prob1Min = storm::abstraction::QualitativeMdpResult<DdType>(prob01.second);
result->prob0Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.first);
result->prob1Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.second);
computedMin = true;
}
if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Maximize) {
auto prob01 = storm::utility::graph::performProb01Max(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
result->prob0Max = storm::abstraction::QualitativeMdpResult<DdType>(prob01.first);
result->prob1Max = storm::abstraction::QualitativeMdpResult<DdType>(prob01.second);
result->prob0Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.first);
result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.second);
if (!computedMin) {
result->prob0Min = result->prob0Max;
result->prob1Min = result->prob1Max;
@@ -604,7 +604,7 @@ namespace storm {
template<typename ModelType>
std::unique_ptr<storm::abstraction::QualitativeResultMinMax> AbstractAbstractionRefinementModelChecker<ModelType>::computeQualitativeResult(Environment const& env, storm::models::symbolic::StochasticTwoPlayerGame<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates) {
STORM_LOG_DEBUG("Computing qualitative solution for S2PG.");
std::unique_ptr<storm::abstraction::QualitativeGameResultMinMax<DdType>> result;
std::unique_ptr<storm::abstraction::SymbolicQualitativeGameResultMinMax<DdType>> result;
// Obtain the player optimization directions.
uint64_t abstractionPlayer = this->getAbstractionPlayer();
@@ -622,7 +622,7 @@ namespace storm {
if (this->getReuseQualitativeResults()) {
result = computeQualitativeResultReuse(abstractModel, transitionMatrixBdd, constraintStates, targetStates, abstractionPlayer, modelNondeterminismDirection, requiresSchedulers);
} else {
result = std::make_unique<storm::abstraction::QualitativeGameResultMinMax<DdType>>();
result = std::make_unique<storm::abstraction::SymbolicQualitativeGameResultMinMax<DdType>>();
result->prob0Min = storm::utility::graph::performProb0(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), abstractionPlayer == 1 ? storm::OptimizationDirection::Minimize : modelNondeterminismDirection, abstractionPlayer == 2 ? storm::OptimizationDirection::Minimize : modelNondeterminismDirection, requiresSchedulers, requiresSchedulers);
result->prob1Min = storm::utility::graph::performProb1(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), abstractionPlayer == 1 ? storm::OptimizationDirection::Minimize : modelNondeterminismDirection, abstractionPlayer == 2 ? storm::OptimizationDirection::Minimize : modelNondeterminismDirection, requiresSchedulers, requiresSchedulers);
@@ -648,8 +648,8 @@ namespace storm {
}
template<typename ModelType>
std::unique_ptr<storm::abstraction::QualitativeGameResultMinMax<AbstractAbstractionRefinementModelChecker<ModelType>::DdType>> AbstractAbstractionRefinementModelChecker<ModelType>::computeQualitativeResultReuse(storm::models::symbolic::StochasticTwoPlayerGame<DdType, ValueType> const& abstractModel, storm::dd::Bdd<DdType> const& transitionMatrixBdd, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates, uint64_t abstractionPlayer, storm::OptimizationDirection const& modelNondeterminismDirection, bool requiresSchedulers) {
std::unique_ptr<storm::abstraction::QualitativeGameResultMinMax<DdType>> result = std::make_unique<storm::abstraction::QualitativeGameResultMinMax<DdType>>();
std::unique_ptr<storm::abstraction::SymbolicQualitativeGameResultMinMax<AbstractAbstractionRefinementModelChecker<ModelType>::DdType>> AbstractAbstractionRefinementModelChecker<ModelType>::computeQualitativeResultReuse(storm::models::symbolic::StochasticTwoPlayerGame<DdType, ValueType> const& abstractModel, storm::dd::Bdd<DdType> const& transitionMatrixBdd, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates, uint64_t abstractionPlayer, storm::OptimizationDirection const& modelNondeterminismDirection, bool requiresSchedulers) {
std::unique_ptr<storm::abstraction::SymbolicQualitativeGameResultMinMax<DdType>> result = std::make_unique<storm::abstraction::SymbolicQualitativeGameResultMinMax<DdType>>();
// Depending on the model nondeterminism direction, we choose a different order of operations.
if (modelNondeterminismDirection == storm::OptimizationDirection::Minimize) {

src/storm/modelchecker/abstraction/AbstractAbstractionRefinementModelChecker.h (6 changed lines)

@@ -41,10 +41,10 @@ namespace storm {
class SymbolicQualitativeResultMinMax;
template <storm::dd::DdType DdType>
class QualitativeMdpResultMinMax;
class SymbolicQualitativeMdpResultMinMax;
template <storm::dd::DdType DdType>
class QualitativeGameResultMinMax;
class SymbolicQualitativeGameResultMinMax;
class StateSet;
@@ -124,7 +124,7 @@ namespace storm {
std::unique_ptr<storm::abstraction::QualitativeResultMinMax> computeQualitativeResult(Environment const& env, storm::models::symbolic::Dtmc<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates);
std::unique_ptr<storm::abstraction::QualitativeResultMinMax> computeQualitativeResult(Environment const& env, storm::models::symbolic::Mdp<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates);
std::unique_ptr<storm::abstraction::QualitativeResultMinMax> computeQualitativeResult(Environment const& env, storm::models::symbolic::StochasticTwoPlayerGame<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates);
std::unique_ptr<storm::abstraction::QualitativeGameResultMinMax<DdType>> computeQualitativeResultReuse(storm::models::symbolic::StochasticTwoPlayerGame<DdType, ValueType> const& abstractModel, storm::dd::Bdd<DdType> const& transitionMatrixBdd, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates, uint64_t abstractionPlayer, storm::OptimizationDirection const& modelNondeterminismDirection, bool requiresSchedulers);
std::unique_ptr<storm::abstraction::SymbolicQualitativeGameResultMinMax<DdType>> computeQualitativeResultReuse(storm::models::symbolic::StochasticTwoPlayerGame<DdType, ValueType> const& abstractModel, storm::dd::Bdd<DdType> const& transitionMatrixBdd, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates, uint64_t abstractionPlayer, storm::OptimizationDirection const& modelNondeterminismDirection, bool requiresSchedulers);
std::unique_ptr<CheckResult> checkForResultAfterQualitativeCheck(storm::models::Model<ValueType> const& abstractModel);
std::unique_ptr<CheckResult> checkForResultAfterQualitativeCheck(storm::models::symbolic::Model<DdType, ValueType> const& abstractModel);

src/storm/modelchecker/abstraction/GameBasedMdpModelChecker.cpp (176 changed lines)

@@ -10,7 +10,6 @@
#include "storm/storage/expressions/ExpressionManager.h"
#include "storm/storage/expressions/VariableSetPredicateSplitter.h"
#include "storm/storage/jani/Edge.h"
#include "storm/storage/jani/EdgeDestination.h"
#include "storm/storage/jani/Model.h"
@@ -26,6 +25,8 @@
#include "storm/abstraction/jani/JaniMenuGameAbstractor.h"
#include "storm/abstraction/MenuGameRefiner.h"
#include "storm/abstraction/ExplicitQualitativeGameResultMinMax.h"
#include "storm/logic/FragmentSpecification.h"
#include "storm/solver/SymbolicGameSolver.h"
@@ -45,8 +46,8 @@
namespace storm {
namespace modelchecker {
using storm::abstraction::QuantitativeGameResult;
using storm::abstraction::QuantitativeGameResultMinMax;
using storm::abstraction::SymbolicQuantitativeGameResult;
using storm::abstraction::SymbolicQuantitativeGameResultMinMax;
template<storm::dd::DdType Type, typename ModelType>
GameBasedMdpModelChecker<Type, ModelType>::GameBasedMdpModelChecker(storm::storage::SymbolicModelDescription const& model, std::shared_ptr<storm::utility::solver::SmtSolverFactory> const& smtSolverFactory) : smtSolverFactory(smtSolverFactory), comparator(storm::settings::getModule<storm::settings::modules::AbstractionSettings>().getPrecision()), reuseQualitativeResults(false), reuseQuantitativeResults(false), solveMode(storm::settings::getModule<storm::settings::modules::AbstractionSettings>().getSolveMode()) {
@@ -71,9 +72,11 @@ namespace storm {
preprocessedModel = model.asJaniModel().flattenComposition();
}
storm::settings::modules::AbstractionSettings::ReuseMode reuseMode = storm::settings::getModule<storm::settings::modules::AbstractionSettings>().getReuseMode();
auto const& abstractionSettings = storm::settings::getModule<storm::settings::modules::AbstractionSettings>();
storm::settings::modules::AbstractionSettings::ReuseMode reuseMode = abstractionSettings.getReuseMode();
reuseQualitativeResults = reuseMode == storm::settings::modules::AbstractionSettings::ReuseMode::All || reuseMode == storm::settings::modules::AbstractionSettings::ReuseMode::Qualitative;
reuseQuantitativeResults = reuseMode == storm::settings::modules::AbstractionSettings::ReuseMode::All || reuseMode == storm::settings::modules::AbstractionSettings::ReuseMode::Quantitative;
maximalNumberOfAbstractions = abstractionSettings.getMaximalAbstractionCount();
}
template<storm::dd::DdType Type, typename ModelType>
@@ -159,7 +162,7 @@ namespace storm {
}
template<storm::dd::DdType Type, typename ValueType>
std::unique_ptr<CheckResult> checkForResultAfterQualitativeCheck(CheckTask<storm::logic::Formula> const& checkTask, storm::dd::Bdd<Type> const& initialStates, QualitativeGameResultMinMax<Type> const& qualitativeResult) {
std::unique_ptr<CheckResult> checkForResultAfterQualitativeCheck(CheckTask<storm::logic::Formula> const& checkTask, storm::dd::Bdd<Type> const& initialStates, SymbolicQualitativeGameResultMinMax<Type> const& qualitativeResult) {
// Check whether we can already give the answer based on the current information.
std::unique_ptr<CheckResult> result = checkForResultAfterQualitativeCheck<Type, ValueType>(checkTask, storm::OptimizationDirection::Minimize, initialStates, qualitativeResult.prob0Min.getPlayer1States(), qualitativeResult.prob1Min.getPlayer1States());
if (result) {
@@ -232,7 +235,7 @@ namespace storm {
}
template<storm::dd::DdType Type, typename ValueType>
QuantitativeGameResult<Type, ValueType> solveMaybeStates(Environment const& env, storm::OptimizationDirection const& player1Direction, storm::OptimizationDirection const& player2Direction, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::dd::Bdd<Type> const& maybeStates, storm::dd::Bdd<Type> const& prob1States, boost::optional<QuantitativeGameResult<Type, ValueType>> const& startInfo = boost::none) {
SymbolicQuantitativeGameResult<Type, ValueType> solveMaybeStates(Environment const& env, storm::OptimizationDirection const& player1Direction, storm::OptimizationDirection const& player2Direction, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::dd::Bdd<Type> const& maybeStates, storm::dd::Bdd<Type> const& prob1States, boost::optional<SymbolicQuantitativeGameResult<Type, ValueType>> const& startInfo = boost::none) {
STORM_LOG_TRACE("Performing quantative solution step. Player 1: " << player1Direction << ", player 2: " << player2Direction << ".");
@@ -259,14 +262,14 @@ namespace storm {
std::unique_ptr<storm::solver::SymbolicGameSolver<Type, ValueType>> solver = solverFactory.create(submatrix, maybeStates, game.getIllegalPlayer1Mask(), game.getIllegalPlayer2Mask(), game.getRowVariables(), game.getColumnVariables(), game.getRowColumnMetaVariablePairs(), game.getPlayer1Variables(), game.getPlayer2Variables());
solver->setGeneratePlayersStrategies(true);
auto values = solver->solveGame(env, player1Direction, player2Direction, startVector, subvector, startInfo ? boost::make_optional(startInfo.get().getPlayer1Strategy()) : boost::none, startInfo ? boost::make_optional(startInfo.get().getPlayer2Strategy()) : boost::none);
return QuantitativeGameResult<Type, ValueType>(std::make_pair(storm::utility::zero<ValueType>(), storm::utility::one<ValueType>()), values, solver->getPlayer1Strategy(), solver->getPlayer2Strategy());
return SymbolicQuantitativeGameResult<Type, ValueType>(std::make_pair(storm::utility::zero<ValueType>(), storm::utility::one<ValueType>()), values, solver->getPlayer1Strategy(), solver->getPlayer2Strategy());
}
template<storm::dd::DdType Type, typename ValueType>
QuantitativeGameResult<Type, ValueType> computeQuantitativeResult(Environment const& env, storm::OptimizationDirection player1Direction, storm::OptimizationDirection player2Direction, storm::abstraction::MenuGame<Type, ValueType> const& game, QualitativeGameResultMinMax<Type> const& qualitativeResult, storm::dd::Add<Type, ValueType> const& initialStatesAdd, storm::dd::Bdd<Type> const& maybeStates, boost::optional<QuantitativeGameResult<Type, ValueType>> const& startInfo = boost::none) {
SymbolicQuantitativeGameResult<Type, ValueType> computeQuantitativeResult(Environment const& env, storm::OptimizationDirection player1Direction, storm::OptimizationDirection player2Direction, storm::abstraction::MenuGame<Type, ValueType> const& game, SymbolicQualitativeGameResultMinMax<Type> const& qualitativeResult, storm::dd::Add<Type, ValueType> const& initialStatesAdd, storm::dd::Bdd<Type> const& maybeStates, boost::optional<SymbolicQuantitativeGameResult<Type, ValueType>> const& startInfo = boost::none) {
bool min = player2Direction == storm::OptimizationDirection::Minimize;
QuantitativeGameResult<Type, ValueType> result;
SymbolicQuantitativeGameResult<Type, ValueType> result;
// We fix the strategies. That is, we take the decisions of the strategies obtained in the qualitative
// preprocessing if possible.
@@ -355,9 +358,11 @@ namespace storm {
storm::dd::Bdd<Type> globalTargetStates = abstractor->getStates(targetStateExpression);
// Enter the main-loop of abstraction refinement.
boost::optional<QualitativeGameResultMinMax<Type>> previousQualitativeResult = boost::none;
boost::optional<QuantitativeGameResult<Type, ValueType>> previousMinQuantitativeResult = boost::none;
for (uint_fast64_t iterations = 0; iterations < 10000; ++iterations) {
boost::optional<SymbolicQualitativeGameResultMinMax<Type>> previousSymbolicQualitativeResult = boost::none;
boost::optional<SymbolicQuantitativeGameResult<Type, ValueType>> previousSymbolicMinQuantitativeResult = boost::none;
boost::optional<ExplicitQualitativeGameResultMinMax> previousExplicitQualitativeResult = boost::none;
// boost::optional<ExplicitQuantitativeGameResult<ValueType>> previousExplicitMinQuantitativeResult = boost::none;
for (uint_fast64_t iterations = 0; iterations < maximalNumberOfAbstractions; ++iterations) {
auto iterationStart = std::chrono::high_resolution_clock::now();
STORM_LOG_TRACE("Starting iteration " << iterations << ".");
@@ -384,9 +389,9 @@
std::unique_ptr<CheckResult> result;
if (solveMode == storm::settings::modules::AbstractionSettings::SolveMode::Dd) {
result = performSymbolicAbstractionSolutionStep(env, checkTask, game, player1Direction, initialStates, constraintStates, targetStates, refiner, previousQualitativeResult, previousMinQuantitativeResult);
result = performSymbolicAbstractionSolutionStep(env, checkTask, game, player1Direction, initialStates, constraintStates, targetStates, refiner, previousSymbolicQualitativeResult, previousSymbolicMinQuantitativeResult);
} else {
result = performExplicitAbstractionSolutionStep(env, checkTask, game, player1Direction, initialStates, constraintStates, targetStates, refiner);
result = performExplicitAbstractionSolutionStep(env, checkTask, game, player1Direction, initialStates, constraintStates, targetStates, refiner, previousExplicitQualitativeResult);
}
if (result) {
@@ -397,13 +402,14 @@
auto iterationEnd = std::chrono::high_resolution_clock::now();
STORM_LOG_DEBUG("Iteration " << iterations << " took " << std::chrono::duration_cast<std::chrono::milliseconds>(iterationEnd - iterationStart).count() << "ms.");
}
STORM_LOG_ASSERT(false, "This point must not be reached.");
// If this point is reached, we have given up on abstraction.
STORM_LOG_WARN("Could not derive result, maximal number of abstractions exceeded.");
return nullptr;
}
template<storm::dd::DdType Type, typename ModelType>
std::unique_ptr<CheckResult> GameBasedMdpModelChecker<Type, ModelType>::performSymbolicAbstractionSolutionStep(Environment const& env, CheckTask<storm::logic::Formula> const& checkTask, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& initialStates, storm::dd::Bdd<Type> const& constraintStates, storm::dd::Bdd<Type> const& targetStates, storm::abstraction::MenuGameRefiner<Type, ValueType> const& refiner, boost::optional<QualitativeGameResultMinMax<Type>>& previousQualitativeResult, boost::optional<QuantitativeGameResult<Type, ValueType>>& previousMinQuantitativeResult) {
std::unique_ptr<CheckResult> GameBasedMdpModelChecker<Type, ModelType>::performSymbolicAbstractionSolutionStep(Environment const& env, CheckTask<storm::logic::Formula> const& checkTask, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& initialStates, storm::dd::Bdd<Type> const& constraintStates, storm::dd::Bdd<Type> const& targetStates, storm::abstraction::MenuGameRefiner<Type, ValueType> const& refiner, boost::optional<SymbolicQualitativeGameResultMinMax<Type>>& previousQualitativeResult, boost::optional<SymbolicQuantitativeGameResult<Type, ValueType>>& previousMinQuantitativeResult) {
STORM_LOG_TRACE("Using dd-based solving.");
@@ -412,7 +418,7 @@
// (1) compute all states with probability 0/1 wrt. to the two different player 2 goals (min/max).
auto qualitativeStart = std::chrono::high_resolution_clock::now();
QualitativeGameResultMinMax<Type> qualitativeResult = computeProb01States(previousQualitativeResult, game, player1Direction, transitionMatrixBdd, constraintStates, targetStates);
SymbolicQualitativeGameResultMinMax<Type> qualitativeResult = computeProb01States(previousQualitativeResult, game, player1Direction, transitionMatrixBdd, constraintStates, targetStates);
std::unique_ptr<CheckResult> result = checkForResultAfterQualitativeCheck<Type, ValueType>(checkTask, initialStates, qualitativeResult);
if (result) {
return result;
@@ -456,7 +462,7 @@
auto quantitativeStart = std::chrono::high_resolution_clock::now();
QuantitativeGameResultMinMax<Type, ValueType> quantitativeResult;
SymbolicQuantitativeGameResultMinMax<Type, ValueType> quantitativeResult;
// (7) Solve the min values and check whether we can give the answer already.
quantitativeResult.min = computeQuantitativeResult(env, player1Direction, storm::OptimizationDirection::Minimize, game, qualitativeResult, initialStatesAdd, maybeMin, reuseQuantitativeResults ? previousMinQuantitativeResult : boost::none);
@@ -502,39 +508,64 @@
}
template<storm::dd::DdType Type, typename ModelType>
std::unique_ptr<CheckResult> GameBasedMdpModelChecker<Type, ModelType>::performExplicitAbstractionSolutionStep(Environment const& env, CheckTask<storm::logic::Formula> const& checkTask, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& initialStatesBdd, storm::dd::Bdd<Type> const& constraintStatesBdd, storm::dd::Bdd<Type> const& targetStatesBdd, storm::abstraction::MenuGameRefiner<Type, ValueType> const& refiner) {
STORM_LOG_TRACE("Using hybrid solving.");
std::unique_ptr<CheckResult> GameBasedMdpModelChecker<Type, ModelType>::performExplicitAbstractionSolutionStep(Environment const& env, CheckTask<storm::logic::Formula> const& checkTask, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& initialStatesBdd, storm::dd::Bdd<Type> const& constraintStatesBdd, storm::dd::Bdd<Type> const& targetStatesBdd, storm::abstraction::MenuGameRefiner<Type, ValueType> const& refiner, boost::optional<ExplicitQualitativeGameResultMinMax>& previousQualitativeResult) {
STORM_LOG_TRACE("Using sparse solving.");
// (0) Start by transforming the necessary symbolic elements to explicit ones.
storm::dd::Odd odd = game.getReachableStates().createOdd();
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<uint64_t>> transitionMatrixAndLabeling = game.getTransitionMatrix().toLabeledMatrix(game.getRowVariables(), game.getColumnVariables(), game.getNondeterminismVariables(), game.getPlayer1Variables(), odd, odd, true);
auto const& transitionMatrix = transitionMatrixAndLabeling.first;
auto const& labeling = transitionMatrixAndLabeling.second;
auto& transitionMatrix = transitionMatrixAndLabeling.first;
auto& labeling = transitionMatrixAndLabeling.second;
// Create the player 2 row grouping from the labeling.
std::vector<uint64_t> tmpPlayer2RowGrouping;
for (uint64_t player1State = 0; player1State < transitionMatrix.getRowGroupCount(); ++player1State) {
uint64_t lastLabel = std::numeric_limits<uint64_t>::max();
for (uint64_t row = transitionMatrix.getRowGroupIndices()[player1State]; row < transitionMatrix.getRowGroupIndices()[player1State + 1]; ++row) {
if (labeling[row] != lastLabel) {
tmpPlayer2RowGrouping.emplace_back(row);
lastLabel = labeling[row];
}
}
}
tmpPlayer2RowGrouping.emplace_back(labeling.size());
std::vector<uint64_t> player1RowGrouping = transitionMatrix.swapRowGroupIndices(std::move(tmpPlayer2RowGrouping));
auto const& player2RowGrouping = transitionMatrix.getRowGroupIndices();
// Create the backward transitions for both players.
storm::storage::SparseMatrix<ValueType> player1BackwardTransitions = transitionMatrix.transpose(true);
std::vector<uint64_t> player2BackwardTransitions(transitionMatrix.getRowGroupCount());
uint64_t player2State = 0;
for (uint64_t player1State = 0; player1State < player2RowGrouping.size() - 1; ++player1State) {
while (player1RowGrouping[player1State + 1] > player2RowGrouping[player2State]) {
player2BackwardTransitions[player2State] = player1State;
++player2State;
}
}
storm::storage::BitVector initialStates = initialStatesBdd.toVector(odd);
storm::storage::BitVector constraintStates = constraintStatesBdd.toVector(odd);
storm::storage::BitVector targetStates = targetStatesBdd.toVector(odd);
// (1) compute all states with probability 0/1 wrt. to the two different player 2 goals (min/max).
auto qualitativeStart = std::chrono::high_resolution_clock::now();
ExplicitQualitativeGameResultMinMax qualitativeResult = computeProb01States(game, player1Direction, transitionMatrix, constraintStates, targetStates);
std::unique_ptr<CheckResult> result = checkForResultAfterQualitativeCheck<Type, ValueType>(checkTask, initialStates, qualitativeResult);
if (result) {
return result;
}
ExplicitQualitativeGameResultMinMax qualitativeResult = computeProb01States(previousQualitativeResult, player1Direction, transitionMatrix, player1RowGrouping, player1BackwardTransitions, player2BackwardTransitions, constraintStates, targetStates);
// std::unique_ptr<CheckResult> result = checkForResultAfterQualitativeCheck<Type, ValueType>(checkTask, initialStates, qualitativeResult);
// if (result) {
// return result;
// }
auto qualitativeEnd = std::chrono::high_resolution_clock::now();
STORM_LOG_DEBUG("Qualitative computation completed in " << std::chrono::duration_cast<std::chrono::milliseconds>(qualitativeEnd - qualitativeStart).count() << "ms.");
// std::cout << transitionMatrix << std::endl;
// std::cout << labeling.size() << std::endl;
// std::cout << initialStates << std::endl;
// std::cout << constraintStates << std::endl;
// std::cout << targetStates << std::endl;
std::cout << transitionMatrix << std::endl;
std::cout << labeling.size() << std::endl;
std::cout << initialStates << std::endl;
std::cout << constraintStates << std::endl;
std::cout << targetStates << std::endl;
exit(-1);
return nullptr;
}
template<storm::dd::DdType Type, typename ModelType>
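
The row regrouping in performExplicitAbstractionSolutionStep above is the core of the new sparse path: the matrix extracted from the menu game is initially grouped per player 1 state, the player 1 choice labels then induce a finer player 2 grouping, and each player 2 state is mapped back to the player 1 state that owns its rows. The following self-contained sketch replays that index arithmetic on plain vectors with hypothetical toy data; it is an illustration only and does not use storm's SparseMatrix:

#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

int main() {
    // Toy data: two player 1 states; the first owns rows 0-2, the second rows 3-4.
    std::vector<uint64_t> player1RowGroups = {0, 3, 5};
    // Player 1 choice label of each row; a label change within a group starts a new player 2 state.
    std::vector<uint64_t> labeling = {7, 7, 9, 4, 4};

    // Derive the player 2 row grouping as in the patch: open a new group whenever
    // the label differs from that of the previous row.
    std::vector<uint64_t> player2RowGroups;
    for (uint64_t p1 = 0; p1 + 1 < player1RowGroups.size(); ++p1) {
        uint64_t lastLabel = std::numeric_limits<uint64_t>::max();
        for (uint64_t row = player1RowGroups[p1]; row < player1RowGroups[p1 + 1]; ++row) {
            if (labeling[row] != lastLabel) {
                player2RowGroups.push_back(row);
                lastLabel = labeling[row];
            }
        }
    }
    player2RowGroups.push_back(labeling.size());  // sentinel entry, as in the patch

    // Map every player 2 state back to the player 1 state whose row range contains it,
    // mirroring the intent of the player2BackwardTransitions loop (with an explicit bounds guard).
    std::vector<uint64_t> player2ToPlayer1(player2RowGroups.size() - 1);
    uint64_t p2 = 0;
    for (uint64_t p1 = 0; p1 + 1 < player1RowGroups.size(); ++p1) {
        while (p2 + 1 < player2RowGroups.size() && player1RowGroups[p1 + 1] > player2RowGroups[p2]) {
            player2ToPlayer1[p2] = p1;
            ++p2;
        }
    }

    // Prints three player 2 states with row ranges [0,2), [2,3), [3,5) owned by player 1 states 0, 0, 1.
    for (uint64_t i = 0; i + 1 < player2RowGroups.size(); ++i) {
        std::cout << "player 2 state " << i << ": rows [" << player2RowGroups[i] << ", " << player2RowGroups[i + 1]
                  << "), player 1 state " << player2ToPlayer1[i] << "\n";
    }
    return 0;
}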
@@ -573,7 +604,7 @@
}
template<storm::dd::DdType Type>
bool checkQualitativeStrategies(bool prob0, QualitativeGameResult<Type> const& result, storm::dd::Bdd<Type> const& targetStates) {
bool checkQualitativeStrategies(bool prob0, SymbolicQualitativeGameResult<Type> const& result, storm::dd::Bdd<Type> const& targetStates) {
if (prob0) {
STORM_LOG_ASSERT(result.hasPlayer1Strategy() && (result.getPlayer1States().isZero() || !result.getPlayer1Strategy().isZero()), "Unable to proceed without strategy.");
} else {
@@ -586,7 +617,7 @@
}
template<storm::dd::DdType Type>
bool checkQualitativeStrategies(QualitativeGameResultMinMax<Type> const& qualitativeResult, storm::dd::Bdd<Type> const& targetStates) {
bool checkQualitativeStrategies(SymbolicQualitativeGameResultMinMax<Type> const& qualitativeResult, storm::dd::Bdd<Type> const& targetStates) {
bool result = true;
result &= checkQualitativeStrategies(true, qualitativeResult.prob0Min, targetStates);
result &= checkQualitativeStrategies(false, qualitativeResult.prob1Min, targetStates);
@@ -596,9 +627,72 @@
}
template<storm::dd::DdType Type, typename ModelType>
QualitativeGameResultMinMax<Type> GameBasedMdpModelChecker<Type, ModelType>::computeProb01States(boost::optional<QualitativeGameResultMinMax<Type>> const& previousQualitativeResult, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& transitionMatrixBdd, storm::dd::Bdd<Type> const& constraintStates, storm::dd::Bdd<Type> const& targetStates) {
ExplicitQualitativeGameResultMinMax GameBasedMdpModelChecker<Type, ModelType>::computeProb01States(boost::optional<ExplicitQualitativeGameResultMinMax> const& previousQualitativeResult, storm::OptimizationDirection player1Direction, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<uint64_t> const& player1RowGrouping, storm::storage::SparseMatrix<ValueType> const& player1BackwardTransitions, std::vector<uint64_t> const& player2BackwardTransitions, storm::storage::BitVector const& constraintStates, storm::storage::BitVector const& targetStates) {
ExplicitQualitativeGameResultMinMax result;
// if (reuseQualitativeResults) {
// // Depending on the player 1 direction, we choose a different order of operations.
// if (player1Direction == storm::OptimizationDirection::Minimize) {
// // (1) min/min: compute prob0 using the game functions
// result.prob0Min = storm::utility::graph::performProb0(game, transitionMatrixBdd, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Minimize, true, true);
//
// // (2) min/min: compute prob1 using the MDP functions
// storm::dd::Bdd<Type> candidates = game.getReachableStates() && !result.prob0Min.player1States;
// storm::dd::Bdd<Type> prob1MinMinMdp = storm::utility::graph::performProb1A(game, transitionMatrixBdd, previousQualitativeResult ? previousQualitativeResult.get().prob1Min.player1States : targetStates, candidates);
//
// // (3) min/min: compute prob1 using the game functions
// result.prob1Min = storm::utility::graph::performProb1(game, transitionMatrixBdd, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Minimize, true, true, boost::make_optional(prob1MinMinMdp));
//
// // (4) min/max: compute prob 0 using the game functions
// result.prob0Max = storm::utility::graph::performProb0(game, transitionMatrixBdd, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Maximize, true, true);
//
// // (5) min/max: compute prob 1 using the game functions
// // We know that only previous prob1 states can now be prob 1 states again, because the upper bound
// // values can only decrease over iterations.
// boost::optional<storm::dd::Bdd<Type>> prob1Candidates;
// if (previousQualitativeResult) {
// prob1Candidates = previousQualitativeResult.get().prob1Max.player1States;
// }
// result.prob1Max = storm::utility::graph::performProb1(game, transitionMatrixBdd, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Maximize, true, true, prob1Candidates);
// } else {
// // (1) max/max: compute prob0 using the game functions
// result.prob0Max = storm::utility::graph::performProb0(game, transitionMatrixBdd, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Maximize, true, true);
//
// // (2) max/max: compute prob1 using the MDP functions, reuse prob1 states of last iteration to constrain the candidate states.
// storm::dd::Bdd<Type> candidates = game.getReachableStates() && !result.prob0Max.player1States;
// if (previousQualitativeResult) {
// candidates &= previousQualitativeResult.get().prob1Max.player1States;
// }
// storm::dd::Bdd<Type> prob1MaxMaxMdp = storm::utility::graph::performProb1E(game, transitionMatrixBdd, constraintStates, targetStates, candidates);
//
// // (3) max/max: compute prob1 using the game functions, reuse prob1 states from the MDP precomputation
// result.prob1Max = storm::utility::graph::performProb1(game, transitionMatrixBdd, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Maximize, true, true, boost::make_optional(prob1MaxMaxMdp));
//
// // (4) max/min: compute prob0 using the game functions
// result.prob0Min = storm::utility::graph::performProb0(game, transitionMatrixBdd, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Minimize, true, true);
//
// // (5) max/min: compute prob1 using the game functions, use prob1 from max/max as the candidate set
// result.prob1Min = storm::utility::graph::performProb1(game, transitionMatrixBdd, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Minimize, true, true, boost::make_optional(prob1MaxMaxMdp));
// }
// } else {
result.prob0Min = storm::utility::graph::performProb0(transitionMatrix, player1RowGrouping, player1BackwardTransitions, player2BackwardTransitions, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Minimize, true, true);
result.prob1Min = storm::utility::graph::performProb1(transitionMatrix, player1RowGrouping, player1BackwardTransitions, player2BackwardTransitions, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Minimize, true, true);
result.prob0Max = storm::utility::graph::performProb0(transitionMatrix, player1RowGrouping, player1BackwardTransitions, player2BackwardTransitions, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Maximize, true, true);
result.prob1Max = storm::utility::graph::performProb1(transitionMatrix, player1RowGrouping, player1BackwardTransitions, player2BackwardTransitions, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Maximize, true, true);
// }
STORM_LOG_TRACE("Qualitative precomputation completed.");
STORM_LOG_TRACE("[" << player1Direction << ", " << storm::OptimizationDirection::Minimize << "]: " << result.prob0Min.player1States.getNumberOfSetBits()<< " 'no', " << result.prob1Min.player1States.getNumberOfSetBits() << " 'yes'.");
STORM_LOG_TRACE("[" << player1Direction << ", " << storm::OptimizationDirection::Maximize << "]: " << result.prob0Max.player1States.getNumberOfSetBits() << " 'no', " << result.prob1Max.player1States.getNumberOfSetBits() << " 'yes'.");
return result;
}
template<storm::dd::DdType Type, typename ModelType>
SymbolicQualitativeGameResultMinMax<Type> GameBasedMdpModelChecker<Type, ModelType>::computeProb01States(boost::optional<SymbolicQualitativeGameResultMinMax<Type>> const& previousQualitativeResult, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& transitionMatrixBdd, storm::dd::Bdd<Type> const& constraintStates, storm::dd::Bdd<Type> const& targetStates) {
QualitativeGameResultMinMax<Type> result;
SymbolicQualitativeGameResultMinMax<Type> result;
if (reuseQualitativeResults) {
// Depending on the player 1 direction, we choose a different order of operations.

src/storm/modelchecker/abstraction/GameBasedMdpModelChecker.h (28 changed lines)

@@ -9,8 +9,8 @@
#include "storm/storage/SymbolicModelDescription.h"
#include "storm/abstraction/QualitativeGameResult.h"
#include "storm/abstraction/QualitativeGameResultMinMax.h"
#include "storm/abstraction/SymbolicQualitativeGameResult.h"
#include "storm/abstraction/SymbolicQualitativeGameResultMinMax.h"
#include "storm/logic/Bound.h"
@@ -32,16 +32,21 @@ namespace storm {
class MenuGameRefiner;
template<storm::dd::DdType Type>
class QualitativeGameResultMinMax;
class SymbolicQualitativeGameResultMinMax;
template<storm::dd::DdType Type, typename ValueType>
struct QuantitativeGameResult;
class SymbolicQuantitativeGameResult;
class ExplicitQualitativeGameResult;
class ExplicitQualitativeGameResultMinMax;
}
namespace modelchecker {
using storm::abstraction::QualitativeGameResult;
using storm::abstraction::QualitativeGameResultMinMax;
using storm::abstraction::SymbolicQualitativeGameResult;
using storm::abstraction::SymbolicQualitativeGameResultMinMax;
using storm::abstraction::ExplicitQualitativeGameResult;
using storm::abstraction::ExplicitQualitativeGameResultMinMax;
template<storm::dd::DdType Type, typename ModelType>
class GameBasedMdpModelChecker : public AbstractModelChecker<ModelType> {
@@ -68,8 +73,8 @@
*/
std::unique_ptr<CheckResult> performGameBasedAbstractionRefinement(Environment const& env, CheckTask<storm::logic::Formula> const& checkTask, storm::expressions::Expression const& constraintExpression, storm::expressions::Expression const& targetStateExpression);
std::unique_ptr<CheckResult> performSymbolicAbstractionSolutionStep(Environment const& env, CheckTask<storm::logic::Formula> const& checkTask, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& initialStates, storm::dd::Bdd<Type> const& constraintStates, storm::dd::Bdd<Type> const& targetStates, storm::abstraction::MenuGameRefiner<Type, ValueType> const& refiner, boost::optional<QualitativeGameResultMinMax<Type>>& previousQualitativeResult, boost::optional<abstraction::QuantitativeGameResult<Type, ValueType>>& previousMinQuantitativeResult);
std::unique_ptr<CheckResult> performExplicitAbstractionSolutionStep(Environment const& env, CheckTask<storm::logic::Formula> const& checkTask, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& initialStates, storm::dd::Bdd<Type> const& constraintStates, storm::dd::Bdd<Type> const& targetStates, storm::abstraction::MenuGameRefiner<Type, ValueType> const& refiner);
std::unique_ptr<CheckResult> performSymbolicAbstractionSolutionStep(Environment const& env, CheckTask<storm::logic::Formula> const& checkTask, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& initialStates, storm::dd::Bdd<Type> const& constraintStates, storm::dd::Bdd<Type> const& targetStates, storm::abstraction::MenuGameRefiner<Type, ValueType> const& refiner, boost::optional<SymbolicQualitativeGameResultMinMax<Type>>& previousQualitativeResult, boost::optional<abstraction::SymbolicQuantitativeGameResult<Type, ValueType>>& previousMinQuantitativeResult);
std::unique_ptr<CheckResult> performExplicitAbstractionSolutionStep(Environment const& env, CheckTask<storm::logic::Formula> const& checkTask, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& initialStates, storm::dd::Bdd<Type> const& constraintStates, storm::dd::Bdd<Type> const& targetStates, storm::abstraction::MenuGameRefiner<Type, ValueType> const& refiner, boost::optional<ExplicitQualitativeGameResultMinMax>& previousQualitativeResult);
/*!
* Retrieves the initial predicates for the abstraction.
@ -85,7 +90,9 @@ namespace storm {
Performs a qualitative check on the given game to compute the (player 1) states that have probability
0 or 1, respectively, of reaching a target state while visiting only constraint states before that.
*/
QualitativeGameResultMinMax<Type> computeProb01States(boost::optional<QualitativeGameResultMinMax<Type>> const& previousQualitativeResult, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& transitionMatrixBdd, storm::dd::Bdd<Type> const& constraintStates, storm::dd::Bdd<Type> const& targetStates);
SymbolicQualitativeGameResultMinMax<Type> computeProb01States(boost::optional<SymbolicQualitativeGameResultMinMax<Type>> const& previousQualitativeResult, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::OptimizationDirection player1Direction, storm::dd::Bdd<Type> const& transitionMatrixBdd, storm::dd::Bdd<Type> const& constraintStates, storm::dd::Bdd<Type> const& targetStates);
ExplicitQualitativeGameResultMinMax computeProb01States(boost::optional<ExplicitQualitativeGameResultMinMax> const& previousQualitativeResult, storm::OptimizationDirection player1Direction, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<uint64_t> const& player1RowGrouping, storm::storage::SparseMatrix<ValueType> const& player1BackwardTransitions, std::vector<uint64_t> const& player2BackwardTransitions, storm::storage::BitVector const& constraintStates, storm::storage::BitVector const& targetStates);
void printStatistics(storm::abstraction::MenuGameAbstractor<Type, ValueType> const& abstractor, storm::abstraction::MenuGame<Type, ValueType> const& game) const;
@ -110,6 +117,9 @@ namespace storm {
/// A flag indicating whether to reuse the quantitative results.
bool reuseQuantitativeResults;
/// The maximal number of abstractions to perform.
uint64_t maximalNumberOfAbstractions;
/// The mode selected for solving the abstraction.
storm::settings::modules::AbstractionSettings::SolveMode solveMode;
};

7
src/storm/settings/modules/AbstractionSettings.cpp

@ -23,6 +23,7 @@ namespace storm {
const std::string AbstractionSettings::reuseResultsOptionName = "reuse";
const std::string AbstractionSettings::restrictToRelevantStatesOptionName = "relevant";
const std::string AbstractionSettings::solveModeOptionName = "solve";
const std::string AbstractionSettings::maximalAbstractionOptionName = "maxabs";
AbstractionSettings::AbstractionSettings() : ModuleSettings(moduleName) {
std::vector<std::string> methods = {"games", "bisimulation", "bisim"};
@ -31,6 +32,8 @@ namespace storm {
.setDefaultValueString("bisim").build())
.build());
this->addOption(storm::settings::OptionBuilder(moduleName, maximalAbstractionOptionName, false, "The maximal number of abstractions to perform before solving is aborted.").addArgument(storm::settings::ArgumentBuilder::createUnsignedIntegerArgument("count", "The maximal abstraction count.").setDefaultValueUnsignedInteger(20000).build()).build());
std::vector<std::string> onOff = {"on", "off"};
this->addOption(storm::settings::OptionBuilder(moduleName, useDecompositionOptionName, true, "Sets whether to apply decomposition during the abstraction.")
@ -159,6 +162,10 @@ namespace storm {
return ReuseMode::All;
}
uint_fast64_t AbstractionSettings::getMaximalAbstractionCount() const {
return this->getOption(maximalAbstractionOptionName).getArgumentByName("count").getValueAsUnsignedInteger();
}
}
}
}

8
src/storm/settings/modules/AbstractionSettings.h

@ -109,6 +109,13 @@ namespace storm {
*/
SolveMode getSolveMode() const;
/*!
* Retrieves the maximal number of abstractions to perform before giving up on convergence.
*
* @return The maximal abstraction count.
*/
uint_fast64_t getMaximalAbstractionCount() const;
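// Minimal usage sketch (illustrative, not part of the patch; 'runRefinementLoop' is hypothetical):
// the option is expected to be read via the settings manager and used as an upper bound on the
// number of abstraction-refinement iterations.
//
// #include "storm/settings/SettingsManager.h"
// #include "storm/settings/modules/AbstractionSettings.h"
//
// void runRefinementLoop() {
//     uint64_t maximalNumberOfAbstractions = storm::settings::getModule<storm::settings::modules::AbstractionSettings>().getMaximalAbstractionCount();
//     for (uint64_t iteration = 0; iteration < maximalNumberOfAbstractions; ++iteration) {
//         // ... abstract, solve the resulting game, refine the predicates ...
//     }
// }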
const static std::string moduleName;
private:
@ -122,6 +129,7 @@ namespace storm {
const static std::string reuseResultsOptionName;
const static std::string restrictToRelevantStatesOptionName;
const static std::string solveModeOptionName;
const static std::string maximalAbstractionOptionName;
};
}

10
src/storm/storage/SparseMatrix.cpp

@ -603,6 +603,16 @@ namespace storm {
return rowGroupIndices.get();
}
template<typename ValueType>
std::vector<typename SparseMatrix<ValueType>::index_type> SparseMatrix<ValueType>::swapRowGroupIndices(std::vector<index_type>&& newRowGrouping) {
std::vector<index_type> result;
if (this->rowGroupIndices) {
result = std::move(rowGroupIndices.get());
rowGroupIndices = std::move(newRowGrouping);
}
return result;
}
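// Illustrative usage sketch (not part of the patch; 'withTemporaryRowGrouping' is a hypothetical
// helper): install a temporary row grouping and restore the previous one afterwards. Note that
// the swap above only takes effect if the matrix already carries a non-trivial row grouping.
template<typename ValueType>
void withTemporaryRowGrouping(SparseMatrix<ValueType>& matrix, std::vector<typename SparseMatrix<ValueType>::index_type> temporaryGrouping) {
    // Install the temporary grouping and remember the old one.
    auto oldGrouping = matrix.swapRowGroupIndices(std::move(temporaryGrouping));
    // ... operate on the matrix under the temporary grouping ...
    // Restore the original grouping.
    matrix.swapRowGroupIndices(std::move(oldGrouping));
}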
template<typename ValueType>
void SparseMatrix<ValueType>::setRowGroupIndices(std::vector<index_type> const& newRowGroupIndices) {
trivialRowGrouping = false;

6
src/storm/storage/SparseMatrix.h

@ -579,6 +579,12 @@ namespace storm {
*/
std::vector<index_type> const& getRowGroupIndices() const;
/*!
* Swaps the grouping of rows of this matrix with the given one (only if the matrix already has a non-trivial row grouping).
*
* @return The old grouping of rows of this matrix.
*/
std::vector<index_type> swapRowGroupIndices(std::vector<index_type>&& newRowGrouping);
/*!
* Sets the row grouping to the given one.

270
src/storm/utility/graph.cpp

@ -1083,6 +1083,145 @@ namespace storm {
return result;
}
template <typename ValueType>
ExplicitGameProb01Result performProb0(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<uint64_t> const& player1RowGrouping, storm::storage::SparseMatrix<ValueType> const& player1BackwardTransitions, std::vector<uint64_t> const& player2BackwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, storm::OptimizationDirection const& player1Strategy, storm::OptimizationDirection const& player2Strategy, bool producePlayer1Strategy, bool producePlayer2Strategy) {
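// Backward search from the psi states: mark every player 1/player 2 state from which the
// respective player, following the given optimization directions, reaches a psi state with
// positive probability. Complementing the two sets at the end then yields the states with
// probability 0.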
ExplicitGameProb01Result result(psiStates, storm::storage::BitVector(transitionMatrix.getRowGroupCount()));
// Initialize the stack used for the DFS with the psi states.
std::vector<uint_fast64_t> stack(psiStates.begin(), psiStates.end());
// Perform the actual DFS.
uint_fast64_t currentState;
while (!stack.empty()) {
currentState = stack.back();
stack.pop_back();
// Check which player 2 predecessors of the current player 1 state to add.
for (auto const& player2PredecessorEntry : player1BackwardTransitions.getRow(currentState)) {
uint64_t player2Predecessor = player2PredecessorEntry.getColumn();
if (!result.player2States.get(player2Predecessor)) {
bool addPlayer2State = false;
if (player2Strategy == OptimizationDirection::Minimize) {
bool allChoicesHavePlayer1State = true;
for (uint64_t row = transitionMatrix.getRowGroupIndices()[player2Predecessor]; row < transitionMatrix.getRowGroupIndices()[player2Predecessor + 1]; ++row) {
bool choiceHasPlayer1State = false;
for (auto const& entry : transitionMatrix.getRow(row)) {
if (result.player1States.get(entry.getColumn())) {
choiceHasPlayer1State = true;
break;
}
}
if (!choiceHasPlayer1State) {
allChoicesHavePlayer1State = false;
}
}
if (allChoicesHavePlayer1State) {
addPlayer2State = true;
}
} else {
addPlayer2State = true;
}
if (addPlayer2State) {
result.player2States.set(player2Predecessor);
// Now check whether adding the player 2 state changes something with respect to the
// (single) player 1 predecessor.
uint64_t player1Predecessor = player2BackwardTransitions[player2Predecessor];
if (!result.player1States.get(player1Predecessor)) {
bool addPlayer1State = false;
if (player1Strategy == OptimizationDirection::Minimize) {
bool allPlayer2Successors = true;
for (uint64_t player2State = player1RowGrouping[player1Predecessor]; player2State < player1RowGrouping[player1Predecessor + 1]; ++player2State) {
if (!result.player2States.get(player2State)) {
allPlayer2Successors = false;
break;
}
}
if (allPlayer2Successors) {
addPlayer1State = true;
}
} else {
addPlayer1State = true;
}
if (addPlayer1State) {
result.player1States.set(player1Predecessor);
stack.emplace_back(player1Predecessor);
}
}
}
}
}
}
// Since we have determined the complements of the desired sets, we need to complement them now.
result.player1States.complement();
result.player2States.complement();
// Generate player 1 strategy if required.
if (producePlayer1Strategy) {
result.player1Strategy = std::vector<uint64_t>(result.player1States.size());
for (auto player1State : result.player1States) {
if (player1Strategy == storm::OptimizationDirection::Minimize) {
// At least one player 2 successor is a state with probability 0, find it.
bool foundProb0Successor = false;
uint64_t player2State;
for (player2State = player1RowGrouping[player1State]; player2State < player1RowGrouping[player1State + 1]; ++player2State) {
if (result.player2States.get(player2State)) {
foundProb0Successor = true;
break;
}
}
STORM_LOG_ASSERT(foundProb0Successor, "Expected at least one player 2 successor with probability 0.");
result.player1Strategy.get()[player1State] = player2State - player1RowGrouping[player1State];
} else {
// Since all player 2 successors are states with probability 0, just pick any.
result.player1Strategy.get()[player1State] = 0;
}
}
}
// Generate player 2 strategy if required.
if (producePlayer2Strategy) {
result.player2Strategy = std::vector<uint64_t>(result.player2States.size());
for (auto player2State : result.player2States) {
if (player2Strategy == storm::OptimizationDirection::Minimize) {
// At least one distribution only has successors with probability 0, find it.
bool foundProb0SuccessorDistribution = false;
uint64_t row;
for (row = transitionMatrix.getRowGroupIndices()[player2State]; row < transitionMatrix.getRowGroupIndices()[player2State + 1]; ++row) {
bool distributionHasOnlyProb0Successors = true;
for (auto const& player1SuccessorEntry : transitionMatrix.getRow(row)) {
if (!result.player1States.get(player1SuccessorEntry.getColumn())) {
distributionHasOnlyProb0Successors = false;
break;
}
}
if (distributionHasOnlyProb0Successors) {
foundProb0SuccessorDistribution = true;
break;
}
}
STORM_LOG_ASSERT(foundProb0SuccessorDistribution, "Expected at least one distribution with only successors with probability 0.");
result.player2Strategy.get()[player2State] = row - transitionMatrix.getRowGroupIndices()[player2State];
} else {
// Since all distributions lead only to states with probability 0, just pick any.
result.player2Strategy.get()[player2State] = 0;
}
}
}
return result;
}
template <storm::dd::DdType Type, typename ValueType>
SymbolicGameProb01Result<Type> performProb0(storm::models::symbolic::StochasticTwoPlayerGame<Type, ValueType> const& model, storm::dd::Bdd<Type> const& transitionMatrix, storm::dd::Bdd<Type> const& phiStates, storm::dd::Bdd<Type> const& psiStates, storm::OptimizationDirection const& player1Strategy, storm::OptimizationDirection const& player2Strategy, bool producePlayer1Strategy, bool producePlayer2Strategy) {
@ -1150,6 +1289,133 @@ namespace storm {
return SymbolicGameProb01Result<Type>(player1States, player2States, player1StrategyBdd, player2StrategyBdd);
}
template <typename ValueType>
ExplicitGameProb01Result performProb1(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<uint64_t> const& player1RowGrouping, storm::storage::SparseMatrix<ValueType> const& player1BackwardTransitions, std::vector<uint64_t> const& player2BackwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, storm::OptimizationDirection const& player1Strategy, storm::OptimizationDirection const& player2Strategy, bool producePlayer1Strategy, bool producePlayer2Strategy, boost::optional<storm::storage::BitVector> const& player1Candidates) {
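// The probability-1 states are computed as a greatest fixed point: starting from the candidate
// set (all states, unless player 1 candidates are given), each iteration keeps only those states
// from which a psi state can be reached with positive probability without leaving the current
// candidate set. The iteration stops once the player 1 set no longer changes.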
// During the execution, the two state sets in the result hold the current candidates for the player 1 and player 2 states with probability 1.
ExplicitGameProb01Result result;
if (player1Candidates) {
result = ExplicitGameProb01Result(player1Candidates.get(), storm::storage::BitVector(transitionMatrix.getRowGroupCount()));
} else {
result = ExplicitGameProb01Result(storm::storage::BitVector(phiStates.size(), true), storm::storage::BitVector(transitionMatrix.getRowGroupCount()));
}
// A flag that governs whether strategies are produced in the current iteration.
bool produceStrategiesInIteration = false;
// Initialize the stack used for the DFS; it is refilled with the psi states in every iteration.
std::vector<uint_fast64_t> stack;
bool maybeStatesDone = false;
uint_fast64_t maybeStateIterations = 0;
while (!maybeStatesDone || produceStrategiesInIteration) {
storm::storage::BitVector player1Solution = psiStates;
storm::storage::BitVector player2Solution(result.player2States.size());
stack.clear();
stack.insert(stack.end(), psiStates.begin(), psiStates.end());
// If we are to produce strategies in this iteration, we prepare some storage.
if (produceStrategiesInIteration) {
if (producePlayer1Strategy) {
result.player1Strategy = std::vector<uint64_t>(result.player1States.size());
}
if (producePlayer2Strategy) {
result.player2Strategy = std::vector<uint64_t>(result.player2States.size());
}
}
// Perform the actual DFS.
uint_fast64_t currentState;
while (!stack.empty()) {
currentState = stack.back();
stack.pop_back();
for (auto player2PredecessorEntry : player1BackwardTransitions.getRow(currentState)) {
uint64_t player2Predecessor = player2PredecessorEntry.getColumn();
if (!player2Solution.get(player2Predecessor)) {
bool addPlayer2State = player2Strategy == storm::OptimizationDirection::Minimize;
uint64_t validChoice = 0;
for (uint64_t row = transitionMatrix.getRowGroupIndices()[player2Predecessor]; row < transitionMatrix.getRowGroupIndices()[player2Predecessor + 1]; ++row) {
bool choiceHasSolutionSuccessor = false;
bool choiceStaysInMaybeStates = true;
for (auto const& entry : transitionMatrix.getRow(row)) {
if (player1Solution.get(entry.getColumn())) {
choiceHasSolutionSuccessor = true;
}
if (!result.player1States.get(entry.getColumn())) {
choiceStaysInMaybeStates = false;
break;
}
}
if (choiceHasSolutionSuccessor && choiceStaysInMaybeStates) {
if (player2Strategy == storm::OptimizationDirection::Maximize) {
validChoice = row - transitionMatrix.getRowGroupIndices()[player2Predecessor];
addPlayer2State = true;
break;
}
} else if (player2Strategy == storm::OptimizationDirection::Minimize) {
addPlayer2State = false;
break;
}
}
if (addPlayer2State) {
player2Solution.set(player2Predecessor);
if (produceStrategiesInIteration && producePlayer2Strategy) {
result.player2Strategy.get()[player2Predecessor] = validChoice;
}
// Check whether the addition of the player 2 state changes the state of the (single)
// player 1 predecessor.
uint64_t player1Predecessor = player2BackwardTransitions[player2Predecessor];
if (!player1Solution.get(player1Predecessor)) {
bool addPlayer1State = player1Strategy == storm::OptimizationDirection::Minimize;
validChoice = 0;
for (uint64_t player2Successor = player1RowGrouping[player1Predecessor]; player2Successor < player1RowGrouping[player1Predecessor + 1]; ++player2Successor) {
if (player2Solution.get(player2Successor)) {
if (player1Strategy == storm::OptimizationDirection::Maximize) {
validChoice = player2Successor - player1RowGrouping[player1Predecessor];
addPlayer1State = true;
break;
}
} else if (player1Strategy == storm::OptimizationDirection::Minimize) {
addPlayer1State = false;
break;
}
}
if (addPlayer1State) {
player1Solution.set(player1Predecessor);
if (produceStrategiesInIteration && producePlayer1Strategy) {
result.player1Strategy.get()[player1Predecessor] = validChoice;
}
stack.emplace_back(player1Predecessor);
}
}
}
}
}
}
if (result.player1States == player1Solution) {
maybeStatesDone = true;
result.player2States = player2Solution;
// If strategies are requested, trigger exactly one more iteration in which they are recorded.
produceStrategiesInIteration = !produceStrategiesInIteration && (producePlayer1Strategy || producePlayer2Strategy);
} else {
result.player1States = player1Solution;
result.player2States = player2Solution;
}
++maybeStateIterations;
}
return result;
}
template <storm::dd::DdType Type, typename ValueType>
SymbolicGameProb01Result<Type> performProb1(storm::models::symbolic::StochasticTwoPlayerGame<Type, ValueType> const& model, storm::dd::Bdd<Type> const& transitionMatrix, storm::dd::Bdd<Type> const& phiStates, storm::dd::Bdd<Type> const& psiStates, storm::OptimizationDirection const& player1Strategy, storm::OptimizationDirection const& player2Strategy, bool producePlayer1Strategy, bool producePlayer2Strategy, boost::optional<storm::dd::Bdd<Type>> const& player1Candidates) {
@ -1424,6 +1690,10 @@ namespace storm {
template std::pair<storm::storage::BitVector, storm::storage::BitVector> performProb01Min(storm::models::sparse::NondeterministicModel<double, storm::models::sparse::StandardRewardModel<storm::Interval>> const& model, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates);
#endif
template ExplicitGameProb01Result performProb0(storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<uint64_t> const& player1RowGrouping, storm::storage::SparseMatrix<double> const& player1BackwardTransitions, std::vector<uint64_t> const& player2BackwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, storm::OptimizationDirection const& player1Strategy, storm::OptimizationDirection const& player2Strategy, bool producePlayer1Strategy, bool producePlayer2Strategy);
template ExplicitGameProb01Result performProb1(storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<uint64_t> const& player1RowGrouping, storm::storage::SparseMatrix<double> const& player1BackwardTransitions, std::vector<uint64_t> const& player2BackwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, storm::OptimizationDirection const& player1Strategy, storm::OptimizationDirection const& player2Strategy, bool producePlayer1Strategy, bool producePlayer2Strategy, boost::optional<storm::storage::BitVector> const& player1Candidates);
template std::vector<uint_fast64_t> getTopologicalSort(storm::storage::SparseMatrix<double> const& matrix) ;
// Instantiations for storm::RationalNumber.

79
src/storm/utility/graph.h

@ -615,6 +615,85 @@ namespace storm {
template <storm::dd::DdType Type, typename ValueType>
SymbolicGameProb01Result<Type> performProb1(storm::models::symbolic::StochasticTwoPlayerGame<Type, ValueType> const& model, storm::dd::Bdd<Type> const& transitionMatrix, storm::dd::Bdd<Type> const& phiStates, storm::dd::Bdd<Type> const& psiStates, storm::OptimizationDirection const& player1Strategy, storm::OptimizationDirection const& player2Strategy, bool producePlayer1Strategy = false, bool producePlayer2Strategy = false, boost::optional<storm::dd::Bdd<Type>> const& player1Candidates = boost::none);
struct ExplicitGameProb01Result {
ExplicitGameProb01Result() = default;
ExplicitGameProb01Result(uint64_t numberOfPlayer1States, uint64_t numberOfPlayer2States) : player1States(numberOfPlayer1States), player2States(numberOfPlayer2States) {
// Intentionally left empty.
}
ExplicitGameProb01Result(storm::storage::BitVector const& player1States, storm::storage::BitVector const& player2States, boost::optional<std::vector<uint64_t>> const& player1Strategy = boost::none, boost::optional<std::vector<uint64_t>> const& player2Strategy = boost::none) : player1States(player1States), player2States(player2States), player1Strategy(player1Strategy), player2Strategy(player2Strategy) {
// Intentionally left empty.
}
bool hasPlayer1Strategy() const {
return static_cast<bool>(player1Strategy);
}
std::vector<uint64_t> const& getPlayer1Strategy() const {
return player1Strategy.get();
}
boost::optional<std::vector<uint64_t>> const& getOptionalPlayer1Strategy() {
return player1Strategy;
}
bool hasPlayer2Strategy() const {
return static_cast<bool>(player2Strategy);
}
std::vector<uint64_t> const& getPlayer2Strategy() const {
return player2Strategy.get();
}
boost::optional<std::vector<uint64_t>> const& getOptionalPlayer2Strategy() {
return player2Strategy;
}
storm::storage::BitVector const& getPlayer1States() const {
return player1States;
}
storm::storage::BitVector const& getPlayer2States() const {
return player2States;
}
storm::storage::BitVector player1States;
storm::storage::BitVector player2States;
boost::optional<std::vector<uint64_t>> player1Strategy;
boost::optional<std::vector<uint64_t>> player2Strategy;
};
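// Illustrative sketch (hypothetical helper, not part of the patch): inspecting a result returned
// by the explicit performProb0/performProb1 functions declared below.
inline uint64_t countProb01Player1States(ExplicitGameProb01Result const& result) {
    // The bit vector marks the player 1 states belonging to the computed prob-0/prob-1 set.
    uint64_t count = result.getPlayer1States().getNumberOfSetBits();
    if (result.hasPlayer1Strategy()) {
        // If produced, the strategy maps each (relevant) player 1 state to the index of the chosen
        // player 2 successor within that state's row group.
        std::vector<uint64_t> const& player1Strategy = result.getPlayer1Strategy();
        (void)player1Strategy;
    }
    return count;
}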
/*!
* Computes the sets of player 1 and player 2 states that have probability 0 under the given optimization directions of the two players.
*
* @param transitionMatrix The (sparse) transition matrix of the game (rows correspond to player 2 choices).
* @param player1RowGrouping The row grouping of player 1 states.
* @param player1BackwardTransitions The backward transitions (player 1 to player 2).
* @param player2BackwardTransitions The backward transitions (player 2 to player 1).
* @param phiStates The phi states of the model.
* @param psiStates The psi states of the model.
* @param player1Strategy The optimization direction of player 1.
* @param player2Strategy The optimization direction of player 2.
* @param producePlayer1Strategy A flag indicating whether the strategy of player 1 shall be produced.
* @param producePlayer2Strategy A flag indicating whether the strategy of player 2 shall be produced.
*/
template <typename ValueType>
ExplicitGameProb01Result performProb0(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<uint64_t> const& player1RowGrouping, storm::storage::SparseMatrix<ValueType> const& player1BackwardTransitions, std::vector<uint64_t> const& player2BackwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, storm::OptimizationDirection const& player1Strategy, storm::OptimizationDirection const& player2Strategy, bool producePlayer1Strategy = false, bool producePlayer2Strategy = false);
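// Illustrative sketch (hypothetical wrapper, not part of the patch): computing the probability-0
// states of a game when both players minimize, including witness strategies.
template<typename ValueType>
ExplicitGameProb01Result computeProb0Min(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<uint64_t> const& player1RowGrouping, storm::storage::SparseMatrix<ValueType> const& player1BackwardTransitions, std::vector<uint64_t> const& player2BackwardTransitions, storm::storage::BitVector const& constraintStates, storm::storage::BitVector const& targetStates) {
    // Both players minimize; request strategies witnessing probability 0.
    return performProb0(transitionMatrix, player1RowGrouping, player1BackwardTransitions, player2BackwardTransitions, constraintStates, targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, true, true);
}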
/*!
* Computes the sets of player 1 and player 2 states that have probability 1 under the given optimization directions of the two players.
*
* @param transitionMatrix The (sparse) transition matrix of the game (rows correspond to player 2 choices).
* @param player1RowGrouping The row grouping of player 1 states.
* @param player1BackwardTransitions The backward transitions (player 1 to player 2).
* @param player2BackwardTransitions The backward transitions (player 2 to player 1).
* @param phiStates The phi states of the model.
* @param psiStates The psi states of the model.
* @param player1Strategy The optimization direction of player 1.
* @param player2Strategy The optimization direction of player 2.
* @param producePlayer1Strategy A flag indicating whether the strategy of player 1 shall be produced.
* @param producePlayer2Strategy A flag indicating whether the strategy of player 2 shall be produced.
* @param player1Candidates If given, this set constrains the candidates of player 1 states that are considered.
*/
template <typename ValueType>
ExplicitGameProb01Result performProb1(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<uint64_t> const& player1RowGrouping, storm::storage::SparseMatrix<ValueType> const& player1BackwardTransitions, std::vector<uint64_t> const& player2BackwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, storm::OptimizationDirection const& player1Strategy, storm::OptimizationDirection const& player2Strategy, bool producePlayer1Strategy = false, bool producePlayer2Strategy = false, boost::optional<storm::storage::BitVector> const& player1Candidates = boost::none);
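// Illustrative sketch (hypothetical wrapper, not part of the patch): seeding the probability-1
// computation with the complement of a previously computed probability-0 result. For the same
// optimization directions, the prob-1 states are contained in that complement, so it is a sound
// candidate set and can speed up the greatest-fixed-point iteration.
template<typename ValueType>
ExplicitGameProb01Result computeProb1WithCandidates(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<uint64_t> const& player1RowGrouping, storm::storage::SparseMatrix<ValueType> const& player1BackwardTransitions, std::vector<uint64_t> const& player2BackwardTransitions, storm::storage::BitVector const& constraintStates, storm::storage::BitVector const& targetStates, ExplicitGameProb01Result const& prob0Result) {
    boost::optional<storm::storage::BitVector> player1Candidates = ~prob0Result.getPlayer1States();
    return performProb1(transitionMatrix, player1RowGrouping, player1BackwardTransitions, player2BackwardTransitions, constraintStates, targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, true, true, player1Candidates);
}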
/*!
* Performs a topological sort of the states of the system according to the given transitions.
*
