38 changed files with 1192 additions and 1194 deletions

   10  src/logic/AtomicExpressionFormula.cpp
    5  src/logic/AtomicExpressionFormula.h
    8  src/logic/AtomicLabelFormula.cpp
    5  src/logic/AtomicLabelFormula.h
    3  src/logic/BinaryBooleanStateFormula.h
   16  src/logic/BinaryPathFormula.cpp
    5  src/logic/BinaryPathFormula.h
   17  src/logic/BinaryStateFormula.cpp
    5  src/logic/BinaryStateFormula.h
    8  src/logic/BooleanLiteralFormula.cpp
    5  src/logic/BooleanLiteralFormula.h
    8  src/logic/BoundedUntilFormula.cpp
    3  src/logic/BoundedUntilFormula.h
   20  src/logic/Formula.cpp
    5  src/logic/Formula.h
   16  src/logic/ProbabilityOperatorFormula.cpp
    5  src/logic/ProbabilityOperatorFormula.h
    4  src/logic/RewardOperatorFormula.cpp
    2  src/logic/RewardOperatorFormula.h
   12  src/logic/SteadyStateOperatorFormula.cpp
    4  src/logic/SteadyStateOperatorFormula.h
   18  src/logic/UnaryPathFormula.cpp
    5  src/logic/UnaryPathFormula.h
   16  src/logic/UnaryStateFormula.cpp
    7  src/logic/UnaryStateFormula.h
   34  src/modelchecker/AbstractModelChecker.cpp
    4  src/modelchecker/CheckResult.cpp
    5  src/modelchecker/CheckResult.h
   38  src/modelchecker/ExplicitQuantitativeCheckResult.cpp
    2  src/modelchecker/ExplicitQuantitativeCheckResult.h
  478  src/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp
   72  src/modelchecker/prctl/SparseDtmcPrctlModelChecker.h
  327  src/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
  715  src/modelchecker/prctl/SparseMdpPrctlModelChecker.h
    4  src/parser/FormulaParser.cpp
  149  test/functional/modelchecker/GmmxxDtmcPrctlModelCheckerTest.cpp
  344  test/functional/modelchecker/SparseMdpPrctlModelCheckerTest.cpp
    2  test/functional/parser/FormulaParserTest.cpp
src/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
@@ -0,0 +1,327 @@
#include "src/modelchecker/prctl/SparseMdpPrctlModelChecker.h"

#include <vector>

#include "src/utility/ConstantsComparator.h"
#include "src/utility/macros.h"
#include "src/utility/vector.h"
#include "src/utility/graph.h"

#include "src/modelchecker/ExplicitQualitativeCheckResult.h"
#include "src/modelchecker/ExplicitQuantitativeCheckResult.h"

#include "src/exceptions/InvalidPropertyException.h"

namespace storm {
    namespace modelchecker {
        template<typename ValueType>
        SparseMdpPrctlModelChecker<ValueType>::SparseMdpPrctlModelChecker(storm::models::Mdp<ValueType> const& model) : model(model), nondeterministicLinearEquationSolver(storm::utility::solver::getNondeterministicLinearEquationSolver<ValueType>()) {
            // Intentionally left empty.
        }

        template<typename ValueType>
        SparseMdpPrctlModelChecker<ValueType>::SparseMdpPrctlModelChecker(storm::models::Mdp<ValueType> const& model, std::shared_ptr<storm::solver::NondeterministicLinearEquationSolver<ValueType>> nondeterministicLinearEquationSolver) : model(model), nondeterministicLinearEquationSolver(nondeterministicLinearEquationSolver) {
            // Intentionally left empty.
        }

        template<typename ValueType>
        bool SparseMdpPrctlModelChecker<ValueType>::canHandle(storm::logic::Formula const& formula) const {
            return formula.isPctlStateFormula() || formula.isPctlPathFormula() || formula.isRewardPathFormula();
        }

        template<typename ValueType>
        std::vector<ValueType> SparseMdpPrctlModelChecker<ValueType>::computeBoundedUntilProbabilitiesHelper(bool minimize, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, uint_fast64_t stepBound) {
            std::vector<ValueType> result(model.getNumberOfStates(), storm::utility::zero<ValueType>());

            // Determine the states that have 0 probability of reaching the target states.
            storm::storage::BitVector statesWithProbabilityGreater0;
            if (minimize) {
                statesWithProbabilityGreater0 = storm::utility::graph::performProbGreater0A(model.getTransitionMatrix(), model.getTransitionMatrix().getRowGroupIndices(), model.getBackwardTransitions(), phiStates, psiStates, true, stepBound);
            } else {
                statesWithProbabilityGreater0 = storm::utility::graph::performProbGreater0E(model.getTransitionMatrix(), model.getTransitionMatrix().getRowGroupIndices(), model.getBackwardTransitions(), phiStates, psiStates, true, stepBound);
            }
            STORM_LOG_INFO("Found " << statesWithProbabilityGreater0.getNumberOfSetBits() << " 'maybe' states.");

            if (!statesWithProbabilityGreater0.empty()) {
                // We can eliminate the rows and columns from the original transition probability matrix that have probability 0.
                storm::storage::SparseMatrix<ValueType> submatrix = model.getTransitionMatrix().getSubmatrix(true, statesWithProbabilityGreater0, statesWithProbabilityGreater0, false);

                // Compute the new set of target states in the reduced system.
                storm::storage::BitVector rightStatesInReducedSystem = psiStates % statesWithProbabilityGreater0;

                // Make all rows absorbing that satisfy the second sub-formula.
                submatrix.makeRowGroupsAbsorbing(rightStatesInReducedSystem);

                // Create the vector with which to multiply.
                std::vector<ValueType> subresult(statesWithProbabilityGreater0.getNumberOfSetBits());
                storm::utility::vector::setVectorValues(subresult, rightStatesInReducedSystem, storm::utility::one<ValueType>());

                STORM_LOG_THROW(nondeterministicLinearEquationSolver != nullptr, storm::exceptions::InvalidStateException, "No valid equation solver available.");
                this->nondeterministicLinearEquationSolver->performMatrixVectorMultiplication(minimize, submatrix, subresult, nullptr, stepBound);

                // Set the values of the resulting vector accordingly.
                storm::utility::vector::setVectorValues(result, statesWithProbabilityGreater0, subresult);
                storm::utility::vector::setVectorValues(result, ~statesWithProbabilityGreater0, storm::utility::zero<ValueType>());
            }

            return result;
        }

        template<typename ValueType>
        std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<ValueType>::computeBoundedUntilProbabilities(storm::logic::BoundedUntilFormula const& pathFormula, bool qualitative, boost::optional<storm::logic::OptimalityType> const& optimalityType) {
            STORM_LOG_THROW(optimalityType, storm::exceptions::InvalidArgumentException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
            std::unique_ptr<CheckResult> leftResultPointer = this->check(pathFormula.getLeftSubformula());
            std::unique_ptr<CheckResult> rightResultPointer = this->check(pathFormula.getRightSubformula());
            ExplicitQualitativeCheckResult& leftResult = dynamic_cast<ExplicitQualitativeCheckResult&>(*leftResultPointer);
            ExplicitQualitativeCheckResult& rightResult = dynamic_cast<ExplicitQualitativeCheckResult&>(*rightResultPointer);
            std::unique_ptr<CheckResult> result = std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(this->computeBoundedUntilProbabilitiesHelper(optimalityType.get() == storm::logic::OptimalityType::Minimize, leftResult.getTruthValues(), rightResult.getTruthValues(), pathFormula.getUpperBound())));
            return result;
        }

        template<typename ValueType>
        std::vector<ValueType> SparseMdpPrctlModelChecker<ValueType>::computeNextProbabilitiesHelper(bool minimize, storm::storage::BitVector const& nextStates) {
            // Create the vector with which to multiply and initialize it correctly.
            std::vector<ValueType> result(model.getNumberOfStates());
            storm::utility::vector::setVectorValues(result, nextStates, storm::utility::one<ValueType>());

            STORM_LOG_THROW(nondeterministicLinearEquationSolver != nullptr, storm::exceptions::InvalidStateException, "No valid equation solver available.");
            this->nondeterministicLinearEquationSolver->performMatrixVectorMultiplication(minimize, model.getTransitionMatrix(), result);

            return result;
        }

        template<typename ValueType>
        std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<ValueType>::computeNextProbabilities(storm::logic::NextFormula const& pathFormula, bool qualitative, boost::optional<storm::logic::OptimalityType> const& optimalityType) {
            STORM_LOG_THROW(optimalityType, storm::exceptions::InvalidArgumentException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
            std::unique_ptr<CheckResult> subResultPointer = this->check(pathFormula.getSubformula());
            ExplicitQualitativeCheckResult& subResult = dynamic_cast<ExplicitQualitativeCheckResult&>(*subResultPointer);
            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(this->computeNextProbabilitiesHelper(optimalityType.get() == storm::logic::OptimalityType::Minimize, subResult.getTruthValues())));
        }

        template<typename ValueType>
        std::vector<ValueType> SparseMdpPrctlModelChecker<ValueType>::computeUntilProbabilitiesHelper(bool minimize, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::shared_ptr<storm::solver::NondeterministicLinearEquationSolver<ValueType>> nondeterministicLinearEquationSolver, bool qualitative) {
            size_t numberOfStates = phiStates.size();

            // We need to identify the states which have to be taken out of the matrix, i.e.
            // all states that have probability 0 and 1 of satisfying the until-formula.
            std::pair<storm::storage::BitVector, storm::storage::BitVector> statesWithProbability01;
            if (minimize) {
                statesWithProbability01 = storm::utility::graph::performProb01Min(transitionMatrix, transitionMatrix.getRowGroupIndices(), backwardTransitions, phiStates, psiStates);
            } else {
                statesWithProbability01 = storm::utility::graph::performProb01Max(transitionMatrix, transitionMatrix.getRowGroupIndices(), backwardTransitions, phiStates, psiStates);
            }
            storm::storage::BitVector statesWithProbability0 = std::move(statesWithProbability01.first);
            storm::storage::BitVector statesWithProbability1 = std::move(statesWithProbability01.second);
            storm::storage::BitVector maybeStates = ~(statesWithProbability0 | statesWithProbability1);
            LOG4CPLUS_INFO(logger, "Found " << statesWithProbability0.getNumberOfSetBits() << " 'no' states.");
            LOG4CPLUS_INFO(logger, "Found " << statesWithProbability1.getNumberOfSetBits() << " 'yes' states.");
            LOG4CPLUS_INFO(logger, "Found " << maybeStates.getNumberOfSetBits() << " 'maybe' states.");

            // Create resulting vector.
            std::vector<ValueType> result(numberOfStates);

            // Check whether we need to compute exact probabilities for some states.
            if (qualitative) {
                // Set the values for all maybe-states to 0.5 to indicate that their probability values are neither 0 nor 1.
                storm::utility::vector::setVectorValues<ValueType>(result, maybeStates, ValueType(0.5));
            } else {
                if (!maybeStates.empty()) {
                    // In this case we have to compute the probabilities.

                    // First, we can eliminate the rows and columns from the original transition probability matrix for states
                    // whose probabilities are already known.
                    storm::storage::SparseMatrix<ValueType> submatrix = transitionMatrix.getSubmatrix(true, maybeStates, maybeStates, false);

                    // Prepare the right-hand side of the equation system. For entry i this corresponds to
                    // the accumulated probability of going from state i to some 'yes' state.
                    std::vector<ValueType> b = transitionMatrix.getConstrainedRowGroupSumVector(maybeStates, statesWithProbability1);

                    // Create vector for results for maybe states.
                    std::vector<ValueType> x(maybeStates.getNumberOfSetBits());

                    // Solve the corresponding system of equations.
                    nondeterministicLinearEquationSolver->solveEquationSystem(minimize, submatrix, x, b);

                    // Set values of resulting vector according to result.
                    storm::utility::vector::setVectorValues<ValueType>(result, maybeStates, x);
                }
            }

            // Set values of resulting vector that are known exactly.
            storm::utility::vector::setVectorValues<ValueType>(result, statesWithProbability0, storm::utility::zero<ValueType>());
            storm::utility::vector::setVectorValues<ValueType>(result, statesWithProbability1, storm::utility::one<ValueType>());

            return result;
        }

        template<typename ValueType>
        std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<ValueType>::computeUntilProbabilities(storm::logic::UntilFormula const& pathFormula, bool qualitative, boost::optional<storm::logic::OptimalityType> const& optimalityType) {
            STORM_LOG_THROW(optimalityType, storm::exceptions::InvalidArgumentException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
            std::unique_ptr<CheckResult> leftResultPointer = this->check(pathFormula.getLeftSubformula());
            std::unique_ptr<CheckResult> rightResultPointer = this->check(pathFormula.getRightSubformula());
            ExplicitQualitativeCheckResult& leftResult = dynamic_cast<ExplicitQualitativeCheckResult&>(*leftResultPointer);
            ExplicitQualitativeCheckResult& rightResult = dynamic_cast<ExplicitQualitativeCheckResult&>(*rightResultPointer);
            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(SparseMdpPrctlModelChecker<ValueType>::computeUntilProbabilitiesHelper(optimalityType.get() == storm::logic::OptimalityType::Minimize, model.getTransitionMatrix(), model.getBackwardTransitions(), leftResult.getTruthValues(), rightResult.getTruthValues(), nondeterministicLinearEquationSolver, qualitative)));
        }

        template<typename ValueType>
        std::vector<ValueType> SparseMdpPrctlModelChecker<ValueType>::computeCumulativeRewardsHelper(bool minimize, uint_fast64_t stepBound) const {
            // Only compute the result if the model has at least one reward model.
            STORM_LOG_THROW(model.hasStateRewards() || model.hasTransitionRewards(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula.");

            // Compute the reward vector to add in each step based on the available reward models.
            std::vector<ValueType> totalRewardVector;
            if (model.hasTransitionRewards()) {
                totalRewardVector = model.getTransitionMatrix().getPointwiseProductRowSumVector(model.getTransitionRewardMatrix());
                if (model.hasStateRewards()) {
                    storm::utility::vector::addVectorsInPlace(totalRewardVector, model.getStateRewardVector());
                }
            } else {
                totalRewardVector = std::vector<ValueType>(model.getStateRewardVector());
            }

            // Initialize result to either the state rewards of the model or the null vector.
            std::vector<ValueType> result;
            if (model.hasStateRewards()) {
                result = std::vector<ValueType>(model.getStateRewardVector());
            } else {
                result.resize(model.getNumberOfStates());
            }

            this->nondeterministicLinearEquationSolver->performMatrixVectorMultiplication(minimize, model.getTransitionMatrix(), result, &totalRewardVector, stepBound);

            return result;
        }

        template<typename ValueType>
        std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<ValueType>::computeCumulativeRewards(storm::logic::CumulativeRewardFormula const& rewardPathFormula, bool qualitative, boost::optional<storm::logic::OptimalityType> const& optimalityType) {
            STORM_LOG_THROW(optimalityType, storm::exceptions::InvalidArgumentException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(this->computeCumulativeRewardsHelper(optimalityType.get() == storm::logic::OptimalityType::Minimize, rewardPathFormula.getStepBound())));
        }

        template<typename ValueType>
        std::vector<ValueType> SparseMdpPrctlModelChecker<ValueType>::computeInstantaneousRewardsHelper(bool minimize, uint_fast64_t stepCount) const {
            // Only compute the result if the model has a state-based reward model.
            STORM_LOG_THROW(model.hasStateRewards(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula.");

            // Initialize result to state rewards of the model.
            std::vector<ValueType> result(model.getStateRewardVector());

            STORM_LOG_THROW(nondeterministicLinearEquationSolver != nullptr, storm::exceptions::InvalidStateException, "No valid linear equation solver available.");
            this->nondeterministicLinearEquationSolver->performMatrixVectorMultiplication(minimize, model.getTransitionMatrix(), result, nullptr, stepCount);

            return result;
        }

        template<typename ValueType>
        std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<ValueType>::computeInstantaneousRewards(storm::logic::InstantaneousRewardFormula const& rewardPathFormula, bool qualitative, boost::optional<storm::logic::OptimalityType> const& optimalityType) {
            STORM_LOG_THROW(optimalityType, storm::exceptions::InvalidArgumentException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(this->computeInstantaneousRewardsHelper(optimalityType.get() == storm::logic::OptimalityType::Minimize, rewardPathFormula.getStepCount())));
        }

        template<typename ValueType>
        std::vector<ValueType> SparseMdpPrctlModelChecker<ValueType>::computeReachabilityRewardsHelper(bool minimize, storm::storage::BitVector const& targetStates, bool qualitative) const {
            // Only compute the result if the model has at least one reward model.
            STORM_LOG_THROW(model.hasStateRewards() || model.hasTransitionRewards(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula.");

            // Determine which states have a reward of infinity by definition.
            storm::storage::BitVector infinityStates;
            storm::storage::BitVector trueStates(model.getNumberOfStates(), true);
            if (minimize) {
                infinityStates = std::move(storm::utility::graph::performProb1A(model.getTransitionMatrix(), model.getTransitionMatrix().getRowGroupIndices(), model.getBackwardTransitions(), trueStates, targetStates));
            } else {
                infinityStates = std::move(storm::utility::graph::performProb1E(model.getTransitionMatrix(), model.getTransitionMatrix().getRowGroupIndices(), model.getBackwardTransitions(), trueStates, targetStates));
            }
            infinityStates.complement();
            storm::storage::BitVector maybeStates = ~targetStates & ~infinityStates;
            LOG4CPLUS_INFO(logger, "Found " << infinityStates.getNumberOfSetBits() << " 'infinity' states.");
            LOG4CPLUS_INFO(logger, "Found " << targetStates.getNumberOfSetBits() << " 'target' states.");
            LOG4CPLUS_INFO(logger, "Found " << maybeStates.getNumberOfSetBits() << " 'maybe' states.");

            // Create resulting vector.
            std::vector<ValueType> result(model.getNumberOfStates());

            // Check whether we need to compute exact rewards for some states.
            if (model.getInitialStates().isDisjointFrom(maybeStates)) {
                LOG4CPLUS_INFO(logger, "The rewards for the initial states were determined in a preprocessing step."
                    << " No exact rewards were computed.");
                // Set the values for all maybe-states to 1 to indicate that their reward values
                // are neither 0 nor infinity.
                storm::utility::vector::setVectorValues<ValueType>(result, maybeStates, storm::utility::one<ValueType>());
            } else {
                // In this case we have to compute the reward values for the remaining states.

                // We can eliminate the rows and columns from the original transition probability matrix for states
                // whose reward values are already known.
                storm::storage::SparseMatrix<ValueType> submatrix = model.getTransitionMatrix().getSubmatrix(true, maybeStates, maybeStates, false);

                // Prepare the right-hand side of the equation system. For entry i this corresponds to
                // the accumulated probability of going from state i to some 'yes' state.
                std::vector<ValueType> b(submatrix.getRowCount());

                if (model.hasTransitionRewards()) {
                    // If a transition-based reward model is available, we initialize the right-hand
                    // side to the vector resulting from summing the rows of the pointwise product
                    // of the transition probability matrix and the transition reward matrix.
                    std::vector<ValueType> pointwiseProductRowSumVector = model.getTransitionMatrix().getPointwiseProductRowSumVector(model.getTransitionRewardMatrix());
                    storm::utility::vector::selectVectorValues(b, maybeStates, model.getTransitionMatrix().getRowGroupIndices(), pointwiseProductRowSumVector);

                    if (model.hasStateRewards()) {
                        // If a state-based reward model is also available, we need to add this vector
                        // as well. As the state reward vector contains entries not just for the states
                        // that we still consider (i.e. maybeStates), we need to extract these values
                        // first.
                        std::vector<ValueType> subStateRewards(b.size());
                        storm::utility::vector::selectVectorValuesRepeatedly(subStateRewards, maybeStates, model.getTransitionMatrix().getRowGroupIndices(), model.getStateRewardVector());
                        storm::utility::vector::addVectorsInPlace(b, subStateRewards);
                    }
                } else {
                    // If only a state-based reward model is available, we take this vector as the
                    // right-hand side. As the state reward vector contains entries not just for the
                    // states that we still consider (i.e. maybeStates), we need to extract these values
                    // first.
                    storm::utility::vector::selectVectorValuesRepeatedly(b, maybeStates, model.getTransitionMatrix().getRowGroupIndices(), model.getStateRewardVector());
                }

                // Create vector for results for maybe states.
                std::vector<ValueType> x(maybeStates.getNumberOfSetBits());

                // Solve the corresponding system of equations.
                this->nondeterministicLinearEquationSolver->solveEquationSystem(minimize, submatrix, x, b);

                // Set values of resulting vector according to result.
                storm::utility::vector::setVectorValues<ValueType>(result, maybeStates, x);
            }

            // Set values of resulting vector that are known exactly.
            storm::utility::vector::setVectorValues(result, targetStates, storm::utility::zero<ValueType>());
            storm::utility::vector::setVectorValues(result, infinityStates, storm::utility::infinity<ValueType>());

            return result;
        }

        template<typename ValueType>
        std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<ValueType>::computeReachabilityRewards(storm::logic::ReachabilityRewardFormula const& rewardPathFormula, bool qualitative, boost::optional<storm::logic::OptimalityType> const& optimalityType) {
            STORM_LOG_THROW(optimalityType, storm::exceptions::InvalidArgumentException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
            std::unique_ptr<CheckResult> subResultPointer = this->check(rewardPathFormula.getSubformula());
            ExplicitQualitativeCheckResult& subResult = dynamic_cast<ExplicitQualitativeCheckResult&>(*subResultPointer);
            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(this->computeReachabilityRewardsHelper(optimalityType.get() == storm::logic::OptimalityType::Minimize, subResult.getTruthValues(), qualitative)));
        }

        template<typename ValueType>
        std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<ValueType>::checkBooleanLiteralFormula(storm::logic::BooleanLiteralFormula const& stateFormula) {
            if (stateFormula.isTrueFormula()) {
                return std::unique_ptr<CheckResult>(new ExplicitQualitativeCheckResult(storm::storage::BitVector(model.getNumberOfStates(), true)));
            } else {
                return std::unique_ptr<CheckResult>(new ExplicitQualitativeCheckResult(storm::storage::BitVector(model.getNumberOfStates())));
            }
        }

        template<typename ValueType>
        std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<ValueType>::checkAtomicLabelFormula(storm::logic::AtomicLabelFormula const& stateFormula) {
            return std::unique_ptr<CheckResult>(new ExplicitQualitativeCheckResult(model.getLabeledStates(stateFormula.getLabel())));
        }

        template class SparseMdpPrctlModelChecker<double>;
    }
}
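
For orientation, here is a minimal usage sketch of the checker interface added by the file above. It is not part of the change set: the wrapper function and the assumption that check() (inherited from AbstractModelChecker) is publicly reachable are illustrative; only the constructor, canHandle() and check() calls mirror code visible in the hunk, and the double instantiation is the one explicitly instantiated at the end of the file.

// Hypothetical usage sketch (not part of this diff); assumes check() from
// AbstractModelChecker is publicly accessible on the checker.
#include <memory>

#include "src/modelchecker/prctl/SparseMdpPrctlModelChecker.h"

std::unique_ptr<storm::modelchecker::CheckResult> checkOnMdp(storm::models::Mdp<double> const& mdp, storm::logic::Formula const& formula) {
    // The one-argument constructor selects the default nondeterministic
    // linear equation solver (see the first constructor above).
    storm::modelchecker::SparseMdpPrctlModelChecker<double> checker(mdp);

    // canHandle() rejects formulas outside the supported PCTL/reward fragment.
    if (!checker.canHandle(formula)) {
        return nullptr;
    }

    // check() dispatches to the compute*/check* methods defined in this file
    // and returns an explicit qualitative or quantitative result.
    return checker.check(formula);
}
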
@ -1,687 +1,48 @@ |
|||
/* |
|||
* SparseMdpPrctlModelChecker.h |
|||
* |
|||
* Created on: 15.02.2013 |
|||
* Author: Christian Dehnert |
|||
*/ |
|||
#ifndef STORM_MODELCHECKER_SPARSEMDPPRCTLMODELCHECKER_H_ |
|||
#define STORM_MODELCHECKER_SPARSEMDPPRCTLMODELCHECKER_H_ |
|||
|
|||
#ifndef STORM_MODELCHECKER_PRCTL_SPARSEMDPPRCTLMODELCHECKER_H_ |
|||
#define STORM_MODELCHECKER_PRCTL_SPARSEMDPPRCTLMODELCHECKER_H_ |
|||
|
|||
#include <vector> |
|||
#include <fstream> |
|||
|
|||
#include "src/modelchecker/prctl/AbstractModelChecker.h" |
|||
#include "src/solver/NondeterministicLinearEquationSolver.h" |
|||
#include "src/solver/LinearEquationSolver.h" |
|||
#include "src/modelchecker/AbstractModelChecker.h" |
|||
#include "src/models/Mdp.h" |
|||
#include "src/utility/vector.h" |
|||
#include "src/utility/graph.h" |
|||
#include "src/utility/solver.h" |
|||
#include "src/settings/SettingsManager.h" |
|||
#include "src/storage/TotalScheduler.h" |
|||
#include "src/solver/NondeterministicLinearEquationSolver.h" |
|||
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace prctl { |
|||
|
|||
template<class ValueType> |
|||
class SparseMdpPrctlModelChecker : public AbstractModelChecker { |
|||
public: |
|||
explicit SparseMdpPrctlModelChecker(storm::models::Mdp<ValueType> const& model); |
|||
explicit SparseMdpPrctlModelChecker(storm::models::Mdp<ValueType> const& model, std::shared_ptr<storm::solver::NondeterministicLinearEquationSolver<ValueType>> nondeterministicLinearEquationSolver); |
|||
|
|||
/*! |
|||
* This class represents the base class for all PRCTL model checkers for MDPs. |
|||
*/ |
|||
template<class Type> |
|||
class SparseMdpPrctlModelChecker : public AbstractModelChecker<Type> { |
|||
|
|||
public: |
|||
/*! |
|||
* Constructs a SparseMdpPrctlModelChecker with the given model. |
|||
* |
|||
* @param model The MDP to be checked. |
|||
*/ |
|||
explicit SparseMdpPrctlModelChecker(storm::models::Mdp<Type> const& model) : AbstractModelChecker<Type>(model), nondeterministicLinearEquationSolver(storm::utility::solver::getNondeterministicLinearEquationSolver<Type>()) { |
|||
// Intentionally left empty. |
|||
} |
|||
|
|||
explicit SparseMdpPrctlModelChecker(storm::models::Mdp<Type> const& model, std::shared_ptr<storm::solver::NondeterministicLinearEquationSolver<Type>> nondeterministicLinearEquationSolver) : AbstractModelChecker<Type>(model), nondeterministicLinearEquationSolver(nondeterministicLinearEquationSolver) { |
|||
// Intentionally left empty. |
|||
} |
|||
|
|||
/*! |
|||
* Copy constructs a SparseMdpPrctlModelChecker from the given model checker. In particular, this means that the newly |
|||
* constructed model checker will have the model of the given model checker as its associated model. |
|||
*/ |
|||
explicit SparseMdpPrctlModelChecker(storm::modelchecker::prctl::SparseMdpPrctlModelChecker<Type> const& modelchecker) |
|||
: AbstractModelChecker<Type>(modelchecker), nondeterministicLinearEquationSolver(storm::utility::solver::getNondeterministicLinearEquationSolver<Type>()) { |
|||
// Intentionally left empty. |
|||
} |
|||
|
|||
/*! |
|||
* Virtual destructor. Needs to be virtual, because this class has virtual methods. |
|||
*/ |
|||
virtual ~SparseMdpPrctlModelChecker() { |
|||
// Intentionally left empty. |
|||
} |
|||
|
|||
/*! |
|||
* Returns a constant reference to the MDP associated with this model checker. |
|||
* @returns A constant reference to the MDP associated with this model checker. |
|||
*/ |
|||
storm::models::Mdp<Type> const& getModel() const { |
|||
return AbstractModelChecker<Type>::template getModel<storm::models::Mdp<Type>>(); |
|||
} |
|||
|
|||
/*! |
|||
* Checks the given formula that is a P operator over a path formula featuring a value bound. |
|||
* |
|||
* @param formula The formula to check. |
|||
* @return The set of states satisfying the formula represented by a bit vector. |
|||
*/ |
|||
virtual storm::storage::BitVector checkProbabilisticBoundOperator(storm::properties::prctl::ProbabilisticBoundOperator<Type> const& formula) const override { |
|||
|
|||
// For P< and P<= the MDP satisfies the formula iff the probability maximizing scheduler is used. |
|||
// For P> and P>= " iff the probability minimizing " . |
|||
if(formula.getComparisonOperator() == storm::properties::LESS || formula.getComparisonOperator() == storm::properties::LESS_EQUAL) { |
|||
this->minimumOperatorStack.push(false); |
|||
} |
|||
else { |
|||
this->minimumOperatorStack.push(true); |
|||
} |
|||
|
|||
// First, we need to compute the probability for satisfying the path formula for each state. |
|||
std::vector<Type> quantitativeResult = formula.getChild()->check(*this, false); |
|||
|
|||
//Remove the minimizing operator entry from the stack. |
|||
this->minimumOperatorStack.pop(); |
|||
|
|||
// Create resulting bit vector that will hold the yes/no-answer for every state. |
|||
storm::storage::BitVector result(quantitativeResult.size()); |
|||
|
|||
// Now, we can compute which states meet the bound specified in this operator and set the |
|||
// corresponding bits to true in the resulting vector. |
|||
for (uint_fast64_t i = 0; i < quantitativeResult.size(); ++i) { |
|||
if (formula.meetsBound(quantitativeResult[i])) { |
|||
result.set(i, true); |
|||
} |
|||
} |
|||
|
|||
return result; |
|||
} |
|||
|
|||
/*! |
|||
* Checks the given formula that is an R operator over a reward formula featuring a value bound. |
|||
* |
|||
* @param formula The formula to check. |
|||
* @return The set of states satisfying the formula represented by a bit vector. |
|||
*/ |
|||
virtual storm::storage::BitVector checkRewardBoundOperator(const storm::properties::prctl::RewardBoundOperator<Type>& formula) const override { |
|||
|
|||
// For R< and R<= the MDP satisfies the formula iff the reward maximizing scheduler is used. |
|||
// For R> and R>= " iff the reward minimizing " . |
|||
if(formula.getComparisonOperator() == storm::properties::LESS || formula.getComparisonOperator() == storm::properties::LESS_EQUAL) { |
|||
this->minimumOperatorStack.push(false); |
|||
} |
|||
else { |
|||
this->minimumOperatorStack.push(true); |
|||
} |
|||
|
|||
// First, we need to compute the probability for satisfying the path formula for each state. |
|||
std::vector<Type> quantitativeResult = formula.getChild()->check(*this, false); |
|||
|
|||
//Remove the minimizing operator entry from the stack. |
|||
this->minimumOperatorStack.pop(); |
|||
|
|||
// Create resulting bit vector that will hold the yes/no-answer for every state. |
|||
storm::storage::BitVector result(quantitativeResult.size()); |
|||
|
|||
// Now, we can compute which states meet the bound specified in this operator and set the |
|||
// corresponding bits to true in the resulting vector. |
|||
for (uint_fast64_t i = 0; i < quantitativeResult.size(); ++i) { |
|||
if (formula.meetsBound(quantitativeResult[i])) { |
|||
result.set(i, true); |
|||
} |
|||
} |
|||
|
|||
return result; |
|||
} |
|||
|
|||
/*! |
|||
* Computes the probability to satisfy phi until psi within a limited number of steps for each state. |
|||
* |
|||
* @param phiStates A bit vector indicating which states satisfy phi. |
|||
* @param psiStates A bit vector indicating which states satisfy psi. |
|||
* @param stepBound The upper bound for the number of steps. |
|||
* @param qualitative A flag indicating whether the check only needs to be done qualitatively, i.e. if the |
|||
* results are only compared against the bounds 0 and 1. If set to true, this will most likely results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bounds 0 and 1. |
|||
* @return The probabilities for satisfying phi until psi within a limited number of steps for each state. |
|||
* If the qualitative flag is set, exact probabilities might not be computed. |
|||
*/ |
|||
std::vector<Type> checkBoundedUntil(storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, uint_fast64_t stepBound, bool qualitative) const { |
|||
// First test if it is specified if the minimum or maximum probabilities are to be computed. |
|||
if(this->minimumOperatorStack.empty()) { |
|||
LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); |
|||
throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; |
|||
} |
|||
|
|||
std::vector<Type> result(this->getModel().getNumberOfStates()); |
|||
|
|||
// Determine the states that have 0 probability of reaching the target states. |
|||
storm::storage::BitVector statesWithProbabilityGreater0; |
|||
if (this->minimumOperatorStack.top()) { |
|||
statesWithProbabilityGreater0 = storm::utility::graph::performProbGreater0A(this->getModel().getTransitionMatrix(), this->getModel().getTransitionMatrix().getRowGroupIndices(), this->getModel().getBackwardTransitions(), phiStates, psiStates, true, stepBound); |
|||
} else { |
|||
statesWithProbabilityGreater0 = storm::utility::graph::performProbGreater0E(this->getModel().getTransitionMatrix(), this->getModel().getTransitionMatrix().getRowGroupIndices(), this->getModel().getBackwardTransitions(), phiStates, psiStates, true, stepBound); |
|||
} |
|||
|
|||
// Check if we already know the result (i.e. probability 0) for all initial states and |
|||
// don't compute anything in this case. |
|||
if (this->getModel().getInitialStates().isDisjointFrom(statesWithProbabilityGreater0)) { |
|||
LOG4CPLUS_INFO(logger, "The probabilities for the initial states were determined in a preprocessing step." |
|||
<< " No exact probabilities were computed."); |
|||
// Set the values for all maybe-states to 0.5 to indicate that their probability values are not 0 (and |
|||
// not necessarily 1). |
|||
storm::utility::vector::setVectorValues(result, statesWithProbabilityGreater0, Type(0.5)); |
|||
} else { |
|||
// In this case we have have to compute the probabilities. |
|||
|
|||
// We can eliminate the rows and columns from the original transition probability matrix that have probability 0. |
|||
storm::storage::SparseMatrix<Type> submatrix = this->getModel().getTransitionMatrix().getSubmatrix(true, statesWithProbabilityGreater0, statesWithProbabilityGreater0, false); |
|||
|
|||
// Compute the new set of target states in the reduced system. |
|||
storm::storage::BitVector rightStatesInReducedSystem = psiStates % statesWithProbabilityGreater0; |
|||
|
|||
// Make all rows absorbing that satisfy the second sub-formula. |
|||
submatrix.makeRowGroupsAbsorbing(rightStatesInReducedSystem); |
|||
|
|||
// Create the vector with which to multiply. |
|||
std::vector<Type> subresult(statesWithProbabilityGreater0.getNumberOfSetBits()); |
|||
storm::utility::vector::setVectorValues(subresult, rightStatesInReducedSystem, storm::utility::constantOne<Type>()); |
|||
|
|||
this->nondeterministicLinearEquationSolver->performMatrixVectorMultiplication(this->minimumOperatorStack.top(), submatrix, subresult, nullptr, stepBound); |
|||
|
|||
// Set the values of the resulting vector accordingly. |
|||
storm::utility::vector::setVectorValues(result, statesWithProbabilityGreater0, subresult); |
|||
storm::utility::vector::setVectorValues(result, ~statesWithProbabilityGreater0, storm::utility::constantZero<Type>()); |
|||
} |
|||
|
|||
return result; |
|||
} |
|||
|
|||
/*! |
|||
* Checks the given formula that is a bounded-until formula. |
|||
* |
|||
* @param formula The formula to check. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bounds 0 and 1. If set to true, this will most likely results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bounds 0 and 1. |
|||
* @return The probabilities for the given formula to hold on every state of the model associated with this model |
|||
* checker. If the qualitative flag is set, exact probabilities might not be computed. |
|||
*/ |
|||
virtual std::vector<Type> checkBoundedUntil(storm::properties::prctl::BoundedUntil<Type> const& formula, bool qualitative) const { |
|||
return checkBoundedUntil(formula.getLeft()->check(*this), formula.getRight()->check(*this), formula.getBound(), qualitative); |
|||
} |
|||
|
|||
/*! |
|||
* Computes the probability to reach the given set of states in the next step for each state. |
|||
* |
|||
* @param nextStates A bit vector defining the states to reach in the next state. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bounds 0 and 1. If set to true, this will most likely results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bounds 0 and 1. |
|||
* @return The probabilities to reach the gien set of states in the next step for each state. If the |
|||
* qualitative flag is set, exact probabilities might not be computed. |
|||
*/ |
|||
virtual std::vector<Type> checkNext(storm::storage::BitVector const& nextStates, bool qualitative) const { |
|||
// First test if it is specified if the minimum or maximum probabilities are to be computed. |
|||
if(this->minimumOperatorStack.empty()) { |
|||
LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); |
|||
throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; |
|||
} |
|||
|
|||
// Create the vector with which to multiply and initialize it correctly. |
|||
std::vector<Type> result(this->getModel().getNumberOfStates()); |
|||
storm::utility::vector::setVectorValues(result, nextStates, storm::utility::constantOne<Type>()); |
|||
|
|||
this->nondeterministicLinearEquationSolver->performMatrixVectorMultiplication(this->minimumOperatorStack.top(), this->getModel().getTransitionMatrix(), result); |
|||
|
|||
return result; |
|||
} |
|||
|
|||
/*! |
|||
* Checks the given formula that is a next formula. |
|||
* |
|||
* @param formula The formula to check. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bounds 0 and 1. If set to true, this will most likely results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bounds 0 and 1. |
|||
* @return The probabilities for the given formula to hold on every state of the model associated with this model |
|||
* checker. If the qualitative flag is set, exact probabilities might not be computed. |
|||
*/ |
|||
virtual std::vector<Type> checkNext(const storm::properties::prctl::Next<Type>& formula, bool qualitative) const { |
|||
return checkNext(formula.getChild()->check(*this), qualitative); |
|||
} |
|||
|
|||
/*! |
|||
* Checks the given formula that is a bounded-eventually formula. |
|||
* |
|||
* @param formula The formula to check. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bounds 0 and 1. If set to true, this will most likely results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bounds 0 and 1. |
|||
* @returns The probabilities for the given formula to hold on every state of the model associated with this model |
|||
* checker. If the qualitative flag is set, exact probabilities might not be computed. |
|||
*/ |
|||
virtual std::vector<Type> checkBoundedEventually(const storm::properties::prctl::BoundedEventually<Type>& formula, bool qualitative) const { |
|||
// Create equivalent temporary bounded until formula and check it. |
|||
storm::properties::prctl::BoundedUntil<Type> temporaryBoundedUntilFormula(std::shared_ptr<storm::properties::prctl::Ap<Type>>(new storm::properties::prctl::Ap<Type>("true")), formula.getChild(), formula.getBound()); |
|||
return this->checkBoundedUntil(temporaryBoundedUntilFormula, qualitative); |
|||
} |
|||
|
|||
/*! |
|||
* Checks the given formula that is an eventually formula. |
|||
* |
|||
* @param formula The formula to check. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bounds 0 and 1. If set to true, this will most likely results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bounds 0 and 1. |
|||
* @returns The probabilities for the given formula to hold on every state of the model associated with this model |
|||
* checker. If the qualitative flag is set, exact probabilities might not be computed. |
|||
*/ |
|||
virtual std::vector<Type> checkEventually(const storm::properties::prctl::Eventually<Type>& formula, bool qualitative) const { |
|||
// Create equivalent temporary until formula and check it. |
|||
storm::properties::prctl::Until<Type> temporaryUntilFormula(std::shared_ptr<storm::properties::prctl::Ap<Type>>(new storm::properties::prctl::Ap<Type>("true")), formula.getChild()); |
|||
return this->checkUntil(temporaryUntilFormula, qualitative); |
|||
} |
|||
|
|||
/*! |
|||
* Checks the given formula that is a globally formula. |
|||
* |
|||
* @param formula The formula to check. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bounds 0 and 1. If set to true, this will most likely results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bounds 0 and 1. |
|||
* @returns The probabilities for the given formula to hold on every state of the model associated with this model |
|||
* checker. If the qualitative flag is set, exact probabilities might not be computed. |
|||
*/ |
|||
virtual std::vector<Type> checkGlobally(const storm::properties::prctl::Globally<Type>& formula, bool qualitative) const { |
|||
// Create "equivalent" temporary eventually formula and check it. |
|||
storm::properties::prctl::Eventually<Type> temporaryEventuallyFormula(std::shared_ptr<storm::properties::prctl::Not<Type>>(new storm::properties::prctl::Not<Type>(formula.getChild()))); |
|||
std::vector<Type> result = this->checkEventually(temporaryEventuallyFormula, qualitative); |
|||
|
|||
// Now subtract the resulting vector from the constant one vector to obtain final result. |
|||
storm::utility::vector::subtractFromConstantOneVector(result); |
|||
return result; |
|||
} |
|||
|
|||
/*! |
|||
* Check the given formula that is an until formula. |
|||
* |
|||
* @param formula The formula to check. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bounds 0 and 1. If set to true, this will most likely results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bounds 0 and 1. |
|||
* @param scheduler If <code>qualitative</code> is false and this vector is non-null and has as many elements as |
|||
* there are states in the MDP, this vector will represent a scheduler for the model that achieves the probability |
|||
* returned by model checking. To this end, the vector will hold the nondeterministic choice made for each state. |
|||
* @return The probabilities for the given formula to hold on every state of the model associated with this model |
|||
* checker. If the qualitative flag is set, exact probabilities might not be computed. |
|||
*/ |
|||
virtual std::vector<Type> checkUntil(const storm::properties::prctl::Until<Type>& formula, bool qualitative) const { |
|||
// First test if it is specified if the minimum or maximum probabilities are to be computed. |
|||
if(this->minimumOperatorStack.empty()) { |
|||
LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); |
|||
throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; |
|||
} |
|||
|
|||
return this->checkUntil(this->minimumOperatorStack.top(), formula.getLeft()->check(*this), formula.getRight()->check(*this), qualitative).first; |
|||
} |
|||
|
|||
/*! |
|||
* Computes the extremal probability to satisfy phi until psi for each state in the model. |
|||
* |
|||
* @param minimize If set, the probability is minimized and maximized otherwise. |
|||
* @param phiStates A bit vector indicating which states satisfy phi. |
|||
* @param psiStates A bit vector indicating which states satisfy psi. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bounds 0 and 1. If set to true, this will most likely results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bounds 0 and 1. |
|||
* @param scheduler If <code>qualitative</code> is false and this vector is non-null and has as many elements as |
|||
* there are states in the MDP, this vector will represent a scheduler for the model that achieves the probability |
|||
* returned by model checking. To this end, the vector will hold the nondeterministic choice made for each state. |
|||
* @return The probabilities for the satisfying phi until psi for each state of the model. If the |
|||
* qualitative flag is set, exact probabilities might not be computed. |
|||
*/ |
|||
static std::pair<std::vector<Type>, storm::storage::TotalScheduler> computeUnboundedUntilProbabilities(bool minimize, storm::storage::SparseMatrix<Type> const& transitionMatrix, storm::storage::SparseMatrix<Type> const& backwardTransitions, storm::storage::BitVector const& initialStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::shared_ptr<storm::solver::NondeterministicLinearEquationSolver<Type>> nondeterministicLinearEquationSolver, bool qualitative) { |
|||
size_t numberOfStates = phiStates.size(); |
|||
|
|||
// We need to identify the states which have to be taken out of the matrix, i.e. |
|||
// all states that have probability 0 and 1 of satisfying the until-formula. |
|||
std::pair<storm::storage::BitVector, storm::storage::BitVector> statesWithProbability01; |
|||
if (minimize) { |
|||
statesWithProbability01 = storm::utility::graph::performProb01Min(transitionMatrix, transitionMatrix.getRowGroupIndices(), backwardTransitions, phiStates, psiStates); |
|||
|
|||
} else { |
|||
statesWithProbability01 = storm::utility::graph::performProb01Max(transitionMatrix, transitionMatrix.getRowGroupIndices(), backwardTransitions, phiStates, psiStates); |
|||
} |
|||
storm::storage::BitVector statesWithProbability0 = std::move(statesWithProbability01.first); |
|||
storm::storage::BitVector statesWithProbability1 = std::move(statesWithProbability01.second); |
|||
storm::storage::BitVector maybeStates = ~(statesWithProbability0 | statesWithProbability1); |
|||
LOG4CPLUS_INFO(logger, "Found " << statesWithProbability0.getNumberOfSetBits() << " 'no' states."); |
|||
LOG4CPLUS_INFO(logger, "Found " << statesWithProbability1.getNumberOfSetBits() << " 'yes' states."); |
|||
LOG4CPLUS_INFO(logger, "Found " << maybeStates.getNumberOfSetBits() << " 'maybe' states."); |
|||
|
|||
// Create resulting vector. |
|||
std::vector<Type> result(numberOfStates); |
|||
|
|||
// Check whether we need to compute exact probabilities for some states. |
|||
if (initialStates.isDisjointFrom(maybeStates) || qualitative) { |
|||
if (qualitative) { |
|||
LOG4CPLUS_INFO(logger, "The formula was checked qualitatively. No exact probabilities were computed."); |
|||
} else { |
|||
LOG4CPLUS_INFO(logger, "The probabilities for the initial states were determined in a preprocessing step." |
|||
<< " No exact probabilities were computed."); |
|||
} |
|||
// Set the values for all maybe-states to 0.5 to indicate that their probability values |
|||
// are neither 0 nor 1. |
|||
storm::utility::vector::setVectorValues<Type>(result, maybeStates, Type(0.5)); |
|||
} else { |
|||
// In this case we have have to compute the probabilities. |
|||
|
|||
// First, we can eliminate the rows and columns from the original transition probability matrix for states |
|||
// whose probabilities are already known. |
|||
storm::storage::SparseMatrix<Type> submatrix = transitionMatrix.getSubmatrix(true, maybeStates, maybeStates, false); |
|||
|
|||
// Prepare the right-hand side of the equation system. For entry i this corresponds to |
|||
// the accumulated probability of going from state i to some 'yes' state. |
|||
std::vector<Type> b = transitionMatrix.getConstrainedRowGroupSumVector(maybeStates, statesWithProbability1); |
|||
|
|||
// Create vector for results for maybe states. |
|||
std::vector<Type> x(maybeStates.getNumberOfSetBits()); |
|||
|
|||
// Solve the corresponding system of equations. |
|||
nondeterministicLinearEquationSolver->solveEquationSystem(minimize, submatrix, x, b); |
|||
|
|||
// Set values of resulting vector according to result. |
|||
storm::utility::vector::setVectorValues<Type>(result, maybeStates, x); |
|||
} |
|||
|
|||
// Set values of resulting vector that are known exactly. |
|||
storm::utility::vector::setVectorValues<Type>(result, statesWithProbability0, storm::utility::constantZero<Type>()); |
|||
storm::utility::vector::setVectorValues<Type>(result, statesWithProbability1, storm::utility::constantOne<Type>()); |
|||
|
|||
// Finally, compute a scheduler that achieves the extramal value. |
|||
storm::storage::TotalScheduler scheduler = computeExtremalScheduler(minimize, transitionMatrix, result); |
|||
|
|||
return std::make_pair(result, scheduler); |
|||
} |
|||
|
|||
std::pair<std::vector<Type>, storm::storage::TotalScheduler> checkUntil(bool minimize, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative) const { |
|||
return computeUnboundedUntilProbabilities(minimize, this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getInitialStates(), phiStates, psiStates, this->nondeterministicLinearEquationSolver, qualitative); |
|||
} |
|||
|
|||
/*! |
|||
* Checks the given formula that is an instantaneous reward formula. |
|||
* |
|||
* @param formula The formula to check. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bound 0. If set to true, this will most likely results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bound 0. |
|||
* @return The reward values for the given formula for every state of the model associated with this model |
|||
* checker. If the qualitative flag is set, exact values might not be computed. |
|||
*/ |
|||
virtual std::vector<Type> checkInstantaneousReward(const storm::properties::prctl::InstantaneousReward<Type>& formula, bool qualitative) const { |
|||
// Only compute the result if the model has a state-based reward model. |
|||
if (!this->getModel().hasStateRewards()) { |
|||
LOG4CPLUS_ERROR(logger, "Missing (state-based) reward model for formula."); |
|||
throw storm::exceptions::InvalidPropertyException() << "Missing (state-based) reward model for formula."; |
|||
} |
|||
|
|||
// Now test whether it is specified if the minimum or maximum probabilities are to be computed. |
|||
if(this->minimumOperatorStack.empty()) { |
|||
LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); |
|||
throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; |
|||
} |
|||
|
|||
// Initialize result to state rewards of the model. |
|||
std::vector<Type> result(this->getModel().getStateRewardVector()); |
|||
|
|||
this->nondeterministicLinearEquationSolver->performMatrixVectorMultiplication(this->minimumOperatorStack.top(), this->getModel().getTransitionMatrix(), result, nullptr, formula.getBound()); |
|||
|
|||
return result; |
|||
} |
|||
|
|||
/*! |
|||
* Check the given formula that is a cumulative reward formula. |
|||
* |
|||
* @param formula The formula to check. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bound 0. If set to true, this will most likely results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bound 0. |
|||
* @return The reward values for the given formula for every state of the model associated with this model |
|||
* checker. If the qualitative flag is set, exact values might not be computed. |
|||
*/ |
|||
virtual std::vector<Type> checkCumulativeReward(const storm::properties::prctl::CumulativeReward<Type>& formula, bool qualitative) const { |
|||
// Only compute the result if the model has at least one reward model. |
|||
if (!this->getModel().hasStateRewards() && !this->getModel().hasTransitionRewards()) { |
|||
LOG4CPLUS_ERROR(logger, "Missing reward model for formula."); |
|||
throw storm::exceptions::InvalidPropertyException() << "Missing reward model for formula."; |
|||
} |
|||
|
|||
// Now test whether it is specified if the minimum or maximum probabilities are to be computed. |
|||
if(this->minimumOperatorStack.empty()) { |
|||
LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); |
|||
throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; |
|||
} |
|||
|
|||
// Compute the reward vector to add in each step based on the available reward models. |
|||
std::vector<Type> totalRewardVector; |
|||
if (this->getModel().hasTransitionRewards()) { |
|||
totalRewardVector = this->getModel().getTransitionMatrix().getPointwiseProductRowSumVector(this->getModel().getTransitionRewardMatrix()); |
|||
if (this->getModel().hasStateRewards()) { |
|||
storm::utility::vector::addVectorsInPlace(totalRewardVector, this->getModel().getStateRewardVector()); |
|||
} |
|||
} else { |
|||
totalRewardVector = std::vector<Type>(this->getModel().getStateRewardVector()); |
|||
} |
|||
|
|||
// Initialize result to either the state rewards of the model or the null vector. |
|||
std::vector<Type> result; |
|||
if (this->getModel().hasStateRewards()) { |
|||
result = std::vector<Type>(this->getModel().getStateRewardVector()); |
|||
} else { |
|||
result.resize(this->getModel().getNumberOfStates()); |
|||
} |
|||
|
|||
this->nondeterministicLinearEquationSolver->performMatrixVectorMultiplication(this->minimumOperatorStack.top(), this->getModel().getTransitionMatrix(), result, &totalRewardVector, static_cast<uint_fast64_t>(formula.getBound())); |
|||
|
|||
return result; |
|||
} |
|||
|
|||
/*! |
|||
* Checks the given formula that is a reachability reward formula. |
|||
* |
|||
* @param formula The formula to check. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bound 0. If set to true, this will most likely yield results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bound 0. |
|||
* @return The reward values for the given formula for every state of the model associated with this model |
|||
* checker. If the qualitative flag is set, exact values might not be computed. |
|||
*/ |
|||
virtual std::vector<Type> checkReachabilityReward(const storm::properties::prctl::ReachabilityReward<Type>& formula, bool qualitative) const { |
|||
// First check whether the formula specifies if minimum or maximum rewards are to be computed. |
|||
if(this->minimumOperatorStack.empty()) { |
|||
LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); |
|||
throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; |
|||
} |
|||
|
|||
return this->checkReachabilityReward(this->minimumOperatorStack.top(), formula.getChild()->check(*this), qualitative).first; |
|||
} |
|||
|
|||
/*! |
|||
* Computes the expected reachability reward that is gained before a target state is reached for each state. |
|||
* |
|||
* @param minimize If set, the reward is to be minimized and maximized otherwise. |
|||
* @param targetStates The target states before which rewards can be gained. |
|||
* @param qualitative A flag indicating whether the formula only needs to be evaluated qualitatively, i.e. if the |
|||
* results are only compared against the bound 0. If set to true, this will most likely yield results that are only |
|||
* qualitatively correct, i.e. do not represent the correct value, but only the correct relation with respect to the |
|||
* bound 0. |
|||
* @return A pair consisting of the expected reward values gained before a target state is reached for each state |
|||
* and a total scheduler that resolves the nondeterministic choices such that these values are attained. If the |
|||
* qualitative flag is set, exact values might not be computed. |
|||
*/ |
|||
virtual std::pair<std::vector<Type>, storm::storage::TotalScheduler> checkReachabilityReward(bool minimize, storm::storage::BitVector const& targetStates, bool qualitative) const { |
|||
// Only compute the result if the model has at least one reward model. |
|||
if (!(this->getModel().hasStateRewards() || this->getModel().hasTransitionRewards())) { |
|||
LOG4CPLUS_ERROR(logger, "Missing reward model for formula."); |
|||
throw storm::exceptions::InvalidPropertyException() << "Missing reward model for formula."; |
|||
} |
|||
|
|||
// Also check whether the formula specifies if minimum or maximum rewards are to be computed. |
|||
if(this->minimumOperatorStack.empty()) { |
|||
LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); |
|||
throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; |
|||
} |
|||
|
|||
// Determine which states have a reward of infinity by definition. |
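|||
// The expected reachability reward is infinite by definition for states from which the target states are not |
|||
// reached with probability one; the prob-1 precomputation below identifies the states with finite reward, so |
|||
// complementing it yields the infinity states. |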
|||
storm::storage::BitVector infinityStates; |
|||
storm::storage::BitVector trueStates(this->getModel().getNumberOfStates(), true); |
|||
if (minimize) { |
|||
infinityStates = std::move(storm::utility::graph::performProb1A(this->getModel().getTransitionMatrix(), this->getModel().getTransitionMatrix().getRowGroupIndices(), this->getModel().getBackwardTransitions(), trueStates, targetStates)); |
|||
} else { |
|||
infinityStates = std::move(storm::utility::graph::performProb1E(this->getModel().getTransitionMatrix(), this->getModel().getTransitionMatrix().getRowGroupIndices(), this->getModel().getBackwardTransitions(), trueStates, targetStates)); |
|||
} |
|||
infinityStates.complement(); |
|||
storm::storage::BitVector maybeStates = ~targetStates & ~infinityStates; |
|||
LOG4CPLUS_INFO(logger, "Found " << infinityStates.getNumberOfSetBits() << " 'infinity' states."); |
|||
LOG4CPLUS_INFO(logger, "Found " << targetStates.getNumberOfSetBits() << " 'target' states."); |
|||
LOG4CPLUS_INFO(logger, "Found " << maybeStates.getNumberOfSetBits() << " 'maybe' states."); |
|||
|
|||
// Create resulting vector. |
|||
std::vector<Type> result(this->getModel().getNumberOfStates()); |
|||
|
|||
// Check whether we need to compute exact rewards for some states. |
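|||
// If no initial state is a 'maybe' state, every initial state is already known to have reward zero (target states) or infinity, so the equation system below does not need to be solved. |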
|||
if (this->getModel().getInitialStates().isDisjointFrom(maybeStates)) { |
|||
LOG4CPLUS_INFO(logger, "The rewards for the initial states were determined in a preprocessing step." |
|||
<< " No exact rewards were computed."); |
|||
// Set the values for all maybe-states to 1 to indicate that their reward values |
|||
// are neither 0 nor infinity. |
|||
storm::utility::vector::setVectorValues<Type>(result, maybeStates, storm::utility::constantOne<Type>()); |
|||
} else { |
|||
// In this case we have to compute the reward values for the remaining states. |
|||
|
|||
// We can eliminate the rows and columns from the original transition probability matrix for states |
|||
// whose reward values are already known. |
|||
storm::storage::SparseMatrix<Type> submatrix = this->getModel().getTransitionMatrix().getSubmatrix(true, maybeStates, maybeStates, false); |
|||
|
|||
// Prepare the right-hand side of the equation system. For each row (choice), this corresponds to the |
|||
// expected reward obtained in one step when taking that choice. |
|||
std::vector<Type> b(submatrix.getRowCount()); |
|||
|
|||
if (this->getModel().hasTransitionRewards()) { |
|||
// If a transition-based reward model is available, we initialize the right-hand |
|||
// side to the vector resulting from summing the rows of the pointwise product |
|||
// of the transition probability matrix and the transition reward matrix. |
|||
std::vector<Type> pointwiseProductRowSumVector = this->getModel().getTransitionMatrix().getPointwiseProductRowSumVector(this->getModel().getTransitionRewardMatrix()); |
|||
storm::utility::vector::selectVectorValues(b, maybeStates, this->getModel().getTransitionMatrix().getRowGroupIndices(), pointwiseProductRowSumVector); |
|||
|
|||
if (this->getModel().hasStateRewards()) { |
|||
// If a state-based reward model is also available, we need to add this vector |
|||
// as well. As the state reward vector contains entries not just for the states |
|||
// that we still consider (i.e. maybeStates), we need to extract these values |
|||
// first. |
|||
std::vector<Type> subStateRewards(b.size()); |
|||
storm::utility::vector::selectVectorValuesRepeatedly(subStateRewards, maybeStates, this->getModel().getTransitionMatrix().getRowGroupIndices(), this->getModel().getStateRewardVector()); |
|||
storm::utility::vector::addVectorsInPlace(b, subStateRewards); |
|||
} |
|||
} else { |
|||
// If only a state-based reward model is available, we take this vector as the |
|||
// right-hand side. As the state reward vector contains entries not just for the |
|||
// states that we still consider (i.e. maybeStates), we need to extract these values |
|||
// first. |
|||
storm::utility::vector::selectVectorValuesRepeatedly(b, maybeStates, this->getModel().getTransitionMatrix().getRowGroupIndices(), this->getModel().getStateRewardVector()); |
|||
} |
|||
|
|||
// Create vector for results for maybe states. |
|||
std::vector<Type> x(maybeStates.getNumberOfSetBits()); |
|||
|
|||
// Solve the corresponding system of equations. |
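|||
// For the maybe states, this determines a solution x of the Bellman equations x[s] = min/max over the choices of s of ( b[choice] + sum_{s'} P(choice, s') * x[s'] ). |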
|||
this->nondeterministicLinearEquationSolver->solveEquationSystem(minimize, submatrix, x, b); |
|||
|
|||
// Set values of resulting vector according to result. |
|||
storm::utility::vector::setVectorValues<Type>(result, maybeStates, x); |
|||
} |
|||
|
|||
// Set values of resulting vector that are known exactly. |
|||
storm::utility::vector::setVectorValues(result, targetStates, storm::utility::constantZero<Type>()); |
|||
storm::utility::vector::setVectorValues(result, infinityStates, storm::utility::constantInfinity<Type>()); |
|||
|
|||
// Finally, compute a scheduler that achieves the extremal value. |
|||
storm::storage::TotalScheduler scheduler = computeExtremalScheduler(this->minimumOperatorStack.top(), this->getModel().getTransitionMatrix(), result, this->getModel().hasStateRewards() ? &this->getModel().getStateRewardVector() : nullptr, this->getModel().hasTransitionRewards() ? &this->getModel().getTransitionRewardMatrix() : nullptr); |
|||
|
|||
return std::make_pair(result, scheduler); |
|||
} |
|||
|
|||
protected: |
|||
/*! |
|||
* Computes a scheduler that resolves the nondeterministic choices such that the given model checking result is |
|||
* minimized/maximized in every state. |
|||
* |
|||
* @param minimize If set, all choices are resolved such that the solution value becomes minimal and maximal otherwise. |
|||
* @param transitionMatrix The transition matrix of the model. |
|||
* @param result The previously computed model checking result for all states. |
|||
* @param stateRewardVector If non-null, the state rewards are taken into account when evaluating the choices. |
|||
* @param transitionRewardMatrix If non-null, the transition rewards are taken into account when evaluating the choices. |
|||
* @return A total scheduler that attains the extremal value in every state. |
|||
*/ |
|||
static storm::storage::TotalScheduler computeExtremalScheduler(bool minimize, storm::storage::SparseMatrix<Type> const& transitionMatrix, std::vector<Type> const& result, std::vector<Type> const* stateRewardVector = nullptr, storm::storage::SparseMatrix<Type> const* transitionRewardMatrix = nullptr) { |
|||
std::vector<Type> temporaryResult(result.size()); |
|||
std::vector<Type> nondeterministicResult(transitionMatrix.getRowCount()); |
|||
transitionMatrix.multiplyWithVector(result, nondeterministicResult); |
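|||
// Each entry of nondeterministicResult now holds the value obtained by taking the corresponding choice once and then collecting the previously computed values in 'result'; one-step rewards (if present) are added below. |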
|||
|
|||
if (stateRewardVector != nullptr || transitionRewardMatrix != nullptr) { |
|||
std::vector<Type> totalRewardVector; |
|||
if (transitionRewardMatrix != nullptr) { |
|||
totalRewardVector = transitionMatrix.getPointwiseProductRowSumVector(*transitionRewardMatrix); |
|||
if (stateRewardVector != nullptr) { |
|||
std::vector<Type> stateRewards(totalRewardVector.size()); |
|||
storm::utility::vector::selectVectorValuesRepeatedly(stateRewards, storm::storage::BitVector(stateRewardVector->size(), true), transitionMatrix.getRowGroupIndices(), *stateRewardVector); |
|||
storm::utility::vector::addVectorsInPlace(totalRewardVector, stateRewards); |
|||
} |
|||
} else { |
|||
totalRewardVector.resize(nondeterministicResult.size()); |
|||
storm::utility::vector::selectVectorValuesRepeatedly(totalRewardVector, storm::storage::BitVector(stateRewardVector->size(), true), transitionMatrix.getRowGroupIndices(), *stateRewardVector); |
|||
} |
|||
storm::utility::vector::addVectorsInPlace(nondeterministicResult, totalRewardVector); |
|||
} |
|||
|
|||
std::vector<uint_fast64_t> choices(result.size()); |
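|||
// The reduction below records, for every state (row group), the index of a choice that attains the extremal value; these indices constitute the memoryless scheduler that is returned. |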
|||
|
|||
if (minimize) { |
|||
storm::utility::vector::reduceVectorMin(nondeterministicResult, temporaryResult, transitionMatrix.getRowGroupIndices(), &choices); |
|||
} else { |
|||
storm::utility::vector::reduceVectorMax(nondeterministicResult, temporaryResult, transitionMatrix.getRowGroupIndices(), &choices); |
|||
} |
|||
|
|||
return storm::storage::TotalScheduler(choices); |
|||
} |
|||
|
|||
/*! |
|||
* A solver that is used for solving systems of linear equations that are the result of nondeterministic choices. |
|||
*/ |
|||
std::shared_ptr<storm::solver::NondeterministicLinearEquationSolver<Type>> nondeterministicLinearEquationSolver; |
|||
}; |
|||
} // namespace prctl |
|||
// The implemented methods of the AbstractModelChecker interface. |
|||
virtual bool canHandle(storm::logic::Formula const& formula) const override; |
|||
virtual std::unique_ptr<CheckResult> computeBoundedUntilProbabilities(storm::logic::BoundedUntilFormula const& pathFormula, bool qualitative = false, boost::optional<storm::logic::OptimalityType> const& optimalityType = boost::optional<storm::logic::OptimalityType>()) override; |
|||
virtual std::unique_ptr<CheckResult> computeNextProbabilities(storm::logic::NextFormula const& pathFormula, bool qualitative = false, boost::optional<storm::logic::OptimalityType> const& optimalityType = boost::optional<storm::logic::OptimalityType>()) override; |
|||
virtual std::unique_ptr<CheckResult> computeUntilProbabilities(storm::logic::UntilFormula const& pathFormula, bool qualitative = false, boost::optional<storm::logic::OptimalityType> const& optimalityType = boost::optional<storm::logic::OptimalityType>()) override; |
|||
virtual std::unique_ptr<CheckResult> computeCumulativeRewards(storm::logic::CumulativeRewardFormula const& rewardPathFormula, bool qualitative = false, boost::optional<storm::logic::OptimalityType> const& optimalityType = boost::optional<storm::logic::OptimalityType>()) override; |
|||
virtual std::unique_ptr<CheckResult> computeInstantaneousRewards(storm::logic::InstantaneousRewardFormula const& rewardPathFormula, bool qualitative = false, boost::optional<storm::logic::OptimalityType> const& optimalityType = boost::optional<storm::logic::OptimalityType>()) override; |
|||
virtual std::unique_ptr<CheckResult> computeReachabilityRewards(storm::logic::ReachabilityRewardFormula const& rewardPathFormula, bool qualitative = false, boost::optional<storm::logic::OptimalityType> const& optimalityType = boost::optional<storm::logic::OptimalityType>()) override; |
|||
virtual std::unique_ptr<CheckResult> checkBooleanLiteralFormula(storm::logic::BooleanLiteralFormula const& stateFormula) override; |
|||
virtual std::unique_ptr<CheckResult> checkAtomicLabelFormula(storm::logic::AtomicLabelFormula const& stateFormula) override; |
|||
|
|||
// The methods that perform the actual checking. |
|||
std::vector<ValueType> computeBoundedUntilProbabilitiesHelper(bool minimize, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, uint_fast64_t stepBound); |
|||
std::vector<ValueType> computeNextProbabilitiesHelper(bool minimize, storm::storage::BitVector const& nextStates); |
|||
std::vector<ValueType> computeUntilProbabilitiesHelper(bool minimize, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative) const; |
|||
static std::vector<ValueType> computeUntilProbabilitiesHelper(bool minimize, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::shared_ptr<storm::solver::NondeterministicLinearEquationSolver<ValueType>> nondeterministicLinearEquationSolver, bool qualitative); |
|||
std::vector<ValueType> computeInstantaneousRewardsHelper(bool minimize, uint_fast64_t stepCount) const; |
|||
std::vector<ValueType> computeCumulativeRewardsHelper(bool minimize, uint_fast64_t stepBound) const; |
|||
std::vector<ValueType> computeReachabilityRewardsHelper(bool minimize, storm::storage::BitVector const& targetStates, bool qualitative) const; |
|||
|
|||
protected: |
|||
// The model this model checker is supposed to analyze. |
|||
storm::models::Mdp<ValueType> const& model; |
|||
|
|||
// A solver that is used for solving systems of linear equations that are the result of nondeterministic choices. |
|||
std::shared_ptr<storm::solver::NondeterministicLinearEquationSolver<ValueType>> nondeterministicLinearEquationSolver; |
|||
}; |
|||
} // namespace modelchecker |
|||
} // namespace storm |
|||
|
|||
#endif /* STORM_MODELCHECKER_PRCTL_SPARSEMDPPRCTLMODELCHECKER_H_ */ |
|||
#endif /* STORM_MODELCHECKER_SPARSEMDPPRCTLMODELCHECKER_H_ */ |
@ -1,148 +1,196 @@ |
|||
//#include "gtest/gtest.h"
|
|||
//#include "storm-config.h"
|
|||
//
|
|||
//#include "src/solver/NativeNondeterministicLinearEquationSolver.h"
|
|||
//#include "src/settings/SettingsManager.h"
|
|||
//#include "src/modelchecker/prctl/SparseMdpPrctlModelChecker.h"
|
|||
//#include "src/parser/AutoParser.h"
|
|||
//
|
|||
//TEST(SparseMdpPrctlModelCheckerTest, Dice) {
|
|||
// std::shared_ptr<storm::models::AbstractModel<double>> abstractModel = storm::parser::AutoParser::parseModel(STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.tra", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.lab", "", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.flip.trans.rew");
|
|||
//
|
|||
// ASSERT_EQ(abstractModel->getType(), storm::models::MDP);
|
|||
//
|
|||
// std::shared_ptr<storm::models::Mdp<double>> mdp = abstractModel->as<storm::models::Mdp<double>>();
|
|||
//
|
|||
// ASSERT_EQ(mdp->getNumberOfStates(), 169ull);
|
|||
// ASSERT_EQ(mdp->getNumberOfTransitions(), 436ull);
|
|||
//
|
|||
// storm::modelchecker::prctl::SparseMdpPrctlModelChecker<double> mc(*mdp, std::shared_ptr<storm::solver::NativeNondeterministicLinearEquationSolver<double>>(new storm::solver::NativeNondeterministicLinearEquationSolver<double>()));
|
|||
//
|
|||
// auto apFormula = std::make_shared<storm::properties::prctl::Ap<double>>("two");
|
|||
// auto eventuallyFormula = std::make_shared<storm::properties::prctl::Eventually<double>>(apFormula);
|
|||
//
|
|||
// std::vector<double> result = mc.checkOptimizingOperator(*eventuallyFormula, true);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 0.0277777612209320068), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*eventuallyFormula, false);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 0.0277777612209320068), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// apFormula = std::make_shared<storm::properties::prctl::Ap<double>>("three");
|
|||
// eventuallyFormula = std::make_shared<storm::properties::prctl::Eventually<double>>(apFormula);
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*eventuallyFormula, true);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 0.0555555224418640136), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*eventuallyFormula, false);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 0.0555555224418640136), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// apFormula = std::make_shared<storm::properties::prctl::Ap<double>>("four");
|
|||
// eventuallyFormula = std::make_shared<storm::properties::prctl::Eventually<double>>(apFormula);
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*eventuallyFormula, true);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 0.083333283662796020508), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*eventuallyFormula, false);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 0.083333283662796020508), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// apFormula = std::make_shared<storm::properties::prctl::Ap<double>>("done");
|
|||
// auto reachabilityRewardFormula = std::make_shared<storm::properties::prctl::ReachabilityReward<double>>(apFormula);
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*reachabilityRewardFormula, true);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 7.333329499), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*reachabilityRewardFormula, false);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 7.333329499), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// abstractModel = storm::parser::AutoParser::parseModel(STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.tra", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.lab", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.flip.state.rew", "");
|
|||
//
|
|||
// ASSERT_EQ(abstractModel->getType(), storm::models::MDP);
|
|||
//
|
|||
// std::shared_ptr<storm::models::Mdp<double>> stateRewardMdp = abstractModel->as<storm::models::Mdp<double>>();
|
|||
//
|
|||
// storm::modelchecker::prctl::SparseMdpPrctlModelChecker<double> stateRewardModelChecker(*stateRewardMdp, std::shared_ptr<storm::solver::NativeNondeterministicLinearEquationSolver<double>>(new storm::solver::NativeNondeterministicLinearEquationSolver<double>()));
|
|||
//
|
|||
// apFormula = std::make_shared<storm::properties::prctl::Ap<double>>("done");
|
|||
// reachabilityRewardFormula = std::make_shared<storm::properties::prctl::ReachabilityReward<double>>(apFormula);
|
|||
//
|
|||
// result = stateRewardModelChecker.checkOptimizingOperator(*reachabilityRewardFormula, true);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 7.333329499), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// result = stateRewardModelChecker.checkOptimizingOperator(*reachabilityRewardFormula, false);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 7.333329499), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// abstractModel = storm::parser::AutoParser::parseModel(STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.tra", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.lab", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.flip.state.rew", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.flip.trans.rew");
|
|||
//
|
|||
// ASSERT_EQ(abstractModel->getType(), storm::models::MDP);
|
|||
//
|
|||
// std::shared_ptr<storm::models::Mdp<double>> stateAndTransitionRewardMdp = abstractModel->as<storm::models::Mdp<double>>();
|
|||
//
|
|||
// storm::modelchecker::prctl::SparseMdpPrctlModelChecker<double> stateAndTransitionRewardModelChecker(*stateAndTransitionRewardMdp, std::shared_ptr<storm::solver::NativeNondeterministicLinearEquationSolver<double>>(new storm::solver::NativeNondeterministicLinearEquationSolver<double>()));
|
|||
//
|
|||
// apFormula = std::make_shared<storm::properties::prctl::Ap<double>>("done");
|
|||
// reachabilityRewardFormula = std::make_shared<storm::properties::prctl::ReachabilityReward<double>>(apFormula);
|
|||
//
|
|||
// result = stateAndTransitionRewardModelChecker.checkOptimizingOperator(*reachabilityRewardFormula, true);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 14.666658998), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// result = stateAndTransitionRewardModelChecker.checkOptimizingOperator(*reachabilityRewardFormula, false);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 14.666658998), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//}
|
|||
//
|
|||
//TEST(SparseMdpPrctlModelCheckerTest, AsynchronousLeader) {
|
|||
// std::shared_ptr<storm::models::AbstractModel<double>> abstractModel = storm::parser::AutoParser::parseModel(STORM_CPP_BASE_PATH "/examples/mdp/asynchronous_leader/leader4.tra", STORM_CPP_BASE_PATH "/examples/mdp/asynchronous_leader/leader4.lab", "", STORM_CPP_BASE_PATH "/examples/mdp/asynchronous_leader/leader4.trans.rew");
|
|||
//
|
|||
// ASSERT_EQ(storm::models::MDP, abstractModel->getType());
|
|||
//
|
|||
// std::shared_ptr<storm::models::Mdp<double>> mdp = abstractModel->as<storm::models::Mdp<double>>();
|
|||
//
|
|||
// ASSERT_EQ(3172ull, mdp->getNumberOfStates());
|
|||
// ASSERT_EQ(7144ull, mdp->getNumberOfTransitions());
|
|||
//
|
|||
// storm::modelchecker::prctl::SparseMdpPrctlModelChecker<double> mc(*mdp, std::shared_ptr<storm::solver::NativeNondeterministicLinearEquationSolver<double>>(new storm::solver::NativeNondeterministicLinearEquationSolver<double>()));
|
|||
//
|
|||
// auto apFormula = std::make_shared<storm::properties::prctl::Ap<double>>("elected");
|
|||
// auto eventuallyFormula = std::make_shared<storm::properties::prctl::Eventually<double>>(apFormula);
|
|||
//
|
|||
// std::vector<double> result = mc.checkOptimizingOperator(*eventuallyFormula, true);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 1), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*eventuallyFormula, false);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 1), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// apFormula = std::make_shared<storm::properties::prctl::Ap<double>>("elected");
|
|||
// auto boundedEventuallyFormula = std::make_shared<storm::properties::prctl::BoundedEventually<double>>(apFormula, 25);
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*boundedEventuallyFormula, true);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 0.0625), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*boundedEventuallyFormula, false);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 0.0625), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// apFormula = std::make_shared<storm::properties::prctl::Ap<double>>("elected");
|
|||
// auto reachabilityRewardFormula = std::make_shared<storm::properties::prctl::ReachabilityReward<double>>(apFormula);
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*reachabilityRewardFormula, true);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 4.285689611), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//
|
|||
// result = mc.checkOptimizingOperator(*reachabilityRewardFormula, false);
|
|||
//
|
|||
// ASSERT_LT(std::abs(result[0] - 4.285689611), storm::settings::nativeEquationSolverSettings().getPrecision());
|
|||
//}
|
|||
#include "gtest/gtest.h"
|
|||
#include "storm-config.h"
|
|||
|
|||
#include "src/logic/Formulas.h"
|
|||
#include "src/solver/NativeNondeterministicLinearEquationSolver.h"
|
|||
#include "src/modelchecker/prctl/SparseMdpPrctlModelChecker.h"
|
|||
#include "src/modelchecker/ExplicitQuantitativeCheckResult.h"
|
|||
#include "src/settings/SettingsManager.h"
|
|||
#include "src/parser/AutoParser.h"
|
|||
|
|||
TEST(SparseMdpPrctlModelCheckerTest, Dice) { |
|||
std::shared_ptr<storm::models::AbstractModel<double>> abstractModel = storm::parser::AutoParser::parseModel(STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.tra", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.lab", "", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.flip.trans.rew"); |
|||
|
|||
ASSERT_EQ(abstractModel->getType(), storm::models::MDP); |
|||
|
|||
std::shared_ptr<storm::models::Mdp<double>> mdp = abstractModel->as<storm::models::Mdp<double>>(); |
|||
|
|||
ASSERT_EQ(mdp->getNumberOfStates(), 169ull); |
|||
ASSERT_EQ(mdp->getNumberOfTransitions(), 436ull); |
|||
|
|||
storm::modelchecker::SparseMdpPrctlModelChecker<double> checker(*mdp, std::shared_ptr<storm::solver::NativeNondeterministicLinearEquationSolver<double>>(new storm::solver::NativeNondeterministicLinearEquationSolver<double>())); |
|||
|
|||
auto labelFormula = std::make_shared<storm::logic::AtomicLabelFormula>("two"); |
|||
auto eventuallyFormula = std::make_shared<storm::logic::EventuallyFormula>(labelFormula); |
|||
auto minProbabilityOperatorFormula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(storm::logic::OptimalityType::Minimize, eventuallyFormula); |
|||
|
|||
std::unique_ptr<storm::modelchecker::CheckResult> result = checker.check(*minProbabilityOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult1 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(0.0277777612209320068, quantitativeResult1[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
auto maxProbabilityOperatorFormula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(storm::logic::OptimalityType::Maximize, eventuallyFormula); |
|||
|
|||
result = checker.check(*maxProbabilityOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult2 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(0.0277777612209320068, quantitativeResult2[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
labelFormula = std::make_shared<storm::logic::AtomicLabelFormula>("three"); |
|||
eventuallyFormula = std::make_shared<storm::logic::EventuallyFormula>(labelFormula); |
|||
minProbabilityOperatorFormula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(storm::logic::OptimalityType::Minimize, eventuallyFormula); |
|||
|
|||
result = checker.check(*minProbabilityOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult3 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(0.0555555224418640136, quantitativeResult3[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
maxProbabilityOperatorFormula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(storm::logic::OptimalityType::Maximize, eventuallyFormula); |
|||
|
|||
result = checker.check(*maxProbabilityOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult4 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(0.0555555224418640136, quantitativeResult4[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
labelFormula = std::make_shared<storm::logic::AtomicLabelFormula>("four"); |
|||
eventuallyFormula = std::make_shared<storm::logic::EventuallyFormula>(labelFormula); |
|||
minProbabilityOperatorFormula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(storm::logic::OptimalityType::Minimize, eventuallyFormula); |
|||
|
|||
result = checker.check(*minProbabilityOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult5 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(0.083333283662796020508, quantitativeResult5[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
maxProbabilityOperatorFormula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(storm::logic::OptimalityType::Maximize, eventuallyFormula); |
|||
|
|||
result = checker.check(*maxProbabilityOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult6 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(0.083333283662796020508, quantitativeResult6[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
labelFormula = std::make_shared<storm::logic::AtomicLabelFormula>("done"); |
|||
auto reachabilityRewardFormula = std::make_shared<storm::logic::ReachabilityRewardFormula>(labelFormula); |
|||
auto minRewardOperatorFormula = std::make_shared<storm::logic::RewardOperatorFormula>(storm::logic::OptimalityType::Minimize, reachabilityRewardFormula); |
|||
|
|||
result = checker.check(*minRewardOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult7 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(7.333329499, quantitativeResult7[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
auto maxRewardOperatorFormula = std::make_shared<storm::logic::RewardOperatorFormula>(storm::logic::OptimalityType::Maximize, reachabilityRewardFormula); |
|||
|
|||
result = checker.check(*maxRewardOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult8 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(7.333329499, quantitativeResult8[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
abstractModel = storm::parser::AutoParser::parseModel(STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.tra", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.lab", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.flip.state.rew", ""); |
|||
|
|||
ASSERT_EQ(abstractModel->getType(), storm::models::MDP); |
|||
|
|||
std::shared_ptr<storm::models::Mdp<double>> stateRewardMdp = abstractModel->as<storm::models::Mdp<double>>(); |
|||
|
|||
storm::modelchecker::SparseMdpPrctlModelChecker<double> stateRewardModelChecker(*stateRewardMdp, std::shared_ptr<storm::solver::NativeNondeterministicLinearEquationSolver<double>>(new storm::solver::NativeNondeterministicLinearEquationSolver<double>())); |
|||
|
|||
labelFormula = std::make_shared<storm::logic::AtomicLabelFormula>("done"); |
|||
reachabilityRewardFormula = std::make_shared<storm::logic::ReachabilityRewardFormula>(labelFormula); |
|||
minRewardOperatorFormula = std::make_shared<storm::logic::RewardOperatorFormula>(storm::logic::OptimalityType::Minimize, reachabilityRewardFormula); |
|||
|
|||
result = stateRewardModelChecker.check(*minRewardOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult9 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(7.333329499, quantitativeResult9[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
maxRewardOperatorFormula = std::make_shared<storm::logic::RewardOperatorFormula>(storm::logic::OptimalityType::Maximize, reachabilityRewardFormula); |
|||
|
|||
result = stateRewardModelChecker.check(*maxRewardOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult10 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(7.333329499, quantitativeResult10[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
abstractModel = storm::parser::AutoParser::parseModel(STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.tra", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.lab", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.flip.state.rew", STORM_CPP_BASE_PATH "/examples/mdp/two_dice/two_dice.flip.trans.rew"); |
|||
|
|||
ASSERT_EQ(abstractModel->getType(), storm::models::MDP); |
|||
|
|||
std::shared_ptr<storm::models::Mdp<double>> stateAndTransitionRewardMdp = abstractModel->as<storm::models::Mdp<double>>(); |
|||
|
|||
storm::modelchecker::SparseMdpPrctlModelChecker<double> stateAndTransitionRewardModelChecker(*stateAndTransitionRewardMdp, std::shared_ptr<storm::solver::NativeNondeterministicLinearEquationSolver<double>>(new storm::solver::NativeNondeterministicLinearEquationSolver<double>())); |
|||
|
|||
labelFormula = std::make_shared<storm::logic::AtomicLabelFormula>("done"); |
|||
reachabilityRewardFormula = std::make_shared<storm::logic::ReachabilityRewardFormula>(labelFormula); |
|||
minRewardOperatorFormula = std::make_shared<storm::logic::RewardOperatorFormula>(storm::logic::OptimalityType::Minimize, reachabilityRewardFormula); |
|||
|
|||
result = stateAndTransitionRewardModelChecker.check(*minRewardOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult11 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(14.666658998, quantitativeResult11[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
maxRewardOperatorFormula = std::make_shared<storm::logic::RewardOperatorFormula>(storm::logic::OptimalityType::Maximize, reachabilityRewardFormula); |
|||
|
|||
result = stateAndTransitionRewardModelChecker.check(*maxRewardOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult12 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(14.666658998, quantitativeResult12[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
} |
|||
|
|||
TEST(SparseMdpPrctlModelCheckerTest, AsynchronousLeader) { |
|||
std::shared_ptr<storm::models::AbstractModel<double>> abstractModel = storm::parser::AutoParser::parseModel(STORM_CPP_BASE_PATH "/examples/mdp/asynchronous_leader/leader4.tra", STORM_CPP_BASE_PATH "/examples/mdp/asynchronous_leader/leader4.lab", "", STORM_CPP_BASE_PATH "/examples/mdp/asynchronous_leader/leader4.trans.rew"); |
|||
|
|||
ASSERT_EQ(storm::models::MDP, abstractModel->getType()); |
|||
|
|||
std::shared_ptr<storm::models::Mdp<double>> mdp = abstractModel->as<storm::models::Mdp<double>>(); |
|||
|
|||
ASSERT_EQ(3172ull, mdp->getNumberOfStates()); |
|||
ASSERT_EQ(7144ull, mdp->getNumberOfTransitions()); |
|||
|
|||
storm::modelchecker::SparseMdpPrctlModelChecker<double> checker(*mdp, std::shared_ptr<storm::solver::NativeNondeterministicLinearEquationSolver<double>>(new storm::solver::NativeNondeterministicLinearEquationSolver<double>())); |
|||
|
|||
auto labelFormula = std::make_shared<storm::logic::AtomicLabelFormula>("elected"); |
|||
auto eventuallyFormula = std::make_shared<storm::logic::EventuallyFormula>(labelFormula); |
|||
auto minProbabilityOperatorFormula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(storm::logic::OptimalityType::Minimize, eventuallyFormula); |
|||
|
|||
std::unique_ptr<storm::modelchecker::CheckResult> result = checker.check(*minProbabilityOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult1 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(1, quantitativeResult1[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
auto maxProbabilityOperatorFormula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(storm::logic::OptimalityType::Maximize, eventuallyFormula); |
|||
|
|||
result = checker.check(*maxProbabilityOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult2 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(1, quantitativeResult2[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
labelFormula = std::make_shared<storm::logic::AtomicLabelFormula>("elected"); |
|||
auto trueFormula = std::make_shared<storm::logic::BooleanLiteralFormula>(true); |
|||
auto boundedUntilFormula = std::make_shared<storm::logic::BoundedUntilFormula>(trueFormula, labelFormula, 25); |
|||
minProbabilityOperatorFormula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(storm::logic::OptimalityType::Minimize, boundedUntilFormula); |
|||
|
|||
result = checker.check(*minProbabilityOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult3 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(0.0625, quantitativeResult3[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
maxProbabilityOperatorFormula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(storm::logic::OptimalityType::Maximize, boundedUntilFormula); |
|||
|
|||
result = checker.check(*maxProbabilityOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult4 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(0.0625, quantitativeResult4[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
labelFormula = std::make_shared<storm::logic::AtomicLabelFormula>("elected"); |
|||
auto reachabilityRewardFormula = std::make_shared<storm::logic::ReachabilityRewardFormula>(labelFormula); |
|||
auto minRewardOperatorFormula = std::make_shared<storm::logic::RewardOperatorFormula>(storm::logic::OptimalityType::Minimize, reachabilityRewardFormula); |
|||
|
|||
result = checker.check(*minRewardOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult5 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(4.285689611, quantitativeResult5[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
|
|||
auto maxRewardOperatorFormula = std::make_shared<storm::logic::RewardOperatorFormula>(storm::logic::OptimalityType::Maximize, reachabilityRewardFormula); |
|||
|
|||
result = checker.check(*maxRewardOperatorFormula); |
|||
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult6 = result->asExplicitQuantitativeCheckResult<double>(); |
|||
|
|||
EXPECT_NEAR(4.285689611, quantitativeResult6[0], storm::settings::nativeEquationSolverSettings().getPrecision()); |
|||
} |