@@ -7,14 +7,17 @@
#include "storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.h"
#include "storm/modelchecker/propositional/SparsePropositionalModelChecker.h"
#include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h"
#include "storm/modelchecker/prctl/helper/BaierUpperRewardBoundsComputer.h"
#include "storm/modelchecker/results/ExplicitQuantitativeCheckResult.h"
#include "storm/storage/BitVector.h"
#include "storm/storage/MaximalEndComponentDecomposition.h"
#include "storm/utility/graph.h"
#include "storm/utility/FilteredRewardModel.h"
#include "storm/utility/vector.h"
#include "storm/logic/Formulas.h"
#include "storm/logic/CloneVisitor.h"
#include "storm/environment/solver/MinMaxSolverEnvironment.h"
#include "storm/transformer/EndComponentEliminator.h"
#include "storm/exceptions/UnexpectedException.h"
@@ -192,21 +195,53 @@ namespace storm {
}
}
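// Helper: builds the total reward vector (one entry per choice) of a Markov automaton for the reward model
// referenced by the formula. State rewards of Markovian states are weighted by the expected sojourn time
// 1/exitRate before being combined with the action rewards.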
template<typename ValueType>
std::vector<ValueType> getTotalRewardVector(storm::models::sparse::MarkovAutomaton<ValueType> const& model, storm::logic::Formula const& formula) {
    boost::optional<std::string> rewardModelName = formula.asRewardOperatorFormula().getOptionalRewardModelName();
    typename storm::models::sparse::MarkovAutomaton<ValueType>::RewardModelType const& rewardModel = rewardModelName.is_initialized() ? model.getRewardModel(rewardModelName.get()) : model.getUniqueRewardModel();
    // Get a reward model where the state rewards are scaled accordingly
    std::vector<ValueType> stateRewardWeights(model.getNumberOfStates(), storm::utility::zero<ValueType>());
    for (auto const markovianState : model.getMarkovianStates()) {
        stateRewardWeights[markovianState] = storm::utility::one<ValueType>() / model.getExitRate(markovianState);
    }
    return rewardModel.getTotalActionRewardVector(model.getTransitionMatrix(), stateRewardWeights);
}
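// Helper: builds the total reward vector (one entry per choice) of an MDP for the reward model referenced by
// the formula, or for the unique reward model if the formula does not name one.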
template<typename ValueType>
std::vector<ValueType> getTotalRewardVector(storm::models::sparse::Mdp<ValueType> const& model, storm::logic::Formula const& formula) {
    boost::optional<std::string> rewardModelName = formula.asRewardOperatorFormula().getOptionalRewardModelName();
    typename storm::models::sparse::Mdp<ValueType>::RewardModelType const& rewardModel = rewardModelName.is_initialized() ? model.getRewardModel(rewardModelName.get()) : model.getUniqueRewardModel();
    return rewardModel.getTotalRewardVector(model.getTransitionMatrix());
}
template<typename ModelType>
typename ModelType::ValueType const& DeterministicSchedsObjectiveHelper<ModelType>::getUpperValueBoundAtState(Environment const& env, uint64_t state) const {
    // return objective.upperResultBound.get();
    if (!upperResultBounds) {
        upperResultBounds = computeValueBounds(env, false, model, *objective.formula);
        storm::utility::vector::clip(upperResultBounds.get(), objective.lowerResultBound, objective.upperResultBound);
        STORM_LOG_THROW(!storm::utility::vector::hasInfinityEntry(upperResultBounds.get()), storm::exceptions::NotSupportedException, "The upper bound for objective " << *objective.originalFormula << " is infinity at some state. This is not supported.");
        auto upperResultBound = objective.upperResultBound;
        if (storm::utility::vector::hasInfinityEntry(upperResultBounds.get())) {
            STORM_LOG_THROW(objective.formula->isRewardOperatorFormula(), storm::exceptions::NotSupportedException, "The upper bound for objective " << *objective.originalFormula << " is infinity at some state. This is only supported for reachability rewards / total rewards.");
            STORM_LOG_THROW(objective.formula->getSubformula().isTotalRewardFormula() || objective.formula->getSubformula().isEventuallyFormula(), storm::exceptions::NotSupportedException, "The upper bound for objective " << *objective.originalFormula << " is infinity at some state. This is only supported for reachability rewards / total rewards.");
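            // Fallback: derive a finite bound by weighting, for each state, an upper bound on the expected
            // number of visits with the largest reward collectable at that state.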
            auto rewards = getTotalRewardVector(model, *objective.formula);
            auto zeroValuedStates = storm::utility::vector::filterZero(upperResultBounds.get());
            auto expVisits = computeUpperBoundOnExpectedVisitingTimes(model.getTransitionMatrix(), zeroValuedStates, ~zeroValuedStates, true);
            ValueType upperBound = storm::utility::zero<ValueType>();
            for (uint64_t state = 0; state < expVisits.size(); ++state) {
                ValueType maxReward = storm::utility::zero<ValueType>();
                for (auto row = model.getTransitionMatrix().getRowGroupIndices()[state], endRow = model.getTransitionMatrix().getRowGroupIndices()[state + 1]; row < endRow; ++row) {
                    maxReward = std::max(maxReward, rewards[row]);
                }
                upperBound += expVisits[state] * maxReward;
            }
            upperResultBound = upperBound;
        }
        storm::utility::vector::clip(upperResultBounds.get(), objective.lowerResultBound, upperResultBound);
    }
    return upperResultBounds.get()[state];
}
template<typename ModelType>
typename ModelType::ValueType const& DeterministicSchedsObjectiveHelper<ModelType>::getLowerValueBoundAtState(Environment const& env, uint64_t state) const {
    // return objective.lowerResultBound.get();
    if (!lowerResultBounds) {
        lowerResultBounds = computeValueBounds(env, true, model, *objective.formula);
        storm::utility::vector::clip(lowerResultBounds.get(), objective.lowerResultBound, objective.upperResultBound);
@@ -224,6 +259,79 @@ namespace storm {
bool DeterministicSchedsObjectiveHelper<ModelType>::isTotalRewardObjective() const {
    return objective.formula->isRewardOperatorFormula() && objective.formula->getSubformula().isTotalRewardFormula();
}
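// Computes, for each state of the given model, an upper bound on the expected number of times that state is
// visited before a bottom state is reached; bottom states themselves are assigned the value zero.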
template<typename ModelType>
std::vector<typename ModelType::ValueType> DeterministicSchedsObjectiveHelper<ModelType>::computeUpperBoundOnExpectedVisitingTimes(storm::storage::SparseMatrix<ValueType> const& modelTransitions, storm::storage::BitVector const& bottomStates, storm::storage::BitVector const& nonBottomStates, bool hasEndComponents) {
    storm::storage::SparseMatrix<ValueType> transitions;
    std::vector<ValueType> probabilitiesToBottomStates;
    boost::optional<std::vector<uint64_t>> modelToSubsystemStateMapping;
    if (hasEndComponents) {
        // We assume that end components will always be left (or form a sink state).
        // The approach is to compute a lower bound lpath on the probability of a path that leaves the end component.
        // Then we use end component elimination and add a self loop on the 'ec' states with probability 1-lpath.
        storm::storage::MaximalEndComponentDecomposition<ValueType> mecs(modelTransitions, modelTransitions.transpose(true), nonBottomStates);
        auto mecElimination = storm::transformer::EndComponentEliminator<ValueType>::transform(modelTransitions, mecs, nonBottomStates, nonBottomStates, true);
        transitions = std::move(mecElimination.matrix);
        modelToSubsystemStateMapping = std::move(mecElimination.oldToNewStateMapping);
        probabilitiesToBottomStates.reserve(transitions.getRowCount());
        for (uint64_t row = 0; row < transitions.getRowCount(); ++row) {
            probabilitiesToBottomStates.push_back(modelTransitions.getConstrainedRowSum(mecElimination.newToOldRowMapping[row], bottomStates));
        }
        // Replace the 'self loop probability' for mec states by 1-lpath.
        for (auto const& mec : mecs) {
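            // lpath is the product, over all states of the mec, of the smallest positive transition probability
            // available at that state.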
            ValueType lpath = storm::utility::one<ValueType>();
            for (auto const& stateChoices : mec) {
                ValueType minProb = storm::utility::one<ValueType>();
                for (auto const& choice : stateChoices.second) {
                    for (auto const& transition : modelTransitions.getRow(choice)) {
                        if (!storm::utility::isZero(transition.getValue())) {
                            minProb = std::min(minProb, transition.getValue());
                        }
                    }
                }
                lpath *= minProb;
            }
            STORM_LOG_ASSERT(!storm::utility::isZero(lpath), "unexpected value of lpath");
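            // Find the probability-one self-loop row introduced for the collapsed mec state and redistribute its
            // probability mass: stay with probability 1-lpath, move towards the bottom states with probability lpath.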
            uint64_t mecState = mecElimination.oldToNewStateMapping[mec.begin()->first];
            bool foundEntry = false;
            for (uint64_t mecChoice = transitions.getRowGroupIndices()[mecState]; mecChoice < transitions.getRowGroupIndices()[mecState + 1]; ++mecChoice) {
                if (transitions.getRow(mecChoice).getNumberOfEntries() == 1) {
                    auto& entry = *transitions.getRow(mecChoice).begin();
                    if (entry.getColumn() == mecState && storm::utility::isOne(entry.getValue())) {
                        entry.setValue(storm::utility::one<ValueType>() - lpath);
                        foundEntry = true;
                        probabilitiesToBottomStates[mecChoice] = lpath;
                        break;
                    }
                }
            }
            STORM_LOG_THROW(foundEntry, storm::exceptions::UnexpectedException, "Unable to find self loop entry at mec state.");
        }
    } else {
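        // Without end components, restrict the matrix to the non-bottom states directly.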
        transitions = modelTransitions.getSubmatrix(true, nonBottomStates, nonBottomStates);
        probabilitiesToBottomStates = modelTransitions.getConstrainedRowGroupSumVector(nonBottomStates, bottomStates);
    }
    auto subsystemBounds = storm::modelchecker::helper::BaierUpperRewardBoundsComputer<ValueType>::computeUpperBoundOnExpectedVisitingTimes(transitions, probabilitiesToBottomStates);
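    // Map the bounds computed for the (possibly ec-eliminated) subsystem back to the states of the original model.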
    uint64_t subsystemState = 0;
    std::vector<ValueType> visitingTimesUpperBounds;
    visitingTimesUpperBounds.reserve(bottomStates.size());
    for (uint64_t state = 0; state < bottomStates.size(); ++state) {
        if (bottomStates.get(state)) {
            visitingTimesUpperBounds.push_back(storm::utility::zero<ValueType>());
        } else {
            if (modelToSubsystemStateMapping) {
                visitingTimesUpperBounds.push_back(subsystemBounds[modelToSubsystemStateMapping.get()[state]]);
            } else {
                visitingTimesUpperBounds.push_back(subsystemBounds[subsystemState]);
            }
            ++subsystemState;
        }
    }
    assert(subsystemState == subsystemBounds.size());
    return visitingTimesUpperBounds;
}
template class DeterministicSchedsObjectiveHelper<storm::models::sparse::Mdp<double>>;
template class DeterministicSchedsObjectiveHelper<storm::models::sparse::Mdp<storm::RationalNumber>>;