#include "src/utility/graph.h"
#include "src/exceptions/InvalidPropertyException.h"
#include "src/exceptions/UnexpectedException.h"
#include "src/exceptions/NotImplementedException.h"
namespace storm {
    namespace modelchecker {
        template<typename SparseMdpModelType>
        SparseMultiObjectiveModelCheckerInformation<SparseMdpModelType> SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocess(storm::logic::MultiObjectiveFormula const& originalFormula, SparseMdpModelType originalModel) {
            Information info(std::move(originalModel));
            // Initialize the state mapping.
            info.newToOldStateIndexMapping = storm::utility::vector::buildVectorForRange(0, info.model.getNumberOfStates());
            // Gather information regarding the individual objectives.
            for (auto const& subFormula : originalFormula.getSubFormulas()) {
                addObjective(subFormula, info);
            }
            // Find out whether negated rewards should be considered.
            setWhetherNegatedRewardsAreConsidered(info);
            // Invoke preprocessing on the individual objectives.
            for (auto& obj : info.objectives) {
                STORM_LOG_DEBUG("Preprocessing objective " << *obj.originalFormula << ".");
                if (obj.originalFormula->isProbabilityOperatorFormula()) {
                    preprocessFormula(obj.originalFormula->asProbabilityOperatorFormula(), info, obj);
                } else if (obj.originalFormula->isRewardOperatorFormula()) {
                    preprocessFormula(obj.originalFormula->asRewardOperatorFormula(), info, obj);
                } else {
                    STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the subformula " << *obj.originalFormula << " of " << originalFormula << " because it is not supported.");
                }
            }
            // We can now remove all original reward models to save some memory.
            std::set<std::string> origRewardModels = originalFormula.getReferencedRewardModels();
            for (auto const& rewModel : origRewardModels) {
                info.model.removeRewardModel(rewModel);
            }
            return info;
        }
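
        // A minimal usage sketch (hypothetical client code; buildMdp() and
        // parseMultiObjectiveFormula() are assumed helpers, not part of this file):
        //
        //     storm::models::sparse::Mdp<double> mdp = buildMdp();
        //     storm::logic::MultiObjectiveFormula formula = parseMultiObjectiveFormula();
        //     auto info = SparseMdpMultiObjectivePreprocessingHelper<storm::models::sparse::Mdp<double>>::preprocess(formula, std::move(mdp));
        //     // info.model now carries one fresh reward model per objective.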
        template<typename SparseMdpModelType>
        void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::addObjective(std::shared_ptr<storm::logic::Formula const> const& formula, Information& info) {
            STORM_LOG_THROW(formula->isOperatorFormula(), storm::exceptions::InvalidPropertyException, "Expected an OperatorFormula as subformula of a multi-objective query but got " << *formula << ".");
            typename Information::ObjectiveInformation objective;
            objective.originalFormula = formula;
            storm::logic::OperatorFormula const& opFormula = formula->asOperatorFormula();
            if (opFormula.hasBound()) {
                objective.threshold = opFormula.getBound().threshold;
                // Note that we minimize if the comparison type is an upper bound, since we are interested in the EXISTENCE of a scheduler.
                objective.originalFormulaMinimizes = !storm::logic::isLowerBound(opFormula.getBound().comparisonType);
            } else if (opFormula.hasOptimalityType()) {
                objective.originalFormulaMinimizes = storm::solver::minimize(opFormula.getOptimalityType());
            } else {
                STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Current objective " << opFormula << " does not specify whether to minimize or maximize.");
            }
            objective.rewardModelName = "objective" + std::to_string(info.objectives.size());
            // Make sure the new reward model gets a unique name.
            while (info.model.hasRewardModel(objective.rewardModelName)) {
                objective.rewardModelName = "_" + objective.rewardModelName;
            }
            setStepBoundOfObjective(objective);
            info.objectives.push_back(std::move(objective));
        }
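
        // Example: for the objective P<=0.3 [ F "bad" ], hasBound() holds, the
        // threshold becomes 0.3, and originalFormulaMinimizes is set to true
        // because "<=" is an upper bound (we ask for the EXISTENCE of a scheduler
        // staying below it). For Pmax=? [ F "goal" ], only the optimality type is
        // given and originalFormulaMinimizes is false.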
        template<typename SparseMdpModelType>
        void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::setStepBoundOfObjective(typename Information::ObjectiveInformation& objective) {
            if (objective.originalFormula->isProbabilityOperatorFormula()) {
                storm::logic::Formula const& f = objective.originalFormula->asProbabilityOperatorFormula().getSubformula();
                if (f.isBoundedUntilFormula()) {
                    STORM_LOG_THROW(f.asBoundedUntilFormula().hasDiscreteTimeBound(), storm::exceptions::InvalidPropertyException, "Expected a boundedUntilFormula with a discrete time bound but got " << f << ".");
                    objective.stepBound = f.asBoundedUntilFormula().getDiscreteTimeBound();
                }
            } else if (objective.originalFormula->isRewardOperatorFormula()) {
                storm::logic::Formula const& f = objective.originalFormula->asRewardOperatorFormula().getSubformula();
                if (f.isCumulativeRewardFormula()) {
                    STORM_LOG_THROW(f.asCumulativeRewardFormula().hasDiscreteTimeBound(), storm::exceptions::InvalidPropertyException, "Expected a cumulativeRewardFormula with a discrete time bound but got " << f << ".");
                    objective.stepBound = f.asCumulativeRewardFormula().getDiscreteTimeBound();
                }
            } else {
                STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Expected a Probability or Reward OperatorFormula but got " << *objective.originalFormula << ".");
            }
        }
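
        // Example: P>=0.9 [ a U<=14 b ] yields stepBound = 14, whereas the
        // unbounded P>=0.9 [ a U b ] leaves stepBound unset.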
        template<typename SparseMdpModelType>
        void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::setWhetherNegatedRewardsAreConsidered(Information& info) {
            // Negated rewards are considered whenever there is an unbounded reward objective that requires to minimize.
            // If there is no unbounded reward objective, we make a majority vote.
            uint_fast64_t numOfMinimizingObjectives = 0;
            for (auto const& obj : info.objectives) {
                if (obj.originalFormula->isRewardOperatorFormula() && !obj.stepBound) {
                    info.negatedRewardsConsidered = obj.originalFormulaMinimizes;
                    return;
                }
                numOfMinimizingObjectives += obj.originalFormulaMinimizes ? 1 : 0;
            }
            // Reaching this point means that we make a majority vote.
            info.negatedRewardsConsidered = (numOfMinimizingObjectives * 2) > info.objectives.size();
        }
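
        // Example of the majority vote: for the objectives (Pmin, Pmin, Pmax)
        // without any unbounded reward objective, numOfMinimizingObjectives = 2,
        // and since 2 * 2 > 3, negated rewards are considered.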
        template<typename SparseMdpModelType>
        void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::ProbabilityOperatorFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
            // Check if we need to complement the property, e.g., P<0.3 [ F "a" ] ---> P>=0.7 [ G !"a" ].
            // This is the case if the formula requires to minimize while non-negated rewards are considered, or vice versa.
            if (info.negatedRewardsConsidered != currentObjective.originalFormulaMinimizes) {
                // TODO: support inverting properties.
                STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Inverting of properties is not supported yet.");
            }
            if (info.negatedRewardsConsidered && currentObjective.threshold) {
                *(currentObjective.threshold) *= -storm::utility::one<ValueType>();
            }
            // Invoke preprocessing for the subformula.
            if (formula.getSubformula().isUntilFormula()) {
                preprocessFormula(formula.getSubformula().asUntilFormula(), info, currentObjective);
            } else if (formula.getSubformula().isBoundedUntilFormula()) {
                preprocessFormula(formula.getSubformula().asBoundedUntilFormula(), info, currentObjective);
            } else if (formula.getSubformula().isGloballyFormula()) {
                preprocessFormula(formula.getSubformula().asGloballyFormula(), info, currentObjective);
            } else if (formula.getSubformula().isEventuallyFormula()) {
                preprocessFormula(formula.getSubformula().asEventuallyFormula(), info, currentObjective);
            } else {
                STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
            }
        }
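
        // Example of the threshold flip above: with negated rewards, a minimizing
        // objective P<=0.3 [...] gets its threshold turned into -0.3, matching the
        // negated reward values introduced by the subformula preprocessing.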
        template<typename SparseMdpModelType>
        void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::RewardOperatorFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
            // Make sure that our decision for negated rewards is consistent with the formula.
            STORM_LOG_THROW(info.negatedRewardsConsidered == currentObjective.originalFormulaMinimizes, storm::exceptions::InvalidPropertyException, "Decided to consider " << (info.negatedRewardsConsidered ? "negated" : "non-negated") << " rewards, but the formula " << formula << (currentObjective.originalFormulaMinimizes ? " minimizes" : " maximizes") << ". Reward objectives should either all minimize or all maximize.");
            // Check if the reward model is uniquely specified.
            STORM_LOG_THROW((formula.hasRewardModelName() && info.model.hasRewardModel(formula.getRewardModelName())) || info.model.hasUniqueRewardModel(), storm::exceptions::InvalidPropertyException, "The reward model is not unique and the formula " << formula << " does not specify a reward model.");
            if (info.negatedRewardsConsidered && currentObjective.threshold) {
                *(currentObjective.threshold) *= -storm::utility::one<ValueType>();
            }
            // Invoke preprocessing for the subformula.
            if (formula.getSubformula().isEventuallyFormula()) {
                preprocessFormula(formula.getSubformula().asEventuallyFormula(), info, currentObjective, formula.getOptionalRewardModelName());
            } else if (formula.getSubformula().isCumulativeRewardFormula()) {
                preprocessFormula(formula.getSubformula().asCumulativeRewardFormula(), info, currentObjective, formula.getOptionalRewardModelName());
            } else {
                STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
            }
        }
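
        // Example: R<=4.2 [ F "done" ] works without naming a reward model as long
        // as the model has a unique reward model; otherwise the formula must
        // reference an existing one, e.g. R{"energy"}<=4.2 [ F "done" ].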
        template<typename SparseMdpModelType>
        void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::UntilFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
            CheckTask<storm::logic::Formula> phiTask(formula.getLeftSubformula());
            CheckTask<storm::logic::Formula> psiTask(formula.getRightSubformula());
            storm::modelchecker::SparsePropositionalModelChecker<SparseMdpModelType> mc(info.model);
            STORM_LOG_THROW(mc.canHandle(phiTask) && mc.canHandle(psiTask), storm::exceptions::InvalidPropertyException, "The subformulas of " << formula << " should be propositional.");
            storm::storage::BitVector phiStates = mc.check(phiTask)->asExplicitQualitativeCheckResult().getTruthValuesVector();
            storm::storage::BitVector psiStates = mc.check(psiTask)->asExplicitQualitativeCheckResult().getTruthValuesVector();

            auto duplicatorResult = storm::transformer::StateDuplicator<SparseMdpModelType>::transform(info.model, ~phiStates | psiStates);
            info.model = std::move(*duplicatorResult.model);
            // duplicatorResult.newToOldStateIndexMapping now refers to the indices of the model we had before preprocessing this formula.
            // This might not be the actual original model.
            for (auto& originalModelStateIndex : duplicatorResult.newToOldStateIndexMapping) {
                originalModelStateIndex = info.newToOldStateIndexMapping[originalModelStateIndex];
            }
            info.newToOldStateIndexMapping = std::move(duplicatorResult.newToOldStateIndexMapping);

            // Build a state-action reward vector that gives (one * transitionProbability) reward whenever a transition leads from the first copy to a psi state.
            storm::storage::BitVector newPsiStates(info.model.getNumberOfStates(), false);
            for (auto const& oldPsiState : psiStates) {
                // Note that psi states are always located in the second copy.
                newPsiStates.set(duplicatorResult.secondCopyOldToNewStateIndexMapping[oldPsiState], true);
            }
            std::vector<ValueType> objectiveRewards(info.model.getTransitionMatrix().getRowCount(), storm::utility::zero<ValueType>());
            for (auto const& firstCopyState : duplicatorResult.firstCopy) {
                for (uint_fast64_t row = info.model.getTransitionMatrix().getRowGroupIndices()[firstCopyState]; row < info.model.getTransitionMatrix().getRowGroupIndices()[firstCopyState + 1]; ++row) {
                    objectiveRewards[row] = info.model.getTransitionMatrix().getConstrainedRowSum(row, newPsiStates);
                }
            }
            if (info.negatedRewardsConsidered) {
                storm::utility::vector::scaleVectorInPlace(objectiveRewards, -storm::utility::one<ValueType>());
            }
            info.model.addRewardModel(currentObjective.rewardModelName, RewardModelType(boost::none, objectiveRewards));
        }
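
        // Intuition for the reward construction above: after duplication, a
        // first-copy action that moves to a (second-copy) psi state with
        // probability p earns reward p, while the second copy collects no reward.
        // The expected total reward under a scheduler therefore equals the
        // probability of satisfying phi U psi, turning the probability objective
        // into a total-reward objective.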
        template<typename SparseMdpModelType>
        void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::BoundedUntilFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
            preprocessFormula(storm::logic::UntilFormula(formula.getLeftSubformula().asSharedPointer(), formula.getRightSubformula().asSharedPointer()), info, currentObjective);
        }
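
        // The step bound of the bounded until was already stored in
        // setStepBoundOfObjective, so the reduction above can reuse the unbounded
        // until construction; the bound is presumably enforced later, when the
        // resulting reward objective is checked over a finite horizon.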
        template<typename SparseMdpModelType>
        void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::GloballyFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
            // TODO
            STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Globally is not yet implemented.");
        }
        template<typename SparseMdpModelType>
        void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::EventuallyFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective, boost::optional<std::string> const& optionalRewardModelName) {
            if (formula.isReachabilityProbabilityFormula()) {
                preprocessFormula(storm::logic::UntilFormula(storm::logic::Formula::getTrueFormula(), formula.getSubformula().asSharedPointer()), info, currentObjective);
                return;
            }
            STORM_LOG_THROW(formula.isReachabilityRewardFormula(), storm::exceptions::InvalidPropertyException, "The formula " << formula << " considers neither reachability probabilities nor reachability rewards.");

            CheckTask<storm::logic::Formula> targetTask(formula.getSubformula());
            storm::modelchecker::SparsePropositionalModelChecker<SparseMdpModelType> mc(info.model);
            STORM_LOG_THROW(mc.canHandle(targetTask), storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " should be propositional.");
            storm::storage::BitVector targetStates = mc.check(targetTask)->asExplicitQualitativeCheckResult().getTruthValuesVector();

            STORM_LOG_WARN("There is no check for reward finiteness yet."); // TODO
            auto duplicatorResult = storm::transformer::StateDuplicator<SparseMdpModelType>::transform(info.model, targetStates);
            info.model = std::move(*duplicatorResult.model);
            // duplicatorResult.newToOldStateIndexMapping now refers to the indices of the model we had before preprocessing this formula.
            // This might not be the actual original model.
            for (auto& originalModelStateIndex : duplicatorResult.newToOldStateIndexMapping) {
                originalModelStateIndex = info.newToOldStateIndexMapping[originalModelStateIndex];
            }
            info.newToOldStateIndexMapping = std::move(duplicatorResult.newToOldStateIndexMapping);

            // Add a reward model that gives zero reward to the states of the second copy.
            std::vector<ValueType> objectiveRewards = info.model.getRewardModel(optionalRewardModelName ? optionalRewardModelName.get() : "").getTotalRewardVector(info.model.getTransitionMatrix());
            storm::utility::vector::setVectorValues(objectiveRewards, duplicatorResult.secondCopy, storm::utility::zero<ValueType>());
            if (info.negatedRewardsConsidered) {
                storm::utility::vector::scaleVectorInPlace(objectiveRewards, -storm::utility::one<ValueType>());
            }
            info.model.addRewardModel(currentObjective.rewardModelName, RewardModelType(boost::none, objectiveRewards));
        }
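
        // Zeroing the second copy means no further reward is collected once a
        // target state has been reached, so the expected total reward of the new
        // model coincides with the expected reachability reward of the original
        // formula.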
        template<typename SparseMdpModelType>
        void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::CumulativeRewardFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective, boost::optional<std::string> const& optionalRewardModelName) {
            std::vector<ValueType> objectiveRewards = info.model.getRewardModel(optionalRewardModelName ? optionalRewardModelName.get() : "").getTotalRewardVector(info.model.getTransitionMatrix());
            if (info.negatedRewardsConsidered) {
                storm::utility::vector::scaleVectorInPlace(objectiveRewards, -storm::utility::one<ValueType>());
            }
            info.model.addRewardModel(currentObjective.rewardModelName, RewardModelType(boost::none, objectiveRewards));
        }
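
        // For cumulative rewards the original state-action rewards are kept as-is
        // (possibly negated); the discrete time bound recorded in
        // setStepBoundOfObjective limits how many steps may accumulate reward.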
        template class SparseMdpMultiObjectivePreprocessingHelper<storm::models::sparse::Mdp<double>>;