@@ -38,7 +38,7 @@ namespace storm {
 template<typename SparseMdpModelType>
 bool SparseMdpPrctlModelChecker<SparseMdpModelType>::canHandle(CheckTask<storm::logic::Formula, ValueType> const& checkTask) const {
     storm::logic::Formula const& formula = checkTask.getFormula();
-    if (formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setConditionalProbabilityFormulasAllowed(true).setOnlyEventuallyFormuluasInConditionalFormulasAllowed(true).setRewardBoundedUntilFormulasAllowed(true).setRewardBoundedCumulativeRewardFormulasAllowed(true).setMultiDimensionalBoundedUntilFormulasAllowed(true).setMultiDimensionalCumulativeRewardFormulasAllowed(true))) {
+    if (formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setConditionalProbabilityFormulasAllowed(true).setOnlyEventuallyFormuluasInConditionalFormulasAllowed(true).setTotalRewardFormulasAllowed(true).setRewardBoundedUntilFormulasAllowed(true).setRewardBoundedCumulativeRewardFormulasAllowed(true).setMultiDimensionalBoundedUntilFormulasAllowed(true).setMultiDimensionalCumulativeRewardFormulasAllowed(true))) {
         return true;
     } else {
         // Check whether we consider a multi-objective formula
@@ -172,6 +172,17 @@ namespace storm {
     }
     return result;
 }
+
+template<typename SparseMdpModelType>
+std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeTotalRewards(Environment const& env, storm::logic::RewardMeasureType, CheckTask<storm::logic::TotalRewardFormula, ValueType> const& checkTask) {
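+    // A total-reward objective on a nondeterministic model is only well-defined once the
+    // optimization direction (minimize or maximize over schedulers) has been fixed.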
+    STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
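+    // Delegate the computation to the sparse MDP PRCTL helper, using the reward model named in the
+    // check task or the model's default (unnamed) reward model if none is given.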
+    auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeTotalRewards(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), checkTask.isQualitativeSet(), checkTask.isProduceSchedulersSet(), checkTask.getHint());
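+    // Wrap the state-wise values in an explicit quantitative result and attach the scheduler
+    // if one was requested and produced.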
+    std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(ret.values)));
+    if (checkTask.isProduceSchedulersSet() && ret.scheduler) {
+        result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::move(ret.scheduler));
+    }
+    return result;
+}
 
 template<typename SparseMdpModelType>
 std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageProbabilities(Environment const& env, CheckTask<storm::logic::StateFormula, ValueType> const& checkTask) {