// NOTE(review): this chunk is a whitespace-mangled extract of statements from the
// interiors of several functions; the enclosing definitions are not visible here.
// Code tokens below are left byte-identical — comments only.
// Time-bounded until on a Markov automaton is only accepted when the bound refers
// to time (per the exception message); other bound types are rejected.
STORM_LOG_THROW(!data.originalModel.isOfType(storm::models::ModelType::MarkovAutomaton)||formula.getTimeBoundReference().isTimeBound(),storm::exceptions::InvalidPropertyException,"Bounded until formulas for Markov Automata are only allowed when time bounds are considered.");
// Cumulative reward formulas are only accepted for MDP models here — presumably the
// cumulative-reward preprocessing path; TODO confirm against the enclosing function.
STORM_LOG_THROW(data.originalModel.isOfType(storm::models::ModelType::Mdp),storm::exceptions::InvalidPropertyException,"Cumulative reward formulas are not supported for the given model type.");
// Debug invariant: an objective selected for the finite-reward check must carry a
// reward operator formula.
STORM_LOG_ASSERT(result.objectives[objIndex].formula->isRewardOperatorFormula(),"Objective needs to be checked for finite reward but has no reward operator.");
// Preconditions validated on the multi-objective preprocessing result — presumably
// in a weight-vector-checker constructor/factory; enclosing definition not visible,
// TODO confirm.
// Reject instances where every scheduler yields infinite reward for some objective.
STORM_LOG_THROW(preprocessorResult.rewardFinitenessType!=SparseMultiObjectivePreprocessorResult<SparseModelType>::RewardFinitenessType::Infinite,storm::exceptions::NotSupportedException,"There is no Pareto optimal scheduler that yields finite reward for all objectives. This is not supported.");
// The preprocessing step must have filled in the reward-<-infinity state set
// (an internal invariant, hence UnexpectedException rather than NotSupported).
STORM_LOG_THROW(preprocessorResult.rewardLessInfinityEStates,storm::exceptions::UnexpectedException,"The set of states with reward < infinity for some scheduler has not been computed during preprocessing.");
// This checker only handles objectives reduced to expected (total/cumulative)
// reward objectives...
STORM_LOG_THROW(preprocessorResult.containsOnlyRewardObjectives(),storm::exceptions::NotSupportedException,"At least one objective was not reduced to an expected (total or cumulative) reward objective during preprocessing. This is not supported by the considered weight vector checker.");
// ...and only "trivial" ones (same user-facing message as above).
STORM_LOG_THROW(preprocessorResult.containsOnlyTrivialObjectives(),storm::exceptions::NotSupportedException,"At least one objective was not reduced to an expected (total or cumulative) reward objective during preprocessing. This is not supported by the considered weight vector checker.");
// Multi-objective checking here requires a unique initial state.
STORM_LOG_THROW(preprocessorResult.preprocessedModel->getInitialStates().getNumberOfSetBits()==1,storm::exceptions::NotSupportedException,"The model has multiple initial states.");
// NOTE(review): lines below duplicate the validation block above verbatim —
// presumably the same preamble in a sibling checker class in another file of this
// extract; TODO confirm no accidental duplication in a single function.
// Build a subsystem of the preprocessor result model that discards states that yield infinite reward for all schedulers.
STORM_LOG_THROW(preprocessorResult.rewardFinitenessType!=SparseMultiObjectivePreprocessorResult<SparseModelType>::RewardFinitenessType::Infinite,storm::exceptions::NotSupportedException,"There is no Pareto optimal scheduler that yields finite reward for all objectives. This is not supported.");
STORM_LOG_THROW(preprocessorResult.rewardLessInfinityEStates,storm::exceptions::UnexpectedException,"The set of states with reward < infinity for some scheduler has not been computed during preprocessing.");
STORM_LOG_THROW(preprocessorResult.containsOnlyRewardObjectives(),storm::exceptions::NotSupportedException,"At least one objective was not reduced to an expected (total or cumulative) reward objective during preprocessing. This is not supported by the considered weight vector checker.");
STORM_LOG_THROW(preprocessorResult.containsOnlyTrivialObjectives(),storm::exceptions::NotSupportedException,"At least one objective was not reduced to an expected (total or cumulative) reward objective during preprocessing. This is not supported by the considered weight vector checker.");
STORM_LOG_THROW(preprocessorResult.preprocessedModel->getInitialStates().getNumberOfSetBits()==1,storm::exceptions::NotSupportedException,"The model has multiple initial states.");
// MemoryStateManager debug invariants (enclosing method(s) not visible here).
// The manager must track at least one dimension.
STORM_LOG_ASSERT(dimensionCount>0,"Invoked MemoryStateManager with zero dimension count.");
// Dimension index must be in range.
// NOTE(review): message typo "larger then" -> "larger than" (left unchanged: the
// message is a runtime string and this edit is documentation-only).
STORM_LOG_ASSERT(dimension<dimensionCount,"Tried to set a dimension that is larger then the number of considered dimensions");
// The addressed dimension must not be one of the memory-less dimensions
// (checked by masking the dimension's bit against dimensionsWithoutMemoryMask).
STORM_LOG_ASSERT(((dimensionBitMask<<dimension)&dimensionsWithoutMemoryMask)==0,"Tried to change a memory state for a dimension but the dimension is assumed to have no memory.");
// The caller-supplied bitset must cover exactly the tracked dimensions.
STORM_LOG_ASSERT(dimensions.size()==dimensionCount,"Invalid size of given bitset.");
// NOTE(review): this span is syntactically mangled extraction residue and is kept
// byte-identical. Observed defects to fix against the upstream file:
//  - "for(autoconst&d:dimensions){" has fused tokens; presumably
//    "for (auto const& d : dimensions) {" — TODO confirm upstream.
//  - The three STORM_LOG_THROW lines about objectiveFormula/subFormula inside the
//    second loop appear spliced in from a different function (they reference names
//    unrelated to the dimension loop) — TODO confirm.
//  - The else-branch seems to be missing its bit-clearing statement (the 'true'
//    branch sets bits via state|=...; no counterpart is visible) — TODO confirm.
// Set each listed dimension's bit in `state` when value is true.
if(value){
for(autoconst&d:dimensions){
STORM_LOG_ASSERT(((dimensionBitMask<<d)&dimensionsWithoutMemoryMask)==0,"Tried to set a dimension to 'relevant'-memory state but the dimension is assumed to have no memory.");
state|=(dimensionBitMask<<d);
}
}else{
// Clear path: asserts the same no-memory invariant per dimension.
for(autoconst&d:dimensions){
STORM_LOG_ASSERT(((dimensionBitMask<<d)&dimensionsWithoutMemoryMask)==0,"Tried to set a dimension to 'unrelevant'-memory state but the dimension is assumed to have no memory.");
STORM_LOG_THROW(objectiveFormula->hasOptimalityType(),storm::exceptions::InvalidPropertyException,"Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
STORM_LOG_THROW(subFormula->isBoundedUntilFormula(),storm::exceptions::InvalidPropertyException,"Formula "<<objectiveFormula<<" is not supported. Invalid subformula "<<*subFormula<<".");
STORM_LOG_THROW(subFormula->isBoundedUntilFormula(),storm::exceptions::InvalidPropertyException,"Formula "<<objectiveFormula<<" is not supported. Invalid subformula "<<*subFormula<<".");
}
}else{
STORM_LOG_THROW(objectiveFormula->getSubformula().isBoundedUntilFormula(),storm::exceptions::InvalidPropertyException,"Formula "<<objectiveFormula<<" is not supported. Invalid subformula "<<objectiveFormula->getSubformula()<<".");
}
// NOTE(review): dangling "}else{" — the matching if belongs to a function whose
// earlier lines are not part of this extract.
}else{
// Non-probability objectives must have a bounded-until subformula here.
STORM_LOG_THROW(objectiveFormula->getSubformula().isBoundedUntilFormula(),storm::exceptions::InvalidPropertyException,"Formula "<<objectiveFormula<<" is not supported. Invalid subformula "<<objectiveFormula->getSubformula()<<".");
// Alternative branch (presumably from another function in this extract): only
// reward operators over cumulative-reward subformulas are accepted.
STORM_LOG_THROW(objectiveFormula->isRewardOperatorFormula()&&objectiveFormula->getSubformula().isCumulativeRewardFormula(),storm::exceptions::InvalidPropertyException,"Formula "<<objectiveFormula<<" is not supported.");
// The referenced reward model must exist on the model...
STORM_LOG_THROW(this->model.hasRewardModel(rewardName),storm::exceptions::IllegalArgumentException,"No reward model with name '"<<rewardName<<"' found.");
// ...and must not carry transition rewards when used as a reward bound.
STORM_LOG_THROW(!rewardModel.hasTransitionRewards(),storm::exceptions::NotSupportedException,"Transition rewards are currently not supported as reward bounds.");
// Preconditions on the check task for reward-bounded queries (enclosing methods
// not visible; each line presumably guards a different entry point — TODO confirm).
// Check whether we consider a multi-objective formula
// For multi-objective model checking, each initial state requires an individual scheduler (in contrast to single-objective model checking). Let's exclude multiple initial states.
// Exactly one upper time bound and no lower bound is accepted.
STORM_LOG_THROW(!pathFormula.hasLowerBound()&&pathFormula.hasUpperBound(),storm::exceptions::InvalidPropertyException,"Formula needs to have single upper time bound.");
// Nondeterministic models need an explicit min/max direction.
STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(),storm::exceptions::InvalidPropertyException,"Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
// The cumulative reward bound must be a discrete (integer) bound.
STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(),storm::exceptions::InvalidPropertyException,"Formula needs to have a discrete time bound.");
// Reward-bounded cumulative rewards are computed for initial states only.
STORM_LOG_THROW(checkTask.isOnlyInitialStatesRelevantSet(),storm::exceptions::InvalidOperationException,"Checking reward bounded cumulative reward formulas can only be done for the initial states of the model.");
// Soft warning only: qualitative queries work but take the quantitative path.
STORM_LOG_WARN_COND(!checkTask.isQualitativeSet(),"Checking reward bounded until formulas is not optimized w.r.t. qualitative queries");
// JANI JSON parsing of a variable's "initial-value" (enclosing parser function not
// visible). Parse the expression in the current variable scopes...
initVal=parseExpression(variableStructure.at("initial-value"),"Initial value for variable "+name+" (scope: "+scopeDescription+") ",globalVars,constants,localVars);
// ...then type-check it per variable kind.
// NOTE(review): message says "integer variable ... should be a rational" — the
// type names disagree; presumably a copy-paste from another branch. Also missing
// a space before "(scope" in all four messages below. Left unchanged here
// (runtime strings); fix upstream.
STORM_LOG_THROW(initVal.get().hasRationalType(),storm::exceptions::InvalidJaniException,"Initial value for integer variable "+name+"(scope "+scopeDescription+") should be a rational");
// Rational variables additionally accept integer-typed initial values.
STORM_LOG_THROW(initVal.get().hasRationalType()||initVal.get().hasIntegerType(),storm::exceptions::InvalidJaniException,"Initial value for rational variable "+name+"(scope "+scopeDescription+") should be a rational");
initVal=parseExpression(variableStructure.at("initial-value"),"Initial value for variable "+name+" (scope: "+scopeDescription+") ",globalVars,constants,localVars);
// NOTE(review): "integer variable ... should be a Boolean" — message again looks
// copy-pasted (compare the corrected "boolean variable" wording on the next line);
// TODO fix the string upstream.
STORM_LOG_THROW(initVal.get().hasBooleanType(),storm::exceptions::InvalidJaniException,"Initial value for integer variable "+name+"(scope "+scopeDescription+") should be a Boolean");
STORM_LOG_THROW(initVal.get().hasBooleanType(),storm::exceptions::InvalidJaniException,"Initial value for boolean variable "+name+"(scope "+scopeDescription+") should be a Boolean");