STORM_LOG_THROW(rewardAnalysis.rewardFinitenessType != preprocessing::RewardFinitenessType::Infinite, storm::exceptions::NotSupportedException, "There is no Pareto optimal scheduler that yields finite reward for all objectives. This is not supported.");
STORM_LOG_THROW(rewardAnalysis.rewardLessInfinityEStates, storm::exceptions::UnexpectedException, "The set of states with reward < infinity for some scheduler has not been computed during preprocessing.");
STORM_LOG_THROW(rewardAnalysis.totalRewardLessInfinityEStates, storm::exceptions::UnexpectedException, "The set of states with reward < infinity for some scheduler has not been computed during preprocessing.");
STORM_LOG_THROW(preprocessorResult.containsOnlyTrivialObjectives(), storm::exceptions::NotSupportedException, "At least one objective was not reduced to an expected (total or cumulative) reward objective during preprocessing. This is not supported by the considered weight vector checker.");
STORM_LOG_THROW(preprocessorResult.preprocessedModel->getInitialStates().getNumberOfSetBits() == 1, storm::exceptions::NotSupportedException, "The model has multiple initial states.");
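// The preconditions above all follow the same check-and-throw pattern: if the condition is
// false, the streamed message is attached to the given exception type and thrown. A minimal,
// self-contained stand-in for illustration (this is NOT Storm's actual STORM_LOG_THROW macro
// definition; it only sketches the fire-on-false semantics):
#include <sstream>

#define SKETCH_LOG_THROW(condition, exceptionType, message) \
    do {                                                    \
        if (!(condition)) {                                 \
            std::ostringstream sketchStream;                \
            sketchStream << message;                        \
            throw exceptionType(sketchStream.str());        \
        }                                                   \
    } while (false)

// Example use of the sketch with a standard exception type:
// SKETCH_LOG_THROW(initialStates.getNumberOfSetBits() == 1, std::invalid_argument, "The model has multiple initial states.");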
// Build a subsystem of the preprocessor result model that discards states that yield infinite reward for all schedulers.
// We can also merge the states that will have reward zero anyway.
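// A hedged, self-contained sketch of the construction described in the comment above; the
// function name and parameters are hypothetical and this is not Storm's actual subsystem
// builder. States that only yield infinite reward (i.e. states outside
// rewardAnalysis.totalRewardLessInfinityEStates) are dropped, and all states that are
// guaranteed to collect reward zero are merged into a single representative state.
#include <cstddef>
#include <vector>

std::vector<std::size_t> sketchBuildSubsystemMapping(std::vector<bool> const& finiteRewardEStates,
                                                     std::vector<bool> const& zeroRewardStates) {
    constexpr std::size_t discarded = static_cast<std::size_t>(-1);
    std::vector<std::size_t> oldToNewIndex(finiteRewardEStates.size(), discarded);
    std::size_t nextIndex = 0;
    std::size_t mergedZeroRewardState = discarded;
    for (std::size_t state = 0; state < finiteRewardEStates.size(); ++state) {
        if (!finiteRewardEStates[state]) {
            continue;  // Reward is infinite under every scheduler: discard this state.
        }
        if (zeroRewardStates[state]) {
            // All zero-reward states share one representative in the subsystem.
            if (mergedZeroRewardState == discarded) {
                mergedZeroRewardState = nextIndex++;
            }
            oldToNewIndex[state] = mergedZeroRewardState;
        } else {
            oldToNewIndex[state] = nextIndex++;
        }
    }
    return oldToNewIndex;
}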
STORM_LOG_WARN_COND(obj.formula->getSubformula().isLongRunAverageRewardFormula(), "Analyzing subformula " << obj.formula->getSubformula() << " is not supported properly.");
STORM_LOG_WARN_COND(obj.formula->getSubformula().isCumulativeRewardFormula(), "Analyzing subformula " << obj.formula->getSubformula() << " is not supported properly.");
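// Note: STORM_LOG_WARN_COND emits its message when the condition is false, so each of the two
// checks above warns whenever an objective's subformula is not the one formula type handled at
// that point (long-run average rewards resp. cumulative rewards); analysis continues, but, as
// the message says, such subformulas are not supported properly.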