diff --git a/resources/examples/testfiles/ma/multiobj_simple_lra.ma b/resources/examples/testfiles/ma/multiobj_simple_lra.ma
new file mode 100644
index 000000000..045c86563
--- /dev/null
+++ b/resources/examples/testfiles/ma/multiobj_simple_lra.ma
@@ -0,0 +1,22 @@
+ma
+module main
+
+    x : [0..4];
+    [a] x=0 -> 0.5 : (x'=1) + 0.5 : (x'=0);
+    [b] x=0 -> (x'=2);
+    <> x=1 -> 6 : (x'=2) + 4 : (x'=4);
+    <> x=2 -> 5 : (x'=2) + 5 : (x'=3);
+    [c] x=3 -> 0.3 : (x'=2) + 0.7 : (x'=4);
+    [d] x=3 -> 0.3 : (x'=4) + 0.7 : (x'=2);
+    <> x=4 -> 5 : (x'=3);
+endmodule
+
+rewards "first"
+    true : 3;
+    [] x=2 : 10;
+    [d] true : 5;
+endrewards
+
+rewards "second"
+    x<2 : 42;
+endrewards
diff --git a/resources/examples/testfiles/ma/polling.ma b/resources/examples/testfiles/ma/polling.ma
new file mode 100755
index 000000000..a33e53261
--- /dev/null
+++ b/resources/examples/testfiles/ma/polling.ma
@@ -0,0 +1,101 @@
+// Translation of the MAPA specification of a polling system into PRISM code
+// http://wwwhome.cs.utwente.nl/~timmer/scoop/papers/qest13/index.html
+
+ma
+
+const int N; // number of job types (should be at most 6)
+const int Q; // maximum queue size in each station
+
+// Formulae to control the LIFO queue of the stations.
+// The queue is represented by an integer whose base-N representation has at most Q digits, each representing one of the job types 0, 1, ..., N-1.
+// In addition, we store the current size of the queue, which is needed to distinguish an empty queue from a queue holding a job of type 0.
+formula queue1_empty = q1Size=0;
+formula queue1_full = q1Size=Q;
+formula queue1_pop = floor(q1/N);
+formula queue1_head = q1 - (queue1_pop * N); // i.e. q1 modulo N
+formula queue1_push = q1*N;
+formula queue2_empty = q2Size=0;
+formula queue2_full = q2Size=Q;
+formula queue2_pop = floor(q2/N);
+formula queue2_head = q2 - (queue2_pop * N); // i.e. q2 modulo N
+formula queue2_push = q2*N;
+
+const int queue_maxValue = (N^Q)-1;
+
+const double inRate1 = 3; // = (2 * #station) + 1;
+const double inRate2 = 5; // = (2 * #station) + 1;
+
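+// Worked example of the queue encoding (hypothetical values N=3, Q=2): pushing a job of type 2 onto an empty queue
+// gives q1 = 0*3 + 2 = 2 and q1Size = 1; pushing a job of type 1 on top then gives q1 = 2*3 + 1 = 7 and q1Size = 2.
+// Then queue1_head = 7 - floor(7/3)*3 = 1 (the job pushed last) and queue1_pop = floor(7/3) = 2 (the remaining queue).
+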
+module pollingsys
+    // The queues for the stations
+    q1 : [0..queue_maxValue];
+    q1Size : [0..Q];
+    q2 : [0..queue_maxValue];
+    q2Size : [0..Q];
+
+    // Store the job that is currently processed by the server. j=N means that no job is processed.
+    j : [0..N] init N;
+
+    // Flag indicating whether a new job arrived
+    newJob1 : bool init false;
+    newJob2 : bool init false;
+
+    //<> !newJob1 & !newJob2 & !queue1_full & queue2_full & j=N -> inRate1 : (newJob1'=true);
+    //<> !newJob1 & !newJob2 & queue1_full & !queue2_full & j=N -> inRate2 : (newJob2'=true);
+    <> !newJob1 & !newJob2 & !queue1_full & !queue2_full & j=N -> inRate1 : (newJob1'=true) + inRate2 : (newJob2'=true);
+    <> !newJob1 & !newJob2 & queue1_full & queue2_full & j<N -> 2*(j+1) : (j'=N);
+    <> !newJob1 & !newJob2 & !queue1_full & queue2_full & j<N -> inRate1 : (newJob1'=true) + 2*(j+1) : (j'=N);
+    <> !newJob1 & !newJob2 & queue1_full & !queue2_full & j<N -> inRate2 : (newJob2'=true) + 2*(j+1) : (j'=N);
+    <> !newJob1 & !newJob2 & !queue1_full & !queue2_full & j<N -> inRate1 : (newJob1'=true) + inRate2 : (newJob2'=true) + 2*(j+1) : (j'=N);
+
+    [] newJob1 & N>=1 -> 1 : (q1Size'=q1Size+1) & (q1'=queue1_push+0) & (newJob1'=false);
+    [] newJob1 & N>=2 -> 1 : (q1Size'=q1Size+1) & (q1'=queue1_push+1) & (newJob1'=false);
+    [] newJob1 & N>=3 -> 1 : (q1Size'=q1Size+1) & (q1'=queue1_push+2) & (newJob1'=false);
+    [] newJob1 & N>=4 -> 1 : (q1Size'=q1Size+1) & (q1'=queue1_push+3) & (newJob1'=false);
+    [] newJob1 & N>=5 -> 1 : (q1Size'=q1Size+1) & (q1'=queue1_push+4) & (newJob1'=false);
+    [] newJob1 & N>=6 -> 1 : (q1Size'=q1Size+1) & (q1'=queue1_push+5) & (newJob1'=false);
+
+    [] newJob2 & N>=1 -> 1 : (q2Size'=q2Size+1) & (q2'=queue2_push+0) & (newJob2'=false);
+    [] newJob2 & N>=2 -> 1 : (q2Size'=q2Size+1) & (q2'=queue2_push+1) & (newJob2'=false);
+    [] newJob2 & N>=3 -> 1 : (q2Size'=q2Size+1) & (q2'=queue2_push+2) & (newJob2'=false);
+    [] newJob2 & N>=4 -> 1 : (q2Size'=q2Size+1) & (q2'=queue2_push+3) & (newJob2'=false);
+    [] newJob2 & N>=5 -> 1 : (q2Size'=q2Size+1) & (q2'=queue2_push+4) & (newJob2'=false);
+    [] newJob2 & N>=6 -> 1 : (q2Size'=q2Size+1) & (q2'=queue2_push+5) & (newJob2'=false);
+
+    [copy1] !newJob1 & !newJob2 & !queue1_empty & j=N -> 0.9 : (j'=queue1_head) & (q1Size'=q1Size-1) & (q1'=queue1_pop) + 0.1 : (j'=queue1_head);
+    [copy2] !newJob1 & !newJob2 & !queue2_empty & j=N -> 0.9 : (j'=queue2_head) & (q2Size'=q2Size-1) & (q2'=queue2_pop) + 0.1 : (j'=queue2_head);
+
+endmodule
+
+
+label "q1full" = q1Size=Q;
+label "q2full" = q2Size=Q;
+label "allqueuesfull" = q1Size=Q & q2Size=Q;
+
+
+// Rewards adapted from Guck et al.: Modelling and Analysis of Markov Reward Automata
+
+rewards "processedjobs1"
+    [copy1] true : 0.1;
+endrewards
+
+rewards "processedjobs2"
+    [copy2] true : 0.1;
+endrewards
+
+rewards "processedjobs"
+    [copy1] true : 0.1;
+    [copy2] true : 0.1;
+endrewards
+
+rewards "queuesize1"
+    true : 0.01 * (q1Size);
+endrewards
+
+rewards "queuesize2"
+    true : 0.01 * (q2Size);
+endrewards
+
+rewards "queuesize"
+    true : 0.01 * (q1Size + q2Size);
+endrewards
diff --git a/resources/examples/testfiles/mdp/multiobj_simple_lra.nm b/resources/examples/testfiles/mdp/multiobj_simple_lra.nm
new file mode 100644
index 000000000..d6f9d7e16
--- /dev/null
+++ b/resources/examples/testfiles/mdp/multiobj_simple_lra.nm
@@ -0,0 +1,28 @@
+mdp
+module main
+
+    x : [0..3];
+    [a] x=0 -> 0.5 : (x'=1) + 0.5 : (x'=0);
+    [a] x=0 -> 0.2 : (x'=0) + 0.8 : (x'=3);
+    [b] x=1 -> (x'=1);
+    [c] x=2 -> (x'=2);
+    [d] x=1 -> (x'=2);
+    [d] x=2 -> 0.1 : (x'=1) + 0.9 : (x'=2);
+    [] x=3 -> (x'=3);
+endmodule
+
+rewards "first"
+    [a] true : 1;
+    [b] true : 5;
+    [d] true : 5;
+endrewards
+
+rewards "second"
+    [a] true : 1;
+    [c] true : 8;
+    x=2 : 8;
+endrewards
+
+rewards "third"
+    x=0 : 1;
+endrewards
diff --git a/resources/examples/testfiles/mdp/resource-gathering.nm b/resources/examples/testfiles/mdp/resource-gathering.nm
new file mode 100644
index 000000000..5b00f263f
--- /dev/null
+++ b/resources/examples/testfiles/mdp/resource-gathering.nm
@@ -0,0 +1,89 @@
+mdp
+
+const int WIDTH = 5;
+const int HEIGHT = 5;
+const int XINIT = 3;
+const int YINIT = 1;
+
+const int GOLD_TO_COLLECT; // Set to 0 to avoid unfolding
+const int GEM_TO_COLLECT; // Set to 0 to avoid unfolding
+const int B;
+
+const double pAttack = 1/10;
+
+formula left_of_gold = x=2 & y=5;
+formula right_of_gold = x=4 & y=5;
+formula below_of_gold = (x=3 & y=4);
+formula above_of_gold = false;
+formula left_of_gem = (x=4 & y=4);
+formula right_of_gem = false;
+formula below_of_gem = (x=5 & y=3);
+formula above_of_gem = (x=5 & y=5);
+formula left_of_home = x=2 & y=1;
+formula right_of_home = x=4 & y=1;
+formula above_of_home = x=3 & y=2;
+formula below_of_home = false;
+formula left_of_enemy = (x=3 & y=5) | (x=2 & y=4);
+formula right_of_enemy = (x=5 & y=5) | (x=4 & y=4);
+formula above_of_enemy = x=3 & y=5;
+formula below_of_enemy = (x=3 & y=3) | (x=4 & y=4);
+
+module robot
+
+    gold : bool init false;
+    gem : bool init false;
+    attacked : bool init false;
+
+    x : [1..WIDTH] init XINIT;
+    y : [1..HEIGHT] init YINIT;
+
+    [right] !left_of_enemy & x<WIDTH -> (attacked'=false) & (x'=x+1) & (gold' = (gold & !left_of_home) | left_of_gold) & (gem' = (gem & !left_of_home) | left_of_gem);
+    [left] !right_of_enemy & x>1 -> (attacked'=false) & (x'=x-1) & (gold' = (gold & !right_of_home) | right_of_gold) & (gem' = (gem & !right_of_home) | right_of_gem);
+    [top] !below_of_enemy & y<HEIGHT -> (attacked'=false) & (y'=y+1) & (gold' = (gold & !below_of_home) | below_of_gold) & (gem' = (gem & !below_of_home) | below_of_gem);
+    [down] !above_of_enemy & y>1 -> (attacked'=false) & (y'=y-1) & (gold' = (gold & !above_of_home) | above_of_gold) & (gem' = (gem & !above_of_home) | above_of_gem);
+
+    [right] left_of_enemy & x<WIDTH -> pAttack : (attacked'=true) & (x'=XINIT) & (y'=YINIT) & (gold'=false) & (gem'=false) + (1-pAttack) : (attacked'=false) & (x'=x+1) & (gold' = (gold & !left_of_home) | left_of_gold) & (gem' = (gem & !left_of_home) | left_of_gem);
+    [left] right_of_enemy & x>1 -> pAttack : (attacked'=true) & (x'=XINIT) & (y'=YINIT) & (gold'=false) & (gem'=false) + (1-pAttack) : (attacked'=false) & (x'=x-1) & (gold' = (gold & !right_of_home) | right_of_gold) & (gem' = (gem & !right_of_home) | right_of_gem);
+    [top] below_of_enemy & y<HEIGHT -> pAttack : (attacked'=true) & (x'=XINIT) & (y'=YINIT) & (gold'=false) & (gem'=false) + (1-pAttack) : (attacked'=false) & (y'=y+1) & (gold' = (gold & !below_of_home) | below_of_gold) & (gem' = (gem & !below_of_home) | below_of_gem);
+    [down] above_of_enemy & y>1 -> pAttack : (attacked'=true) & (x'=XINIT) & (y'=YINIT) & (gold'=false) & (gem'=false) + (1-pAttack) : (attacked'=false) & (y'=y-1) & (gold' = (gold & !above_of_home) | above_of_gold) & (gem' = (gem & !above_of_home) | above_of_gem);
+endmodule
+
+rewards "attacks"
+    attacked : 1;
+endrewards
+
+rewards "rew_gold"
+    [right] left_of_home & gold : 1;
+    [left] right_of_home & gold : 1;
+    [top] below_of_home & gold : 1;
+    [down] above_of_home & gold : 1;
+endrewards
+
+rewards "rew_gem"
+    [right] left_of_home & gem : 1;
+    [left] right_of_home & gem : 1;
+    [top] below_of_home & gem : 1;
+    [down] above_of_home & gem : 1;
+endrewards
+
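+// Illustration (hypothetical instantiation GOLD_TO_COLLECT=2): required_gold counts down 2 -> 1 -> 0, decreasing by one
+// each time the robot steps onto the home field while carrying gold (the same transitions that earn reward in "rew_gold").
+// Once both counters reach 0, the label "success" below holds. Setting the constants to 0 keeps the counters at 0 and avoids unfolding.
+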
+module goldcounter
+
+    required_gold : [0..GOLD_TO_COLLECT] init GOLD_TO_COLLECT;
+
+    [right] true -> (required_gold'=max(0, required_gold - (left_of_home & gold ? 1 : 0)));
+    [left] true -> (required_gold'=max(0, required_gold - (right_of_home & gold ? 1 : 0)));
+    [top] true -> (required_gold'=max(0, required_gold - (below_of_home & gold ? 1 : 0)));
+    [down] true -> (required_gold'=max(0, required_gold - (above_of_home & gold ? 1 : 0)));
+endmodule
+
+module gemcounter
+
+    required_gem : [0..GEM_TO_COLLECT] init GEM_TO_COLLECT;
+
+    [right] true -> (required_gem'=max(0, required_gem - (left_of_home & gem ? 1 : 0)));
+    [left] true -> (required_gem'=max(0, required_gem - (right_of_home & gem ? 1 : 0)));
+    [top] true -> (required_gem'=max(0, required_gem - (below_of_home & gem ? 1 : 0)));
+    [down] true -> (required_gem'=max(0, required_gem - (above_of_home & gem ? 1 : 0)));
+endmodule
+
+label "success" = required_gold=0 & required_gem=0;
diff --git a/src/storm-cli-utilities/model-handling.h b/src/storm-cli-utilities/model-handling.h
index f630e67df..9fc3cc772 100644
--- a/src/storm-cli-utilities/model-handling.h
+++ b/src/storm-cli-utilities/model-handling.h
@@ -347,6 +347,12 @@ namespace storm {
 
             SymbolicInput output = input;
 
+            // Preprocess properties (if requested)
+            if (ioSettings.isPropertiesAsMultiSet()) {
+                STORM_LOG_THROW(!input.properties.empty(), storm::exceptions::InvalidArgumentException, "Can not translate properties to multi-objective formula because no properties were specified.");
+                output.properties = {storm::api::createMultiObjectiveProperty(output.properties)};
+            }
+
             // Substitute constant definitions in symbolic input.
             std::string constantDefinitionString = ioSettings.getConstantDefinitionString();
             std::map constantDefinitions;
diff --git a/src/storm/api/properties.cpp b/src/storm/api/properties.cpp
index b1e36af86..bd07cb013 100644
--- a/src/storm/api/properties.cpp
+++ b/src/storm/api/properties.cpp
@@ -58,5 +58,18 @@ namespace storm {
             return formulas;
         }
 
+        storm::jani::Property createMultiObjectiveProperty(std::vector const& properties) {
+            std::set undefConstants;
+            std::string name = "";
+            std::string comment = "";
+            for (auto const& prop : properties) {
+                undefConstants.insert(prop.getUndefinedConstants().begin(), prop.getUndefinedConstants().end());
+                name += prop.getName();
+                comment += prop.getComment();
+                STORM_LOG_WARN_COND(prop.getFilter().isDefault(), "Non-default property filter of property " + prop.getName() + " will be dropped during conversion to multi-objective property.");
+            }
+            auto multiFormula = std::make_shared(extractFormulasFromProperties(properties));
+            return storm::jani::Property(name, multiFormula, undefConstants, comment);
+        }
     }
 }
diff --git a/src/storm/api/properties.h b/src/storm/api/properties.h
index c0c878f48..2df4a1fe1 100644
--- a/src/storm/api/properties.h
+++ b/src/storm/api/properties.h
@@ -33,6 +33,7 @@ namespace storm {
         std::vector substituteConstantsInProperties(std::vector const& properties, std::map const& substitution);
         std::vector filterProperties(std::vector const& properties, boost::optional> const& propertyFilter);
         std::vector> extractFormulasFromProperties(std::vector const& properties);
+        storm::jani::Property createMultiObjectiveProperty(std::vector const& properties);
 
     }
 }
diff --git a/src/storm/logic/CumulativeRewardFormula.cpp b/src/storm/logic/CumulativeRewardFormula.cpp
index 33633b319..ccafab82c 100644
--- a/src/storm/logic/CumulativeRewardFormula.cpp
+++ b/src/storm/logic/CumulativeRewardFormula.cpp
@@ -136,6 +136,10 @@ namespace storm {
             }
         }
 
+        std::vector const& CumulativeRewardFormula::getBounds() const {
+            return bounds;
+        }
+
         void
CumulativeRewardFormula::checkNoVariablesInBound(storm::expressions::Expression const& bound) { STORM_LOG_THROW(!bound.containsVariables(), storm::exceptions::InvalidOperationException, "Cannot evaluate time-bound '" << bound << "' as it contains undefined constants."); } @@ -149,8 +153,12 @@ namespace storm { } + std::shared_ptr CumulativeRewardFormula::stripRewardAccumulation() const { + return std::make_shared(bounds, timeBoundReferences); + } + std::shared_ptr CumulativeRewardFormula::restrictToDimension(unsigned i) const { - return std::make_shared(bounds.at(i), getTimeBoundReference(i)); + return std::make_shared(bounds.at(i), getTimeBoundReference(i), rewardAccumulation); } std::ostream& CumulativeRewardFormula::writeToStream(std::ostream& out) const { diff --git a/src/storm/logic/CumulativeRewardFormula.h b/src/storm/logic/CumulativeRewardFormula.h index cccd1ac9e..80924865a 100644 --- a/src/storm/logic/CumulativeRewardFormula.h +++ b/src/storm/logic/CumulativeRewardFormula.h @@ -48,8 +48,11 @@ namespace storm { template ValueType getNonStrictBound() const; + std::vector const& getBounds() const; + bool hasRewardAccumulation() const; RewardAccumulation const& getRewardAccumulation() const; + std::shared_ptr stripRewardAccumulation() const; std::shared_ptr restrictToDimension(unsigned i) const; diff --git a/src/storm/logic/FragmentSpecification.cpp b/src/storm/logic/FragmentSpecification.cpp index 46d7c4075..9405a3841 100644 --- a/src/storm/logic/FragmentSpecification.cpp +++ b/src/storm/logic/FragmentSpecification.cpp @@ -106,6 +106,9 @@ namespace storm { multiObjective.setBoundedUntilFormulasAllowed(true); multiObjective.setStepBoundedUntilFormulasAllowed(true); multiObjective.setTimeBoundedUntilFormulasAllowed(true); + multiObjective.setLongRunAverageOperatorsAllowed(true); + multiObjective.setLongRunAverageRewardFormulasAllowed(true); + return multiObjective; } diff --git a/src/storm/logic/LongRunAverageRewardFormula.cpp b/src/storm/logic/LongRunAverageRewardFormula.cpp index c36dab8a9..294c8de95 100644 --- a/src/storm/logic/LongRunAverageRewardFormula.cpp +++ b/src/storm/logic/LongRunAverageRewardFormula.cpp @@ -24,6 +24,10 @@ namespace storm { return rewardAccumulation.get(); } + std::shared_ptr LongRunAverageRewardFormula::stripRewardAccumulation() const { + return std::make_shared(); + } + boost::any LongRunAverageRewardFormula::accept(FormulaVisitor const& visitor, boost::any const& data) const { return visitor.visit(*this, data); } diff --git a/src/storm/logic/LongRunAverageRewardFormula.h b/src/storm/logic/LongRunAverageRewardFormula.h index 03f1c3684..45bbe3de5 100644 --- a/src/storm/logic/LongRunAverageRewardFormula.h +++ b/src/storm/logic/LongRunAverageRewardFormula.h @@ -19,7 +19,8 @@ namespace storm { virtual bool isRewardPathFormula() const override; bool hasRewardAccumulation() const; RewardAccumulation const& getRewardAccumulation() const; - + std::shared_ptr stripRewardAccumulation() const; + virtual boost::any accept(FormulaVisitor const& visitor, boost::any const& data) const override; virtual std::ostream& writeToStream(std::ostream& out) const override; diff --git a/src/storm/logic/TotalRewardFormula.cpp b/src/storm/logic/TotalRewardFormula.cpp index 4d1df7e0d..fb6f60873 100644 --- a/src/storm/logic/TotalRewardFormula.cpp +++ b/src/storm/logic/TotalRewardFormula.cpp @@ -24,6 +24,10 @@ namespace storm { return rewardAccumulation.get(); } + std::shared_ptr TotalRewardFormula::stripRewardAccumulation() const { + return std::make_shared(); + } + boost::any 
TotalRewardFormula::accept(FormulaVisitor const& visitor, boost::any const& data) const { return visitor.visit(*this, data); } diff --git a/src/storm/logic/TotalRewardFormula.h b/src/storm/logic/TotalRewardFormula.h index a11fb3b03..1fa366723 100644 --- a/src/storm/logic/TotalRewardFormula.h +++ b/src/storm/logic/TotalRewardFormula.h @@ -20,6 +20,7 @@ namespace storm { virtual bool isRewardPathFormula() const override; bool hasRewardAccumulation() const; RewardAccumulation const& getRewardAccumulation() const; + std::shared_ptr stripRewardAccumulation() const; virtual boost::any accept(FormulaVisitor const& visitor, boost::any const& data) const override; diff --git a/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp b/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp index d3fa819b7..e0844fbb8 100644 --- a/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp +++ b/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp @@ -33,7 +33,7 @@ namespace storm { template bool SparseMarkovAutomatonCslModelChecker::canHandleStatic(CheckTask const& checkTask, bool* requiresSingleInitialState) { auto singleObjectiveFragment = storm::logic::csl().setGloballyFormulasAllowed(false).setNextFormulasAllowed(false).setRewardOperatorsAllowed(true).setReachabilityRewardFormulasAllowed(true).setTotalRewardFormulasAllowed(true).setTimeAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setLongRunAverageRewardFormulasAllowed(true).setRewardAccumulationAllowed(true).setInstantaneousFormulasAllowed(false); - auto multiObjectiveFragment = storm::logic::multiObjective().setTimeAllowed(true).setTimeBoundedUntilFormulasAllowed(true); + auto multiObjectiveFragment = storm::logic::multiObjective().setTimeAllowed(true).setTimeBoundedUntilFormulasAllowed(true).setRewardAccumulationAllowed(true); if (!storm::NumberTraits::SupportsExponential) { singleObjectiveFragment.setBoundedUntilFormulasAllowed(false).setCumulativeRewardFormulasAllowed(false); multiObjectiveFragment.setTimeBoundedUntilFormulasAllowed(false).setCumulativeRewardFormulasAllowed(false); diff --git a/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.cpp b/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.cpp index 7d2d680a8..30d808782 100644 --- a/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.cpp +++ b/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.cpp @@ -27,13 +27,13 @@ namespace storm { auto rewardAnalysis = preprocessing::SparseMultiObjectiveRewardAnalysis::analyze(preprocessorResult); STORM_LOG_THROW(rewardAnalysis.rewardFinitenessType != preprocessing::RewardFinitenessType::Infinite, storm::exceptions::NotSupportedException, "TThere is no Pareto optimal scheduler that yields finite reward for all objectives. This is not supported."); - STORM_LOG_THROW(rewardAnalysis.rewardLessInfinityEStates, storm::exceptions::UnexpectedException, "The set of states with reward < infinity for some scheduler has not been computed during preprocessing."); + STORM_LOG_THROW(rewardAnalysis.totalRewardLessInfinityEStates, storm::exceptions::UnexpectedException, "The set of states with reward < infinity for some scheduler has not been computed during preprocessing."); STORM_LOG_THROW(preprocessorResult.containsOnlyTrivialObjectives(), storm::exceptions::NotSupportedException, "At least one objective was not reduced to an expected (total or cumulative) reward objective during preprocessing. 
This is not supported by the considered weight vector checker."); STORM_LOG_THROW(preprocessorResult.preprocessedModel->getInitialStates().getNumberOfSetBits() == 1, storm::exceptions::NotSupportedException, "The model has multiple initial states."); // Build a subsystem of the preprocessor result model that discards states that yield infinite reward for all schedulers. // We can also merge the states that will have reward zero anyway. - storm::storage::BitVector maybeStates = rewardAnalysis.rewardLessInfinityEStates.get() & ~rewardAnalysis.reward0AStates; + storm::storage::BitVector maybeStates = rewardAnalysis.totalRewardLessInfinityEStates.get() & ~rewardAnalysis.reward0AStates; std::set relevantRewardModels; for (auto const& obj : this->objectives) { obj.formula->gatherReferencedRewardModels(relevantRewardModels); @@ -42,7 +42,7 @@ namespace storm { auto mergerResult = merger.mergeTargetAndSinkStates(maybeStates, rewardAnalysis.reward0AStates, storm::storage::BitVector(maybeStates.size(), false), std::vector(relevantRewardModels.begin(), relevantRewardModels.end())); preprocessedModel = mergerResult.model; - reward0EStates = rewardAnalysis.reward0EStates % maybeStates; + reward0EStates = rewardAnalysis.totalReward0EStates % maybeStates; if (mergerResult.targetState) { // There is an additional state in the result reward0EStates.resize(reward0EStates.size() + 1, true); diff --git a/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.cpp b/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.cpp index 19ae057dd..5f2ea028d 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.cpp @@ -35,7 +35,7 @@ namespace storm { boost::optional PcaaWeightVectorChecker::computeWeightedResultBound(bool lower, std::vector const& weightVector, storm::storage::BitVector const& objectiveFilter) const { ValueType result = storm::utility::zero(); - for (auto const& objIndex : objectiveFilter) { + for (auto objIndex : objectiveFilter) { boost::optional const& objBound = (lower == storm::solver::minimize(this->objectives[objIndex].formula->getOptimalityType())) ? this->objectives[objIndex].upperResultBound : this->objectives[objIndex].lowerResultBound; if (objBound) { if (storm::solver::minimize(this->objectives[objIndex].formula->getOptimalityType())) { diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaAchievabilityQuery.cpp b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaAchievabilityQuery.cpp index 8f81dab76..d87edbf00 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaAchievabilityQuery.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaAchievabilityQuery.cpp @@ -7,6 +7,7 @@ #include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h" #include "storm/utility/constants.h" #include "storm/utility/vector.h" +#include "storm/utility/SignalHandler.h" #include "storm/environment/modelchecker/MultiObjectiveModelCheckerEnvironment.h" @@ -56,7 +57,7 @@ namespace storm { template bool SparsePcaaAchievabilityQuery::checkAchievability(Environment const& env) { // repeatedly refine the over/ under approximation until the threshold point is either in the under approx. or not in the over approx. 
- while(!this->maxStepsPerformed(env)){ + while (!this->maxStepsPerformed(env) && !storm::utility::resources::isTerminate()) { WeightVector separatingVector = this->findSeparatingVector(thresholds); this->updateWeightedPrecision(separatingVector); this->performRefinementStep(env, std::move(separatingVector)); @@ -67,7 +68,7 @@ namespace storm { return true; } } - STORM_LOG_ERROR("Could not check whether thresholds are achievable: Exceeded maximum number of refinement steps"); + STORM_LOG_ERROR("Could not check whether thresholds are achievable: Termination requested or maximum number of refinement steps exceeded."); return false; } diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaParetoQuery.cpp b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaParetoQuery.cpp index c7be2fc7e..f1c4145df 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaParetoQuery.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaParetoQuery.cpp @@ -7,6 +7,7 @@ #include "storm/modelchecker/results/ExplicitParetoCurveCheckResult.h" #include "storm/utility/constants.h" #include "storm/utility/vector.h" +#include "storm/utility/SignalHandler.h" #include "storm/environment/modelchecker/MultiObjectiveModelCheckerEnvironment.h" #include "storm/modelchecker/multiobjective/MultiObjectivePostprocessing.h" @@ -59,9 +60,12 @@ namespace storm { WeightVector direction(this->objectives.size(), storm::utility::zero()); direction[objIndex] = storm::utility::one(); this->performRefinementStep(env, std::move(direction)); + if (storm::utility::resources::isTerminate()) { + break; + } } - while(!this->maxStepsPerformed(env)) { + while(!this->maxStepsPerformed(env) && !storm::utility::resources::isTerminate()) { // Get the halfspace of the underApproximation with maximal distance to a vertex of the overApproximation std::vector> underApproxHalfspaces = this->underApproximation->getHalfspaces(); std::vector overApproxVertices = this->overApproximation->getVertices(); @@ -84,12 +88,10 @@ namespace storm { WeightVector direction = underApproxHalfspaces[farestHalfspaceIndex].normalVector(); this->performRefinementStep(env, std::move(direction)); } - STORM_LOG_ERROR("Could not reach the desired precision: Exceeded maximum number of refinement steps"); + STORM_LOG_ERROR("Could not reach the desired precision: Termination requested or maximum number of refinement steps exceeded."); } - - #ifdef STORM_HAVE_CARL template class SparsePcaaParetoQuery, storm::RationalNumber>; template class SparsePcaaParetoQuery, storm::RationalNumber>; diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuantitativeQuery.cpp b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuantitativeQuery.cpp index d43aec7c6..f6f640cfa 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuantitativeQuery.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuantitativeQuery.cpp @@ -8,6 +8,7 @@ #include "storm/modelchecker/results/ExplicitQuantitativeCheckResult.h" #include "storm/utility/constants.h" #include "storm/utility/vector.h" +#include "storm/utility/SignalHandler.h" #include "storm/environment/modelchecker/MultiObjectiveModelCheckerEnvironment.h" #include "storm/exceptions/InvalidOperationException.h" @@ -97,7 +98,7 @@ namespace storm { // We don't care for the optimizing objective at this point this->diracWeightVectorsToBeChecked.set(indexOfOptimizingObjective, false); - while(!this->maxStepsPerformed(env)){ + while(!this->maxStepsPerformed(env) && 
!storm::utility::resources::isTerminate()){ WeightVector separatingVector = this->findSeparatingVector(thresholds); this->updateWeightedPrecisionInAchievabilityPhase(separatingVector); this->performRefinementStep(env, std::move(separatingVector)); @@ -114,7 +115,7 @@ namespace storm { // If there is only one objective than its the optimizing one. Thus the query has to be achievable. return true; } - STORM_LOG_ERROR("Could not check whether thresholds are achievable: Exceeded maximum number of refinement steps"); + STORM_LOG_ERROR("Could not check whether thresholds are achievable: Termination requested or maximum number of refinement steps exceeded."); return false; } @@ -149,7 +150,7 @@ namespace storm { // the supremum over all strategies. Hence, one could combine a scheduler inducing the optimum value (but possibly violating strict // thresholds) and (with very low probability) a scheduler that satisfies all (possibly strict) thresholds. GeometryValueType result = storm::utility::zero(); - while(!this->maxStepsPerformed(env)) { + while(!this->maxStepsPerformed(env) && !storm::utility::resources::isTerminate()) { if (this->refinementSteps.empty()) { // We did not make any refinement steps during the checkAchievability phase (e.g., because there is only one objective). this->weightVectorChecker->setWeightedPrecision(storm::utility::convertNumber(env.modelchecker().multi().getPrecision())); @@ -178,7 +179,7 @@ namespace storm { this->updateWeightedPrecisionInImprovingPhase(env, separatingVector); this->performRefinementStep(env, std::move(separatingVector)); } - STORM_LOG_ERROR("Could not reach the desired precision: Exceeded maximum number of refinement steps"); + STORM_LOG_ERROR("Could not reach the desired precision: Termination requested or maximum number of refinement steps exceeded."); return result; } diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.cpp b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.cpp index a97905f7d..730a79c4e 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.cpp @@ -8,6 +8,8 @@ #include "storm/modelchecker/multiobjective/MultiObjectivePostprocessing.h" #include "storm/environment/modelchecker/MultiObjectiveModelCheckerEnvironment.h" #include "storm/storage/geometry/Hyperrectangle.h" +#include "storm/settings/SettingsManager.h" +#include "storm/settings/modules/CoreSettings.h" #include "storm/utility/constants.h" #include "storm/utility/vector.h" #include "storm/io/export.h" @@ -18,6 +20,12 @@ namespace storm { namespace modelchecker { namespace multiobjective { + template + SparsePcaaQuery::~SparsePcaaQuery() { + if (storm::settings::getModule().isShowStatisticsSet()) { + STORM_PRINT_AND_LOG("Pareto Curve Approximation Algorithm terminated after " << this->refinementSteps.size() << " refinement steps." 
<< std::endl); + } + } template SparsePcaaQuery::SparsePcaaQuery(preprocessing::SparseMultiObjectivePreprocessorResult& preprocessorResult) : diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.h b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.h index b1fb9d359..7df4ff7bc 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.h +++ b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.h @@ -25,7 +25,7 @@ namespace storm { typedef std::vector Point; typedef std::vector WeightVector; - virtual ~SparsePcaaQuery() = default; + virtual ~SparsePcaaQuery(); /* * Invokes the computation and retrieves the result diff --git a/src/storm/modelchecker/multiobjective/pcaa/StandardMaPcaaWeightVectorChecker.cpp b/src/storm/modelchecker/multiobjective/pcaa/StandardMaPcaaWeightVectorChecker.cpp index 72052768a..029e8244f 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/StandardMaPcaaWeightVectorChecker.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/StandardMaPcaaWeightVectorChecker.cpp @@ -7,10 +7,14 @@ #include "storm/models/sparse/StandardRewardModel.h" #include "storm/utility/macros.h" #include "storm/utility/vector.h" +#include "storm/utility/SignalHandler.h" #include "storm/logic/Formulas.h" #include "storm/solver/SolverSelectionOptions.h" +#include "storm/settings/SettingsManager.h" +#include "storm/settings/modules/CoreSettings.h" -#include "storm/environment/solver/NativeSolverEnvironment.h" +#include "storm/environment/solver/SolverEnvironment.h" +#include "storm/environment/solver/MinMaxSolverEnvironment.h" #include "storm/exceptions/InvalidOperationException.h" #include "storm/exceptions/InvalidPropertyException.h" @@ -35,7 +39,8 @@ namespace storm { exitRates = model.getExitRates(); // Set the (discretized) state action rewards. - this->actionRewards.resize(this->objectives.size()); + this->actionRewards.assign(this->objectives.size(), {}); + this->stateRewards.assign(this->objectives.size(), {}); for (uint64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { auto const& formula = *this->objectives[objIndex].formula; STORM_LOG_THROW(formula.isRewardOperatorFormula() && formula.asRewardOperatorFormula().hasRewardModelName(), storm::exceptions::UnexpectedException, "Unexpected type of operator formula: " << formula); @@ -49,14 +54,37 @@ namespace storm { this->actionRewards[objIndex][model.getTransitionMatrix().getRowGroupIndices()[markovianState]] += rewModel.getStateReward(markovianState) / exitRates[markovianState]; } } + } else if (formula.getSubformula().isLongRunAverageRewardFormula()) { + // The LRA methods for MA require keeping track of state- and action rewards separately + if (rewModel.hasStateRewards()) { + this->stateRewards[objIndex] = rewModel.getStateRewardVector(); + } } else { STORM_LOG_THROW(formula.getSubformula().isCumulativeRewardFormula() && formula.getSubformula().asCumulativeRewardFormula().getTimeBoundReference().isTimeBound(), storm::exceptions::UnexpectedException, "Unexpected type of sub-formula: " << formula.getSubformula()); STORM_LOG_THROW(!rewModel.hasStateRewards(), storm::exceptions::InvalidPropertyException, "Found state rewards for time bounded objective " << this->objectives[objIndex].originalFormula << ". 
This is not supported."); STORM_LOG_WARN_COND(this->objectives[objIndex].originalFormula->isProbabilityOperatorFormula() && this->objectives[objIndex].originalFormula->asProbabilityOperatorFormula().getSubformula().isBoundedUntilFormula(), "Objective " << this->objectives[objIndex].originalFormula << " was simplified to a cumulative reward formula. Correctness of the algorithm is unknown for this type of property."); } } + // Print some statistics (if requested) + if (storm::settings::getModule().isShowStatisticsSet()) { + STORM_PRINT_AND_LOG("Final preprocessed model has " << markovianStates.getNumberOfSetBits() << " Markovian states." << std::endl); + } } + template + storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper StandardMaPcaaWeightVectorChecker::createNondetInfiniteHorizonHelper(storm::storage::SparseMatrix const& transitions) const { + STORM_LOG_ASSERT(transitions.getRowGroupCount() == this->transitionMatrix.getRowGroupCount(), "Unexpected size of given matrix."); + return storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper(transitions, this->markovianStates, this->exitRates); + } + + template + storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper StandardMaPcaaWeightVectorChecker::createDetInfiniteHorizonHelper(storm::storage::SparseMatrix const& transitions) const { + STORM_LOG_ASSERT(transitions.getRowGroupCount() == this->transitionMatrix.getRowGroupCount(), "Unexpected size of given matrix."); + // TODO: Right now, there is no dedicated support for "deterministic" Markov automata so we have to pick the nondeterministic one. + auto result = storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper(transitions, this->markovianStates, this->exitRates); + result.setOptimizationDirection(storm::solver::OptimizationDirection::Maximize); + return result; + } template void StandardMaPcaaWeightVectorChecker::boundedPhase(Environment const& env, std::vector const& weightVector, std::vector& weightedRewardVector) { @@ -64,7 +92,7 @@ namespace storm { // Split the preprocessed model into transitions from/to probabilistic/Markovian states. SubModel MS = createSubModel(true, weightedRewardVector); SubModel PS = createSubModel(false, weightedRewardVector); - + // Apply digitization to Markovian transitions ValueType digitizationConstant = getDigitizationConstant(weightVector); digitize(MS, digitizationConstant); @@ -73,23 +101,26 @@ namespace storm { TimeBoundMap upperTimeBounds; digitizeTimeBounds(upperTimeBounds, digitizationConstant); + // Check whether there is a cycle in of probabilistic states + bool acyclic = !storm::utility::graph::hasCycle(PS.toPS); + // Initialize a minMaxSolver to compute an optimal scheduler (w.r.t. PS) for each epoch // No EC elimination is necessary as we assume non-zenoness - std::unique_ptr minMax = initMinMaxSolver(env, PS, weightVector); + std::unique_ptr minMax = initMinMaxSolver(env, PS, acyclic, weightVector); // create a linear equation solver for the model induced by the optimal choice vector. // the solver will be updated whenever the optimal choice vector has changed. - std::unique_ptr linEq = initLinEqSolver(env, PS); + std::unique_ptr linEq = initLinEqSolver(env, PS, acyclic); // Store the optimal choices of PS as computed by the minMax solver. std::vector optimalChoicesAtCurrentEpoch(PS.getNumberOfStates(), std::numeric_limits::max()); // Stores the objectives for which we need to compute values in the current time epoch. 
- storm::storage::BitVector consideredObjectives = this->objectivesWithNoUpperTimeBound; + storm::storage::BitVector consideredObjectives = this->objectivesWithNoUpperTimeBound & ~this->lraObjectives; auto upperTimeBoundIt = upperTimeBounds.begin(); uint_fast64_t currentEpoch = upperTimeBounds.empty() ? 0 : upperTimeBoundIt->first; - while (true) { + while (!storm::utility::resources::isTerminate()) { // Update the objectives that are considered at the current time epoch as well as the (weighted) reward vectors. updateDataToCurrentEpoch(MS, PS, *minMax, consideredObjectives, currentEpoch, weightVector, upperTimeBoundIt, upperTimeBounds); @@ -105,6 +136,7 @@ namespace storm { break; } } + STORM_LOG_WARN_COND(!storm::utility::resources::isTerminate(), "Time-bounded reachability computation aborted."); // compose the results from MS and PS storm::utility::vector::setVectorValues(this->weightedResult, MS.states, MS.weightedSolutionVector); @@ -136,7 +168,7 @@ namespace storm { std::vector const& objRewards = this->actionRewards[objIndex]; std::vector subModelObjRewards; subModelObjRewards.reserve(result.getNumberOfChoices()); - for (auto const& choice : result.choices) { + for (auto choice : result.choices) { subModelObjRewards.push_back(objRewards[choice]); } result.objectiveRewardVectors.push_back(std::move(subModelObjRewards)); @@ -197,7 +229,7 @@ namespace storm { VT delta = smallestNonZeroBound / smallestStepBound; while(true) { bool deltaValid = true; - for (auto const& objIndex : objectivesWithTimeBound) { + for (auto objIndex : objectivesWithTimeBound) { auto const& timeBound = timeBounds[objIndex]; if (timeBound/delta != std::floor(timeBound/delta)) { deltaValid = false; @@ -296,15 +328,21 @@ namespace storm { } template - std::unique_ptr::MinMaxSolverData> StandardMaPcaaWeightVectorChecker::initMinMaxSolver(Environment const& env, SubModel const& PS, std::vector const& weightVector) const { + std::unique_ptr::MinMaxSolverData> StandardMaPcaaWeightVectorChecker::initMinMaxSolver(Environment const& env, SubModel const& PS, bool acyclic, std::vector const& weightVector) const { std::unique_ptr result(new MinMaxSolverData()); + result->env = std::make_unique(env); + // For acyclic models we switch to the more efficient acyclic solver (Unless the solver / method was explicitly specified) + if (acyclic && result->env->solver().minMax().isMethodSetFromDefault() && result->env->solver().minMax().getMethod() != storm::solver::MinMaxMethod::Acyclic) { + STORM_LOG_INFO("Switching to Acyclic linear equation solver method. 
To overwrite this, explicitly specify another solver."); + result->env->solver().minMax().setMethod(storm::solver::MinMaxMethod::Acyclic); + } storm::solver::GeneralMinMaxLinearEquationSolverFactory minMaxSolverFactory; - result->solver = minMaxSolverFactory.create(env, PS.toPS); + result->solver = minMaxSolverFactory.create(*result->env, PS.toPS); result->solver->setHasUniqueSolution(true); result->solver->setHasNoEndComponents(true); // Non-zeno MA result->solver->setTrackScheduler(true); result->solver->setCachingEnabled(true); - auto req = result->solver->getRequirements(env, storm::solver::OptimizationDirection::Maximize, false); + auto req = result->solver->getRequirements(*result->env, storm::solver::OptimizationDirection::Maximize, false); boost::optional lowerBound = this->computeWeightedResultBound(true, weightVector, storm::storage::BitVector(weightVector.size(), true)); if (lowerBound) { result->solver->setLowerBound(lowerBound.get()); @@ -315,6 +353,9 @@ namespace storm { result->solver->setUpperBound(upperBound.get()); req.clearUpperBounds(); } + if (acyclic) { + req.clearAcyclic(); + } STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + req.getEnabledRequirementsAsString() + " not checked."); result->solver->setRequirementsChecked(true); result->solver->setOptimizationDirection(storm::solver::OptimizationDirection::Maximize); @@ -326,15 +367,14 @@ namespace storm { template template ::SupportsExponential, int>::type> - std::unique_ptr::LinEqSolverData> StandardMaPcaaWeightVectorChecker::initLinEqSolver(Environment const& env, SubModel const& PS) const { + std::unique_ptr::LinEqSolverData> StandardMaPcaaWeightVectorChecker::initLinEqSolver(Environment const& env, SubModel const& PS, bool acyclic) const { std::unique_ptr result(new LinEqSolverData()); - result->env = std::make_unique(); - // Unless the solver / method was explicitly specified, we switch it to Native / Power. - // We choose the Power method since we call the solver very frequently on 'easy' inputs and power has little overhead). - if ((result->env->solver().isLinearEquationSolverTypeSetFromDefaultValue() || result->env->solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Native) && (result->env->solver().native().isMethodSetFromDefault() || result->env->solver().native().getMethod() == storm::solver::NativeLinearEquationSolverMethod::Power)) { - STORM_LOG_INFO("Switching to Native/Power linear equation solver method. To overwrite this, explicitly specify another method."); - result->env->solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Native); - result->env->solver().native().setMethod(storm::solver::NativeLinearEquationSolverMethod::Power); + result->env = std::make_unique(env); + result->acyclic = acyclic; + // For acyclic models we switch to the more efficient acyclic solver (Unless the solver / method was explicitly specified) + if (acyclic && result->env->solver().isLinearEquationSolverTypeSetFromDefaultValue() && result->env->solver().getLinearEquationSolverType() != storm::solver::EquationSolverType::Acyclic) { + STORM_LOG_INFO("Switching to Acyclic linear equation solver method. 
To overwrite this, explicitly specify another solver."); + result->env->solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Acyclic); } result->factory = std::make_unique>(); result->b.resize(PS.getNumberOfStates()); @@ -343,7 +383,7 @@ namespace storm { template template ::SupportsExponential, int>::type> - std::unique_ptr::LinEqSolverData> StandardMaPcaaWeightVectorChecker::initLinEqSolver(Environment const& env, SubModel const& PS) const { + std::unique_ptr::LinEqSolverData> StandardMaPcaaWeightVectorChecker::initLinEqSolver(Environment const& env, SubModel const& PS, bool acyclic) const { STORM_LOG_THROW(false, storm::exceptions::InvalidOperationException, "Computing bounded probabilities of MAs is unsupported for this value type."); } @@ -369,7 +409,7 @@ namespace storm { template void StandardMaPcaaWeightVectorChecker::performPSStep(Environment const& env, SubModel& PS, SubModel const& MS, MinMaxSolverData& minMax, LinEqSolverData& linEq, std::vector& optimalChoicesAtCurrentEpoch, storm::storage::BitVector const& consideredObjectives, std::vector const& weightVector) const { // compute a choice vector for the probabilistic states that is optimal w.r.t. the weighted reward vector - minMax.solver->solveEquations(env, PS.weightedSolutionVector, minMax.b); + minMax.solver->solveEquations(*minMax.env, PS.weightedSolutionVector, minMax.b); auto const& newChoices = minMax.solver->getSchedulerChoices(); if (consideredObjectives.getNumberOfSetBits() == 1 && storm::utility::isOne(weightVector[*consideredObjectives.begin()])) { // In this case there is no need to perform the computation on the individual objectives @@ -390,6 +430,11 @@ namespace storm { } linEq.solver = linEq.factory->create(*linEq.env, std::move(linEqMatrix)); linEq.solver->setCachingEnabled(true); + auto req = linEq.solver->getRequirements(*linEq.env); + if (linEq.acyclic) { + req.clearAcyclic(); + } + STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + req.getEnabledRequirementsAsString() + " not checked."); } // Get the results for the individual objectives. 
@@ -443,13 +488,13 @@ namespace storm { template double StandardMaPcaaWeightVectorChecker>::getDigitizationConstant(std::vector const& direction) const; template void StandardMaPcaaWeightVectorChecker>::digitize(StandardMaPcaaWeightVectorChecker>::SubModel& subModel, double const& digitizationConstant) const; template void StandardMaPcaaWeightVectorChecker>::digitizeTimeBounds( StandardMaPcaaWeightVectorChecker>::TimeBoundMap& upperTimeBounds, double const& digitizationConstant); - template std::unique_ptr>::LinEqSolverData> StandardMaPcaaWeightVectorChecker>::initLinEqSolver(Environment const& env, StandardMaPcaaWeightVectorChecker>::SubModel const& PS ) const; + template std::unique_ptr>::LinEqSolverData> StandardMaPcaaWeightVectorChecker>::initLinEqSolver(Environment const& env, StandardMaPcaaWeightVectorChecker>::SubModel const& PS, bool acyclic) const; #ifdef STORM_HAVE_CARL template class StandardMaPcaaWeightVectorChecker>; template storm::RationalNumber StandardMaPcaaWeightVectorChecker>::getDigitizationConstant(std::vector const& direction) const; template void StandardMaPcaaWeightVectorChecker>::digitize(StandardMaPcaaWeightVectorChecker>::SubModel& subModel, storm::RationalNumber const& digitizationConstant) const; template void StandardMaPcaaWeightVectorChecker>::digitizeTimeBounds(StandardMaPcaaWeightVectorChecker>::TimeBoundMap& upperTimeBounds, storm::RationalNumber const& digitizationConstant); - template std::unique_ptr>::LinEqSolverData> StandardMaPcaaWeightVectorChecker>::initLinEqSolver(Environment const& env, StandardMaPcaaWeightVectorChecker>::SubModel const& PS ) const; + template std::unique_ptr>::LinEqSolverData> StandardMaPcaaWeightVectorChecker>::initLinEqSolver(Environment const& env, StandardMaPcaaWeightVectorChecker>::SubModel const& PS, bool acyclic) const; #endif } diff --git a/src/storm/modelchecker/multiobjective/pcaa/StandardMaPcaaWeightVectorChecker.h b/src/storm/modelchecker/multiobjective/pcaa/StandardMaPcaaWeightVectorChecker.h index ee21f1b28..9283102ef 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/StandardMaPcaaWeightVectorChecker.h +++ b/src/storm/modelchecker/multiobjective/pcaa/StandardMaPcaaWeightVectorChecker.h @@ -30,7 +30,9 @@ namespace storm { protected: virtual void initializeModelTypeSpecificData(SparseMaModelType const& model) override; - + virtual storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper createNondetInfiniteHorizonHelper(storm::storage::SparseMatrix const& transitions) const override; + virtual storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper createDetInfiniteHorizonHelper(storm::storage::SparseMatrix const& transitions) const override; + private: /* @@ -64,12 +66,14 @@ namespace storm { * Stores the data that is relevant to invoke the minMaxSolver and retrieve the result. */ struct MinMaxSolverData { + std::unique_ptr env; std::unique_ptr> solver; std::vector b; }; struct LinEqSolverData { std::unique_ptr env; + bool acyclic; std::unique_ptr> factory; std::unique_ptr> solver; std::vector b; @@ -118,15 +122,15 @@ namespace storm { /*! * Initializes the data for the MinMax solver */ - std::unique_ptr initMinMaxSolver(Environment const& env, SubModel const& PS, std::vector const& weightVector) const; + std::unique_ptr initMinMaxSolver(Environment const& env, SubModel const& PS, bool acyclic, std::vector const& weightVector) const; /*! 
* Initializes the data for the LinEq solver */ template ::SupportsExponential, int>::type = 0> - std::unique_ptr initLinEqSolver(Environment const& env, SubModel const& PS) const; + std::unique_ptr initLinEqSolver(Environment const& env, SubModel const& PS, bool acyclic) const; template ::SupportsExponential, int>::type = 0> - std::unique_ptr initLinEqSolver(Environment const& env, SubModel const& PS) const; + std::unique_ptr initLinEqSolver(Environment const& env, SubModel const& PS, bool acyclic) const; /* * Updates the reward vectors within the split model, diff --git a/src/storm/modelchecker/multiobjective/pcaa/StandardMdpPcaaWeightVectorChecker.cpp b/src/storm/modelchecker/multiobjective/pcaa/StandardMdpPcaaWeightVectorChecker.cpp index 96a7e1de6..ef08399db 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/StandardMdpPcaaWeightVectorChecker.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/StandardMdpPcaaWeightVectorChecker.cpp @@ -37,7 +37,7 @@ namespace storm { auto const& cumulativeRewardFormula = formula.getSubformula().asCumulativeRewardFormula(); STORM_LOG_THROW(!cumulativeRewardFormula.isMultiDimensional() && !cumulativeRewardFormula.getTimeBoundReference().isRewardBound(), storm::exceptions::UnexpectedException, "Unexpected type of sub-formula: " << formula.getSubformula()); } else { - STORM_LOG_THROW(formula.getSubformula().isTotalRewardFormula(), storm::exceptions::UnexpectedException, "Unexpected type of sub-formula: " << formula.getSubformula()); + STORM_LOG_THROW(formula.getSubformula().isTotalRewardFormula() || formula.getSubformula().isLongRunAverageRewardFormula(), storm::exceptions::UnexpectedException, "Unexpected type of sub-formula: " << formula.getSubformula()); } typename SparseMdpModelType::RewardModelType const& rewModel = model.getRewardModel(formula.asRewardOperatorFormula().getRewardModelName()); @@ -46,6 +46,18 @@ namespace storm { } } + template + storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper StandardMdpPcaaWeightVectorChecker::createNondetInfiniteHorizonHelper(storm::storage::SparseMatrix const& transitions) const { + STORM_LOG_ASSERT(transitions.getRowGroupCount() == this->transitionMatrix.getRowGroupCount(), "Unexpected size of given matrix."); + return storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper(transitions); + } + + template + storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper StandardMdpPcaaWeightVectorChecker::createDetInfiniteHorizonHelper(storm::storage::SparseMatrix const& transitions) const { + STORM_LOG_ASSERT(transitions.getRowGroupCount() == this->transitionMatrix.getRowGroupCount(), "Unexpected size of given matrix."); + return storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper(transitions); + } + template void StandardMdpPcaaWeightVectorChecker::boundedPhase(Environment const& env,std::vector const& weightVector, std::vector& weightedRewardVector) { // Allocate some memory so this does not need to happen for each time epoch @@ -71,7 +83,7 @@ namespace storm { } // Stores the objectives for which we need to compute values in the current time epoch. - storm::storage::BitVector consideredObjectives = this->objectivesWithNoUpperTimeBound; + storm::storage::BitVector consideredObjectives = this->objectivesWithNoUpperTimeBound & ~this->lraObjectives; auto stepBoundIt = stepBounds.begin(); uint_fast64_t currentEpoch = stepBounds.empty() ? 
0 : stepBoundIt->first; diff --git a/src/storm/modelchecker/multiobjective/pcaa/StandardMdpPcaaWeightVectorChecker.h b/src/storm/modelchecker/multiobjective/pcaa/StandardMdpPcaaWeightVectorChecker.h index 2482d0829..7dcf7ea28 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/StandardMdpPcaaWeightVectorChecker.h +++ b/src/storm/modelchecker/multiobjective/pcaa/StandardMdpPcaaWeightVectorChecker.h @@ -38,6 +38,8 @@ namespace storm { protected: virtual void initializeModelTypeSpecificData(SparseMdpModelType const& model) override; + virtual storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper createNondetInfiniteHorizonHelper(storm::storage::SparseMatrix const& transitions) const override; + virtual storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper createDetInfiniteHorizonHelper(storm::storage::SparseMatrix const& transitions) const override; private: diff --git a/src/storm/modelchecker/multiobjective/pcaa/StandardPcaaWeightVectorChecker.cpp b/src/storm/modelchecker/multiobjective/pcaa/StandardPcaaWeightVectorChecker.cpp index db0fa99f1..13621f683 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/StandardPcaaWeightVectorChecker.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/StandardPcaaWeightVectorChecker.cpp @@ -11,6 +11,8 @@ #include "storm/modelchecker/prctl/helper/DsMpiUpperRewardBoundsComputer.h" #include "storm/modelchecker/prctl/helper/BaierUpperRewardBoundsComputer.h" #include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.h" +#include "storm/settings/SettingsManager.h" +#include "storm/settings/modules/CoreSettings.h" #include "storm/solver/MinMaxLinearEquationSolver.h" #include "storm/utility/graph.h" #include "storm/utility/macros.h" @@ -39,20 +41,20 @@ namespace storm { void StandardPcaaWeightVectorChecker::initialize(preprocessing::SparseMultiObjectivePreprocessorResult const& preprocessorResult) { auto rewardAnalysis = preprocessing::SparseMultiObjectiveRewardAnalysis::analyze(preprocessorResult); STORM_LOG_THROW(rewardAnalysis.rewardFinitenessType != preprocessing::RewardFinitenessType::Infinite, storm::exceptions::NotSupportedException, "There is no Pareto optimal scheduler that yields finite reward for all objectives. This is not supported."); - STORM_LOG_THROW(rewardAnalysis.rewardLessInfinityEStates, storm::exceptions::UnexpectedException, "The set of states with reward < infinity for some scheduler has not been computed during preprocessing."); - STORM_LOG_THROW(preprocessorResult.containsOnlyTrivialObjectives(), storm::exceptions::NotSupportedException, "At least one objective was not reduced to an expected (total or cumulative) reward objective during preprocessing. This is not supported by the considered weight vector checker."); + STORM_LOG_THROW(rewardAnalysis.totalRewardLessInfinityEStates, storm::exceptions::UnexpectedException, "The set of states with reward < infinity for some scheduler has not been computed during preprocessing."); + STORM_LOG_THROW(preprocessorResult.containsOnlyTrivialObjectives(), storm::exceptions::NotSupportedException, "At least one objective was not reduced to an expected (long run, total or cumulative) reward objective during preprocessing. 
This is not supported by the considered weight vector checker."); STORM_LOG_THROW(preprocessorResult.preprocessedModel->getInitialStates().getNumberOfSetBits() == 1, storm::exceptions::NotSupportedException, "The model has multiple initial states."); // Build a subsystem of the preprocessor result model that discards states that yield infinite reward for all schedulers. // We can also merge the states that will have reward zero anyway. - storm::storage::BitVector maybeStates = rewardAnalysis.rewardLessInfinityEStates.get() & ~rewardAnalysis.reward0AStates; - storm::storage::BitVector finiteRewardChoices = preprocessorResult.preprocessedModel->getTransitionMatrix().getRowFilter(rewardAnalysis.rewardLessInfinityEStates.get(), rewardAnalysis.rewardLessInfinityEStates.get()); + storm::storage::BitVector maybeStates = rewardAnalysis.totalRewardLessInfinityEStates.get() & ~rewardAnalysis.reward0AStates; + storm::storage::BitVector finiteTotalRewardChoices = preprocessorResult.preprocessedModel->getTransitionMatrix().getRowFilter(rewardAnalysis.totalRewardLessInfinityEStates.get(), rewardAnalysis.totalRewardLessInfinityEStates.get()); std::set relevantRewardModels; for (auto const& obj : this->objectives) { obj.formula->gatherReferencedRewardModels(relevantRewardModels); } storm::transformer::GoalStateMerger merger(*preprocessorResult.preprocessedModel); - auto mergerResult = merger.mergeTargetAndSinkStates(maybeStates, rewardAnalysis.reward0AStates, storm::storage::BitVector(maybeStates.size(), false), std::vector(relevantRewardModels.begin(), relevantRewardModels.end()), finiteRewardChoices); + auto mergerResult = merger.mergeTargetAndSinkStates(maybeStates, rewardAnalysis.reward0AStates, storm::storage::BitVector(maybeStates.size(), false), std::vector(relevantRewardModels.begin(), relevantRewardModels.end()), finiteTotalRewardChoices); // Initialize data specific for the considered model type initializeModelTypeSpecificData(*mergerResult.model); @@ -60,10 +62,10 @@ namespace storm { // Initilize general data of the model transitionMatrix = std::move(mergerResult.model->getTransitionMatrix()); initialState = *mergerResult.model->getInitialStates().begin(); - reward0EStates = rewardAnalysis.reward0EStates % maybeStates; + totalReward0EStates = rewardAnalysis.totalReward0EStates % maybeStates; if (mergerResult.targetState) { // There is an additional state in the result - reward0EStates.resize(reward0EStates.size() + 1, true); + totalReward0EStates.resize(totalReward0EStates.size() + 1, true); // The overapproximation for the possible ec choices consists of the states that can reach the target states with prob. 0 and the target state itself. 
storm::storage::BitVector targetStateAsVector(transitionMatrix.getRowGroupCount(), false); @@ -75,6 +77,7 @@ namespace storm { } // set data for unbounded objectives + lraObjectives = storm::storage::BitVector(this->objectives.size(), false); objectivesWithNoUpperTimeBound = storm::storage::BitVector(this->objectives.size(), false); actionsWithoutRewardInUnboundedPhase = storm::storage::BitVector(transitionMatrix.getRowCount(), true); for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { @@ -83,6 +86,17 @@ namespace storm { objectivesWithNoUpperTimeBound.set(objIndex, true); actionsWithoutRewardInUnboundedPhase &= storm::utility::vector::filterZero(actionRewards[objIndex]); } + if (formula.getSubformula().isLongRunAverageRewardFormula()) { + lraObjectives.set(objIndex, true); + objectivesWithNoUpperTimeBound.set(objIndex, true); + } + } + + // Set data for LRA objectives (if available) + if (!lraObjectives.empty()) { + lraMecDecomposition = LraMecDecomposition(); + lraMecDecomposition->mecs = storm::storage::MaximalEndComponentDecomposition(transitionMatrix, transitionMatrix.transpose(true), storm::storage::BitVector(transitionMatrix.getRowGroupCount(), true), actionsWithoutRewardInUnboundedPhase); + lraMecDecomposition->auxMecValues.resize(lraMecDecomposition->mecs.size()); } // initialize data for the results @@ -91,29 +105,63 @@ namespace storm { offsetsToUnderApproximation.resize(this->objectives.size(), storm::utility::zero()); offsetsToOverApproximation.resize(this->objectives.size(), storm::utility::zero()); optimalChoices.resize(transitionMatrix.getRowGroupCount(), 0); + + // Print some statistics (if requested) + if (storm::settings::getModule().isShowStatisticsSet()) { + STORM_PRINT_AND_LOG("Weight Vector Checker Statistics:" << std::endl); + STORM_PRINT_AND_LOG("Final preprocessed model has " << transitionMatrix.getRowGroupCount() << " states." << std::endl); + STORM_PRINT_AND_LOG("Final preprocessed model has " << transitionMatrix.getRowCount() << " actions." << std::endl); + if (lraMecDecomposition) { + STORM_PRINT_AND_LOG("Found " << lraMecDecomposition->mecs.size() << " end components that are relevant for LRA-analysis." << std::endl); + uint64_t numLraMecStates = 0; + for (auto const& mec : this->lraMecDecomposition->mecs) { + numLraMecStates += mec.size(); + } + STORM_PRINT_AND_LOG(numLraMecStates << " states lie on such an end component." << std::endl); + } + STORM_PRINT_AND_LOG(std::endl); + } } - template void StandardPcaaWeightVectorChecker::check(Environment const& env, std::vector const& weightVector) { checkHasBeenCalled = true; STORM_LOG_INFO("Invoked WeightVectorChecker with weights " << std::endl << "\t" << storm::utility::vector::toString(storm::utility::vector::convertNumericVector(weightVector))); + // Prepare and invoke weighted infinite horizon (long run average) phase std::vector weightedRewardVector(transitionMatrix.getRowCount(), storm::utility::zero()); - for (auto objIndex : objectivesWithNoUpperTimeBound) { + if (!lraObjectives.empty()) { + boost::optional> weightedStateRewardVector; + for (auto objIndex : lraObjectives) { + ValueType weight = storm::solver::minimize(this->objectives[objIndex].formula->getOptimalityType()) ? 
-weightVector[objIndex] : weightVector[objIndex]; + storm::utility::vector::addScaledVector(weightedRewardVector, actionRewards[objIndex], weight); + if (!stateRewards.empty() && !stateRewards[objIndex].empty()) { + if (!weightedStateRewardVector) { + weightedStateRewardVector = std::vector(transitionMatrix.getRowGroupCount(), storm::utility::zero()); + } + storm::utility::vector::addScaledVector(weightedStateRewardVector.get(), stateRewards[objIndex], weight); + } + } + infiniteHorizonWeightedPhase(env, weightedRewardVector, weightedStateRewardVector); + // Clear all values of the weighted reward vector + weightedRewardVector.assign(weightedRewardVector.size(), storm::utility::zero()); + } + + // Prepare and invoke weighted indefinite horizon (unbounded total reward) phase + auto totalRewardObjectives = objectivesWithNoUpperTimeBound & ~lraObjectives; + for (auto objIndex : totalRewardObjectives) { if (storm::solver::minimize(this->objectives[objIndex].formula->getOptimalityType())) { storm::utility::vector::addScaledVector(weightedRewardVector, actionRewards[objIndex], -weightVector[objIndex]); } else { storm::utility::vector::addScaledVector(weightedRewardVector, actionRewards[objIndex], weightVector[objIndex]); } } - unboundedWeightedPhase(env, weightedRewardVector, weightVector); unboundedIndividualPhase(env, weightVector); // Only invoke boundedPhase if necessarry, i.e., if there is at least one objective with a time bound for (auto const& obj : this->objectives) { - if (!obj.formula->getSubformula().isTotalRewardFormula()) { + if (!obj.formula->getSubformula().isTotalRewardFormula() && !obj.formula->getSubformula().isLongRunAverageRewardFormula()) { boundedPhase(env, weightVector, weightedRewardVector); break; } @@ -151,7 +199,7 @@ namespace storm { storm::storage::Scheduler::ValueType> StandardPcaaWeightVectorChecker::computeScheduler() const { STORM_LOG_THROW(this->checkHasBeenCalled, storm::exceptions::IllegalFunctionCallException, "Tried to retrieve results but check(..) 
has not been called before."); for (auto const& obj : this->objectives) { - STORM_LOG_THROW(obj.formula->getSubformula().isTotalRewardFormula(), storm::exceptions::NotImplementedException, "Scheduler retrival is only implemented for objectives without time-bound."); + STORM_LOG_THROW(obj.formula->getSubformula().isTotalRewardFormula() || obj.formula->getSubformula().isLongRunAverageRewardFormula(), storm::exceptions::NotImplementedException, "Scheduler retrieval is only implemented for objectives without time-bound."); } storm::storage::Scheduler result(this->optimalChoices.size()); @@ -163,6 +211,71 @@ namespace storm { return result; } + template + void computeSchedulerProb1(storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& consideredStates, storm::storage::BitVector const& statesToReach, std::vector& choices, storm::storage::BitVector const* allowedChoices = nullptr) { + + std::vector stack; + storm::storage::BitVector processedStates = statesToReach; + stack.insert(stack.end(), processedStates.begin(), processedStates.end()); + uint64_t currentState = 0; + + while (!stack.empty()) { + currentState = stack.back(); + stack.pop_back(); + + for (auto const& predecessorEntry : backwardTransitions.getRow(currentState)) { + auto predecessor = predecessorEntry.getColumn(); + if (consideredStates.get(predecessor) & !processedStates.get(predecessor)) { + // Find a choice leading to an already processed state (such a choice has to exist since this is a predecessor of the currentState) + auto const& groupStart = transitionMatrix.getRowGroupIndices()[predecessor]; + auto const& groupEnd = transitionMatrix.getRowGroupIndices()[predecessor + 1]; + uint64_t row = allowedChoices ? allowedChoices->getNextSetIndex(groupStart) : groupStart; + for (; row < groupEnd; row = allowedChoices ?
allowedChoices->getNextSetIndex(row + 1) : row + 1) { + bool hasSuccessorInProcessedStates = false; + for (auto const& successorOfPredecessor : transitionMatrix.getRow(row)) { + if (processedStates.get(successorOfPredecessor.getColumn())) { + hasSuccessorInProcessedStates = true; + break; + } + } + if (hasSuccessorInProcessedStates) { + choices[predecessor] = row - groupStart; + processedStates.set(predecessor, true); + stack.push_back(predecessor); + break; + } + } + STORM_LOG_ASSERT(allowedChoices || row < groupEnd, "Unable to find choice at a predecessor of a processed state that leads to a processed state."); + } + } + } + STORM_LOG_ASSERT(consideredStates.isSubsetOf(processedStates), "Not all states have been processed."); + } + + template + void computeSchedulerProb0(storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& consideredStates, storm::storage::BitVector const& statesToAvoid, storm::storage::BitVector const& allowedChoices, std::vector& choices) { + + for (auto state : consideredStates) { + auto const& groupStart = transitionMatrix.getRowGroupIndices()[state]; + auto const& groupEnd = transitionMatrix.getRowGroupIndices()[state + 1]; + bool choiceFound = false; + for (uint64_t row = allowedChoices.getNextSetIndex(groupStart); row < groupEnd; row = allowedChoices.getNextSetIndex(row + 1)) { + choiceFound = true; + for (auto const& element : transitionMatrix.getRow(row)) { + if (statesToAvoid.get(element.getColumn())) { + choiceFound = false; + break; + } + } + if (choiceFound) { + choices[state] = row - groupStart; + break; + } + } + STORM_LOG_ASSERT(choiceFound, "Unable to find choice for a state."); + } + } + template std::vector computeValidInitialScheduler(storm::storage::SparseMatrix const& matrix, storm::storage::BitVector const& rowsWithSumLessOne) { std::vector result(matrix.getRowGroupCount()); @@ -175,50 +288,110 @@ namespace storm { processedStates.set(state, true); } } - std::vector stack(processedStates.begin(), processedStates.end()); - while (!stack.empty()) { - uint64_t current = stack.back(); - stack.pop_back(); - STORM_LOG_ASSERT(processedStates.get(current), "states on the stack shall be processed."); - for (auto const& entry : backwardsTransitions.getRow(current)) { - uint64_t pred = entry.getColumn(); - if (!processedStates.get(pred)) { - // Find a choice that leads to a processed state - uint64_t predChoice = groups[pred]; - bool foundSuccessor = false; - for (; predChoice < groups[pred + 1]; ++predChoice) { - for (auto const& predEntry : matrix.getRow(predChoice)) { - if (processedStates.get(predEntry.getColumn())) { - foundSuccessor = true; - break; - } - } - if (foundSuccessor) { - break; - } - } - STORM_LOG_ASSERT(foundSuccessor && predChoice < groups[pred + 1], "Predecessor of a processed state should have a processed successor"); - result[pred] = predChoice - groups[pred]; - processedStates.set(pred, true); - stack.push_back(pred); - } + + computeSchedulerProb1(matrix, backwardsTransitions, ~processedStates, processedStates, result); + return result; + } + + + + /*! + * Computes a scheduler taking the choices from the given set only finitely often + * @param safeStates it is assumed that reaching such a state is unproblematic. The choice for these states is not set. 
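+ * @param finitelyOftenChoices the choices that the resulting scheduler may take at most finitely often before avoiding them for good + * @param choices the vector in which the computed choices are stored; entries for safeStates remain untouched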
+ * @pre such a scheduler exists + */ + template + void computeSchedulerFinitelyOften(storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& finitelyOftenChoices, storm::storage::BitVector safeStates, std::vector& choices) { + auto badStates = transitionMatrix.getRowGroupFilter(finitelyOftenChoices, true) & ~safeStates; + // badStates shall only be reached finitely often + + auto reachBadWithProbGreater0AStates = storm::utility::graph::performProbGreater0A(transitionMatrix, transitionMatrix.getRowGroupIndices(), backwardTransitions, ~safeStates, badStates, false, 0, ~finitelyOftenChoices); + // States in ~reachBadWithProbGreater0AStates can avoid bad states forever by only taking ~finitelyOftenChoices. + // We compute a scheduler for these states achieving exactly this (but we exclude the safe states) + auto avoidBadStates = ~reachBadWithProbGreater0AStates & ~safeStates; + computeSchedulerProb0(transitionMatrix, backwardTransitions, avoidBadStates, reachBadWithProbGreater0AStates, ~finitelyOftenChoices, choices); + + // We need to take care of states that will reach a bad state with prob greater 0 (including the bad states themselves). + // due to the precondition, we know that it has to be possible to eventually avoid the bad states for ever. + // Perform a backwards search from the avoid states and store choices with prob. 1 + computeSchedulerProb1(transitionMatrix, backwardTransitions, reachBadWithProbGreater0AStates, avoidBadStates | safeStates, choices); + } + + template + void StandardPcaaWeightVectorChecker::infiniteHorizonWeightedPhase(Environment const& env, std::vector const& weightedActionRewardVector, boost::optional> const& weightedStateRewardVector) { + // Compute the optimal (weighted) lra value for each mec, keeping track of the optimal choices + STORM_LOG_ASSERT(lraMecDecomposition, "Mec decomposition for lra computations not initialized."); + storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper helper = createNondetInfiniteHorizonHelper(this->transitionMatrix); + helper.provideLongRunComponentDecomposition(lraMecDecomposition->mecs); + helper.setOptimizationDirection(storm::solver::OptimizationDirection::Maximize); + helper.setProduceScheduler(true); + for (uint64_t mecIndex = 0; mecIndex < lraMecDecomposition->mecs.size(); ++mecIndex) { + auto const& mec = lraMecDecomposition->mecs[mecIndex]; + auto actionValueGetter = [&weightedActionRewardVector] (uint64_t const& a) { return weightedActionRewardVector[a]; }; + typename storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper::ValueGetter stateValueGetter; + if (weightedStateRewardVector) { + stateValueGetter = [&weightedStateRewardVector] (uint64_t const& s) { return weightedStateRewardVector.get()[s]; }; + } else { + stateValueGetter = [] (uint64_t const&) { return storm::utility::zero(); }; } + lraMecDecomposition->auxMecValues[mecIndex] = helper.computeLraForComponent(env, stateValueGetter, actionValueGetter, mec); } - return result; + // Extract the produced optimal choices for the MECs + this->optimalChoices = std::move(helper.getProducedOptimalChoices()); } template void StandardPcaaWeightVectorChecker::unboundedWeightedPhase(Environment const& env, std::vector const& weightedRewardVector, std::vector const& weightVector) { - - if (this->objectivesWithNoUpperTimeBound.empty() || !storm::utility::vector::hasNonZeroEntry(weightedRewardVector)) { - this->weightedResult = 
std::vector(transitionMatrix.getRowGroupCount(), storm::utility::zero()); - this->optimalChoices = std::vector(transitionMatrix.getRowGroupCount(), 0); + // Catch the case where all values on the RHS of the MinMax equation system are zero. + if (this->objectivesWithNoUpperTimeBound.empty() || ((this->lraObjectives.empty() || !storm::utility::vector::hasNonZeroEntry(lraMecDecomposition->auxMecValues)) && !storm::utility::vector::hasNonZeroEntry(weightedRewardVector))) { + this->weightedResult.assign(transitionMatrix.getRowGroupCount(), storm::utility::zero()); + storm::storage::BitVector statesInLraMec(transitionMatrix.getRowGroupCount(), false); + if (this->lraMecDecomposition) { + for (auto const& mec : this->lraMecDecomposition->mecs) { + for (auto const& sc : mec) { + statesInLraMec.set(sc.first, true); + } + } + } + // Get an arbitrary scheduler that yields finite reward for all objectives + computeSchedulerFinitelyOften(transitionMatrix, transitionMatrix.transpose(true), ~actionsWithoutRewardInUnboundedPhase, statesInLraMec, this->optimalChoices); return; } updateEcQuotient(weightedRewardVector); + // Set up the choice values storm::utility::vector::selectVectorValues(ecQuotient->auxChoiceValues, ecQuotient->ecqToOriginalChoiceMapping, weightedRewardVector); + std::map ecqStateToOptimalMecMap; + if (!lraObjectives.empty()) { + // We also need to assign a value for each ecQuotientChoice that corresponds to "staying" in the eliminated EC. (at this point these choices should all have a value of zero). + // Since each of the eliminated ECs has to contain *at least* one LRA EC, we need to find the largest value among the contained LRA ECs + storm::storage::BitVector foundEcqChoices(ecQuotient->matrix.getRowCount(), false); // keeps track of choices we have already seen before + for (uint64_t mecIndex = 0; mecIndex < lraMecDecomposition->mecs.size(); ++mecIndex) { + auto const& mec = lraMecDecomposition->mecs[mecIndex]; + auto const& mecValue = lraMecDecomposition->auxMecValues[mecIndex]; + uint64_t ecqState = ecQuotient->originalToEcqStateMapping[mec.begin()->first]; + if (ecqState >= ecQuotient->matrix.getRowGroupCount()) { + // The mec was not part of the ecquotient. This means that it must have value 0. + // No further processing is needed. + continue; + } + uint64_t ecqChoice = ecQuotient->ecqStayInEcChoices.getNextSetIndex(ecQuotient->matrix.getRowGroupIndices()[ecqState]); + STORM_LOG_ASSERT(ecqChoice < ecQuotient->matrix.getRowGroupIndices()[ecqState + 1], "Unable to find choice that represents staying inside the (eliminated) ec."); + auto& ecqChoiceValue = ecQuotient->auxChoiceValues[ecqChoice]; + auto insertionRes = ecqStateToOptimalMecMap.emplace(ecqState, mecIndex); + if (insertionRes.second) { + // We have seen this ecqState for the first time. 
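+ // (At this point the 'stay' choice still has value zero, presumably because the eliminated EC only contains choices with zero weighted total reward; it is now overwritten with the weighted LRA value of the MEC.)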
+ STORM_LOG_ASSERT(storm::utility::isZero(ecqChoiceValue), "Expected a total reward of zero for choices that represent staying in an EC for ever."); + ecqChoiceValue = mecValue; + } else { + if (mecValue > ecqChoiceValue) { // found a larger value + ecqChoiceValue = mecValue; + insertionRes.first->second = mecIndex; + } + } + } + } storm::solver::GeneralMinMaxLinearEquationSolverFactory solverFactory; std::unique_ptr> solver = solverFactory.create(env, ecQuotient->matrix); @@ -246,7 +419,7 @@ namespace storm { solver->solveEquations(env, ecQuotient->auxStateValues, ecQuotient->auxChoiceValues); this->weightedResult = std::vector(transitionMatrix.getRowGroupCount()); - transformReducedSolutionToOriginalModel(ecQuotient->matrix, ecQuotient->auxStateValues, solver->getSchedulerChoices(), ecQuotient->ecqToOriginalChoiceMapping, ecQuotient->originalToEcqStateMapping, this->weightedResult, this->optimalChoices); + transformEcqSolutionToOriginalModel(ecQuotient->auxStateValues, solver->getSchedulerChoices(), ecqStateToOptimalMecMap, this->weightedResult, this->optimalChoices); } template @@ -263,11 +436,14 @@ namespace storm { } } } else { - storm::storage::SparseMatrix deterministicMatrix = transitionMatrix.selectRowsFromRowGroups(this->optimalChoices, true); + storm::storage::SparseMatrix deterministicMatrix = transitionMatrix.selectRowsFromRowGroups(this->optimalChoices, true); // TODO: why diagonal entries? storm::storage::SparseMatrix deterministicBackwardTransitions = deterministicMatrix.transpose(); - std::vector deterministicStateRewards(deterministicMatrix.getRowCount()); + std::vector deterministicStateRewards(deterministicMatrix.getRowCount()); // allocate here storm::solver::GeneralLinearEquationSolverFactory linearEquationSolverFactory; + auto infiniteHorizonHelper = createDetInfiniteHorizonHelper(deterministicMatrix); + infiniteHorizonHelper.provideBackwardTransitions(deterministicBackwardTransitions); + // We compute an estimate for the results of the individual objectives which is obtained from the weighted result and the result of the objectives computed so far. // Note that weightedResult = Sum_{i=1}^{n} w_i * objectiveResult_i. 
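+ // Hence, for an unchecked total reward objective with non-zero weight, weightedSumOfUncheckedObjectives divided by sumOfWeightsOfUncheckedObjectives serves as an initial estimate of its result (negated for minimizing objectives and clipped to the known result bounds below).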
std::vector weightedSumOfUncheckedObjectives = weightedResult; @@ -278,58 +454,68 @@ namespace storm { if (objectivesWithNoUpperTimeBound.get(objIndex)) { offsetsToUnderApproximation[objIndex] = storm::utility::zero(); offsetsToOverApproximation[objIndex] = storm::utility::zero(); - storm::utility::vector::selectVectorValues(deterministicStateRewards, this->optimalChoices, transitionMatrix.getRowGroupIndices(), actionRewards[objIndex]); - storm::storage::BitVector statesWithRewards = ~storm::utility::vector::filterZero(deterministicStateRewards); - // As maybestates we pick the states from which a state with reward is reachable - storm::storage::BitVector maybeStates = storm::utility::graph::performProbGreater0(deterministicBackwardTransitions, storm::storage::BitVector(deterministicMatrix.getRowCount(), true), statesWithRewards); - - // Compute the estimate for this objective - if (!storm::utility::isZero(weightVector[objIndex])) { - objectiveResults[objIndex] = weightedSumOfUncheckedObjectives; - ValueType scalingFactor = storm::utility::one() / sumOfWeightsOfUncheckedObjectives; - if (storm::solver::minimize(obj.formula->getOptimalityType())) { - scalingFactor *= -storm::utility::one(); + if (lraObjectives.get(objIndex)) { + auto actionValueGetter = [&] (uint64_t const& a) { return actionRewards[objIndex][transitionMatrix.getRowGroupIndices()[a] + this->optimalChoices[a]]; }; + typename storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper::ValueGetter stateValueGetter; + if (stateRewards.empty() || stateRewards[objIndex].empty()) { + stateValueGetter = [] (uint64_t const&) { return storm::utility::zero(); }; + } else { + stateValueGetter = [&] (uint64_t const& s) { return stateRewards[objIndex][s]; }; } - storm::utility::vector::scaleVectorInPlace(objectiveResults[objIndex], scalingFactor); - storm::utility::vector::clip(objectiveResults[objIndex], obj.lowerResultBound, obj.upperResultBound); - } - // Make sure that the objectiveResult is initialized correctly - objectiveResults[objIndex].resize(transitionMatrix.getRowGroupCount(), storm::utility::zero()); - - if (!maybeStates.empty()) { - bool needEquationSystem = linearEquationSolverFactory.getEquationProblemFormat(env) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem; - storm::storage::SparseMatrix submatrix = deterministicMatrix.getSubmatrix( - true, maybeStates, maybeStates, needEquationSystem); - if (needEquationSystem) { - // Converting the matrix from the fixpoint notation to the form needed for the equation - // system. That is, we go from x = A*x + b to (I-A)x = b. - submatrix.convertToEquationSystem(); - } - - // Prepare solution vector and rhs of the equation system. - std::vector x = storm::utility::vector::filterVector(objectiveResults[objIndex], maybeStates); - std::vector b = storm::utility::vector::filterVector(deterministicStateRewards, maybeStates); - - // Now solve the resulting equation system. 
- std::unique_ptr> solver = linearEquationSolverFactory.create(env, submatrix); - auto req = solver->getRequirements(env); - solver->clearBounds(); - storm::storage::BitVector submatrixRowsWithSumLessOne = deterministicMatrix.getRowFilter(maybeStates, maybeStates) % maybeStates; - submatrixRowsWithSumLessOne.complement(); - this->setBoundsToSolver(*solver, req.lowerBounds(), req.upperBounds(), objIndex, submatrix, submatrixRowsWithSumLessOne, b); - if (solver->hasLowerBound()) { - req.clearLowerBounds(); + objectiveResults[objIndex] = infiniteHorizonHelper.computeLongRunAverageValues(env, stateValueGetter, actionValueGetter); + } else { // i.e. a total reward objective + storm::utility::vector::selectVectorValues(deterministicStateRewards, this->optimalChoices, transitionMatrix.getRowGroupIndices(), actionRewards[objIndex]); + storm::storage::BitVector statesWithRewards = ~storm::utility::vector::filterZero(deterministicStateRewards); + // As maybestates we pick the states from which a state with reward is reachable + storm::storage::BitVector maybeStates = storm::utility::graph::performProbGreater0(deterministicBackwardTransitions, storm::storage::BitVector(deterministicMatrix.getRowCount(), true), statesWithRewards); + + // Compute the estimate for this objective + if (!storm::utility::isZero(weightVector[objIndex])) { + objectiveResults[objIndex] = weightedSumOfUncheckedObjectives; + ValueType scalingFactor = storm::utility::one() / sumOfWeightsOfUncheckedObjectives; + if (storm::solver::minimize(obj.formula->getOptimalityType())) { + scalingFactor *= -storm::utility::one(); + } + storm::utility::vector::scaleVectorInPlace(objectiveResults[objIndex], scalingFactor); + storm::utility::vector::clip(objectiveResults[objIndex], obj.lowerResultBound, obj.upperResultBound); } - if (solver->hasUpperBound()) { - req.clearUpperBounds(); + // Make sure that the objectiveResult is initialized correctly + objectiveResults[objIndex].resize(transitionMatrix.getRowGroupCount(), storm::utility::zero()); + + if (!maybeStates.empty()) { + bool needEquationSystem = linearEquationSolverFactory.getEquationProblemFormat(env) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem; + storm::storage::SparseMatrix submatrix = deterministicMatrix.getSubmatrix( + true, maybeStates, maybeStates, needEquationSystem); + if (needEquationSystem) { + // Converting the matrix from the fixpoint notation to the form needed for the equation + // system. That is, we go from x = A*x + b to (I-A)x = b. + submatrix.convertToEquationSystem(); + } + + // Prepare solution vector and rhs of the equation system. + std::vector x = storm::utility::vector::filterVector(objectiveResults[objIndex], maybeStates); + std::vector b = storm::utility::vector::filterVector(deterministicStateRewards, maybeStates); + + // Now solve the resulting equation system. 
+ std::unique_ptr> solver = linearEquationSolverFactory.create(env, submatrix); + auto req = solver->getRequirements(env); + solver->clearBounds(); + storm::storage::BitVector submatrixRowsWithSumLessOne = deterministicMatrix.getRowFilter(maybeStates, maybeStates) % maybeStates; + submatrixRowsWithSumLessOne.complement(); + this->setBoundsToSolver(*solver, req.lowerBounds(), req.upperBounds(), objIndex, submatrix, submatrixRowsWithSumLessOne, b); + if (solver->hasLowerBound()) { + req.clearLowerBounds(); + } + if (solver->hasUpperBound()) { + req.clearUpperBounds(); + } + STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + req.getEnabledRequirementsAsString() + " not checked."); + solver->solveEquations(env, x, b); + // Set the result for this objective accordingly + storm::utility::vector::setVectorValues(objectiveResults[objIndex], maybeStates, x); } - STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + req.getEnabledRequirementsAsString() + " not checked."); - solver->solveEquations(env, x, b); - // Set the result for this objective accordingly - storm::utility::vector::setVectorValues(objectiveResults[objIndex], maybeStates, x); + storm::utility::vector::setVectorValues(objectiveResults[objIndex], ~maybeStates, storm::utility::zero()); } - storm::utility::vector::setVectorValues(objectiveResults[objIndex], ~maybeStates, storm::utility::zero()); - // Update the estimate for the next objectives. if (!storm::utility::isZero(weightVector[objIndex])) { storm::utility::vector::addScaledVector(weightedSumOfUncheckedObjectives, objectiveResults[objIndex], -weightVector[objIndex]); @@ -345,20 +531,32 @@ namespace storm { template void StandardPcaaWeightVectorChecker::updateEcQuotient(std::vector const& weightedRewardVector) { // Check whether we need to update the currently cached ecElimResult - storm::storage::BitVector newReward0Choices = storm::utility::vector::filterZero(weightedRewardVector); + storm::storage::BitVector newTotalReward0Choices = storm::utility::vector::filterZero(weightedRewardVector); + storm::storage::BitVector zeroLraRewardChoices(weightedRewardVector.size(), true); + if (lraMecDecomposition) { + for (uint64_t mecIndex = 0; mecIndex < lraMecDecomposition->mecs.size(); ++mecIndex) { + if (!storm::utility::isZero(lraMecDecomposition->auxMecValues[mecIndex])) { + // The mec has a non-zero value, so flag all its choices as non-zero + auto const& mec = lraMecDecomposition->mecs[mecIndex]; + for (auto const& stateChoices : mec) { + for (auto const& choice : stateChoices.second) { + zeroLraRewardChoices.set(choice, false); + } + } + } + } + } + storm::storage::BitVector newReward0Choices = newTotalReward0Choices & zeroLraRewardChoices; if (!ecQuotient || ecQuotient->origReward0Choices != newReward0Choices) { // It is sufficient to consider the states from which a transition with non-zero reward is reachable. (The remaining states always have reward zero). 
- storm::storage::BitVector nonZeroRewardStates(transitionMatrix.getRowGroupCount(), false); - for (uint_fast64_t state = 0; state < transitionMatrix.getRowGroupCount(); ++state){ - if (newReward0Choices.getNextUnsetIndex(transitionMatrix.getRowGroupIndices()[state]) < transitionMatrix.getRowGroupIndices()[state+1]) { - nonZeroRewardStates.set(state); - } - } + auto nonZeroRewardStates = transitionMatrix.getRowGroupFilter(newReward0Choices, true); + nonZeroRewardStates.complement(); storm::storage::BitVector subsystemStates = storm::utility::graph::performProbGreater0E(transitionMatrix.transpose(true), storm::storage::BitVector(transitionMatrix.getRowGroupCount(), true), nonZeroRewardStates); - // Remove neutral end components, i.e., ECs in which no reward is earned. - auto ecElimResult = storm::transformer::EndComponentEliminator::transform(transitionMatrix, subsystemStates, ecChoicesHint & newReward0Choices, reward0EStates); + // Remove neutral end components, i.e., ECs in which no total reward is earned. + // Note that such ECs contain one (or maybe more) LRA ECs. + auto ecElimResult = storm::transformer::EndComponentEliminator::transform(transitionMatrix, subsystemStates, ecChoicesHint & newTotalReward0Choices, totalReward0EStates); storm::storage::BitVector rowsWithSumLessOne(ecElimResult.matrix.getRowCount(), false); for (uint64_t row = 0; row < rowsWithSumLessOne.size(); ++row) { @@ -378,12 +576,19 @@ namespace storm { ecQuotient->matrix = std::move(ecElimResult.matrix); ecQuotient->ecqToOriginalChoiceMapping = std::move(ecElimResult.newToOldRowMapping); ecQuotient->originalToEcqStateMapping = std::move(ecElimResult.oldToNewStateMapping); + ecQuotient->ecqToOriginalStateMapping.resize(ecQuotient->matrix.getRowGroupCount()); + for (uint64_t state = 0; state < ecQuotient->originalToEcqStateMapping.size(); ++state) { + uint64_t ecqState = ecQuotient->originalToEcqStateMapping[state]; + if (ecqState < ecQuotient->matrix.getRowGroupCount()) { + ecQuotient->ecqToOriginalStateMapping[ecqState].insert(state); + } + } + ecQuotient->ecqStayInEcChoices = std::move(ecElimResult.sinkRows); ecQuotient->origReward0Choices = std::move(newReward0Choices); ecQuotient->rowsWithSumLessOne = std::move(rowsWithSumLessOne); - ecQuotient->auxStateValues.reserve(transitionMatrix.getRowGroupCount()); ecQuotient->auxStateValues.resize(ecQuotient->matrix.getRowGroupCount()); - ecQuotient->auxChoiceValues.reserve(transitionMatrix.getRowCount()); ecQuotient->auxChoiceValues.resize(ecQuotient->matrix.getRowCount()); + } } @@ -405,14 +610,26 @@ namespace storm { template void StandardPcaaWeightVectorChecker::setBoundsToSolver(storm::solver::AbstractEquationSolver& solver, bool requiresLower, bool requiresUpper, std::vector const& weightVector, storm::storage::BitVector const& objectiveFilter, storm::storage::SparseMatrix const& transitions, storm::storage::BitVector const& rowsWithSumLessOne, std::vector const& rewards) const { - + // Check whether bounds are already available - boost::optional lowerBound = this->computeWeightedResultBound(true, weightVector, objectiveFilter); + boost::optional lowerBound = this->computeWeightedResultBound(true, weightVector, objectiveFilter & ~lraObjectives); if (lowerBound) { + if (!lraObjectives.empty()) { + auto min = std::min_element(lraMecDecomposition->auxMecValues.begin(), lraMecDecomposition->auxMecValues.end()); + if (min != lraMecDecomposition->auxMecValues.end()) { + lowerBound.get() += *min; + } + } solver.setLowerBound(lowerBound.get()); } boost::optional upperBound 
= this->computeWeightedResultBound(false, weightVector, objectiveFilter); if (upperBound) { + if (!lraObjectives.empty()) { + auto max = std::max_element(lraMecDecomposition->auxMecValues.begin(), lraMecDecomposition->auxMecValues.end()); + if (max != lraMecDecomposition->auxMecValues.end()) { + upperBound.get() += *max; + } + } solver.setUpperBound(upperBound.get()); } @@ -426,7 +643,7 @@ namespace storm { // Compute the one step target probs std::vector oneStepTargetProbs(transitions.getRowCount(), storm::utility::zero()); - for (auto const& row : rowsWithSumLessOne) { + for (auto row : rowsWithSumLessOne) { oneStepTargetProbs[row] = storm::utility::one() - transitions.getRowSum(row); } @@ -474,100 +691,153 @@ namespace storm { } template - void StandardPcaaWeightVectorChecker::transformReducedSolutionToOriginalModel(storm::storage::SparseMatrix const& reducedMatrix, - std::vector const& reducedSolution, - std::vector const& reducedOptimalChoices, - std::vector const& reducedToOriginalChoiceMapping, - std::vector const& originalToReducedStateMapping, - std::vector& originalSolution, - std::vector& originalOptimalChoices) const { - - storm::storage::BitVector bottomStates(transitionMatrix.getRowGroupCount(), false); - storm::storage::BitVector statesThatShouldStayInTheirEC(transitionMatrix.getRowGroupCount(), false); - storm::storage::BitVector statesWithUndefSched(transitionMatrix.getRowGroupCount(), false); - - // Handle all the states for which the choice in the original model is uniquely given by the choice in the reduced model - // Also store some information regarding the remaining states - for (uint_fast64_t state = 0; state < transitionMatrix.getRowGroupCount(); ++state) { - // Check if the state exists in the reduced model, i.e., the mapping retrieves a valid index - uint_fast64_t stateInReducedModel = originalToReducedStateMapping[state]; - if (stateInReducedModel < reducedMatrix.getRowGroupCount()) { - originalSolution[state] = reducedSolution[stateInReducedModel]; - uint_fast64_t chosenRowInReducedModel = reducedMatrix.getRowGroupIndices()[stateInReducedModel] + reducedOptimalChoices[stateInReducedModel]; - uint_fast64_t chosenRowInOriginalModel = reducedToOriginalChoiceMapping[chosenRowInReducedModel]; - // Check if the state is a bottom state, i.e., the chosen row stays inside its EC. - bool stateIsBottom = reward0EStates.get(state); - for (auto const& entry : transitionMatrix.getRow(chosenRowInOriginalModel)) { - stateIsBottom &= originalToReducedStateMapping[entry.getColumn()] == stateInReducedModel; - } - if (stateIsBottom) { - bottomStates.set(state); - statesThatShouldStayInTheirEC.set(state); - } else { - // Check if the chosen row originaly belonged to the current state (and not to another state of the EC) - if (chosenRowInOriginalModel >= transitionMatrix.getRowGroupIndices()[state] && - chosenRowInOriginalModel < transitionMatrix.getRowGroupIndices()[state+1]) { - originalOptimalChoices[state] = chosenRowInOriginalModel - transitionMatrix.getRowGroupIndices()[state]; + void StandardPcaaWeightVectorChecker::transformEcqSolutionToOriginalModel(std::vector const& ecqSolution, + std::vector const& ecqOptimalChoices, + std::map const& ecqStateToOptimalMecMap, + std::vector& originalSolution, + std::vector& originalOptimalChoices) const { + + auto backwardsTransitions = transitionMatrix.transpose(true); + + // Keep track of states for which no choice has been set yet. 
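+ // (In the loop below, an ecq choice that stays inside an eliminated EC is realized by either steering the EC towards its optimal LRA MEC or by picking zero-reward choices, whereas an ecq choice that leaves the EC is taken at the original state it belongs to while all other states of the EC are scheduled to reach that state almost surely.)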
+ storm::storage::BitVector unprocessedStates(transitionMatrix.getRowGroupCount(), true); + + // For each eliminated ec, keep track of the states (within the ec) that we want to reach and the states for which a choice needs to be set + // (Declared already at this point to avoid expensive allocations in each loop iteration) + storm::storage::BitVector ecStatesToReach(transitionMatrix.getRowGroupCount(), false); + storm::storage::BitVector ecStatesToProcess(transitionMatrix.getRowGroupCount(), false); + + // Run through each state of the ec quotient as well as the associated state(s) of the original model + for (uint64_t ecqState = 0; ecqState < ecqSolution.size(); ++ecqState) { + uint64_t ecqChoice = ecQuotient->matrix.getRowGroupIndices()[ecqState] + ecqOptimalChoices[ecqState]; + uint_fast64_t origChoice = ecQuotient->ecqToOriginalChoiceMapping[ecqChoice]; + auto const& origStates = ecQuotient->ecqToOriginalStateMapping[ecqState]; + STORM_LOG_ASSERT(!origStates.empty(), "Unexpected empty set of original states."); + if (ecQuotient->ecqStayInEcChoices.get(ecqChoice)) { + // We stay in the current state(s) forever (End component) + // We need to set choices in a way that (i) the optimal LRA Mec is reached (if there is any) and (ii) 0 total reward is collected. + if (!ecqStateToOptimalMecMap.empty()) { + // The current ecqState represents an eliminated EC; we need to stay in this EC and make sure that optimal MEC decisions are performed within this EC. + STORM_LOG_ASSERT(ecqStateToOptimalMecMap.count(ecqState) > 0, "No Lra Mec associated to given eliminated EC"); + auto const& lraMec = lraMecDecomposition->mecs[ecqStateToOptimalMecMap.at(ecqState)]; + if (lraMec.size() == origStates.size()) { + // LRA mec and eliminated EC coincide + for (auto const& state : origStates) { + STORM_LOG_ASSERT(lraMec.containsState(state), "Expected state to be contained in the lra mec."); + // Note that the optimal choice for this state has already been set in the infinite horizon phase. + unprocessedStates.set(state, false); + originalSolution[state] = ecqSolution[ecqState]; + } } else { - statesWithUndefSched.set(state); - statesThatShouldStayInTheirEC.set(state); + // LRA mec is a proper subset of the eliminated ec. There are also other states for which we have to set choices leading to the LRA MEC inside. + STORM_LOG_ASSERT(lraMec.size() < origStates.size(), "Lra Mec (" << lraMec.size() << " states) should be a proper subset of the eliminated ec (" << origStates.size() << " states)."); + for (auto const& state : origStates) { + if (lraMec.containsState(state)) { + ecStatesToReach.set(state, true); + // Note that the optimal choice for this state has already been set in the infinite horizon phase. + } else { + ecStatesToProcess.set(state, true); + } + unprocessedStates.set(state, false); + originalSolution[state] = ecqSolution[ecqState]; + } + computeSchedulerProb1(transitionMatrix, backwardsTransitions, ecStatesToProcess, ecStatesToReach, originalOptimalChoices); + // Clear bitvectors for next ecqState.
+ ecStatesToProcess.clear(); + ecStatesToReach.clear(); + } + } else { + // If there is no LRA Mec to reach, we just need to make sure that finite total reward is collected for all objectives + // In this branch our BitVectors have a slightly different meaning, so we create more readable aliases + storm::storage::BitVector& ecStatesToAvoid = ecStatesToReach; + bool needSchedulerComputation = false; + STORM_LOG_ASSERT(storm::utility::isZero(ecqSolution[ecqState]), "Solution for state that stays inside EC must be zero. Got " << ecqSolution[ecqState] << " instead."); + for (auto const& state : origStates) { + originalSolution[state] = storm::utility::zero(); // i.e. ecqSolution[ecqState]; + ecStatesToProcess.set(state, true); + } + auto validChoices = transitionMatrix.getRowFilter(ecStatesToProcess, ecStatesToProcess); + auto valid0RewardChoices = validChoices & actionsWithoutRewardInUnboundedPhase; + for (auto const& state : origStates) { + auto groupStart = transitionMatrix.getRowGroupIndices()[state]; + auto groupEnd = transitionMatrix.getRowGroupIndices()[state + 1]; + auto nextValidChoice = valid0RewardChoices.getNextSetIndex(groupStart); + if (nextValidChoice < groupEnd) { + originalOptimalChoices[state] = nextValidChoice - groupStart; + } else { + // this state should not be reached infinitely often + ecStatesToAvoid.set(state, true); + needSchedulerComputation = true; + } + } + if (needSchedulerComputation) { + // There are ec states which we should not visit infinitely often + auto ecStatesThatCanAvoid = storm::utility::graph::performProbGreater0A(transitionMatrix, transitionMatrix.getRowGroupIndices(), backwardsTransitions, ecStatesToProcess, ecStatesToAvoid, false, 0, valid0RewardChoices); + ecStatesThatCanAvoid.complement(); + // Set the choice for all states that can achieve value 0 + computeSchedulerProb0(transitionMatrix, backwardsTransitions, ecStatesThatCanAvoid, ecStatesToAvoid, valid0RewardChoices, originalOptimalChoices); + // Set the choice for all remaining states + computeSchedulerProb1(transitionMatrix, backwardsTransitions, ecStatesToProcess & ~ecStatesToAvoid, ecStatesToAvoid, originalOptimalChoices, &validChoices); } + ecStatesToAvoid.clear(); + ecStatesToProcess.clear(); } } else { - // if the state does not exist in the reduced model, it means that the (weighted) result is always zero, independent of the scheduler. - originalSolution[state] = storm::utility::zero(); - // However, it might be the case that infinite reward is induced for an objective with weight 0. - // To avoid this, all possible bottom states are made bottom and the remaining states have to reach a bottom state with prob. one - if (reward0EStates.get(state)) { - bottomStates.set(state); + // We eventually leave the current state(s) + // In this case, we can safely take the origChoice at the corresponding state (say 's'). + // For all other origStates associated with ecqState (if there are any), we make sure that the state 's' is reached almost surely. 
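+ // (If the eliminated EC corresponds to a single original state, the chosen exit action can be copied directly; see the else branch below.)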
+ if (origStates.size() > 1) { + for (auto const& state : origStates) { + // Check if the orig choice originates from this state + auto groupStart = transitionMatrix.getRowGroupIndices()[state]; + auto groupEnd = transitionMatrix.getRowGroupIndices()[state + 1]; + if (origChoice >= groupStart && origChoice < groupEnd) { + originalOptimalChoices[state] = origChoice - groupStart; + ecStatesToReach.set(state, true); + } else { + STORM_LOG_ASSERT(origStates.size() > 1, "Multiple original states expected."); + ecStatesToProcess.set(state, true); + } + unprocessedStates.set(state, false); + originalSolution[state] = ecqSolution[ecqState]; + } + computeSchedulerProb1(transitionMatrix, backwardsTransitions, ecStatesToProcess, ecStatesToReach, originalOptimalChoices); + // Clear bitvectors for next ecqState. + ecStatesToProcess.clear(); + ecStatesToReach.clear(); } else { - statesWithUndefSched.set(state); + // There is just one state so we take the associated choice. + auto state = *origStates.begin(); + auto groupStart = transitionMatrix.getRowGroupIndices()[state]; + STORM_LOG_ASSERT(origChoice >= groupStart && origChoice < transitionMatrix.getRowGroupIndices()[state + 1], "Invalid choice: " << originalOptimalChoices[state] << " at a state with " << transitionMatrix.getRowGroupSize(state) << " choices."); + originalOptimalChoices[state] = origChoice - groupStart; + originalSolution[state] = ecqSolution[ecqState]; + unprocessedStates.set(state, false); } } } - // Handle bottom states - for (auto state : bottomStates) { - bool foundRowForState = false; - // Find a row with zero rewards that only leads to bottom states. - // If the state should stay in its EC, we also need to make sure that all successors map to the same state in the reduced model - uint_fast64_t stateInReducedModel = originalToReducedStateMapping[state]; - for (uint_fast64_t row = transitionMatrix.getRowGroupIndices()[state]; row < transitionMatrix.getRowGroupIndices()[state+1]; ++row) { - bool rowOnlyLeadsToBottomStates = true; - bool rowStaysInEC = true; - for ( auto const& entry : transitionMatrix.getRow(row)) { - rowOnlyLeadsToBottomStates &= bottomStates.get(entry.getColumn()); - rowStaysInEC &= originalToReducedStateMapping[entry.getColumn()] == stateInReducedModel; - } - if (rowOnlyLeadsToBottomStates && (rowStaysInEC || !statesThatShouldStayInTheirEC.get(state)) && actionsWithoutRewardInUnboundedPhase.get(row)) { - foundRowForState = true; - originalOptimalChoices[state] = row - transitionMatrix.getRowGroupIndices()[state]; - break; - } - } - STORM_LOG_ASSERT(foundRowForState, "Could not find a suitable choice for a bottom state."); - } - - // Handle remaining states with still undef. scheduler (either EC states or non-subsystem states) - while(!statesWithUndefSched.empty()) { - for (auto state : statesWithUndefSched) { - // Iteratively Try to find a choice such that at least one successor has a defined scheduler. 
- uint_fast64_t stateInReducedModel = originalToReducedStateMapping[state]; - for (uint_fast64_t row = transitionMatrix.getRowGroupIndices()[state]; row < transitionMatrix.getRowGroupIndices()[state+1]; ++row) { - bool rowStaysInEC = true; - bool rowLeadsToDefinedScheduler = false; - for (auto const& entry : transitionMatrix.getRow(row)) { - rowStaysInEC &= ( stateInReducedModel == originalToReducedStateMapping[entry.getColumn()]); - rowLeadsToDefinedScheduler |= !statesWithUndefSched.get(entry.getColumn()); - } - if (rowLeadsToDefinedScheduler && (rowStaysInEC || !statesThatShouldStayInTheirEC.get(state))) { - originalOptimalChoices[state] = row - transitionMatrix.getRowGroupIndices()[state]; - statesWithUndefSched.set(state, false); - break; + // For the states that have not been processed yet, there is no associated state of the ec quotient. + // This is because the value of these states will be 0 under all (lra optimal) schedulers. + storm::utility::vector::setVectorValues(originalSolution, unprocessedStates, storm::utility::zero()); + // Get a set of states for which we know that no reward (for all objectives) will be collected + if (this->lraMecDecomposition) { + // In this case, all unprocessed non-lra mec states should reach an (unprocessed) lra mec + for (auto const& mec : this->lraMecDecomposition->mecs) { + for (auto const& sc : mec) { + if (unprocessedStates.get(sc.first)) { + ecStatesToReach.set(sc.first, true); } } } + } else { + ecStatesToReach = unprocessedStates & totalReward0EStates; + // Set a scheduler for the ecStates that we want to reach + computeSchedulerProb0(transitionMatrix, backwardsTransitions, ecStatesToReach, ~unprocessedStates | ~totalReward0EStates, actionsWithoutRewardInUnboundedPhase, originalOptimalChoices); } + unprocessedStates &= ~ecStatesToReach; + // Set a scheduler for the remaining states + computeSchedulerProb1(transitionMatrix, backwardsTransitions, unprocessedStates, ecStatesToReach, originalOptimalChoices); } diff --git a/src/storm/modelchecker/multiobjective/pcaa/StandardPcaaWeightVectorChecker.h b/src/storm/modelchecker/multiobjective/pcaa/StandardPcaaWeightVectorChecker.h index 3a6ac3c9f..b59cd1336 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/StandardPcaaWeightVectorChecker.h +++ b/src/storm/modelchecker/multiobjective/pcaa/StandardPcaaWeightVectorChecker.h @@ -4,11 +4,15 @@ #include "storm/storage/BitVector.h" #include "storm/storage/SparseMatrix.h" #include "storm/storage/Scheduler.h" +#include "storm/storage/MaximalEndComponentDecomposition.h" #include "storm/transformer/EndComponentEliminator.h" +#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h" +#include "storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h" #include "storm/modelchecker/multiobjective/Objective.h" #include "storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.h" #include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessorResult.h" #include "storm/utility/vector.h" +#include "storm/storage/BoostTypes.h" namespace storm { namespace modelchecker { @@ -24,7 +28,10 @@ namespace storm { class StandardPcaaWeightVectorChecker : public PcaaWeightVectorChecker { public: typedef typename SparseModelType::ValueType ValueType; - + using DeterministicInfiniteHorizonHelperType = typename std::conditional>::value, + storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper, +
storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper>::type; + /* * Creates a weight vextor checker. * @@ -64,7 +71,11 @@ namespace storm { void initialize(preprocessing::SparseMultiObjectivePreprocessorResult const& preprocessorResult); virtual void initializeModelTypeSpecificData(SparseModelType const& model) = 0; + virtual storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper createNondetInfiniteHorizonHelper(storm::storage::SparseMatrix const& transitions) const = 0; + virtual DeterministicInfiniteHorizonHelperType createDetInfiniteHorizonHelper(storm::storage::SparseMatrix const& transitions) const = 0; + void infiniteHorizonWeightedPhase(Environment const& env, std::vector const& weightedActionRewardVector, boost::optional> const& weightedStateRewardVector); + /*! * Determines the scheduler that optimizes the weighted reward vector of the unbounded objectives * @@ -99,11 +110,9 @@ namespace storm { /*! * Transforms the results of a min-max-solver that considers a reduced model (without end components) to a result for the original (unreduced) model */ - void transformReducedSolutionToOriginalModel(storm::storage::SparseMatrix const& reducedMatrix, - std::vector const& reducedSolution, - std::vector const& reducedOptimalChoices, - std::vector const& reducedToOriginalChoiceMapping, - std::vector const& originalToReducedStateMapping, + void transformEcqSolutionToOriginalModel(std::vector const& ecqSolution, + std::vector const& ecqOptimalChoices, + std::map const& ecqStateToOptimalMecMap, std::vector& originalSolution, std::vector& originalOptimalChoices) const; @@ -117,11 +126,16 @@ namespace storm { storm::storage::BitVector ecChoicesHint; // The actions that have reward assigned for at least one objective without upper timeBound storm::storage::BitVector actionsWithoutRewardInUnboundedPhase; - // The states for which there is a scheduler yielding reward 0 for each objective - storm::storage::BitVector reward0EStates; + // The states for which there is a scheduler yielding reward 0 for each total reward objective + storm::storage::BitVector totalReward0EStates; // stores the state action rewards for each objective. std::vector> actionRewards; + // stores the state rewards for each objective. + // These are only relevant for LRA objectives for MAs (otherwise, they appear within the action rewards). For other objectives/models, the corresponding vector will be empty. + std::vector> stateRewards; + // stores the indices of the objectives for which we need to compute the long run average values + storm::storage::BitVector lraObjectives; // stores the indices of the objectives for which there is no upper time bound storm::storage::BitVector objectivesWithNoUpperTimeBound; @@ -137,22 +151,28 @@ namespace storm { std::vector offsetsToUnderApproximation; std::vector offsetsToOverApproximation; // The scheduler choices that optimize the weighted rewards of undounded objectives. 
- std::vector optimalChoices; + std::vector optimalChoices; struct EcQuotient { storm::storage::SparseMatrix matrix; std::vector ecqToOriginalChoiceMapping; std::vector originalToEcqStateMapping; + std::vector ecqToOriginalStateMapping; + storm::storage::BitVector ecqStayInEcChoices; storm::storage::BitVector origReward0Choices; storm::storage::BitVector rowsWithSumLessOne; std::vector auxStateValues; std::vector auxChoiceValues; - }; - boost::optional ecQuotient; + struct LraMecDecomposition { + storm::storage::MaximalEndComponentDecomposition mecs; + std::vector auxMecValues; + }; + boost::optional lraMecDecomposition; + }; } diff --git a/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.cpp b/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.cpp index 9f253bc38..1fd427456 100644 --- a/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.cpp +++ b/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.cpp @@ -169,6 +169,12 @@ namespace storm { auto rewardModel = storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asTotalRewardFormula()); storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix()); absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward); + } else if (pathFormula.isLongRunAverageRewardFormula()) { + auto rewardModel = storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asLongRunAverageRewardFormula()); + storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix()); + // Compute Sat(Forall F (Forall G "statesWithoutReward")) + auto forallGloballyStatesWithoutReward = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward); + absorbingStatesForSubformula = storm::utility::graph::performProb1A(model->getTransitionMatrix(), model->getNondeterministicChoiceIndices(), backwardTransitions, storm::storage::BitVector(model->getNumberOfStates(), true), forallGloballyStatesWithoutReward); } else { STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << pathFormula << " is not supported."); } @@ -179,6 +185,11 @@ namespace storm { } else { STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << pathFormula << " is not supported."); } + } else if (opFormula->isLongRunAverageOperatorFormula()) { + auto lraStates = mc.check(pathFormula)->asExplicitQualitativeCheckResult().getTruthValuesVector(); + // Compute Sat(Forall F (Forall G not "lraStates")) + auto forallGloballyNotLraStates = storm::utility::graph::performProb0A(backwardTransitions, ~lraStates, lraStates); + absorbingStatesForSubformula = storm::utility::graph::performProb1A(model->getTransitionMatrix(), model->getNondeterministicChoiceIndices(), backwardTransitions, ~lraStates, forallGloballyNotLraStates); } else { STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the subformula " << *opFormula << " of " << originalFormula << " because it is not supported"); } @@ -288,6 +299,8 @@ namespace storm { preprocessRewardOperatorFormula(formula.asRewardOperatorFormula(), opInfo, data); } else if (formula.isTimeOperatorFormula()){ 
preprocessTimeOperatorFormula(formula.asTimeOperatorFormula(), opInfo, data); + } else if (formula.isLongRunAverageOperatorFormula()) { + preprocessLongRunAverageOperatorFormula(formula.asLongRunAverageOperatorFormula(), opInfo, data); } else { STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the objective " << formula << " because it is not supported"); } @@ -335,7 +348,9 @@ namespace storm { } else if (formula.getSubformula().isCumulativeRewardFormula()) { preprocessCumulativeRewardFormula(formula.getSubformula().asCumulativeRewardFormula(), opInfo, data, rewardModelName); } else if (formula.getSubformula().isTotalRewardFormula()) { - preprocessTotalRewardFormula(opInfo, data, rewardModelName); + preprocessTotalRewardFormula(formula.getSubformula().asTotalRewardFormula(), opInfo, data, rewardModelName); + } else if (formula.getSubformula().isLongRunAverageRewardFormula()) { + preprocessLongRunAverageRewardFormula(formula.getSubformula().asLongRunAverageRewardFormula(), opInfo, data, rewardModelName); } else { STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported."); } @@ -352,6 +367,28 @@ namespace storm { STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported."); } } + + template + void SparseMultiObjectivePreprocessor::preprocessLongRunAverageOperatorFormula(storm::logic::LongRunAverageOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) { + + data.objectives.back()->lowerResultBound = storm::utility::zero(); + data.objectives.back()->upperResultBound = storm::utility::one(); + + // Convert to a long run average reward formula + // Create and add the new formula + std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size()); + auto lraRewardFormula = std::make_shared(); + data.objectives.back()->formula = std::make_shared(lraRewardFormula, rewardModelName, opInfo); + + // Create and add the new reward model that only gives one reward for goal states + storm::modelchecker::SparsePropositionalModelChecker mc(*data.model); + storm::storage::BitVector subFormulaResult = mc.check(formula.getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector(); + std::vector lraRewards(data.model->getNumberOfStates(), storm::utility::zero()); + storm::utility::vector::setVectorValues(lraRewards, subFormulaResult, storm::utility::one()); + data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::move(lraRewards))); + + + } template void SparseMultiObjectivePreprocessor::preprocessUntilFormula(storm::logic::UntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, std::shared_ptr subformula) { @@ -461,7 +498,8 @@ namespace storm { if (formula.isReachabilityRewardFormula()) { // build stateAction reward vector that only gives reward for states that are reachable from init assert(optionalRewardModelName.is_initialized()); - typename SparseModelType::RewardModelType objectiveRewards = data.model->getRewardModel(optionalRewardModelName.get()); + auto objectiveRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(optionalRewardModelName.get()), data.model->isDiscreteTimeModel(), formula).extract(); + // get rid of potential transition rewards objectiveRewards.reduceToStateBasedRewards(data.model->getTransitionMatrix(), false); if 
(objectiveRewards.hasStateRewards()) { storm::utility::vector::setVectorValues(objectiveRewards.getStateRewardVector(), reachableFromGoal, storm::utility::zero()); @@ -503,7 +541,7 @@ namespace storm { } else { data.objectives.back()->formula = std::make_shared(formula.asSharedPointer(), optionalRewardModelName.get(), opInfo); } - } else if (formula.isReachabilityTimeFormula() && data.model->isOfType(storm::models::ModelType::MarkovAutomaton)) { + } else if (formula.isReachabilityTimeFormula()) { // Reduce to reachability rewards so that time formulas do not have to be treated seperately later. std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size()); std::shared_ptr newSubformula = formula.getSubformula().asSharedPointer(); @@ -514,8 +552,14 @@ namespace storm { } auto newFormula = std::make_shared(newSubformula, storm::logic::FormulaContext::Reward); data.objectives.back()->formula = std::make_shared(newFormula, rewardModelName, opInfo); - std::vector timeRewards(data.model->getNumberOfStates(), storm::utility::zero()); - storm::utility::vector::setVectorValues(timeRewards, dynamic_cast const&>(*data.model).getMarkovianStates(), storm::utility::one()); + std::vector timeRewards; + if (data.model->isOfType(storm::models::ModelType::MarkovAutomaton)) { + timeRewards.assign(data.model->getNumberOfStates(), storm::utility::zero()); + storm::utility::vector::setVectorValues(timeRewards, dynamic_cast const&>(*data.model).getMarkovianStates(), storm::utility::one()); + } else { + timeRewards.assign(data.model->getNumberOfStates(), storm::utility::one()); + + } data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::move(timeRewards))); } else { STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The formula " << formula << " neither considers reachability probabilities nor reachability rewards " << (data.model->isOfType(storm::models::ModelType::MarkovAutomaton) ? "nor reachability time" : "") << ". 
This is not supported."); @@ -527,29 +571,70 @@ namespace storm { template void SparseMultiObjectivePreprocessor::preprocessCumulativeRewardFormula(storm::logic::CumulativeRewardFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional const& optionalRewardModelName) { STORM_LOG_THROW(data.model->isOfType(storm::models::ModelType::Mdp), storm::exceptions::InvalidPropertyException, "Cumulative reward formulas are not supported for the given model type."); + std::string rewardModelName = optionalRewardModelName.get(); + // Strip away potential RewardAccumulations in the formula itself but also in reward bounds + auto filteredRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(rewardModelName), data.model->isDiscreteTimeModel(), formula); + if (filteredRewards.isDifferentFromUnfilteredModel()) { + std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size()); + data.model->addRewardModel(rewardModelName, std::move(filteredRewards.extract())); + } - auto cumulativeRewardFormula = std::make_shared(formula); - data.objectives.back()->formula = std::make_shared(cumulativeRewardFormula, *optionalRewardModelName, opInfo); + std::vector newTimeBoundReferences; bool onlyRewardBounds = true; - for (uint64_t i = 0; i < cumulativeRewardFormula->getDimension(); ++i) { - if (!cumulativeRewardFormula->getTimeBoundReference(i).isRewardBound()) { + for (uint64_t i = 0; i < formula.getDimension(); ++i) { + auto oldTbr = formula.getTimeBoundReference(i); + if (oldTbr.isRewardBound()) { + if (oldTbr.hasRewardAccumulation()) { + auto filteredBoundRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(oldTbr.getRewardName()), oldTbr.getRewardAccumulation(), data.model->isDiscreteTimeModel()); + if (filteredBoundRewards.isDifferentFromUnfilteredModel()) { + std::string freshRewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size()) + std::string("_" + std::to_string(i)); + data.model->addRewardModel(freshRewardModelName, std::move(filteredBoundRewards.extract())); + newTimeBoundReferences.emplace_back(freshRewardModelName); + } else { + // Strip away the reward accumulation + newTimeBoundReferences.emplace_back(oldTbr.getRewardName()); + } + } else { + newTimeBoundReferences.push_back(oldTbr); + } + } else { onlyRewardBounds = false; - break; + newTimeBoundReferences.push_back(oldTbr); } } + + auto newFormula = std::make_shared(formula.getBounds(), newTimeBoundReferences); + data.objectives.back()->formula = std::make_shared(newFormula, rewardModelName, opInfo); + if (onlyRewardBounds) { data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true); } } template - void SparseMultiObjectivePreprocessor::preprocessTotalRewardFormula(storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional const& optionalRewardModelName) { + void SparseMultiObjectivePreprocessor::preprocessTotalRewardFormula(storm::logic::TotalRewardFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional const& optionalRewardModelName) { - auto totalRewardFormula = std::make_shared(); - data.objectives.back()->formula = std::make_shared(totalRewardFormula, *optionalRewardModelName, opInfo); + std::string rewardModelName = optionalRewardModelName.get(); + auto filteredRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(rewardModelName), 
data.model->isDiscreteTimeModel(), formula); + if (filteredRewards.isDifferentFromUnfilteredModel()) { + std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size()); + data.model->addRewardModel(rewardModelName, filteredRewards.extract()); + } + data.objectives.back()->formula = std::make_shared(formula.stripRewardAccumulation(), rewardModelName, opInfo); data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true); } + template + void SparseMultiObjectivePreprocessor::preprocessLongRunAverageRewardFormula(storm::logic::LongRunAverageRewardFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional const& optionalRewardModelName) { + std::string rewardModelName = optionalRewardModelName.get(); + auto filteredRewards = storm::utility::createFilteredRewardModel(data.model->getRewardModel(rewardModelName), data.model->isDiscreteTimeModel(), formula); + if (filteredRewards.isDifferentFromUnfilteredModel()) { + std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size()); + data.model->addRewardModel(rewardModelName, std::move(filteredRewards.extract())); + } + data.objectives.back()->formula = std::make_shared(formula.stripRewardAccumulation(), rewardModelName, opInfo); + } + template typename SparseMultiObjectivePreprocessor::ReturnType SparseMultiObjectivePreprocessor::buildResult(SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula, PreprocessorData& data) { ReturnType result(originalFormula, originalModel); diff --git a/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.h b/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.h index 67f65b20a..91a45b87a 100644 --- a/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.h +++ b/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.h @@ -69,13 +69,14 @@ namespace storm { static void preprocessProbabilityOperatorFormula(storm::logic::ProbabilityOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); static void preprocessRewardOperatorFormula(storm::logic::RewardOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); static void preprocessTimeOperatorFormula(storm::logic::TimeOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); + static void preprocessLongRunAverageOperatorFormula(storm::logic::LongRunAverageOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); static void preprocessUntilFormula(storm::logic::UntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, std::shared_ptr subformula = nullptr); static void preprocessBoundedUntilFormula(storm::logic::BoundedUntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); static void preprocessGloballyFormula(storm::logic::GloballyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); static void preprocessEventuallyFormula(storm::logic::EventuallyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional const& optionalRewardModelName = boost::none); static void 
preprocessCumulativeRewardFormula(storm::logic::CumulativeRewardFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional const& optionalRewardModelName = boost::none); - static void preprocessTotalRewardFormula(storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional const& optionalRewardModelName = boost::none); // The total reward formula itself does not need to be provided as it is unique. - + static void preprocessTotalRewardFormula(storm::logic::TotalRewardFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional const& optionalRewardModelName = boost::none); + static void preprocessLongRunAverageRewardFormula(storm::logic::LongRunAverageRewardFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional const& optionalRewardModelName = boost::none); /*! * Builds the result from preprocessing diff --git a/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessorResult.h b/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessorResult.h index 460ba3ec2..5b442b701 100644 --- a/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessorResult.h +++ b/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessorResult.h @@ -38,22 +38,43 @@ namespace storm { // Intentionally left empty } + uint64_t getNumberOfTotalRewardFormulas() const { + uint64_t count = 0; + for (auto const& obj : objectives) { + if (obj.formula->isRewardOperatorFormula() && obj.formula->getSubformula().isTotalRewardFormula()) { + ++count; + } + } + return count; + } bool containsOnlyTotalRewardFormulas() const { + return getNumberOfTotalRewardFormulas() == objectives.size(); + } + + uint64_t getNumberOfLongRunAverageRewardFormulas() const { + uint64_t count = 0; for (auto const& obj : objectives) { - if (!obj.formula->isRewardOperatorFormula() || !obj.formula->getSubformula().isTotalRewardFormula()) { - return false; + if (obj.formula->isRewardOperatorFormula() && obj.formula->getSubformula().isLongRunAverageRewardFormula()) { + ++count; } } - return true; + return count; + } + + bool containsLongRunAverageRewardFormulas() const { + return getNumberOfLongRunAverageRewardFormulas() > 0; } bool containsOnlyTrivialObjectives() const { - // Trivial objectives are either total reward formulas or single-dimensional step or time bounded cumulative reward formulas + // Trivial objectives are either total reward formulas, LRA reward formulas or single-dimensional step or time bounded cumulative reward formulas for (auto const& obj : objectives) { if (obj.formula->isRewardOperatorFormula() && obj.formula->getSubformula().isTotalRewardFormula()) { continue; } + if (obj.formula->isRewardOperatorFormula() && obj.formula->getSubformula().isLongRunAverageRewardFormula()) { + continue; + } if (obj.formula->isRewardOperatorFormula() && obj.formula->getSubformula().isCumulativeRewardFormula()) { auto const& subf = obj.formula->getSubformula().asCumulativeRewardFormula(); if (!subf.isMultiDimensional() && (subf.getTimeBoundReference().isTimeBound() || subf.getTimeBoundReference().isStepBound())) { @@ -76,12 +97,14 @@ namespace storm { out << "--------------------------------------------------------------" << std::endl; out << "\t" << originalFormula << std::endl; out << std::endl; - out << "Objectives:" << std::endl; + out << "The query 
considers " << objectives.size() << " objectives:" << std::endl; out << "--------------------------------------------------------------" << std::endl; for (auto const& obj : objectives) { obj.printToStream(out); out << std::endl; } + out << "Number of Long-Run-Average Reward Objectives (after preprocessing): " << getNumberOfLongRunAverageRewardFormulas() << "." << std::endl; + out << "Number of Total Reward Objectives (after preprocessing): " << getNumberOfTotalRewardFormulas() << "." << std::endl; out << "--------------------------------------------------------------" << std::endl; out << std::endl; out << "Original Model Information:" << std::endl; diff --git a/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.cpp b/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.cpp index 3c9c0e6a4..7ae3e3d73 100644 --- a/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.cpp +++ b/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.cpp @@ -48,43 +48,39 @@ namespace storm { std::vector const& groupIndices = transitions.getRowGroupIndices(); storm::storage::BitVector allStates(stateCount, true); - // Get the choices that yield non-zero reward - storm::storage::BitVector zeroRewardChoices(preprocessorResult.preprocessedModel->getNumberOfChoices(), true); + // Get the choices without any reward for the various objective types + storm::storage::BitVector zeroLraRewardChoices(preprocessorResult.preprocessedModel->getNumberOfChoices(), true); + storm::storage::BitVector zeroTotalRewardChoices(preprocessorResult.preprocessedModel->getNumberOfChoices(), true); + storm::storage::BitVector zeroCumulativeRewardChoices(preprocessorResult.preprocessedModel->getNumberOfChoices(), true); for (auto const& obj : preprocessorResult.objectives) { if (obj.formula->isRewardOperatorFormula()) { - STORM_LOG_WARN_COND(obj.formula->getSubformula().isTotalRewardFormula() || obj.formula->getSubformula().isCumulativeRewardFormula(), "Analyzing reachability reward formulas is not supported properly."); auto const& rewModel = preprocessorResult.preprocessedModel->getRewardModel(obj.formula->asRewardOperatorFormula().getRewardModelName()); - zeroRewardChoices &= rewModel.getChoicesWithZeroReward(transitions); - } - } - - // Get the states that have reward for at least one (or for all) choices assigned to it. 
- storm::storage::BitVector statesWithRewardForOneChoice = storm::storage::BitVector(stateCount, false); - storm::storage::BitVector statesWithRewardForAllChoices = storm::storage::BitVector(stateCount, true); - for (uint_fast64_t state = 0; state < stateCount; ++state) { - bool stateHasChoiceWithReward = false; - bool stateHasChoiceWithoutReward = false; - uint_fast64_t const& groupEnd = groupIndices[state + 1]; - for (uint_fast64_t choice = groupIndices[state]; choice < groupEnd; ++choice) { - if (zeroRewardChoices.get(choice)) { - stateHasChoiceWithoutReward = true; + if (obj.formula->getSubformula().isLongRunAverageRewardFormula()) { + zeroLraRewardChoices &= rewModel.getChoicesWithZeroReward(transitions); + } else if (obj.formula->getSubformula().isTotalRewardFormula()) { + zeroTotalRewardChoices &= rewModel.getChoicesWithZeroReward(transitions); } else { - stateHasChoiceWithReward = true; + STORM_LOG_WARN_COND(obj.formula->getSubformula().isCumulativeRewardFormula(), "Analyzing subformula " << obj.formula->getSubformula() << " is not supported properly."); + zeroCumulativeRewardChoices &= rewModel.getChoicesWithZeroReward(transitions); } } - if (stateHasChoiceWithReward) { - statesWithRewardForOneChoice.set(state, true); - } - if (stateHasChoiceWithoutReward) { - statesWithRewardForAllChoices.set(state, false); - } } - // get the states for which there is a scheduler yielding reward zero - result.reward0EStates = storm::utility::graph::performProbGreater0A(transitions, groupIndices, backwardTransitions, allStates, statesWithRewardForAllChoices, false, 0, zeroRewardChoices); - result.reward0EStates.complement(); - result.reward0AStates = storm::utility::graph::performProb0A(backwardTransitions, allStates, statesWithRewardForOneChoice); - assert(result.reward0AStates.isSubsetOf(result.reward0EStates)); + // get the states for which there is a scheduler yielding total reward zero + auto statesWithTotalRewardForAllChoices = transitions.getRowGroupFilter(~zeroTotalRewardChoices, true); + result.totalReward0EStates = storm::utility::graph::performProbGreater0A(transitions, groupIndices, backwardTransitions, allStates, statesWithTotalRewardForAllChoices, false, 0, zeroTotalRewardChoices); + result.totalReward0EStates.complement(); + + // Get the states for which all schedulers yield a reward of 0 + // Starting with LRA objectives + auto statesWithoutLraReward = transitions.getRowGroupFilter(zeroLraRewardChoices, true); + // Compute Sat(Forall F (Forall G "LRAStatesWithoutReward")) + auto forallGloballyStatesWithoutLraReward = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutLraReward, ~statesWithoutLraReward); + result.reward0AStates = storm::utility::graph::performProb1A(transitions, groupIndices, backwardTransitions, allStates, forallGloballyStatesWithoutLraReward); + // Now also incorporate cumulative and total reward objectives + auto statesWithTotalOrCumulativeReward = transitions.getRowGroupFilter(~(zeroTotalRewardChoices & zeroCumulativeRewardChoices), false); + result.reward0AStates &= storm::utility::graph::performProb0A(backwardTransitions, allStates, statesWithTotalOrCumulativeReward); + assert(result.reward0AStates.isSubsetOf(result.totalReward0EStates)); } template @@ -126,8 +122,8 @@ namespace storm { result.rewardFinitenessType = RewardFinitenessType::Infinite; } else { // Check whether there is a scheduler under which all rewards are finite. 
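(Note on the reward0AStates construction above: the analysis obtains the states for which *all* schedulers yield long-run average reward zero as Sat(forall F (forall G "no LRA reward")), i.e. the states from which every scheduler is eventually trapped in the zero-reward region. A minimal standalone sketch of that two-step graph computation, assuming double-valued matrices; the function and variable names are hypothetical, but the calls mirror the performProb0A/performProb1A usage above.)

    #include <cstdint>
    #include "storm/storage/BitVector.h"
    #include "storm/storage/SparseMatrix.h"
    #include "storm/utility/graph.h"

    // Sketch: compute Sat(forall F (forall G S)) for a state set S of a nondeterministic model.
    storm::storage::BitVector computeForallFForallG(storm::storage::SparseMatrix<double> const& transitions,
                                                    storm::storage::SparseMatrix<double> const& backwardTransitions,
                                                    storm::storage::BitVector const& S) {
        storm::storage::BitVector allStates(transitions.getRowGroupCount(), true);
        // Step 1: Sat(forall G S) = states from which no scheduler can ever reach a state outside of S.
        storm::storage::BitVector forallGloballyS = storm::utility::graph::performProb0A(backwardTransitions, S, ~S);
        // Step 2: Sat(forall F (forall G S)) = states from which every scheduler reaches Sat(forall G S) with probability one.
        return storm::utility::graph::performProb1A(transitions, transitions.getRowGroupIndices(), backwardTransitions, allStates, forallGloballyS);
    }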
- result.rewardLessInfinityEStates = storm::utility::graph::performProb1E(transitions, groupIndices, backwardTransitions, allStates, result.reward0EStates); - if ((result.rewardLessInfinityEStates.get() & preprocessorResult.preprocessedModel->getInitialStates()).empty()) { + result.totalRewardLessInfinityEStates = storm::utility::graph::performProb1E(transitions, groupIndices, backwardTransitions, allStates, result.totalReward0EStates); + if ((result.totalRewardLessInfinityEStates.get() & preprocessorResult.preprocessedModel->getInitialStates()).empty()) { // There is no scheduler that induces finite reward for the initial state result.rewardFinitenessType = RewardFinitenessType::Infinite; } else { @@ -135,7 +131,7 @@ namespace storm { } } } else { - result.rewardLessInfinityEStates = allStates; + result.totalRewardLessInfinityEStates = allStates; } } diff --git a/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.h b/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.h index 73dc0b3b3..ed58e8a0e 100644 --- a/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.h +++ b/src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.h @@ -34,9 +34,9 @@ namespace storm { RewardFinitenessType rewardFinitenessType; // The states of the preprocessed model for which... - storm::storage::BitVector reward0EStates; // ... there is a scheduler such that all expected reward objectives have value zero - storm::storage::BitVector reward0AStates; // ... all schedulers induce value 0 for all expected reward objectives - boost::optional rewardLessInfinityEStates; // ... there is a scheduler yielding finite reward for all expected reward objectives + storm::storage::BitVector totalReward0EStates; // ... there is a scheduler such that all expected total reward objectives have value zero + storm::storage::BitVector reward0AStates; // ... all schedulers induce value 0 for all reward-based objectives + boost::optional totalRewardLessInfinityEStates; // ... there is a scheduler yielding finite reward for all expected total reward objectives }; /*! 
diff --git a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp index 709ffe605..70d7dd581 100644 --- a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp +++ b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp @@ -47,7 +47,7 @@ namespace storm { if (formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setConditionalProbabilityFormulasAllowed(true).setOnlyEventuallyFormuluasInConditionalFormulasAllowed(true).setTotalRewardFormulasAllowed(true).setRewardBoundedUntilFormulasAllowed(true).setRewardBoundedCumulativeRewardFormulasAllowed(true).setMultiDimensionalBoundedUntilFormulasAllowed(true).setMultiDimensionalCumulativeRewardFormulasAllowed(true).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true).setRewardAccumulationAllowed(true))) { return true; } else if (checkTask.isOnlyInitialStatesRelevantSet()) { - auto multiObjectiveFragment = storm::logic::multiObjective().setCumulativeRewardFormulasAllowed(true).setTimeBoundedCumulativeRewardFormulasAllowed(true).setStepBoundedCumulativeRewardFormulasAllowed(true).setRewardBoundedCumulativeRewardFormulasAllowed(true).setTimeBoundedUntilFormulasAllowed(true).setStepBoundedUntilFormulasAllowed(true).setRewardBoundedUntilFormulasAllowed(true).setMultiDimensionalBoundedUntilFormulasAllowed(true).setMultiDimensionalCumulativeRewardFormulasAllowed(true); + auto multiObjectiveFragment = storm::logic::multiObjective().setCumulativeRewardFormulasAllowed(true).setTimeBoundedCumulativeRewardFormulasAllowed(true).setStepBoundedCumulativeRewardFormulasAllowed(true).setRewardBoundedCumulativeRewardFormulasAllowed(true).setTimeBoundedUntilFormulasAllowed(true).setStepBoundedUntilFormulasAllowed(true).setRewardBoundedUntilFormulasAllowed(true).setMultiDimensionalBoundedUntilFormulasAllowed(true).setMultiDimensionalCumulativeRewardFormulasAllowed(true).setRewardAccumulationAllowed(true); if (formula.isInFragment(multiObjectiveFragment) || formula.isInFragment(storm::logic::quantiles())) { if (requiresSingleInitialState) { *requiresSingleInitialState = true; diff --git a/src/storm/models/sparse/StandardRewardModel.h b/src/storm/models/sparse/StandardRewardModel.h index e84faee19..e30da32b1 100644 --- a/src/storm/models/sparse/StandardRewardModel.h +++ b/src/storm/models/sparse/StandardRewardModel.h @@ -34,15 +34,15 @@ namespace storm { * @param optionalStateActionRewardVector The reward values associated with state-action pairs. * @param optionalTransitionRewardMatrix The reward values associated with the transitions of the model. */ - StandardRewardModel(boost::optional>&& optionalStateRewardVector = boost::none, + StandardRewardModel(boost::optional>&& optionalStateRewardVector, boost::optional>&& optionalStateActionRewardVector = boost::none, boost::optional>&& optionalTransitionRewardMatrix = boost::none); - StandardRewardModel(StandardRewardModel const& dtmc) = default; - StandardRewardModel& operator=(StandardRewardModel const& dtmc) = default; + StandardRewardModel(StandardRewardModel const& other) = default; + StandardRewardModel& operator=(StandardRewardModel const& other) = default; - StandardRewardModel(StandardRewardModel&& dtmc) = default; - StandardRewardModel& operator=(StandardRewardModel&& dtmc) = default; + StandardRewardModel(StandardRewardModel&& other) = default; + StandardRewardModel& operator=(StandardRewardModel&& other) = default; /*! 
* Retrieves whether the reward model has state rewards. diff --git a/src/storm/settings/modules/IOSettings.cpp b/src/storm/settings/modules/IOSettings.cpp index dc438f231..7069c453a 100644 --- a/src/storm/settings/modules/IOSettings.cpp +++ b/src/storm/settings/modules/IOSettings.cpp @@ -50,6 +50,7 @@ namespace storm { const std::string IOSettings::qvbsInputOptionName = "qvbs"; const std::string IOSettings::qvbsInputOptionShortName = "qvbs"; const std::string IOSettings::qvbsRootOptionName = "qvbsroot"; + const std::string IOSettings::propertiesAsMultiOptionName = "propsasmulti"; std::string preventDRNPlaceholderOptionName = "no-drn-placeholders"; @@ -103,6 +104,9 @@ namespace storm { .addArgument(storm::settings::ArgumentBuilder::createUnsignedIntegerArgument("instance-index", "The selected instance of this model.").setDefaultValueUnsignedInteger(0).makeOptional().build()) .addArgument(storm::settings::ArgumentBuilder::createStringArgument("filter", "The comma separated list of property names to check. Omit to check all, \"\" to check none.").setDefaultValueString("").makeOptional().build()) .build()); + + this->addOption(storm::settings::OptionBuilder(moduleName, propertiesAsMultiOptionName, false, "If set, the selected properties are interpreted as a multi-objective formula.").setIsAdvanced().build()); + #ifdef STORM_HAVE_QVBS std::string qvbsRootDefault = STORM_QVBS_ROOT; #else @@ -321,6 +325,10 @@ namespace storm { return path.getValueAsString(); } + bool IOSettings::isPropertiesAsMultiSet() const { + return this->getOption(propertiesAsMultiOptionName).getHasOptionBeenSet(); + } + void IOSettings::finalize() { // Intentionally left empty. } diff --git a/src/storm/settings/modules/IOSettings.h b/src/storm/settings/modules/IOSettings.h index 3f41bdce7..447913665 100644 --- a/src/storm/settings/modules/IOSettings.h +++ b/src/storm/settings/modules/IOSettings.h @@ -339,6 +339,11 @@ namespace storm { */ std::string getQvbsRoot() const; + /*! 
+ * Retrieves whether the input properties are to be interpreted as a single multi-objective formula + */ + bool isPropertiesAsMultiSet() const; + bool check() const override; void finalize() override; @@ -377,6 +382,7 @@ namespace storm { static const std::string qvbsInputOptionName; static const std::string qvbsInputOptionShortName; static const std::string qvbsRootOptionName; + static const std::string propertiesAsMultiOptionName; }; diff --git a/src/storm/solver/AcyclicMinMaxLinearEquationSolver.cpp b/src/storm/solver/AcyclicMinMaxLinearEquationSolver.cpp index a11fa137e..d17e46528 100644 --- a/src/storm/solver/AcyclicMinMaxLinearEquationSolver.cpp +++ b/src/storm/solver/AcyclicMinMaxLinearEquationSolver.cpp @@ -27,15 +27,7 @@ namespace storm { bool AcyclicMinMaxLinearEquationSolver::internalSolveEquations(Environment const& env, OptimizationDirection dir, std::vector& x, std::vector const& b) const { STORM_LOG_ASSERT(x.size() == this->A->getRowGroupCount(), "Provided x-vector has invalid size."); STORM_LOG_ASSERT(b.size() == this->A->getRowCount(), "Provided b-vector has invalid size."); - // Allocate memory for the scheduler (if required) - if (this->isTrackSchedulerSet()) { - if (this->schedulerChoices) { - this->schedulerChoices->resize(this->A->getRowGroupCount()); - } else { - this->schedulerChoices = std::vector(this->A->getRowGroupCount()); - } - } - + if (!multiplier) { // We have not allocated cache memory, yet rowGroupOrdering = helper::computeTopologicalGroupOrdering(*this->A); @@ -72,16 +64,40 @@ namespace storm { bPtr = &auxiliaryRowVector.get(); } + // Allocate memory for the scheduler (if required) + std::vector* choicesPtr = nullptr; if (this->isTrackSchedulerSet()) { - this->multiplier->multiplyAndReduceGaussSeidel(env, dir, *xPtr, bPtr, &this->schedulerChoices.get(), true); - } else { - this->multiplier->multiplyAndReduceGaussSeidel(env, dir, *xPtr, bPtr, nullptr, true); + if (this->schedulerChoices) { + this->schedulerChoices->resize(this->A->getRowGroupCount()); + } else { + this->schedulerChoices = std::vector(this->A->getRowGroupCount()); + } + if (rowGroupOrdering) { + if (auxiliaryRowGroupIndexVector) { + auxiliaryRowGroupIndexVector->resize(this->A->getRowGroupCount()); + } else { + auxiliaryRowGroupIndexVector = std::vector(this->A->getRowGroupCount()); + } + choicesPtr = &(auxiliaryRowGroupIndexVector.get()); + } else { + choicesPtr = &(this->schedulerChoices.get()); + } } + // Since a topological ordering is guaranteed, we can solve the equations with a single matrix-vector Multiplication step. 
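(For intuition on the comment above: because the matrix is acyclic and its row groups are processed in a topological order, one Gauss-Seidel-style reduce sweep already yields the fixed point, since every successor value is final when its predecessor group is handled. A rough standalone sketch of such a sweep, assuming double values, ignoring Storm's Multiplier and caching machinery, and with a hypothetical function name; it is only meant to illustrate the single-pass idea.)

    #include <cstdint>
    #include <vector>
    #include "storm/storage/SparseMatrix.h"

    // Sketch: one backward-substitution sweep computing
    //   x[group] = opt over rows r of the group of ( b[r] + sum_j A(r, j) * x[j] ),
    // assuming the groups are ordered so that every successor of a group has a larger group index.
    void acyclicSweep(storm::storage::SparseMatrix<double> const& A, std::vector<double> const& b, std::vector<double>& x, bool minimize) {
        auto const& groupIndices = A.getRowGroupIndices();
        for (uint64_t groupPlusOne = A.getRowGroupCount(); groupPlusOne > 0; --groupPlusOne) {
            uint64_t group = groupPlusOne - 1;
            bool first = true;
            double best = 0.0;
            for (uint64_t row = groupIndices[group]; row < groupIndices[group + 1]; ++row) {
                double value = b[row];
                for (auto const& entry : A.getRow(row)) {
                    value += entry.getValue() * x[entry.getColumn()]; // successor values are already final
                }
                if (first || (minimize ? value < best : value > best)) {
                    best = value;
                    first = false;
                }
            }
            x[group] = best;
        }
    }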
+ this->multiplier->multiplyAndReduceGaussSeidel(env, dir, *xPtr, bPtr, choicesPtr, true); + if (rowGroupOrdering) { + // Restore the correct input-order for the output vector for (uint64_t newGroupIndex = 0; newGroupIndex < x.size(); ++newGroupIndex) { x[(*rowGroupOrdering)[newGroupIndex]] = (*xPtr)[newGroupIndex]; } + if (this->isTrackSchedulerSet()) { + // Do the same for the scheduler choices + for (uint64_t newGroupIndex = 0; newGroupIndex < x.size(); ++newGroupIndex) { + this->schedulerChoices.get()[(*rowGroupOrdering)[newGroupIndex]] = (*choicesPtr)[newGroupIndex]; + } + } } if (!this->isCachingEnabled()) { @@ -105,6 +121,7 @@ namespace storm { rowGroupOrdering = boost::none; auxiliaryRowVector = boost::none; auxiliaryRowGroupVector = boost::none; + auxiliaryRowGroupIndexVector = boost::none; bFactors.clear(); } diff --git a/src/storm/solver/AcyclicMinMaxLinearEquationSolver.h b/src/storm/solver/AcyclicMinMaxLinearEquationSolver.h index 682f14432..41c94c918 100644 --- a/src/storm/solver/AcyclicMinMaxLinearEquationSolver.h +++ b/src/storm/solver/AcyclicMinMaxLinearEquationSolver.h @@ -44,6 +44,8 @@ namespace storm { mutable boost::optional> auxiliaryRowVector; // A.rowCount() entries // can be used if the entries in 'x' need to be reordered mutable boost::optional> auxiliaryRowGroupVector; // A.rowGroupCount() entries + // can be used if the performed scheduler choices need to be reordered + mutable boost::optional> auxiliaryRowGroupIndexVector; // A.rowGroupCount() entries // contains factors applied to scale the entries of the 'b' vector mutable std::vector> bFactors; diff --git a/src/storm/storage/SparseMatrix.cpp b/src/storm/storage/SparseMatrix.cpp index ac40bfb00..05a526702 100644 --- a/src/storm/storage/SparseMatrix.cpp +++ b/src/storm/storage/SparseMatrix.cpp @@ -744,6 +744,29 @@ namespace storm { return result; } + template + storm::storage::BitVector SparseMatrix::getRowGroupFilter(storm::storage::BitVector const& rowConstraint, bool setIfForAllRowsInGroup) const { + STORM_LOG_ASSERT(!this->hasTrivialRowGrouping(), "Tried to get a row group filter but this matrix does not have row groups"); + storm::storage::BitVector result(this->getRowGroupCount(), false); + auto const& groupIndices = this->getRowGroupIndices(); + if (setIfForAllRowsInGroup) { + for (uint64_t group = 0; group < this->getRowGroupCount(); ++group) { + if (rowConstraint.getNextUnsetIndex(groupIndices[group]) >= groupIndices[group + 1]) { + // All rows within this group are set + result.set(group, true); + } + } + } else { + for (uint64_t group = 0; group < this->getRowGroupCount(); ++group) { + if (rowConstraint.getNextSetIndex(groupIndices[group]) < groupIndices[group + 1]) { + // Some row is set + result.set(group, true); + } + } + } + return result; + } + template void SparseMatrix::makeRowsAbsorbing(storm::storage::BitVector const& rows) { for (auto row : rows) { diff --git a/src/storm/storage/SparseMatrix.h b/src/storm/storage/SparseMatrix.h index b16a1ea99..ba25a2819 100644 --- a/src/storm/storage/SparseMatrix.h +++ b/src/storm/storage/SparseMatrix.h @@ -642,6 +642,15 @@ namespace storm { */ storm::storage::BitVector getRowFilter(storm::storage::BitVector const& groupConstraint, storm::storage::BitVector const& columnConstraints) const; + /*! + * Returns the indices of all row groups selected by the row constraints + * + * @param rowConstraint the selected rows + * @param setIfForAllRowsInGroup if true, a group is selected if the rowConstraint is true for *all* rows within that group. 
If false, a group is selected if the rowConstraint is true for *some* row within that group + * @return a bit vector that is true at position i iff row group i satisfies the constraint. + */ + storm::storage::BitVector getRowGroupFilter(storm::storage::BitVector const& rowConstraint, bool setIfForAllRowsInGroup) const; + /*! * This function makes the given rows absorbing. * diff --git a/src/storm/transformer/EndComponentEliminator.h b/src/storm/transformer/EndComponentEliminator.h index 141ed5198..1c7060fe6 100644 --- a/src/storm/transformer/EndComponentEliminator.h +++ b/src/storm/transformer/EndComponentEliminator.h @@ -24,6 +24,8 @@ namespace storm { // States of a removed ECs are mapped to the state that substitutes the EC. // If the given state does not exist in the result (i.e., it is not in the provided subsystem), the returned value will be std::numeric_limits::max(), i.e., an invalid index. std::vector oldToNewStateMapping; + // Indicates the rows that represent "staying" in the eliminated EC forever + storm::storage::BitVector sinkRows; }; /* @@ -52,7 +54,7 @@ namespace storm { EndComponentEliminatorReturnType result; std::vector newRowGroupIndices; result.oldToNewStateMapping = std::vector(originalMatrix.getRowGroupCount(), std::numeric_limits::max()); - storm::storage::BitVector sinkRows(originalMatrix.getRowCount(), false); // will be resized as soon as the rowCount of the resulting matrix is known + result.sinkRows = storm::storage::BitVector(originalMatrix.getRowCount(), false); // will be resized as soon as the rowCount of the resulting matrix is known for(auto keptState : keptStates) { result.oldToNewStateMapping[keptState] = newRowGroupIndices.size(); // i.e., the current number of processed states @@ -75,14 +77,14 @@ namespace storm { } if(ecGetsSinkRow) { STORM_LOG_ASSERT(result.newToOldRowMapping.size() < originalMatrix.getRowCount(), "Didn't expect to see more rows in the reduced matrix than in the original one."); - sinkRows.set(result.newToOldRowMapping.size(), true); + result.sinkRows.set(result.newToOldRowMapping.size(), true); result.newToOldRowMapping.push_back(*ec.begin()->second.begin()); } } newRowGroupIndices.push_back(result.newToOldRowMapping.size()); - sinkRows.resize(result.newToOldRowMapping.size()); + result.sinkRows.resize(result.newToOldRowMapping.size()); - result.matrix = buildTransformedMatrix(originalMatrix, newRowGroupIndices, result.newToOldRowMapping, result.oldToNewStateMapping, sinkRows, addSelfLoopAtSinkStates); + result.matrix = buildTransformedMatrix(originalMatrix, newRowGroupIndices, result.newToOldRowMapping, result.oldToNewStateMapping, result.sinkRows, addSelfLoopAtSinkStates); STORM_LOG_DEBUG("EndComponentEliminator is done.
Resulting matrix has " << result.matrix.getRowGroupCount() << " row groups."); return result; } diff --git a/src/storm/transformer/MemoryIncorporation.cpp b/src/storm/transformer/MemoryIncorporation.cpp index 36a87e6e8..5879cde50 100644 --- a/src/storm/transformer/MemoryIncorporation.cpp +++ b/src/storm/transformer/MemoryIncorporation.cpp @@ -74,7 +74,7 @@ namespace storm { auto notPhi = std::make_shared(storm::logic::UnaryBooleanStateFormula::OperatorType::Not, subsubFormula.asGloballyFormula().getSubformula().asSharedPointer()); memory = memory.product(getGoalMemory(model, *notPhi)); } else { - STORM_LOG_THROW(subsubFormula.isTotalRewardFormula() || subsubFormula.isCumulativeRewardFormula(), storm::exceptions::NotSupportedException, "The given Formula " << subsubFormula << " is not supported."); + STORM_LOG_THROW(subFormula->isLongRunAverageOperatorFormula() || subsubFormula.isTotalRewardFormula() || subsubFormula.isCumulativeRewardFormula() || subsubFormula.isLongRunAverageRewardFormula(), storm::exceptions::NotSupportedException, "The given Formula " << subsubFormula << " is not supported."); } } diff --git a/src/storm/utility/FilteredRewardModel.h b/src/storm/utility/FilteredRewardModel.h index 5280cfd98..2c806d732 100644 --- a/src/storm/utility/FilteredRewardModel.h +++ b/src/storm/utility/FilteredRewardModel.h @@ -35,25 +35,54 @@ namespace storm { rewardModel = &baseRewardModel; } } + + bool isDifferentFromUnfilteredModel() const { + STORM_LOG_ASSERT(rewardModel, "tried to check if the filtered reward model is different. Was it extracted before?"); + return localRewardModel.get() != nullptr; + } RewardModelType const& get() const { + STORM_LOG_ASSERT(rewardModel, "tried to get a reward model but none is available. Was it extracted before?"); return *rewardModel; } + + /*! + * Extracts the reward model. After calling this, this object should not be queried anymore + * @return + */ + RewardModelType extract() { + STORM_LOG_ASSERT(rewardModel, "tried to extract a reward model but none is available. Was it extracted already before?"); + RewardModelType result; + if (localRewardModel) { + result = std::move(*localRewardModel); + localRewardModel.reset(); + } else { + result = *rewardModel; // Creates a copy + } + rewardModel = nullptr; + return result; + } + + private: std::unique_ptr localRewardModel; RewardModelType const* rewardModel; }; + template + FilteredRewardModel createFilteredRewardModel(RewardModelType const& baseRewardModel, storm::logic::RewardAccumulation const& acc, bool isDiscreteTimeModel) { + STORM_LOG_THROW(isDiscreteTimeModel || !acc.isExitSet() || !baseRewardModel.hasStateRewards(), storm::exceptions::NotSupportedException, "Exit rewards for continuous time models are not supported."); + // Check which of the available reward types are allowed. + bool hasStateRewards = isDiscreteTimeModel ? 
acc.isExitSet() : acc.isTimeSet(); + bool hasStateActionRewards = acc.isStepsSet(); + bool hasTransitionRewards = acc.isStepsSet(); + return FilteredRewardModel(baseRewardModel, hasStateRewards, hasStateActionRewards, hasTransitionRewards); + } + template FilteredRewardModel createFilteredRewardModel(RewardModelType const& baseRewardModel, bool isDiscreteTimeModel, FormulaType const& formula) { if (formula.hasRewardAccumulation()) { - auto const& acc = formula.getRewardAccumulation(); - STORM_LOG_THROW(isDiscreteTimeModel || !acc.isExitSet() || !baseRewardModel.hasStateRewards(), storm::exceptions::NotSupportedException, "Exit rewards for continuous time models are not supported."); - // Check which of the available reward types are allowed. - bool hasStateRewards = isDiscreteTimeModel ? acc.isExitSet() : acc.isTimeSet(); - bool hasStateActionRewards = acc.isStepsSet(); - bool hasTransitionRewards = acc.isStepsSet(); - return FilteredRewardModel(baseRewardModel, hasStateRewards, hasStateActionRewards, hasTransitionRewards); + return createFilteredRewardModel(baseRewardModel, formula.getRewardAccumulation(), isDiscreteTimeModel); } else { return FilteredRewardModel(baseRewardModel, true, true, true); } diff --git a/src/storm/utility/graph.h b/src/storm/utility/graph.h index d6f12cc94..fdb9a2538 100644 --- a/src/storm/utility/graph.h +++ b/src/storm/utility/graph.h @@ -406,6 +406,7 @@ namespace storm { * @param psiStates The set of all states satisfying psi. * @param useStepBound A flag that indicates whether or not to use the given number of maximal steps for the search. * @param maximalSteps The maximal number of steps to reach the psi states. + * @param choiceConstraint If set, we assume that only the specified choices exist in the model * @return A bit vector that represents all states with probability 0. 
*/ template diff --git a/src/test/storm/modelchecker/multiobjective/SparseMaPcaaMultiObjectiveModelCheckerTest.cpp b/src/test/storm/modelchecker/multiobjective/SparseMaPcaaMultiObjectiveModelCheckerTest.cpp index 1fcd25efa..5f5694e13 100755 --- a/src/test/storm/modelchecker/multiobjective/SparseMaPcaaMultiObjectiveModelCheckerTest.cpp +++ b/src/test/storm/modelchecker/multiobjective/SparseMaPcaaMultiObjectiveModelCheckerTest.cpp @@ -213,4 +213,194 @@ TEST(SparseMaPcaaMultiObjectiveModelCheckerTest, jobscheduler_pareto_2Obj) { } +template +bool expectPointConained(std::vector> const& pointset, std::vector const& point, ValueType precision) { + for (auto const& p : pointset) { + EXPECT_EQ(p.size(), point.size()) << "Missmatch in point dimension."; + bool found = true; + for (uint64_t i = 0; i < p.size(); ++i) { + if (storm::utility::abs(p[i] - point[i]) > precision) { + found = false; + break; + } + } + if (found) { + return true; + } + } + // prepare failure message: + std::stringstream errstr; + errstr << "Point ["; + bool firstPi = true; + for (auto const& pi : point) { + if (firstPi) { + firstPi = false; + } else { + errstr << ", "; + } + errstr << pi; + } + errstr << "] is not contained in point set {"; + bool firstP = true; + for (auto const& p : pointset) { + if (firstP) { + firstP = false; + } else { + errstr << ", \t"; + } + errstr << "["; + firstPi = true; + for (auto const& pi : p) { + if (firstPi) { + firstPi = false; + } else { + errstr << ", "; + } + errstr << pi; + } + errstr << "]"; + } + errstr << "}."; + ADD_FAILURE() << errstr.str(); + return false; +} + + +template +bool expectSubset(std::vector> const& lhs, std::vector> const& rhs, ValueType precision) { + for (auto const& p : lhs) { + if (!expectPointConained(rhs, p, precision)) { + return false; + } + } + return true; +} + +template +std::vector> convertPointset(std::vector> const& in) { + std::vector> out; + for (auto const& point_str : in) { + out.emplace_back(); + for (auto const& pi_str : point_str) { + out.back().push_back(storm::utility::convertNumber(pi_str)); + } + } + return out; +} + +TEST(SparseMaPcaaMultiObjectiveModelCheckerTest, simple_lra) { + if (!storm::test::z3AtLeastVersion(4,8,5)) { + GTEST_SKIP() << "Test disabled since it triggers a bug in the installed version of z3."; + } + storm::Environment env; + env.modelchecker().multi().setMethod(storm::modelchecker::multiobjective::MultiObjectiveMethod::Pcaa); + + std::string programFile = STORM_TEST_RESOURCES_DIR "/ma/multiobj_simple_lra.ma"; + std::string formulasAsString = "multi(R{\"first\"}max=? [LRA], LRAmax=? [ x=4 ] );\n"; // pareto + formulasAsString += "multi(R{\"first\"}max=? [LRA], LRAmin=? [ x=4] );\n"; // pareto + formulasAsString += "multi(R{\"first\"}max=? [LRA], LRAmin=? [ x=4] , R{\"second\"}max=? [LRA]);\n"; // pareto + formulasAsString += "multi(R{\"first\"}max=? [LRA], LRAmax=? [ x=4] , R{\"second\"}max=? [C]);\n"; // pareto + formulasAsString += "multi(R{\"first\"}max=? [LRA], LRAmax=? [ x=4] , R{\"second\"}min=? 
[C]);\n"; // pareto + + // programm, model, formula + storm::prism::Program program = storm::api::parseProgram(programFile); + program.checkValidity(); + std::vector> formulas = storm::api::extractFormulasFromProperties(storm::api::parsePropertiesForPrismProgram(formulasAsString, program)); + storm::generator::NextStateGeneratorOptions options(formulas); + auto ma = storm::builder::ExplicitModelBuilder(program, options).build()->as>(); + + { + std::unique_ptr result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *ma, formulas[0]->asMultiObjectiveFormula()); + ASSERT_TRUE(result->isExplicitParetoCurveCheckResult()); + std::vector> expectedPoints; + expectedPoints.emplace_back(std::vector({"98","3/10"})); + expectedPoints.emplace_back(std::vector({"33","7/10"})); + double eps = 1e-4; + EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found."; + EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult().getPoints(), eps)) << "Pareto point missing."; + } + { + std::unique_ptr result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *ma, formulas[1]->asMultiObjectiveFormula()); + ASSERT_TRUE(result->isExplicitParetoCurveCheckResult()); + std::vector> expectedPoints; + expectedPoints.emplace_back(std::vector({"98","3/10"})); + double eps = 1e-4; + EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found."; + EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult().getPoints(), eps)) << "Pareto point missing."; + } + { + std::unique_ptr result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *ma, formulas[2]->asMultiObjectiveFormula()); + ASSERT_TRUE(result->isExplicitParetoCurveCheckResult()); + std::vector> expectedPoints; + expectedPoints.emplace_back(std::vector({"98","3/10", "0"})); + double eps = 1e-4; + EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found."; + EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult().getPoints(), eps)) << "Pareto point missing."; + } + { + std::unique_ptr result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *ma, formulas[3]->asMultiObjectiveFormula()); + ASSERT_TRUE(result->isExplicitParetoCurveCheckResult()); + std::vector> expectedPoints; + expectedPoints.emplace_back(std::vector({"98", "3/10", "42/10"})); + expectedPoints.emplace_back(std::vector({"33", "7/10", "42/10"})); + double eps = 1e-4; + EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found."; + EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult().getPoints(), eps)) << "Pareto point missing."; + } + { + std::unique_ptr result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *ma, formulas[4]->asMultiObjectiveFormula()); + ASSERT_TRUE(result->isExplicitParetoCurveCheckResult()); + std::vector> expectedPoints; + expectedPoints.emplace_back(std::vector({"98", "3/10", "0"})); + expectedPoints.emplace_back(std::vector({"33", "7/10", "0"})); + double eps = 1e-4; + 
EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found."; + EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult().getPoints(), eps)) << "Pareto point missing."; + } +} + +TEST(SparseMaPcaaMultiObjectiveModelCheckerTest, polling) { + if (!storm::test::z3AtLeastVersion(4,8,5)) { + GTEST_SKIP() << "Test disabled since it triggers a bug in the installed version of z3."; + } + storm::Environment env; + env.modelchecker().multi().setMethod(storm::modelchecker::multiobjective::MultiObjectiveMethod::Pcaa); + + std::string programFile = STORM_TEST_RESOURCES_DIR "/ma/polling.ma"; + std::string constantsDefStr = "N=1,Q=1"; + std::string formulasAsString = "multi(LRAmin=? [\"q1full\"], LRAmin=? [\"q2full\"]);\n"; // pareto + formulasAsString += "multi(Pmin=? [F<0.1 \"q1full\"], LRAmin=? [\"q1full\"]);\n" ; // pareto + + // programm, model, formula + storm::prism::Program program = storm::api::parseProgram(programFile); + program = storm::utility::prism::preprocess(program, constantsDefStr); + program.checkValidity(); + std::vector> formulas = storm::api::extractFormulasFromProperties(storm::api::parsePropertiesForPrismProgram(formulasAsString, program)); + storm::generator::NextStateGeneratorOptions options(formulas); + auto ma = storm::builder::ExplicitModelBuilder(program, options).build()->as>(); + + { + std::unique_ptr result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *ma, formulas[0]->asMultiObjectiveFormula()); + ASSERT_TRUE(result->isExplicitParetoCurveCheckResult()); + std::vector> expectedPoints; + expectedPoints.emplace_back(std::vector({"62771/102086","89585/102086"})); + expectedPoints.emplace_back(std::vector({"77531/89546","64985/89546"})); + double eps = 1e-4; + EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found."; + EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult().getPoints(), eps)) << "Pareto point missing."; + } + { + std::unique_ptr result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *ma, formulas[1]->asMultiObjectiveFormula()); + ASSERT_TRUE(result->isExplicitParetoCurveCheckResult()); + std::vector> expectedPoints; + expectedPoints.emplace_back(std::vector({"0.25918177931828","62771/102086"})); + double eps = 1e-4; + // TODO: Right now, there is a non-optimal point included due to numerical imprecisions. 
We therefore skip this check: + //EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found."; + EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult().getPoints(), eps)) << "Pareto point missing."; + } +} + + + #endif /* defined STORM_HAVE_HYPRO || defined STORM_HAVE_Z3_OPTIMIZE */ diff --git a/src/test/storm/modelchecker/multiobjective/SparseMdpPcaaMultiObjectiveModelCheckerTest.cpp b/src/test/storm/modelchecker/multiobjective/SparseMdpPcaaMultiObjectiveModelCheckerTest.cpp index bed96b7b1..3c60c9818 100755 --- a/src/test/storm/modelchecker/multiobjective/SparseMdpPcaaMultiObjectiveModelCheckerTest.cpp +++ b/src/test/storm/modelchecker/multiobjective/SparseMdpPcaaMultiObjectiveModelCheckerTest.cpp @@ -8,10 +8,13 @@ #include "storm/modelchecker/results/ExplicitQuantitativeCheckResult.h" #include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h" +#include "storm/modelchecker/results/ExplicitParetoCurveCheckResult.h" #include "storm/models/sparse/Mdp.h" #include "storm/settings/modules/GeneralSettings.h" #include "storm/settings/SettingsManager.h" #include "storm/storage/jani/Property.h" +#include "storm/storage/geometry/Polytope.h" +#include "storm/storage/geometry/Hyperrectangle.h" #include "storm/api/storm.h" #include "storm-parsers/api/storm-parsers.h" #include "storm/environment/Environment.h" @@ -141,6 +144,290 @@ TEST(SparseMdpPcaaMultiObjectiveModelCheckerTest, dpm) { } +template +bool expectPointConained(std::vector> const& pointset, std::vector const& point, ValueType precision) { + for (auto const& p : pointset) { + EXPECT_EQ(p.size(), point.size()) << "Missmatch in point dimension."; + bool found = true; + for (uint64_t i = 0; i < p.size(); ++i) { + if (storm::utility::abs(p[i] - point[i]) > precision) { + found = false; + break; + } + } + if (found) { + return true; + } + } + // prepare failure message: + std::stringstream errstr; + errstr << "Point ["; + bool firstPi = true; + for (auto const& pi : point) { + if (firstPi) { + firstPi = false; + } else { + errstr << ", "; + } + errstr << pi; + } + errstr << "] is not contained in point set {"; + bool firstP = true; + for (auto const& p : pointset) { + if (firstP) { + firstP = false; + } else { + errstr << ", \t"; + } + errstr << "["; + firstPi = true; + for (auto const& pi : p) { + if (firstPi) { + firstPi = false; + } else { + errstr << ", "; + } + errstr << pi; + } + errstr << "]"; + } + errstr << "}."; + ADD_FAILURE() << errstr.str(); + return false; +} + + +template +bool expectSubset(std::vector> const& lhs, std::vector> const& rhs, ValueType precision) { + for (auto const& p : lhs) { + if (!expectPointConained(rhs, p, precision)) { + return false; + } + } + return true; +} + +template +std::vector> convertPointset(std::vector> const& in) { + std::vector> out; + for (auto const& point_str : in) { + out.emplace_back(); + for (auto const& pi_str : point_str) { + out.back().push_back(storm::utility::convertNumber(pi_str)); + } + } + return out; +} + +TEST(SparseMdpPcaaMultiObjectiveModelCheckerTest, simple_lra) { + if (!storm::test::z3AtLeastVersion(4,8,5)) { + GTEST_SKIP() << "Test disabled since it triggers a bug in the installed version of z3."; + } + storm::Environment env; + env.modelchecker().multi().setMethod(storm::modelchecker::multiobjective::MultiObjectiveMethod::Pcaa); + + + std::string programFile = STORM_TEST_RESOURCES_DIR "/mdp/multiobj_simple_lra.nm"; + std::string 
formulasAsString = "multi(R{\"first\"}max=? [ LRA ], R{\"second\"}max=? [ LRA ]);\n"; // pareto + formulasAsString += "multi(R{\"first\"}min=? [ LRA ], R{\"second\"}max=? [ LRA ]);\n"; // pareto + formulasAsString += "multi(R{\"first\"}min=? [ LRA ], R{\"second\"}min=? [ LRA ]);\n"; // pareto + formulasAsString += "multi(R{\"first\"}min=? [ C ], R{\"second\"}min=? [ LRA ]);\n"; // pareto + formulasAsString += "multi(R{\"first\"}min=? [ C ], R{\"first\"}max=? [ LRA ]);\n"; // pareto + formulasAsString += "multi(R{\"first\"}min=? [ C ], R{\"second\"}max=? [ LRA ], R{\"third\"}max=? [ C ]);\n"; // pareto + formulasAsString += "multi(R{\"first\"}min=? [ LRA ], R{\"second\"}max=? [ LRA ], R{\"third\"}min=? [ C ]);\n"; // pareto + formulasAsString += "multi(LRAmax=? [ x=1 ], R{\"second\"}max=? [ LRA ]);\n"; // pareto + + // programm, model, formula + storm::prism::Program program = storm::api::parseProgram(programFile); + program.checkValidity(); + std::vector> formulas = storm::api::extractFormulasFromProperties(storm::api::parsePropertiesForPrismProgram(formulasAsString, program)); + storm::generator::NextStateGeneratorOptions options(formulas); + auto mdp = storm::builder::ExplicitModelBuilder(program, options).build()->as>(); + + { + std::unique_ptr result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[0]->asMultiObjectiveFormula()); + ASSERT_TRUE(result->isExplicitParetoCurveCheckResult()); + std::vector> expectedPoints; + expectedPoints.emplace_back(std::vector({"5","80/11"})); + expectedPoints.emplace_back(std::vector({"0","16"})); + double eps = 1e-4; + EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found."; + EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult().getPoints(), eps)) << "Pareto point missing."; + } + { + std::unique_ptr result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[1]->asMultiObjectiveFormula()); + ASSERT_TRUE(result->isExplicitParetoCurveCheckResult()); + std::vector> expectedPoints; + expectedPoints.emplace_back(std::vector({"0","16"})); + double eps = 1e-4; + EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found."; + EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult().getPoints(), eps)) << "Pareto point missing."; + } + { + std::unique_ptr result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[2]->asMultiObjectiveFormula()); + ASSERT_TRUE(result->isExplicitParetoCurveCheckResult()); + std::vector> expectedPoints; + expectedPoints.emplace_back(std::vector({"0","0"})); + double eps = 1e-4; + EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found."; + EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult().getPoints(), eps)) << "Pareto point missing."; + } + { + std::unique_ptr result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[3]->asMultiObjectiveFormula()); + ASSERT_TRUE(result->isExplicitParetoCurveCheckResult()); + std::vector> expectedPoints; + expectedPoints.emplace_back(std::vector({"10/8","0"})); + double eps = 1e-4; + 
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[4]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"10/8","0"}));
+        double eps = 1e-4;
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[5]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"10/8", "0", "10/8"}));
+        expectedPoints.emplace_back(std::vector<std::string>({"7", "16", "2"}));
+        double eps = 1e-4;
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[6]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"0","0", "10/8"}));
+        expectedPoints.emplace_back(std::vector<std::string>({"0","16", "2"}));
+        double eps = 1e-4;
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[7]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"1","0"}));
+        expectedPoints.emplace_back(std::vector<std::string>({"0","16"}));
+        double eps = 1e-4;
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+}
+
+TEST(SparseMdpPcaaMultiObjectiveModelCheckerTest, resource_gathering) {
+    if (!storm::test::z3AtLeastVersion(4,8,5)) {
+        GTEST_SKIP() << "Test disabled since it triggers a bug in the installed version of z3.";
+    }
+    storm::Environment env;
+    env.modelchecker().multi().setMethod(storm::modelchecker::multiobjective::MultiObjectiveMethod::Pcaa);
+
+    std::string programFile = STORM_TEST_RESOURCES_DIR "/mdp/resource-gathering.nm";
+    std::string constantsDef = "GOLD_TO_COLLECT=0,GEM_TO_COLLECT=0,B=0";
+    std::string formulasAsString = "multi(R{\"rew_gold\"}max=? [LRA], R{\"rew_gem\"}max=? [LRA]);\n"; // pareto
+    formulasAsString += "multi(R{\"rew_gold\"}min=? [LRA], R{\"rew_gem\"}max=? [LRA]);\n"; // pareto
+    formulasAsString += "multi(R{\"rew_gold\"}max=? [LRA], R{\"rew_gem\"}min=? [LRA]);\n"; // pareto
+    formulasAsString += "multi(R{\"rew_gold\"}min=? [LRA], R{\"rew_gem\"}min=? [LRA]);\n"; // pareto
+    formulasAsString += "multi(R{\"rew_gold\"}max=? [LRA], R{\"attacks\"}min=? [LRA]);\n"; // pareto
+    formulasAsString += "multi(R{\"rew_gold\"}max=? [LRA], R{\"attacks\"}min=? [C]);\n"; // pareto
+    formulasAsString += "multi(R{\"rew_gold\"}max=? [LRA], R{\"rew_gem\"}max=? [LRA], R{\"attacks\"}min=? [C]);\n"; // pareto
+    formulasAsString += "multi(R{\"rew_gold\"}max=? [LRA], R{\"rew_gem\"}max=? [ C<100 ]);\n"; // pareto
+
+    // program, model, formula
+    storm::prism::Program program = storm::api::parseProgram(programFile);
+    program = storm::utility::prism::preprocess(program, constantsDef);
+    program.checkValidity();
+    std::vector<std::shared_ptr<storm::logic::Formula const>> formulas = storm::api::extractFormulasFromProperties(storm::api::parsePropertiesForPrismProgram(formulasAsString, program));
+    storm::generator::NextStateGeneratorOptions options(formulas);
+    auto mdp = storm::builder::ExplicitModelBuilder<double>(program, options).build()->as<storm::models::sparse::Mdp<double>>();
+
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[0]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"27/241","0"}));
+        expectedPoints.emplace_back(std::vector<std::string>({"0","1/10"}));
+        expectedPoints.emplace_back(std::vector<std::string>({"27/349","27/349"}));
+        double eps = 1e-4;
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[1]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"0","1/10"}));
+        double eps = 1e-4;
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[2]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"27/241","0"}));
+        double eps = 1e-4;
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[3]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"0","0"}));
+        double eps = 1e-4;
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[4]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"27/241","19/723"}));
+        expectedPoints.emplace_back(std::vector<std::string>({"3/31","1/93"}));
+        expectedPoints.emplace_back(std::vector<std::string>({"1/12","0"}));
+        double eps = 1e-4;
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[5]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"1/12","0"}));
+        double eps = 1e-4;
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[6]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"1/12","0","0"}));
+        expectedPoints.emplace_back(std::vector<std::string>({"0","1/10","0"}));
+        expectedPoints.emplace_back(std::vector<std::string>({"1/18","1/18","0"}));
+        double eps = 1e-4;
+        EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+    {
+        std::unique_ptr<storm::modelchecker::CheckResult> result = storm::modelchecker::multiobjective::performMultiObjectiveModelChecking(env, *mdp, formulas[7]->asMultiObjectiveFormula());
+        ASSERT_TRUE(result->isExplicitParetoCurveCheckResult());
+        std::vector<std::vector<std::string>> expectedPoints;
+        expectedPoints.emplace_back(std::vector<std::string>({"27/241","9"}));
+        double eps = 1e-4;
+        // TODO: Right now, there is a non-optimal point included due to numerical imprecisions. We therefore skip this check:
+        // EXPECT_TRUE(expectSubset(result->asExplicitParetoCurveCheckResult<double>().getPoints(), convertPointset(expectedPoints), eps)) << "Non-Pareto point found.";
+        EXPECT_TRUE(expectSubset(convertPointset(expectedPoints), result->asExplicitParetoCurveCheckResult<double>().getPoints(), eps)) << "Pareto point missing.";
+    }
+}
 #endif /* STORM_HAVE_HYPRO || defined STORM_HAVE_Z3_OPTIMIZE */
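
The assertions above check each computed Pareto curve in both directions: every computed point must be eps-close to some expected point (otherwise "Non-Pareto point found.") and every expected point must be eps-close to some computed point (otherwise "Pareto point missing."). The helpers expectSubset and convertPointset are defined elsewhere in the test file; the following minimal, self-contained C++ sketch only illustrates the behaviour these tests assume from them. The rational-string parsing and the coordinate-wise tolerance comparison are illustrative assumptions, not the repository's actual implementation.

#include <cmath>
#include <cstddef>
#include <string>
#include <vector>

// Illustrative assumption: parse a rational literal such as "27/241" or "0" into a double.
double parseRational(std::string const& s) {
    auto slash = s.find('/');
    if (slash == std::string::npos) {
        return std::stod(s);
    }
    return std::stod(s.substr(0, slash)) / std::stod(s.substr(slash + 1));
}

// Illustrative assumption: convert string-encoded expected points into numeric points.
std::vector<std::vector<double>> convertPointset(std::vector<std::vector<std::string>> const& points) {
    std::vector<std::vector<double>> result;
    for (auto const& point : points) {
        std::vector<double> converted;
        for (auto const& coordinate : point) {
            converted.push_back(parseRational(coordinate));
        }
        result.push_back(std::move(converted));
    }
    return result;
}

// Illustrative assumption: every point of 'subset' must have a point in 'superset' that
// agrees in all coordinates up to the absolute tolerance eps.
bool expectSubset(std::vector<std::vector<double>> const& subset, std::vector<std::vector<double>> const& superset, double eps) {
    for (auto const& p : subset) {
        bool found = false;
        for (auto const& q : superset) {
            if (p.size() != q.size()) {
                continue;
            }
            bool close = true;
            for (std::size_t i = 0; i < p.size(); ++i) {
                if (std::abs(p[i] - q[i]) > eps) {
                    close = false;
                    break;
                }
            }
            if (close) {
                found = true;
                break;
            }
        }
        if (!found) {
            return false;
        }
    }
    return true;
}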