From bc1eff959feae56c187a26b79d50f7963487c981 Mon Sep 17 00:00:00 2001
From: dehnert
Date: Tue, 30 Aug 2016 16:39:54 +0200
Subject: [PATCH] graph algorithms for games now also compute player 2 prob0/1
 states and the generated strategies are adapted accordingly

Former-commit-id: da525813283b68ce29b66b8081430db6f97b6454
---
 .../abstraction/GameBasedMdpModelChecker.cpp | 101 +++++++++++-------
 src/utility/graph.cpp                        |  54 ++++++----
 src/utility/graph.h                          |  13 ++-
 test/functional/utility/GraphTest.cpp        |  76 ++++++-------
 4 files changed, 141 insertions(+), 103 deletions(-)

diff --git a/src/modelchecker/abstraction/GameBasedMdpModelChecker.cpp b/src/modelchecker/abstraction/GameBasedMdpModelChecker.cpp
index 2a7b407f2..d4a7d21bf 100644
--- a/src/modelchecker/abstraction/GameBasedMdpModelChecker.cpp
+++ b/src/modelchecker/abstraction/GameBasedMdpModelChecker.cpp
@@ -172,6 +172,7 @@ namespace storm {
         template
         storm::dd::Bdd pickPivotState(storm::dd::Bdd const& initialStates, storm::dd::Bdd const& transitions, std::set const& rowVariables, std::set const& columnVariables, storm::dd::Bdd const& pivotStates) {
+            // Perform a BFS and pick the first pivot state we encounter.
             storm::dd::Bdd pivotState;
             storm::dd::Bdd frontier = initialStates;
@@ -186,6 +187,7 @@ namespace storm {
                 frontierPivotStates = frontier && pivotStates;
                 if (!frontierPivotStates.isZero()) {
+                    STORM_LOG_TRACE("Picked pivot state from " << frontierPivotStates.getNonZeroCount() << " candidates on level, " << pivotStates.getNonZeroCount() << " candidates in total.");
                     pivotState = frontierPivotStates.existsAbstractRepresentative(rowVariables);
                     foundPivotState = true;
                 }
@@ -196,18 +198,37 @@ namespace storm {
         }
         template
-        void refineAfterQualitativeCheck(storm::abstraction::prism::PrismMenuGameAbstractor& abstractor, storm::abstraction::MenuGame const& game, detail::GameProb01Result const& prob01, storm::dd::Bdd const& transitionMatrixBdd) {
-            STORM_LOG_TRACE("Refining after qualitative check.");
-            // Build the fragment of states that is reachable by any combination of the player 1 and player 2 strategies.
-            storm::dd::Bdd reachableTransitions = prob01.min.first.getPlayer2Strategy() || prob01.max.second.getPlayer2Strategy();
-            reachableTransitions = (prob01.min.first.getPlayer1Strategy() && reachableTransitions) || (prob01.max.second.getPlayer1Strategy() && reachableTransitions);
-            reachableTransitions &= transitionMatrixBdd;
+        bool refineAfterQualitativeCheck(storm::abstraction::prism::PrismMenuGameAbstractor& abstractor, storm::abstraction::MenuGame const& game, detail::GameProb01Result const& prob01, storm::dd::Bdd const& transitionMatrixBdd) {
+            STORM_LOG_TRACE("Trying refinement after qualitative check.");
+            // Build the fragment of transitions that is reachable by both the min and the max strategies.
+            storm::dd::Bdd reachableTransitions = transitionMatrixBdd && prob01.min.first.getPlayer1Strategy() && prob01.min.first.getPlayer2Strategy() && prob01.max.second.getPlayer1Strategy() && prob01.max.second.getPlayer2Strategy();
             reachableTransitions = reachableTransitions.existsAbstract(game.getNondeterminismVariables());
             storm::dd::Bdd pivotStates = storm::utility::dd::computeReachableStates(game.getInitialStates(), reachableTransitions, game.getRowVariables(), game.getColumnVariables());
+            
+            // Require the pivot state to be a [0, 1] state.
+            pivotStates &= prob01.min.first.getPlayer1States() && prob01.max.second.getPlayer1States();
-            // Then constrain these states by the requirement that for either the lower or upper player 1 choice the player 2 choices must be different.
-            pivotStates &= ((prob01.min.first.getPlayer1Strategy() || prob01.max.second.getPlayer1Strategy()) && (prob01.min.first.getPlayer2Strategy().exclusiveOr(prob01.max.second.getPlayer2Strategy()))).existsAbstract(game.getNondeterminismVariables());
-            STORM_LOG_ASSERT(!pivotStates.isZero(), "Unable to refine without pivot state candidates.");
+            // Then constrain these states by the requirement that for either the lower or upper player 1 choice the player 2 choices must be different and
+            // that the difference is not because of a missing strategy in either case.
+            
+            // Start with constructing the player 2 states that have a prob 0 (min) and prob 1 (max) strategy.
+            storm::dd::Bdd constraint = prob01.min.first.getPlayer2Strategy().existsAbstract(game.getPlayer2Variables()) && prob01.max.second.getPlayer2Strategy().existsAbstract(game.getPlayer2Variables());
+            
+            // Now construct all player 2 choices that actually exist and differ in the min and max case.
+            constraint &= prob01.min.first.getPlayer2Strategy().exclusiveOr(prob01.max.second.getPlayer2Strategy());
+            
+            // Then restrict the pivot states by requiring existing and different player 2 choices.
+            pivotStates &= ((prob01.min.first.getPlayer1Strategy() || prob01.max.second.getPlayer1Strategy()) && constraint).existsAbstract(game.getNondeterminismVariables());
+            
+            // We can only refine in case we have a reachable player 1 state with a player 2 successor (under either
+            // player 1's min or max strategy) such that from this player 2 state, both prob0 min and prob1 max define
+            // strategies and they differ. Hence, it is possible that we arrive at a point where no suitable pivot state
+            // is found. In this case, we abort the qualitative refinement here.
+            if (pivotStates.isZero()) {
+                return false;
+            }
+            
+            STORM_LOG_ASSERT(!pivotStates.isZero(), "Unable to proceed without pivot state candidates.");
             // Now that we have the pivot state candidates, we need to pick one.
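// Illustrative aside, not part of the patch: the pivot-candidate set assembled above is conceptually just an
// intersection of state sets. A minimal sketch with std::bitset standing in for the symbolic BDDs; every name
// below is hypothetical and chosen for exposition only.
#include <bitset>
#include <cstddef>

constexpr std::size_t N = 8;  // number of abstract player 1 states in a toy example

// Reachable [0, 1] states whose existing min/max player 2 choices disagree.
std::bitset<N> pivotCandidates(std::bitset<N> const& reachable,
                               std::bitset<N> const& prob0MinStates,
                               std::bitset<N> const& prob1MaxStates,
                               std::bitset<N> const& bothStrategiesDefined,
                               std::bitset<N> const& strategiesDiffer) {
    return reachable & prob0MinStates & prob1MaxStates & bothStrategiesDefined & strategiesDiffer;
}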
             storm::dd::Bdd pivotState = pickPivotState(game.getInitialStates(), reachableTransitions, game.getRowVariables(), game.getColumnVariables(), pivotStates);
@@ -223,6 +244,7 @@ namespace storm {
                 if (lowerChoicesDifferent) {
                     STORM_LOG_TRACE("Refining based on lower choice.");
                     abstractor.refine(pivotState, (pivotState && prob01.min.first.getPlayer1Strategy()).existsAbstract(game.getRowVariables()), lowerChoice1, lowerChoice2);
+                    return true;
                 } else {
                     storm::dd::Bdd upperChoice = pivotState && game.getExtendedTransitionMatrix().toBdd() && prob01.max.second.getPlayer1Strategy();
                     storm::dd::Bdd upperChoice1 = (upperChoice && prob01.min.first.getPlayer2Strategy()).existsAbstract(variablesToAbstract);
@@ -232,10 +254,12 @@ namespace storm {
                     if (upperChoicesDifferent) {
                         STORM_LOG_TRACE("Refining based on upper choice.");
                         abstractor.refine(pivotState, (pivotState && prob01.max.second.getPlayer1Strategy()).existsAbstract(game.getRowVariables()), upperChoice1, upperChoice2);
+                        return true;
                     } else {
                         STORM_LOG_ASSERT(false, "Did not find choices from which to derive predicates.");
                     }
                 }
+            return false;
         }
         template
@@ -257,8 +281,8 @@ namespace storm {
             storm::dd::Add pivotStateLower = pivotState.template toAdd() * minResult;
             storm::dd::Add pivotStateUpper = pivotState.template toAdd() * maxResult;
-            storm::dd::Bdd pivotStateIsMinProb0 = pivotState && prob01.min.first.states;
-            storm::dd::Bdd pivotStateIsMaxProb0 = pivotState && prob01.max.first.states;
+            storm::dd::Bdd pivotStateIsMinProb0 = pivotState && prob01.min.first.getPlayer1States();
+            storm::dd::Bdd pivotStateIsMaxProb0 = pivotState && prob01.max.first.getPlayer1States();
             storm::dd::Bdd pivotStateLowerStrategies = pivotState && prob01.min.first.getPlayer1Strategy() && prob01.min.first.getPlayer2Strategy();
             storm::dd::Bdd pivotStateUpperStrategies = pivotState && prob01.max.first.getPlayer1Strategy() && prob01.max.first.getPlayer2Strategy();
             storm::dd::Bdd pivotStateLowerPl1Strategy = pivotState && prob01.min.first.getPlayer1Strategy();
@@ -407,39 +431,33 @@ namespace storm {
                 targetStates |= game.getBottomStates();
             }
             prob01.min = computeProb01States(player1Direction, storm::OptimizationDirection::Minimize, game, transitionMatrixBdd, game.getStates(constraintExpression), targetStates);
-            std::unique_ptr result = checkForResultAfterQualitativeCheck(checkTask, storm::OptimizationDirection::Minimize, game.getInitialStates(), prob01.min.first.states, prob01.min.second.states);
+            std::unique_ptr result = checkForResultAfterQualitativeCheck(checkTask, storm::OptimizationDirection::Minimize, game.getInitialStates(), prob01.min.first.getPlayer1States(), prob01.min.second.getPlayer1States());
             if (result) {
                 return result;
             }
             prob01.max = computeProb01States(player1Direction, storm::OptimizationDirection::Maximize, game, transitionMatrixBdd, game.getStates(constraintExpression), targetStates);
-            result = checkForResultAfterQualitativeCheck(checkTask, storm::OptimizationDirection::Maximize, game.getInitialStates(), prob01.max.first.states, prob01.max.second.states);
+            result = checkForResultAfterQualitativeCheck(checkTask, storm::OptimizationDirection::Maximize, game.getInitialStates(), prob01.max.first.getPlayer1States(), prob01.max.second.getPlayer1States());
             if (result) {
                 return result;
             }
-            
-            // 2.5 At this point, we need to fix the max strategy for the prob 0 states for player 2. This is because
-            // there may be player 2 nodes for which there is no choice in the currently computed strategy (by prob0)
-            // as no choice achieves probability 0. However, later we require the strategies to only differ if necessary
-            // so we need to force the player 2 strategy to agree with the player 2 strategy for the probability 0
-            // states in the min case.
-            prob01.max.first.player2Strategy = prob01.max.first.getPlayer2Strategy().existsAbstract(game.getPlayer1Variables()).ite(prob01.max.first.getPlayer2Strategy(), prob01.min.first.getPlayer2Strategy());
-            
+            
             // 3. compute the states for which we know the result/for which we know there is more work to be done.
-            storm::dd::Bdd maybeMin = !(prob01.min.first.states || prob01.min.second.states) && game.getReachableStates();
-            storm::dd::Bdd maybeMax = !(prob01.max.first.states || prob01.max.second.states) && game.getReachableStates();
+            storm::dd::Bdd maybeMin = !(prob01.min.first.getPlayer1States() || prob01.min.second.getPlayer1States()) && game.getReachableStates();
+            storm::dd::Bdd maybeMax = !(prob01.max.first.getPlayer1States() || prob01.max.second.getPlayer1States()) && game.getReachableStates();
             // 4. if the initial states are not maybe states, then we can refine at this point.
             storm::dd::Bdd initialMaybeStates = (game.getInitialStates() && maybeMin) || (game.getInitialStates() && maybeMax);
+            bool qualitativeRefinement = false;
             if (initialMaybeStates.isZero()) {
                 // In this case, we know the result for the initial states for both player 2 minimizing and maximizing.
                 STORM_LOG_TRACE("No initial state is a 'maybe' state. Refining abstraction based on qualitative check.");
                 // Check whether we can already give the answer based on the current information.
-                result = checkForResultAfterQualitativeCheck(checkTask, game.getInitialStates(), prob01.min.first.states, prob01.max.first.states, true);
+                result = checkForResultAfterQualitativeCheck(checkTask, game.getInitialStates(), prob01.min.first.getPlayer1States(), prob01.max.first.getPlayer1States(), true);
                 if (result) {
                     return result;
                 }
-                result = checkForResultAfterQualitativeCheck(checkTask, game.getInitialStates(), prob01.min.second.states, prob01.max.second.states, false);
+                result = checkForResultAfterQualitativeCheck(checkTask, game.getInitialStates(), prob01.min.second.getPlayer1States(), prob01.max.second.getPlayer1States(), false);
                 if (result) {
                     return result;
                 }
@@ -448,21 +466,24 @@ namespace storm {
                 // If we get here, the initial states were all identified as prob0/1 states, but the value (0 or 1)
                 // depends on whether player 2 is minimizing or maximizing. Therefore, we need to find a place to refine.
-                refineAfterQualitativeCheck(abstractor, game, prob01, transitionMatrixBdd);
-            } else {
-                
+                qualitativeRefinement = refineAfterQualitativeCheck(abstractor, game, prob01, transitionMatrixBdd);
+            }
+            
+            if (!qualitativeRefinement) {
                 // At this point, we know that we cannot answer the query without further numeric computation.
                 STORM_LOG_TRACE("Starting numerical solution step.");
                 // Prepare some storage that we use on-demand during the quantitative solving step.
-                storm::dd::Add minResult = prob01.min.second.states.template toAdd();
-                storm::dd::Add maxResult = prob01.max.second.states.template toAdd();
+                storm::dd::Add minResult = prob01.min.second.getPlayer1States().template toAdd();
+                storm::dd::Add maxResult = prob01.max.second.getPlayer1States().template toAdd();
                 storm::dd::Add initialStatesAdd = game.getInitialStates().template toAdd();
-                ValueType minValue = (prob01.min.second.states && game.getInitialStates()) == game.getInitialStates() ? storm::utility::one() : storm::utility::zero();
+                // The minimal value after qualitative checking can only be zero. If it was 1, we could have given
+                // the result right away.
+                ValueType minValue = storm::utility::zero();
                 MaybeStateResult minMaybeStateResult(game.getManager().template getAddZero(), game.getManager().getBddZero(), game.getManager().getBddZero());
                 if (!maybeMin.isZero()) {
-                    minMaybeStateResult = solveMaybeStates(player1Direction, storm::OptimizationDirection::Minimize, game, maybeMin, prob01.min.second.states);
+                    minMaybeStateResult = solveMaybeStates(player1Direction, storm::OptimizationDirection::Minimize, game, maybeMin, prob01.min.second.getPlayer1States());
                     minResult += minMaybeStateResult.values;
                     storm::dd::Add initialStateMin = initialStatesAdd * minResult;
                     // Here we can only require a non-zero count of *at most* one, because the result may actually be 0.
@@ -477,10 +498,12 @@
                     return result;
                 }
-                ValueType maxValue = (prob01.max.second.states && game.getInitialStates()) == game.getInitialStates() ? storm::utility::one() : storm::utility::zero();
+                // Likewise, the maximal value after qualitative checking can only be 1. If it was 0, we could have
+                // given the result right away.
+                ValueType maxValue = storm::utility::one();
                 MaybeStateResult maxMaybeStateResult(game.getManager().template getAddZero(), game.getManager().getBddZero(), game.getManager().getBddZero());
                 if (!maybeMax.isZero()) {
-                    maxMaybeStateResult = solveMaybeStates(player1Direction, storm::OptimizationDirection::Maximize, game, maybeMax, prob01.max.second.states, boost::make_optional(minMaybeStateResult));
+                    maxMaybeStateResult = solveMaybeStates(player1Direction, storm::OptimizationDirection::Maximize, game, maybeMax, prob01.max.second.getPlayer1States(), boost::make_optional(minMaybeStateResult));
                     maxResult += maxMaybeStateResult.values;
                     storm::dd::Add initialStateMax = (initialStatesAdd * maxResult);
                     // Unlike in the min-case, we can require a non-zero count of 1 here, because if the max was in
@@ -528,12 +551,12 @@
             storm::utility::graph::GameProb01Result prob0 = storm::utility::graph::performProb0(game, transitionMatrixBdd, constraintStates, targetStates, player1Direction, player2Direction, true, true);
             storm::utility::graph::GameProb01Result prob1 = storm::utility::graph::performProb1(game, transitionMatrixBdd, constraintStates, targetStates, player1Direction, player2Direction, true, true);
-            STORM_LOG_ASSERT(player2Direction != storm::OptimizationDirection::Minimize || (prob0.hasPlayer1Strategy() && (prob0.states.isZero() || !prob0.getPlayer1Strategy().isZero())), "Unable to proceed without strategy.");
-            STORM_LOG_ASSERT(player2Direction != storm::OptimizationDirection::Minimize || (prob0.hasPlayer2Strategy() && (prob0.states.isZero() || !prob0.getPlayer2Strategy().isZero())), "Unable to proceed without strategy.");
-            STORM_LOG_ASSERT(player2Direction != storm::OptimizationDirection::Maximize || (prob1.hasPlayer1Strategy() && (prob1.states.isZero() || !prob1.getPlayer1Strategy().isZero())), "Unable to proceed without strategy.");
-            STORM_LOG_ASSERT(player2Direction != storm::OptimizationDirection::Maximize || (prob1.hasPlayer2Strategy() && (prob1.states.isZero() || !prob1.getPlayer2Strategy().isZero())), "Unable to proceed without strategy.");
+            STORM_LOG_ASSERT(player2Direction != storm::OptimizationDirection::Minimize || (prob0.hasPlayer1Strategy() && (prob0.getPlayer1States().isZero() || !prob0.getPlayer1Strategy().isZero())), "Unable to proceed without strategy.");
+            STORM_LOG_ASSERT(player2Direction != storm::OptimizationDirection::Minimize || (prob0.hasPlayer2Strategy() && (prob0.getPlayer1States().isZero() || !prob0.getPlayer2Strategy().isZero())), "Unable to proceed without strategy.");
+            STORM_LOG_ASSERT(player2Direction != storm::OptimizationDirection::Maximize || (prob1.hasPlayer1Strategy() && (prob1.getPlayer1States().isZero() || !prob1.getPlayer1Strategy().isZero())), "Unable to proceed without strategy.");
+            STORM_LOG_ASSERT(player2Direction != storm::OptimizationDirection::Maximize || (prob1.hasPlayer2Strategy() && (prob1.getPlayer1States().isZero() || !prob1.getPlayer2Strategy().isZero())), "Unable to proceed without strategy.");
-            STORM_LOG_TRACE("Player 1: " << player1Direction << ", player 2: " << player2Direction << ": " << prob0.states.getNonZeroCount() << " 'no' states, " << prob1.states.getNonZeroCount() << " 'yes' states.");
+            STORM_LOG_TRACE("Player 1: " << player1Direction << ", player 2: " << player2Direction << ": " << prob0.getPlayer1States().getNonZeroCount() << " 'no' states, " << prob1.getPlayer1States().getNonZeroCount() << " 'yes' states.");
             return std::make_pair(prob0, prob1);
         }
diff --git a/src/utility/graph.cpp b/src/utility/graph.cpp
index 9f3f9a002..fa492a096 100644
--- a/src/utility/graph.cpp
+++ b/src/utility/graph.cpp
@@ -926,19 +926,21 @@ namespace storm {
         template
         GameProb01Result performProb0(storm::models::symbolic::StochasticTwoPlayerGame const& model, storm::dd::Bdd const& transitionMatrix, storm::dd::Bdd const& phiStates, storm::dd::Bdd const& psiStates, storm::OptimizationDirection const& player1Strategy, storm::OptimizationDirection const& player2Strategy, bool producePlayer1Strategy, bool producePlayer2Strategy) {
-            // The solution set.
-            storm::dd::Bdd solution = psiStates;
+            // The solution sets.
+            storm::dd::Bdd player1States = psiStates;
+            storm::dd::Bdd player2States = model.getManager().getBddZero();
             bool done = false;
             uint_fast64_t iterations = 0;
             while (!done) {
-                storm::dd::Bdd tmp = (transitionMatrix && solution.swapVariables(model.getRowColumnMetaVariablePairs())).existsAbstract(model.getColumnVariables()) && phiStates;
+                storm::dd::Bdd tmp = (transitionMatrix && player1States.swapVariables(model.getRowColumnMetaVariablePairs())).existsAbstract(model.getColumnVariables()) && phiStates;
                 if (player2Strategy == OptimizationDirection::Minimize) {
                     tmp = (tmp || model.getIllegalPlayer2Mask()).universalAbstract(model.getPlayer2Variables());
                 } else {
                     tmp = tmp.existsAbstract(model.getPlayer2Variables());
                 }
+                player2States |= tmp;
                 if (player1Strategy == OptimizationDirection::Minimize) {
                     tmp = (tmp || model.getIllegalPlayer1Mask()).universalAbstract(model.getPlayer1Variables());
@@ -946,21 +948,23 @@ namespace storm {
                     tmp = tmp.existsAbstract(model.getPlayer1Variables());
                 }
-                tmp |= solution;
+                // Re-add all previous player 1 states.
+                tmp |= player1States;
-                if (tmp == solution) {
+                if (tmp == player1States) {
                     done = true;
                 }
-                solution = tmp;
+                player1States = tmp;
                 ++iterations;
             }
-            // Since we have determined the inverse of the desired set, we need to complement it now.
-            solution = !solution && model.getReachableStates();
+            // Since we have determined the complements of the desired sets, we need to complement them now.
+            player1States = !player1States && model.getReachableStates();
+            player2States = !player2States && model.getReachableStates();
             // Determine all transitions between prob0 states.
-            storm::dd::Bdd transitionsBetweenProb0States = solution && (transitionMatrix && solution.swapVariables(model.getRowColumnMetaVariablePairs()));
+            storm::dd::Bdd transitionsBetweenProb0States = player2States && (transitionMatrix && player1States.swapVariables(model.getRowColumnMetaVariablePairs()));
             // Determine the distributions that have only successors that are prob0 states.
             storm::dd::Bdd onlyProb0Successors = (transitionsBetweenProb0States || model.getIllegalSuccessorMask()).universalAbstract(model.getColumnVariables());
@@ -974,13 +978,13 @@ namespace storm {
             boost::optional> player1StrategyBdd;
             if (producePlayer1Strategy) {
                 // Move from player 2 choices with only prob0 successors to player 1 choices with only prob 0 successors.
-                onlyProb0Successors = onlyProb0Successors.existsAbstract(model.getPlayer2Variables());
+                onlyProb0Successors = (player1States && onlyProb0Successors).existsAbstract(model.getPlayer2Variables());
                 // Pick a prob0 player 2 state.
                 player1StrategyBdd = onlyProb0Successors.existsAbstractRepresentative(model.getPlayer1Variables());
             }
-            return GameProb01Result(solution, player1StrategyBdd, player2StrategyBdd);
+            return GameProb01Result(player1States, player2States, player1StrategyBdd, player2StrategyBdd);
         }
         template
@@ -988,7 +992,8 @@
             // Create two sets of states. Those states for which we definitely know that their probability is 1 and
             // those states that potentially have a probability of 1.
-            storm::dd::Bdd maybeStates = model.getReachableStates();
+            storm::dd::Bdd maybePlayer1States = model.getReachableStates();
+            storm::dd::Bdd maybePlayer2States = model.getReachableStates();
             // A flag that governs whether strategies are produced in the current iteration.
             bool produceStrategiesInIteration = false;
@@ -1015,15 +1020,17 @@ namespace storm {
                 }
             }
-            storm::dd::Bdd solution = psiStates;
+            storm::dd::Bdd player1Solution = psiStates;
+            storm::dd::Bdd player2Solution = model.getManager().getBddZero();
             while (!solutionStatesDone) {
                 // Start by computing the transitions that have only maybe states as successors. Note that at
                 // this point, there may be illegal transitions.
-                storm::dd::Bdd distributionsStayingInMaybe = (!transitionMatrix || maybeStates.swapVariables(model.getRowColumnMetaVariablePairs())).universalAbstract(model.getColumnVariables());
+                // FIXME: use getIllegalSuccessorMask instead of !transitionMatrix?
+                storm::dd::Bdd distributionsStayingInMaybe = (!transitionMatrix || maybePlayer1States.swapVariables(model.getRowColumnMetaVariablePairs())).universalAbstract(model.getColumnVariables());
                 // Then, determine all distributions that have at least one successor in the states that have
                 // probability 1.
-                storm::dd::Bdd distributionsWithProb1Successor = (transitionMatrix && solution.swapVariables(model.getRowColumnMetaVariablePairs())).existsAbstract(model.getColumnVariables());
+                storm::dd::Bdd distributionsWithProb1Successor = (transitionMatrix && player1Solution.swapVariables(model.getRowColumnMetaVariablePairs())).existsAbstract(model.getColumnVariables());
                 // The valid distributions are then those that emanate from phi states, stay completely in the
                 // maybe states and have at least one successor with probability 1.
@@ -1046,6 +1053,8 @@ namespace storm {
                     }
                 }
+                player2Solution |= valid;
+                
                 // And do the same for player 1.
                 if (player1Strategy == OptimizationDirection::Minimize) {
                     valid = (valid || model.getIllegalPlayer1Mask()).universalAbstract(model.getPlayer1Variables());
@@ -1068,17 +1077,18 @@ namespace storm {
                 // If no new states were added, we have found the current hypothesis for the states with
                 // probability 1.
-                if (valid == solution) {
+                if (valid == player1Solution) {
                     solutionStatesDone = true;
                 } else {
-                    solution = valid;
+                    player1Solution = valid;
                 }
                 ++solutionStateIterations;
             }
             // If the states with probability 1 and the potential probability 1 states coincide, we have found
             // the solution.
-            if (solution == maybeStates) {
+            if (player1Solution == maybePlayer1States) {
+                maybePlayer2States = player2Solution;
                 maybeStatesDone = true;
                 // If we were asked to produce strategies, we propagate that by triggering another iteration.
@@ -1087,7 +1097,7 @@ namespace storm {
             } else {
                 // Otherwise, we use the current hypothesis for the states with probability 1 as the new maybe
                 // state set.
-                maybeStates = solution;
+                maybePlayer1States = player1Solution;
             }
             ++maybeStateIterations;
         }
@@ -1098,17 +1108,17 @@ namespace storm {
             // 'arbitrary', do so now.
             bool strategiesToCompute = (producePlayer1Strategy && !player1StrategyBdd) || (producePlayer2Strategy && !player2StrategyBdd);
             if (strategiesToCompute) {
-                storm::dd::Bdd relevantStates = (transitionMatrix && maybeStates).existsAbstract(model.getColumnVariables());
+                storm::dd::Bdd relevantStates = (transitionMatrix && maybePlayer2States).existsAbstract(model.getColumnVariables());
                 if (producePlayer2Strategy && !player2StrategyBdd) {
                     player2StrategyBdd = relevantStates.existsAbstractRepresentative(model.getPlayer2Variables());
                 }
                 if (producePlayer1Strategy && !player1StrategyBdd) {
-                    relevantStates = relevantStates.existsAbstract(model.getPlayer2Variables());
+                    relevantStates = (maybePlayer1States && relevantStates).existsAbstract(model.getPlayer2Variables());
                     player1StrategyBdd = relevantStates.existsAbstractRepresentative(model.getPlayer1Variables());
                 }
             }
-            return GameProb01Result(maybeStates, player1StrategyBdd, player2StrategyBdd);
+            return GameProb01Result(maybePlayer1States, maybePlayer2States, player1StrategyBdd, player2StrategyBdd);
         }
         template
diff --git a/src/utility/graph.h b/src/utility/graph.h
index 89ceafc3e..dc6684509 100644
--- a/src/utility/graph.h
+++ b/src/utility/graph.h
@@ -525,7 +525,7 @@ namespace storm {
         template
         struct GameProb01Result {
             GameProb01Result() = default;
-            GameProb01Result(storm::dd::Bdd const& states, boost::optional> const& player1Strategy = boost::none, boost::optional> const& player2Strategy = boost::none) : states(states), player1Strategy(player1Strategy), player2Strategy(player2Strategy) {
+            GameProb01Result(storm::dd::Bdd const& player1States, storm::dd::Bdd const& player2States, boost::optional> const& player1Strategy = boost::none, boost::optional> const& player2Strategy = boost::none) : player1States(player1States), player2States(player2States), player1Strategy(player1Strategy), player2Strategy(player2Strategy) {
                 // Intentionally left empty.
             }
@@ -553,11 +553,16 @@ namespace storm {
                 return player2Strategy;
             }
-            storm::dd::Bdd const& getStates() const {
-                return states;
+            storm::dd::Bdd const& getPlayer1States() const {
+                return player1States;
             }
-            storm::dd::Bdd states;
+            storm::dd::Bdd const& getPlayer2States() const {
+                return player2States;
+            }
+            
+            storm::dd::Bdd player1States;
+            storm::dd::Bdd player2States;
             boost::optional> player1Strategy;
             boost::optional> player2Strategy;
         };
diff --git a/test/functional/utility/GraphTest.cpp b/test/functional/utility/GraphTest.cpp
index 7b431e67e..f3b6a460b 100644
--- a/test/functional/utility/GraphTest.cpp
+++ b/test/functional/utility/GraphTest.cpp
@@ -215,30 +215,30 @@ TEST(GraphTest, SymbolicProb01StochasticGameDieSmall) {
     storm::dd::Bdd targetStates = game.getStates(initialPredicates[0], true);
     storm::utility::graph::GameProb01Result result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, true, true);
-    EXPECT_EQ(2, result.states.getNonZeroCount());
+    EXPECT_EQ(2, result.getPlayer1States().getNonZeroCount());
     EXPECT_TRUE(result.hasPlayer1Strategy());
     EXPECT_TRUE(result.hasPlayer2Strategy());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(2, result.states.getNonZeroCount());
+    EXPECT_EQ(2, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(2, result.states.getNonZeroCount());
+    EXPECT_EQ(2, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(2, result.states.getNonZeroCount());
+    EXPECT_EQ(2, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(2, result.states.getNonZeroCount());
+    EXPECT_EQ(2, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(2, result.states.getNonZeroCount());
+    EXPECT_EQ(2, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(1, result.states.getNonZeroCount());
+    EXPECT_EQ(1, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize, true, true);
-    EXPECT_EQ(3, result.states.getNonZeroCount());
+    EXPECT_EQ(3, result.getPlayer1States().getNonZeroCount());
     EXPECT_TRUE(result.hasPlayer1Strategy());
     EXPECT_TRUE(result.hasPlayer2Strategy());
@@ -249,12 +249,12 @@ TEST(GraphTest, SymbolicProb01StochasticGameDieSmall) {
     targetStates = game.getStates(initialPredicates[0], true);
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, true, true);
-    EXPECT_EQ(2, result.states.getNonZeroCount());
+    EXPECT_EQ(2, result.getPlayer1States().getNonZeroCount());
     ASSERT_TRUE(result.hasPlayer1Strategy());
     ASSERT_TRUE(result.hasPlayer2Strategy());
     // Check the validity of the strategies. Start by checking whether only prob0 states have a strategy.
-    storm::dd::Bdd nonProb0StatesWithStrategy = !result.states && result.player1Strategy.get();
+    storm::dd::Bdd nonProb0StatesWithStrategy = !result.getPlayer1States() && result.player1Strategy.get();
     EXPECT_TRUE(nonProb0StatesWithStrategy.isZero());
     // Proceed by checking whether they select exactly one action in each state.
@@ -266,30 +266,30 @@ TEST(GraphTest, SymbolicProb01StochasticGameDieSmall) {
     EXPECT_EQ(1, stateDistributionCount.getMax());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(3, result.states.getNonZeroCount());
+    EXPECT_EQ(3, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(1, result.states.getNonZeroCount());
+    EXPECT_EQ(1, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(4, result.states.getNonZeroCount());
+    EXPECT_EQ(4, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(2, result.states.getNonZeroCount());
+    EXPECT_EQ(2, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(3, result.states.getNonZeroCount());
+    EXPECT_EQ(3, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(1, result.states.getNonZeroCount());
+    EXPECT_EQ(1, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize, true, true);
-    EXPECT_EQ(4, result.states.getNonZeroCount());
+    EXPECT_EQ(4, result.getPlayer1States().getNonZeroCount());
     EXPECT_TRUE(result.hasPlayer1Strategy());
     EXPECT_TRUE(result.hasPlayer2Strategy());
     // Check the validity of the strategies. Start by checking whether only prob1 states have a strategy.
-    storm::dd::Bdd nonProb1StatesWithStrategy = !result.states && result.player1Strategy.get();
+    storm::dd::Bdd nonProb1StatesWithStrategy = !result.getPlayer1States() && result.player1Strategy.get();
     EXPECT_TRUE(nonProb1StatesWithStrategy.isZero());
     // Proceed by checking whether they select exactly one action in each state.
@@ -351,12 +351,12 @@ TEST(GraphTest, SymbolicProb01StochasticGameTwoDice) {
     storm::dd::Bdd targetStates = game.getStates(initialPredicates[7], false) && game.getStates(initialPredicates[22], false) && game.getStates(initialPredicates[9], false) && game.getStates(initialPredicates[24], false);
     storm::utility::graph::GameProb01Result result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, true, true);
-    EXPECT_EQ(153, result.states.getNonZeroCount());
+    EXPECT_EQ(153, result.getPlayer1States().getNonZeroCount());
     ASSERT_TRUE(result.hasPlayer1Strategy());
     ASSERT_TRUE(result.hasPlayer2Strategy());
     // Check the validity of the strategies. Start by checking whether only prob0 states have a strategy.
-    storm::dd::Bdd nonProb0StatesWithStrategy = !result.states && result.player1Strategy.get();
+    storm::dd::Bdd nonProb0StatesWithStrategy = !result.getPlayer1States() && result.player1Strategy.get();
     EXPECT_TRUE(nonProb0StatesWithStrategy.isZero());
     // Proceed by checking whether they select exactly one action in each state.
@@ -367,30 +367,30 @@ TEST(GraphTest, SymbolicProb01StochasticGameTwoDice) {
     EXPECT_EQ(1, stateDistributionCount.getMax());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(1, result.states.getNonZeroCount());
+    EXPECT_EQ(1, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(153, result.states.getNonZeroCount());
+    EXPECT_EQ(153, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(1, result.states.getNonZeroCount());
+    EXPECT_EQ(1, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(153, result.states.getNonZeroCount());
+    EXPECT_EQ(153, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(1, result.states.getNonZeroCount());
+    EXPECT_EQ(1, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(153, result.states.getNonZeroCount());
+    EXPECT_EQ(153, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize, true, true);
-    EXPECT_EQ(1, result.states.getNonZeroCount());
+    EXPECT_EQ(1, result.getPlayer1States().getNonZeroCount());
     EXPECT_TRUE(result.hasPlayer1Strategy());
     EXPECT_TRUE(result.hasPlayer2Strategy());
     // Check the validity of the strategies. Start by checking whether only prob1 states have a strategy.
-    storm::dd::Bdd nonProb1StatesWithStrategy = !result.states && result.player1Strategy.get();
+    storm::dd::Bdd nonProb1StatesWithStrategy = !result.getPlayer1States() && result.player1Strategy.get();
     EXPECT_TRUE(nonProb1StatesWithStrategy.isZero());
     // Proceed by checking whether they select exactly one action in each state.
@@ -520,12 +520,12 @@ TEST(GraphTest, SymbolicProb01StochasticGameWlan) {
     storm::dd::Bdd targetStates = game.getStates(initialPredicates[2], false);
     storm::utility::graph::GameProb01Result result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, true, true);
-    EXPECT_EQ(2831, result.states.getNonZeroCount());
+    EXPECT_EQ(2831, result.getPlayer1States().getNonZeroCount());
     EXPECT_TRUE(result.hasPlayer1Strategy());
     EXPECT_TRUE(result.hasPlayer2Strategy());
     // Check the validity of the strategies. Start by checking whether only prob0 states have a strategy.
-    storm::dd::Bdd nonProb0StatesWithStrategy = !result.states && result.player1Strategy.get();
+    storm::dd::Bdd nonProb0StatesWithStrategy = !result.getPlayer1States() && result.player1Strategy.get();
     EXPECT_TRUE(nonProb0StatesWithStrategy.isZero());
     // Proceed by checking whether they select exactly one action in each state.
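// Illustrative aside, not part of the patch: the strategy checks repeated throughout these tests follow one
// pattern, shown here as a hypothetical helper. It is a simplified variant of the checks above, assumes the
// result was computed with strategy production enabled, and relies only on the GameProb01Result accessors
// introduced in this change plus Bdd/Add operations already used in the tests; the helper itself does not
// exist in Storm.
template<storm::dd::DdType Type, typename ValueType>
bool isPlayer1StrategyConsistent(storm::utility::graph::GameProb01Result<Type> const& result,
                                 storm::models::symbolic::StochasticTwoPlayerGame<Type, ValueType> const& game) {
    // Only states in the computed player 1 set may carry a strategy ...
    bool onlyComputedStates = (!result.getPlayer1States() && result.getPlayer1Strategy()).isZero();
    // ... and no state may select more than one player 1 action.
    storm::dd::Add<Type, ValueType> choiceCount =
        result.getPlayer1Strategy().template toAdd<ValueType>().sumAbstract(game.getPlayer1Variables());
    return onlyComputedStates && choiceCount.getMax() <= storm::utility::one<ValueType>();
}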
@@ -537,30 +537,30 @@ TEST(GraphTest, SymbolicProb01StochasticGameWlan) {
     EXPECT_EQ(1, stateDistributionCount.getMax());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(2692, result.states.getNonZeroCount());
+    EXPECT_EQ(2692, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(2831, result.states.getNonZeroCount());
+    EXPECT_EQ(2831, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(2692, result.states.getNonZeroCount());
+    EXPECT_EQ(2692, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(2064, result.states.getNonZeroCount());
+    EXPECT_EQ(2064, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize);
-    EXPECT_EQ(2884, result.states.getNonZeroCount());
+    EXPECT_EQ(2884, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb0(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize);
-    EXPECT_EQ(2064, result.states.getNonZeroCount());
+    EXPECT_EQ(2064, result.getPlayer1States().getNonZeroCount());
     result = storm::utility::graph::performProb1(game, game.getQualitativeTransitionMatrix(), game.getReachableStates(), targetStates, storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize, true, true);
-    EXPECT_EQ(2884, result.states.getNonZeroCount());
+    EXPECT_EQ(2884, result.getPlayer1States().getNonZeroCount());
     EXPECT_TRUE(result.hasPlayer1Strategy());
     EXPECT_TRUE(result.hasPlayer2Strategy());
     // Check the validity of the strategies. Start by checking whether only prob1 states have a strategy.
-    storm::dd::Bdd nonProb1StatesWithStrategy = !result.states && result.player1Strategy.get();
+    storm::dd::Bdd nonProb1StatesWithStrategy = !result.getPlayer1States() && result.player1Strategy.get();
     EXPECT_TRUE(nonProb1StatesWithStrategy.isZero());
     // Proceed by checking whether they select exactly one action in each state.
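For intuition about the new player2States bookkeeping in performProb0, the symbolic fixpoint above can be read
explicitly as follows. This is a minimal sketch on a hypothetical explicit game representation; ToyGame,
ToyProb01Result, Dir and toyProb0 are invented for exposition and are not part of Storm. It assumes every
player 1 state and every player 2 state has at least one choice (the symbolic version handles missing choices
via the illegal-choice masks instead), and it applies the phi constraint at player 1 states rather than at
individual distributions.

#include <cstddef>
#include <vector>

// Toy explicit game: player 1 state i picks one of p1Succ[i] (indices of player 2 states);
// player 2 state j picks one of its distributions, each given by the set of player 1 states in its support.
struct ToyGame {
    std::vector<std::vector<std::size_t>> p1Succ;                        // player 1 state -> player 2 successors
    std::vector<std::vector<std::vector<std::size_t>>> p2Distributions;  // player 2 state -> distributions -> support
};

enum class Dir { Minimize, Maximize };

struct ToyProb01Result {
    std::vector<bool> player1States;  // player 1 states with probability 0
    std::vector<bool> player2States;  // player 2 states with probability 0
};

// Explicit-state analogue of the symbolic prob0 fixpoint: first compute the states that reach psi with
// positive probability (existential or universal choice, depending on each player's direction), then complement.
ToyProb01Result toyProb0(ToyGame const& game, std::vector<bool> const& phi, std::vector<bool> const& psi, Dir player1, Dir player2) {
    std::vector<bool> reach1 = psi;                                // player 1 states that reach psi
    std::vector<bool> reach2(game.p2Distributions.size(), false);  // player 2 states that reach psi
    bool changed = true;
    while (changed) {
        changed = false;
        // Player 2 states: some (max) or every (min) distribution has a successor that reaches psi.
        for (std::size_t j = 0; j < game.p2Distributions.size(); ++j) {
            bool all = true, some = false;
            for (auto const& support : game.p2Distributions[j]) {
                bool hit = false;
                for (std::size_t succ : support) { hit = hit || reach1[succ]; }
                all = all && hit;
                some = some || hit;
            }
            reach2[j] = reach2[j] || (player2 == Dir::Maximize ? some : all);
        }
        // Player 1 states: restricted to phi, some (max) or every (min) player 2 successor reaches psi.
        for (std::size_t i = 0; i < game.p1Succ.size(); ++i) {
            if (!phi[i] || reach1[i]) { continue; }
            bool all = true, some = false;
            for (std::size_t succ : game.p1Succ[i]) {
                all = all && reach2[succ];
                some = some || reach2[succ];
            }
            if (player1 == Dir::Maximize ? some : all) { reach1[i] = true; changed = true; }
        }
    }
    // The prob0 sets are the complements of the "reach psi with positive probability" sets.
    ToyProb01Result result;
    result.player1States.resize(reach1.size());
    result.player2States.resize(reach2.size());
    for (std::size_t i = 0; i < reach1.size(); ++i) { result.player1States[i] = !reach1[i]; }
    for (std::size_t j = 0; j < reach2.size(); ++j) { result.player2States[j] = !reach2[j]; }
    return result;
}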