From 1985c708ea15490a5414fc9340106b2aa41fd111 Mon Sep 17 00:00:00 2001
From: PBerger
Date: Wed, 14 Sep 2016 13:54:54 +0200
Subject: [PATCH] Revert back to older version ae0e423a4edbb16fb5796b23c50e3ccc905e5251 [formerly e3f9d7a53363e782b6299b02cb78dbc3d48ca822]

Former-commit-id: 9f72a10358a802cc96653002f82ef7b88d49b0e1
---
 .../abstraction/GameBasedMdpModelChecker.cpp | 78 ++++++++-----------
 src/solver/SymbolicGameSolver.cpp            | 12 +--
 src/solver/SymbolicGameSolver.h              |  2 +-
 .../solver/FullySymbolicGameSolverTest.cpp   | 16 ++--
 4 files changed, 42 insertions(+), 66 deletions(-)

diff --git a/src/modelchecker/abstraction/GameBasedMdpModelChecker.cpp b/src/modelchecker/abstraction/GameBasedMdpModelChecker.cpp
index 9bdc468c9..71a9a4df6 100644
--- a/src/modelchecker/abstraction/GameBasedMdpModelChecker.cpp
+++ b/src/modelchecker/abstraction/GameBasedMdpModelChecker.cpp
@@ -400,14 +400,16 @@ namespace storm {
         };
 
         template
-        MaybeStateResult solveMaybeStates(storm::OptimizationDirection const& player1Direction, storm::OptimizationDirection const& player2Direction, storm::abstraction::MenuGame const& game, storm::dd::Bdd const& prob0States, storm::dd::Bdd const& prob1States, boost::optional> startInfo = boost::none) {
+        MaybeStateResult solveMaybeStates(storm::OptimizationDirection const& player1Direction, storm::OptimizationDirection const& player2Direction, storm::abstraction::MenuGame const& game, storm::dd::Bdd const& maybeStates, storm::dd::Bdd const& prob1States, boost::optional> startInfo = boost::none) {
             STORM_LOG_TRACE("Performing quantative solution step. Player 1: " << player1Direction << ", player 2: " << player2Direction << ".");
 
-            // Compute the ingredients of the equation system.
-            storm::dd::Add maybeStatesAdd = game.getReachableStates().template toAdd();
+            // Compute the ingredients of the equation system.
+            storm::dd::Add maybeStatesAdd = maybeStates.template toAdd();
             storm::dd::Add submatrix = maybeStatesAdd * game.getTransitionMatrix();
-            storm::dd::Add subvector = game.getManager().template getAddZero();
+            storm::dd::Add prob1StatesAsColumn = prob1States.template toAdd().swapVariables(game.getRowColumnMetaVariablePairs());
+            storm::dd::Add subvector = submatrix * prob1StatesAsColumn;
+            subvector = subvector.sumAbstract(game.getColumnVariables());
 
             // Cut away all columns targeting non-maybe states.
             submatrix *= maybeStatesAdd.swapVariables(game.getRowColumnMetaVariablePairs());
@@ -419,16 +421,12 @@ namespace storm {
             } else {
                 startVector = game.getManager().template getAddZero();
             }
-            
-            // Set all target- and Prob1-states to 1 and all Prob0-states to 0
-            startVector = prob0States.ite(game.getManager().template getAddZero(), startVector);
-            startVector = prob1States.ite(game.getManager().template getAddOne(), startVector);
 
             // Create the solver and solve the equation system.
             storm::utility::solver::SymbolicGameSolverFactory solverFactory;
-            std::unique_ptr> solver = solverFactory.create(submatrix, game.getReachableStates(), game.getIllegalPlayer1Mask(), game.getIllegalPlayer2Mask(), game.getRowVariables(), game.getColumnVariables(), game.getRowColumnMetaVariablePairs(), game.getPlayer1Variables(), game.getPlayer2Variables());
+            std::unique_ptr> solver = solverFactory.create(submatrix, maybeStates, game.getIllegalPlayer1Mask(), game.getIllegalPlayer2Mask(), game.getRowVariables(), game.getColumnVariables(), game.getRowColumnMetaVariablePairs(), game.getPlayer1Variables(), game.getPlayer2Variables());
             solver->setGeneratePlayersStrategies(true);
-            auto values = solver->solveGame(player1Direction, player2Direction, startVector, subvector, prob0States, prob1States, startInfo ? boost::make_optional(startInfo.get().player1Strategy) : boost::none, startInfo ? boost::make_optional(startInfo.get().player2Strategy) : boost::none);
+            auto values = solver->solveGame(player1Direction, player2Direction, startVector, subvector, startInfo ? boost::make_optional(startInfo.get().player1Strategy) : boost::none, startInfo ? boost::make_optional(startInfo.get().player2Strategy) : boost::none);
             return MaybeStateResult(values, solver->getPlayer1Strategy(), solver->getPlayer2Strategy());
         }
 
@@ -527,14 +525,14 @@ namespace storm {
             storm::dd::Add minResult = prob01.min.second.getPlayer1States().template toAdd();
             storm::dd::Add maxResult = prob01.max.second.getPlayer1States().template toAdd();
             storm::dd::Add initialStatesAdd = game.getInitialStates().template toAdd();
-            
+
             // The minimal value after qualitative checking can only be zero. It it was 1, we could have given
             // the result right away.
             ValueType minValue = storm::utility::zero();
             MaybeStateResult minMaybeStateResult(game.getManager().template getAddZero(), game.getManager().getBddZero(), game.getManager().getBddZero());
             if (!maybeMin.isZero()) {
-                minMaybeStateResult = solveMaybeStates(player1Direction, storm::OptimizationDirection::Minimize, game, prob01.min.first.getPlayer1States(), prob01.min.second.getPlayer1States());
-                minResult = minMaybeStateResult.values;
+                minMaybeStateResult = solveMaybeStates(player1Direction, storm::OptimizationDirection::Minimize, game, maybeMin, prob01.min.second.getPlayer1States());
+                minResult += minMaybeStateResult.values;
                 storm::dd::Add initialStateMin = initialStatesAdd * minResult;
                 // Here we can only require a non-zero count of *at most* one, because the result may actually be 0.
                 STORM_LOG_ASSERT(initialStateMin.getNonZeroCount() <= 1, "Wrong number of results for initial states. Expected <= 1, but got " << initialStateMin.getNonZeroCount() << ".");
@@ -553,8 +551,8 @@ namespace storm {
             ValueType maxValue = storm::utility::one();
             MaybeStateResult maxMaybeStateResult(game.getManager().template getAddZero(), game.getManager().getBddZero(), game.getManager().getBddZero());
             if (!maybeMax.isZero()) {
-                maxMaybeStateResult = solveMaybeStates(player1Direction, storm::OptimizationDirection::Maximize, game, prob01.max.first.getPlayer1States(), prob01.max.second.getPlayer1States(), boost::make_optional(minMaybeStateResult));
-                maxResult = maxMaybeStateResult.values;
+                maxMaybeStateResult = solveMaybeStates(player1Direction, storm::OptimizationDirection::Maximize, game, maybeMax, prob01.max.second.getPlayer1States(), boost::make_optional(minMaybeStateResult));
+                maxResult += maxMaybeStateResult.values;
                 storm::dd::Add initialStateMax = (initialStatesAdd * maxResult);
                 // Unlike in the min-case, we can require a non-zero count of 1 here, because if the max was in
                 // fact 0, the result would be 0, which would have been detected earlier by the graph algorithms.
@@ -579,30 +577,16 @@ namespace storm {
                 // If we arrived at this point, it means that we have all qualitative and quantitative information
                 // about the game, but we could not yet answer the query. In this case, we need to refine.
 
-                // Redirect all player 1 choices of the min strategy to that of the max strategy if this leads to a player 2 state that has the same prob.
-                // Get all relevant strategies.
-                storm::dd::Bdd minPlayer1Strategy = minMaybeStateResult.player1Strategy;
-                storm::dd::Bdd minPlayer2Strategy = minMaybeStateResult.player2Strategy;
-                storm::dd::Bdd maxPlayer1Strategy = maxMaybeStateResult.player1Strategy;
-                storm::dd::Bdd maxPlayer2Strategy = maxMaybeStateResult.player2Strategy;
-
-                storm::dd::Add matrix = game.getTransitionMatrix();
-
-                storm::dd::Add minResultTmp = minResult.swapVariables(game.getRowColumnMetaVariablePairs());
-                storm::dd::Add minValuesForPlayer1UnderMinP1Strategy = (matrix * minPlayer1Strategy.template toAdd() * minPlayer2Strategy.template toAdd() * minResultTmp).sumAbstract(game.getColumnVariables()).sumAbstract(game.getPlayer2Variables());
-                storm::dd::Add minValuesForPlayer1UnderMaxP1Strategy = (matrix * maxPlayer1Strategy.template toAdd() * minPlayer2Strategy.template toAdd() * minResultTmp).sumAbstract(game.getColumnVariables()).sumAbstract(game.getPlayer2Variables());
-                // This BDD has a 1 for every state (s) that can switch the strategy.
-                storm::dd::Bdd minIsGreaterOrEqual = minValuesForPlayer1UnderMinP1Strategy.greaterOrEqual(minValuesForPlayer1UnderMaxP1Strategy);
-
-                minPlayer1Strategy = minIsGreaterOrEqual.existsAbstract(game.getPlayer1Variables()).ite(maxPlayer1Strategy, minPlayer1Strategy);
+                // Start by extending the quantitative strategies by the qualitative ones.
+                minMaybeStateResult.player1Strategy |= prob01.min.first.getPlayer1Strategy() || prob01.min.second.getPlayer1Strategy();
                 storm::dd::Bdd tmp = (prob01.min.first.getPlayer2Strategy().existsAbstract(game.getPlayer2Variables()) && prob01.min.second.getPlayer2Strategy().existsAbstract(game.getPlayer2Variables()));
                 STORM_LOG_ASSERT(tmp.isZero(), "wth?");
-                tmp = prob01.min.first.getPlayer2Strategy().existsAbstract(game.getPlayer2Variables()) && minPlayer2Strategy.existsAbstract(game.getPlayer2Variables());
+                tmp = prob01.min.first.getPlayer2Strategy().existsAbstract(game.getPlayer2Variables()) && minMaybeStateResult.player2Strategy.existsAbstract(game.getPlayer2Variables());
                 if (!tmp.isZero()) {
-                    tmp = tmp && prob01.min.first.getPlayer2Strategy().exclusiveOr(minPlayer2Strategy).existsAbstract(game.getPlayer2Variables());
+                    tmp = tmp && prob01.min.first.getPlayer2Strategy().exclusiveOr(minMaybeStateResult.player2Strategy).existsAbstract(game.getPlayer2Variables());
                     (tmp && prob01.min.first.getPlayer2Strategy()).template toAdd().exportToDot("prob0_strat.dot");
-                    (tmp && minPlayer2Strategy).template toAdd().exportToDot("maybe_strat.dot");
+                    (tmp && minMaybeStateResult.player2Strategy).template toAdd().exportToDot("maybe_strat.dot");
                     if (!tmp.isZero()) {
                         storm::dd::Add values = (tmp.template toAdd() * game.getTransitionMatrix() * minResult.swapVariables(game.getRowColumnMetaVariablePairs())).sumAbstract(game.getColumnVariables());
                         tmp.template toAdd().exportToDot("illegal.dot");
@@ -611,20 +595,20 @@ namespace storm {
                     STORM_LOG_ASSERT(tmp.isZero(), "ddduuuudde?");
                 }
                 STORM_LOG_ASSERT(tmp.isZero(), "wth2?");
-                //tmp = prob01.min.second.getPlayer2Strategy().existsAbstract(game.getPlayer2Variables()) && minPlayer2Strategy;
+                tmp = prob01.min.second.getPlayer2Strategy().existsAbstract(game.getPlayer2Variables()) && minMaybeStateResult.player2Strategy;
 
-                //(minMaybeStateResult.player2Strategy && (prob01.min.first.getPlayer2Strategy() || prob01.min.second.getPlayer2Strategy())).template toAdd().exportToDot("strat_overlap.dot");
-                //minMaybeStateResult.player2Strategy |= prob01.min.first.getPlayer2Strategy() || prob01.min.second.getPlayer2Strategy();
-                //maxMaybeStateResult.player1Strategy |= prob01.max.first.getPlayer1Strategy() || prob01.max.second.getPlayer1Strategy();
-                //maxMaybeStateResult.player2Strategy |= prob01.max.first.getPlayer2Strategy() || prob01.max.second.getPlayer2Strategy();
+                (minMaybeStateResult.player2Strategy && (prob01.min.first.getPlayer2Strategy() || prob01.min.second.getPlayer2Strategy())).template toAdd().exportToDot("strat_overlap.dot");
+                minMaybeStateResult.player2Strategy |= prob01.min.first.getPlayer2Strategy() || prob01.min.second.getPlayer2Strategy();
+                maxMaybeStateResult.player1Strategy |= prob01.max.first.getPlayer1Strategy() || prob01.max.second.getPlayer1Strategy();
+                maxMaybeStateResult.player2Strategy |= prob01.max.first.getPlayer2Strategy() || prob01.max.second.getPlayer2Strategy();
 
                 // Make sure that all strategies are still valid strategies.
-                STORM_LOG_ASSERT(minPlayer1Strategy.template toAdd().sumAbstract(game.getPlayer1Variables()).getMax() <= 1, "Player 1 strategy for min is illegal.");
-                STORM_LOG_ASSERT(maxPlayer1Strategy.template toAdd().sumAbstract(game.getPlayer1Variables()).getMax() <= 1, "Player 1 strategy for max is illegal.");
-                STORM_LOG_ASSERT(minPlayer2Strategy.template toAdd().sumAbstract(game.getPlayer2Variables()).getMax() <= 1, "Player 2 strategy for min is illegal.");
-                STORM_LOG_ASSERT(maxPlayer2Strategy.template toAdd().sumAbstract(game.getPlayer2Variables()).getMax() <= 1, "Player 2 strategy for max is illegal.");
+                STORM_LOG_ASSERT(minMaybeStateResult.player1Strategy.template toAdd().sumAbstract(game.getPlayer1Variables()).getMax() <= 1, "Player 1 strategy for min is illegal.");
+                STORM_LOG_ASSERT(maxMaybeStateResult.player1Strategy.template toAdd().sumAbstract(game.getPlayer1Variables()).getMax() <= 1, "Player 1 strategy for max is illegal.");
+                STORM_LOG_ASSERT(minMaybeStateResult.player2Strategy.template toAdd().sumAbstract(game.getPlayer2Variables()).getMax() <= 1, "Player 2 strategy for min is illegal.");
+                STORM_LOG_ASSERT(maxMaybeStateResult.player2Strategy.template toAdd().sumAbstract(game.getPlayer2Variables()).getMax() <= 1, "Player 2 strategy for max is illegal.");
 
-                refineAfterQuantitativeCheck(abstractor, game, minResult, maxResult, prob01, std::make_pair(minPlayer1Strategy, minPlayer2Strategy), std::make_pair(maxPlayer1Strategy, maxPlayer2Strategy), transitionMatrixBdd);
+                refineAfterQuantitativeCheck(abstractor, game, minResult, maxResult, prob01, std::make_pair(minMaybeStateResult.player1Strategy, minMaybeStateResult.player2Strategy), std::make_pair(maxMaybeStateResult.player1Strategy, maxMaybeStateResult.player2Strategy), transitionMatrixBdd);
             }
         }
 
@@ -649,7 +633,7 @@ namespace storm {
             return std::make_pair(prob0, prob1);
         }
-        
+
         template
        storm::expressions::Expression GameBasedMdpModelChecker::getExpression(storm::logic::Formula const& formula) {
            STORM_LOG_THROW(formula.isBooleanLiteralFormula() || formula.isAtomicExpressionFormula() || formula.isAtomicLabelFormula(), storm::exceptions::InvalidPropertyException, "The target states have to be given as label or an expression.");
@@ -663,11 +647,11 @@ namespace storm {
             }
             return result;
         }
-        
+
         template class GameBasedMdpModelChecker>;
         template class GameBasedMdpModelChecker>;
         template class GameBasedMdpModelChecker>;
         template class GameBasedMdpModelChecker>;
     }
-}
+}
\ No newline at end of file
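The hunks above rebuild the equation system of solveMaybeStates over the maybe states only: the transition matrix is restricted to maybe-state rows and columns, and the one-step probability of reaching a prob-1 state is folded into the constant vector b that is handed to the solver. Below is a small self-contained sketch of that fixed point, using plain dense arrays instead of Storm's symbolic Add/Bdd types; the two-state game, its probabilities, and the convergence threshold are made up for illustration.

// Toy sketch (not Storm code) of the iteration solveMaybeStates sets up:
//   x(s) = opt1_a opt2_d [ b(s,a,d) + sum_t P(s,a,d,t) * x(t) ]
// where b(s,a,d) is the one-step mass into prob-1 states (the "subvector") and
// P only contains maybe-state successors (other columns are "cut away").
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdio>

int main() {
    constexpr int S = 2, A = 2, D = 2;   // maybe states, player-1 actions, player-2 distributions
    double P[S][A][D][S] = {};           // maybe-to-maybe part of the transition matrix
    double b[S][A][D] = {};              // one-step probability of entering a prob-1 state

    // State 0: action 0 offers (0.5 -> prob1, 0.5 -> state 1) and (0.2 -> prob1, 0.8 -> state 1),
    //          action 1 offers (0.4 -> prob1, 0.6 -> prob0)    and (1.0 -> state 1).
    b[0][0][0] = 0.5; P[0][0][0][1] = 0.5;
    b[0][0][1] = 0.2; P[0][0][1][1] = 0.8;
    b[0][1][0] = 0.4;                    // mass going to prob-0 states simply does not appear
    P[0][1][1][1] = 1.0;
    // State 1: action 0 offers (0.7 -> prob1, 0.3 -> state 0) and (0.1 -> prob1, 0.9 -> state 0),
    //          action 1 offers (0.6 -> prob1, 0.4 -> prob0) under both distributions.
    b[1][0][0] = 0.7; P[1][0][0][0] = 0.3;
    b[1][0][1] = 0.1; P[1][0][1][0] = 0.9;
    b[1][1][0] = 0.6; b[1][1][1] = 0.6;

    auto solve = [&](bool p1Max, bool p2Max) {
        std::array<double, S> x{};       // start vector: all zeros, as in the patch
        for (int iteration = 0; iteration < 10000; ++iteration) {
            std::array<double, S> next{};
            for (int s = 0; s < S; ++s) {
                double best1 = p1Max ? 0.0 : 1.0;       // all values lie in [0, 1]
                for (int a = 0; a < A; ++a) {
                    double best2 = p2Max ? 0.0 : 1.0;
                    for (int d = 0; d < D; ++d) {
                        double v = b[s][a][d];
                        for (int t = 0; t < S; ++t) v += P[s][a][d][t] * x[t];
                        best2 = p2Max ? std::max(best2, v) : std::min(best2, v);
                    }
                    best1 = p1Max ? std::max(best1, best2) : std::min(best1, best2);
                }
                next[s] = best1;
            }
            double diff = 0.0;
            for (int s = 0; s < S; ++s) diff = std::max(diff, std::fabs(next[s] - x[s]));
            x = next;
            if (diff < 1e-10) break;     // converged "modulo precision"
        }
        return x;
    };

    // The four direction combinations the tests further below also exercise.
    for (bool p1Max : {false, true}) {
        for (bool p2Max : {false, true}) {
            std::array<double, 2> x = solve(p1Max, p2Max);
            std::printf("player 1 %s, player 2 %s: x = (%.6f, %.6f)\n",
                        p1Max ? "max" : "min", p2Max ? "max" : "min", x[0], x[1]);
        }
    }
    return 0;
}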
diff --git a/src/solver/SymbolicGameSolver.cpp b/src/solver/SymbolicGameSolver.cpp
index 428374ee3..db297cafb 100644
--- a/src/solver/SymbolicGameSolver.cpp
+++ b/src/solver/SymbolicGameSolver.cpp
@@ -25,16 +25,12 @@ namespace storm {
         }
 
         template
-        storm::dd::Add SymbolicGameSolver::solveGame(OptimizationDirection player1Goal, OptimizationDirection player2Goal, storm::dd::Add const& x, storm::dd::Add const& b, storm::dd::Bdd const& prob0States, storm::dd::Bdd const& prob1States, boost::optional> const& basePlayer1Strategy, boost::optional> const& basePlayer2Strategy) {
+        storm::dd::Add SymbolicGameSolver::solveGame(OptimizationDirection player1Goal, OptimizationDirection player2Goal, storm::dd::Add const& x, storm::dd::Add const& b, boost::optional> const& basePlayer1Strategy, boost::optional> const& basePlayer2Strategy) {
             // Set up the environment.
             storm::dd::Add xCopy = x;
             uint_fast64_t iterations = 0;
             bool converged = false;
 
-            // Constants
-            storm::dd::Add const addOne = A.getDdManager().template getAddOne();
-            storm::dd::Add const addZero = A.getDdManager().template getAddZero();
-
             // Prepare some data storage in case we need to generate strategies.
             if (generatePlayer1Strategy) {
                 if (basePlayer1Strategy) {
@@ -53,7 +49,7 @@ namespace storm {
                     previousPlayer2Values = (player2Strategy.get().template toAdd() * (this->A.multiplyMatrix(x.swapVariables(this->rowColumnMetaVariablePairs), this->columnMetaVariables) + b)).sumAbstract(this->player2Variables);
                 } else {
                     player2Strategy = A.getDdManager().getBddZero();
-                    previousPlayer2Values = addZero;
+                    previousPlayer2Values = A.getDdManager().template getAddZero();
                 }
             }
 
@@ -107,10 +103,6 @@ namespace storm {
                 tmp = newValues;
             }
 
-            // Fix Prob0 and Prob1 States
-            tmp = prob0States.ite(addZero, tmp);
-            tmp = prob1States.ite(addOne, tmp);
-
             // Now check if the process already converged within our precision.
             converged = xCopy.equalModuloPrecision(tmp, this->precision, this->relative);
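With the clamping of prob-0 and prob-1 states removed from the iteration above, the solver's iterate is only meaningful on the maybe states; GameBasedMdpModelChecker therefore assembles the final vector itself (minResult and maxResult start out as the prob-1 indicator and the solver values are added with +=). A toy sketch of that composition with made-up state classifications and values:

// Toy sketch (not Storm code): combine qualitative prob-0/prob-1 results with the
// quantitative maybe-state values, mirroring "minResult += minMaybeStateResult.values".
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    std::vector<char> kind = {'1', '0', '?', '?'};             // prob-1, prob-0, maybe, maybe (hypothetical)
    std::vector<double> maybeValues = {0.0, 0.0, 0.61, 0.37};  // solver output, nonzero only on maybe states

    std::vector<double> result(kind.size(), 0.0);              // prob-0 states stay at 0
    for (std::size_t s = 0; s < kind.size(); ++s) {
        if (kind[s] == '1') result[s] = 1.0;                   // qualitative prob-1 result
        if (kind[s] == '?') result[s] += maybeValues[s];       // quantitative part, as in "minResult += ..."
    }

    for (double v : result) std::printf("%.2f ", v);           // 1.00 0.00 0.61 0.37
    std::printf("\n");
    return 0;
}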
diff --git a/src/solver/SymbolicGameSolver.h b/src/solver/SymbolicGameSolver.h
index a9d430fc0..211dc8f33 100644
--- a/src/solver/SymbolicGameSolver.h
+++ b/src/solver/SymbolicGameSolver.h
@@ -70,7 +70,7 @@ namespace storm {
              * then this strategy can be used to generate a strategy that only differs from the given one if it has to.
              * @return The solution vector.
              */
-            virtual storm::dd::Add solveGame(OptimizationDirection player1Goal, OptimizationDirection player2Goal, storm::dd::Add const& x, storm::dd::Add const& b, storm::dd::Bdd const& prob0States, storm::dd::Bdd const& prob1States, boost::optional> const& basePlayer1Strategy = boost::none, boost::optional> const& basePlayer2Strategy = boost::none);
+            virtual storm::dd::Add solveGame(OptimizationDirection player1Goal, OptimizationDirection player2Goal, storm::dd::Add const& x, storm::dd::Add const& b, boost::optional> const& basePlayer1Strategy = boost::none, boost::optional> const& basePlayer2Strategy = boost::none);
 
             // Setters that enable the generation of the players' strategies.
             void setGeneratePlayer1Strategy(bool value);
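The solveGame documentation above lets callers pass base strategies that the newly generated strategies should "only differ from ... if it has to". One plausible reading of that rule is to keep the inherited choice unless another action is strictly better, sketched here with made-up values (an illustration, not Storm's implementation):

// Toy sketch (not Storm code): reuse a base strategy's choice unless a strictly
// better alternative exists, so strategies stay stable across successive solves.
#include <cstddef>
#include <cstdio>
#include <vector>

std::size_t chooseMax(std::vector<double> const& values, std::size_t baseChoice) {
    std::size_t best = baseChoice;                    // start from the inherited choice
    for (std::size_t a = 0; a < values.size(); ++a) {
        if (values[a] > values[best]) best = a;       // deviate only on strict improvement
    }
    return best;
}

int main() {
    std::vector<double> values = {0.4, 0.7, 0.7};
    std::printf("base 2 -> %zu\n", chooseMax(values, 2));  // stays 2: the tie does not force a switch
    std::printf("base 0 -> %zu\n", chooseMax(values, 0));  // moves to 1: 0.7 > 0.4
    return 0;
}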
diff --git a/test/functional/solver/FullySymbolicGameSolverTest.cpp b/test/functional/solver/FullySymbolicGameSolverTest.cpp
index 2f64c36e1..8da44ca92 100644
--- a/test/functional/solver/FullySymbolicGameSolverTest.cpp
+++ b/test/functional/solver/FullySymbolicGameSolverTest.cpp
@@ -49,25 +49,25 @@ TEST(FullySymbolicGameSolverTest, Solve_Cudd) {
     storm::dd::Add b = manager->getEncoding(state.first, 2).template toAdd() + manager->getEncoding(state.first, 4).template toAdd();
 
     // Now solve the game with different strategies for the players.
-    storm::dd::Add result = solver->solveGame(storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, x, b, manager->getBddZero(), manager->getBddZero());
+    storm::dd::Add result = solver->solveGame(storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, x, b);
     result *= manager->getEncoding(state.first, 1).template toAdd();
     result = result.sumAbstract({state.first});
     EXPECT_NEAR(0, result.getValue(), storm::settings::getModule().getPrecision());
 
     x = manager->getAddZero();
-    result = solver->solveGame(storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize, x, b, manager->getBddZero(), manager->getBddZero());
+    result = solver->solveGame(storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize, x, b);
     result *= manager->getEncoding(state.first, 1).template toAdd();
     result = result.sumAbstract({state.first});
     EXPECT_NEAR(0.5, result.getValue(), storm::settings::getModule().getPrecision());
 
     x = manager->getAddZero();
-    result = solver->solveGame(storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize, x, b, manager->getBddZero(), manager->getBddZero());
+    result = solver->solveGame(storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize, x, b);
     result *= manager->getEncoding(state.first, 1).template toAdd();
     result = result.sumAbstract({state.first});
     EXPECT_NEAR(0.2, result.getValue(), storm::settings::getModule().getPrecision());
 
     x = manager->getAddZero();
-    result = solver->solveGame(storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize, x, b, manager->getBddZero(), manager->getBddZero());
+    result = solver->solveGame(storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize, x, b);
     result *= manager->getEncoding(state.first, 1).template toAdd();
     result = result.sumAbstract({state.first});
     EXPECT_NEAR(0.99999892625817599, result.getValue(), storm::settings::getModule().getPrecision());
 
@@ -114,25 +114,25 @@ TEST(FullySymbolicGameSolverTest, Solve_Sylvan) {
     storm::dd::Add b = manager->getEncoding(state.first, 2).template toAdd() + manager->getEncoding(state.first, 4).template toAdd();
 
     // Now solve the game with different strategies for the players.
-    storm::dd::Add result = solver->solveGame(storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, x, b, manager->getBddZero(), manager->getBddZero());
+    storm::dd::Add result = solver->solveGame(storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, x, b);
     result *= manager->getEncoding(state.first, 1).template toAdd();
     result = result.sumAbstract({state.first});
     EXPECT_NEAR(0, result.getValue(), storm::settings::getModule().getPrecision());
 
     x = manager->getAddZero();
-    result = solver->solveGame(storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize, x, b, manager->getBddZero(), manager->getBddZero());
+    result = solver->solveGame(storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Maximize, x, b);
     result *= manager->getEncoding(state.first, 1).template toAdd();
     result = result.sumAbstract({state.first});
     EXPECT_NEAR(0.5, result.getValue(), storm::settings::getModule().getPrecision());
 
     x = manager->getAddZero();
-    result = solver->solveGame(storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize, x, b, manager->getBddZero(), manager->getBddZero());
+    result = solver->solveGame(storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Minimize, x, b);
     result *= manager->getEncoding(state.first, 1).template toAdd();
     result = result.sumAbstract({state.first});
     EXPECT_NEAR(0.2, result.getValue(), storm::settings::getModule().getPrecision());
 
     x = manager->getAddZero();
-    result = solver->solveGame(storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize, x, b, manager->getBddZero(), manager->getBddZero());
+    result = solver->solveGame(storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize, x, b);
     result *= manager->getEncoding(state.first, 1).template toAdd();
     result = result.sumAbstract({state.first});
     EXPECT_NEAR(0.99999892625817599, result.getValue(), storm::settings::getModule().getPrecision());
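The tests compare against fixed reference values up to the native equation solver's precision, and the reverted solveGame stops once successive iterates are equal "modulo precision" (absolute or relative, per this->precision and this->relative). A toy version of such a stopping criterion follows; the relative variant is an assumed semantics, not copied from Storm's equalModuloPrecision:

// Toy sketch (not Storm code) of an absolute/relative convergence check for value iteration.
#include <cmath>
#include <cstdio>

bool equalModuloPrecision(double current, double previous, double precision, bool relative) {
    if (relative) {
        // Assumed semantics: relative difference with respect to the previous value.
        return previous == 0.0 ? current == 0.0 : std::fabs((current - previous) / previous) <= precision;
    }
    return std::fabs(current - previous) <= precision;
}

int main() {
    // Near zero the two criteria differ: a tiny absolute change can be a large relative one.
    std::printf("absolute: %d\n", equalModuloPrecision(2e-8, 1e-8, 1e-6, false));  // 1 (converged)
    std::printf("relative: %d\n", equalModuloPrecision(2e-8, 1e-8, 1e-6, true));   // 0 (not converged)
    return 0;
}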