
further debugging

tempestpy_adaptions
dehnert committed 7 years ago
commit bcd3d68c61
Changed files:
  src/storm/abstraction/MenuGameRefiner.cpp (18)
  src/storm/modelchecker/abstraction/GameBasedMdpModelChecker.cpp (101)

src/storm/abstraction/MenuGameRefiner.cpp

@@ -1097,9 +1097,14 @@ namespace storm {
 ValueType pivotStateDeviation = storm::utility::zero<ValueType>();
 auto const& player2Grouping = transitionMatrix.getRowGroupIndices();
+storm::storage::BitVector visitedStates(initialStates.size());
 while (!dijkstraQueue.empty()) {
 auto distanceStatePair = *dijkstraQueue.begin();
 uint64_t currentState = distanceStatePair.second;
+visitedStates.set(currentState);
+std::cout << "current: " << currentState << std::endl;
+std::cout << "visited: " << visitedStates << std::endl;
 ValueType currentDistance = distanceStatePair.first;
 dijkstraQueue.erase(dijkstraQueue.begin());
@@ -1129,9 +1134,9 @@ namespace storm {
 isPivotState = true;
 }
 }
 // If it is indeed a pivot state, we can abort the search here.
 if (isPivotState) {
 if (considerDeviation && foundPivotState) {
 ValueType deviationOfCurrentState = (*upperValues)[currentState] - (*lowerValues)[currentState];
 if (deviationOfCurrentState > pivotStateDeviation) {
@@ -1154,10 +1159,13 @@ namespace storm {
 }
 }
+// TODO: remember the strategy from which we came and only go on to explore that further?
 // Otherwise, we explore all its relevant successors.
 if (minStrategyPair.getPlayer1Strategy().hasDefinedChoice(currentState)) {
 uint64_t minPlayer2Successor = minStrategyPair.getPlayer1Strategy().getChoice(currentState);
 uint64_t minPlayer2Choice = minStrategyPair.getPlayer2Strategy().getChoice(minPlayer2Successor);
+STORM_LOG_ASSERT(!maxStrategyPair.getPlayer2Strategy().hasDefinedChoice(minPlayer2Successor) || minStrategyPair.getPlayer2Strategy().getChoice(minPlayer2Successor) == minPlayer2Choice, "Did not expect deviation in player 2 strategy.");
 STORM_LOG_ASSERT(player2Grouping[minPlayer2Successor] <= minPlayer2Choice && minPlayer2Choice < player2Grouping[minPlayer2Successor + 1], "Illegal choice for player 2.");
 for (auto const& entry : transitionMatrix.getRow(minPlayer2Choice)) {
@@ -1175,10 +1183,13 @@ namespace storm {
 dijkstraQueue.emplace(alternateDistance, player1Successor);
 }
 }
+} else {
+STORM_LOG_ASSERT(targetStates.get(currentState), "Expecting min strategy for non-target states.");
 }
 if (maxStrategyPair.getPlayer1Strategy().hasDefinedChoice(currentState)) {
 uint64_t maxPlayer2Successor = maxStrategyPair.getPlayer1Strategy().getChoice(currentState);
 uint64_t maxPlayer2Choice = maxStrategyPair.getPlayer2Strategy().getChoice(maxPlayer2Successor);
+STORM_LOG_ASSERT(!minStrategyPair.getPlayer2Strategy().hasDefinedChoice(maxPlayer2Successor) || minStrategyPair.getPlayer2Strategy().getChoice(maxPlayer2Successor) == maxPlayer2Choice, "Did not expect deviation in player 2 strategy.");
 STORM_LOG_ASSERT(player2Grouping[maxPlayer2Successor] <= maxPlayer2Choice && maxPlayer2Choice < player2Grouping[maxPlayer2Successor + 1], "Illegal choice for player 2.");
 for (auto const& entry : transitionMatrix.getRow(maxPlayer2Choice)) {
@@ -1188,7 +1199,7 @@ namespace storm {
 }
 ValueType alternateDistance = probabilityDistances ? currentDistance * entry.getValue() : currentDistance + storm::utility::one<ValueType>();
-if (probabilityDistances ? alternateDistance > distances[player1Successor] : !probabilityDistances && alternateDistance < distances[player1Successor]) {
+if (probabilityDistances ? alternateDistance > distances[player1Successor] : alternateDistance < distances[player1Successor]) {
 distances[player1Successor] = alternateDistance;
 if (generatePredecessors) {
 result.predecessors[player1Successor] = std::make_pair(currentState, player1Labeling[maxPlayer2Successor]);
@@ -1196,6 +1207,8 @@ namespace storm {
 dijkstraQueue.emplace(alternateDistance, player1Successor);
 }
 }
+} else {
+STORM_LOG_ASSERT(targetStates.get(currentState), "Expecting max strategy for non-target states.");
 }
 }
@@ -1205,6 +1218,7 @@ namespace storm {
 // If we arrived at this point, we have explored all relevant states, but none of them was a pivot state,
 // which can happen when trying to refine using the qualitative result only.
+STORM_LOG_TRACE("Did not find pivot state in explicit Dijkstra search.");
 return boost::none;
 }
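
Note: the hunks above instrument an explicit Dijkstra-style pivot search. The following is a minimal, self-contained sketch of that pattern for hop-count distances only; the names State, findPivot and isPivot are made up for illustration and are not Storm's API.

#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <set>
#include <utility>
#include <vector>

// Hypothetical stand-in for the game states handled in MenuGameRefiner.
using State = std::uint64_t;

// Dijkstra-style pivot search: states are dequeued in order of distance,
// marked visited (as the debug output in the commit does), and the first
// state the caller identifies as a pivot state ends the search.
std::optional<State> findPivot(std::vector<std::vector<State>> const& successors,
                               std::vector<State> const& initialStates,
                               std::function<bool(State)> const& isPivot) {
    std::size_t n = successors.size();
    std::vector<double> distances(n, std::numeric_limits<double>::infinity());
    std::vector<bool> visited(n, false);

    // Ordered set of (distance, state) pairs, mirroring dijkstraQueue in the diff.
    std::set<std::pair<double, State>> dijkstraQueue;
    for (State s : initialStates) {
        distances[s] = 0.0;
        dijkstraQueue.emplace(0.0, s);
    }

    while (!dijkstraQueue.empty()) {
        auto [currentDistance, currentState] = *dijkstraQueue.begin();
        dijkstraQueue.erase(dijkstraQueue.begin());
        visited[currentState] = true;

        if (isPivot(currentState)) {
            return currentState;  // Abort the search at the first pivot state.
        }
        for (State successor : successors[currentState]) {
            double alternateDistance = currentDistance + 1.0;
            if (!visited[successor] && alternateDistance < distances[successor]) {
                dijkstraQueue.erase({distances[successor], successor});
                distances[successor] = alternateDistance;
                dijkstraQueue.emplace(alternateDistance, successor);
            }
        }
    }
    // All relevant states explored, but none was a pivot state.
    return std::nullopt;
}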

src/storm/modelchecker/abstraction/GameBasedMdpModelChecker.cpp

@@ -2,6 +2,7 @@
 #include "storm/modelchecker/results/ExplicitQuantitativeCheckResult.h"
 #include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h"
+#include "storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h"
 #include "storm/models/symbolic/StandardRewardModel.h"
 #include "storm/models/symbolic/Dtmc.h"
@@ -33,6 +34,7 @@
 #include "storm/solver/SymbolicGameSolver.h"
 #include "storm/solver/StandardGameSolver.h"
+#include "storm/environment/Environment.h"
 #include "storm/settings/SettingsManager.h"
 #include "storm/settings/modules/CoreSettings.h"
@@ -414,8 +416,10 @@ namespace storm {
 // If there is a previous result, unpack the previous values with respect to the new ODD.
 if (previousResult) {
-previousResult.get().odd.oldToNewIndex(odd, [&previousResult,&result,player2Min] (uint64_t oldOffset, uint64_t newOffset) {
+previousResult.get().odd.oldToNewIndex(odd, [&previousResult,&result,player2Min,player1Prob1States] (uint64_t oldOffset, uint64_t newOffset) {
+if (!player2Min && !player1Prob1States.get(newOffset)) {
 result.getValues()[newOffset] = player2Min ? previousResult.get().values.getMin().getValues()[oldOffset] : previousResult.get().values.getMax().getValues()[oldOffset];
+}
 });
 }
@@ -820,6 +824,100 @@ namespace storm {
 return result;
 }
+// std::cout << "target states " << targetStates << std::endl;
+for (uint64_t state = 0; state < player1Groups.size() - 1; ++state) {
+std::cout << "state " << state << " has values [" << quantitativeResult.getMin().getValues()[state] << ", " << quantitativeResult.getMax().getValues()[state] << "]" << std::endl;
+STORM_LOG_ASSERT(targetStates.get(state) || minStrategyPair.getPlayer1Strategy().hasDefinedChoice(state), "Expected lower player 1 choice in state " << state << ".");
+STORM_LOG_ASSERT(targetStates.get(state) || maxStrategyPair.getPlayer1Strategy().hasDefinedChoice(state), "Expected upper player 1 choice in state " << state << ".");
+if (minStrategyPair.getPlayer1Strategy().hasDefinedChoice(state)) {
+uint64_t lowerPlayer1Choice = minStrategyPair.getPlayer1Strategy().getChoice(state);
+STORM_LOG_ASSERT(minStrategyPair.getPlayer2Strategy().hasDefinedChoice(lowerPlayer1Choice), "Expected lower player 2 choice for state " << state << " (upper player 1 choice " << lowerPlayer1Choice << ").");
+uint64_t lowerPlayer2Choice = minStrategyPair.getPlayer2Strategy().getChoice(lowerPlayer1Choice);
+if (maxStrategyPair.getPlayer2Strategy().hasDefinedChoice(lowerPlayer1Choice)) {
+uint64_t upperPlayer2Choice = maxStrategyPair.getPlayer2Strategy().getChoice(lowerPlayer1Choice);
+if (lowerPlayer2Choice != upperPlayer2Choice) {
+ValueType lowerValueUnderLowerChoice = transitionMatrix.multiplyRowWithVector(lowerPlayer2Choice, quantitativeResult.getMin().getValues());
+ValueType lowerValueUnderUpperChoice = transitionMatrix.multiplyRowWithVector(upperPlayer2Choice, quantitativeResult.getMin().getValues());
+if (lowerValueUnderUpperChoice <= lowerValueUnderLowerChoice) {
+minStrategyPair.getPlayer2Strategy().setChoice(lowerPlayer1Choice, upperPlayer2Choice);
+std::cout << "[min] redirecting choice of state " << state << ": " << lowerValueUnderLowerChoice << " vs. " << lowerValueUnderUpperChoice << std::endl;
+}
+}
+}
+}
+if (maxStrategyPair.getPlayer1Strategy().hasDefinedChoice(state)) {
+uint64_t upperPlayer1Choice = maxStrategyPair.getPlayer1Strategy().getChoice(state);
+STORM_LOG_ASSERT(maxStrategyPair.getPlayer2Strategy().hasDefinedChoice(upperPlayer1Choice), "Expected upper player 2 choice for state " << state << " (upper player 1 choice " << upperPlayer1Choice << ").");
+uint64_t upperPlayer2Choice = minStrategyPair.getPlayer2Strategy().getChoice(upperPlayer1Choice);
+if (minStrategyPair.getPlayer2Strategy().hasDefinedChoice(upperPlayer1Choice)) {
+uint64_t lowerPlayer2Choice = minStrategyPair.getPlayer2Strategy().getChoice(upperPlayer1Choice);
+if (lowerPlayer2Choice != upperPlayer2Choice) {
+ValueType lowerValueUnderLowerChoice = transitionMatrix.multiplyRowWithVector(lowerPlayer2Choice, quantitativeResult.getMin().getValues());
+ValueType lowerValueUnderUpperChoice = transitionMatrix.multiplyRowWithVector(upperPlayer2Choice, quantitativeResult.getMin().getValues());
+if (lowerValueUnderUpperChoice <= lowerValueUnderLowerChoice) {
+minStrategyPair.getPlayer2Strategy().setChoice(upperPlayer1Choice, upperPlayer2Choice);
+std::cout << "[max] redirecting choice of state " << state << ": " << lowerValueUnderLowerChoice << " vs. " << lowerValueUnderUpperChoice << std::endl;
+}
+}
+}
+}
+}
+///////// SANITY CHECK: apply lower strategy, obtain DTMC matrix and model check it. the values should
+///////// still be the lower ones.
+storm::storage::SparseMatrixBuilder<ValueType> dtmcMatrixBuilder(player1Groups.size() - 1, player1Groups.size() - 1);
+for (uint64_t state = 0; state < player1Groups.size() - 1; ++state) {
+if (targetStates.get(state)) {
+dtmcMatrixBuilder.addNextValue(state, state, storm::utility::one<ValueType>());
+} else {
+uint64_t player2Choice = minStrategyPair.getPlayer2Strategy().getChoice(minStrategyPair.getPlayer1Strategy().getChoice(state));
+for (auto const& entry : transitionMatrix.getRow(player2Choice)) {
+dtmcMatrixBuilder.addNextValue(state, entry.getColumn(), entry.getValue());
+}
+}
+}
+auto dtmcMatrix = dtmcMatrixBuilder.build();
+std::vector<ValueType> sanityValues = storm::modelchecker::helper::SparseDtmcPrctlHelper<ValueType>::computeUntilProbabilities(Environment(), storm::solver::SolveGoal<ValueType>(), dtmcMatrix, dtmcMatrix.transpose(), constraintStates, targetStates, false);
+for (uint64_t state = 0; state < player1Groups.size() - 1; ++state) {
+std::cout << "state " << state << ": " << sanityValues[state] << " vs " << quantitativeResult.getMin().getValues()[state] << std::endl;
+std::cout << "[min] state is prob0? " << qualitativeResult.getProb0Min().getStates().get(state) << ", prob1? " << qualitativeResult.getProb1Min().getStates().get(state) << std::endl;
+STORM_LOG_ASSERT(std::abs(sanityValues[state] - quantitativeResult.getMin().getValues()[state]) < 1e-6, "Got weird min divergences!");
+}
+///////// SANITY CHECK: apply upper strategy, obtain DTMC matrix and model check it. the values should
+///////// still be the upper ones.
+dtmcMatrixBuilder = storm::storage::SparseMatrixBuilder<ValueType>(player1Groups.size() - 1, player1Groups.size() - 1);
+for (uint64_t state = 0; state < player1Groups.size() - 1; ++state) {
+if (targetStates.get(state)) {
+dtmcMatrixBuilder.addNextValue(state, state, storm::utility::one<ValueType>());
+} else {
+uint64_t player2Choice = maxStrategyPair.getPlayer2Strategy().getChoice(maxStrategyPair.getPlayer1Strategy().getChoice(state));
+for (auto const& entry : transitionMatrix.getRow(player2Choice)) {
+dtmcMatrixBuilder.addNextValue(state, entry.getColumn(), entry.getValue());
+}
+}
+}
+dtmcMatrix = dtmcMatrixBuilder.build();
+sanityValues = storm::modelchecker::helper::SparseDtmcPrctlHelper<ValueType>::computeUntilProbabilities(Environment(), storm::solver::SolveGoal<ValueType>(), dtmcMatrix, dtmcMatrix.transpose(), constraintStates, targetStates, false);
+for (uint64_t state = 0; state < player1Groups.size() - 1; ++state) {
+std::cout << "state " << state << ": " << sanityValues[state] << " vs " << quantitativeResult.getMax().getValues()[state] << std::endl;
+std::cout << "[max] state is prob0? " << qualitativeResult.getProb0Max().getStates().get(state) << ", prob1? " << qualitativeResult.getProb1Max().getStates().get(state) << std::endl;
+STORM_LOG_ASSERT(std::abs(sanityValues[state] - quantitativeResult.getMax().getValues()[state]) < 1e-6, "Got weird max divergences!");
+}
 // Make sure that all strategies are still valid strategies.
 STORM_LOG_ASSERT(minStrategyPair.getNumberOfUndefinedPlayer1States() <= targetStates.getNumberOfSetBits(), "Expected at most " << targetStates.getNumberOfSetBits() << " (number of target states) player 1 states with undefined choice but got " << minStrategyPair.getNumberOfUndefinedPlayer1States() << ".");
 STORM_LOG_ASSERT(maxStrategyPair.getNumberOfUndefinedPlayer1States() <= targetStates.getNumberOfSetBits(), "Expected at most " << targetStates.getNumberOfSetBits() << " (number of target states) player 1 states with undefined choice but got " << maxStrategyPair.getNumberOfUndefinedPlayer1States() << ".");
@@ -926,7 +1024,6 @@ namespace storm {
 ExplicitQualitativeGameResultMinMax result;
 ExplicitQualitativeGameResult problematicStates = storm::utility::graph::performProb0(transitionMatrix, player1Groups, player1BackwardTransitions, player2BackwardTransitions, constraintStates, targetStates, storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize);
-std::cout << "Found " << problematicStates.getStates().getNumberOfSetBits() << " problematic states" << std::endl;
 result.prob0Min = storm::utility::graph::performProb0(transitionMatrix, player1Groups, player1BackwardTransitions, player2BackwardTransitions, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Minimize, &minStrategyPair);
 result.prob1Min = storm::utility::graph::performProb1(transitionMatrix, player1Groups, player1BackwardTransitions, player2BackwardTransitions, constraintStates, targetStates, player1Direction, storm::OptimizationDirection::Minimize, &minStrategyPair);
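
Note: the sanity checks added above fix one of the two strategies, build the DTMC it induces, model check that DTMC, and expect the game's lower (resp. upper) values back. The following is a minimal, self-contained sketch of that idea using plain value iteration instead of SparseDtmcPrctlHelper; the names Transition, InducedDtmc, computeReachability and sanityCheck are hypothetical and not Storm's API.

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical flattened model: row i holds the outgoing distribution of state i
// in the DTMC induced by fixing one strategy pair of the abstraction game.
struct Transition { std::uint64_t column; double value; };
using InducedDtmc = std::vector<std::vector<Transition>>;

// Value iteration for reachability probabilities on the induced DTMC; a
// stand-in for the SparseDtmcPrctlHelper call used by the sanity check.
std::vector<double> computeReachability(InducedDtmc const& dtmc, std::vector<bool> const& target, double precision = 1e-10) {
    std::vector<double> values(dtmc.size(), 0.0);
    for (std::size_t s = 0; s < dtmc.size(); ++s) {
        if (target[s]) values[s] = 1.0;  // Target states have probability one.
    }
    double diff;
    do {
        diff = 0.0;
        std::vector<double> next(values);
        for (std::size_t s = 0; s < dtmc.size(); ++s) {
            if (target[s]) continue;
            double v = 0.0;
            for (Transition const& t : dtmc[s]) v += t.value * values[t.column];
            diff = std::max(diff, std::abs(v - next[s]));
            next[s] = v;
        }
        values = std::move(next);
    } while (diff > precision);
    return values;
}

// Sanity check in the spirit of the commit: the values of the DTMC induced by
// the min (or max) strategy should match the lower (or upper) game values.
void sanityCheck(InducedDtmc const& inducedDtmc, std::vector<bool> const& target, std::vector<double> const& gameValues) {
    std::vector<double> sanityValues = computeReachability(inducedDtmc, target);
    for (std::size_t s = 0; s < sanityValues.size(); ++s) {
        assert(std::abs(sanityValues[s] - gameValues[s]) < 1e-6 && "Got weird divergences!");
    }
}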
