From c74a6ea2011b951a75229558448b62656c505428 Mon Sep 17 00:00:00 2001
From: Alexander Bork
Date: Fri, 28 Feb 2020 13:01:51 +0100
Subject: [PATCH] Removal of unused extractActions function

---
 .../ApproximatePOMDPModelchecker.cpp          | 105 ++----------------
 .../ApproximatePOMDPModelchecker.h            |  46 --------
 2 files changed, 8 insertions(+), 143 deletions(-)

diff --git a/src/storm-pomdp/modelchecker/ApproximatePOMDPModelchecker.cpp b/src/storm-pomdp/modelchecker/ApproximatePOMDPModelchecker.cpp
index 66135c02b..e88daf09b 100644
--- a/src/storm-pomdp/modelchecker/ApproximatePOMDPModelchecker.cpp
+++ b/src/storm-pomdp/modelchecker/ApproximatePOMDPModelchecker.cpp
@@ -295,7 +295,7 @@ namespace storm {

                 //beliefsToBeExpanded.push_back(initialBelief.id); I'm curious what happens if we do this instead of first triangulating. Should do nothing special if belief is on grid, otherwise it gets interesting
                 // Expand the beliefs to generate the grid on-the-fly
-                if(explorationThreshold > 0){
+                if (explorationThreshold > 0) {
                     STORM_PRINT("Exploration threshold: " << explorationThreshold << std::endl)
                 }
                 while (!beliefsToBeExpanded.empty()) {
@@ -340,7 +340,7 @@ namespace storm {
                                                                           observationResolutionVector[beliefList[idNextBelief].observation], pomdp.getNumberOfStates());
                                 subSimplex = temp.first;
                                 lambdas = temp.second;
-                                if(cacheSubsimplices){
+                                if (cacheSubsimplices) {
                                     subSimplexCache[idNextBelief] = subSimplex;
                                     lambdaCache[idNextBelief] = lambdas;
                                 }
@@ -525,7 +525,7 @@ namespace storm {
                             auto temp = computeSubSimplexAndLambdas(nextBelief.probabilities, gridResolution, pomdp.getNumberOfStates());
                             subSimplex = temp.first;
                             lambdas = temp.second;
-                            if(cacheSubsimplices) {
+                            if (cacheSubsimplices) {
                                 subSimplexCache[nextBelief.id] = subSimplex;
                                 lambdaCache[nextBelief.id] = lambdas;
                             }
@@ -541,8 +541,8 @@ namespace storm {
                         }
                         // Update the selected actions
                         if ((min && cc.isLess(storm::utility::zero<ValueType>(), chosenValue - currentValue)) ||
-                                (!min && cc.isLess(storm::utility::zero<ValueType>(), currentValue - chosenValue)) ||
-                                cc.isEqual(storm::utility::zero<ValueType>(), chosenValue - currentValue)) {
+                            (!min && cc.isLess(storm::utility::zero<ValueType>(), currentValue - chosenValue)) ||
+                            cc.isEqual(storm::utility::zero<ValueType>(), chosenValue - currentValue)) {
                             chosenValue = currentValue;
                             if (!(useMdp && cc.isEqual(storm::utility::zero<ValueType>(), chosenValue - currentValue))) {
                                 chosenActionIndices.clear();
                             }
@@ -573,7 +573,7 @@ namespace storm {

                 std::vector<ValueType> initialLambda;
                 std::vector<std::vector<ValueType>> initialSubsimplex;
-                if(cacheSubsimplices){
+                if (cacheSubsimplices) {
                     initialLambda = lambdaCache[0];
                     initialSubsimplex = subSimplexCache[0];
                 } else {
@@ -647,7 +647,7 @@ namespace storm {

                 std::map<uint64_t, std::vector<ValueType>> lambdaCache;
                 auto temp = computeSubSimplexAndLambdas(initialBelief.probabilities, gridResolution, pomdp.getNumberOfStates());
-                if(cacheSubsimplices) {
+                if (cacheSubsimplices) {
                     subSimplexCache[0] = temp.first;
                     lambdaCache[0] = temp.second;
                 }
@@ -884,96 +884,6 @@ namespace storm {
                 return smb.build();
             }

-            template<typename ValueType, typename RewardModelType>
-            std::vector<uint64_t> ApproximatePOMDPModelchecker<ValueType, RewardModelType>::extractBestActions(
-                    storm::models::sparse::Pomdp<ValueType, RewardModelType> const &pomdp,
-                    std::vector<storm::pomdp::Belief<ValueType>> &beliefList,
-                    std::vector<bool> &beliefIsTarget,
-                    std::set<uint32_t> const &targetObservations,
-                    std::map<uint64_t, std::vector<std::map<uint32_t, ValueType>>> &observationProbabilities,
-                    std::map<uint64_t, std::vector<std::map<uint32_t, uint64_t>>> &nextBelieves,
-                    std::map<uint64_t, ValueType> &result,
-                    uint64_t gridResolution, uint64_t currentBeliefId, uint64_t nextId, bool min) {
-                storm::pomdp::Belief<ValueType> currentBelief = beliefList[currentBeliefId];
-
-                //TODO put this in extra function
-                std::vector<std::map<uint32_t, ValueType>> observationProbabilitiesInAction;
-                std::vector<std::map<uint32_t, uint64_t>> nextBelievesInAction;
-                uint64_t numChoices = pomdp.getNumberOfChoices(
-                        pomdp.getStatesWithObservation(currentBelief.observation).front());
-                for (uint64_t action = 0; action < numChoices; ++action) {
-                    std::map<uint32_t, ValueType> actionObservationProbabilities = computeObservationProbabilitiesAfterAction(
-                            pomdp, currentBelief, action);
-                    std::map<uint32_t, uint64_t> actionObservationBelieves;
-                    for (auto iter = actionObservationProbabilities.begin(); iter != actionObservationProbabilities.end(); ++iter) {
-                        uint32_t observation = iter->first;
-                        actionObservationBelieves[observation] = getBeliefAfterActionAndObservation(pomdp, beliefList, beliefIsTarget, targetObservations, currentBelief,
-                                                                                                    action, observation, nextId);
-                        nextId = beliefList.size();
-                    }
-                    observationProbabilitiesInAction.push_back(actionObservationProbabilities);
-                    nextBelievesInAction.push_back(actionObservationBelieves);
-                }
-                observationProbabilities.emplace(std::make_pair(currentBeliefId, observationProbabilitiesInAction));
-                nextBelieves.emplace(std::make_pair(currentBeliefId, nextBelievesInAction));
-
-                // choose the action which results in the value computed by the over-approximation
-                ValueType chosenValue = min ? storm::utility::infinity<ValueType>() : -storm::utility::infinity<ValueType>();
-                std::vector<uint64_t> chosenActionIndices;
-                ValueType currentValue;
-
-                for (uint64_t action = 0; action < numChoices; ++action) {
-                    currentValue = storm::utility::zero<ValueType>(); // simply change this for rewards?
-                    for (auto iter = observationProbabilities[currentBelief.id][action].begin();
-                         iter != observationProbabilities[currentBelief.id][action].end(); ++iter) {
-                        uint32_t observation = iter->first;
-                        storm::pomdp::Belief<ValueType> nextBelief = beliefList[nextBelieves[currentBelief.id][action][observation]];
-
-                        // compute subsimplex and lambdas according to the Lovejoy paper to approximate the next belief
-                        auto temp = computeSubSimplexAndLambdas(nextBelief.probabilities, gridResolution, pomdp.getNumberOfStates());
-                        std::vector<std::vector<ValueType>> subSimplex = temp.first;
-                        std::vector<ValueType> lambdas = temp.second;
-
-                        auto sum = storm::utility::zero<ValueType>();
-                        for (size_t j = 0; j < lambdas.size(); ++j) {
-                            if (!cc.isEqual(lambdas[j], storm::utility::zero<ValueType>())) {
-                                sum += lambdas[j] * result.at(getBeliefIdInVector(beliefList, observation, subSimplex[j]));
-                            }
-                        }
-                        currentValue += iter->second * sum;
-                    }
-
-                    // Update the selected actions
-                    if ((min && cc.isLess(storm::utility::zero<ValueType>(), chosenValue - currentValue)) ||
-                        (!min &&
-                         cc.isLess(storm::utility::zero<ValueType>(), currentValue - chosenValue)) ||
-                        cc.isEqual(storm::utility::zero<ValueType>(), chosenValue - currentValue)) {
-                        chosenValue = currentValue;
-                        if (!cc.isEqual(storm::utility::zero<ValueType>(), chosenValue - currentValue)) {
-                            chosenActionIndices.clear();
-                        }
-                        chosenActionIndices.push_back(action);
-                    }
-                }
-                return chosenActionIndices;
-            }
-
-            template<typename ValueType, typename RewardModelType>
-            std::vector<uint64_t> ApproximatePOMDPModelchecker<ValueType, RewardModelType>::extractBestAction(
-                    storm::models::sparse::Pomdp<ValueType, RewardModelType> const &pomdp,
-                    std::vector<storm::pomdp::Belief<ValueType>> &beliefList,
-                    std::vector<bool> &beliefIsTarget,
-                    std::set<uint32_t> const &targetObservations,
-                    std::map<uint64_t, std::vector<std::map<uint32_t, ValueType>>> &observationProbabilities,
-                    std::map<uint64_t, std::vector<std::map<uint32_t, uint64_t>>> &nextBelieves,
-                    std::map<uint64_t, ValueType> &result,
-                    uint64_t gridResolution, uint64_t currentBeliefId, uint64_t nextId, bool min) {
-                return std::vector<uint64_t>{
-                        extractBestActions(pomdp, beliefList, beliefIsTarget, targetObservations, observationProbabilities, nextBelieves, result, gridResolution, currentBeliefId,
-                                           nextId, min).front()};
-            }
-
-
             template<typename ValueType, typename RewardModelType>
             uint64_t ApproximatePOMDPModelchecker<ValueType, RewardModelType>::getBeliefIdInVector(
                     std::vector<storm::pomdp::Belief<ValueType>> const &grid, uint32_t observation,
@@ -1265,6 +1175,7 @@ namespace storm {
             class ApproximatePOMDPModelchecker<double>;

 #ifdef STORM_HAVE_CARL
+
             template
             class ApproximatePOMDPModelchecker<storm::RationalNumber>;

diff --git a/src/storm-pomdp/modelchecker/ApproximatePOMDPModelchecker.h b/src/storm-pomdp/modelchecker/ApproximatePOMDPModelchecker.h
index 66e5d563d..27e8f6bb7 100644
--- a/src/storm-pomdp/modelchecker/ApproximatePOMDPModelchecker.h
+++ b/src/storm-pomdp/modelchecker/ApproximatePOMDPModelchecker.h
@@ -179,52 +179,6 @@ namespace storm {
                                               std::set<uint32_t> const &targetObservations, bool min,
                                               uint64_t gridResolution, bool computeRewards);

-            /**
-             * TODO
-             * @param pomdp
-             * @param beliefList
-             * @param observationProbabilities
-             * @param nextBelieves
-             * @param result
-             * @param gridResolution
-             * @param currentBeliefId
-             * @param nextId
-             * @param min
-             * @return
-             */
-            std::vector<uint64_t> extractBestActions(storm::models::sparse::Pomdp<ValueType, RewardModelType> const &pomdp,
-                                                     std::vector<storm::pomdp::Belief<ValueType>> &beliefList,
-                                                     std::vector<bool> &beliefIsTarget,
-                                                     std::set<uint32_t> const &target_observations,
-                                                     std::map<uint64_t, std::vector<std::map<uint32_t, ValueType>>> &observationProbabilities,
-                                                     std::map<uint64_t, std::vector<std::map<uint32_t, uint64_t>>> &nextBelieves,
-                                                     std::map<uint64_t, ValueType> &result,
-                                                     uint64_t gridResolution, uint64_t currentBeliefId, uint64_t nextId,
-                                                     bool min);
-
-            /**
-             * TODO
-             * @param pomdp
-             * @param beliefList
-             * @param observationProbabilities
-             * @param nextBelieves
-             * @param result
-             * @param gridResolution
-             * @param currentBeliefId
-             * @param nextId
-             * @param min
-             * @return
-             */
-            std::vector<uint64_t> extractBestAction(storm::models::sparse::Pomdp<ValueType, RewardModelType> const &pomdp,
-                                                    std::vector<storm::pomdp::Belief<ValueType>> &beliefList,
-                                                    std::vector<bool> &beliefIsTarget,
-                                                    std::set<uint32_t> const &target_observations,
-                                                    std::map<uint64_t, std::vector<std::map<uint32_t, ValueType>>> &observationProbabilities,
-                                                    std::map<uint64_t, std::vector<std::map<uint32_t, uint64_t>>> &nextBelieves,
-                                                    std::map<uint64_t, ValueType> &result,
-                                                    uint64_t gridResolution, uint64_t currentBeliefId, uint64_t nextId,
-                                                    bool min);
-
             /**
              * Helper to compute an underapproximation of the reachability property.
              * The implemented method unrolls the belief support of the given POMDP up to a given number of belief states.