
Various POMDP functionalities

Refactoring
Sebastian Junges, 5 years ago
commit 412e94469c
  1. src/mod_pomdp.cpp (+6)
  2. src/pomdp/qualitative_analysis.cpp (+46)
  3. src/pomdp/qualitative_analysis.h (+6)
  4. src/pomdp/tracker.cpp (+15)
  5. src/pomdp/tracker.h (+4)

src/mod_pomdp.cpp (+6)

@@ -1,5 +1,8 @@
#include "common.h"

#include "pomdp/tracker.h"
#include "pomdp/qualitative_analysis.h"
#include "pomdp/transformations.h"

PYBIND11_MODULE(pomdp, m) {
@@ -9,5 +12,8 @@ PYBIND11_MODULE(pomdp, m) {
    py::options options;
    options.disable_function_signatures();
#endif
    define_tracker(m);
    define_qualitative_policy_search<double>(m, "Double");
    define_qualitative_policy_search_nt(m);
    define_transformations<double>(m, "Double");
}
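For orientation, the registrations above become visible from Python once the module is built. A minimal sketch, assuming the compiled module ships as stormpy.pomdp (the import path is an assumption, not part of this commit):

    # Minimal sketch; the stormpy.pomdp import path is an assumption.
    import stormpy.pomdp

    # The calls above register, among others (the "Double" suffix is the vtSuffix argument):
    #   stormpy.pomdp.BeliefSupportTrackerDouble
    #   stormpy.pomdp.create_iterative_qualitative_search_solver_Double
    #   stormpy.pomdp.IterativeQualitativeSearchOptions
    print([name for name in dir(stormpy.pomdp) if "Double" in name or "Qualitative" in name])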

src/pomdp/qualitative_analysis.cpp (+46)

@@ -0,0 +1,46 @@
#include "qualitative_analysis.h"
#include "src/helpers.h"

#include <storm-pomdp/analysis/MemlessStrategySearchQualitative.h>
#include <storm-pomdp/analysis/QualitativeAnalysisOnGraphs.h>
#include <storm-pomdp/analysis/WinningRegionQueryInterface.h>
#include <storm/logic/Formula.h>

template<typename ValueType> using SparsePomdp = storm::models::sparse::Pomdp<ValueType>;
template<typename ValueType>
std::shared_ptr<storm::pomdp::MemlessStrategySearchQualitative<ValueType>> createWinningRegionSolver(SparsePomdp<ValueType> const& pomdp, storm::logic::Formula const& formula, storm::pomdp::MemlessSearchOptions const& options) {
    STORM_LOG_TRACE("Run qualitative preprocessing...");
    storm::analysis::QualitativeAnalysisOnGraphs<ValueType> qualitativeAnalysis(pomdp);
    // After preprocessing, this might be done cheaper.
    storm::storage::BitVector targetStates = qualitativeAnalysis.analyseProb1(formula.asProbabilityOperatorFormula());
    storm::storage::BitVector surelyNotAlmostSurelyReachTarget = qualitativeAnalysis.analyseProbSmaller1(formula.asProbabilityOperatorFormula());
    storm::expressions::ExpressionManager expressionManager;
    std::shared_ptr<storm::utility::solver::SmtSolverFactory> smtSolverFactory = std::make_shared<storm::utility::solver::Z3SmtSolverFactory>();
    return std::make_shared<storm::pomdp::MemlessStrategySearchQualitative<ValueType>>(pomdp, targetStates, surelyNotAlmostSurelyReachTarget, smtSolverFactory, options);
}
template<typename ValueType>
void define_qualitative_policy_search(py::module& m, std::string const& vtSuffix) {
    m.def(("create_iterative_qualitative_search_solver_" + vtSuffix).c_str(), &createWinningRegionSolver<ValueType>, "Create a solver for qualitative queries on POMDPs", py::arg("pomdp"), py::arg("formula"), py::arg("options"));

    py::class_<storm::pomdp::MemlessStrategySearchQualitative<ValueType>, std::shared_ptr<storm::pomdp::MemlessStrategySearchQualitative<ValueType>>> mssq(m, ("IterativeQualitativeSearchSolver" + vtSuffix).c_str(), "Solver for POMDPs that solves qualitative queries");
    mssq.def("compute_winning_region", &storm::pomdp::MemlessStrategySearchQualitative<ValueType>::computeWinningRegion, py::arg("lookahead"));
    mssq.def_property_readonly("last_winning_region", &storm::pomdp::MemlessStrategySearchQualitative<ValueType>::getLastWinningRegion, "Get the last computed winning region");

    py::class_<storm::pomdp::WinningRegionQueryInterface<ValueType>> wrqi(m, ("BeliefSupportWinningRegionQueryInterface" + vtSuffix).c_str());
    wrqi.def(py::init<SparsePomdp<ValueType> const&, storm::pomdp::WinningRegion const&>(), py::arg("pomdp"), py::arg("winning_region"));
    wrqi.def("query_current_belief", &storm::pomdp::WinningRegionQueryInterface<ValueType>::isInWinningRegion, py::arg("current_belief"));
    wrqi.def("query_action", &storm::pomdp::WinningRegionQueryInterface<ValueType>::staysInWinningRegion, py::arg("current_belief"), py::arg("action"));
}
template void define_qualitative_policy_search<double>(py::module& m, std::string const& vtSuffix);

void define_qualitative_policy_search_nt(py::module& m) {
    py::class_<storm::pomdp::MemlessSearchOptions> mssqopts(m, "IterativeQualitativeSearchOptions", "Options for the IterativeQualitativeSearch");
    mssqopts.def(py::init<>());

    py::class_<storm::pomdp::WinningRegion> winningRegion(m, "BeliefSupportWinningRegion");
}
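A hedged usage sketch of these bindings from Python follows; the model and property construction via generic stormpy helpers is an assumption, only the stormpy.pomdp calls mirror the bindings registered above:

    # Hypothetical sketch: building `pomdp` and the formula is assumed to work
    # via standard stormpy helpers; "maze.prism" and the property are made up.
    import stormpy
    import stormpy.pomdp

    program = stormpy.parse_prism_program("maze.prism")
    properties = stormpy.parse_properties_for_prism_program('P>=1 [F "goal"]', program)
    pomdp = stormpy.build_model(program, properties)  # assumed to yield a sparse POMDP

    options = stormpy.pomdp.IterativeQualitativeSearchOptions()
    solver = stormpy.pomdp.create_iterative_qualitative_search_solver_Double(
        pomdp, properties[0].raw_formula, options)
    solver.compute_winning_region(lookahead=pomdp.nr_states)
    region = solver.last_winning_region

    # Query the computed region for a given belief support; the exact argument
    # type of query_current_belief/query_action follows the BitVector bindings.
    query = stormpy.pomdp.BeliefSupportWinningRegionQueryInterfaceDouble(pomdp, region)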

src/pomdp/qualitative_analysis.h (+6)

@@ -0,0 +1,6 @@
#pragma once
#include "common.h"

template<typename ValueType>
void define_qualitative_policy_search(py::module& m, std::string const& vtSuffix);
void define_qualitative_policy_search_nt(py::module& m);

src/pomdp/tracker.cpp (+15)

@@ -0,0 +1,15 @@
#include "tracker.h"
#include "src/helpers.h"

#include <storm-pomdp/generator/BeliefSupportTracker.h>

template<typename ValueType> using SparsePomdp = storm::models::sparse::Pomdp<ValueType>;
// Note: the alias must forward ValueType; hard-coding <double> here would silently
// ignore the template parameter.
template<typename ValueType> using SparsePomdpTracker = storm::generator::BeliefSupportTracker<ValueType>;

void define_tracker(py::module& m) {
    py::class_<SparsePomdpTracker<double>> tracker(m, "BeliefSupportTrackerDouble", "Tracker for belief supports");
    tracker.def(py::init<SparsePomdp<double> const&>(), py::arg("pomdp"));
    tracker.def("get_current_belief_support", &SparsePomdpTracker<double>::getCurrentBeliefSupport, "Get the belief support induced by the trace seen so far");
    tracker.def("track", &SparsePomdpTracker<double>::track, py::arg("action"), py::arg("observation"));
}
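And a matching sketch for driving the tracker from Python; `pomdp` is assumed to be built as in the earlier sketch, and the concrete action/observation indices are illustrative only:

    # Hypothetical sketch, assuming `pomdp` from the example above.
    import stormpy.pomdp

    tracker = stormpy.pomdp.BeliefSupportTrackerDouble(pomdp)
    tracker.track(action=0, observation=2)        # feed one step of the observed trace
    support = tracker.get_current_belief_support()  # states consistent with the trace so far
    print(support)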

src/pomdp/tracker.h (+4)

@@ -0,0 +1,4 @@
#pragma once
#include "common.h"

void define_tracker(py::module& m);