diff --git a/src/pomdp/qualitative_analysis.cpp b/src/pomdp/qualitative_analysis.cpp
index 26d779f..1cb86b2 100644
--- a/src/pomdp/qualitative_analysis.cpp
+++ b/src/pomdp/qualitative_analysis.cpp
@@ -1,6 +1,6 @@
 #include "tracker.h"
 #include "src/helpers.h"
-#include <storm-pomdp/analysis/MemlessStrategySearchQualitative.h>
+#include <storm-pomdp/analysis/IterativePolicySearch.h>
 #include
 #include
 #include
@@ -8,7 +8,7 @@
 template<typename ValueType> using SparsePomdp = storm::models::sparse::Pomdp<ValueType>;

 template<typename ValueType>
-std::shared_ptr<storm::pomdp::MemlessStrategySearchQualitative<ValueType>> createWinningRegionSolver(SparsePomdp<ValueType> const& pomdp, storm::logic::Formula const& formula, storm::pomdp::MemlessSearchOptions const& options) {
+std::shared_ptr<storm::pomdp::IterativePolicySearch<ValueType>> createWinningRegionSolver(SparsePomdp<ValueType> const& pomdp, storm::logic::Formula const& formula, storm::pomdp::MemlessSearchOptions const& options) {
     STORM_LOG_TRACE("Run qualitative preprocessing...");
     storm::analysis::QualitativeAnalysisOnGraphs<ValueType> qualitativeAnalysis(pomdp);

@@ -17,7 +17,7 @@ std::shared_ptr<storm::pomdp::MemlessStrategySearchQualitative<ValueType>> creat
     storm::storage::BitVector surelyNotAlmostSurelyReachTarget = qualitativeAnalysis.analyseProbSmaller1(formula.asProbabilityOperatorFormula());
     storm::expressions::ExpressionManager expressionManager;
     std::shared_ptr<storm::utility::solver::SmtSolverFactory> smtSolverFactory = std::make_shared<storm::utility::solver::Z3SmtSolverFactory>();
-    return std::make_shared<storm::pomdp::MemlessStrategySearchQualitative<ValueType>>(pomdp, targetStates, surelyNotAlmostSurelyReachTarget, smtSolverFactory, options);
+    return std::make_shared<storm::pomdp::IterativePolicySearch<ValueType>>(pomdp, targetStates, surelyNotAlmostSurelyReachTarget, smtSolverFactory, options);
 }

 template<typename ValueType>
@@ -34,10 +34,10 @@ template<typename ValueType>
 void define_qualitative_policy_search(py::module& m, std::string const& vtSuffix) {
     m.def(("create_iterative_qualitative_search_solver_" + vtSuffix).c_str(), &createWinningRegionSolver<ValueType>, "Create solver " ,py::arg("pomdp"), py::arg("formula"), py::arg("options"));
     m.def(("prepare_pomdp_for_qualitative_search_" +vtSuffix).c_str(), &preparePOMDPForQualitativeSearch<ValueType>, "Preprocess POMDP", py::arg("pomdp"), py::arg("formula"));
-    py::class_<storm::pomdp::MemlessStrategySearchQualitative<ValueType>, std::shared_ptr<storm::pomdp::MemlessStrategySearchQualitative<ValueType>>> mssq(m, ("IterativeQualitativeSearchSolver" + vtSuffix).c_str(), "Solver for POMDPs that solves qualitative queries");
-    mssq.def("compute_winning_region", &storm::pomdp::MemlessStrategySearchQualitative<ValueType>::computeWinningRegion, py::arg("lookahead"));
-    mssq.def("compute_winning_policy_for_initial_states", &storm::pomdp::MemlessStrategySearchQualitative<ValueType>::analyzeForInitialStates, py::arg("lookahead"));
-    mssq.def_property_readonly("last_winning_region", &storm::pomdp::MemlessStrategySearchQualitative<ValueType>::getLastWinningRegion, "get the last computed winning region");
+    py::class_<storm::pomdp::IterativePolicySearch<ValueType>, std::shared_ptr<storm::pomdp::IterativePolicySearch<ValueType>>> mssq(m, ("IterativeQualitativeSearchSolver" + vtSuffix).c_str(), "Solver for POMDPs that solves qualitative queries");
+    mssq.def("compute_winning_region", &storm::pomdp::IterativePolicySearch<ValueType>::computeWinningRegion, py::arg("lookahead"));
+    mssq.def("compute_winning_policy_for_initial_states", &storm::pomdp::IterativePolicySearch<ValueType>::analyzeForInitialStates, py::arg("lookahead"));
+    mssq.def_property_readonly("last_winning_region", &storm::pomdp::IterativePolicySearch<ValueType>::getLastWinningRegion, "get the last computed winning region");
     py::class_<storm::pomdp::WinningRegionQueryInterface<ValueType>> wrqi(m, ("BeliefSupportWinningRegionQueryInterface" + vtSuffix).c_str());
     wrqi.def(py::init<SparsePomdp<ValueType> const&, storm::pomdp::WinningRegion const&>(), py::arg("pomdp"), py::arg("BeliefSupportWinningRegion"));
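For context, a minimal sketch of how the rebound solver could be driven from Python once this patch is applied. Only the binding names shown in the diff are taken from the patch; the module path `stormpy.pomdp`, the value-type suffix `Double`, and the way `pomdp`, `formula`, and `options` are constructed are assumptions of this sketch.

```python
# Hypothetical usage sketch for the bindings defined above (not part of the patch).
# Assumed: module path stormpy.pomdp, value-type suffix "Double", and that
# `pomdp`, `formula`, and `options` (wrapping storm::pomdp::MemlessSearchOptions)
# have already been constructed by the caller.
import stormpy
import stormpy.pomdp


def winning_region_for(pomdp, formula, options, lookahead):
    # Preprocess the POMDP for the qualitative search
    # (binding: prepare_pomdp_for_qualitative_search_<vtSuffix>),
    # assuming the call returns the preprocessed model.
    pomdp = stormpy.pomdp.prepare_pomdp_for_qualitative_search_Double(pomdp, formula)
    # Build the iterative qualitative search solver; on the C++ side this is now
    # storm::pomdp::IterativePolicySearch (formerly MemlessStrategySearchQualitative).
    solver = stormpy.pomdp.create_iterative_qualitative_search_solver_Double(pomdp, formula, options)
    # Search for a winning policy from the initial states with the given lookahead,
    # then read back the last computed winning region.
    solver.compute_winning_policy_for_initial_states(lookahead)
    return solver.last_winning_region
```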