
build ppomdps

refactoring
Sebastian Junges 4 years ago
parent
commit
cba659ec7a
  1.   2   lib/stormpy/__init__.py
  2.   2   lib/stormpy/examples/files.py
  3. 142   lib/stormpy/examples/files/pomdp/maze_2_par.prism
  4.   5   lib/stormpy/pomdp/__init__.py
  5.   2   src/mod_pomdp.cpp
  6.   1   src/pomdp/transformations.cpp
  7.  16   src/storage/model.cpp

2  lib/stormpy/__init__.py

@@ -36,6 +36,8 @@ def _convert_sparse_model(model, parametric=False):
            return model._as_sparse_pdtmc()
        elif model.model_type == ModelType.MDP:
            return model._as_sparse_pmdp()
        elif model.model_type == ModelType.POMDP:
            return model._as_sparse_ppomdp()
        elif model.model_type == ModelType.CTMC:
            return model._as_sparse_pctmc()
        elif model.model_type == ModelType.MA:
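For reference, a minimal usage sketch of the new branch (not part of this diff; it assumes the standard stormpy parse/build entry points and the parametric maze example added below):

import stormpy
import stormpy.examples.files

# Build the parametric maze POMDP; _convert_sparse_model now routes
# parametric POMDPs through _as_sparse_ppomdp().
program = stormpy.parse_prism_program(stormpy.examples.files.prism_par_pomdp_maze)
model = stormpy.build_parametric_model(program)
assert model.model_type == stormpy.ModelType.POMDP
assert model.supports_parameters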

2  lib/stormpy/examples/files.py

@@ -37,6 +37,8 @@ prism_mdp_maze = _path("mdp", "maze_2.nm")
"""Prism example for the maze MDP"""
prism_pomdp_maze = _path("pomdp", "maze_2.prism")
"""Prism example for the maze POMDP"""
prism_par_pomdp_maze = _path("pomdp", "maze_2_par.prism")
"""Prism example for the parametric POMDP"""
dft_galileo_hecs = _path("dft", "hecs.dft")
"""DFT example for HECS (Galileo format)"""
dft_json_and = _path("dft", "and.json")
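The new constant is used like the existing example paths; a short sketch (not part of this diff):

import stormpy.examples.files

# Path of the new parametric maze example shipped with stormpy.
print(stormpy.examples.files.prism_par_pomdp_maze)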

142  lib/stormpy/examples/files/pomdp/maze_2_par.prism

@@ -0,0 +1,142 @@
// maze example (POMDP)
// slightly extends that presented in
// Littman, Cassandra and Kaelbling
// Learning policies for partially observable environments: Scaling up
// Technical Report CS, Brown University
// gxn 29/01/16
// state space (value of variable "s")
//
//  0  1  2  3  4
//  5     6     7
//  8     9    10
// 11    13    12
//
// 13 is the target
pomdp
// can observe the walls and target
observables
o
endobservables
// o=0 - observation in initial state
// o=1 - west and north walls (s0)
// o=2 - north and south walls (s1 and s3)
// o=3 - north wall (s2)
// o=4 - east and north walls (s4)
// o=5 - east and west walls (s5, s6, s7, s8, s9 and s10)
// o=6 - east, west and south walls (s11 and s12)
// o=7 - the target (s13)
const double p;
module maze
s : [-1..13];
o : [0..7];
// initialisation
[] s=-1 -> 1/13 : (s'=0) & (o'=1)
+ 1/13 : (s'=1) & (o'=2)
+ 1/13 : (s'=2) & (o'=3)
+ 1/13 : (s'=3) & (o'=2)
+ 1/13 : (s'=4) & (o'=4)
+ 1/13 : (s'=5) & (o'=5)
+ 1/13 : (s'=6) & (o'=5)
+ 1/13 : (s'=7) & (o'=5)
+ 1/13 : (s'=8) & (o'=5)
+ 1/13 : (s'=9) & (o'=5)
+ 1/13 : (s'=10) & (o'=5)
+ 1/13 : (s'=11) & (o'=6)
+ 1/13 : (s'=12) & (o'=6);
// moving around the maze
[east] s=0 -> (s'=1) & (o'=2);
[west] s=0 -> (s'=0);
[north] s=0 -> (s'=0);
[south] s=0 -> (s'=5) & (o'=5);
[east] s=1 -> (s'=2) & (o'=3);
[west] s=1 -> (s'=0) & (o'=1);
[north] s=1 -> (s'=1);
[south] s=1 -> (s'=1);
[east] s=2 -> (s'=3) & (o'=2);
[west] s=2 -> (s'=1) & (o'=2);
[north] s=2 -> (s'=2);
[south] s=2 -> (s'=6) & (o'=5);
[east] s=3 -> (s'=4) & (o'=4);
[west] s=3 -> (s'=2) & (o'=3);
[north] s=3 -> (s'=3);
[south] s=3 -> (s'=3);
[east] s=4 -> (s'=4);
[west] s=4 -> (s'=3) & (o'=2);
[north] s=4 -> (s'=4);
[south] s=4 -> (s'=7) & (o'=5);
[east] s=5 -> (s'=5);
[west] s=5 -> (s'=5);
[north] s=5 -> (s'=0) & (o'=1);
[south] s=5 -> (s'=8);
[east] s=6 -> (s'=6);
[west] s=6 -> (s'=6);
[north] s=6 -> (s'=2) & (o'=3);
[south] s=6 -> (s'=9);
[east] s=7 -> (s'=7);
[west] s=7 -> (s'=7);
[north] s=7 -> (s'=4) & (o'=4);
[south] s=7 -> (s'=10);
[east] s=8 -> (s'=8);
[west] s=8 -> (s'=8);
[north] s=8 -> (s'=5);
[south] s=8 -> (s'=11) & (o'=6);
[east] s=9 -> (s'=9);
[west] s=9 -> (s'=9);
[north] s=9 -> (s'=6);
[south] s=9 -> p: (s'=13) & (o'=7) + (1-p):(s'=9);
[east] s=10 -> (s'=10);
[west] s=10 -> (s'=10);
[north] s=10 -> (s'=7);
[south] s=10 -> (s'=12) & (o'=6);
[east] s=11 -> (s'=11);
[west] s=11 -> (s'=11);
[north] s=11 -> (s'=8) & (o'=5);
[south] s=11 -> (s'=11);
[east] s=12 -> (s'=12);
[west] s=12 -> (s'=12);
[north] s=12 -> (s'=10) & (o'=5);
[south] s=12 -> (s'=12);
// loop when we reach the target
[done] s=13 -> true;
endmodule
// reward structure (number of steps to reach the target)
rewards
[east] true : 1;
[west] true : 1;
[north] true : 1;
[south] true : 1;
endrewards
// target observation
label "goal" = o=7;
label "bad" = o=6;
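The only parametric transition is the move south out of s=9, which reaches the target with probability p. A sketch of how the parameter can be inspected once the model is built (not part of this diff; collect_probability_parameters is assumed to be available on the parametric sparse models):

import stormpy
import stormpy.examples.files

program = stormpy.parse_prism_program(stormpy.examples.files.prism_par_pomdp_maze)
model = stormpy.build_parametric_model(program)

# The rational-function transition matrix should carry exactly one parameter, p.
parameters = model.collect_probability_parameters()
print([str(par) for par in parameters])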

5  lib/stormpy/pomdp/__init__.py

@@ -2,4 +2,7 @@ from . import pomdp
from .pomdp import *

def make_canonic(model):
    return pomdp._make_canonic_Double(model)
    if model.supports_parameters:
        return pomdp._make_canonic_Rf(model)
    else:
        return pomdp._make_canonic_Double(model)
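A usage sketch for the new dispatch (not part of this diff; it assumes the parametric maze model builds as above):

import stormpy
import stormpy.pomdp
import stormpy.examples.files

program = stormpy.parse_prism_program(stormpy.examples.files.prism_par_pomdp_maze)
model = stormpy.build_parametric_model(program)

# supports_parameters is True here, so make_canonic dispatches to
# _make_canonic_Rf; a double-valued POMDP still uses _make_canonic_Double.
canonic_model = stormpy.pomdp.make_canonic(model)
print(canonic_model)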

2  src/mod_pomdp.cpp

@@ -4,6 +4,7 @@
#include "pomdp/tracker.h"
#include "pomdp/qualitative_analysis.h"
#include "pomdp/transformations.h"
#include <storm/adapters/RationalFunctionAdapter.h>

PYBIND11_MODULE(pomdp, m) {
    m.doc() = "Functionality for POMDP analysis";
@@ -16,4 +17,5 @@ PYBIND11_MODULE(pomdp, m) {
    define_qualitative_policy_search<double>(m, "Double");
    define_qualitative_policy_search_nt(m);
    define_transformations<double>(m, "Double");
    define_transformations<storm::RationalFunction>(m, "Rf");
}

1  src/pomdp/transformations.cpp

@@ -13,3 +13,4 @@ void define_transformations(py::module& m, std::string const& vtSuffix) {
}
template void define_transformations<double>(py::module& m, std::string const& vtSuffix);
template void define_transformations<storm::RationalFunction>(py::module& m, std::string const& vtSuffix);
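The explicit instantiation here, together with the "Rf" registration in mod_pomdp.cpp, is what makes the rational-function variant visible from Python. A quick check sketch (not part of this diff):

import stormpy.pomdp

# Both value-type instantiations should now be exported by the native module.
print(hasattr(stormpy.pomdp.pomdp, "_make_canonic_Double"))
print(hasattr(stormpy.pomdp.pomdp, "_make_canonic_Rf"))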

16  src/storage/model.cpp

@@ -124,6 +124,9 @@ void define_model(py::module& m) {
        .def("_as_sparse_pomdp", [](ModelBase &modelbase) {
                return modelbase.as<SparsePomdp<double>>();
            }, "Get model as sparse POMDP")
        .def("_as_sparse_ppomdp", [](ModelBase &modelbase) {
                return modelbase.as<SparsePomdp<RationalFunction>>();
            }, "Get model as sparse pPOMDP")
        .def("_as_sparse_ctmc", [](ModelBase &modelbase) {
                return modelbase.as<SparseCtmc<double>>();
            }, "Get model as sparse CTMC")
@@ -261,11 +264,20 @@ void define_sparse_model(py::module& m) {
    py::class_<SparseDtmc<RationalFunction>, std::shared_ptr<SparseDtmc<RationalFunction>>>(m, "SparseParametricDtmc", "pDTMC in sparse representation", modelRatFunc)
        .def("__str__", &getModelInfoPrinter)
    ;
    py::class_<SparseMdp<RationalFunction>, std::shared_ptr<SparseMdp<RationalFunction>>>(m, "SparseParametricMdp", "pMDP in sparse representation", modelRatFunc)
        .def_property_readonly("nondeterministic_choice_indices", [](SparseMdp<RationalFunction> const& mdp) { return mdp.getNondeterministicChoiceIndices(); })
    py::class_<SparseMdp<RationalFunction>, std::shared_ptr<SparseMdp<RationalFunction>>> pmdp(m, "SparseParametricMdp", "pMDP in sparse representation", modelRatFunc);
    pmdp.def_property_readonly("nondeterministic_choice_indices", [](SparseMdp<RationalFunction> const& mdp) { return mdp.getNondeterministicChoiceIndices(); })
        .def("apply_scheduler", [](SparseMdp<RationalFunction> const& mdp, storm::storage::Scheduler<RationalFunction> const& scheduler, bool dropUnreachableStates) { return mdp.applyScheduler(scheduler, dropUnreachableStates); }, "apply scheduler", "scheduler"_a, "drop_unreachable_states"_a = true)
        .def("__str__", &getModelInfoPrinter)
    ;
    py::class_<SparsePomdp<RationalFunction>, std::shared_ptr<SparsePomdp<RationalFunction>>>(m, "SparseParametricPomdp", "POMDP in sparse representation", pmdp)
        .def(py::init<SparsePomdp<RationalFunction>>(), py::arg("other_model"))
        .def("__str__", &getModelInfoPrinter)
        .def("get_observation", &SparsePomdp<RationalFunction>::getObservation, py::arg("state"))
        .def_property_readonly("observations", &SparsePomdp<RationalFunction>::getObservations)
        .def_property_readonly("nr_observations", &SparsePomdp<RationalFunction>::getNrObservations)
    ;
    py::class_<SparseCtmc<RationalFunction>, std::shared_ptr<SparseCtmc<RationalFunction>>>(m, "SparseParametricCtmc", "pCTMC in sparse representation", modelRatFunc)
        .def("__str__", &getModelInfoPrinter)
    ;
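The new SparseParametricPomdp class mirrors the observation accessors of the non-parametric POMDP bindings on the Python side; a sketch (not part of this diff; building as above):

import stormpy
import stormpy.examples.files

program = stormpy.parse_prism_program(stormpy.examples.files.prism_par_pomdp_maze)
model = stormpy.build_parametric_model(program)  # SparseParametricPomdp

# Observation accessors exposed by the new bindings.
print(model.nr_observations)     # number of distinct observations
print(model.get_observation(0))  # observation index of state 0
print(len(model.observations))   # one entry per state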
