From b2b647203b3d3847325607fa135fb74b04c88ff3 Mon Sep 17 00:00:00 2001
From: Sebastian Junges <sebastian.junges@rwth-aachen.de>
Date: Tue, 2 Oct 2018 18:51:37 +0200
Subject: [PATCH] add pomdp support to stormpy

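Expose storm's sparse POMDP models to Python: a POMDP value in ModelType,
a SparsePomdp class exposing observations and nr_observations, and the
matching branch in _convert_sparse_model. A small maze POMDP
(maze_2.prism) is added as an example file and exercised in a new test.

A minimal usage sketch, assuming stormpy is built against a storm version
that ships storm/models/sparse/Pomdp.h and that the example file added by
this patch is available at its in-repo path:

    import stormpy

    # parse the PRISM POMDP and build it for a reachability property
    program = stormpy.parse_prism_program(
        "lib/stormpy/examples/files/pomdp/maze_2.prism")
    properties = stormpy.parse_properties_for_prism_program(
        'P=? [F "goal"]', program)
    model = stormpy.build_model(program, properties)

    # the result is a SparsePomdp; every state carries an observation index
    print(model.nr_states, model.nr_observations)
    print(model.observations)  # list of observation indices, one per state
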
---
 examples/reward_models.py                     |   0
 lib/stormpy/__init__.py                       |   2 +
 lib/stormpy/examples/files/pomdp/maze_2.prism | 139 ++++++++++++++++++
 src/storage/model.cpp                         |  11 ++
 tests/storage/test_model.py                   |   8 +
 5 files changed, 160 insertions(+)
 create mode 100644 examples/reward_models.py
 create mode 100644 lib/stormpy/examples/files/pomdp/maze_2.prism

diff --git a/examples/reward_models.py b/examples/reward_models.py
new file mode 100644
index 0000000..e69de29
diff --git a/lib/stormpy/__init__.py b/lib/stormpy/__init__.py
index 9d3829c..72d0e3e 100644
--- a/lib/stormpy/__init__.py
+++ b/lib/stormpy/__init__.py
@@ -48,6 +48,8 @@ def _convert_sparse_model(model, parametric=False):
             return model._as_sparse_dtmc()
         elif model.model_type == ModelType.MDP:
             return model._as_sparse_mdp()
+        elif model.model_type == ModelType.POMDP:
+            return model._as_sparse_pomdp()
         elif model.model_type == ModelType.CTMC:
             return model._as_sparse_ctmc()
         elif model.model_type == ModelType.MA:
diff --git a/lib/stormpy/examples/files/pomdp/maze_2.prism b/lib/stormpy/examples/files/pomdp/maze_2.prism
new file mode 100644
index 0000000..f89d179
--- /dev/null
+++ b/lib/stormpy/examples/files/pomdp/maze_2.prism
@@ -0,0 +1,139 @@
+
+
+// maze example (POMDP)
+// slightly extends that presented in
+// Littman, Cassandra and Kaelbling
+// Learning policies for partially observable environments: Scaling up  
+// Technical Report CS, Brown University
+// gxn 29/01/16
+
+// state space (value of variable "s")
+
+//  0  1  2  3  4
+//  5     6     7
+//  8     9    10
+// 11     13   12
+
+// 13 is the target
+
+pomdp
+
+// can observe the walls and target
+observables
+	o
+endobservables
+// o=0 - observation in initial state
+// o=1 - west and north walls (s0)
+// o=2 - north and south walls (s1 and s3)
+// o=3 - north wall (s2)
+// o=4 - east and north walls (s4)
+// o=5 - east and west walls (s5, s6, s7, s8, s9 and s10)
+// o=6 - east, west and south walls (s11 and s12)
+// o=7 - the target (s13)
+
+module maze
+
+	s : [-1..13];
+	o : [0..7];
+	
+	// initialisation
+	[] s=-1 -> 1/13 : (s'=0) & (o'=1)
+			 + 1/13 : (s'=1) & (o'=2)
+			 + 1/13 : (s'=2) & (o'=3)
+			 + 1/13 : (s'=3) & (o'=2)
+			 + 1/13 : (s'=4) & (o'=4)
+			 + 1/13 : (s'=5) & (o'=5)
+			 + 1/13 : (s'=6) & (o'=5)
+			 + 1/13 : (s'=7) & (o'=5)
+			 + 1/13 : (s'=8) & (o'=5)
+			 + 1/13 : (s'=9) & (o'=5)
+			 + 1/13 : (s'=10) & (o'=5)
+			 + 1/13 : (s'=11) & (o'=6)
+			 + 1/13 : (s'=12) & (o'=6);
+	
+	// moving around the maze
+	
+	[east] s=0 -> (s'=1) & (o'=2);
+	[west] s=0 -> (s'=0);
+	[north] s=0 -> (s'=0);
+	[south] s=0 -> (s'=5) & (o'=5);
+
+	[east] s=1 -> (s'=2) & (o'=3);
+	[west] s=1 -> (s'=0) & (o'=1);
+	[north] s=1 -> (s'=1);
+	[south] s=1 -> (s'=1);
+
+	[east] s=2 -> (s'=3) & (o'=2);
+	[west] s=2 -> (s'=1) & (o'=2);
+	[north] s=2 -> (s'=2);
+	[south] s=2 -> (s'=6) & (o'=5);
+
+	[east] s=3 -> (s'=4) & (o'=4);
+	[west] s=3 -> (s'=2) & (o'=2);
+	[north] s=3 -> (s'=3);
+	[south] s=3 -> (s'=3);
+
+	[east] s=4 -> (s'=4);
+	[west] s=4 -> (s'=3) & (o'=2);
+	[north] s=4 -> (s'=4);
+	[south] s=4 -> (s'=7) & (o'=5);
+
+	[east] s=5 -> (s'=5);
+	[west] s=5 -> (s'=5);
+	[north] s=5 -> (s'=0) & (o'=1);
+	[south] s=5 -> (s'=8);
+
+	[east] s=6 -> (s'=6);
+	[west] s=6 -> (s'=6);
+	[north] s=6 -> (s'=2) & (o'=3);
+	[south] s=6 -> (s'=9);
+
+	[east] s=7 -> (s'=7);
+	[west] s=7 -> (s'=7);
+	[north] s=7 -> (s'=4) & (o'=4);
+	[south] s=7 -> (s'=10);
+
+	[east] s=8 -> (s'=8);
+	[west] s=8 -> (s'=8);
+	[north] s=8 -> (s'=5);
+	[south] s=8 -> (s'=11) & (o'=6);
+
+	[east] s=9 -> (s'=9);
+	[west] s=9 -> (s'=9);
+	[north] s=9 -> (s'=6);
+	[south] s=9 -> (s'=13) & (o'=7);
+
+	[east] s=10 -> (s'=9);
+	[west] s=10 -> (s'=9);
+	[north] s=10 -> (s'=7);
+	[south] s=10 -> (s'=12) & (o'=6);
+
+	[east] s=11 -> (s'=11);
+	[west] s=11 -> (s'=11);
+	[north] s=11 -> (s'=8) & (o'=5);
+	[south] s=11 -> (s'=11);
+
+	[east] s=12 -> (s'=12);
+	[west] s=12 -> (s'=12);
+	[north] s=12 -> (s'=10) & (o'=5);
+	[south] s=12 -> (s'=12);
+
+	// loop when we reach the target
+	[done] s=13 -> true;
+
+endmodule
+
+// reward structure (number of steps to reach the target)
+rewards
+
+	[east] true : 1;
+	[west] true : 1;
+	[north] true : 1;
+	[south] true : 1;
+
+endrewards
+
+// target observation
+label "goal" = o=7;
+
+
diff --git a/src/storage/model.cpp b/src/storage/model.cpp
index 7844dce..a9c966a 100644
--- a/src/storage/model.cpp
+++ b/src/storage/model.cpp
@@ -5,6 +5,7 @@
 #include "storm/models/sparse/Model.h"
 #include "storm/models/sparse/Dtmc.h"
 #include "storm/models/sparse/Mdp.h"
+#include "storm/models/sparse/Pomdp.h"
 #include "storm/models/sparse/Ctmc.h"
 #include "storm/models/sparse/MarkovAutomaton.h"
 #include "storm/models/sparse/StandardRewardModel.h"
@@ -26,6 +27,7 @@ using ModelBase = storm::models::ModelBase;
 template<typename ValueType> using SparseModel = storm::models::sparse::Model<ValueType>;
 template<typename ValueType> using SparseDtmc = storm::models::sparse::Dtmc<ValueType>;
 template<typename ValueType> using SparseMdp = storm::models::sparse::Mdp<ValueType>;
+template<typename ValueType> using SparsePomdp = storm::models::sparse::Pomdp<ValueType>;
 template<typename ValueType> using SparseCtmc = storm::models::sparse::Ctmc<ValueType>;
 template<typename ValueType> using SparseMarkovAutomaton = storm::models::sparse::MarkovAutomaton<ValueType>;
 template<typename ValueType> using SparseRewardModel = storm::models::sparse::StandardRewardModel<ValueType>;
@@ -94,6 +96,7 @@ void define_model(py::module& m) {
     py::enum_<storm::models::ModelType>(m, "ModelType", "Type of the model")
         .value("DTMC", storm::models::ModelType::Dtmc)
         .value("MDP", storm::models::ModelType::Mdp)
+        .value("POMDP", storm::models::ModelType::Pomdp)
         .value("CTMC", storm::models::ModelType::Ctmc)
         .value("MA", storm::models::ModelType::MarkovAutomaton)
     ;
@@ -118,6 +121,9 @@ void define_model(py::module& m) {
         .def("_as_sparse_pmdp", [](ModelBase &modelbase) {
                 return modelbase.as<SparseMdp<RationalFunction>>();
             }, "Get model as sparse pMDP")
+        .def("_as_sparse_pomdp", [](ModelBase &modelbase) {
+                return modelbase.as<SparsePomdp<double>>();
+            }, "Get model as sparse POMDP")
         .def("_as_sparse_ctmc", [](ModelBase &modelbase) {
                 return modelbase.as<SparseCtmc<double>>();
             }, "Get model as sparse CTMC")
@@ -184,6 +190,11 @@ void define_sparse_model(py::module& m) {
     py::class_<SparseMdp<double>, std::shared_ptr<SparseMdp<double>>>(m, "SparseMdp", "MDP in sparse representation", model)
         .def("__str__", getModelInfoPrinter<double>("MDP"))
     ;
+    py::class_<SparsePomdp<double>, std::shared_ptr<SparsePomdp<double>>>(m, "SparsePomdp", "POMDP in sparse representation", model)
+        .def("__str__", getModelInfoPrinter<double>("POMDP"))
+        .def_property_readonly("observations", &SparsePomdp<double>::getObservations, "Observation indices, one per state")
+        .def_property_readonly("nr_observations", &SparsePomdp<double>::getNrObservations, "Number of distinct observations")
+    ;
     py::class_<SparseCtmc<double>, std::shared_ptr<SparseCtmc<double>>>(m, "SparseCtmc", "CTMC in sparse representation", model)
         .def("__str__", getModelInfoPrinter<double>("CTMC"))
     ;
diff --git a/tests/storage/test_model.py b/tests/storage/test_model.py
index 2598e3b..532f88c 100644
--- a/tests/storage/test_model.py
+++ b/tests/storage/test_model.py
@@ -106,6 +106,14 @@ class TestSparseModel:
         assert not model.supports_parameters
         assert type(model) is stormpy.SparseCtmc
 
+    def test_build_pomdp(self):
+        program = stormpy.parse_prism_program(get_example_path("pomdp", "maze_2.prism"))
+        formulas = stormpy.parse_properties_for_prism_program("P=? [F \"goal\"]", program)
+        model = stormpy.build_model(program, formulas)
+        assert model.nr_states == 16
+        assert model.nr_observations == 8
+
+
     def test_build_ma(self):
         program = stormpy.parse_prism_program(get_example_path("ma", "simple.ma"))
         formulas = stormpy.parse_properties_for_prism_program("P=? [ F<=2 s=2 ]", program)