Sebastian Junges
6 years ago
5 changed files with 199 additions and 0 deletions
1    doc/source/advanced_topics.rst
30   examples/exploration/01-exploration.py
38   examples/exploration/02-exploration.py
4    lib/stormpy/examples/files.py
126  lib/stormpy/examples/files/mdp/maze_2.nm
examples/exploration/01-exploration.py
@@ -0,0 +1,30 @@
import stormpy
import stormpy.core

import stormpy.examples
import stormpy.examples.files


def example_exploration_01():
    """
    Example of exploring MDPs.
    :return:
    """
    program = stormpy.parse_prism_program(stormpy.examples.files.prism_mdp_maze)
    prop = "R=? [F \"goal\"]"
    properties = stormpy.parse_properties_for_prism_program(prop, program, None)
    model = stormpy.build_model(program, properties)
    print(model.model_type)

    for state in model.states:
        if state.id in model.initial_states:
            print(state)
        for action in state.actions:
            for transition in action.transitions:
                print("From state {} by action {}, with probability {}, go to state {}".format(
                    state, action, transition.value(), transition.column))


if __name__ == '__main__':
    example_exploration_01()
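As a side note: the attributes used above (state.id, state.actions, action.transitions, transition.column) are already enough for a full reachability walk over the model. The helper below is only an illustrative sketch (the name reachable_states is hypothetical, not part of stormpy):

def reachable_states(model):
    # Collect the successor ids of every state in one pass over the
    # data structure, then run a breadth-first search from the initial states.
    successors = {state.id: [transition.column
                             for action in state.actions
                             for transition in action.transitions]
                  for state in model.states}
    seen = set(model.initial_states)
    queue = list(model.initial_states)
    while queue:
        current = queue.pop(0)
        for successor in successors[current]:
            if successor not in seen:
                seen.add(successor)
                queue.append(successor)
    return seen

For the maze model every state can be reached from the initial state, so len(reachable_states(model)) should equal model.nr_states.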
examples/exploration/02-exploration.py
@@ -0,0 +1,38 @@
import stormpy
import stormpy.core

import stormpy.examples
import stormpy.examples.files


def example_exploration_02():
    """
    Example of exploring POMDPs.
    :return:
    """
    program = stormpy.parse_prism_program(stormpy.examples.files.prism_pomdp_maze)
    prop = "R=? [F \"goal\"]"
    properties = stormpy.parse_properties_for_prism_program(prop, program, None)
    model = stormpy.build_model(program, properties)
    print(model.model_type)
    # Internally, POMDPs are just MDPs with additional observation information.
    # Thus, data structure exploration for MDPs can be applied as before.
    initial_state = model.initial_states[0]

    for state in model.states:
        if state.id in model.initial_states:
            print(state)
        for action in state.actions:
            for transition in action.transitions:
                print("From state {} by action {}, with probability {}, go to state {}".format(
                    state, action, transition.value(), transition.column))

    print(model.nr_observations)

    for state in model.states:
        print("State {} has observation id {}".format(state.id, model.observations[state.id]))


if __name__ == '__main__':
    example_exploration_02()
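Since model.observations maps each state id to its observation id, the per-state printout above can also be inverted into groups of observation-equivalent states. A minimal sketch (states_per_observation is a hypothetical helper, not a stormpy function):

from collections import defaultdict

def states_per_observation(model):
    # Invert model.observations: observation id -> list of state ids
    # that share this observation.
    grouping = defaultdict(list)
    for state in model.states:
        grouping[model.observations[state.id]].append(state.id)
    return grouping

The number of groups should then match the value of model.nr_observations printed above.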
lib/stormpy/examples/files/mdp/maze_2.nm
@@ -0,0 +1,126 @@

// maze example (POMDP)
// slightly extends that presented in
// Littman, Cassandra and Kaelbling
// Learning policies for partially observable environments: Scaling up
// Technical Report CS, Brown University
// gxn 29/01/16
// Made into an MDP for documentation of stormpy.

// state space (value of variable "s")

//  0  1  2  3  4
//  5     6     7
//  8     9    10
// 11    13    12

// 13 is the target

mdp


module maze

	s : [-1..13];

	// initialisation
	[] s=-1 -> 1/13 : (s'=0)
			+ 1/13 : (s'=1)
			+ 1/13 : (s'=2)
			+ 1/13 : (s'=3)
			+ 1/13 : (s'=4)
			+ 1/13 : (s'=5)
			+ 1/13 : (s'=6)
			+ 1/13 : (s'=7)
			+ 1/13 : (s'=8)
			+ 1/13 : (s'=9)
			+ 1/13 : (s'=10)
			+ 1/13 : (s'=11)
			+ 1/13 : (s'=12);

	// moving around the maze

	[east] s=0 -> (s'=1);
	[west] s=0 -> (s'=0);
	[north] s=0 -> (s'=0);
	[south] s=0 -> (s'=5);

	[east] s=1 -> (s'=2);
	[west] s=1 -> (s'=0);
	[north] s=1 -> (s'=1);
	[south] s=1 -> (s'=1);

	[east] s=2 -> (s'=3);
	[west] s=2 -> (s'=1);
	[north] s=2 -> (s'=2);
	[south] s=2 -> (s'=6);

	[east] s=3 -> (s'=4);
	[west] s=3 -> (s'=2);
	[north] s=3 -> (s'=3);
	[south] s=3 -> (s'=3);

	[east] s=4 -> (s'=4);
	[west] s=4 -> (s'=3);
	[north] s=4 -> (s'=4);
	[south] s=4 -> (s'=7);

	[east] s=5 -> (s'=5);
	[west] s=5 -> (s'=5);
	[north] s=5 -> (s'=0);
	[south] s=5 -> (s'=8);

	[east] s=6 -> (s'=6);
	[west] s=6 -> (s'=6);
	[north] s=6 -> (s'=2);
	[south] s=6 -> (s'=9);

	[east] s=7 -> (s'=7);
	[west] s=7 -> (s'=7);
	[north] s=7 -> (s'=4);
	[south] s=7 -> (s'=10);

	[east] s=8 -> (s'=8);
	[west] s=8 -> (s'=8);
	[north] s=8 -> (s'=5);
	[south] s=8 -> (s'=11);

	[east] s=9 -> (s'=9);
	[west] s=9 -> (s'=9);
	[north] s=9 -> (s'=6);
	[south] s=9 -> (s'=13);

	[east] s=10 -> (s'=10);
	[west] s=10 -> (s'=10);
	[north] s=10 -> (s'=7);
	[south] s=10 -> (s'=12);

	[east] s=11 -> (s'=11);
	[west] s=11 -> (s'=11);
	[north] s=11 -> (s'=8);
	[south] s=11 -> (s'=11);

	[east] s=12 -> (s'=12);
	[west] s=12 -> (s'=12);
	[north] s=12 -> (s'=10);
	[south] s=12 -> (s'=12);

	// loop when we reach the target
	[done] s=13 -> true;

endmodule

// reward structure (number of steps to reach the target)
rewards

	[east] true : 1;
	[west] true : 1;
	[north] true : 1;
	[south] true : 1;

endrewards

// target observation
label "goal" = s=13;
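To see how this file surfaces on the stormpy side, it can be built exactly as in the exploration examples and its label and reward structure inspected. The sketch below assumes the file is registered in stormpy.examples.files as prism_mdp_maze (matching 01-exploration.py above) and relies on stormpy's standard labeling and reward-model accessors (labeling.get_states, reward_models, has_state_action_rewards):

import stormpy
import stormpy.examples
import stormpy.examples.files

program = stormpy.parse_prism_program(stormpy.examples.files.prism_mdp_maze)
properties = stormpy.parse_properties_for_prism_program("R=? [F \"goal\"]", program, None)
model = stormpy.build_model(program, properties)

# The label block at the end of the file becomes a state label on the model;
# only the target (s=13) should carry it.
print(model.labeling.get_states("goal"))

# The unnamed rewards ... endrewards block becomes a reward model (with empty name)
# whose step costs are attached to state-action pairs.
for name in model.reward_models:
    print("reward model '{}', state-action rewards: {}".format(
        name, model.reward_models[name].has_state_action_rewards))

Note that when building with properties, typically only the reward models referenced by those properties are kept on the resulting model.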