From db6a247fcc4b78b65e21fceab748f0c88a5a63d1 Mon Sep 17 00:00:00 2001
From: hannah
Date: Fri, 26 Jun 2020 21:04:29 +0200
Subject: [PATCH] completed tests for ma, mdp, pomdp

---
 tests/storage/test_model_components.py | 334 ++++++++++++++++++++++++-
 1 file changed, 324 insertions(+), 10 deletions(-)

diff --git a/tests/storage/test_model_components.py b/tests/storage/test_model_components.py
index 3359a3f..2a362d3 100644
--- a/tests/storage/test_model_components.py
+++ b/tests/storage/test_model_components.py
@@ -16,10 +16,7 @@ class TestSparseModelComponents:
         assert components.player1_matrix is None
         assert not components.rate_transitions
 
-        # todo mdp
-        # todo pomdp?
-
-    def test_build_dtmc_from_model_components(self):
+    def test_build_dtmc(self):
         nr_states = 13
         nr_choices = 13
 
@@ -148,7 +145,7 @@ class TestSparseModelComponents:
         assert dtmc.choice_origins.get_number_of_identifiers() == 9
 
     @numpy_avail
-    def test_build_ctmc_from_model_components(self):
+    def test_build_ctmc(self):
         import numpy as np
 
         nr_states = 12
@@ -266,14 +263,12 @@ class TestSparseModelComponents:
         assert not ctmc.reward_models["served"].has_state_rewards
         assert ctmc.reward_models["served"].has_state_action_rewards
         assert ctmc.reward_models["served"].state_action_rewards == [0.0, 0.0, 0.0, 0.0, 0.0, 0.6666666666666666, 0.0,
-                                                                     0.0, 1.0, 0.0,
-                                                                     0.0, 0.0]
+                                                                     0.0, 1.0, 0.0, 0.0, 0.0]
         assert not ctmc.reward_models["served"].has_transition_rewards
 
         assert ctmc.reward_models["waiting"].has_state_rewards
         assert not ctmc.reward_models["waiting"].has_state_action_rewards
-        assert ctmc.reward_models["waiting"].state_rewards == [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0,
-                                                               1.0]
+        assert ctmc.reward_models["waiting"].state_rewards == [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0]
         assert not ctmc.reward_models["waiting"].has_transition_rewards
 
         # Test choice_labeling
@@ -303,7 +298,7 @@ class TestSparseModelComponents:
         # Test exit_rates
         assert ctmc.exit_rates == [201.0, 200.5, 200.5, 201.0, 200.0, 1.5, 200.5, 200.5, 1.0, 200.0, 1.5, 1.0]
 
-    def test_build_ma_from_model_components(self):
+    def test_build_ma(self):
         nr_states = 5
         nr_choices = 10
 
@@ -454,3 +449,322 @@ class TestSparseModelComponents:
 
         # Test markovian states
         assert ma.markovian_states == stormpy.BitVector(5, [0, 1, 2, 3, 4])
+
+    def test_build_mdp(self):
+        nr_states = 13
+        nr_choices = 14
+
+        # Build transition matrix
+        builder = stormpy.SparseMatrixBuilder(rows=0, columns=0, entries=0, force_dimensions=False,
+                                              has_custom_row_grouping=True, row_groups=0)
+
+        # Row group, state 0
+        builder.new_row_group(0)
+        builder.add_next_value(0, 1, 0.5)
+        builder.add_next_value(0, 2, 0.5)
+        builder.add_next_value(1, 1, 0.2)
+        builder.add_next_value(1, 2, 0.8)
+        # Row group, state 1
+        builder.new_row_group(2)
+        builder.add_next_value(2, 3, 0.5)
+        builder.add_next_value(2, 4, 0.5)
+        # Row group, state 2
+        builder.new_row_group(3)
+        builder.add_next_value(3, 5, 0.5)
+        builder.add_next_value(3, 6, 0.5)
+        # Row group, state 3
+        builder.new_row_group(4)
+        builder.add_next_value(4, 7, 0.5)
+        builder.add_next_value(4, 1, 0.5)
+        # Row group, state 4
+        builder.new_row_group(5)
+        builder.add_next_value(5, 8, 0.5)
+        builder.add_next_value(5, 9, 0.5)
+        # Row group, state 5
+        builder.new_row_group(6)
+        builder.add_next_value(6, 10, 0.5)
+        builder.add_next_value(6, 11, 0.5)
+        # Row group, state 6
+        builder.new_row_group(7)
+        builder.add_next_value(7, 2, 0.5)
+        builder.add_next_value(7, 12, 0.5)
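+        # new_row_group(r) opens the row group of the next state at row r:
+        # state 0 owns rows 0-1 (two nondeterministic actions), states 1-6
+        # own one row each, and the loop below adds rows 8-13, a single
+        # self-loop choice for each of the absorbing states 7-12.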
+        # final states
+        for s in range(8, 14):
+            builder.new_row_group(s)
+            builder.add_next_value(s, s - 1, 1)
+
+        transition_matrix = builder.build(nr_choices, nr_states)
+
+        # state_labeling
+        state_labeling = stormpy.storage.StateLabeling(nr_states)
+        labels = {'init', 'one', 'two', 'three', 'four', 'five', 'six', 'done', 'deadlock'}
+        for label in labels:
+            state_labeling.add_label(label)
+        state_labeling.add_label_to_state('init', 0)
+        state_labeling.add_label_to_state('one', 7)
+        state_labeling.add_label_to_state('two', 8)
+        state_labeling.add_label_to_state('three', 9)
+        state_labeling.add_label_to_state('four', 10)
+        state_labeling.add_label_to_state('five', 11)
+        state_labeling.add_label_to_state('six', 12)
+
+        state_labeling.set_states('done', stormpy.BitVector(nr_states, [7, 8, 9, 10, 11, 12]))
+
+        # reward_models
+        reward_models = {}
+        # Vector representing the state-action rewards
+        action_reward = [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+        reward_models['coin_flips'] = stormpy.SparseRewardModel(optional_state_action_reward_vector=action_reward)
+
+        # choice_labeling
+        choice_labeling = stormpy.storage.ChoiceLabeling(nr_choices)
+        choice_labels = {'a', 'b'}
+        for label in choice_labels:
+            choice_labeling.add_label(label)
+        choice_labeling.add_label_to_choice('a', 0)
+        choice_labeling.add_label_to_choice('b', 1)
+
+        # state_valuations
+        manager = stormpy.ExpressionManager()
+        var_s = manager.create_integer_variable(name='s')
+        var_d = manager.create_integer_variable(name='d')
+        v_builder = stormpy.StateValuationsBuilder()
+        v_builder.add_variable(var_s)
+        v_builder.add_variable(var_d)
+        for s in range(7):
+            # values: vector [value for s, value for d]
+            v_builder.add_state(state=s, boolean_values=[], integer_values=[s, 0], rational_values=[])
+        for s in range(7, 13):
+            v_builder.add_state(state=s, boolean_values=[], integer_values=[7, s - 6], rational_values=[])
+        state_valuations = v_builder.build(nr_states)
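+
+        # Each of the 14 matrix rows is mapped to an origin identifier below;
+        # identifier 0 is reserved for choices without an origin, and
+        # identifiers 1-9 each carry the set of PRISM command indices that
+        # generated the corresponding choice.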
mdp.reward_models["coin_flips"].has_state_action_rewards + for reward in mdp.reward_models["coin_flips"].state_action_rewards: + assert reward == 1.0 or reward == 0.0 + assert not mdp.reward_models["coin_flips"].has_transition_rewards + + # Test choice_labeling + assert mdp.has_choice_labeling() + assert mdp.choice_labeling.get_labels() == {'a', 'b'} + + # Test state_valuations + assert mdp.has_state_valuations() + assert mdp.state_valuations + value_s = [None] * nr_states + value_d = [None] * nr_states + for s in range(0, mdp.nr_states): + value_s[s] = mdp.state_valuations.get_integer_value(s, var_s) + value_d[s] = mdp.state_valuations.get_integer_value(s, var_d) + assert value_s == [0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7] + assert value_d == [0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6] + + # Test choice_origins + assert mdp.has_choice_origins() + assert mdp.choice_origins is components.choice_origins + assert mdp.choice_origins.get_number_of_identifiers() == 10 + + @numpy_avail + def test_build_pomdp(self): + import numpy as np + nr_states = 10 + nr_choices = 34 + + # Build transition matrix + builder = stormpy.SparseMatrixBuilder(rows=0, columns=0, entries=0, force_dimensions=False, + has_custom_row_grouping=True, row_groups=0) + + transitions = np.array([ + [0, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0], + + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]) + + transition_matrix = stormpy.build_sparse_matrix(transitions, + row_group_indices=[0, 1, 5, 9, 13, 17, 21, 25, 29, 33]) + + # state_labeling + state_labeling = stormpy.storage.StateLabeling(nr_states) + labels = {'deadlock', 'goal', 'init'} + for label in labels: + state_labeling.add_label(label) + state_labeling.add_label_to_state('init', 0) + state_labeling.add_label_to_state('goal', 9) + + # reward_models + reward_models = {} + # Vector representing state-action rewards + action_reward = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0] + reward_models[''] = stormpy.SparseRewardModel(optional_state_action_reward_vector=action_reward) + + # choice_labeling + choice_labeling = stormpy.storage.ChoiceLabeling(nr_choices) + choice_labels = {'south', 'north', 'west', 'east', 'done'} + for label in choice_labels: + choice_labeling.add_label(label) + choice_labeling.set_choices('south', stormpy.BitVector(nr_choices, [4, 8, 12, 16, 20, 24, 
+        transition_matrix = stormpy.build_sparse_matrix(transitions,
+                                                        row_group_indices=[0, 1, 5, 9, 13, 17, 21, 25, 29, 33])
+
+        # state_labeling
+        state_labeling = stormpy.storage.StateLabeling(nr_states)
+        labels = {'deadlock', 'goal', 'init'}
+        for label in labels:
+            state_labeling.add_label(label)
+        state_labeling.add_label_to_state('init', 0)
+        state_labeling.add_label_to_state('goal', 9)
+
+        # reward_models
+        reward_models = {}
+        # Vector representing state-action rewards
+        action_reward = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+                         1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]
+        reward_models[''] = stormpy.SparseRewardModel(optional_state_action_reward_vector=action_reward)
+
+        # choice_labeling
+        choice_labeling = stormpy.storage.ChoiceLabeling(nr_choices)
+        choice_labels = {'south', 'north', 'west', 'east', 'done'}
+        for label in choice_labels:
+            choice_labeling.add_label(label)
+        choice_labeling.set_choices('south', stormpy.BitVector(nr_choices, [4, 8, 12, 16, 20, 24, 28, 32]))
+        choice_labeling.set_choices('north', stormpy.BitVector(nr_choices, [3, 7, 11, 15, 19, 23, 27, 31]))
+        choice_labeling.set_choices('west', stormpy.BitVector(nr_choices, [2, 6, 10, 14, 18, 22, 26, 30]))
+        choice_labeling.set_choices('east', stormpy.BitVector(nr_choices, [1, 5, 9, 13, 17, 21, 25, 29]))
+        choice_labeling.set_choices('done', stormpy.BitVector(nr_choices, [33]))
+
+        # state_valuations
+        manager = stormpy.ExpressionManager()
+        var_x = manager.create_integer_variable(name='x')
+        var_y = manager.create_integer_variable(name='y')
+        var_o = manager.create_integer_variable(name='o')
+        v_builder = stormpy.StateValuationsBuilder()
+
+        v_builder.add_variable(var_x)
+        v_builder.add_variable(var_y)
+        v_builder.add_variable(var_o)
+
+        v_builder.add_state(state=0, boolean_values=[], integer_values=[0, 0, 0], rational_values=[])
+        v_builder.add_state(state=1, boolean_values=[], integer_values=[0, 0, 1], rational_values=[])
+        v_builder.add_state(state=2, boolean_values=[], integer_values=[0, 1, 1], rational_values=[])
+        v_builder.add_state(state=3, boolean_values=[], integer_values=[0, 2, 1], rational_values=[])
+        v_builder.add_state(state=4, boolean_values=[], integer_values=[1, 0, 1], rational_values=[])
+        v_builder.add_state(state=5, boolean_values=[], integer_values=[1, 1, 1], rational_values=[])
+        v_builder.add_state(state=6, boolean_values=[], integer_values=[1, 2, 1], rational_values=[])
+        v_builder.add_state(state=7, boolean_values=[], integer_values=[2, 1, 1], rational_values=[])
+        v_builder.add_state(state=8, boolean_values=[], integer_values=[2, 2, 1], rational_values=[])
+        v_builder.add_state(state=9, boolean_values=[], integer_values=[2, 0, 2], rational_values=[])
+
+        state_valuations = v_builder.build(nr_states)
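+
+        # One observability class per state: only the initial state (class 1)
+        # and the goal state (class 2) are distinguishable; the eight interior
+        # grid cells all share class 0.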
+        observations = [1, 0, 0, 0, 0, 0, 0, 0, 0, 2]
+
+        # Build components, set rate_transitions to False
+        components = stormpy.SparseModelComponents(transition_matrix=transition_matrix, state_labeling=state_labeling,
+                                                   reward_models=reward_models, rate_transitions=False)
+        components.state_valuations = state_valuations
+        components.choice_labeling = choice_labeling
+        # components.choice_origins = choice_origins
+        components.observability_classes = observations
+
+        pomdp = stormpy.storage.SparsePomdp(components)
+        assert type(pomdp) is stormpy.SparsePomdp
+        assert not pomdp.supports_parameters
+
+        # Test transition matrix
+        assert pomdp.nr_choices == nr_choices
+        assert pomdp.nr_states == nr_states
+        assert pomdp.nr_transitions == 41
+        for e in pomdp.transition_matrix:
+            assert e.value() == 1 or e.value() == 0 or e.value() == 0.125
+        for state in pomdp.states:
+            assert len(state.actions) <= 4
+
+        # Test state_labeling
+        assert pomdp.labeling.get_labels() == {'init', 'goal', 'deadlock'}
+
+        # Test reward_models
+        assert len(pomdp.reward_models) == 1
+        assert not pomdp.reward_models[''].has_state_rewards
+        assert pomdp.reward_models[''].has_state_action_rewards
+        for reward in pomdp.reward_models[''].state_action_rewards:
+            assert reward == 1.0 or reward == 0.0
+        assert not pomdp.reward_models[''].has_transition_rewards
+
+        # Test choice_labeling
+        assert pomdp.has_choice_labeling()
+        assert pomdp.choice_labeling.get_labels() == {'east', 'west', 'north', 'south', 'done'}
+
+        # Test state_valuations
+        assert pomdp.has_state_valuations()
+        assert pomdp.state_valuations
+        value_x = [None] * nr_states
+        value_y = [None] * nr_states
+        value_o = [None] * nr_states
+        for s in range(0, pomdp.nr_states):
+            value_x[s] = pomdp.state_valuations.get_integer_value(s, var_x)
+            value_y[s] = pomdp.state_valuations.get_integer_value(s, var_y)
+            value_o[s] = pomdp.state_valuations.get_integer_value(s, var_o)
+        assert value_x == [0, 0, 0, 0, 1, 1, 1, 2, 2, 2]
+        assert value_y == [0, 0, 1, 2, 0, 1, 2, 1, 2, 0]
+        assert value_o == [0, 1, 1, 1, 1, 1, 1, 1, 1, 2]
+
+        # Test choice_origins
+        assert not pomdp.has_choice_origins()
+
+        assert pomdp.observations == [1, 0, 0, 0, 0, 0, 0, 0, 0, 2]
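+
+        # Illustrative extra check, assuming stormpy's SparsePomdp exposes
+        # nr_observations for the number of distinct observability classes:
+        assert pomdp.nr_observations == 3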