
fixing some bisimulation issues

tempestpy_adaptions
dehnert 7 years ago
parent commit 2a209d18e1
9 changed files:
  1. src/storm/storage/Distribution.h (1 line changed)
  2. src/storm/storage/DistributionWithReward.cpp (53 lines changed)
  3. src/storm/storage/DistributionWithReward.h (53 lines changed)
  4. src/storm/storage/bisimulation/BisimulationDecomposition.cpp (22 lines changed)
  5. src/storm/storage/bisimulation/BisimulationDecomposition.h (11 lines changed)
  6. src/storm/storage/bisimulation/DeterministicModelBisimulationDecomposition.cpp (18 lines changed)
  7. src/storm/storage/bisimulation/NondeterministicModelBisimulationDecomposition.cpp (44 lines changed)
  8. src/storm/storage/bisimulation/NondeterministicModelBisimulationDecomposition.h (6 lines changed)
  9. src/storm/storage/dd/bisimulation/InternalCuddSignatureRefiner.cpp (6 lines changed)

src/storm/storage/Distribution.h (1 line changed)

@@ -132,7 +132,6 @@ namespace storm {
            bool less(Distribution<ValueType, StateType> const& other, storm::utility::ConstantsComparator<ValueType> const& comparator) const;

            /*!
             * Returns the probability of the given state
             * @param state The state for which the probability is returned.

src/storm/storage/DistributionWithReward.cpp (53 lines changed)

@@ -0,0 +1,53 @@
#include "storm/storage/DistributionWithReward.h"

#include "storm/adapters/RationalFunctionAdapter.h"
#include "storm/utility/ConstantsComparator.h"

namespace storm {
    namespace storage {

        template<typename ValueType, typename StateType>
        DistributionWithReward<ValueType, StateType>::DistributionWithReward(ValueType const& reward) : Distribution<ValueType, StateType>(), reward(reward) {
            // Intentionally left empty.
        }

        template<typename ValueType, typename StateType>
        bool DistributionWithReward<ValueType, StateType>::equals(DistributionWithReward<ValueType, StateType> const& other, storm::utility::ConstantsComparator<ValueType> const& comparator) const {
            if (this->reward != other.reward) {
                return false;
            }
            return Distribution<ValueType, StateType>::equals(other, comparator);
        }

        template<typename ValueType, typename StateType>
        bool DistributionWithReward<ValueType, StateType>::less(DistributionWithReward<ValueType, StateType> const& other, storm::utility::ConstantsComparator<ValueType> const& comparator) const {
            if (comparator.isLess(this->reward, other.reward)) {
                return true;
            } else if (comparator.isLess(other.reward, this->reward)) {
                return false;
            } else {
                return Distribution<ValueType, StateType>::less(other, comparator);
            }
        }

        template<typename ValueType, typename StateType>
        void DistributionWithReward<ValueType, StateType>::setReward(ValueType const& reward) {
            this->reward = reward;
        }

        template<typename ValueType, typename StateType>
        ValueType const& DistributionWithReward<ValueType, StateType>::getReward() const {
            return reward;
        }

        template class DistributionWithReward<double>;

#ifdef STORM_HAVE_CARL
        template class DistributionWithReward<storm::RationalNumber>;
        template class DistributionWithReward<storm::RationalFunction>;
#endif
    }
}

src/storm/storage/DistributionWithReward.h (53 lines changed)

@@ -0,0 +1,53 @@
#pragma once

#include "storm/storage/Distribution.h"

#include "storm/utility/constants.h"

namespace storm {
    namespace utility {
        template <typename ValueType>
        class ConstantsComparator;
    }

    namespace storage {

        template<typename ValueType, typename StateType = uint32_t>
        class DistributionWithReward : public Distribution<ValueType, StateType> {
        public:
            /*!
             * Creates an empty distribution.
             */
            DistributionWithReward(ValueType const& reward = storm::utility::zero<ValueType>());

            DistributionWithReward(DistributionWithReward const& other) = default;
            DistributionWithReward& operator=(DistributionWithReward const& other) = default;
            DistributionWithReward(DistributionWithReward&& other) = default;
            DistributionWithReward& operator=(DistributionWithReward&& other) = default;

            /*!
             * Checks whether the two distributions specify the same probabilities to go to the same states.
             *
             * @param other The distribution with which the current distribution is to be compared.
             * @return True iff the two distributions are equal.
             */
            bool equals(DistributionWithReward<ValueType, StateType> const& other, storm::utility::ConstantsComparator<ValueType> const& comparator = storm::utility::ConstantsComparator<ValueType>()) const;

            bool less(DistributionWithReward<ValueType, StateType> const& other, storm::utility::ConstantsComparator<ValueType> const& comparator) const;

            /*!
             * Sets the reward of this distribution.
             */
            void setReward(ValueType const& reward);

            /*!
             * Retrieves the reward of this distribution.
             */
            ValueType const& getReward() const;

        private:
            ValueType reward;
        };
    }
}
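A minimal usage sketch of the new class, assuming the two files above are on the include path (hypothetical example; the variables a and b are invented): two choices with identical successor distributions but different rewards are neither equal nor unordered, because equals and less consult the reward before delegating to Distribution.

#include "storm/storage/DistributionWithReward.h"
#include "storm/utility/ConstantsComparator.h"

int main() {
    // Same distribution over target states 0 and 1 ...
    storm::storage::DistributionWithReward<double> a;
    storm::storage::DistributionWithReward<double> b;
    a.addProbability(0, 0.5);
    a.addProbability(1, 0.5);
    b.addProbability(0, 0.5);
    b.addProbability(1, 0.5);
    // ... but different action rewards.
    a.setReward(1.0);
    b.setReward(2.0);

    storm::utility::ConstantsComparator<double> comparator;
    bool sameChoice = a.equals(b, comparator); // false: the rewards differ
    bool ordered = a.less(b, comparator);      // true: the reward is the primary sort key
    return (!sameChoice && ordered) ? 0 : 1;
}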

src/storm/storage/bisimulation/BisimulationDecomposition.cpp (22 lines changed)

@@ -168,7 +168,7 @@ namespace storm {
        template<typename ModelType, typename BlockDataType>
        BisimulationDecomposition<ModelType, BlockDataType>::BisimulationDecomposition(ModelType const& model, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, Options const& options) : model(model), backwardTransitions(backwardTransitions), options(options), partition(), comparator(), quotient(nullptr) {
            STORM_LOG_THROW(!options.getKeepRewards() || !model.hasRewardModel() || model.hasUniqueRewardModel(), storm::exceptions::IllegalFunctionCallException, "Bisimulation currently only supports models with at most one reward model.");
-           STORM_LOG_THROW(!options.getKeepRewards() || !model.hasRewardModel() || model.getUniqueRewardModel().hasOnlyStateRewards(), storm::exceptions::IllegalFunctionCallException, "Bisimulation is currently supported for models with state rewards only. Consider converting the transition rewards to state rewards (via suitable function calls).");
+           STORM_LOG_THROW(!options.getKeepRewards() || !model.hasRewardModel() || !model.getUniqueRewardModel().hasTransitionRewards(), storm::exceptions::IllegalFunctionCallException, "Bisimulation is currently supported for models with state or action rewards only. Consider converting the transition rewards to state rewards (via suitable function calls).");
            STORM_LOG_THROW(options.getType() != BisimulationType::Weak || !options.getBounded(), storm::exceptions::IllegalFunctionCallException, "Weak bisimulation cannot preserve bounded properties.");

            // Fix the respected atomic propositions if they were not explicitly given.
@@ -259,9 +259,19 @@ namespace storm {
        }

        template<typename ModelType, typename BlockDataType>
-       void BisimulationDecomposition<ModelType, BlockDataType>::splitInitialPartitionBasedOnStateRewards() {
-           std::vector<ValueType> const& stateRewardVector = model.getUniqueRewardModel().getStateRewardVector();
-           partition.split([&stateRewardVector] (storm::storage::sparse::state_type const& a, storm::storage::sparse::state_type const& b) { return stateRewardVector[a] < stateRewardVector[b]; });
-       }
+       void BisimulationDecomposition<ModelType, BlockDataType>::splitInitialPartitionBasedOnRewards() {
+           auto const& rewardModel = model.getUniqueRewardModel();
+           if (rewardModel.hasStateRewards()) {
+               this->splitInitialPartitionBasedOnRewards(rewardModel.getStateRewardVector());
+           }
+           if (rewardModel.hasStateActionRewards() && (model.isOfType(storm::models::ModelType::Dtmc) || model.isOfType(storm::models::ModelType::Ctmc))) {
+               this->splitInitialPartitionBasedOnRewards(rewardModel.getStateActionRewardVector());
+           }
+       }
+
+       template<typename ModelType, typename BlockDataType>
+       void BisimulationDecomposition<ModelType, BlockDataType>::splitInitialPartitionBasedOnRewards(std::vector<ValueType> const& rewardVector) {
+           partition.split([&rewardVector] (storm::storage::sparse::state_type const& a, storm::storage::sparse::state_type const& b) { return rewardVector[a] < rewardVector[b]; });
+       }

        template<typename ModelType, typename BlockDataType>
@@ -278,7 +288,7 @@ namespace storm {
            // If the model has state rewards, we need to consider them, because otherwise reward properties are not
            // preserved.
            if (options.getKeepRewards() && model.hasRewardModel()) {
-               this->splitInitialPartitionBasedOnStateRewards();
+               this->splitInitialPartitionBasedOnRewards();
            }
        }
@@ -296,7 +306,7 @@ namespace storm {
            // If the model has state rewards, we need to consider them, because otherwise reward properties are not
            // preserved.
            if (options.getKeepRewards() && model.hasRewardModel()) {
-               this->splitInitialPartitionBasedOnStateRewards();
+               this->splitInitialPartitionBasedOnRewards();
            }
        }
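The new overload refines every block of the initial partition into sub-blocks on which the given reward vector is constant, using the strict order rewardVector[a] < rewardVector[b]. A standalone sketch of that splitting semantics, assuming plain std containers (hypothetical helper; Storm's Partition::split operates on its internal block structures instead):

#include <algorithm>
#include <cstddef>
#include <vector>

// Split one block of states into maximal sub-blocks of equal reward.
std::vector<std::vector<std::size_t>> splitByReward(std::vector<std::size_t> block, std::vector<double> const& rewardVector) {
    std::sort(block.begin(), block.end(), [&rewardVector](std::size_t a, std::size_t b) { return rewardVector[a] < rewardVector[b]; });
    std::vector<std::vector<std::size_t>> subBlocks;
    for (std::size_t state : block) {
        // Open a new sub-block whenever the reward strictly increases.
        if (subBlocks.empty() || rewardVector[subBlocks.back().back()] < rewardVector[state]) {
            subBlocks.emplace_back();
        }
        subBlocks.back().push_back(state);
    }
    return subBlocks;
}

Since state rewards and (for Dtmc/Ctmc) state-action rewards are funneled through the same overload, every initial block ends up reward-uniform with respect to each vector used for splitting.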

src/storm/storage/bisimulation/BisimulationDecomposition.h (11 lines changed)

@@ -250,11 +250,16 @@ namespace storm {
             * @return The states with probability 0 and 1.
             */
            virtual std::pair<storm::storage::BitVector, storm::storage::BitVector> getStatesWithProbability01() = 0;

+           /*!
+            * Splits the initial partition based on the (unique) reward model of the current model.
+            */
+           virtual void splitInitialPartitionBasedOnRewards();

            /*!
-            * Splits the initial partition based on the (unique) state reward vector of the model.
+            * Splits the initial partition based on the given reward vector.
             */
-           virtual void splitInitialPartitionBasedOnStateRewards();
+           virtual void splitInitialPartitionBasedOnRewards(std::vector<ValueType> const& rewardVector);

            /*!
             * Constructs the blocks of the decomposition object based on the current partition.

src/storm/storage/bisimulation/DeterministicModelBisimulationDecomposition.cpp (18 lines changed)

@@ -118,7 +118,6 @@ namespace storm {
            this->initializeSilentProbabilities();
        }

        template<typename ModelType>
        void DeterministicModelBisimulationDecomposition<ModelType>::postProcessInitialPartition() {
            if (this->options.getType() == BisimulationType::Weak && this->model.getType() == storm::models::ModelType::Dtmc) {
@@ -127,14 +126,15 @@ namespace storm {
            if (this->options.getKeepRewards() && this->model.hasRewardModel() && this->options.getType() == BisimulationType::Weak) {
                // For a weak bisimulation that is to preserve reward properties, we have to flag all blocks of states
-               // with non-zero reward as reward blocks to they can be refined wrt. strong bisimulation.
+               // with non-zero reward as reward blocks so they can be refined wrt. strong bisimulation.
-               // Here, we assume that the initial partition already respects state rewards. Therefore, it suffices to
+               // Here, we assume that the initial partition already respects state (and action) rewards. Therefore, it suffices to
                // check the first state of each block for a non-zero reward.
-               std::vector<ValueType> const& stateRewardVector = this->model.getUniqueRewardModel().getStateRewardVector();
+               boost::optional<std::vector<ValueType>> const& optionalStateRewardVector = this->model.getUniqueRewardModel().getOptionalStateRewardVector();
+               boost::optional<std::vector<ValueType>> const& optionalStateActionRewardVector = this->model.getUniqueRewardModel().getOptionalStateActionRewardVector();
                for (auto& block : this->partition.getBlocks()) {
                    auto state = *this->partition.begin(*block);
-                   block->data().setHasRewards(!storm::utility::isZero(stateRewardVector[state]));
+                   block->data().setHasRewards((optionalStateRewardVector && !storm::utility::isZero(optionalStateRewardVector.get()[state])) || (optionalStateActionRewardVector && !storm::utility::isZero(optionalStateActionRewardVector.get()[state])));
                }
            }
        }
@@ -661,7 +661,13 @@ namespace storm {
            // If the model has state rewards, we simply copy the state reward of the representative state, because
            // all states in a block are guaranteed to have the same state reward.
            if (this->options.getKeepRewards() && this->model.hasRewardModel()) {
-               stateRewards.get()[blockIndex] = this->model.getUniqueRewardModel().getStateRewardVector()[representativeState];
+               auto const& rewardModel = this->model.getUniqueRewardModel();
+               if (rewardModel.hasStateRewards()) {
+                   stateRewards.get()[blockIndex] = rewardModel.getStateRewardVector()[representativeState];
+               }
+               if (rewardModel.hasStateActionRewards()) {
+                   stateRewards.get()[blockIndex] += rewardModel.getStateActionRewardVector()[representativeState];
+               }
            }
        }
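For deterministic models each state has exactly one choice, so the state-action reward vector can be indexed by the representative state directly, and the quotient folds both reward kinds into a single state reward. The same computation condensed into a free function (hypothetical helper name, mirroring the inlined logic above):

#include <cstdint>
#include "storm/utility/constants.h"

template<typename ValueType, typename RewardModelType>
ValueType quotientBlockReward(RewardModelType const& rewardModel, uint_fast64_t representativeState) {
    ValueType result = storm::utility::zero<ValueType>();
    if (rewardModel.hasStateRewards()) {
        result = rewardModel.getStateRewardVector()[representativeState];
    }
    if (rewardModel.hasStateActionRewards()) {
        // Safe only for deterministic models, where the state index equals the row index.
        result += rewardModel.getStateActionRewardVector()[representativeState];
    }
    return result;
}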

src/storm/storage/bisimulation/NondeterministicModelBisimulationDecomposition.cpp (44 lines changed)

@@ -63,6 +63,12 @@ namespace storm {
                // Otherwise, we compute the probabilities from the transition matrix.
                for (auto stateIt = this->partition.begin(*block), stateIte = this->partition.end(*block); stateIt != stateIte; ++stateIt) {
                    for (uint_fast64_t choice = nondeterministicChoiceIndices[*stateIt]; choice < nondeterministicChoiceIndices[*stateIt + 1]; ++choice) {
+                       if (this->options.getKeepRewards() && this->model.hasRewardModel()) {
+                           auto const& rewardModel = this->model.getUniqueRewardModel();
+                           if (rewardModel.hasStateActionRewards()) {
+                               this->quotientDistributions[choice].setReward(rewardModel.getStateActionReward(choice));
+                           }
+                       }
                        for (auto entry : this->model.getTransitionMatrix().getRow(choice)) {
                            if (!this->comparator.isZero(entry.getValue())) {
                                this->quotientDistributions[choice].addProbability(this->partition.getBlock(entry.getColumn()).getId(), entry.getValue());
@@ -107,10 +113,18 @@ namespace storm {
                newLabeling.addLabel(ap);
            }

-           // If the model had state rewards, we need to build the state rewards for the quotient as well.
+           // If the model had state (action) rewards, we need to build the state rewards for the quotient as well.
            boost::optional<std::vector<ValueType>> stateRewards;
+           boost::optional<std::vector<ValueType>> stateActionRewards;
+           boost::optional<storm::models::sparse::StandardRewardModel<ValueType> const&> rewardModel;
            if (this->options.getKeepRewards() && this->model.hasRewardModel()) {
-               stateRewards = std::vector<ValueType>(this->blocks.size());
+               rewardModel = this->model.getUniqueRewardModel();
+               if (rewardModel.get().hasStateRewards()) {
+                   stateRewards = std::vector<ValueType>(this->blocks.size());
+               }
+               if (rewardModel.get().hasStateActionRewards()) {
+                   stateActionRewards = std::vector<ValueType>();
+               }
            }

            // Now build (a) and (b) by traversing all blocks.
@@ -137,6 +151,11 @@ namespace storm {
                        representativeState = oldBlock.data().representativeState();
                    }

+                   // Give the choice a reward of zero as we artificially introduced that the block is absorbing.
+                   if (this->options.getKeepRewards() && rewardModel && rewardModel.get().hasStateActionRewards()) {
+                       stateActionRewards.get().push_back(storm::utility::zero<ValueType>());
+                   }

                    // Add all of the selected atomic propositions that hold in the representative state to the state
                    // representing the block.
                    for (auto const& ap : atomicPropositions) {
@@ -155,6 +174,9 @@ namespace storm {
                        for (auto entry : quotientDistributions[choice]) {
                            builder.addNextValue(currentRow, entry.first, entry.second);
                        }
+                       if (this->options.getKeepRewards() && rewardModel && rewardModel.get().hasStateActionRewards()) {
+                           stateActionRewards.get().push_back(quotientDistributions[choice].getReward());
+                       }
                        ++currentRow;
                    }
@@ -169,8 +191,8 @@ namespace storm {
                // If the model has state rewards, we simply copy the state reward of the representative state, because
                // all states in a block are guaranteed to have the same state reward.
-               if (this->options.getKeepRewards() && this->model.hasRewardModel()) {
-                   stateRewards.get()[blockIndex] = this->model.getUniqueRewardModel().getStateRewardVector()[representativeState];
+               if (this->options.getKeepRewards() && rewardModel && rewardModel.get().hasStateRewards()) {
+                   stateRewards.get()[blockIndex] = rewardModel.get().getStateRewardVector()[representativeState];
                }
            }
@@ -185,7 +207,7 @@ namespace storm {
            if (this->options.getKeepRewards() && this->model.hasRewardModel()) {
                STORM_LOG_THROW(this->model.hasUniqueRewardModel(), storm::exceptions::IllegalFunctionCallException, "Cannot preserve more than one reward model.");
                typename std::unordered_map<std::string, typename ModelType::RewardModelType>::const_iterator nameRewardModelPair = this->model.getRewardModels().begin();
-               rewardModels.insert(std::make_pair(nameRewardModelPair->first, typename ModelType::RewardModelType(stateRewards)));
+               rewardModels.insert(std::make_pair(nameRewardModelPair->first, typename ModelType::RewardModelType(stateRewards, stateActionRewards)));
            }

            // Finally construct the quotient model.
@@ -217,7 +239,7 @@ namespace storm {
                    continue;
                }

-               // If the predecessor block is not marked as to-refined, we do so now.
+               // If the predecessor block is not marked as to-be-refined, we do so now.
                if (!predecessorBlock.data().splitter()) {
                    predecessorBlock.data().setSplitter();
                    splitterQueue.push_back(&predecessorBlock);
@@ -250,7 +272,13 @@ namespace storm {
            std::vector<uint_fast64_t> nondeterministicChoiceIndices = this->model.getTransitionMatrix().getRowGroupIndices();
            for (decltype(this->model.getNumberOfStates()) state = 0; state < this->model.getNumberOfStates(); ++state) {
                for (auto choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) {
-                   storm::storage::Distribution<ValueType> distribution;
+                   storm::storage::DistributionWithReward<ValueType> distribution;
+                   if (this->options.getKeepRewards() && this->model.hasRewardModel()) {
+                       auto const& rewardModel = this->model.getUniqueRewardModel();
+                       if (rewardModel.hasStateActionRewards()) {
+                           distribution.setReward(rewardModel.getStateActionReward(choice));
+                       }
+                   }
                    for (auto const& element : this->model.getTransitionMatrix().getRow(choice)) {
                        distribution.addProbability(this->partition.getBlock(element.getColumn()).getId(), element.getValue());
                    }
@@ -348,7 +376,7 @@ namespace storm {
            for (auto el : newBlocks) {
                this->updateQuotientDistributionsOfPredecessors(*el, block, splitterQueue);
            }

            return split;
        }
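The gist of the nondeterministic change: a choice is now summarized by a reward-annotated distribution over blocks, so two choices are merged in the quotient only if they agree on both the probabilities and the action reward. The loop body above, restated as a free function (hypothetical signature, for illustration only):

#include <cstdint>
#include "storm/storage/DistributionWithReward.h"

template<typename ValueType, typename ModelType, typename PartitionType>
storm::storage::DistributionWithReward<ValueType> quotientDistributionOfChoice(ModelType const& model, PartitionType const& partition, uint_fast64_t choice, bool keepRewards) {
    storm::storage::DistributionWithReward<ValueType> distribution;
    if (keepRewards && model.hasRewardModel()) {
        auto const& rewardModel = model.getUniqueRewardModel();
        if (rewardModel.hasStateActionRewards()) {
            distribution.setReward(rewardModel.getStateActionReward(choice));
        }
    }
    // Project the successor probabilities onto the current partition's blocks.
    for (auto const& element : model.getTransitionMatrix().getRow(choice)) {
        distribution.addProbability(partition.getBlock(element.getColumn()).getId(), element.getValue());
    }
    return distribution;
}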

src/storm/storage/bisimulation/NondeterministicModelBisimulationDecomposition.h (6 lines changed)

@@ -4,7 +4,7 @@
#include "storm/storage/bisimulation/BisimulationDecomposition.h"
#include "storm/storage/bisimulation/DeterministicBlockData.h"

-#include "storm/storage/Distribution.h"
+#include "storm/storage/DistributionWithReward.h"

namespace storm {
    namespace utility {
@@ -73,10 +73,10 @@ namespace storm {
            std::vector<storm::storage::sparse::state_type> choiceToStateMapping;

            // A vector that holds the quotient distributions for all nondeterministic choices of all states.
-           std::vector<storm::storage::Distribution<ValueType>> quotientDistributions;
+           std::vector<storm::storage::DistributionWithReward<ValueType>> quotientDistributions;

            // A vector that stores for each state the ordered list of quotient distributions.
-           std::vector<storm::storage::Distribution<ValueType> const*> orderedQuotientDistributions;
+           std::vector<storm::storage::DistributionWithReward<ValueType> const*> orderedQuotientDistributions;
        };
    }
}

src/storm/storage/dd/bisimulation/InternalCuddSignatureRefiner.cpp (6 lines changed)

@@ -117,10 +117,11 @@ namespace storm {
                DdNode* partitionThen;
                DdNode* partitionElse;
                short offset;
-               bool isNondeterminismVariable = false;
+               bool isNondeterminismVariable;
                while (skipped && !Cudd_IsConstant(nonBlockVariablesNode)) {
                    // Remember an offset that indicates whether the top variable is a nondeterminism variable or not.
                    offset = options.shiftStateVariables ? 1 : 0;
+                   isNondeterminismVariable = false;
                    if (!Cudd_IsConstant(nondeterminismVariablesNode) && Cudd_NodeReadIndex(nondeterminismVariablesNode) == Cudd_NodeReadIndex(nonBlockVariablesNode)) {
                        offset = 0;
                        isNondeterminismVariable = true;
@@ -260,10 +261,11 @@ namespace storm {
                DdNode* signatureThen;
                DdNode* signatureElse;
                short offset;
-               bool isNondeterminismVariable = false;
+               bool isNondeterminismVariable;
                while (skippedBoth && !Cudd_IsConstant(nonBlockVariablesNode)) {
                    // Remember an offset that indicates whether the top variable is a nondeterminism variable or not.
                    offset = options.shiftStateVariables ? 1 : 0;
+                   isNondeterminismVariable = false;
                    if (!Cudd_IsConstant(nondeterminismVariablesNode) && Cudd_NodeReadIndex(nondeterminismVariablesNode) == Cudd_NodeReadIndex(nonBlockVariablesNode)) {
                        offset = 0;
                        isNondeterminismVariable = true;
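Both hunks fix the same latent bug: isNondeterminismVariable was initialized once before the loop, so any iteration in which the Cudd_NodeReadIndex comparison did not fire silently reused the value left over from the previous iteration. Resetting the flag at the top of every iteration restores the intended per-level semantics. A minimal standalone reproduction of the pattern (illustrative only, not Storm code):

#include <iostream>

int main() {
    bool isNondeterminismVariable;        // intentionally uninitialized, as in the patched code
    for (int level = 0; level < 4; ++level) {
        isNondeterminismVariable = false; // the fix: reset on every iteration
        if (level == 1) {                 // stand-in for the Cudd_NodeReadIndex comparison
            isNondeterminismVariable = true;
        }
        // Without the per-iteration reset, levels 2 and 3 would also report 'true'.
        std::cout << "level " << level << ": " << std::boolalpha << isNondeterminismVariable << '\n';
    }
    return 0;
}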
