
Merge branch 'dft-approximation' of https://sselab.de/lab9/private/git/storm into dft-approximation

Former-commit-id: 595c5e7891
Branch: main
Author: sjunges, 9 years ago
Commit: f1e69e42cd
  1. src/builder/DftExplorationHeuristic.cpp (82 changed lines)
  2. src/builder/DftExplorationHeuristic.h (177 changed lines)
  3. src/builder/DftSmtBuilder.cpp (2 changed lines)
  4. src/builder/ExplicitDFTModelBuilder.cpp (1 changed line)
  5. src/builder/ExplicitDFTModelBuilderApprox.cpp (328 changed lines)
  6. src/builder/ExplicitDFTModelBuilderApprox.h (57 changed lines)
  7. src/generator/DftNextStateGenerator.cpp (4 changed lines)
  8. src/modelchecker/dft/DFTModelChecker.cpp (58 changed lines)
  9. src/modelchecker/dft/DFTModelChecker.h (7 changed lines)
  10. src/settings/modules/DFTSettings.cpp (6 changed lines)
  11. src/storage/BucketPriorityQueue.cpp (203 changed lines)
  12. src/storage/BucketPriorityQueue.h (73 changed lines)
  13. src/storage/bisimulation/BisimulationDecomposition.cpp (16 changed lines)
  14. src/storage/bisimulation/BisimulationDecomposition.h (8 changed lines)
  15. src/storage/bisimulation/DeterministicModelBisimulationDecomposition.cpp (28 changed lines)
  16. src/storage/bisimulation/DeterministicModelBisimulationDecomposition.h (8 changed lines)
  17. src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.cpp (14 changed lines)
  18. src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.h (6 changed lines)
  19. src/storage/dft/DFTState.cpp (59 changed lines)
  20. src/storage/dft/DFTState.h (53 changed lines)
  21. src/storm-dyftee.cpp (2 changed lines)
  22. src/utility/bitoperations.h (2 changed lines)

src/builder/DftExplorationHeuristic.cpp (82 changed lines)

@ -3,85 +3,73 @@
#include "src/utility/macros.h"
#include "src/utility/constants.h"
#include "src/exceptions/NotImplementedException.h"
#include "src/storage/dft/DFTState.h"
#include <limits>
namespace storm {
namespace builder {
template<typename ValueType>
DFTExplorationHeuristic<ValueType>::DFTExplorationHeuristic() : skip(true), depth(std::numeric_limits<std::size_t>::max()), rate(storm::utility::zero<ValueType>()), exitRate(storm::utility::zero<ValueType>()) {
DFTExplorationHeuristic<ValueType>::DFTExplorationHeuristic(size_t id) : id(id), expand(true), lowerBound(storm::utility::zero<ValueType>()), upperBound(storm::utility::infinity<ValueType>()), depth(0), probability(storm::utility::one<ValueType>()) {
// Intentionally left empty
}
template<typename ValueType>
bool DFTExplorationHeuristic<ValueType>::isSkip(double approximationThreshold, ApproximationHeuristic heuristic) const {
if (!skip) {
return false;
}
switch (heuristic) {
case ApproximationHeuristic::NONE:
return false;
case ApproximationHeuristic::DEPTH:
return depth > approximationThreshold;
case ApproximationHeuristic::RATERATIO:
// TODO Matthias: implement
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Heuristic rate ratio does not work.");
}
DFTExplorationHeuristic<ValueType>::DFTExplorationHeuristic(size_t id, DFTExplorationHeuristic const& predecessor, ValueType rate, ValueType exitRate) : id(id), expand(false), lowerBound(storm::utility::zero<ValueType>()), upperBound(storm::utility::zero<ValueType>()), depth(predecessor.depth + 1) {
STORM_LOG_ASSERT(storm::utility::zero<ValueType>() < exitRate, "Exit rate is 0");
probability = predecessor.probability * rate/exitRate;
}
template<typename ValueType>
void DFTExplorationHeuristic<ValueType>::setNotSkip() {
skip = false;
void DFTExplorationHeuristic<ValueType>::setBounds(ValueType lowerBound, ValueType upperBound) {
this->lowerBound = lowerBound;
this->upperBound = upperBound;
}
template<typename ValueType>
size_t DFTExplorationHeuristic<ValueType>::getDepth() const {
return depth;
template<>
bool DFTExplorationHeuristicProbability<double>::updateHeuristicValues(DFTExplorationHeuristic<double> const& predecessor, double rate, double exitRate) {
STORM_LOG_ASSERT(exitRate > 0, "Exit rate is 0");
probability += predecessor.getProbability() * rate/exitRate;
return true;
}
template<typename ValueType>
void DFTExplorationHeuristic<ValueType>::setHeuristicValues(size_t depth, ValueType rate, ValueType exitRate) {
this->depth = depth;
this->rate = rate;
this->exitRate = exitRate;
template<>
bool DFTExplorationHeuristicProbability<storm::RationalFunction>::updateHeuristicValues(DFTExplorationHeuristic<storm::RationalFunction> const& predecessor, storm::RationalFunction rate, storm::RationalFunction exitRate) {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Heuristic rate ratio does not work for rational functions.");
return false;
}
template<typename ValueType>
double DFTExplorationHeuristic<ValueType>::getPriority() const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Approximation works only for double.");
template<>
double DFTExplorationHeuristicProbability<double>::getPriority() const {
return probability;
}
template<>
double DFTExplorationHeuristic<double>::getPriority() const {
// TODO Matthias: change according to heuristic
//return rate/exitRate;
return depth;
double DFTExplorationHeuristicProbability<storm::RationalFunction>::getPriority() const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Heuristic rate ratio does not work for rational functions.");
}
template<>
double DFTExplorationHeuristic<storm::RationalFunction>::getPriority() const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Approximation works only for double.");
/*std::cout << (rate / exitRate) << " < " << threshold << ": " << (number < threshold) << std::endl;
std::map<storm::Variable, storm::RationalNumber> mapping;
storm::RationalFunction eval(number.evaluate(mapping));
std::cout << "Evaluated: " << eval << std::endl;
return eval < threshold;*/
double DFTExplorationHeuristicBoundDifference<double>::getPriority() const {
return upperBound / lowerBound;
}
template<typename ValueType>
bool compareDepth(std::shared_ptr<storm::storage::DFTState<ValueType>> stateA, std::shared_ptr<storm::storage::DFTState<ValueType>> stateB) {
return stateA->getPriority() > stateB->getPriority();
template<>
double DFTExplorationHeuristicBoundDifference<storm::RationalFunction>::getPriority() const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Heuristic bound difference does not work for rational functions.");
}
// Instantiate templates.
template class DFTExplorationHeuristic<double>;
template bool compareDepth(std::shared_ptr<storm::storage::DFTState<double>>, std::shared_ptr<storm::storage::DFTState<double>>);
template class DFTExplorationHeuristicNone<double>;
template class DFTExplorationHeuristicDepth<double>;
template class DFTExplorationHeuristicProbability<double>;
template class DFTExplorationHeuristicBoundDifference<double>;
#ifdef STORM_HAVE_CARL
template class DFTExplorationHeuristic<storm::RationalFunction>;
template bool compareDepth(std::shared_ptr<storm::storage::DFTState<storm::RationalFunction>>, std::shared_ptr<storm::storage::DFTState<storm::RationalFunction>>);
template class DFTExplorationHeuristicNone<storm::RationalFunction>;
template class DFTExplorationHeuristicDepth<storm::RationalFunction>;
template class DFTExplorationHeuristicProbability<storm::RationalFunction>;
template class DFTExplorationHeuristicBoundDifference<storm::RationalFunction>;
#endif
}
}

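For orientation: the new probability heuristic initializes a state's estimate as predecessor.probability * rate / exitRate and, whenever the state is reached again from another predecessor, updateHeuristicValues adds that predecessor's contribution. A minimal standalone sketch of this accumulation (hypothetical names, not code from this commit):

#include <iostream>

// Simplified stand-in for DFTExplorationHeuristicProbability<double>.
struct ProbabilityHeuristic {
    double probability = 0.0;
};

// Each discovery of the state adds predecessorProbability * rate / exitRate,
// i.e. the predecessor's estimate weighted by the branching ratio.
void updateProbability(ProbabilityHeuristic& h, double predecessorProbability, double rate, double exitRate) {
    h.probability += predecessorProbability * rate / exitRate;
}

int main() {
    ProbabilityHeuristic h;
    updateProbability(h, 1.0, 2.0, 5.0);     // first reached from the initial state
    updateProbability(h, 0.3, 1.0, 4.0);     // reached again via another path
    std::cout << h.probability << std::endl; // 0.4 + 0.075 = 0.475
    return 0;
}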
src/builder/DftExplorationHeuristic.h (177 changed lines)

@ -2,50 +2,189 @@
#define STORM_BUILDER_DFTEXPLORATIONHEURISTIC_H_
#include <memory>
#include <algorithm>
namespace storm {
// Forward declaration
namespace storage {
template<typename ValueType>
class DFTState;
}
namespace builder {
/*!
* Enum representing the heuristic used for deciding which states to expand.
*/
enum class ApproximationHeuristic { NONE, DEPTH, RATERATIO };
enum class ApproximationHeuristic { NONE, DEPTH, PROBABILITY };
/*!
* General superclass for approximation heuristics.
*/
template<typename ValueType>
class DFTExplorationHeuristic {
public:
DFTExplorationHeuristic();
DFTExplorationHeuristic(size_t id);
void setHeuristicValues(size_t depth, ValueType rate, ValueType exitRate);
DFTExplorationHeuristic(size_t id, DFTExplorationHeuristic const& predecessor, ValueType rate, ValueType exitRate);
bool isSkip(double approximationThreshold, ApproximationHeuristic heuristic) const;
void setBounds(ValueType lowerBound, ValueType upperBound);
void setNotSkip();
virtual bool updateHeuristicValues(DFTExplorationHeuristic const& predecessor, ValueType rate, ValueType exitRate) = 0;
size_t getDepth() const;
virtual double getPriority() const = 0;
double getPriority() const;
virtual bool isSkip(double approximationThreshold) const = 0;
private:
void markExpand() {
expand = true;
}
size_t getId() const {
return id;
}
bool isExpand() {
return expand;
}
size_t getDepth() const {
return depth;
}
ValueType getProbability() const {
return probability;
}
ValueType getLowerBound() const {
return lowerBound;
}
bool skip;
ValueType getUpperBound() const {
return upperBound;
}
protected:
size_t id;
bool expand;
ValueType lowerBound;
ValueType upperBound;
size_t depth;
ValueType rate;
ValueType exitRate;
ValueType probability;
};
template<typename ValueType>
class DFTExplorationHeuristicNone : public DFTExplorationHeuristic<ValueType> {
public:
DFTExplorationHeuristicNone(size_t id) : DFTExplorationHeuristic<ValueType>(id) {
// Intentionally left empty
}
DFTExplorationHeuristicNone(size_t id, DFTExplorationHeuristicNone<ValueType> const& predecessor, ValueType rate, ValueType exitRate) : DFTExplorationHeuristic<ValueType>(id, predecessor, rate, exitRate) {
// Intentionally left empty
}
bool updateHeuristicValues(DFTExplorationHeuristic<ValueType> const& predecessor, ValueType rate, ValueType exitRate) override {
return false;
}
double getPriority() const override {
return 0;
}
bool isSkip(double approximationThreshold) const override {
return false;
}
bool operator<(DFTExplorationHeuristicNone<ValueType> const& other) const {
return this->id > other.id;
}
};
template<typename ValueType>
bool compareDepth(std::shared_ptr<storm::storage::DFTState<ValueType>> stateA, std::shared_ptr<storm::storage::DFTState<ValueType>> stateB);
class DFTExplorationHeuristicDepth : public DFTExplorationHeuristic<ValueType> {
public:
DFTExplorationHeuristicDepth(size_t id) : DFTExplorationHeuristic<ValueType>(id) {
// Intentionally left empty
}
DFTExplorationHeuristicDepth(size_t id, DFTExplorationHeuristicDepth<ValueType> const& predecessor, ValueType rate, ValueType exitRate) : DFTExplorationHeuristic<ValueType>(id, predecessor, rate, exitRate) {
// Intentionally left empty
}
bool updateHeuristicValues(DFTExplorationHeuristic<ValueType> const& predecessor, ValueType rate, ValueType exitRate) override {
if (predecessor.getDepth() < this->depth) {
this->depth = predecessor.getDepth();
return true;
}
return false;
}
double getPriority() const override {
return this->depth;
}
bool isSkip(double approximationThreshold) const override {
return !this->expand && this->depth > approximationThreshold;
}
bool operator<(DFTExplorationHeuristicDepth<ValueType> const& other) const {
return this->depth > other.depth;
}
};
template<typename ValueType>
class DFTExplorationHeuristicProbability : public DFTExplorationHeuristic<ValueType> {
public:
DFTExplorationHeuristicProbability(size_t id) : DFTExplorationHeuristic<ValueType>(id) {
// Intentionally left empty
}
DFTExplorationHeuristicProbability(size_t id, DFTExplorationHeuristicProbability<ValueType> const& predecessor, ValueType rate, ValueType exitRate) : DFTExplorationHeuristic<ValueType>(id, predecessor, rate, exitRate) {
// Intentionally left empty
}
bool updateHeuristicValues(DFTExplorationHeuristic<ValueType> const& predecessor, ValueType rate, ValueType exitRate) override;
double getPriority() const override;
bool isSkip(double approximationThreshold) const override {
return !this->expand && this->getPriority() < approximationThreshold;
}
bool operator<(DFTExplorationHeuristicProbability<ValueType> const& other) const {
return this->getPriority() < other.getPriority();
}
};
template<typename ValueType>
class DFTExplorationHeuristicBoundDifference : public DFTExplorationHeuristic<ValueType> {
public:
DFTExplorationHeuristicBoundDifference(size_t id) : DFTExplorationHeuristic<ValueType>(id) {
// Intentionally left empty
}
DFTExplorationHeuristicBoundDifference(size_t id, DFTExplorationHeuristicBoundDifference<ValueType> const& predecessor, ValueType rate, ValueType exitRate) : DFTExplorationHeuristic<ValueType>(id, predecessor, rate, exitRate) {
// Intentionally left empty
}
bool updateHeuristicValues(DFTExplorationHeuristic<ValueType> const& predecessor, ValueType rate, ValueType exitRate) override {
return false;
}
double getPriority() const override;
bool isSkip(double approximationThreshold) const override {
return !this->expand && this->getPriority() < approximationThreshold;
}
bool operator<(DFTExplorationHeuristicBoundDifference<ValueType> const& other) const {
return this->getPriority() < other.getPriority();
}
private:
ValueType lowerBound;
ValueType upperBound;
};
}
}

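For orientation: the heuristic subclasses above differ mainly in getPriority and isSkip, and a state explicitly marked for expansion is never skipped. For the depth heuristic the decision reduces to the check sketched here (hypothetical standalone code, not part of the commit):

#include <cstddef>
#include <iostream>

// Simplified stand-in for DFTExplorationHeuristicDepth.
struct DepthHeuristic {
    bool expand = false;     // states that must be explored (e.g. reached via dependencies)
    std::size_t depth = 0;   // minimal known depth from the initial state

    bool isSkip(double approximationThreshold) const {
        return !expand && depth > approximationThreshold;
    }
};

int main() {
    DepthHeuristic h;
    h.depth = 5;
    std::cout << std::boolalpha << h.isSkip(3.0) << std::endl; // true: deeper than the threshold
    h.expand = true;
    std::cout << std::boolalpha << h.isSkip(3.0) << std::endl; // false: expansion is forced
    return 0;
}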
src/builder/DftSmtBuilder.cpp (2 changed lines)

@ -1,4 +1,4 @@
#include "src/builder/DFTSMTBuilder.h"
#include "src/builder/DftSmtBuilder.h"
#include "src/exceptions/NotImplementedException.h"
namespace storm {

src/builder/ExplicitDFTModelBuilder.cpp (1 changed line)

@ -58,6 +58,7 @@ namespace storm {
STORM_LOG_ASSERT(mStates.getValue(pseudoStatePair.second) >= OFFSET_PSEUDO_STATE, "State is no pseudo state.");
STORM_LOG_TRACE("Create pseudo state from bit vector " << pseudoStatePair.second);
DFTStatePointer pseudoState = std::make_shared<storm::storage::DFTState<ValueType>>(pseudoStatePair.second, mDft, *mStateGenerationInfo, newIndex);
pseudoState->construct();
STORM_LOG_ASSERT(pseudoStatePair.second == pseudoState->status(), "Pseudo states do not coincide.");
STORM_LOG_TRACE("Explore pseudo state " << mDft.getStateString(pseudoState) << " with id " << pseudoState->getId());
auto exploreResult = exploreStates(pseudoState, rowOffset, transitionMatrixBuilder, tmpMarkovianStates, modelComponents.exitRates);

src/builder/ExplicitDFTModelBuilderApprox.cpp (328 changed lines)

@ -3,6 +3,7 @@
#include <src/models/sparse/Ctmc.h>
#include <src/utility/constants.h>
#include <src/utility/vector.h>
#include "src/utility/bitoperations.h"
#include <src/exceptions/UnexpectedException.h>
#include "src/settings/modules/DFTSettings.h"
#include "src/settings/SettingsManager.h"
@ -31,18 +32,75 @@ namespace storm {
generator(dft, *stateGenerationInfo, enableDC, mergeFailedStates),
matrixBuilder(!generator.isDeterministicModel()),
stateStorage(((dft.stateVectorSize() / 64) + 1) * 64),
explorationQueue([this](StateType idA, StateType idB) {
return isPriorityGreater(idA, idB);
})
// TODO Matthias: make choosable
//explorationQueue(dft.nrElements()+1, 0, 1)
explorationQueue(200, 0, 0.9)
{
// Intentionally left empty.
// TODO Matthias: remove again
heuristic = storm::builder::ApproximationHeuristic::PROBABILITY;
// Compute independent subtrees
if (dft.topLevelType() == storm::storage::DFTElementType::OR) {
// We only support this for approximation with top level element OR
for (auto const& child : dft.getGate(dft.getTopLevelIndex())->children()) {
// Consider all children of the top level gate
std::vector<size_t> isubdft;
if (child->nrParents() > 1 || child->hasOutgoingDependencies()) {
STORM_LOG_TRACE("child " << child->name() << " does not allow modularisation.");
isubdft.clear();
} else if (dft.isGate(child->id())) {
isubdft = dft.getGate(child->id())->independentSubDft(false);
} else {
STORM_LOG_ASSERT(dft.isBasicElement(child->id()), "Child is no BE.");
if(dft.getBasicElement(child->id())->hasIngoingDependencies()) {
STORM_LOG_TRACE("child " << child->name() << " does not allow modularisation.");
isubdft.clear();
} else {
isubdft = {child->id()};
}
}
if(isubdft.empty()) {
subtreeBEs.clear();
break;
} else {
// Add new independent subtree
std::vector<size_t> subtree;
for (size_t id : isubdft) {
if (dft.isBasicElement(id)) {
subtree.push_back(id);
}
}
subtreeBEs.push_back(subtree);
}
}
}
if (subtreeBEs.empty()) {
// Consider complete tree
std::vector<size_t> subtree;
for (size_t id = 0; id < dft.nrElements(); ++id) {
if (dft.isBasicElement(id)) {
subtree.push_back(id);
}
}
subtreeBEs.push_back(subtree);
}
for (auto tree : subtreeBEs) {
std::stringstream stream;
stream << "Subtree: ";
for (size_t id : tree) {
stream << id << ", ";
}
STORM_LOG_TRACE(stream.str());
}
}
template<typename ValueType, typename StateType>
void ExplicitDFTModelBuilderApprox<ValueType, StateType>::buildModel(LabelOptions const& labelOpts, bool firstTime, double approximationThreshold) {
void ExplicitDFTModelBuilderApprox<ValueType, StateType>::buildModel(LabelOptions const& labelOpts, size_t iteration, double approximationThreshold) {
STORM_LOG_TRACE("Generating DFT state space");
if (firstTime) {
if (iteration < 1) {
// Initialize
modelComponents.markovianStates = storm::storage::BitVector(INITIAL_BITVECTOR_SIZE);
@ -75,11 +133,28 @@ namespace storm {
STORM_LOG_ASSERT(stateStorage.initialStateIndices.size() == 1, "Only one initial state assumed.");
initialStateIndex = stateStorage.initialStateIndices[0];
STORM_LOG_TRACE("Initial state: " << initialStateIndex);
// Initialize heuristic values for initial state
STORM_LOG_ASSERT(!statesNotExplored.at(initialStateIndex).second, "Heuristic for initial state is already initialized");
ExplorationHeuristicPointer heuristic = std::make_shared<ExplorationHeuristic>(initialStateIndex);
heuristic->markExpand();
statesNotExplored[initialStateIndex].second = heuristic;
explorationQueue.push(heuristic);
} else {
initializeNextIteration();
}
switch (heuristic) {
case storm::builder::ApproximationHeuristic::NONE:
// Do not change anything
approximationThreshold = dft.nrElements()+10;
break;
case storm::builder::ApproximationHeuristic::DEPTH:
approximationThreshold = iteration;
break;
case storm::builder::ApproximationHeuristic::PROBABILITY:
approximationThreshold = 10 * std::pow(2, iteration);
break;
}
exploreStateSpace(approximationThreshold);
size_t stateSize = stateStorage.getNumberOfStates() + (mergeFailedStates ? 1 : 0);
@ -94,8 +169,7 @@ namespace storm {
STORM_LOG_TRACE("State remapping: " << matrixBuilder.stateRemapping);
STORM_LOG_TRACE("Markovian states: " << modelComponents.markovianStates);
STORM_LOG_DEBUG("Generated " << stateSize << " states");
STORM_LOG_DEBUG("Skipped " << skippedStates.size() << " states");
STORM_LOG_DEBUG("Model has " << stateSize << " states");
STORM_LOG_DEBUG("Model is " << (generator.isDeterministicModel() ? "deterministic" : "non-deterministic"));
// Build transition matrix
@ -117,8 +191,8 @@ namespace storm {
// Push skipped states to explore queue
// TODO Matthias: remove
for (auto const& skippedState : skippedStates) {
statesNotExplored[skippedState.second->getId()] = skippedState.second;
explorationQueue.push(skippedState.second->getId());
statesNotExplored[skippedState.second.first->getId()] = skippedState.second;
explorationQueue.push(skippedState.second.second);
}
// Initialize matrix builder again
@ -134,7 +208,8 @@ namespace storm {
auto iterSkipped = skippedStates.begin();
size_t skippedBefore = 0;
for (size_t i = 0; i < indexRemapping.size(); ++i) {
while (iterSkipped->first <= i) {
while (iterSkipped != skippedStates.end() && iterSkipped->first <= i) {
STORM_LOG_ASSERT(iterSkipped->first < indexRemapping.size(), "Skipped is too high.");
++skippedBefore;
++iterSkipped;
}
@ -146,7 +221,7 @@ namespace storm {
matrixBuilder.mappingOffset = nrStates;
STORM_LOG_TRACE("# expanded states: " << nrExpandedStates);
StateType skippedIndex = nrExpandedStates;
std::map<StateType, DFTStatePointer> skippedStatesNew;
std::map<StateType, std::pair<DFTStatePointer, ExplorationHeuristicPointer>> skippedStatesNew;
for (size_t id = 0; id < matrixBuilder.stateRemapping.size(); ++id) {
StateType index = matrixBuilder.stateRemapping[id];
auto itFind = skippedStates.find(index);
@ -193,7 +268,7 @@ namespace storm {
auto itFind = skippedStates.find(itEntry->getColumn());
if (itFind != skippedStates.end()) {
// Set id for skipped states as we remap it later
matrixBuilder.addTransition(matrixBuilder.mappingOffset + itFind->second->getId(), itEntry->getValue());
matrixBuilder.addTransition(matrixBuilder.mappingOffset + itFind->second.first->getId(), itEntry->getValue());
} else {
// Set newly remapped index for expanded states
matrixBuilder.addTransition(indexRemapping[itEntry->getColumn()], itEntry->getValue());
@ -212,13 +287,17 @@ namespace storm {
template<typename ValueType, typename StateType>
void ExplicitDFTModelBuilderApprox<ValueType, StateType>::exploreStateSpace(double approximationThreshold) {
size_t nrExpandedStates = 0;
size_t nrSkippedStates = 0;
// TODO Matthias: do not empty queue every time but break before
while (!explorationQueue.empty()) {
// Get the first state in the queue
StateType currentId = explorationQueue.popTop();
ExplorationHeuristicPointer currentExplorationHeuristic = explorationQueue.popTop();
StateType currentId = currentExplorationHeuristic->getId();
auto itFind = statesNotExplored.find(currentId);
STORM_LOG_ASSERT(itFind != statesNotExplored.end(), "Id " << currentId << " not found");
DFTStatePointer currentState = itFind->second;
DFTStatePointer currentState = itFind->second.first;
STORM_LOG_ASSERT(currentExplorationHeuristic == itFind->second.second, "Exploration heuristics do not match");
STORM_LOG_ASSERT(currentState->getId() == currentId, "Ids do not match");
// Remove it from the list of not explored states
statesNotExplored.erase(itFind);
@ -240,18 +319,21 @@ namespace storm {
// Try to explore the next state
generator.load(currentState);
if (currentState->isSkip(approximationThreshold, heuristic)) {
if (nrExpandedStates > approximationThreshold && !currentExplorationHeuristic->isExpand()) {
//if (currentExplorationHeuristic->isSkip(approximationThreshold)) {
// Skip the current state
++nrSkippedStates;
STORM_LOG_TRACE("Skip expansion of state: " << dft.getStateString(currentState));
setMarkovian(true);
// Add transition to target state with temporary value 0
// TODO Matthias: what to do when there is no unique target state?
matrixBuilder.addTransition(failedStateId, storm::utility::zero<ValueType>());
// Remember skipped state
skippedStates[matrixBuilder.getCurrentRowGroup() - 1] = currentState;
skippedStates[matrixBuilder.getCurrentRowGroup() - 1] = std::make_pair(currentState, currentExplorationHeuristic);
matrixBuilder.finishRow();
} else {
// Explore the current state
++nrExpandedStates;
storm::generator::StateBehavior<ValueType, StateType> behavior = generator.expand(std::bind(&ExplicitDFTModelBuilderApprox::getOrAddStateIndex, this, std::placeholders::_1));
STORM_LOG_ASSERT(!behavior.empty(), "Behavior is empty.");
setMarkovian(behavior.begin()->isMarkovian());
@ -260,18 +342,40 @@ namespace storm {
for (auto const& choice : behavior) {
// Add the probabilistic behavior to the matrix.
for (auto const& stateProbabilityPair : choice) {
STORM_LOG_ASSERT(!storm::utility::isZero(stateProbabilityPair.second), "Probability zero.");
// Set transition to state id + offset. This helps in only remapping all previously skipped states.
matrixBuilder.addTransition(matrixBuilder.mappingOffset + stateProbabilityPair.first, stateProbabilityPair.second);
// TODO Matthias: set heuristic values here
// Set heuristic values for reached states
auto iter = statesNotExplored.find(stateProbabilityPair.first);
if (iter != statesNotExplored.end()) {
// Update heuristic values
DFTStatePointer state = iter->second.first;
if (!iter->second.second) {
// Initialize heuristic values
ExplorationHeuristicPointer heuristic = std::make_shared<ExplorationHeuristic>(stateProbabilityPair.first, *currentExplorationHeuristic, stateProbabilityPair.second, choice.getTotalMass());
iter->second.second = heuristic;
if (state->hasFailed(dft.getTopLevelIndex()) || state->isFailsafe(dft.getTopLevelIndex()) || state->nrFailableDependencies() > 0 || (state->nrFailableDependencies() == 0 && state->nrFailableBEs() == 0)) {
// Do not skip absorbing state or if reached by dependencies
iter->second.second->markExpand();
}
explorationQueue.push(heuristic);
} else if (!iter->second.second->isExpand()) {
double oldPriority = iter->second.second->getPriority();
if (iter->second.second->updateHeuristicValues(*currentExplorationHeuristic, stateProbabilityPair.second, choice.getTotalMass())) {
// Update priority queue
explorationQueue.update(iter->second.second, oldPriority);
}
}
}
}
matrixBuilder.finishRow();
}
}
// Update priority queue
// TODO Matthias: only when necessary
explorationQueue.fix();
} // end exploration
STORM_LOG_INFO("Expanded " << nrExpandedStates << " states");
STORM_LOG_INFO("Skipped " << nrSkippedStates << " states");
STORM_LOG_ASSERT(nrSkippedStates == skippedStates.size(), "Nr skipped states is wrong");
}
template<typename ValueType, typename StateType>
@ -328,11 +432,7 @@ namespace storm {
template<typename ValueType, typename StateType>
std::shared_ptr<storm::models::sparse::Model<ValueType>> ExplicitDFTModelBuilderApprox<ValueType, StateType>::getModelApproximation(bool lowerBound) {
// TODO Matthias: handle case with no skipped states
if (lowerBound) {
changeMatrixLowerBound(modelComponents.transitionMatrix);
} else {
changeMatrixUpperBound(modelComponents.transitionMatrix);
}
changeMatrixBound(modelComponents.transitionMatrix, lowerBound);
return createModel(true);
}
@ -388,42 +488,148 @@ namespace storm {
}
template<typename ValueType, typename StateType>
void ExplicitDFTModelBuilderApprox<ValueType, StateType>::changeMatrixLowerBound(storm::storage::SparseMatrix<ValueType> & matrix) const {
void ExplicitDFTModelBuilderApprox<ValueType, StateType>::changeMatrixBound(storm::storage::SparseMatrix<ValueType> & matrix, bool lowerBound) const {
// Set lower bound for skipped states
for (auto it = skippedStates.begin(); it != skippedStates.end(); ++it) {
auto matrixEntry = matrix.getRow(it->first, 0).begin();
STORM_LOG_ASSERT(matrixEntry->getColumn() == failedStateId, "Transition has wrong target state.");
// Get the lower bound by considering the failure of all possible BEs
ValueType newRate = storm::utility::zero<ValueType>();
for (size_t index = 0; index < it->second->nrFailableBEs(); ++index) {
newRate += it->second->getFailableBERate(index);
STORM_LOG_ASSERT(!it->second.first->isPseudoState(), "State is still pseudo state.");
ExplorationHeuristicPointer heuristic = it->second.second;
if (storm::utility::isZero(heuristic->getUpperBound())) {
// Initialize bounds
ValueType lowerBound = getLowerBound(it->second.first);
ValueType upperBound = getUpperBound(it->second.first);
heuristic->setBounds(lowerBound, upperBound);
}
for (size_t index = 0; index < it->second->nrNotFailableBEs(); ++index) {
newRate += it->second->getNotFailableBERate(index);
// Change bound
if (lowerBound) {
matrixEntry->setValue(it->second.second->getLowerBound());
} else {
matrixEntry->setValue(it->second.second->getUpperBound());
}
matrixEntry->setValue(newRate);
}
}
template<typename ValueType, typename StateType>
void ExplicitDFTModelBuilderApprox<ValueType, StateType>::changeMatrixUpperBound(storm::storage::SparseMatrix<ValueType> & matrix) const {
// Set upper bound for skipped states
for (auto it = skippedStates.begin(); it != skippedStates.end(); ++it) {
auto matrixEntry = matrix.getRow(it->first, 0).begin();
STORM_LOG_ASSERT(matrixEntry->getColumn() == failedStateId, "Transition has wrong target state.");
// Get the upper bound by considering the failure of all BEs
// The used formula for the rate is 1/( 1/a + 1/b + ...)
// TODO Matthias: improve by using closed formula for AND of all BEs
ValueType newRate = storm::utility::zero<ValueType>();
for (size_t index = 0; index < it->second->nrFailableBEs(); ++index) {
newRate += storm::utility::one<ValueType>() / it->second->getFailableBERate(index);
ValueType ExplicitDFTModelBuilderApprox<ValueType, StateType>::getLowerBound(DFTStatePointer const& state) const {
// Get the lower bound by considering the failure of all possible BEs
ValueType lowerBound = storm::utility::zero<ValueType>();
for (size_t index = 0; index < state->nrFailableBEs(); ++index) {
lowerBound += state->getFailableBERate(index);
}
return lowerBound;
}
template<typename ValueType, typename StateType>
ValueType ExplicitDFTModelBuilderApprox<ValueType, StateType>::getUpperBound(DFTStatePointer const& state) const {
// Get the upper bound by considering the failure of all BEs
ValueType upperBound = storm::utility::one<ValueType>();
ValueType rateSum = storm::utility::zero<ValueType>();
// Compute for each independent subtree
for (std::vector<size_t> const& subtree : subtreeBEs) {
// Get all possible rates
std::vector<ValueType> rates;
storm::storage::BitVector coldBEs(subtree.size(), false);
for (size_t i = 0; i < subtree.size(); ++i) {
size_t id = subtree[i];
if (state->isOperational(id)) {
// Get BE rate
ValueType rate = state->getBERate(id);
if (storm::utility::isZero<ValueType>(rate)) {
// Get active failure rate for cold BE
rate = dft.getBasicElement(id)->activeFailureRate();
// Mark BE as cold
coldBEs.set(i, true);
}
rates.push_back(rate);
rateSum += rate;
}
}
STORM_LOG_ASSERT(rates.size() > 0, "No rates failable");
// Sort rates
std::sort(rates.begin(), rates.end());
std::vector<size_t> rateCount(rates.size(), 0);
size_t lastIndex = 0;
for (size_t i = 0; i < rates.size(); ++i) {
if (rates[lastIndex] != rates[i]) {
lastIndex = i;
}
++rateCount[lastIndex];
}
for (size_t index = 0; index < it->second->nrNotFailableBEs(); ++index) {
newRate += storm::utility::one<ValueType>() / it->second->getNotFailableBERate(index);
for (size_t i = 0; i < rates.size(); ++i) {
// Cold BEs cannot fail in the first step
if (!coldBEs.get(i) && rateCount[i] > 0) {
// Compute AND MTTF of subtree without current rate and scale with current rate
upperBound += rates.back() * rateCount[i] * computeMTTFAnd(rates, rates.size() - 1);
// Swap here to avoid swapping back
std::iter_swap(rates.begin() + i, rates.end() - 1);
}
}
newRate = storm::utility::one<ValueType>() / newRate;
matrixEntry->setValue(newRate);
}
STORM_LOG_TRACE("Upper bound is " << (rateSum / upperBound) << " for state " << state->getId());
STORM_LOG_ASSERT(!storm::utility::isZero(upperBound), "Upper bound is 0");
STORM_LOG_ASSERT(!storm::utility::isZero(rateSum), "State is absorbing");
return rateSum / upperBound;
}
template<typename ValueType, typename StateType>
ValueType ExplicitDFTModelBuilderApprox<ValueType, StateType>::computeMTTFAnd(std::vector<ValueType> const& rates, size_t size) const {
ValueType result = storm::utility::zero<ValueType>();
if (size == 0) {
return result;
}
// TODO Matthias: works only for <64 BEs!
// WARNING: this code produces wrong results for more than 32 BEs
/*for (size_t i = 1; i < 4 && i <= rates.size(); ++i) {
size_t permutation = smallestIntWithNBitsSet(static_cast<size_t>(i));
ValueType sum = storm::utility::zero<ValueType>();
do {
ValueType permResult = storm::utility::zero<ValueType>();
for(size_t j = 0; j < rates.size(); ++j) {
if(permutation & static_cast<size_t>(1 << static_cast<size_t>(j))) {
// WARNING: if the first bit is set, it also recognizes the 32nd bit as set
// TODO: fix
permResult += rates[j];
}
}
permutation = nextBitPermutation(permutation);
STORM_LOG_ASSERT(!storm::utility::isZero(permResult), "PermResult is 0");
sum += storm::utility::one<ValueType>() / permResult;
} while(permutation < (static_cast<size_t>(1) << rates.size()) && permutation != 0);
if (i % 2 == 0) {
result -= sum;
} else {
result += sum;
}
}*/
// Compute result with permutations of size <= 3
ValueType one = storm::utility::one<ValueType>();
for (size_t i1 = 0; i1 < size; ++i1) {
// + 1/a
ValueType sum = rates[i1];
result += one / sum;
for (size_t i2 = 0; i2 < i1; ++i2) {
// - 1/(a+b)
ValueType sum2 = sum + rates[i2];
result -= one / sum2;
for (size_t i3 = 0; i3 < i2; ++i3) {
// + 1/(a+b+c)
result += one / (sum2 + rates[i3]);
}
}
}
STORM_LOG_ASSERT(!storm::utility::isZero(result), "UpperBound is 0");
return result;
}
template<typename ValueType, typename StateType>
@ -446,14 +652,14 @@ namespace storm {
// Check if state is pseudo state
// If state is explored already the possible pseudo state was already constructed
auto iter = statesNotExplored.find(stateId);
if (iter != statesNotExplored.end() && iter->second->isPseudoState()) {
if (iter != statesNotExplored.end() && iter->second.first->isPseudoState()) {
// Create pseudo state now
STORM_LOG_ASSERT(iter->second->getId() == stateId, "Ids do not match.");
STORM_LOG_ASSERT(iter->second->status() == state->status(), "Pseudo states do not coincide.");
STORM_LOG_ASSERT(iter->second.first->getId() == stateId, "Ids do not match.");
STORM_LOG_ASSERT(iter->second.first->status() == state->status(), "Pseudo states do not coincide.");
state->setId(stateId);
// Update mapping to map to concrete state now
statesNotExplored[stateId] = state;
// TODO Matthias: copy explorationHeuristic
// TODO Matthias: just change pointer?
statesNotExplored[stateId] = std::make_pair(state, iter->second.second);
// We do not push the new state on the exploration queue as the pseudo state was already pushed
STORM_LOG_TRACE("Created pseudo state " << dft.getStateString(state));
}
@ -466,8 +672,8 @@ namespace storm {
stateId = stateStorage.stateToId.findOrAdd(state->status(), state->getId());
STORM_LOG_ASSERT(stateId == state->getId(), "Ids do not match.");
// Insert state as not yet explored
statesNotExplored[stateId] = state;
explorationQueue.push(stateId);
ExplorationHeuristicPointer nullHeuristic;
statesNotExplored[stateId] = std::make_pair(state, nullHeuristic);
// Reserve one slot for the new state in the remapping
matrixBuilder.stateRemapping.push_back(0);
STORM_LOG_TRACE("New " << (state->isPseudoState() ? "pseudo" : "concrete") << " state: " << dft.getStateString(state));
@ -485,9 +691,11 @@ namespace storm {
}
template<typename ValueType, typename StateType>
bool ExplicitDFTModelBuilderApprox<ValueType, StateType>::isPriorityGreater(StateType idA, StateType idB) const {
// TODO Matthias: compare directly and according to heuristic
return storm::builder::compareDepth(statesNotExplored.at(idA), statesNotExplored.at(idB));
void ExplicitDFTModelBuilderApprox<ValueType, StateType>::printNotExplored() const {
std::cout << "states not explored:" << std::endl;
for (auto it : statesNotExplored) {
std::cout << it.first << " -> " << dft.getStateString(it.second.first) << std::endl;
}
}

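For orientation: computeMTTFAnd evaluates the inclusion-exclusion expansion of the mean time to failure of an AND over independent exponential failure rates, truncated after subsets of size three; note that the implementation returns the alternating sum directly. A self-contained sketch of the same computation (hypothetical names, not code from this commit):

#include <cstddef>
#include <iostream>
#include <vector>

// Truncated inclusion-exclusion: sum 1/a - sum 1/(a+b) + sum 1/(a+b+c).
// Exact for up to three rates, an approximation for more.
double mttfAndTruncated(std::vector<double> const& rates) {
    double result = 0.0;
    for (std::size_t i1 = 0; i1 < rates.size(); ++i1) {
        double sum1 = rates[i1];
        result += 1.0 / sum1;                       // + 1/a
        for (std::size_t i2 = 0; i2 < i1; ++i2) {
            double sum2 = sum1 + rates[i2];
            result -= 1.0 / sum2;                   // - 1/(a+b)
            for (std::size_t i3 = 0; i3 < i2; ++i3) {
                result += 1.0 / (sum2 + rates[i3]); // + 1/(a+b+c)
            }
        }
    }
    return result;
}

int main() {
    // Rates 1, 2, 3: 1 + 1/2 + 1/3 - 1/3 - 1/4 - 1/5 + 1/6 = 1.21666...
    std::cout << mttfAndTruncated({1.0, 2.0, 3.0}) << std::endl;
    return 0;
}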
src/builder/ExplicitDFTModelBuilderApprox.h (57 changed lines)

@ -10,7 +10,7 @@
#include "src/storage/sparse/StateStorage.h"
#include "src/storage/dft/DFT.h"
#include "src/storage/dft/SymmetricUnits.h"
#include "src/storage/DynamicPriorityQueue.h"
#include "src/storage/BucketPriorityQueue.h"
#include <boost/container/flat_set.hpp>
#include <boost/optional/optional.hpp>
#include <stack>
@ -26,11 +26,10 @@ namespace storm {
template<typename ValueType, typename StateType = uint32_t>
class ExplicitDFTModelBuilderApprox {
using DFTElementPointer = std::shared_ptr<storm::storage::DFTElement<ValueType>>;
using DFTElementCPointer = std::shared_ptr<storm::storage::DFTElement<ValueType> const>;
using DFTGatePointer = std::shared_ptr<storm::storage::DFTGate<ValueType>>;
using DFTStatePointer = std::shared_ptr<storm::storage::DFTState<ValueType>>;
using DFTRestrictionPointer = std::shared_ptr<storm::storage::DFTRestriction<ValueType>>;
// TODO Matthias: make choosable
using ExplorationHeuristic = DFTExplorationHeuristicProbability<ValueType>;
using ExplorationHeuristicPointer = std::shared_ptr<ExplorationHeuristic>;
// A structure holding the individual components of a model.
@ -159,10 +158,10 @@ namespace storm {
* Build model from DFT.
*
* @param labelOpts Options for labeling.
* @param firstTime Flag indicating if the model is built for the first time or rebuilt.
* @param iteration Current iteration number.
* @param approximationThreshold Threshold determining when to skip exploring states.
*/
void buildModel(LabelOptions const& labelOpts, bool firstTime, double approximationThreshold = 0.0);
void buildModel(LabelOptions const& labelOpts, size_t iteration, double approximationThreshold = 0.0);
/*!
* Get the built model.
@ -218,18 +217,36 @@ namespace storm {
void setMarkovian(bool markovian);
/**
* Change matrix to reflect the lower approximation bound.
* Change matrix to reflect the lower or upper approximation bound.
*
* @param matrix Matrix to change. The changes are reflected here.
* @param matrix Matrix to change. The changes are reflected here.
* @param lowerBound Flag indicating if the lower bound should be used. Otherwise the upper bound is used.
*/
void changeMatrixLowerBound(storm::storage::SparseMatrix<ValueType> & matrix) const;
void changeMatrixBound(storm::storage::SparseMatrix<ValueType> & matrix, bool lowerBound) const;
/*!
* Change matrix to reflect the upper approximation bound.
* Get lower bound approximation for state.
*
* @param matrix Matrix to change. The changes are reflected here.
* @return Lower bound approximation.
*/
void changeMatrixUpperBound(storm::storage::SparseMatrix<ValueType> & matrix) const;
ValueType getLowerBound(DFTStatePointer const& state) const;
/*!
* Get upper bound approximation for state.
*
* @return Upper bound approximation.
*/
ValueType getUpperBound(DFTStatePointer const& state) const;
/*!
* Compute the MTTF of an AND gate via a closed formula.
* The used formula is 1/( 1/a + 1/b + 1/c + ... - 1/(a+b) - 1/(a+c) - ... + 1/(a+b+c) + ... - ...)
*
* @param rates List of rates of children of AND.
* @param size Only indices < size are considered in the vector.
* @return MTTF.
*/
ValueType computeMTTFAnd(std::vector<ValueType> const& rates, size_t size) const;
/*!
* Compares the priority of two states.
@ -241,6 +258,8 @@ namespace storm {
*/
bool isPriorityGreater(StateType idA, StateType idB) const;
void printNotExplored() const;
/*!
* Create the model model from the model components.
*
@ -294,16 +313,20 @@ namespace storm {
storm::storage::sparse::StateStorage<StateType> stateStorage;
// A priority queue of states that still need to be explored.
storm::storage::DynamicPriorityQueue<StateType, std::vector<StateType>, std::function<bool(StateType, StateType)>> explorationQueue;
storm::storage::BucketPriorityQueue<ValueType> explorationQueue;
//storm::storage::DynamicPriorityQueue<ExplorationHeuristicPointer, std::vector<ExplorationHeuristicPointer>, std::function<bool(ExplorationHeuristicPointer, ExplorationHeuristicPointer)>> explorationQueue;
// A mapping of not yet explored states from the id to the state object.
std::map<StateType, DFTStatePointer> statesNotExplored;
// A mapping of not yet explored states from the id to the tuple (state object, heuristic values).
std::map<StateType, std::pair<DFTStatePointer, ExplorationHeuristicPointer>> statesNotExplored;
// Holds all skipped states which were not yet expanded. More concretely it is a mapping from matrix indices
// to the corresponding skipped states.
// Notice that we need an ordered map here to easily iterate in increasing order over state ids.
// TODO remove again
std::map<StateType, DFTStatePointer> skippedStates;
std::map<StateType, std::pair<DFTStatePointer, ExplorationHeuristicPointer>> skippedStates;
// List of independent subtrees and the BEs contained in them.
std::vector<std::vector<size_t>> subtreeBEs;
};
}

src/generator/DftNextStateGenerator.cpp (4 changed lines)

@ -20,7 +20,6 @@ namespace storm {
template<typename ValueType, typename StateType>
std::vector<StateType> DftNextStateGenerator<ValueType, StateType>::getInitialStates(StateToIdCallback const& stateToIdCallback) {
DFTStatePointer initialState = std::make_shared<storm::storage::DFTState<ValueType>>(mDft, mStateGenerationInfo, 0);
initialState->setHeuristicValues(0, storm::utility::zero<ValueType>(), storm::utility::zero<ValueType>());
// Register initial state
StateType id = stateToIdCallback(initialState);
@ -158,10 +157,8 @@ namespace storm {
ValueType remainingProbability = storm::utility::one<ValueType>() - probability;
choice.addProbability(unsuccessfulStateId, remainingProbability);
STORM_LOG_TRACE("Added transition to " << unsuccessfulStateId << " with remaining probability " << remainingProbability);
unsuccessfulState->setHeuristicValues(state, remainingProbability, storm::utility::one<ValueType>());
}
result.addChoice(std::move(choice));
newState->setHeuristicValues(state, probability, storm::utility::one<ValueType>());
} else {
// Failure is due to "normal" BE failure
// Set failure rate according to activation
@ -174,7 +171,6 @@ namespace storm {
STORM_LOG_ASSERT(!storm::utility::isZero(rate), "Rate is 0.");
choice.addProbability(newStateId, rate);
STORM_LOG_TRACE("Added transition to " << newStateId << " with " << (isActive ? "active" : "passive") << " failure rate " << rate);
newState->setHeuristicValues(state, rate, choice.getTotalMass());
}
++currentFailable;

src/modelchecker/dft/DFTModelChecker.cpp (58 changed lines)

@ -17,13 +17,13 @@ namespace storm {
template<typename ValueType>
void DFTModelChecker<ValueType>::check(storm::storage::DFT<ValueType> const& origDft, std::shared_ptr<const storm::logic::Formula> const& formula, bool symred, bool allowModularisation, bool enableDC, double approximationError) {
// Initialize
this->buildingTime = std::chrono::duration<double>::zero();
this->explorationTime = std::chrono::duration<double>::zero();
this->buildingTime = std::chrono::duration<double>::zero();
this->bisimulationTime = std::chrono::duration<double>::zero();
this->modelCheckingTime = std::chrono::duration<double>::zero();
this->totalTime = std::chrono::duration<double>::zero();
this->approximationError = approximationError;
std::chrono::high_resolution_clock::time_point totalStart = std::chrono::high_resolution_clock::now();
totalStart = std::chrono::high_resolution_clock::now();
// Optimizing DFT
storm::storage::DFT<ValueType> dft = origDft.optimize();
@ -100,6 +100,8 @@ namespace storm {
ValueType result = storm::utility::zero<ValueType>();
int limK = invResults ? -1 : nrM+1;
int chK = invResults ? -1 : 1;
// WARNING: there is a bug for computing permutations with more than 32 elements
STORM_LOG_ASSERT(res.size() < 32, "Permutations work only for < 32 elements");
for(int cK = nrK; cK != limK; cK += chK ) {
STORM_LOG_ASSERT(cK >= 0, "ck negative.");
size_t permutation = smallestIntWithNBitsSet(static_cast<size_t>(cK));
@ -132,7 +134,7 @@ namespace storm {
template<typename ValueType>
typename DFTModelChecker<ValueType>::dft_result DFTModelChecker<ValueType>::checkDFT(storm::storage::DFT<ValueType> const& dft, std::shared_ptr<const storm::logic::Formula> const& formula, bool symred, bool enableDC, double approximationError) {
std::chrono::high_resolution_clock::time_point buildingStart = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::time_point checkingStart = std::chrono::high_resolution_clock::now();
// Find symmetries
std::map<size_t, std::vector<std::vector<size_t>>> emptySymmetry;
@ -143,35 +145,38 @@ namespace storm {
STORM_LOG_INFO("Found " << symmetries.groups.size() << " symmetries.");
STORM_LOG_TRACE("Symmetries: " << std::endl << symmetries);
}
std::chrono::high_resolution_clock::time_point buildingEnd = std::chrono::high_resolution_clock::now();
buildingTime += buildingEnd - buildingStart;
if (approximationError > 0.0) {
// Comparator for checking the error of the approximation
storm::utility::ConstantsComparator<ValueType> comparator;
// Build approximate Markov Automata for lower and upper bound
double currentApproximationError = approximationError;
approximation_result approxResult = std::make_pair(storm::utility::zero<ValueType>(), storm::utility::zero<ValueType>());
std::chrono::high_resolution_clock::time_point explorationStart;
std::shared_ptr<storm::models::sparse::Model<ValueType>> model;
storm::builder::ExplicitDFTModelBuilderApprox<ValueType> builder(dft, symmetries, enableDC);
typename storm::builder::ExplicitDFTModelBuilderApprox<ValueType>::LabelOptions labeloptions; // TODO initialize this with the formula
bool probabilityFormula = formula->isProbabilityOperatorFormula();
STORM_LOG_ASSERT((formula->isTimeOperatorFormula() && !probabilityFormula) || (!formula->isTimeOperatorFormula() && probabilityFormula), "Probability formula not initialized correctly");
size_t iteration = 0;
do {
// Iteratively build finer models
explorationStart = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::time_point explorationStart = std::chrono::high_resolution_clock::now();
STORM_LOG_INFO("Building model...");
// TODO Matthias refine model using existing model and MC results
currentApproximationError = pow(0.1, iteration) * approximationError;
builder.buildModel(labeloptions, iteration < 1, iteration);
builder.buildModel(labeloptions, iteration, approximationError);
std::chrono::high_resolution_clock::time_point explorationEnd = std::chrono::high_resolution_clock::now();
explorationTime = explorationEnd - explorationStart;
// TODO Matthias: possible to do bisimulation on approximated model and not on concrete one?
// Build model for lower bound
STORM_LOG_INFO("Getting model for lower bound...");
model = builder.getModelApproximation(true);
explorationTime += std::chrono::high_resolution_clock::now() - explorationStart;
model = builder.getModelApproximation(probabilityFormula ? false : true);
// We only output the info from the lower bound as the info for the upper bound is the same
STORM_LOG_INFO("No. states: " << model->getNumberOfStates());
STORM_LOG_INFO("No. transitions: " << model->getNumberOfTransitions());
buildingTime += std::chrono::high_resolution_clock::now() - explorationEnd;
// Check lower bound
std::unique_ptr<storm::modelchecker::CheckResult> result = checkModel(model, formula);
result->filter(storm::modelchecker::ExplicitQualitativeCheckResult(model->getInitialStates()));
@ -181,9 +186,9 @@ namespace storm {
// Build model for upper bound
STORM_LOG_INFO("Getting model for upper bound...");
explorationStart = std::chrono::high_resolution_clock::now();
model = builder.getModelApproximation(false);
explorationTime += std::chrono::high_resolution_clock::now() - explorationStart;
explorationEnd = std::chrono::high_resolution_clock::now();
model = builder.getModelApproximation(probabilityFormula ? true : false);
buildingTime += std::chrono::high_resolution_clock::now() - explorationEnd;
// Check upper bound
result = checkModel(model, formula);
result->filter(storm::modelchecker::ExplicitQualitativeCheckResult(model->getInitialStates()));
@ -193,8 +198,10 @@ namespace storm {
++iteration;
STORM_LOG_INFO("Result after iteration " << iteration << ": (" << std::setprecision(10) << approxResult.first << ", " << approxResult.second << ")");
totalTime = std::chrono::high_resolution_clock::now() - totalStart;
printTimings();
STORM_LOG_THROW(!storm::utility::isInfinity<ValueType>(approxResult.first) && !storm::utility::isInfinity<ValueType>(approxResult.second), storm::exceptions::NotSupportedException, "Approximation does not work if result might be infinity.");
} while (!isApproximationSufficient(approxResult.first, approxResult.second, approximationError));
} while (!isApproximationSufficient(approxResult.first, approxResult.second, approximationError, probabilityFormula));
STORM_LOG_INFO("Finished approximation after " << iteration << " iteration" << (iteration > 1 ? "s." : "."));
return approxResult;
@ -203,10 +210,10 @@ namespace storm {
STORM_LOG_INFO("Building Model...");
std::shared_ptr<storm::models::sparse::Model<ValueType>> model;
// TODO Matthias: use only one builder if everything works again
if (approximationError >= 0.0) {
if (storm::settings::getModule<storm::settings::modules::DFTSettings>().isApproximationErrorSet()) {
storm::builder::ExplicitDFTModelBuilderApprox<ValueType> builder(dft, symmetries, enableDC);
typename storm::builder::ExplicitDFTModelBuilderApprox<ValueType>::LabelOptions labeloptions; // TODO initialize this with the formula
builder.buildModel(labeloptions, true);
builder.buildModel(labeloptions, 0);
model = builder.getModel();
} else {
storm::builder::ExplicitDFTModelBuilder<ValueType> builder(dft, symmetries, enableDC);
@ -216,7 +223,7 @@ namespace storm {
//model->printModelInformationToStream(std::cout);
STORM_LOG_INFO("No. states (Explored): " << model->getNumberOfStates());
STORM_LOG_INFO("No. transitions (Explored): " << model->getNumberOfTransitions());
explorationTime += std::chrono::high_resolution_clock::now() - buildingEnd;
explorationTime += std::chrono::high_resolution_clock::now() - checkingStart;
// Model checking
std::unique_ptr<storm::modelchecker::CheckResult> result = checkModel(model, formula);
@ -248,20 +255,25 @@ namespace storm {
}
template<typename ValueType>
bool DFTModelChecker<ValueType>::isApproximationSufficient(ValueType lowerBound, ValueType upperBound, double approximationError) {
bool DFTModelChecker<ValueType>::isApproximationSufficient(ValueType lowerBound, ValueType upperBound, double approximationError, bool relative) {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Approximation works only for double.");
}
template<>
bool DFTModelChecker<double>::isApproximationSufficient(double lowerBound, double upperBound, double approximationError) {
return upperBound - lowerBound <= approximationError * (lowerBound + upperBound) / 2;
bool DFTModelChecker<double>::isApproximationSufficient(double lowerBound, double upperBound, double approximationError, bool relative) {
STORM_LOG_THROW(!std::isnan(lowerBound) && !std::isnan(upperBound), storm::exceptions::NotSupportedException, "Approximation does not work if result is NaN.");
if (relative) {
return upperBound - lowerBound <= approximationError;
} else {
return upperBound - lowerBound <= approximationError * (lowerBound + upperBound) / 2;
}
}
template<typename ValueType>
void DFTModelChecker<ValueType>::printTimings(std::ostream& os) {
os << "Times:" << std::endl;
os << "Building:\t" << buildingTime.count() << std::endl;
os << "Exploration:\t" << explorationTime.count() << std::endl;
os << "Building:\t" << buildingTime.count() << std::endl;
os << "Bisimulation:\t" << bisimulationTime.count() << std::endl;
os << "Modelchecking:\t" << modelCheckingTime.count() << std::endl;
os << "Total:\t\t" << totalTime.count() << std::endl;

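For orientation: the reworked termination check in DFTModelChecker compares the gap between the lower and upper bound either against the plain approximation error (relative == true, which the checker passes for probability formulas) or against the error scaled by the mean of both bounds. A self-contained sketch mirroring that comparison (standalone code, not part of the commit):

#include <iostream>

bool isApproximationSufficient(double lowerBound, double upperBound, double approximationError, bool relative) {
    if (relative) {
        // Absolute gap, suitable when the result is already a probability in [0,1].
        return upperBound - lowerBound <= approximationError;
    }
    // Gap measured relative to the mean of both bounds.
    return upperBound - lowerBound <= approximationError * (lowerBound + upperBound) / 2;
}

int main() {
    std::cout << std::boolalpha
              << isApproximationSufficient(9.8, 10.2, 0.05, false) << std::endl  // true:  0.4 <= 0.05 * 10
              << isApproximationSufficient(9.8, 10.2, 0.05, true) << std::endl;  // false: 0.4 >  0.05
    return 0;
}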
src/modelchecker/dft/DFTModelChecker.h (7 changed lines)

@ -61,6 +61,7 @@ namespace storm {
std::chrono::duration<double> bisimulationTime = std::chrono::duration<double>::zero();
std::chrono::duration<double> modelCheckingTime = std::chrono::duration<double>::zero();
std::chrono::duration<double> totalTime = std::chrono::duration<double>::zero();
std::chrono::high_resolution_clock::time_point totalStart;
// Model checking result
dft_result checkResult;
@ -107,15 +108,17 @@ namespace storm {
/*!
* Checks if the computed approximation is sufficient, i.e.
* upperBound - lowerBound <= approximationError * mean(upperBound, lowerBound).
* upperBound - lowerBound <= approximationError * mean(lowerBound, upperBound).
*
* @param lowerBound The lower bound on the result.
* @param upperBound The upper bound on the result.
* @param approximationError The allowed error for approximating.
* @param relative Flag indicating if the error should be relative to 1 or to the mean of lower and upper bound.
*
* @return True, if the approximation is sufficient.
*/
bool isApproximationSufficient(ValueType lowerBound, ValueType upperBound, double approximationError);
bool isApproximationSufficient(ValueType lowerBound, ValueType upperBound, double approximationError, bool relative);
};
}

src/settings/modules/DFTSettings.cpp (6 changed lines)

@ -40,7 +40,7 @@ namespace storm {
this->addOption(storm::settings::OptionBuilder(moduleName, modularisationOptionName, false, "Use modularisation (not applicable for expected time).").build());
this->addOption(storm::settings::OptionBuilder(moduleName, disableDCOptionName, false, "Disable Dont Care propagation.").build());
this->addOption(storm::settings::OptionBuilder(moduleName, approximationErrorOptionName, false, "Approximation error allowed.").setShortName(approximationErrorOptionShortName).addArgument(storm::settings::ArgumentBuilder::createDoubleArgument("error", "The relative approximation error to use.").addValidationFunctionDouble(storm::settings::ArgumentValidators::doubleGreaterValidatorIncluding(0.0)).build()).build());
this->addOption(storm::settings::OptionBuilder(moduleName, approximationHeuristicOptionName, false, "Set the heuristic used for approximation.").addArgument(storm::settings::ArgumentBuilder::createStringArgument("heuristic", "Sets which heuristic is used for approximation. Must be in {depth, rateratio}. Default is").setDefaultValueString("depth").addValidationFunctionString(storm::settings::ArgumentValidators::stringInListValidator({"depth", "rateratio"})).build()).build());
this->addOption(storm::settings::OptionBuilder(moduleName, approximationHeuristicOptionName, false, "Set the heuristic used for approximation.").addArgument(storm::settings::ArgumentBuilder::createStringArgument("heuristic", "Sets which heuristic is used for approximation. Must be in {depth, probability}. Default is").setDefaultValueString("depth").addValidationFunctionString(storm::settings::ArgumentValidators::stringInListValidator({"depth", "rateratio"})).build()).build());
this->addOption(storm::settings::OptionBuilder(moduleName, propExpectedTimeOptionName, false, "Compute expected time of system failure.").setShortName(propExpectedTimeOptionShortName).build());
this->addOption(storm::settings::OptionBuilder(moduleName, propProbabilityOptionName, false, "Compute probability of system failure.").build());
this->addOption(storm::settings::OptionBuilder(moduleName, propTimeBoundOptionName, false, "Compute probability of system failure up to given timebound.").addArgument(storm::settings::ArgumentBuilder::createDoubleArgument("time", "The timebound to use.").addValidationFunctionDouble(storm::settings::ArgumentValidators::doubleGreaterValidatorExcluding(0.0)).build()).build());
@ -87,8 +87,8 @@ namespace storm {
std::string heuristicAsString = this->getOption(approximationHeuristicOptionName).getArgumentByName("heuristic").getValueAsString();
if (heuristicAsString == "depth") {
return storm::builder::ApproximationHeuristic::DEPTH;
} else if (heuristicAsString == "rateratio") {
return storm::builder::ApproximationHeuristic::RATERATIO;
} else if (heuristicAsString == "probability") {
return storm::builder::ApproximationHeuristic::PROBABILITY;
}
STORM_LOG_THROW(false, storm::exceptions::IllegalArgumentValueException, "Illegal value '" << heuristicAsString << "' set as heuristic for approximation.");
}

src/storage/BucketPriorityQueue.cpp (203 changed lines)

@ -0,0 +1,203 @@
#include "src/storage/BucketPriorityQueue.h"
#include "src/utility/macros.h"
#include "src/adapters/CarlAdapter.h"
#include <cmath>
namespace storm {
namespace storage {
template<typename ValueType>
BucketPriorityQueue<ValueType>::BucketPriorityQueue(size_t nrBuckets, double lowerValue, double ratio) : lowerValue(lowerValue), logBase(std::log(ratio)), nrBuckets(nrBuckets), nrUnsortedItems(0), buckets(nrBuckets), currentBucket(nrBuckets) {
compare = ([this](HeuristicPointer a, HeuristicPointer b) {
return *a < *b;
});
}
template<typename ValueType>
void BucketPriorityQueue<ValueType>::fix() {
if (currentBucket < nrBuckets && nrUnsortedItems > buckets[currentBucket].size() / 10) {
// Fix current bucket
std::make_heap(buckets[currentBucket].begin(), buckets[currentBucket].end(), compare);
nrUnsortedItems = 0;
}
}
template<typename ValueType>
bool BucketPriorityQueue<ValueType>::empty() const {
return currentBucket == nrBuckets && immediateBucket.empty();
}
template<typename ValueType>
std::size_t BucketPriorityQueue<ValueType>::size() const {
size_t size = immediateBucket.size();
for (size_t i = currentBucket; i < nrBuckets; ++i) {
size += buckets[i].size();
}
return size;
}
template<typename ValueType>
typename BucketPriorityQueue<ValueType>::HeuristicPointer const& BucketPriorityQueue<ValueType>::top() const {
if (!immediateBucket.empty()) {
return immediateBucket.back();
}
STORM_LOG_ASSERT(!empty(), "BucketPriorityQueue is empty");
return buckets[currentBucket].front();
}
template<typename ValueType>
void BucketPriorityQueue<ValueType>::push(HeuristicPointer const& item) {
if (item->isExpand()) {
immediateBucket.push_back(item);
return;
}
size_t bucket = getBucket(item->getPriority());
if (bucket < currentBucket) {
currentBucket = bucket;
nrUnsortedItems = 0;
}
buckets[bucket].push_back(item);
if (bucket == currentBucket) {
// Insert in first bucket
if (AUTOSORT) {
std::push_heap(buckets[currentBucket].begin(), buckets[currentBucket].end(), compare);
} else {
++nrUnsortedItems;
}
}
}
template<typename ValueType>
void BucketPriorityQueue<ValueType>::update(HeuristicPointer const& item, double oldPriority) {
STORM_LOG_ASSERT(!item->isExpand(), "Item is marked for expansion");
size_t newBucket = getBucket(item->getPriority());
size_t oldBucket = getBucket(oldPriority);
if (oldBucket == newBucket) {
if (currentBucket < newBucket) {
// No change as the bucket is not sorted yet
} else {
if (AUTOSORT) {
// Sort first bucket
fix();
} else {
++nrUnsortedItems;
}
}
} else {
// Move to new bucket
STORM_LOG_ASSERT(newBucket < oldBucket, "Update must not move the item to a higher bucket");
if (newBucket < currentBucket) {
currentBucket = newBucket;
nrUnsortedItems = 0;
}
// Remove old entry by swap-and-pop
if (buckets[oldBucket].size() >= 2) {
// Find old index by linear search
// Note: using a map to remember the index was not efficient
size_t oldIndex = 0;
for ( ; oldIndex < buckets[oldBucket].size(); ++oldIndex) {
if (buckets[oldBucket][oldIndex]->getId() == item->getId()) {
break;
}
}
STORM_LOG_ASSERT(oldIndex < buckets[oldBucket].size(), "Id " << item->getId() << " not found");
std::iter_swap(buckets[oldBucket].begin() + oldIndex, buckets[oldBucket].end() - 1);
}
buckets[oldBucket].pop_back();
// Insert new element
buckets[newBucket].push_back(item);
if (newBucket == currentBucket) {
if (AUTOSORT) {
// Sort in first bucket
std::push_heap(buckets[currentBucket].begin(), buckets[currentBucket].end(), compare);
} else {
++nrUnsortedItems;
}
}
}
}
template<typename ValueType>
void BucketPriorityQueue<ValueType>::pop() {
if (!immediateBucket.empty()) {
immediateBucket.pop_back();
return;
}
STORM_LOG_ASSERT(!empty(), "BucketPriorityQueue is empty");
std::pop_heap(buckets[currentBucket].begin(), buckets[currentBucket].end(), compare);
buckets[currentBucket].pop_back();
if (buckets[currentBucket].empty()) {
// Find next bucket with elements
for ( ; currentBucket < nrBuckets; ++currentBucket) {
if (!buckets[currentBucket].empty()) {
nrUnsortedItems = buckets[currentBucket].size();
if (AUTOSORT) {
fix();
}
break;
}
}
}
}
template<typename ValueType>
typename BucketPriorityQueue<ValueType>::HeuristicPointer BucketPriorityQueue<ValueType>::popTop() {
HeuristicPointer item = top();
pop();
return item;
}
template<typename ValueType>
size_t BucketPriorityQueue<ValueType>::getBucket(double priority) const {
STORM_LOG_ASSERT(priority >= lowerValue, "Priority " << priority << " is too low");
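// Logarithmic bucketing (for ratio > 1): bucket i covers priorities in [lowerValue + ratio^i, lowerValue + ratio^(i+1)), clamped to the last bucket.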
size_t newBucket = std::log(priority - lowerValue) / logBase;
if (newBucket >= nrBuckets) {
newBucket = nrBuckets - 1;
}
if (!HIGHER) {
newBucket = nrBuckets-1 - newBucket;
}
//std::cout << "get Bucket: " << priority << ", " << newBucket << std::endl;
STORM_LOG_ASSERT(newBucket < nrBuckets, "Priority " << priority << " is too high");
return newBucket;
}
template<typename ValueType>
void BucketPriorityQueue<ValueType>::print(std::ostream& out) const {
out << "Bucket priority queue with size " << buckets.size() << ", lower value: " << lowerValue << " and logBase: " << logBase << std::endl;
out << "Immediate bucket: ";
for (HeuristicPointer heuristic : immediateBucket) {
out << heuristic->getId() << ", ";
}
out << std::endl;
out << "Current bucket (" << currentBucket << ") has " << nrUnsortedItems << " unsorted items" << std::endl;
for (size_t bucket = 0; bucket < buckets.size(); ++bucket) {
if (!buckets[bucket].empty()) {
out << "Bucket " << bucket << ":" << std::endl;
for (HeuristicPointer heuristic : buckets[bucket]) {
out << "\t" << heuristic->getId() << ": " << heuristic->getPriority() << std::endl;
}
}
}
}
template<typename ValueType>
void BucketPriorityQueue<ValueType>::printSizes(std::ostream& out) const {
out << "Bucket sizes: " << immediateBucket.size() << " | ";
for (size_t bucket = 0; bucket < buckets.size(); ++bucket) {
out << buckets[bucket].size() << " ";
}
out << std::endl;
}
// Template instantiations
template class BucketPriorityQueue<double>;
#ifdef STORM_HAVE_CARL
template class BucketPriorityQueue<storm::RationalFunction>;
#endif
}
}
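The bucket index computation can be checked in isolation. Below is a minimal, self-contained sketch of the mapping implemented by getBucket for the HIGHER = true orientation; the helper name, parameter values, and driver are made up for illustration:

#include <cmath>
#include <cstddef>
#include <initializer_list>
#include <iostream>

// Same mapping as BucketPriorityQueue::getBucket: priority p lands in bucket
// floor(log(p - lowerValue) / log(ratio)), clamped to the last bucket.
std::size_t bucketFor(double priority, double lowerValue, double ratio, std::size_t nrBuckets) {
    std::size_t bucket = static_cast<std::size_t>(std::log(priority - lowerValue) / std::log(ratio));
    return bucket >= nrBuckets ? nrBuckets - 1 : bucket;
}

int main() {
    // With lowerValue = 0 and ratio = 2, each bucket spans a doubling of the priority:
    // 1.5 -> bucket 0, 3 -> 1, 6 -> 2, 12 -> 3.
    for (double p : {1.5, 3.0, 6.0, 12.0}) {
        std::cout << p << " -> " << bucketFor(p, 0.0, 2.0, 16) << std::endl;
    }
    return 0;
}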

73
src/storage/BucketPriorityQueue.h

@ -0,0 +1,73 @@
#ifndef STORM_STORAGE_BUCKETPRIORITYQUEUE_H_
#define STORM_STORAGE_BUCKETPRIORITYQUEUE_H_
#include "src/builder/DftExplorationHeuristic.h"
#include <algorithm>
#include <functional>
#include <unordered_map>
#include <vector>
namespace storm {
namespace storage {
template<typename ValueType>
class BucketPriorityQueue {
using HeuristicPointer = std::shared_ptr<storm::builder::DFTExplorationHeuristicProbability<ValueType>>;
public:
explicit BucketPriorityQueue(size_t nrBuckets, double lowerValue, double ratio);
void fix();
bool empty() const;
std::size_t size() const;
HeuristicPointer const& top() const;
void push(HeuristicPointer const& item);
void update(HeuristicPointer const& item, double oldPriority);
void pop();
HeuristicPointer popTop();
void print(std::ostream& out) const;
void printSizes(std::ostream& out) const;
private:
size_t getBucket(double priority) const;
const double lowerValue;
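// Tuning flags: HIGHER selects the orientation of the bucket indexing (see getBucket); AUTOSORT keeps the current bucket heap-sorted on every push/update instead of lazily via fix().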
const bool HIGHER = true;
const bool AUTOSORT = false;
const double logBase;
const size_t nrBuckets;
size_t nrUnsortedItems;
// List of buckets
std::vector<std::vector<HeuristicPointer>> buckets;
// Bucket containing all items which should be considered immediately
std::vector<HeuristicPointer> immediateBucket;
// Index of first bucket which contains items
size_t currentBucket;
std::function<bool(HeuristicPointer, HeuristicPointer)> compare;
};
}
}
#endif // STORM_STORAGE_BUCKETPRIORITYQUEUE_H_

16
src/storage/bisimulation/BisimulationDecomposition.cpp

@ -230,25 +230,25 @@ namespace storm {
template<typename ModelType, typename BlockDataType>
void BisimulationDecomposition<ModelType, BlockDataType>::performPartitionRefinement() {
// Insert all blocks into the splitter queue as a (potential) splitter.
std::deque<Block<BlockDataType>*> splitterQueue;
std::for_each(partition.getBlocks().begin(), partition.getBlocks().end(), [&] (std::unique_ptr<Block<BlockDataType>> const& block) { block->data().setSplitter(); splitterQueue.push_back(block.get()); } );
// Insert all blocks into the splitter vector as a (potential) splitter.
std::vector<Block<BlockDataType>*> splitterVector;
std::for_each(partition.getBlocks().begin(), partition.getBlocks().end(), [&] (std::unique_ptr<Block<BlockDataType>> const& block) { block->data().setSplitter(); splitterVector.push_back(block.get()); } );
// Then perform the actual splitting until there are no more splitters.
uint_fast64_t iterations = 0;
while (!splitterQueue.empty()) {
while (!splitterVector.empty()) {
++iterations;
// Get and prepare the next splitter.
// Sort the splitters according to their sizes to prefer small splitters. That is just a heuristic, but
// tends to work well.
std::sort(splitterQueue.begin(), splitterQueue.end(), [] (Block<BlockDataType> const* b1, Block<BlockDataType> const* b2) { return b1->getNumberOfStates() < b2->getNumberOfStates(); } );
Block<BlockDataType>* splitter = splitterQueue.front();
splitterQueue.pop_front();
std::sort(splitterVector.begin(), splitterVector.end(), [] (Block<BlockDataType> const* b1, Block<BlockDataType> const* b2) { return b1->getNumberOfStates() > b2->getNumberOfStates(); } );
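// Sorting in descending order lets us take the smallest splitter from the back of the vector, avoiding removal at the front.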
Block<BlockDataType>* splitter = splitterVector.back();
splitterVector.pop_back();
splitter->data().setSplitter(false);
// Now refine the partition using the current splitter.
refinePartitionBasedOnSplitter(*splitter, splitterQueue);
refinePartitionBasedOnSplitter(*splitter, splitterVector);
}
}

8
src/storage/bisimulation/BisimulationDecomposition.h

@ -1,7 +1,7 @@
#ifndef STORM_STORAGE_BISIMULATIONDECOMPOSITION_H_
#define STORM_STORAGE_BISIMULATIONDECOMPOSITION_H_
#include <deque>
#include <vector>
#include "src/settings/SettingsManager.h"
#include "src/settings/modules/BisimulationSettings.h"
@ -219,12 +219,12 @@ namespace storm {
/*!
* Refines the partition by considering the given splitter. All blocks that become potential splitters
* because of this refinement, are marked as splitters and inserted into the splitter queue.
* because of this refinement, are marked as splitters and inserted into the splitter vector.
*
* @param splitter The splitter to use.
* @param splitterQueue The queue into which to insert the newly discovered potential splitters.
* @param splitterVector The vector into which to insert the newly discovered potential splitters.
*/
virtual void refinePartitionBasedOnSplitter(bisimulation::Block<BlockDataType>& splitter, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue) = 0;
virtual void refinePartitionBasedOnSplitter(bisimulation::Block<BlockDataType>& splitter, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector) = 0;
/*!
* Builds the quotient model based on the previously computed equivalence classes (stored in the blocks

28
src/storage/bisimulation/DeterministicModelBisimulationDecomposition.cpp

@ -157,7 +157,7 @@ namespace storm {
}
template<typename ModelType>
void DeterministicModelBisimulationDecomposition<ModelType>::refinePredecessorBlocksOfSplitterStrong(std::list<Block<BlockDataType>*> const& predecessorBlocks, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue) {
void DeterministicModelBisimulationDecomposition<ModelType>::refinePredecessorBlocksOfSplitterStrong(std::list<Block<BlockDataType>*> const& predecessorBlocks, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector) {
for (auto block : predecessorBlocks) {
STORM_LOG_TRACE("Refining predecessor " << block->getId() << " of splitter");
@ -176,15 +176,15 @@ namespace storm {
[this] (storm::storage::sparse::state_type state1, storm::storage::sparse::state_type state2) {
return this->comparator.isLess(getProbabilityToSplitter(state1), getProbabilityToSplitter(state2));
},
[&splitterQueue] (Block<BlockDataType>& block) {
splitterQueue.emplace_back(&block); block.data().setSplitter();
[&splitterVector] (Block<BlockDataType>& block) {
splitterVector.emplace_back(&block); block.data().setSplitter();
});
// If the predecessor block was split, we need to insert it into the splitter queue if it is not already
// If the predecessor block was split, we need to insert it into the splitter vector if it is not already
// marked as a splitter.
if (split && !blockToRefineProbabilistically->data().splitter()) {
splitterQueue.emplace_back(blockToRefineProbabilistically);
splitterVector.emplace_back(blockToRefineProbabilistically);
blockToRefineProbabilistically->data().setSplitter();
}
@ -378,7 +378,7 @@ namespace storm {
}
template<typename ModelType>
void DeterministicModelBisimulationDecomposition<ModelType>::refinePredecessorBlockOfSplitterWeak(bisimulation::Block<BlockDataType>& block, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue) {
void DeterministicModelBisimulationDecomposition<ModelType>::refinePredecessorBlockOfSplitterWeak(bisimulation::Block<BlockDataType>& block, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector) {
// First, we need to turn the one-step probabilities to go to the splitter to the conditional probabilities
// for all non-silent states.
computeConditionalProbabilitiesForNonSilentStates(block);
@ -395,12 +395,12 @@ namespace storm {
[&weakStateLabels,&block,originalBlockIndex,this] (storm::storage::sparse::state_type state1, storm::storage::sparse::state_type state2) {
return weakStateLabels[this->partition.getPosition(state1) - originalBlockIndex] < weakStateLabels[this->partition.getPosition(state2) - originalBlockIndex];
},
[this, &splitterQueue] (bisimulation::Block<BlockDataType>& block) {
[this, &splitterVector] (bisimulation::Block<BlockDataType>& block) {
updateSilentProbabilitiesBasedOnTransitions(block);
// Insert the new block as a splitter.
block.data().setSplitter();
splitterQueue.emplace_back(&block);
splitterVector.emplace_back(&block);
});
// If the block was split, we also update the silent probabilities.
@ -410,16 +410,16 @@ namespace storm {
if (!block.data().splitter()) {
// Insert the new block as a splitter.
block.data().setSplitter();
splitterQueue.emplace_back(&block);
splitterVector.emplace_back(&block);
}
}
}
template<typename ModelType>
void DeterministicModelBisimulationDecomposition<ModelType>::refinePredecessorBlocksOfSplitterWeak(bisimulation::Block<BlockDataType>& splitter, std::list<bisimulation::Block<BlockDataType>*> const& predecessorBlocks, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue) {
void DeterministicModelBisimulationDecomposition<ModelType>::refinePredecessorBlocksOfSplitterWeak(bisimulation::Block<BlockDataType>& splitter, std::list<bisimulation::Block<BlockDataType>*> const& predecessorBlocks, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector) {
for (auto block : predecessorBlocks) {
if (*block != splitter) {
refinePredecessorBlockOfSplitterWeak(*block, splitterQueue);
refinePredecessorBlockOfSplitterWeak(*block, splitterVector);
} else {
// If the block to split is the splitter itself, we must not do any splitting here.
}
@ -439,7 +439,7 @@ namespace storm {
}
template<typename ModelType>
void DeterministicModelBisimulationDecomposition<ModelType>::refinePartitionBasedOnSplitter(bisimulation::Block<BlockDataType>& splitter, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue) {
void DeterministicModelBisimulationDecomposition<ModelType>::refinePartitionBasedOnSplitter(bisimulation::Block<BlockDataType>& splitter, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector) {
STORM_LOG_TRACE("Refining partition based on splitter " << splitter.getId());
// The outline of the refinement is as follows.
@ -513,7 +513,7 @@ namespace storm {
if (this->options.getType() == BisimulationType::Strong || this->model.getType() == storm::models::ModelType::Ctmc) {
// In the case of CTMCs and weak bisimulation, we still call the "splitStrong" method, but we already have
// taken care of not adding the splitter to the predecessor blocks, so this is safe.
refinePredecessorBlocksOfSplitterStrong(predecessorBlocks, splitterQueue);
refinePredecessorBlocksOfSplitterStrong(predecessorBlocks, splitterVector);
} else {
// If the splitter is a predecessor of itself, we can use the computed probabilities to update the silent
// probabilities.
@ -521,7 +521,7 @@ namespace storm {
updateSilentProbabilitiesBasedOnProbabilitiesToSplitter(splitter);
}
refinePredecessorBlocksOfSplitterWeak(splitter, predecessorBlocks, splitterQueue);
refinePredecessorBlocksOfSplitterWeak(splitter, predecessorBlocks, splitterVector);
}
}

8
src/storage/bisimulation/DeterministicModelBisimulationDecomposition.h

@ -39,11 +39,11 @@ namespace storm {
virtual void buildQuotient() override;
virtual void refinePartitionBasedOnSplitter(bisimulation::Block<BlockDataType>& splitter, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue) override;
virtual void refinePartitionBasedOnSplitter(bisimulation::Block<BlockDataType>& splitter, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector) override;
private:
// Refines the predecessor blocks wrt. strong bisimulation.
void refinePredecessorBlocksOfSplitterStrong(std::list<bisimulation::Block<BlockDataType>*> const& predecessorBlocks, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue);
void refinePredecessorBlocksOfSplitterStrong(std::list<bisimulation::Block<BlockDataType>*> const& predecessorBlocks, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector);
/*!
* Performs the necessary steps to compute a weak bisimulation on a DTMC.
@ -99,10 +99,10 @@ namespace storm {
void updateSilentProbabilitiesBasedOnTransitions(bisimulation::Block<BlockDataType>& block);
// Refines the predecessor blocks of the splitter wrt. weak bisimulation in DTMCs.
void refinePredecessorBlocksOfSplitterWeak(bisimulation::Block<BlockDataType>& splitter, std::list<bisimulation::Block<BlockDataType>*> const& predecessorBlocks, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue);
void refinePredecessorBlocksOfSplitterWeak(bisimulation::Block<BlockDataType>& splitter, std::list<bisimulation::Block<BlockDataType>*> const& predecessorBlocks, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector);
// Refines the given block wrt to weak bisimulation in DTMCs.
void refinePredecessorBlockOfSplitterWeak(bisimulation::Block<BlockDataType>& block, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue);
void refinePredecessorBlockOfSplitterWeak(bisimulation::Block<BlockDataType>& block, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector);
// Converts the one-step probabilities of going into the splitter into the conditional probabilities needed
// for weak bisimulation (on DTMCs).

14
src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.cpp

@ -198,7 +198,7 @@ namespace storm {
}
template<typename ModelType>
void NondeterministicModelBisimulationDecomposition<ModelType>::updateQuotientDistributionsOfPredecessors(Block<BlockDataType> const& newBlock, Block<BlockDataType> const& oldBlock, std::deque<Block<BlockDataType>*>& splitterQueue) {
void NondeterministicModelBisimulationDecomposition<ModelType>::updateQuotientDistributionsOfPredecessors(Block<BlockDataType> const& newBlock, Block<BlockDataType> const& oldBlock, std::vector<Block<BlockDataType>*>& splitterVector) {
uint_fast64_t lastState = 0;
bool lastStateInitialized = false;
@ -220,7 +220,7 @@ namespace storm {
// If the predecessor block is not marked as to-refined, we do so now.
if (!predecessorBlock.data().splitter()) {
predecessorBlock.data().setSplitter();
splitterQueue.push_back(&predecessorBlock);
splitterVector.push_back(&predecessorBlock);
}
if (lastStateInitialized) {
@ -332,7 +332,7 @@ namespace storm {
}
template<typename ModelType>
bool NondeterministicModelBisimulationDecomposition<ModelType>::splitBlockAccordingToCurrentQuotientDistributions(Block<BlockDataType>& block, std::deque<Block<BlockDataType>*>& splitterQueue) {
bool NondeterministicModelBisimulationDecomposition<ModelType>::splitBlockAccordingToCurrentQuotientDistributions(Block<BlockDataType>& block, std::vector<Block<BlockDataType>*>& splitterVector) {
std::list<Block<BlockDataType>*> newBlocks;
bool split = this->partition.splitBlock(block,
[this] (storm::storage::sparse::state_type state1, storm::storage::sparse::state_type state2) {
@ -340,7 +340,7 @@ namespace storm {
// std::cout << state1 << " is " << (!result ? "not" : "") << " smaller than " << state2 << std::endl;
return result;
},
[this, &block, &splitterQueue, &newBlocks] (Block<BlockDataType>& newBlock) {
[this, &block, &splitterVector, &newBlocks] (Block<BlockDataType>& newBlock) {
newBlocks.push_back(&newBlock);
// this->checkBlockStable(newBlock);
@ -357,7 +357,7 @@ namespace storm {
// defer updating the quotient distributions until *after* all splits, because
// it otherwise influences the subsequent splits!
for (auto el : newBlocks) {
this->updateQuotientDistributionsOfPredecessors(*el, block, splitterQueue);
this->updateQuotientDistributionsOfPredecessors(*el, block, splitterVector);
}
// this->checkQuotientDistributions();
@ -405,14 +405,14 @@ namespace storm {
}
template<typename ModelType>
void NondeterministicModelBisimulationDecomposition<ModelType>::refinePartitionBasedOnSplitter(bisimulation::Block<BlockDataType>& splitter, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue) {
void NondeterministicModelBisimulationDecomposition<ModelType>::refinePartitionBasedOnSplitter(bisimulation::Block<BlockDataType>& splitter, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector) {
if (!possiblyNeedsRefinement(splitter)) {
return;
}
STORM_LOG_TRACE("Refining block " << splitter.getId());
splitBlockAccordingToCurrentQuotientDistributions(splitter, splitterQueue);
splitBlockAccordingToCurrentQuotientDistributions(splitter, splitterVector);
}
template class NondeterministicModelBisimulationDecomposition<storm::models::sparse::Mdp<double>>;

6
src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.h

@ -37,7 +37,7 @@ namespace storm {
virtual void buildQuotient() override;
virtual void refinePartitionBasedOnSplitter(bisimulation::Block<BlockDataType>& splitter, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue) override;
virtual void refinePartitionBasedOnSplitter(bisimulation::Block<BlockDataType>& splitter, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector) override;
virtual void initialize() override;
@ -52,7 +52,7 @@ namespace storm {
bool possiblyNeedsRefinement(bisimulation::Block<BlockDataType> const& block) const;
// Splits the given block according to the current quotient distributions.
bool splitBlockAccordingToCurrentQuotientDistributions(bisimulation::Block<BlockDataType>& block, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue);
bool splitBlockAccordingToCurrentQuotientDistributions(bisimulation::Block<BlockDataType>& block, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector);
// Retrieves whether the quotient distributions of state 1 are considered to be less than the ones of state 2.
bool quotientDistributionsLess(storm::storage::sparse::state_type state1, storm::storage::sparse::state_type state2) const;
@ -62,7 +62,7 @@ namespace storm {
// Updates the quotient distributions of the predecessors of the new block by taking the probability mass
// away from the old block.
void updateQuotientDistributionsOfPredecessors(bisimulation::Block<BlockDataType> const& newBlock, bisimulation::Block<BlockDataType> const& oldBlock, std::deque<bisimulation::Block<BlockDataType>*>& splitterQueue);
void updateQuotientDistributionsOfPredecessors(bisimulation::Block<BlockDataType> const& newBlock, bisimulation::Block<BlockDataType> const& oldBlock, std::vector<bisimulation::Block<BlockDataType>*>& splitterVector);
bool checkQuotientDistributions() const;
bool checkBlockStable(bisimulation::Block<BlockDataType> const& newBlock) const;

59
src/storage/dft/DFTState.cpp

@ -6,7 +6,7 @@ namespace storm {
namespace storage {
template<typename ValueType>
DFTState<ValueType>::DFTState(DFT<ValueType> const& dft, DFTStateGenerationInfo const& stateGenerationInfo, size_t id) : mStatus(dft.stateVectorSize()), mId(id), mPseudoState(false), mDft(dft), mStateGenerationInfo(stateGenerationInfo), exploreHeuristic() {
DFTState<ValueType>::DFTState(DFT<ValueType> const& dft, DFTStateGenerationInfo const& stateGenerationInfo, size_t id) : mStatus(dft.stateVectorSize()), mId(id), mPseudoState(false), mDft(dft), mStateGenerationInfo(stateGenerationInfo) {
// TODO Matthias: use construct()
// Initialize uses
@ -17,27 +17,15 @@ namespace storm {
this->setUses(spareId, elem->children()[0]->id());
}
for (auto elem : mDft.getBasicElements()) {
mCurrentlyNotFailableBE.push_back(elem->id());
}
// Initialize activation
propagateActivation(mDft.getTopLevelIndex());
std::vector<size_t> alwaysActiveBEs = mDft.nonColdBEs();
mCurrentlyFailableBE.insert(mCurrentlyFailableBE.end(), alwaysActiveBEs.begin(), alwaysActiveBEs.end());
// Remove always active BEs from currently not failable BEs
for (size_t id : alwaysActiveBEs) {
auto it = std::find(mCurrentlyNotFailableBE.begin(), mCurrentlyNotFailableBE.end(), id);
STORM_LOG_ASSERT(it != mCurrentlyNotFailableBE.end(), "Id not found.");
mCurrentlyNotFailableBE.erase(it);
}
sortFailableBEs();
}
template<typename ValueType>
DFTState<ValueType>::DFTState(storm::storage::BitVector const& status, DFT<ValueType> const& dft, DFTStateGenerationInfo const& stateGenerationInfo, size_t id) : mStatus(status), mId(id), mPseudoState(true), mDft(dft), mStateGenerationInfo(stateGenerationInfo), exploreHeuristic() {
DFTState<ValueType>::DFTState(storm::storage::BitVector const& status, DFT<ValueType> const& dft, DFTStateGenerationInfo const& stateGenerationInfo, size_t id) : mStatus(status), mId(id), mPseudoState(true), mDft(dft), mStateGenerationInfo(stateGenerationInfo) {
// Intentionally left empty
}
@ -46,7 +34,6 @@ namespace storm {
STORM_LOG_TRACE("Construct concrete state from pseudo state " << mDft.getStateString(mStatus, mStateGenerationInfo, mId));
// Clear information from pseudo state
mCurrentlyFailableBE.clear();
mCurrentlyNotFailableBE.clear();
mFailableDependencies.clear();
mUsedRepresentants.clear();
STORM_LOG_ASSERT(mPseudoState, "Only pseudo states can be constructed.");
@ -57,10 +44,6 @@ namespace storm {
if ((!be->isColdBasicElement() && be->canFail()) || !mDft.hasRepresentant(index) || isActive(mDft.getRepresentant(index))) {
mCurrentlyFailableBE.push_back(index);
STORM_LOG_TRACE("Currently failable: " << mDft.getBasicElement(index)->toString());
} else {
// BE currently is not failable
mCurrentlyNotFailableBE.push_back(index);
STORM_LOG_TRACE("Currently not failable: " << mDft.getBasicElement(index)->toString());
}
} else if (mDft.getElement(index)->isSpareGate()) {
// Initialize used representants
@ -69,7 +52,6 @@ namespace storm {
STORM_LOG_TRACE("Spare " << index << " uses " << useId);
}
}
sortFailableBEs();
// Initialize failable dependencies
for (size_t dependencyId : mDft.getDependencies()) {
@ -85,9 +67,7 @@ namespace storm {
template<typename ValueType>
std::shared_ptr<DFTState<ValueType>> DFTState<ValueType>::copy() const {
std::shared_ptr<DFTState<ValueType>> stateCopy = std::make_shared<storm::storage::DFTState<ValueType>>(*this);
stateCopy->exploreHeuristic = storm::builder::DFTExplorationHeuristic<ValueType>();
return stateCopy;
return std::make_shared<storm::storage::DFTState<ValueType>>(*this);
}
template<typename ValueType>
@ -205,7 +185,7 @@ namespace storm {
template<typename ValueType>
void DFTState<ValueType>::beNoLongerFailable(size_t id) {
auto it = std::find(mCurrentlyFailableBE.begin(), mCurrentlyFailableBE.end(), id);
if(it != mCurrentlyFailableBE.end()) {
if (it != mCurrentlyFailableBE.end()) {
mCurrentlyFailableBE.erase(it);
}
}
@ -239,11 +219,14 @@ namespace storm {
}
template<typename ValueType>
ValueType DFTState<ValueType>::getBERate(size_t id, bool considerPassive) const {
ValueType DFTState<ValueType>::getBERate(size_t id) const {
STORM_LOG_ASSERT(mDft.isBasicElement(id), "Element is no BE.");
if (considerPassive && mDft.hasRepresentant(id) && !isActive(mDft.getRepresentant(id))) {
if (mDft.hasRepresentant(id) && !isActive(mDft.getRepresentant(id))) {
// Return passive failure rate
return mDft.getBasicElement(id)->passiveFailureRate();
} else {
// Return active failure rate
return mDft.getBasicElement(id)->activeFailureRate();
}
}
@ -251,16 +234,7 @@ namespace storm {
template<typename ValueType>
ValueType DFTState<ValueType>::getFailableBERate(size_t index) const {
STORM_LOG_ASSERT(index < nrFailableBEs(), "Index invalid.");
return getBERate(mCurrentlyFailableBE[index], true);
}
template<typename ValueType>
ValueType DFTState<ValueType>::getNotFailableBERate(size_t index) const {
STORM_LOG_ASSERT(index < nrNotFailableBEs(), "Index invalid.");
STORM_LOG_ASSERT(storm::utility::isZero<ValueType>(mDft.getBasicElement(mCurrentlyNotFailableBE[index])->activeFailureRate()) ||
(mDft.hasRepresentant(mCurrentlyNotFailableBE[index]) && !isActive(mDft.getRepresentant(mCurrentlyNotFailableBE[index]))), "BE " << mCurrentlyNotFailableBE[index] << " can fail");
// Use active failure rate as passive failure rate is 0.
return getBERate(mCurrentlyNotFailableBE[index], false);
return getBERate(mCurrentlyFailableBE[index]);
}
template<typename ValueType>
@ -317,16 +291,11 @@ namespace storm {
if (be->isColdBasicElement() && be->canFail()) {
// Add to failable BEs
mCurrentlyFailableBE.push_back(elem);
// Remove from not failable BEs
auto it = std::find(mCurrentlyNotFailableBE.begin(), mCurrentlyNotFailableBE.end(), elem);
STORM_LOG_ASSERT(it != mCurrentlyNotFailableBE.end(), "Element " << mDft.getElement(elem)->name() << " not found.");
mCurrentlyNotFailableBE.erase(it);
}
} else if (mDft.getElement(elem)->isSpareGate() && !isActive(uses(elem))) {
propagateActivation(uses(elem));
}
}
sortFailableBEs();
}
template<typename ValueType>
@ -425,14 +394,6 @@ namespace storm {
return changed;
}
template<typename ValueType>
void DFTState<ValueType>::sortFailableBEs() {
std::sort(mCurrentlyFailableBE.begin( ), mCurrentlyFailableBE.end( ), [&](size_t const& lhs, size_t const& rhs) {
// Sort decreasing
return getBERate(rhs, true) < getBERate(lhs, true);
});
}
// Explicitly instantiate the class.
template class DFTState<double>;

53
src/storage/dft/DFTState.h

@ -27,14 +27,12 @@ namespace storm {
storm::storage::BitVector mStatus;
size_t mId;
std::vector<size_t> mCurrentlyFailableBE;
std::vector<size_t> mCurrentlyNotFailableBE;
std::vector<size_t> mFailableDependencies;
std::vector<size_t> mUsedRepresentants;
bool mPseudoState;
bool mValid = true;
const DFT<ValueType>& mDft;
const DFTStateGenerationInfo& mStateGenerationInfo;
storm::builder::DFTExplorationHeuristic<ValueType> exploreHeuristic;
public:
/**
@ -123,26 +121,6 @@ namespace storm {
return mPseudoState;
}
void setHeuristicValues(size_t depth, ValueType rate, ValueType exitRate) {
exploreHeuristic.setHeuristicValues(depth, rate, exitRate);
}
void setHeuristicValues(std::shared_ptr<storm::storage::DFTState<ValueType>> oldState, ValueType rate, ValueType exitRate) {
if (hasFailed(mDft.getTopLevelIndex()) || isFailsafe(mDft.getTopLevelIndex()) || nrFailableDependencies() > 0 || (nrFailableDependencies() == 0 && nrFailableBEs() == 0)) {
// Do not skip absorbing state or if reached by dependencies
exploreHeuristic.setNotSkip();
}
exploreHeuristic.setHeuristicValues(oldState->exploreHeuristic.getDepth() + 1, rate, exitRate);
}
bool isSkip(double approximationThreshold, storm::builder::ApproximationHeuristic heuristic) {
return exploreHeuristic.isSkip(approximationThreshold, heuristic);
}
double getPriority() const {
return exploreHeuristic.getPriority();
}
storm::storage::BitVector const& status() const {
return mStatus;
}
@ -202,15 +180,6 @@ namespace storm {
return mCurrentlyFailableBE.size();
}
/**
* Get number of currently not failable BEs. These are cold BE which can fail in the future.
*
* @return Number of not failable BEs.
*/
size_t nrNotFailableBEs() const {
return mCurrentlyNotFailableBE.size();
}
/**
* Get the failure rate of the currently failable BE on the given index.
*
@ -221,13 +190,13 @@ namespace storm {
ValueType getFailableBERate(size_t index) const;
/**
* Get the failure rate of the currently not failable BE on the given index.
* Get the current failure rate of the given BE.
*
* @param index Index of BE in list of currently not failable BEs.
* @param id Id of BE.
*
* @return Failure rate of the BE.
*/
ValueType getNotFailableBERate(size_t index) const;
ValueType getBERate(size_t id) const;
/** Get number of currently failable dependencies.
*
@ -323,22 +292,6 @@ namespace storm {
private:
void propagateActivation(size_t representativeId);
/**
* Get the failure rate of the given BE.
*
* @param id Id of BE.
* @param considerPassive Flag indicating if the passive failure rate should be returned if
* the BE currently is passive.
*
* @return Failure rate of the BE.
*/
ValueType getBERate(size_t id, bool considerPassive) const;
/*!
* Sort failable BEs in decreasing order of their active failure rate.
*/
void sortFailableBEs();
};
}

2
src/storm-dyftee.cpp

@ -120,6 +120,7 @@ int main(const int argc, const char** argv) {
parametric = generalSettings.isParametricSet();
#endif
#ifdef STORM_HAVE_Z3
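// Guard the SMT-based analysis so that builds without Z3 support still compile.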
if (dftSettings.solveWithSMT()) {
// Solve with SMT
if (parametric) {
@ -130,6 +131,7 @@ int main(const int argc, const char** argv) {
storm::utility::cleanUp();
return 0;
}
#endif
// Set min or max
bool minimal = true;

2
src/utility/bitoperations.h

@ -10,6 +10,6 @@ inline size_t smallestIntWithNBitsSet(size_t n) {
inline size_t nextBitPermutation(size_t v) {
if (v==0) return static_cast<size_t>(0);
// From https://graphics.stanford.edu/~seander/bithacks.html#NextBitPermutation
unsigned int t = (v | (v - 1)) + 1;
size_t t = (v | (v - 1)) + 1;
return t | ((((t & -t) / (v & -v)) >> 1) - 1);
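For reference, nextBitPermutation enumerates the values with the same number of set bits in increasing order. A minimal stand-alone sanity check (the copied helper and the starting value are chosen for illustration):

#include <cstddef>
#include <iostream>

// Copy of the patched nextBitPermutation from src/utility/bitoperations.h, for illustration only.
inline std::size_t nextBitPermutation(std::size_t v) {
    if (v == 0) return static_cast<std::size_t>(0);
    std::size_t t = (v | (v - 1)) + 1;
    return t | ((((t & -t) / (v & -v)) >> 1) - 1);
}

int main() {
    // Starting from 0b0011 = 3, the following values with exactly two bits set are 5, 6, 9, ...
    std::size_t v = 3;
    for (int i = 0; i < 4; ++i) {
        std::cout << v << std::endl;
        v = nextBitPermutation(v);
    }
    return 0;
}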
}