Browse Source

Reachability Rewards can now be computed in parametric DTMCs (modulo bugs)

Former-commit-id: 26ee20ef76
main
dehnert 10 years ago
parent
commit
90b0f20167
  1. 1
      src/adapters/ExplicitModelAdapter.h
  2. 13
      src/modelchecker/reachability/SparseSccModelChecker.cpp
  3. 14
      src/storage/DeterministicModelBisimulationDecomposition.cpp

1
src/adapters/ExplicitModelAdapter.h

@@ -739,7 +739,6 @@ namespace storm {
static std::vector<ValueType> buildStateRewards(std::vector<storm::prism::StateReward> const& rewards, StateInformation const& stateInformation, expressions::ExpressionEvaluation<ValueType>& eval) {
std::vector<ValueType> result(stateInformation.reachableStates.size());
for (uint_fast64_t index = 0; index < stateInformation.reachableStates.size(); index++) {
std::cout << index << ": " << *stateInformation.reachableStates[index] << std::endl;
result[index] = ValueType(0);
for (auto const& reward : rewards) {
// Add this reward to the state if the state is included in the state reward.

13
src/modelchecker/reachability/SparseSccModelChecker.cpp

@@ -161,13 +161,6 @@ namespace storm {
// Project the state reward vector to all maybe-states.
boost::optional<std::vector<ValueType>> stateRewards(maybeStates.getNumberOfSetBits());
storm::utility::vector::selectVectorValues(stateRewards.get(), maybeStates, dtmc.getStateRewardVector());
for (uint_fast64_t i = 0; i < dtmc.getStateRewardVector().size(); ++i) {
std::cout << i << ": " << dtmc.getStateRewardVector()[i] << ", ";
}
std::cout << std::endl;
std::cout << maybeStates << std::endl;
std::cout << psiStates << std::endl;
std::cout << infinityStates << std::endl;
// Determine the set of initial states of the sub-DTMC.
storm::storage::BitVector newInitialStates = dtmc.getInitialStates() % maybeStates;
@@ -516,7 +509,11 @@ namespace storm {
} else {
// If we are computing rewards, we basically scale the state reward of the state to eliminate and
// add the result to the state reward of the predecessor.
stateRewards.get()[predecessor] += storm::utility::simplify(multiplyElement->getValue() * storm::utility::pow(loopProbability, 2) * stateRewards.get()[state]);
if (hasSelfLoop) {
stateRewards.get()[predecessor] += storm::utility::simplify(multiplyFactor * storm::utility::pow(loopProbability, 2) * stateRewards.get()[state]);
} else {
stateRewards.get()[predecessor] += storm::utility::simplify(multiplyFactor * stateRewards.get()[state]);
}
}
}

14
src/storage/DeterministicModelBisimulationDecomposition.cpp

@@ -640,9 +640,6 @@ namespace storm {
// (b) the new labeling,
// (c) the new reward structures.
std::cout << model.getTransitionMatrix() << std::endl;
std::cout << model.getLabeledStates("end") << std::endl;;
// Prepare a matrix builder for (a).
storm::storage::SparseMatrixBuilder<ValueType> builder(this->size(), this->size());
@@ -742,13 +739,6 @@ namespace storm {
// If the model has state rewards, we simply copy the state reward of the representative state, because
// all states in a block are guaranteed to have the same state reward.
if (keepRewards && model.hasStateRewards()) {
std::cout << "is block absorbing? " << oldBlock.isAbsorbing() << std::endl;
std::cout << "setting reward for block " << blockIndex << " to " << model.getStateRewardVector()[representativeState] << std::endl;
std::cout << "block: " << std::endl;
for (auto element : this->blocks[blockIndex]) {
std::cout << element << ", ";
}
std::cout << std::endl;
stateRewards.get()[blockIndex] = model.getStateRewardVector()[representativeState];
}
}
@@ -761,8 +751,6 @@ namespace storm {
// Finally construct the quotient model.
this->quotient = std::shared_ptr<storm::models::AbstractDeterministicModel<ValueType>>(new ModelType(builder.build(), std::move(newLabeling), std::move(stateRewards)));
std::cout << quotient->getTransitionMatrix() << std::endl;
}
template<typename ValueType>
@@ -1237,8 +1225,6 @@ namespace storm {
this->initializeSilentProbabilities(model, partition);
}
partition.print();
return partition;
}

Loading…
Cancel
Save