
ContinuousToDiscreteTimeModelTransformer can now transform the model out-of-place as well

Branch: tempestpy_adaptions
Author: TimQu, 8 years ago
Commit: aa158f5144
  1. src/storm/storage/SparseMatrix.cpp (23)
  2. src/storm/storage/SparseMatrix.h (9)
  3. src/storm/transformer/ContinuousToDiscreteTimeModelTransformer.cpp (194)
  4. src/storm/transformer/ContinuousToDiscreteTimeModelTransformer.h (23)
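For orientation, a minimal usage sketch of the two entry points this commit distinguishes (not part of the diff; the model and formula variables are hypothetical placeholders, only the function names and signatures are taken from the changes below):

#include "storm/transformer/ContinuousToDiscreteTimeModelTransformer.h"
#include <memory>

void sketch(std::shared_ptr<storm::models::sparse::Model<double>> model,
            std::shared_ptr<storm::logic::Formula const>& formula) {
    // New out-of-place variant: the input model stays valid and the discretized model is returned.
    auto discrete = storm::transformer::transformContinuousToDiscreteModel(model, formula);
    // Existing in-place variant: the shared pointer is redirected to the new model,
    // invalidating the continuous-time one.
    storm::transformer::transformContinuousToDiscreteModelInPlace(model, formula);
}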

src/storm/storage/SparseMatrix.cpp (23)

@@ -1361,7 +1361,7 @@ namespace storm {
}
template<typename ValueType>
void SparseMatrix<ValueType>::scaleRowsInPlace(std::vector<value_type> const& factors) {
void SparseMatrix<ValueType>::scaleRowsInPlace(std::vector<ValueType> const& factors) {
STORM_LOG_ASSERT(factors.size() == this->getRowCount(), "Can not scale rows: Number of rows and number of scaling factors do not match.");
uint_fast64_t row = 0;
for (auto const& factor : factors) {
@@ -1371,7 +1371,26 @@ namespace storm {
++row;
}
}
template<typename ValueType>
void SparseMatrix<ValueType>::divideRowsInPlace(std::vector<ValueType> const& divisors) {
STORM_LOG_ASSERT(divisors.size() == this->getRowCount(), "Can not divide rows: Number of rows and number of divisors do not match.");
uint_fast64_t row = 0;
for (auto const& divisor : divisors) {
STORM_LOG_ASSERT(!storm::utility::isZero(divisor), "Can not divide row " << row << " by 0.");
for (auto& entry : getRow(row)) {
entry.setValue(entry.getValue() / divisor);
}
++row;
}
}
#ifdef STORM_HAVE_CARL
template<>
void SparseMatrix<Interval>::divideRowsInPlace(std::vector<Interval> const&) {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "This operation is not supported.");
}
#endif
template<typename ValueType>
typename SparseMatrix<ValueType>::const_rows SparseMatrix<ValueType>::getRows(index_type startRow, index_type endRow) const {

src/storm/storage/SparseMatrix.h (9)

@@ -796,9 +796,18 @@ * @param insertDiagonalEntries If set to true, the resulting matri
/*!
* Scales each row of the matrix, i.e., multiplies each element in row i with factors[i]
*
* @param factors The factors with which each row is scaled.
*/
void scaleRowsInPlace(std::vector<value_type> const& factors);
/*!
* Divides each row of the matrix, i.e., divides each element in row i by divisors[i]
*
* @param divisors The divisors with which each row is divided.
*/
void divideRowsInPlace(std::vector<value_type> const& divisors);
/*!
* Performs one step of the successive over-relaxation technique.
*

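A short usage sketch of the new divideRowsInPlace (not part of the diff; the matrix and exit-rate vector are placeholders): dividing every row of a CTMC rate matrix by its exit rate yields the probability matrix of the embedded DTMC, which is exactly how the transformer below uses the method.

#include "storm/storage/SparseMatrix.h"
#include <vector>

void embedRates(storm::storage::SparseMatrix<double>& rateMatrix,
                std::vector<double> const& exitRates) {
    // Requires one divisor per row and no zero divisors; afterwards entry (i,j)
    // holds rate(i,j) / exitRates[i], i.e. a probability if exitRates[i] was the row sum.
    rateMatrix.divideRowsInPlace(exitRates);
}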
src/storm/transformer/ContinuousToDiscreteTimeModelTransformer.cpp (194)

@@ -14,8 +14,8 @@
namespace storm {
namespace transformer {
template <typename ValueType>
void transformContinuousToDiscreteModelInPlace(std::shared_ptr<storm::models::sparse::Model<ValueType>>& markovModel, std::shared_ptr<storm::logic::Formula const>& formula) {
template <typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Model<ValueType>> transformContinuousToDiscreteModel(std::shared_ptr<storm::models::sparse::Model<ValueType, RewardModelType>> markovModel, std::shared_ptr<storm::logic::Formula const>& formula) {
boost::optional<std::string> timeRewardModelName;
if (formula->isTimeOperatorFormula()) {
auto const& timeOpFormula = formula->asTimeOperatorFormula();
@@ -29,13 +29,42 @@ namespace storm {
}
if (markovModel->isOfType(storm::models::ModelType::Ctmc)) {
SparseCtmcToSparseDtmcTransformer<storm::models::sparse::Ctmc<ValueType>> transformer;
SparseCtmcToSparseDtmcTransformer<ValueType, RewardModelType> transformer;
if (transformer.transformationPreservesProperty(*formula)) {
STORM_LOG_INFO("Transforming Ctmc to embedded Dtmc...");
markovModel = transformer.translate(*markovModel->template as<storm::models::sparse::Ctmc<ValueType>>(), timeRewardModelName);
}
} else if (markovModel->isOfType(storm::models::ModelType::MarkovAutomaton)) {
SparseMaToSparseMdpTransformer<ValueType, RewardModelType> transformer;
if (transformer.transformationPreservesProperty(*formula)) {
STORM_LOG_INFO("Transforming Markov automaton to embedded Mdp...");
markovModel = transformer.translate(*markovModel->template as<storm::models::sparse::MarkovAutomaton<ValueType>>(), timeRewardModelName);
}
}
}
template <typename ValueType, typename RewardModelType>
void transformContinuousToDiscreteModelInPlace(std::shared_ptr<storm::models::sparse::Model<ValueType, RewardModelType>>& markovModel, std::shared_ptr<storm::logic::Formula const>& formula) {
boost::optional<std::string> timeRewardModelName;
if (formula->isTimeOperatorFormula()) {
auto const& timeOpFormula = formula->asTimeOperatorFormula();
if (timeOpFormula.getSubformula().isReachabilityTimeFormula()) {
auto reachabilityRewardFormula = std::make_shared<storm::logic::EventuallyFormula>(storm::logic::CloneVisitor().clone(timeOpFormula.getSubformula().asReachabilityTimeFormula().getSubformula()), storm::logic::FormulaContext::Reward);
timeRewardModelName = "time";
// make sure that the reward model name is not already in use
while (markovModel->hasRewardModel(*timeRewardModelName)) *timeRewardModelName += "_";
formula = std::make_shared<storm::logic::RewardOperatorFormula const>(reachabilityRewardFormula, timeRewardModelName, timeOpFormula.getOperatorInformation());
}
}
if (markovModel->isOfType(storm::models::ModelType::Ctmc)) {
SparseCtmcToSparseDtmcTransformer<ValueType, RewardModelType> transformer;
if (transformer.transformationPreservesProperty(*formula)) {
STORM_LOG_INFO("Transforming Ctmc to embedded Dtmc...");
markovModel = transformer.translate(std::move(*markovModel->template as<storm::models::sparse::Ctmc<ValueType>>()), timeRewardModelName);
}
} else if (markovModel->isOfType(storm::models::ModelType::MarkovAutomaton)) {
SparseMaToSparseMdpTransformer<storm::models::sparse::MarkovAutomaton<ValueType>> transformer;
SparseMaToSparseMdpTransformer<ValueType, RewardModelType> transformer;
if (transformer.transformationPreservesProperty(*formula)) {
STORM_LOG_INFO("Transforming Markov automaton to embedded Mdp...");
markovModel = transformer.translate(std::move(*markovModel->template as<storm::models::sparse::MarkovAutomaton<ValueType>>()), timeRewardModelName);
@@ -43,23 +72,54 @@ namespace storm {
}
}
template<typename CtmcType>
std::shared_ptr<storm::models::sparse::Dtmc<typename CtmcType::ValueType, typename CtmcType::RewardModelType>> SparseCtmcToSparseDtmcTransformer<CtmcType>::translate(CtmcType&& ctmc, boost::optional<std::string> const& timeRewardModelName) {
// Turn the rates into probabilities by scaling each row of the transition matrix with the exit rate
std::vector<typename CtmcType::ValueType>& exitRates = ctmc.getExitRateVector();
storm::storage::SparseMatrix<typename CtmcType::ValueType> matrix(std::move(ctmc.getTransitionMatrix()));
auto exitRateIt = exitRates.begin();
for (uint_fast64_t state = 0; state < matrix.getRowCount(); ++state) {
for (auto& entry : matrix.getRow(state)) {
entry.setValue(entry.getValue() / *exitRateIt);
template <typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Dtmc<ValueType, RewardModelType>> SparseCtmcToSparseDtmcTransformer<ValueType, RewardModelType>::translate(storm::models::sparse::Ctmc<ValueType, RewardModelType> const& ctmc, boost::optional<std::string> const& timeRewardModelName) {
// Init the dtmc components
storm::storage::sparse::ModelComponents<ValueType, RewardModelType> dtmcComponents(ctmc.getTransitionMatrix(), ctmc.getStateLabeling(), ctmc.getRewardModels());
dtmcComponents.choiceLabeling = ctmc.getOptionalChoiceLabeling();
dtmcComponents.stateValuations = ctmc.getOptionalStateValuations();
dtmcComponents.choiceOrigins = ctmc.getOptionalChoiceOrigins();
// Turn the rates into probabilities by dividing each row of the transition matrix with the exit rate
std::vector<ValueType> const& exitRates = ctmc.getExitRateVector();
dtmcComponents.transitionMatrix.divideRowsInPlace(exitRates);
// Transform the reward models
for (auto& rewardModel : dtmcComponents.rewardModels) {
if (rewardModel.second.hasStateRewards()) {
storm::utility::vector::divideVectorsPointwise(rewardModel.second.getStateRewardVector(), exitRates, rewardModel.second.getStateRewardVector());
}
++exitRateIt;
}
STORM_LOG_ASSERT(exitRateIt == exitRates.end(), "Unexpected size of rate vector.");
if (timeRewardModelName) {
// Invert the exit rate vector in place
std::vector<ValueType> timeRewardVector;
timeRewardVector.reserve(exitRates.size());
for (auto const& r : exitRates) {
timeRewardVector.push_back(storm::utility::one<ValueType>() / r);
}
RewardModelType timeRewards(std::move(timeRewardVector));
auto insertRes = dtmcComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException, "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
}
return std::make_shared<storm::models::sparse::Dtmc<ValueType, RewardModelType>>(std::move(dtmcComponents));
}
template <typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Dtmc<ValueType, RewardModelType>> SparseCtmcToSparseDtmcTransformer<ValueType, RewardModelType>::translate(storm::models::sparse::Ctmc<ValueType, RewardModelType>&& ctmc, boost::optional<std::string> const& timeRewardModelName) {
// Init the dtmc components
storm::storage::sparse::ModelComponents<ValueType, RewardModelType> dtmcComponents(std::move(ctmc.getTransitionMatrix()), std::move(ctmc.getStateLabeling()), std::move(ctmc.getRewardModels()));
dtmcComponents.choiceLabeling = std::move(ctmc.getOptionalChoiceLabeling());
dtmcComponents.stateValuations = std::move(ctmc.getOptionalStateValuations());
dtmcComponents.choiceOrigins = std::move(ctmc.getOptionalChoiceOrigins());
// Turn the rates into probabilities by dividing each row of the transition matrix with the exit rate
std::vector<ValueType>& exitRates = ctmc.getExitRateVector();
dtmcComponents.transitionMatrix.divideRowsInPlace(exitRates);
// Transform the reward models
std::unordered_map<std::string, typename CtmcType::RewardModelType> rewardModels(std::move(ctmc.getRewardModels()));
for (auto& rewardModel : rewardModels) {
for (auto& rewardModel : dtmcComponents.rewardModels) {
if (rewardModel.second.hasStateRewards()) {
storm::utility::vector::divideVectorsPointwise(rewardModel.second.getStateRewardVector(), exitRates, rewardModel.second.getStateRewardVector());
}
@@ -67,18 +127,18 @@ namespace storm {
if (timeRewardModelName) {
// Invert the exit rate vector in place
storm::utility::vector::applyPointwise<typename CtmcType::ValueType, typename CtmcType::ValueType>(exitRates, exitRates, [&] (typename CtmcType::ValueType const& r) -> typename CtmcType::ValueType { return storm::utility::one<typename CtmcType::ValueType>() / r; });
typename CtmcType::RewardModelType timeRewards(std::move(exitRates));
auto insertRes = rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
storm::utility::vector::applyPointwise<ValueType, ValueType>(exitRates, exitRates, [&] (ValueType const& r) -> ValueType { return storm::utility::one<ValueType>() / r; });
RewardModelType timeRewards(std::move(exitRates));
auto insertRes = dtmcComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException, "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
}
// exitRates might be invalidated at this point!!
// Note: exitRates might be invalidated at this point.
return std::make_shared<storm::models::sparse::Dtmc<typename CtmcType::ValueType, typename CtmcType::RewardModelType>>(std::move(matrix), std::move(ctmc.getStateLabeling()), std::move(rewardModels));
return std::make_shared<storm::models::sparse::Dtmc<ValueType, RewardModelType>>(std::move(dtmcComponents));
}
template<typename CtmcType>
bool SparseCtmcToSparseDtmcTransformer<CtmcType>::transformationPreservesProperty(storm::logic::Formula const& formula) {
template <typename ValueType, typename RewardModelType>
bool SparseCtmcToSparseDtmcTransformer<ValueType, RewardModelType>::transformationPreservesProperty(storm::logic::Formula const& formula) {
storm::logic::FragmentSpecification fragment = storm::logic::propositional();
fragment.setProbabilityOperatorsAllowed(true);
fragment.setGloballyFormulasAllowed(true);
@@ -90,16 +150,22 @@ namespace storm {
return formula.isInFragment(fragment);
}
template<typename MaType>
std::shared_ptr<storm::models::sparse::Mdp<typename MaType::ValueType, typename MaType::RewardModelType>> SparseMaToSparseMdpTransformer<MaType>::translate(MaType&& ma, boost::optional<std::string> const& timeRewardModelName) {
template <typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Mdp<ValueType, RewardModelType>> SparseMaToSparseMdpTransformer<ValueType, RewardModelType>::translate(storm::models::sparse::MarkovAutomaton<ValueType, RewardModelType> const& ma, boost::optional<std::string> const& timeRewardModelName) {
STORM_LOG_THROW(ma.isClosed(), storm::exceptions::InvalidArgumentException, "Transformation of MA to its underlying MDP is only possible for closed MAs");
std::vector<typename MaType::ValueType>& exitRates = ma.getExitRates();
// Init the mdp components
storm::storage::sparse::ModelComponents<ValueType, RewardModelType> mdpComponents(ma.getTransitionMatrix(), ma.getStateLabeling(), ma.getRewardModels());
mdpComponents.choiceLabeling = ma.getOptionalChoiceLabeling();
mdpComponents.stateValuations = ma.getOptionalStateValuations();
mdpComponents.choiceOrigins = ma.getOptionalChoiceOrigins();
// Markov automata already store the probability matrix
// Transform the reward models
std::unordered_map<std::string, typename MaType::RewardModelType> rewardModels(std::move(ma.getRewardModels()));
for (auto& rewardModel : rewardModels) {
std::vector<ValueType> const& exitRates = ma.getExitRates();
for (auto& rewardModel : mdpComponents.rewardModels) {
if (rewardModel.second.hasStateRewards()) {
auto& stateRewards = rewardModel.second.getStateRewardVector();
for (auto state : ma.getMarkovianStates()) {
@@ -110,20 +176,57 @@ namespace storm {
if (timeRewardModelName) {
// Invert the exit rate vector. Avoid division by zero at probabilistic states
std::vector<typename MaType::ValueType> timeRewardVector(exitRates.size(), storm::utility::zero<typename MaType::ValueType>());
std::vector<ValueType> timeRewardVector(exitRates.size(), storm::utility::zero<ValueType>());
for (auto state : ma.getMarkovianStates()) {
timeRewardVector[state] = storm::utility::one<typename MaType::ValueType>() / exitRates[state];
timeRewardVector[state] = storm::utility::one<ValueType>() / exitRates[state];
}
typename MaType::RewardModelType timeRewards(std::move(timeRewardVector));
auto insertRes = rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
RewardModelType timeRewards(std::move(timeRewardVector));
auto insertRes = mdpComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException, "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
}
return std::make_shared<storm::models::sparse::Mdp<typename MaType::ValueType, typename MaType::RewardModelType>>(std::move(ma.getTransitionMatrix()), std::move(ma.getStateLabeling()), std::move(rewardModels));
return std::make_shared<storm::models::sparse::Mdp<ValueType, RewardModelType>>(std::move(mdpComponents));
}
template <typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::Mdp<ValueType, RewardModelType>> SparseMaToSparseMdpTransformer<ValueType, RewardModelType>::translate(storm::models::sparse::MarkovAutomaton<ValueType, RewardModelType>&& ma, boost::optional<std::string> const& timeRewardModelName) {
STORM_LOG_THROW(ma.isClosed(), storm::exceptions::InvalidArgumentException, "Transformation of MA to its underlying MDP is only possible for closed MAs");
std::vector<ValueType>& exitRates = ma.getExitRates();
// Init the mdp components
storm::storage::sparse::ModelComponents<ValueType, RewardModelType> mdpComponents(std::move(ma.getTransitionMatrix()), std::move(ma.getStateLabeling()), std::move(ma.getRewardModels()));
mdpComponents.choiceLabeling = std::move(ma.getOptionalChoiceLabeling());
mdpComponents.stateValuations = std::move(ma.getOptionalStateValuations());
mdpComponents.choiceOrigins = std::move(ma.getOptionalChoiceOrigins());
// Markov automata already store the probability matrix
// Transform the reward models
for (auto& rewardModel : mdpComponents.rewardModels) {
if (rewardModel.second.hasStateRewards()) {
auto& stateRewards = rewardModel.second.getStateRewardVector();
for (auto state : ma.getMarkovianStates()) {
stateRewards[state] /= exitRates[state];
}
}
}
if (timeRewardModelName) {
// Invert the exit rate vector. Avoid division by zero at probabilistic states
std::vector<ValueType> timeRewardVector(exitRates.size(), storm::utility::zero<ValueType>());
for (auto state : ma.getMarkovianStates()) {
timeRewardVector[state] = storm::utility::one<ValueType>() / exitRates[state];
}
RewardModelType timeRewards(std::move(timeRewardVector));
auto insertRes = mdpComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards)));
STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException, "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists.");
}
return std::make_shared<storm::models::sparse::Mdp<ValueType, RewardModelType>>(std::move(mdpComponents));
}
template<typename MaType>
bool SparseMaToSparseMdpTransformer<MaType>::transformationPreservesProperty(storm::logic::Formula const& formula) {
template <typename ValueType, typename RewardModelType>
bool SparseMaToSparseMdpTransformer<ValueType, RewardModelType>::transformationPreservesProperty(storm::logic::Formula const& formula) {
storm::logic::FragmentSpecification fragment = storm::logic::propositional();
fragment.setProbabilityOperatorsAllowed(true);
fragment.setGloballyFormulasAllowed(true);
@@ -135,15 +238,18 @@ namespace storm {
return formula.isInFragment(fragment);
}
template std::shared_ptr<storm::models::sparse::Model<double>> transformContinuousToDiscreteModel(std::shared_ptr<storm::models::sparse::Model<double>> markovModel, std::shared_ptr<storm::logic::Formula const>& formula);
template std::shared_ptr<storm::models::sparse::Model<storm::RationalNumber>> transformContinuousToDiscreteModel(std::shared_ptr<storm::models::sparse::Model<storm::RationalNumber>> markovModel, std::shared_ptr<storm::logic::Formula const>& formula);
template std::shared_ptr<storm::models::sparse::Model<storm::RationalFunction>> transformContinuousToDiscreteModel(std::shared_ptr<storm::models::sparse::Model<storm::RationalFunction>> markovModel, std::shared_ptr<storm::logic::Formula const>& formula);
template void transformContinuousToDiscreteModelInPlace<double>(std::shared_ptr<storm::models::sparse::Model<double>>& markovModel, std::shared_ptr<storm::logic::Formula const>& formula);
template void transformContinuousToDiscreteModelInPlace<storm::RationalNumber>(std::shared_ptr<storm::models::sparse::Model<storm::RationalNumber>>& markovModel, std::shared_ptr<storm::logic::Formula const>& formula);
template void transformContinuousToDiscreteModelInPlace<storm::RationalFunction>(std::shared_ptr<storm::models::sparse::Model<storm::RationalFunction>>& markovModel, std::shared_ptr<storm::logic::Formula const>& formula);
template class SparseCtmcToSparseDtmcTransformer<storm::models::sparse::Ctmc<double>>;
template class SparseCtmcToSparseDtmcTransformer<storm::models::sparse::Ctmc<storm::RationalNumber>>;
template class SparseCtmcToSparseDtmcTransformer<storm::models::sparse::Ctmc<storm::RationalFunction>>;
template class SparseMaToSparseMdpTransformer<storm::models::sparse::MarkovAutomaton<double>>;
template class SparseMaToSparseMdpTransformer<storm::models::sparse::MarkovAutomaton<storm::RationalNumber>>;
template class SparseMaToSparseMdpTransformer<storm::models::sparse::MarkovAutomaton<storm::RationalFunction>>;
template class SparseCtmcToSparseDtmcTransformer<double>;
template class SparseCtmcToSparseDtmcTransformer<storm::RationalNumber>;
template class SparseCtmcToSparseDtmcTransformer<storm::RationalFunction>;
template class SparseMaToSparseMdpTransformer<double>;
template class SparseMaToSparseMdpTransformer<storm::RationalNumber>;
template class SparseMaToSparseMdpTransformer<storm::RationalFunction>;
}
}

src/storm/transformer/ContinuousToDiscreteTimeModelTransformer.h (23)

@@ -6,35 +6,44 @@
#include "storm/models/sparse/Mdp.h"
#include "storm/models/sparse/Ctmc.h"
#include "storm/models/sparse/MarkovAutomaton.h"
#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/logic/Formula.h"
namespace storm {
namespace transformer {
// Transforms the given continuous model to a discrete time model.
// If such a transformation does not preserve the given formula, the transformation does not take place and the original model is returned
// Moreover, the given formula might be changed (e.g. TimeOperatorFormulas become RewardOperatorFormulas).
template <typename ValueType, typename RewardModelType = storm::models::sparse::StandardRewardModel<ValueType>>
std::shared_ptr<storm::models::sparse::Model<ValueType>> transformContinuousToDiscreteModel(std::shared_ptr<storm::models::sparse::Model<ValueType, RewardModelType>> markovModel, std::shared_ptr<storm::logic::Formula const>& formula);
// Transforms the given continuous model to a discrete time model IN PLACE (i.e., the continuous model will be invalidated).
// If such a transformation does not preserve the given formula, the transformation does not take place.
// Moreover, the given formula might be changed (e.g. TimeOperatorFormulas become RewardOperatorFormulas).
template <typename ValueType>
void transformContinuousToDiscreteModelInPlace(std::shared_ptr<storm::models::sparse::Model<ValueType>>& markovModel, std::shared_ptr<storm::logic::Formula const>& formula);
template <typename ValueType, typename RewardModelType = storm::models::sparse::StandardRewardModel<ValueType>>
void transformContinuousToDiscreteModelInPlace(std::shared_ptr<storm::models::sparse::Model<ValueType, RewardModelType>>& markovModel, std::shared_ptr<storm::logic::Formula const>& formula);
template<typename CtmcType>
template <typename ValueType, typename RewardModelType = storm::models::sparse::StandardRewardModel<ValueType>>
class SparseCtmcToSparseDtmcTransformer {
public:
// Transforms the given CTMC to its underlying (aka embedded) DTMC.
// A reward model for time is added if a corresponding reward model name is given
static std::shared_ptr<storm::models::sparse::Dtmc<typename CtmcType::ValueType, typename CtmcType::RewardModelType>> translate(CtmcType&& ctmc, boost::optional<std::string> const& timeRewardModelName = boost::none);
static std::shared_ptr<storm::models::sparse::Dtmc<ValueType, RewardModelType>> translate(storm::models::sparse::Ctmc<ValueType, RewardModelType> const& ctmc, boost::optional<std::string> const& timeRewardModelName = boost::none);
static std::shared_ptr<storm::models::sparse::Dtmc<ValueType, RewardModelType>> translate(storm::models::sparse::Ctmc<ValueType, RewardModelType>&& ctmc, boost::optional<std::string> const& timeRewardModelName = boost::none);
// If this method returns true, the given formula is preserved by the transformation
static bool transformationPreservesProperty(storm::logic::Formula const& formula);
};
template<typename MaType>
template <typename ValueType, typename RewardModelType = storm::models::sparse::StandardRewardModel<ValueType>>
class SparseMaToSparseMdpTransformer {
public:
// Transforms the given MA to its underlying (aka embedded) MDP.
// A reward model for time is added if a corresponding reward model name is given
static std::shared_ptr<storm::models::sparse::Mdp<typename MaType::ValueType, typename MaType::RewardModelType>> translate(MaType&& ma, boost::optional<std::string> const& timeRewardModelName = boost::none);
static std::shared_ptr<storm::models::sparse::Mdp<ValueType, RewardModelType>> translate(storm::models::sparse::MarkovAutomaton<ValueType, RewardModelType> const& ma, boost::optional<std::string> const& timeRewardModelName = boost::none);
static std::shared_ptr<storm::models::sparse::Mdp<ValueType, RewardModelType>> translate(storm::models::sparse::MarkovAutomaton<ValueType, RewardModelType>&& ma, boost::optional<std::string> const& timeRewardModelName = boost::none);
// If this method returns true, the given formula is preserved by the transformation
static bool transformationPreservesProperty(storm::logic::Formula const& formula);

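To illustrate the interface declared above (not part of the diff; the ctmc argument is a hypothetical, fully built model): the const& overload copies the model components while the && overload moves them out, mirroring the out-of-place and in-place code paths in the .cpp file. SparseMaToSparseMdpTransformer offers the same pair of overloads for Markov automata.

#include "storm/transformer/ContinuousToDiscreteTimeModelTransformer.h"
#include <utility>

void translateTwice(storm::models::sparse::Ctmc<double>& ctmc) {
    using Transformer = storm::transformer::SparseCtmcToSparseDtmcTransformer<double>;
    // const& overload: components are copied, ctmc stays usable afterwards.
    auto dtmcCopy = Transformer::translate(ctmc);
    // && overload: components are moved out, ctmc must not be used afterwards.
    auto dtmcMoved = Transformer::translate(std::move(ctmc));
}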