#include "storm/transformer/ContinuousToDiscreteTimeModelTransformer.h" #include #include "storm/models/sparse/StandardRewardModel.h" #include "storm/logic/Formulas.h" #include "storm/logic/FragmentSpecification.h" #include "storm/logic/ExpectedTimeToExpectedRewardVisitor.h" #include "storm/utility/macros.h" #include "storm/utility/vector.h" #include "storm/exceptions/InvalidArgumentException.h" #include "storm/exceptions/UnexpectedException.h" namespace storm { namespace transformer { template std::shared_ptr> ContinuousToDiscreteTimeModelTransformer::transform(storm::models::sparse::Ctmc const& ctmc, boost::optional const& timeRewardModelName) { // Init the dtmc components storm::storage::sparse::ModelComponents dtmcComponents(ctmc.getTransitionMatrix(), ctmc.getStateLabeling(), ctmc.getRewardModels()); dtmcComponents.choiceLabeling = ctmc.getOptionalChoiceLabeling(); dtmcComponents.stateValuations = ctmc.getOptionalStateValuations(); dtmcComponents.choiceOrigins = ctmc.getOptionalChoiceOrigins(); // Turn the rates into probabilities by dividing each row of the transition matrix with the exit rate std::vector const& exitRates = ctmc.getExitRateVector(); dtmcComponents.transitionMatrix.divideRowsInPlace(exitRates); // Transform the reward models for (auto& rewardModel : dtmcComponents.rewardModels) { if (rewardModel.second.hasStateRewards()) { storm::utility::vector::divideVectorsPointwise(rewardModel.second.getStateRewardVector(), exitRates, rewardModel.second.getStateRewardVector()); } } if (timeRewardModelName) { // Invert the exit rate vector in place std::vector timeRewardVector; timeRewardVector.reserve(exitRates.size()); for (auto const& r : exitRates) { timeRewardVector.push_back(storm::utility::one() / r); } RewardModelType timeRewards(std::move(timeRewardVector)); auto insertRes = dtmcComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards))); STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException, "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists."); } return std::make_shared>(std::move(dtmcComponents)); } template std::shared_ptr> ContinuousToDiscreteTimeModelTransformer::transform(storm::models::sparse::Ctmc&& ctmc, boost::optional const& timeRewardModelName) { // Init the dtmc components storm::storage::sparse::ModelComponents dtmcComponents(std::move(ctmc.getTransitionMatrix()), std::move(ctmc.getStateLabeling()), std::move(ctmc.getRewardModels())); dtmcComponents.choiceLabeling = std::move(ctmc.getOptionalChoiceLabeling()); dtmcComponents.stateValuations = std::move(ctmc.getOptionalStateValuations()); dtmcComponents.choiceOrigins = std::move(ctmc.getOptionalChoiceOrigins()); // Turn the rates into probabilities by dividing each row of the transition matrix with the exit rate std::vector& exitRates = ctmc.getExitRateVector(); dtmcComponents.transitionMatrix.divideRowsInPlace(exitRates); // Transform the reward models for (auto& rewardModel : dtmcComponents.rewardModels) { if (rewardModel.second.hasStateRewards()) { storm::utility::vector::divideVectorsPointwise(rewardModel.second.getStateRewardVector(), exitRates, rewardModel.second.getStateRewardVector()); } } if (timeRewardModelName) { // Invert the exit rate vector in place storm::utility::vector::applyPointwise(exitRates, exitRates, [&] (ValueType const& r) -> ValueType { return storm::utility::one() / r; }); RewardModelType timeRewards(std::move(exitRates)); auto insertRes = 
dtmcComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards))); STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException, "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists."); } // Note: exitRates might be invalidated at this point. return std::make_shared>(std::move(dtmcComponents)); } template bool ContinuousToDiscreteTimeModelTransformer::preservesFormula(storm::logic::Formula const& formula) { storm::logic::FragmentSpecification fragment = storm::logic::propositional(); fragment.setProbabilityOperatorsAllowed(true); fragment.setGloballyFormulasAllowed(true); fragment.setReachabilityProbabilityFormulasAllowed(true); fragment.setNextFormulasAllowed(true); fragment.setUntilFormulasAllowed(true); fragment.setRewardOperatorsAllowed(true); fragment.setReachabilityRewardFormulasAllowed(true); return formula.isInFragment(fragment); } template std::vector> ContinuousToDiscreteTimeModelTransformer::checkAndTransformFormulas(std::vector> const& formulas, std::string const& timeRewardName) { std::vector> result; storm::logic::ExpectedTimeToExpectedRewardVisitor v(timeRewardName); for (auto const& f : formulas) { // Translate expected time formulas auto newF = v.substitute(*f); if(preservesFormula(*newF)) { result.push_back(newF); } else { STORM_LOG_INFO("Continuous to discrete time transformation does not preserve formula " << *f); } } return result; } template std::shared_ptr> ContinuousToDiscreteTimeModelTransformer::transform(storm::models::sparse::MarkovAutomaton const& ma, boost::optional const& timeRewardModelName) { STORM_LOG_THROW(ma.isClosed(), storm::exceptions::InvalidArgumentException, "Transformation of MA to its underlying MDP is only possible for closed MAs"); // Init the mdp components storm::storage::sparse::ModelComponents mdpComponents(ma.getTransitionMatrix(), ma.getStateLabeling(), ma.getRewardModels()); mdpComponents.choiceLabeling = ma.getOptionalChoiceLabeling(); mdpComponents.stateValuations = ma.getOptionalStateValuations(); mdpComponents.choiceOrigins = ma.getOptionalChoiceOrigins(); // Markov automata already store the probability matrix // Transform the reward models std::vector const& exitRates = ma.getExitRates(); for (auto& rewardModel : mdpComponents.rewardModels) { if (rewardModel.second.hasStateRewards()) { auto& stateRewards = rewardModel.second.getStateRewardVector(); for (uint_fast64_t state = 0; state < stateRewards.size(); ++state) { if (ma.getMarkovianStates().get(state)) { stateRewards[state] /= exitRates[state]; } else { stateRewards[state] = storm::utility::zero(); } } } } if (timeRewardModelName) { // Invert the exit rate vector. 
Avoid division by zero at probabilistic states std::vector timeRewardVector(exitRates.size(), storm::utility::zero()); for (auto state : ma.getMarkovianStates()) { timeRewardVector[state] = storm::utility::one() / exitRates[state]; } RewardModelType timeRewards(std::move(timeRewardVector)); auto insertRes = mdpComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards))); STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException, "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists."); } return std::make_shared>(std::move(mdpComponents)); } template std::shared_ptr> ContinuousToDiscreteTimeModelTransformer::transform(storm::models::sparse::MarkovAutomaton&& ma, boost::optional const& timeRewardModelName) { STORM_LOG_THROW(ma.isClosed(), storm::exceptions::InvalidArgumentException, "Transformation of MA to its underlying MDP is only possible for closed MAs"); std::vector& exitRates = ma.getExitRates(); // Init the mdp components storm::storage::sparse::ModelComponents mdpComponents(std::move(ma.getTransitionMatrix()), std::move(ma.getStateLabeling()), std::move(ma.getRewardModels())); mdpComponents.choiceLabeling = std::move(ma.getOptionalChoiceLabeling()); mdpComponents.stateValuations = std::move(ma.getOptionalStateValuations()); mdpComponents.choiceOrigins = std::move(ma.getOptionalChoiceOrigins()); // Markov automata already store the probability matrix // Transform the reward models for (auto& rewardModel : mdpComponents.rewardModels) { if (rewardModel.second.hasStateRewards()) { auto& stateRewards = rewardModel.second.getStateRewardVector(); for (uint_fast64_t state = 0; state < stateRewards.size(); ++state) { if (ma.getMarkovianStates().get(state)) { stateRewards[state] /= exitRates[state]; } else { stateRewards[state] = storm::utility::zero(); } } } } if (timeRewardModelName) { // Invert the exit rate vector. Avoid division by zero at probabilistic states std::vector timeRewardVector(exitRates.size(), storm::utility::zero()); for (auto state : ma.getMarkovianStates()) { timeRewardVector[state] = storm::utility::one() / exitRates[state]; } RewardModelType timeRewards(std::move(timeRewardVector)); auto insertRes = mdpComponents.rewardModels.insert(std::make_pair(*timeRewardModelName, std::move(timeRewards))); STORM_LOG_THROW(insertRes.second, storm::exceptions::InvalidArgumentException, "Could not insert auxiliary reward model " << *timeRewardModelName << " because a model with this name already exists."); } return std::make_shared>(std::move(mdpComponents)); } template class ContinuousToDiscreteTimeModelTransformer; template class ContinuousToDiscreteTimeModelTransformer; template class ContinuousToDiscreteTimeModelTransformer; } }