diff --git a/src/models/sparse/NondeterministicModel.cpp b/src/models/sparse/NondeterministicModel.cpp
index 340400dde..c5e3e9ecf 100644
--- a/src/models/sparse/NondeterministicModel.cpp
+++ b/src/models/sparse/NondeterministicModel.cpp
@@ -4,6 +4,8 @@
 
 #include "src/adapters/CarlAdapter.h"
 
+#include "src/exceptions/InvalidOperationException.h"
+
 namespace storm {
     namespace models {
         namespace sparse {
@@ -45,6 +47,22 @@ namespace storm {
                 return indices[state+1] - indices[state];
             }
 
+            template<typename ValueType, typename RewardModelType>
+            void NondeterministicModel<ValueType, RewardModelType>::modifyStateActionRewards(RewardModelType& rewardModel, std::map<std::pair<uint_fast64_t, LabelSet>, typename RewardModelType::ValueType> const& modifications) const {
+                STORM_LOG_THROW(rewardModel.hasStateActionRewards(), storm::exceptions::InvalidOperationException, "Cannot modify state-action rewards, because the reward model does not have state-action rewards.");
+                STORM_LOG_THROW(this->hasChoiceLabeling(), storm::exceptions::InvalidOperationException, "Cannot modify state-action rewards, because the model does not have an action labeling.");
+                std::vector<LabelSet> const& choiceLabels = this->getChoiceLabeling();
+                for (auto const& modification : modifications) {
+                    uint_fast64_t stateIndex = modification.first.first;
+                    for (uint_fast64_t row = this->getNondeterministicChoiceIndices()[stateIndex]; row < this->getNondeterministicChoiceIndices()[stateIndex + 1]; ++row) {
+                        // If the action label of the row matches the requested one, we set the reward value accordingly.
+                        if (choiceLabels[row] == modification.first.second) {
+                            rewardModel.setStateActionRewardValue(row, modification.second);
+                        }
+                    }
+                }
+            }
+
             template<typename ValueType, typename RewardModelType>
             void NondeterministicModel<ValueType, RewardModelType>::reduceToStateBasedRewards() {
                 for (auto& rewardModel : this->getRewardModels()) {
diff --git a/src/models/sparse/NondeterministicModel.h b/src/models/sparse/NondeterministicModel.h
index 9c5a01b05..5bacf6dd7 100644
--- a/src/models/sparse/NondeterministicModel.h
+++ b/src/models/sparse/NondeterministicModel.h
@@ -73,6 +73,15 @@ namespace storm {
                  */
                 uint_fast64_t getNumberOfChoices(uint_fast64_t state) const;
 
+                /*!
+                 * Modifies the state-action reward vector of the given reward model by setting the value specified in
+                 * the map for the corresponding state-action pairs.
+                 *
+                 * @param rewardModel The reward model whose state-action rewards to modify.
+                 * @param modifications A mapping from state-action pairs to their new reward values.
+                 */
+                void modifyStateActionRewards(RewardModelType& rewardModel, std::map<std::pair<uint_fast64_t, LabelSet>, typename RewardModelType::ValueType> const& modifications) const;
+
                 virtual void reduceToStateBasedRewards() override;
 
                 virtual void printModelInformationToStream(std::ostream& out) const override;
diff --git a/src/models/sparse/StandardRewardModel.cpp b/src/models/sparse/StandardRewardModel.cpp
index b8fa2e3be..5788dae10 100644
--- a/src/models/sparse/StandardRewardModel.cpp
+++ b/src/models/sparse/StandardRewardModel.cpp
@@ -197,6 +197,11 @@ namespace storm {
                 return result;
             }
 
+            template<typename ValueType>
+            void StandardRewardModel<ValueType>::setStateActionRewardValue(uint_fast64_t row, ValueType const& value) {
+                this->optionalStateActionRewardVector.get()[row] = value;
+            }
+
             template<typename ValueType>
             bool StandardRewardModel<ValueType>::empty() const {
                 return !(static_cast<bool>(this->optionalStateRewardVector) || static_cast<bool>(this->optionalStateActionRewardVector) || static_cast<bool>(this->optionalTransitionRewardMatrix));
diff --git a/src/models/sparse/StandardRewardModel.h b/src/models/sparse/StandardRewardModel.h
index 457d4df5b..eb396d90a 100644
--- a/src/models/sparse/StandardRewardModel.h
+++ b/src/models/sparse/StandardRewardModel.h
@@ -10,9 +10,11 @@ namespace storm {
     namespace models {
         namespace sparse {
 
-            template<typename ValueType>
+            template<typename CValueType>
             class StandardRewardModel {
             public:
+                typedef CValueType ValueType;
+
                 /*!
                  * Constructs a reward model by copying the given data.
                  *
@@ -213,6 +215,15 @@ namespace storm {
                  */
                 std::vector<ValueType> getTotalStateActionRewardVector(uint_fast64_t numberOfRows, std::vector<uint_fast64_t> const& rowGroupIndices, storm::storage::BitVector const& filter) const;
 
+                /*!
+                 * Sets the given value in the state-action reward vector at the given row. This assumes that the reward
+                 * model has state-action rewards.
+                 *
+                 * @param row The row at which to set the given value.
+                 * @param value The value to set.
+                 */
+                void setStateActionRewardValue(uint_fast64_t row, ValueType const& value);
+
                 /*!
                  * Retrieves whether the reward model is empty, i.e. contains no state-, state-action- or transition-based
                  * rewards.
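Taken together, NondeterministicModel::modifyStateActionRewards and StandardRewardModel::setStateActionRewardValue let a caller overwrite individual state-action rewards by action label. The following is a minimal usage sketch, not part of the diff: the helper overrideReward and the local LabelSet typedef are assumptions that are only meant to mirror the label-set type used by the model's choice labeling.

#include <cstdint>
#include <map>
#include <utility>

#include <boost/container/flat_set.hpp>

#include "src/models/sparse/NondeterministicModel.h"

// Assumption: this matches the label-set type stored in the model's choice labeling.
typedef boost::container::flat_set<uint_fast64_t> LabelSet;

// Overwrites the reward of every choice of 'state' whose label set equals 'actionLabels'.
template<typename ValueType, typename RewardModelType>
void overrideReward(storm::models::sparse::NondeterministicModel<ValueType, RewardModelType> const& model,
                    RewardModelType& rewardModel, uint_fast64_t state, LabelSet const& actionLabels,
                    typename RewardModelType::ValueType newValue) {
    // One map entry per (state, action-label-set) pair whose reward is to be replaced.
    std::map<std::pair<uint_fast64_t, LabelSet>, typename RewardModelType::ValueType> modifications;
    modifications.emplace(std::make_pair(state, actionLabels), newValue);
    // The model resolves each pair to the matching choice rows and writes the new value
    // into the reward model's state-action reward vector via setStateActionRewardValue.
    model.modifyStateActionRewards(rewardModel, modifications);
}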
diff --git a/src/utility/solver.cpp b/src/utility/solver.cpp
index c5c3ecaa0..6c63004bd 100644
--- a/src/utility/solver.cpp
+++ b/src/utility/solver.cpp
@@ -133,28 +133,37 @@ namespace storm {
 
             }
 
-            std::unique_ptr<storm::solver::LpSolver> LpSolverFactory::create(std::string const& name) const {
-                storm::solver::LpSolverType lpSolver = storm::settings::generalSettings().getLpSolver();
-                switch (lpSolver) {
+            std::unique_ptr<storm::solver::LpSolver> LpSolverFactory::create(std::string const& name, storm::solver::LpSolverTypeSelection solvT) const {
+                storm::solver::LpSolverType t;
+                if(solvT == storm::solver::LpSolverTypeSelection::FROMSETTINGS) {
+                    t = storm::settings::generalSettings().getLpSolver();
+                } else {
+                    t = convert(solvT);
+                }
+                switch (t) {
                     case storm::solver::LpSolverType::Gurobi: return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GurobiLpSolver(name));
                     case storm::solver::LpSolverType::Glpk: return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GlpkLpSolver(name));
-                    default: return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GurobiLpSolver(name));
                 }
             }
 
+            std::unique_ptr<storm::solver::LpSolver> LpSolverFactory::create(std::string const& name) const {
+                return LpSolverFactory::create(name, storm::solver::LpSolverTypeSelection::FROMSETTINGS);
+            }
+
             std::unique_ptr<storm::solver::LpSolver> GlpkLpSolverFactory::create(std::string const& name) const {
-                return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GlpkLpSolver(name));
+                return LpSolverFactory::create(name, storm::solver::LpSolverTypeSelection::Glpk);
             }
 
             std::unique_ptr<storm::solver::LpSolver> GurobiLpSolverFactory::create(std::string const& name) const {
-                return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GurobiLpSolver(name));
+                return LpSolverFactory::create(name, storm::solver::LpSolverTypeSelection::Gurobi);
             }
 
-            std::unique_ptr<storm::solver::LpSolver> getLpSolver(std::string const& name) {
+            std::unique_ptr<storm::solver::LpSolver> getLpSolver(std::string const& name, storm::solver::LpSolverTypeSelection solvType) {
                 std::unique_ptr<LpSolverFactory> factory(new LpSolverFactory());
-                return factory->create(name);
+                return factory->create(name, solvType);
             }
 
+            template class SymbolicLinearEquationSolverFactory<storm::dd::DdType::CUDD>;
             template class SymbolicMinMaxLinearEquationSolverFactory<storm::dd::DdType::CUDD>;
             template class SymbolicGameSolverFactory<storm::dd::DdType::CUDD>;
 
diff --git a/src/utility/solver.h b/src/utility/solver.h
index e18f435d6..ff5abb3e2 100644
--- a/src/utility/solver.h
+++ b/src/utility/solver.h
@@ -112,6 +112,7 @@ namespace storm {
                  * @return A pointer to the newly created solver.
                  */
                 virtual std::unique_ptr<storm::solver::LpSolver> create(std::string const& name) const;
+                virtual std::unique_ptr<storm::solver::LpSolver> create(std::string const& name, storm::solver::LpSolverTypeSelection solvType) const;
             };
 
             class GlpkLpSolverFactory : public LpSolverFactory {
@@ -124,7 +125,7 @@ namespace storm {
                 virtual std::unique_ptr<storm::solver::LpSolver> create(std::string const& name) const override;
             };
 
-            std::unique_ptr<storm::solver::LpSolver> getLpSolver(std::string const& name);
+            std::unique_ptr<storm::solver::LpSolver> getLpSolver(std::string const& name, storm::solver::LpSolverTypeSelection solvType = storm::solver::LpSolverTypeSelection::FROMSETTINGS);
         }
     }
 }
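The solver changes make the LP backend selectable per call site instead of only through the settings. Below is a minimal sketch of the resulting call patterns; it is not part of the diff, and the function and solver names used here are illustrative.

#include <memory>

#include "src/utility/solver.h"

void obtainLpSolvers() {
    // Old behaviour, unchanged: the backend is taken from the LP solver setting.
    std::unique_ptr<storm::solver::LpSolver> fromSettings =
        storm::utility::solver::getLpSolver("fromSettings");

    // New: pin the backend explicitly, independently of the settings.
    std::unique_ptr<storm::solver::LpSolver> glpkSolver =
        storm::utility::solver::getLpSolver("pinned", storm::solver::LpSolverTypeSelection::Glpk);

    // The derived factories now route through LpSolverFactory::create as well,
    // so a GlpkLpSolverFactory is guaranteed to hand out a glpk-backed solver.
    storm::utility::solver::GlpkLpSolverFactory factory;
    std::unique_ptr<storm::solver::LpSolver> viaFactory = factory.create("viaFactory");
}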