Browse Source

Merge branch 'master' into future

Former-commit-id: c0e2920a94
tempestpy_adaptions
sjunges 9 years ago
parent
commit
0311e85f62
  1. 18
      src/models/sparse/NondeterministicModel.cpp
  2. 9
      src/models/sparse/NondeterministicModel.h
  3. 5
      src/models/sparse/StandardRewardModel.cpp
  4. 13
      src/models/sparse/StandardRewardModel.h
  5. 25
      src/utility/solver.cpp
  6. 3
      src/utility/solver.h

18
src/models/sparse/NondeterministicModel.cpp

@ -4,6 +4,8 @@
#include "src/adapters/CarlAdapter.h"
#include "src/exceptions/InvalidOperationException.h"
namespace storm {
namespace models {
namespace sparse {
@ -45,6 +47,22 @@ namespace storm {
return indices[state+1] - indices[state];
}
template<typename ValueType, typename RewardModelType>
/*!
 * Modifies the state-action reward vector of the given reward model by setting the values specified in
 * the map for the corresponding (state, action-label) pairs.
 *
 * @param rewardModel The reward model whose state-action rewards to modify; must have state-action rewards.
 * @param modifications A mapping from (state index, action label set) pairs to their new reward values.
 * @throws InvalidOperationException If the reward model has no state-action rewards or the model has no choice labeling.
 */
void NondeterministicModel<ValueType, RewardModelType>::modifyStateActionRewards(RewardModelType& rewardModel, std::map<std::pair<uint_fast64_t, LabelSet>, typename RewardModelType::ValueType> const& modifications) const {
    STORM_LOG_THROW(rewardModel.hasStateActionRewards(), storm::exceptions::InvalidOperationException, "Cannot modify state-action rewards, because the reward model does not have state-action rewards.");
    STORM_LOG_THROW(this->hasChoiceLabeling(), storm::exceptions::InvalidOperationException, "Cannot modify state-action rewards, because the model does not have an action labeling.");
    std::vector<LabelSet> const& choiceLabels = this->getChoiceLabeling();
    // Hoist the row-group index lookup out of the loops: it is invariant across all
    // modifications, and the original code re-queried it twice per inner-loop iteration.
    auto const& choiceIndices = this->getNondeterministicChoiceIndices();
    for (auto const& modification : modifications) {
        uint_fast64_t const stateIndex = modification.first.first;
        // Scan all rows (choices) belonging to this state's row group.
        for (uint_fast64_t row = choiceIndices[stateIndex], endRow = choiceIndices[stateIndex + 1]; row < endRow; ++row) {
            // If the action label of the row matches the requested one, we set the reward value accordingly.
            if (choiceLabels[row] == modification.first.second) {
                rewardModel.setStateActionRewardValue(row, modification.second);
            }
        }
    }
}
template<typename ValueType, typename RewardModelType>
void NondeterministicModel<ValueType, RewardModelType>::reduceToStateBasedRewards() {
for (auto& rewardModel : this->getRewardModels()) {

9
src/models/sparse/NondeterministicModel.h

@ -73,6 +73,15 @@ namespace storm {
*/
uint_fast64_t getNumberOfChoices(uint_fast64_t state) const;
/*!
* Modifies the state-action reward vector of the given reward model by setting the value specified in
* the map for the corresponding state-action pairs.
*
* @param rewardModel The reward model whose state-action rewards to modify.
* @param modifications A mapping from state-action pairs to their new reward values.
*/
void modifyStateActionRewards(RewardModelType& rewardModel, std::map<std::pair<uint_fast64_t, LabelSet>, typename RewardModelType::ValueType> const& modifications) const;
virtual void reduceToStateBasedRewards() override;
virtual void printModelInformationToStream(std::ostream& out) const override;

5
src/models/sparse/StandardRewardModel.cpp

@ -197,6 +197,11 @@ namespace storm {
return result;
}
template<typename ValueType>
// Overwrites the state-action reward stored at the given row.
// The optional vector is assumed to be present; per the header contract,
// callers must ensure the model actually has state-action rewards.
void StandardRewardModel<ValueType>::setStateActionRewardValue(uint_fast64_t row, ValueType const& value) {
    (*this->optionalStateActionRewardVector)[row] = value;
}
template<typename ValueType>
bool StandardRewardModel<ValueType>::empty() const {
return !(static_cast<bool>(this->optionalStateRewardVector) || static_cast<bool>(this->optionalStateActionRewardVector) || static_cast<bool>(this->optionalTransitionRewardMatrix));

13
src/models/sparse/StandardRewardModel.h

@ -10,9 +10,11 @@
namespace storm {
namespace models {
namespace sparse {
template <typename ValueType>
template <typename CValueType>
class StandardRewardModel {
public:
typedef CValueType ValueType;
/*!
* Constructs a reward model by copying the given data.
*
@ -213,6 +215,15 @@ namespace storm {
*/
std::vector<ValueType> getTotalStateActionRewardVector(uint_fast64_t numberOfRows, std::vector<uint_fast64_t> const& rowGroupIndices, storm::storage::BitVector const& filter) const;
/*!
* Sets the given value in the state-action reward vector at the given row. This assumes that the reward
* model has state-action rewards.
*
* @param row The row at which to set the given value.
* @param value The value to set.
*/
void setStateActionRewardValue(uint_fast64_t row, ValueType const& value);
/*!
* Retrieves whether the reward model is empty, i.e. contains no state-, state-action- or transition-based
* rewards.

25
src/utility/solver.cpp

@ -133,28 +133,37 @@ namespace storm {
}
std::unique_ptr<storm::solver::LpSolver> LpSolverFactory::create(std::string const& name) const {
storm::solver::LpSolverType lpSolver = storm::settings::generalSettings().getLpSolver();
switch (lpSolver) {
// Creates an LP solver of the requested type. If FROMSETTINGS is selected, the concrete
// solver type is read from the general settings module; otherwise the explicit selection
// is converted to a concrete solver type.
std::unique_ptr<storm::solver::LpSolver> LpSolverFactory::create(std::string const& name, storm::solver::LpSolverTypeSelection solvT) const {
storm::solver::LpSolverType t;
if(solvT == storm::solver::LpSolverTypeSelection::FROMSETTINGS) {
// Defer to whatever LP solver the user configured via settings.
t = storm::settings::generalSettings().getLpSolver();
} else {
t = convert(solvT);
}
switch (t) {
case storm::solver::LpSolverType::Gurobi: return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GurobiLpSolver(name));
case storm::solver::LpSolverType::Glpk: return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GlpkLpSolver(name));
// NOTE(review): any other solver type silently falls back to Gurobi — confirm this is intended.
default: return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GurobiLpSolver(name));
}
}
// Convenience overload: creates an LP solver whose type is determined by the settings module.
std::unique_ptr<storm::solver::LpSolver> LpSolverFactory::create(std::string const& name) const {
// Qualified call deliberately binds to this class's two-argument overload.
return LpSolverFactory::create(name, storm::solver::LpSolverTypeSelection::FROMSETTINGS);
}
std::unique_ptr<storm::solver::LpSolver> GlpkLpSolverFactory::create(std::string const& name) const {
return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GlpkLpSolver(name));
return LpSolverFactory::create(name, storm::solver::LpSolverTypeSelection::Glpk);
}
std::unique_ptr<storm::solver::LpSolver> GurobiLpSolverFactory::create(std::string const& name) const {
return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GurobiLpSolver(name));
return LpSolverFactory::create(name, storm::solver::LpSolverTypeSelection::Gurobi);
}
std::unique_ptr<storm::solver::LpSolver> getLpSolver(std::string const& name) {
std::unique_ptr<storm::solver::LpSolver> getLpSolver(std::string const& name, storm::solver::LpSolverTypeSelection solvType) {
std::unique_ptr<storm::utility::solver::LpSolverFactory> factory(new LpSolverFactory());
return factory->create(name);
return factory->create(name, solvType);
}
template class SymbolicLinearEquationSolverFactory<storm::dd::DdType::CUDD, double>;
template class SymbolicMinMaxLinearEquationSolverFactory<storm::dd::DdType::CUDD, double>;
template class SymbolicGameSolverFactory<storm::dd::DdType::CUDD>;

3
src/utility/solver.h

@ -112,6 +112,7 @@ namespace storm {
* @return A pointer to the newly created solver.
*/
virtual std::unique_ptr<storm::solver::LpSolver> create(std::string const& name) const;
virtual std::unique_ptr<storm::solver::LpSolver> create(std::string const& name, storm::solver::LpSolverTypeSelection solvType) const;
};
class GlpkLpSolverFactory : public LpSolverFactory {
@ -124,7 +125,7 @@ namespace storm {
virtual std::unique_ptr<storm::solver::LpSolver> create(std::string const& name) const override;
};
std::unique_ptr<storm::solver::LpSolver> getLpSolver(std::string const& name);
std::unique_ptr<storm::solver::LpSolver> getLpSolver(std::string const& name, storm::solver::LpSolverTypeSelection solvType = storm::solver::LpSolverTypeSelection::FROMSETTINGS) ;
}
}
}
Loading…
Cancel
Save