Browse Source

Removed a duplicated method in StandardRewardModel (setStateActionRewardValue did the same as setStateActionReward)

tempestpy_adaptions
Tim Quatmann 5 years ago
parent
commit
a80553a700
  1. 5
      src/storm/models/sparse/StandardRewardModel.cpp
  2. 9
      src/storm/models/sparse/StandardRewardModel.h

5
src/storm/models/sparse/StandardRewardModel.cpp

@@ -349,11 +349,6 @@ namespace storm {
return result;
}
template<typename ValueType>
void StandardRewardModel<ValueType>::setStateActionRewardValue(uint_fast64_t row, ValueType const& value) {
this->optionalStateActionRewardVector.get()[row] = value;
}
template<typename ValueType>
template<typename MatrixValueType>
void StandardRewardModel<ValueType>::clearRewardAtState(uint_fast64_t state, storm::storage::SparseMatrix<MatrixValueType> const& transitions) {

9
src/storm/models/sparse/StandardRewardModel.h

@@ -287,15 +287,6 @@ namespace storm {
template<typename MatrixValueType>
storm::storage::BitVector getChoicesWithFilter(storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, std::function<bool(ValueType const&)> const& filter) const;
/*!
* Sets the given value in the state-action reward vector at the given row. This assumes that the reward
* model has state-action rewards.
*
* @param row The row at which to set the given value.
* @param value The value to set.
*/
void setStateActionRewardValue(uint_fast64_t row, ValueType const& value);
/*!
 * Retrieves whether the reward model is empty, i.e. contains no state-, state-action- or transition-based
 * rewards.

Loading…
Cancel
Save