
gauss-seidel style multiplication for gmm++

Branch: tempestpy_adaptions
Author: dehnert, 7 years ago
Commit: c77b9ce404
  1. resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h (113 changed lines)
  2. src/storm/adapters/GmmxxAdapter.cpp (167 changed lines)
  3. src/storm/adapters/GmmxxAdapter.h (59 changed lines)
  4. src/storm/solver/GmmxxLinearEquationSolver.cpp (32 changed lines)
  5. src/storm/solver/GmmxxLinearEquationSolver.h (4 changed lines)
  6. src/storm/solver/StandardMinMaxLinearEquationSolver.cpp (12 changed lines)
  7. src/storm/solver/StandardMinMaxLinearEquationSolver.h (1 changed line)
  8. src/storm/storage/SparseMatrix.cpp (10 changed lines)
  9. src/storm/storage/SparseMatrix.h (3 changed lines)
  10. src/test/storm/modelchecker/GmmxxHybridMdpPrctlModelCheckerTest.cpp (16 changed lines)
  11. src/test/storm/modelchecker/GmmxxMdpPrctlModelCheckerTest.cpp (8 changed lines)
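Gauss-Seidel style multiplication here means that the matrix-vector product is written back into the input vector x while sweeping over the rows, so rows handled later in the same sweep already see updated values. That is what the new *_bwd routines in gmm_blas.h and the GmmxxMultiplier wrappers below provide. A minimal standalone sketch of the idea for a dense matrix (illustrative only; the names are not taken from the diff):

    #include <cstddef>
    #include <vector>

    // In-place (Gauss-Seidel style) backward sweep: x[i] <- b[i] + sum_j A[i][j] * x[j].
    // Rows are processed from last to first, so row i already sees the entries
    // x[i+1..n-1] that were updated earlier in the current sweep.
    void gaussSeidelStyleMultAdd(std::vector<std::vector<double>> const& A,
                                 std::vector<double> const& b,
                                 std::vector<double>& x) {
        for (std::size_t i = A.size(); i-- > 0;) {
            double value = b[i];
            for (std::size_t j = 0; j < A[i].size(); ++j) {
                value += A[i][j] * x[j];
            }
            x[i] = value; // overwrite in place; no separate result vector is needed
        }
    }

The gmm++ routines below perform the same backward sweep over the rows of a csr_matrix, which is why their target vector is allowed to alias x.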

resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h (113 changed lines)

@@ -1645,9 +1645,7 @@ namespace gmm {
}
#ifdef STORM_HAVE_INTELTBB
// Official Intel Hint on blocked_range vs. linear iterators: http://software.intel.com/en-us/forums/topic/289505
template <typename IT1, typename IT2>
class forward_range_mult {
IT1 my_begin;
@@ -1707,22 +1705,67 @@ namespace gmm {
my_l2(l2)
{}
};
template <typename L1, typename L2, typename L3>
class tbbHelper_mult_add_by_row {
L2 const* my_l2;
L3 const* my_l3;
// Typedefs for Iterator Types
typedef typename linalg_traits<L3>::iterator frm_IT1;
typedef typename linalg_traits<L1>::const_row_iterator frm_IT2;
public:
void operator()( const forward_range_mult<frm_IT1, frm_IT2>& r ) const {
L2 const& l2 = *my_l2;
frm_IT1 it = r.begin();
frm_IT1 ite = r.end();
frm_IT2 itr = r.begin_row();
frm_IT1 addIt = my_l3->begin();
for (; it != ite; ++it, ++itr, ++addIt) {
*it = vect_sp(linalg_traits<L1>::row(itr), l2,
typename linalg_traits<L1>::storage_type(),
typename linalg_traits<L2>::storage_type()) + *addIt;
}
}
tbbHelper_mult_add_by_row(L2 const* l2, L3 const* l3) : my_l2(l2), my_l3(l3) {
// Intentionally left empty.
}
};
template <typename L1, typename L2, typename L3>
void mult_by_row_parallel(const L1& l1, const L2& l2, L3& l3, abstract_dense) {
// Set up iterators over the result vector and the matrix rows, as in mult_by_row.
typename linalg_traits<L3>::iterator it = vect_begin(l3), ite = vect_end(l3);
typename linalg_traits<L1>::const_row_iterator itr = mat_row_const_begin(l1);
tbb::parallel_for(forward_range_mult<typename linalg_traits<L3>::iterator, typename linalg_traits<L1>::const_row_iterator>(it, ite, itr), tbbHelper_mult_by_row<L1, L2, L3>(&l2));
}
template <typename L1, typename L2, typename L3, typename L4>
void mult_add_by_row_parallel(const L1& l1, const L2& l2, const L3& l3, L4& l4, abstract_dense) {
tbb::parallel_for(forward_range_mult<typename linalg_traits<L3>::iterator, typename linalg_traits<L1>::const_row_iterator>(it, ite, itr), tbbHelper_mult_add_by_row<L1, L2, L3, L4>(&l2, &l3));
}
#endif
template <typename L1, typename L2, typename L3>
void mult_by_row(const L1& l1, const L2& l2, L3& l3, abstract_dense) {
typename linalg_traits<L3>::iterator it=vect_begin(l3), ite=vect_end(l3);
auto itr = mat_row_const_begin(l1);
#ifdef STORM_HAVE_INTELTBB
tbb::parallel_for(forward_range_mult<typename linalg_traits<L3>::iterator, typename linalg_traits<L1>::const_row_iterator>(it, ite, itr), tbbHelper_mult_by_row<L1, L2, L3>(&l2));
#else
for (; it != ite; ++it, ++itr)
*it = vect_sp(linalg_traits<L1>::row(itr), l2,
typename linalg_traits<L1>::storage_type(),
typename linalg_traits<L2>::storage_type());
#endif
}
template <typename L1, typename L2, typename L3>
void mult_by_row_bwd(const L1& l1, const L2& l2, L3& l3, abstract_dense) {
typename linalg_traits<L3>::iterator target_it=vect_end(l3) - 1, target_ite=vect_begin(l3) - 1;
typename linalg_traits<L1>::const_row_iterator
itr = mat_row_const_end(l1) - 1;
for (; target_it != target_ite; --target_it, --itr)
*target_it = vect_sp(linalg_traits<L1>::row(itr), l2);
}
template <typename L1, typename L2, typename L3>
void mult_by_col(const L1& l1, const L2& l2, L3& l3, abstract_dense) {
clear(l3);
@@ -1753,6 +1796,12 @@ namespace gmm {
void mult_spec(const L1& l1, const L2& l2, L3& l3, row_major)
{ mult_by_row(l1, l2, l3, typename linalg_traits<L3>::storage_type()); }
#ifdef STORM_HAVE_INTELTBB
template <typename L1, typename L2, typename L3> inline
void mult_parallel_spec(const L1& l1, const L2& l2, L3& l3, row_major)
{ mult_by_row_parallel(l1, l2, l3, typename linalg_traits<L3>::storage_type()); }
#endif
template <typename L1, typename L2, typename L3> inline
void mult_spec(const L1& l1, const L2& l2, L3& l3, col_major)
{ mult_by_col(l1, l2, l3, typename linalg_traits<L2>::storage_type()); }
@@ -1828,6 +1877,28 @@ namespace gmm {
linalg_traits<L1>::sub_orientation>::potype());
}
}
#ifdef STORM_HAVE_INTELTBB
/** Multiply-accumulate. l4 = l1*l2 + l3; */
template <typename L1, typename L2, typename L3, typename L4> inline
void mult_add_parallel(const L1& l1, const L2& l2, const L3& l3, L4& l4) {
size_type m = mat_nrows(l1), n = mat_ncols(l1);
if (!m || !n) return;
GMM_ASSERT2(n==vect_size(l2) && m==vect_size(l3) && vect_size(l3) == vect_size(l4), "dimensions mismatch");
if (!same_origin(l2, l4) && !same_origin(l3, l4) && !same_origin(l2, l3)) {
mult_add_parallel_spec(l1, l2, l3, l4, typename principal_orientation_type<typename
linalg_traits<L1>::sub_orientation>::potype());
} else {
GMM_WARNING2("Warning, Multiple temporaries are used for mult\n");
typename temporary_vector<L2>::vector_type l2tmp(vect_size(l2));
copy(l2, l2tmp);
typename temporary_vector<L3>::vector_type l3tmp(vect_size(l3));
copy(l3, l3tmp);
mult_add_parallel_spec(l1, l2tmp, l3tmp, l4, typename principal_orientation_type<typename
linalg_traits<L1>::sub_orientation>::potype());
}
}
#endif
///@cond DOXY_SHOW_ALL_FUNCTIONS
template <typename L1, typename L2, typename L3> inline
@@ -1868,10 +1939,20 @@ namespace gmm {
typename linalg_traits<L4>::iterator target_it=vect_begin(l4), target_ite=vect_end(l4);
typename linalg_traits<L1>::const_row_iterator
itr = mat_row_const_begin(l1);
for (; add_it != add_ite; ++add_it, ++target_it, ++itr)
*target_it = vect_sp(linalg_traits<L1>::row(itr), l2) + *add_it;
}
template <typename L1, typename L2, typename L3, typename L4>
void mult_add_by_row_bwd(const L1& l1, const L2& l2, const L3& l3, L4& l4, abstract_dense) {
typename linalg_traits<L3>::const_iterator add_it=vect_end(l3) - 1, add_ite=vect_begin(l3) - 1;
typename linalg_traits<L4>::iterator target_it=vect_end(l4) - 1, target_ite=vect_begin(l4) - 1;
typename linalg_traits<L1>::const_row_iterator
itr = mat_row_const_end(l1) - 1;
for (; add_it != add_ite; --add_it, --target_it, --itr)
*target_it = vect_sp(linalg_traits<L1>::row(itr), l2) + *add_it;
}
template <typename L1, typename L2, typename L3>
void mult_add_by_col(const L1& l1, const L2& l2, L3& l3, abstract_dense) {
size_type nc = mat_ncols(l1);
@@ -1903,6 +1984,12 @@ namespace gmm {
void mult_add_spec(const L1& l1, const L2& l2, const L3& l3, L4& l4, row_major)
{ mult_add_by_row(l1, l2, l3, l4, typename linalg_traits<L3>::storage_type()); }
#ifdef STORM_HAVE_INTELTBB
template <typename L1, typename L2, typename L3, typename L4> inline
void mult_add_parallel_spec(const L1& l1, const L2& l2, const L3& l3, L4& l4, row_major)
{ mult_add_by_row_parallel(l1, l2, l3, l4, typename linalg_traits<L4>::storage_type()); }
#endif
template <typename L1, typename L2, typename L3> inline
void mult_add_spec(const L1& l1, const L2& l2, L3& l3, col_major)
{ mult_add_by_col(l1, l2, l3, typename linalg_traits<L2>::storage_type()); }
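The *_parallel variants added above distribute the rows of the result across TBB tasks; each row only reads x (and the additive vector) and writes its own result entry, so the tasks need no synchronization. A rough equivalent of that row-parallel multiply-add using tbb::blocked_range instead of the forward_range_mult wrapper (a sketch under that assumption, not code from the diff):

    #include <cstddef>
    #include <vector>
    #include <tbb/blocked_range.h>
    #include <tbb/parallel_for.h>

    // result[i] = b[i] + sum_j A[i][j] * x[j], with the rows split over TBB tasks.
    void multAddRowParallel(std::vector<std::vector<double>> const& A,
                            std::vector<double> const& x,
                            std::vector<double> const& b,
                            std::vector<double>& result) {
        tbb::parallel_for(tbb::blocked_range<std::size_t>(0, A.size()),
                          [&](tbb::blocked_range<std::size_t> const& range) {
                              for (std::size_t i = range.begin(); i != range.end(); ++i) {
                                  double value = b[i];
                                  for (std::size_t j = 0; j < A[i].size(); ++j) {
                                      value += A[i][j] * x[j];
                                  }
                                  result[i] = value; // each task writes disjoint entries
                              }
                          });
    }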

src/storm/adapters/GmmxxAdapter.cpp (167 changed lines)

@@ -0,0 +1,167 @@
#include "storm/adapters/GmmxxAdapter.h"
#include <algorithm>
#include "storm/adapters/RationalNumberAdapter.h"
#include "storm/adapters/RationalFunctionAdapter.h"
#include "storm/utility/constants.h"
#include "storm/exceptions/NotSupportedException.h"
namespace storm {
namespace adapters {
template<typename T>
std::unique_ptr<gmm::csr_matrix<T>> GmmxxAdapter<T>::toGmmxxSparseMatrix(storm::storage::SparseMatrix<T> const& matrix) {
uint_fast64_t realNonZeros = matrix.getEntryCount();
STORM_LOG_DEBUG("Converting " << matrix.getRowCount() << "x" << matrix.getColumnCount() << " matrix with " << realNonZeros << " non-zeros to gmm++ format.");
// Prepare the resulting matrix.
std::unique_ptr<gmm::csr_matrix<T>> result(new gmm::csr_matrix<T>(matrix.getRowCount(), matrix.getColumnCount()));
// Copy Row Indications
std::copy(matrix.rowIndications.begin(), matrix.rowIndications.end(), result->jc.begin());
// Copy columns and values.
std::vector<T> values;
values.reserve(matrix.getEntryCount());
// To match the correct vector type for gmm, we create the vector with the exact same type.
decltype(result->ir) columns;
columns.reserve(matrix.getEntryCount());
for (auto const& entry : matrix) {
columns.emplace_back(entry.getColumn());
values.emplace_back(entry.getValue());
}
std::swap(result->ir, columns);
std::swap(result->pr, values);
STORM_LOG_DEBUG("Done converting matrix to gmm++ format.");
return result;
}
template<typename T>
void GmmxxMultiplier<T>::multAdd(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result) {
if (b) {
gmm::mult_add(matrix, x, *b, result);
} else {
gmm::mult(matrix, x, result);
}
}
template<typename T>
void GmmxxMultiplier<T>::multAddGaussSeidelBackward(gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b) {
STORM_LOG_ASSERT(matrix.nr == matrix.nc, "Expecting square matrix.");
if (b) {
gmm::mult_add_by_row_bwd(matrix, x, *b, x, gmm::abstract_dense());
} else {
gmm::mult_by_row_bwd(matrix, x, x, gmm::abstract_dense());
}
}
template<typename T>
void GmmxxMultiplier<T>::multAddReduce(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices) {
std::vector<T>* target = &result;
std::unique_ptr<std::vector<T>> temporary;
if (&x == &result) {
STORM_LOG_WARN("Using temporary in 'multAddReduce'.");
temporary = std::make_unique<std::vector<T>>(x.size());
target = temporary.get();
}
multAddReduceHelper(dir, rowGroupIndices, matrix, x, b, *target, choices);
}
template<typename T>
void GmmxxMultiplier<T>::multAddReduceGaussSeidel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b, std::vector<uint64_t>* choices) {
multAddReduceHelper(dir, rowGroupIndices, matrix, x, b, x, choices);
}
template<typename T>
void GmmxxMultiplier<T>::multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices) {
typedef std::vector<T> VectorType;
typedef gmm::csr_matrix<T> MatrixType;
typename gmm::linalg_traits<VectorType>::const_iterator add_it, add_ite;
if (b) {
add_it = gmm::vect_end(*b) - 1;
add_ite = gmm::vect_begin(*b) - 1;
}
typename gmm::linalg_traits<VectorType>::iterator target_it = gmm::vect_end(result) - 1;
typename gmm::linalg_traits<MatrixType>::const_row_iterator itr = mat_row_const_end(matrix) - 1;
typename std::vector<uint64_t>::iterator choice_it;
if (choices) {
choice_it = choices->end() - 1;
}
uint64_t choice;
for (auto row_group_it = rowGroupIndices.end() - 2, row_group_ite = rowGroupIndices.begin() - 1; row_group_it != row_group_ite; --row_group_it, --choice_it, --target_it) {
T currentValue = b ? *add_it : storm::utility::zero<T>();
currentValue += vect_sp(gmm::linalg_traits<MatrixType>::row(itr), x);
if (choices) {
choice = *(row_group_it + 1) - 1 - *row_group_it;
*choice_it = choice;
}
--itr;
if (b) {
--add_it;
}
for (uint64_t row = *row_group_it + 1, rowEnd = *(row_group_it + 1); row < rowEnd; ++row, --itr) {
T newValue = b ? *add_it : storm::utility::zero<T>();
newValue += vect_sp(gmm::linalg_traits<MatrixType>::row(itr), x);
if (choices) {
--choice;
}
if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) {
currentValue = newValue;
if (choices) {
*choice_it = choice;
}
}
if (b) {
--add_it;
}
}
// Write back final value.
*target_it = currentValue;
}
}
template<>
void GmmxxMultiplier<storm::RationalFunction>::multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<storm::RationalFunction> const& matrix, std::vector<storm::RationalFunction> const& x, std::vector<storm::RationalFunction> const* b, std::vector<storm::RationalFunction>& result, std::vector<uint64_t>* choices) {
STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Operation not supported for this data type.");
}
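multAddReduceHelper performs one backward sweep over the row groups: for every group it evaluates offset-plus-dot-product for each row of the group, keeps the minimum or maximum according to dir, and optionally records the index of the chosen row within the group. A compact dense sketch of that computation (hypothetical helper names, forward iteration for readability):

    #include <cstddef>
    #include <cstdint>
    #include <limits>
    #include <vector>

    // For each group g: result[g] = opt over rows r in [groups[g], groups[g+1]) of
    //   b[r] + sum_j A[r][j] * x[j],  where opt is min or max.
    void multAddReduceSketch(bool minimize,
                             std::vector<uint64_t> const& rowGroupIndices, // size = #groups + 1
                             std::vector<std::vector<double>> const& A,    // one dense row per matrix row
                             std::vector<double> const& x,
                             std::vector<double> const& b,
                             std::vector<double>& result,
                             std::vector<uint64_t>* choices = nullptr) {
        for (uint64_t group = 0; group + 1 < rowGroupIndices.size(); ++group) {
            double best = minimize ? std::numeric_limits<double>::infinity()
                                   : -std::numeric_limits<double>::infinity();
            uint64_t bestChoice = 0;
            for (uint64_t row = rowGroupIndices[group]; row < rowGroupIndices[group + 1]; ++row) {
                double value = b[row];
                for (std::size_t j = 0; j < A[row].size(); ++j) {
                    value += A[row][j] * x[j];
                }
                if ((minimize && value < best) || (!minimize && value > best)) {
                    best = value;
                    bestChoice = row - rowGroupIndices[group]; // choice is local to the group
                }
            }
            result[group] = best;
            if (choices) {
                (*choices)[group] = bestChoice;
            }
        }
    }

The implementation in the diff runs this sweep backwards over the groups so that result may alias x; multAddReduceGaussSeidel exploits exactly that by passing x as the result vector.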
#ifdef STORM_HAVE_INTELTBB
template<typename T>
void GmmxxMultiplier<T>::multAddParallel(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result) {
if (b) {
gmm::mult_add_parallel(matrix, x, *b, result);
} else {
gmm::mult_parallel(matrix, x, result);
}
}
#endif
template class GmmxxAdapter<double>;
template class GmmxxMultiplier<double>;
#ifdef STORM_HAVE_CARL
template class GmmxxAdapter<storm::RationalNumber>;
template class GmmxxAdapter<storm::RationalFunction>;
template class GmmxxMultiplier<storm::RationalNumber>;
template class GmmxxMultiplier<storm::RationalFunction>;
#endif
}
}
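A typical call sequence for the new multiplier, assuming a square storm::storage::SparseMatrix<double> named matrix is already available (a sketch only, not part of the commit):

    #include <vector>
    #include "storm/adapters/GmmxxAdapter.h"
    #include "storm/storage/SparseMatrix.h"

    void gaussSeidelSweep(storm::storage::SparseMatrix<double> const& matrix,
                          std::vector<double>& x, std::vector<double> const& b) {
        // Convert once to the gmm++ CSR format ...
        auto gmmMatrix = storm::adapters::GmmxxAdapter<double>::toGmmxxSparseMatrix(matrix);
        // ... then update x in place: x <- A*x + b, processing the rows from last to first.
        storm::adapters::GmmxxMultiplier<double>::multAddGaussSeidelBackward(*gmmMatrix, x, &b);
    }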

src/storm/adapters/GmmxxAdapter.h (59 changed lines)

@@ -1,7 +1,5 @@
#ifndef STORM_ADAPTERS_GMMXXADAPTER_H_
#define STORM_ADAPTERS_GMMXXADAPTER_H_
#pragma once
#include <algorithm>
#include <memory>
#include "storm/utility/gmm.h"
@@ -11,49 +9,34 @@
#include "storm/utility/macros.h"
namespace storm {
namespace adapters {
template<typename T>
class GmmxxAdapter {
public:
/*!
* Converts a sparse matrix into a sparse matrix in the gmm++ format.
* @return A pointer to a row-major sparse matrix in gmm++ format.
*/
template<class T>
static std::unique_ptr<gmm::csr_matrix<T>> toGmmxxSparseMatrix(storm::storage::SparseMatrix<T> const& matrix) {
uint_fast64_t realNonZeros = matrix.getEntryCount();
STORM_LOG_DEBUG("Converting " << matrix.getRowCount() << "x" << matrix.getColumnCount() << " matrix with " << realNonZeros << " non-zeros to gmm++ format.");
// Prepare the resulting matrix.
std::unique_ptr<gmm::csr_matrix<T>> result(new gmm::csr_matrix<T>(matrix.getRowCount(), matrix.getColumnCount()));
// Copy Row Indications
std::copy(matrix.rowIndications.begin(), matrix.rowIndications.end(), result->jc.begin());
// Copy columns and values.
std::vector<T> values;
values.reserve(matrix.getEntryCount());
// To match the correct vector type for gmm, we create the vector with the exact same type.
decltype(result->ir) columns;
columns.reserve(matrix.getEntryCount());
for (auto const& entry : matrix) {
columns.emplace_back(entry.getColumn());
values.emplace_back(entry.getValue());
}
std::swap(result->ir, columns);
std::swap(result->pr, values);
STORM_LOG_DEBUG("Done converting matrix to gmm++ format.");
return result;
}
static std::unique_ptr<gmm::csr_matrix<T>> toGmmxxSparseMatrix(storm::storage::SparseMatrix<T> const& matrix);
};
template<class T>
class GmmxxMultiplier {
public:
static void multAdd(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result);
static void multAddGaussSeidelBackward(gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b);
static void multAddReduce(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices = nullptr);
static void multAddReduceGaussSeidel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b, std::vector<uint64_t>* choices = nullptr);
#ifdef STORM_HAVE_INTELTBB
static void multAddParallel(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result);
#endif
private:
static void multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices = nullptr);
};
} // namespace adapters
} // namespace storm

src/storm/solver/GmmxxLinearEquationSolver.cpp (32 changed lines)

@@ -110,13 +110,13 @@ namespace storm {
template<typename ValueType>
void GmmxxLinearEquationSolver<ValueType>::setMatrix(storm::storage::SparseMatrix<ValueType> const& A) {
gmmxxA = storm::adapters::GmmxxAdapter::toGmmxxSparseMatrix<ValueType>(A);
gmmxxA = storm::adapters::GmmxxAdapter<ValueType>::toGmmxxSparseMatrix(A);
clearCache();
}
template<typename ValueType>
void GmmxxLinearEquationSolver<ValueType>::setMatrix(storm::storage::SparseMatrix<ValueType>&& A) {
gmmxxA = storm::adapters::GmmxxAdapter::toGmmxxSparseMatrix<ValueType>(A);
gmmxxA = storm::adapters::GmmxxAdapter<ValueType>::toGmmxxSparseMatrix(A);
clearCache();
}
@@ -187,17 +187,33 @@ namespace storm {
template<typename ValueType>
void GmmxxLinearEquationSolver<ValueType>::multiply(std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const {
if (b) {
gmm::mult_add(*gmmxxA, x, *b, result);
} else {
gmm::mult(*gmmxxA, x, result);
}
storm::adapters::GmmxxMultiplier<ValueType>::multAdd(*gmmxxA, x, b, result);
if(!this->isCachingEnabled()) {
if (!this->isCachingEnabled()) {
clearCache();
}
}
template<typename ValueType>
void GmmxxLinearEquationSolver<ValueType>::multiplyAndReduce(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const {
storm::adapters::GmmxxMultiplier<ValueType>::multAddReduce(dir, rowGroupIndices, *gmmxxA, x, b, result, choices);
}
template<typename ValueType>
bool GmmxxLinearEquationSolver<ValueType>::supportsGaussSeidelMultiplication() const {
return true;
}
template<typename ValueType>
void GmmxxLinearEquationSolver<ValueType>::multiplyGaussSeidel(std::vector<ValueType>& x, std::vector<ValueType> const* b) const {
storm::adapters::GmmxxMultiplier<ValueType>::multAddGaussSeidelBackward(*gmmxxA, x, b);
}
template<typename ValueType>
void GmmxxLinearEquationSolver<ValueType>::multiplyAndReduceGaussSeidel(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<uint64_t>* choices) const {
storm::adapters::GmmxxMultiplier<ValueType>::multAddReduceGaussSeidel(dir, rowGroupIndices, *gmmxxA, x, b, choices);
}
template<typename ValueType>
void GmmxxLinearEquationSolver<ValueType>::setSettings(GmmxxLinearEquationSolverSettings<ValueType> const& newSettings) {
settings = newSettings;

src/storm/solver/GmmxxLinearEquationSolver.h (4 changed lines)

@@ -87,6 +87,10 @@ namespace storm {
virtual bool solveEquations(std::vector<ValueType>& x, std::vector<ValueType> const& b) const override;
virtual void multiply(std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const override;
virtual void multiplyAndReduce(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices = nullptr) const override;
virtual bool supportsGaussSeidelMultiplication() const override;
virtual void multiplyGaussSeidel(std::vector<ValueType>& x, std::vector<ValueType> const* b) const override;
virtual void multiplyAndReduceGaussSeidel(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<uint_fast64_t>* choices = nullptr) const override;
void setSettings(GmmxxLinearEquationSolverSettings<ValueType> const& newSettings);
GmmxxLinearEquationSolverSettings<ValueType> const& getSettings() const;

src/storm/solver/StandardMinMaxLinearEquationSolver.cpp (12 changed lines)

@@ -48,17 +48,13 @@ namespace storm {
linEqSolverA->setCachingEnabled(true);
}
if (!auxiliaryRowVector) {
auxiliaryRowVector = std::make_unique<std::vector<ValueType>>(A->getRowCount());
if (!auxiliaryRowGroupVector) {
auxiliaryRowGroupVector = std::make_unique<std::vector<ValueType>>(this->A->getRowGroupCount());
}
std::vector<ValueType>& multiplyResult = *auxiliaryRowVector;
for (uint64_t i = 0; i < n; ++i) {
linEqSolverA->multiply(x, b, multiplyResult);
// Reduce the vector x' by applying min/max for all non-deterministic choices as given by the topmost
// element of the min/max operator stack.
storm::utility::vector::reduceVectorMinOrMax(dir, multiplyResult, x, this->A->getRowGroupIndices());
linEqSolverA->multiplyAndReduce(dir, this->A->getRowGroupIndices(), x, b, *auxiliaryRowGroupVector);
std::swap(x, *auxiliaryRowGroupVector);
}
if (!this->isCachingEnabled()) {
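The rewritten loop fuses the multiplication with the min/max reduction, so only a buffer with one entry per row group is needed instead of one entry per matrix row. Semantically, each iteration computes x <- opt(A*x + b) over the row groups and then swaps the buffers; a sketch in terms of the hypothetical multAddReduceSketch helper shown after the GmmxxAdapter.cpp section above:

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Declared in the sketch accompanying GmmxxAdapter.cpp above (hypothetical helper).
    void multAddReduceSketch(bool minimize, std::vector<uint64_t> const& rowGroupIndices,
                             std::vector<std::vector<double>> const& A,
                             std::vector<double> const& x, std::vector<double> const& b,
                             std::vector<double>& result, std::vector<uint64_t>* choices = nullptr);

    // n steps of x <- opt_rows(A * x + b); the fused multiply-and-reduce only needs a
    // buffer sized by the number of row groups, not by the number of matrix rows.
    void valueIterationSketch(bool minimize, uint64_t n,
                              std::vector<uint64_t> const& rowGroupIndices,
                              std::vector<std::vector<double>> const& A,
                              std::vector<double> const& b,
                              std::vector<double>& x) {
        std::vector<double> buffer(rowGroupIndices.size() - 1);
        for (uint64_t i = 0; i < n; ++i) {
            multAddReduceSketch(minimize, rowGroupIndices, A, x, b, buffer);
            std::swap(x, buffer); // the next iteration reads the freshly reduced values
        }
    }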

src/storm/solver/StandardMinMaxLinearEquationSolver.h (1 changed line)

@@ -27,6 +27,7 @@ namespace storm {
// possibly cached data
mutable std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> linEqSolverA;
mutable std::unique_ptr<std::vector<ValueType>> auxiliaryRowVector; // A.rowCount() entries
mutable std::unique_ptr<std::vector<ValueType>> auxiliaryRowGroupVector; // A.rowGroupCount() entries
/// The factory used to obtain linear equation solvers.
std::unique_ptr<LinearEquationSolverFactory<ValueType>> linearEquationSolverFactory;

src/storm/storage/SparseMatrix.cpp (10 changed lines)

@@ -1497,7 +1497,7 @@ namespace storm {
if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) {
currentValue = newValue;
if (choices) {
*choiceIt = *rowIt - *rowGroupIt;
*choiceIt = std::distance(rowIndications.begin(), rowIt) - *rowGroupIt;
}
}
if (summand) {
@@ -1538,7 +1538,7 @@ namespace storm {
currentValue += elementIt->getValue() * vector[elementIt->getColumn()];
}
if (choices) {
*choiceIt = 0;
*choiceIt = std::distance(rowIndications.begin(), rowIt) - *rowGroupIt;
}
--rowIt;
@@ -1546,7 +1546,7 @@ namespace storm {
--summandIt;
}
for (; std::distance(rowIndications.begin(), rowIt) >= static_cast<int_fast64_t>(*rowGroupIt); --rowIt) {
for (uint64_t i = *rowGroupIt + 1, end = *(rowGroupIt + 1); i < end; --rowIt, ++i) {
ValueType newValue = summand ? *summandIt : storm::utility::zero<ValueType>();
for (auto elementIte = this->begin() + *rowIt - 1; elementIt != elementIte; --elementIt) {
newValue += elementIt->getValue() * vector[elementIt->getColumn()];
@@ -1555,7 +1555,7 @@ namespace storm {
if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) {
currentValue = newValue;
if (choices) {
*choiceIt = *rowIt - *rowGroupIt;
*choiceIt = std::distance(rowIndications.begin(), rowIt) - *rowGroupIt;
}
}
if (summand) {
@@ -1617,7 +1617,7 @@ namespace storm {
if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) {
currentValue = newValue;
if (choices) {
*choiceIt = *rowIt - *rowGroupIt;
*choiceIt = std::distance(rowIndications.begin(), rowIt) - *rowGroupIt;
}
}
}
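The corrected choice bookkeeping stores the row's position within its group, computed from the row's index in rowIndications, instead of the previously used value *rowIt, which is an offset into the entry array. A small worked example with made-up numbers:

    #include <cassert>
    #include <cstdint>
    #include <iterator>
    #include <vector>

    // Hypothetical data: rowGroupIndices = {0, 2, 5}, so group 1 covers rows 2, 3 and 4.
    // rowIndications[r] is the offset of row r's first entry in the entry array.
    int main() {
        std::vector<uint64_t> rowIndications = {0, 4, 7, 9, 13, 18};
        std::vector<uint64_t> rowGroupIndices = {0, 2, 5};
        auto rowIt = rowIndications.begin() + 3;       // the reduction currently looks at row 3 ...
        auto rowGroupIt = rowGroupIndices.begin() + 1; // ... which lies in group 1 (first row: 2)
        // New computation: the row's position within its group, here 3 - 2 == 1.
        uint64_t choice = std::distance(rowIndications.begin(), rowIt) - *rowGroupIt;
        // The old expression *rowIt - *rowGroupIt would yield 13 - 2 == 11, an entry offset
        // rather than a choice index.
        assert(choice == 1);
        return 0;
    }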

src/storm/storage/SparseMatrix.h (3 changed lines)

@@ -19,6 +19,7 @@
// Forward declaration for adapter classes.
namespace storm {
namespace adapters {
template <typename ValueType>
class GmmxxAdapter;
class EigenAdapter;
class StormAdapter;
@@ -315,7 +316,7 @@ namespace storm {
class SparseMatrix {
public:
// Declare adapter classes as friends to use internal data.
friend class storm::adapters::GmmxxAdapter;
friend class storm::adapters::GmmxxAdapter<ValueType>;
friend class storm::adapters::EigenAdapter;
friend class storm::adapters::StormAdapter;
friend class storm::solver::TopologicalValueIterationMinMaxLinearEquationSolver<ValueType>;

src/test/storm/modelchecker/GmmxxHybridMdpPrctlModelCheckerTest.cpp (16 changed lines)

@@ -116,8 +116,8 @@ TEST(GmmxxHybridMdpPrctlModelCheckerTest, Dice_Cudd) {
result->filter(storm::modelchecker::SymbolicQualitativeCheckResult<storm::dd::DdType::CUDD>(model->getReachableStates(), model->getInitialStates()));
storm::modelchecker::HybridQuantitativeCheckResult<storm::dd::DdType::CUDD>& quantitativeResult8 = result->asHybridQuantitativeCheckResult<storm::dd::DdType::CUDD, double>();
EXPECT_NEAR(7.3333294987678528, quantitativeResult8.getMin(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(7.3333294987678528, quantitativeResult8.getMax(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(7.3333328887820244, quantitativeResult8.getMin(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(7.3333328887820244, quantitativeResult8.getMax(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
}
TEST(GmmxxHybridMdpPrctlModelCheckerTest, Dice_Sylvan) {
@@ -214,8 +214,8 @@ TEST(GmmxxHybridMdpPrctlModelCheckerTest, Dice_Sylvan) {
result->filter(storm::modelchecker::SymbolicQualitativeCheckResult<storm::dd::DdType::Sylvan>(model->getReachableStates(), model->getInitialStates()));
storm::modelchecker::HybridQuantitativeCheckResult<storm::dd::DdType::Sylvan>& quantitativeResult8 = result->asHybridQuantitativeCheckResult<storm::dd::DdType::Sylvan, double>();
EXPECT_NEAR(7.3333294987678528, quantitativeResult8.getMin(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(7.3333294987678528, quantitativeResult8.getMax(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(7.3333328887820244, quantitativeResult8.getMin(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(7.3333328887820244, quantitativeResult8.getMax(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
}
TEST(GmmxxHybridMdpPrctlModelCheckerTest, AsynchronousLeader_Cudd) {
@@ -294,8 +294,8 @@ TEST(GmmxxHybridMdpPrctlModelCheckerTest, AsynchronousLeader_Cudd) {
result->filter(storm::modelchecker::SymbolicQualitativeCheckResult<storm::dd::DdType::CUDD>(model->getReachableStates(), model->getInitialStates()));
storm::modelchecker::HybridQuantitativeCheckResult<storm::dd::DdType::CUDD>& quantitativeResult6 = result->asHybridQuantitativeCheckResult<storm::dd::DdType::CUDD, double>();
EXPECT_NEAR(4.2856904354441401, quantitativeResult6.getMin(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(4.2856904354441401, quantitativeResult6.getMax(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(4.2857120959008661, quantitativeResult6.getMin(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(4.2857120959008661, quantitativeResult6.getMax(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
}
TEST(GmmxxHybridMdpPrctlModelCheckerTest, AsynchronousLeader_Sylvan) {
@@ -374,7 +374,7 @@ TEST(GmmxxHybridMdpPrctlModelCheckerTest, AsynchronousLeader_Sylvan) {
result->filter(storm::modelchecker::SymbolicQualitativeCheckResult<storm::dd::DdType::Sylvan>(model->getReachableStates(), model->getInitialStates()));
storm::modelchecker::HybridQuantitativeCheckResult<storm::dd::DdType::Sylvan>& quantitativeResult6 = result->asHybridQuantitativeCheckResult<storm::dd::DdType::Sylvan, double>();
EXPECT_NEAR(4.2856904354441401, quantitativeResult6.getMin(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(4.2856904354441401, quantitativeResult6.getMax(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(4.2857120959008661, quantitativeResult6.getMin(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(4.2857120959008661, quantitativeResult6.getMax(), storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
}

src/test/storm/modelchecker/GmmxxMdpPrctlModelCheckerTest.cpp (8 changed lines)

@@ -86,7 +86,7 @@ TEST(GmmxxMdpPrctlModelCheckerTest, Dice) {
result = checker.check(*formula);
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult8 = result->asExplicitQuantitativeCheckResult<double>();
EXPECT_NEAR(7.333329499, quantitativeResult8[0], storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(7.3333328887820244, quantitativeResult8[0], storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
abstractModel = storm::parser::AutoParser<>::parseModel(STORM_TEST_RESOURCES_DIR "/tra/two_dice.tra", STORM_TEST_RESOURCES_DIR "/lab/two_dice.lab", STORM_TEST_RESOURCES_DIR "/rew/two_dice.flip.state.rew", "");
@@ -108,7 +108,7 @@ TEST(GmmxxMdpPrctlModelCheckerTest, Dice) {
result = stateRewardModelChecker.check(*formula);
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult10 = result->asExplicitQuantitativeCheckResult<double>();
EXPECT_NEAR(7.333329499, quantitativeResult10[0], storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(7.3333328887820244, quantitativeResult10[0], storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
abstractModel = storm::parser::AutoParser<>::parseModel(STORM_TEST_RESOURCES_DIR "/tra/two_dice.tra", STORM_TEST_RESOURCES_DIR "/lab/two_dice.lab", STORM_TEST_RESOURCES_DIR "/rew/two_dice.flip.state.rew", STORM_TEST_RESOURCES_DIR "/rew/two_dice.flip.trans.rew");
@@ -130,7 +130,7 @@ TEST(GmmxxMdpPrctlModelCheckerTest, Dice) {
result = stateAndTransitionRewardModelChecker.check(*formula);
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult12 = result->asExplicitQuantitativeCheckResult<double>();
EXPECT_NEAR(14.666658998, quantitativeResult12[0], storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(14.666665777564049, quantitativeResult12[0], storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
}
TEST(GmmxxMdpPrctlModelCheckerTest, AsynchronousLeader) {
@@ -188,7 +188,7 @@ TEST(GmmxxMdpPrctlModelCheckerTest, AsynchronousLeader) {
result = checker.check(*formula);
storm::modelchecker::ExplicitQuantitativeCheckResult<double>& quantitativeResult6 = result->asExplicitQuantitativeCheckResult<double>();
EXPECT_NEAR(4.285689611, quantitativeResult6[0], storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
EXPECT_NEAR(4.2857120959008661, quantitativeResult6[0], storm::settings::getModule<storm::settings::modules::NativeEquationSolverSettings>().getPrecision());
}
TEST(GmmxxMdpPrctlModelCheckerTest, SchedulerGeneration) {
