From c77b9ce404787fb95899e7b821a667feb5aca414 Mon Sep 17 00:00:00 2001 From: dehnert Date: Sun, 10 Sep 2017 22:23:09 +0200 Subject: [PATCH] gauss-seidel style multiplication for gmm++ --- .../3rdparty/gmm-5.2/include/gmm/gmm_blas.h | 113 ++++++++++-- src/storm/adapters/GmmxxAdapter.cpp | 167 ++++++++++++++++++ src/storm/adapters/GmmxxAdapter.h | 59 +++---- .../solver/GmmxxLinearEquationSolver.cpp | 32 +++- src/storm/solver/GmmxxLinearEquationSolver.h | 4 + .../StandardMinMaxLinearEquationSolver.cpp | 12 +- .../StandardMinMaxLinearEquationSolver.h | 1 + src/storm/storage/SparseMatrix.cpp | 10 +- src/storm/storage/SparseMatrix.h | 3 +- .../GmmxxHybridMdpPrctlModelCheckerTest.cpp | 16 +- .../GmmxxMdpPrctlModelCheckerTest.cpp | 8 +- 11 files changed, 340 insertions(+), 85 deletions(-) create mode 100644 src/storm/adapters/GmmxxAdapter.cpp diff --git a/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h b/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h index 0dcb9463e..de19b243d 100644 --- a/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h +++ b/resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h @@ -1645,9 +1645,7 @@ namespace gmm { } #ifdef STORM_HAVE_INTELTBB - /* Official Intel Hint on blocked_range vs. linear iterators: http://software.intel.com/en-us/forums/topic/289505 - - */ + // Official Intel Hint on blocked_range vs. linear iterators: http://software.intel.com/en-us/forums/topic/289505 template class forward_range_mult { IT1 my_begin; @@ -1707,22 +1705,67 @@ namespace gmm { my_l2(l2) {} }; + + template + class tbbHelper_mult_add_by_row { + L2 const* my_l2; + L3 const* my_l3; + + // Typedefs for Iterator Types + typedef typename linalg_traits::iterator frm_IT1; + typedef typename linalg_traits::const_row_iterator frm_IT2; + + public: + void operator()( const forward_range_mult& r ) const { + L2 const& l2 = *my_l2; + + frm_IT1 it = r.begin(); + frm_IT1 ite = r.end(); + frm_IT2 itr = r.begin_row(); + frm_IT1 addIt = my_l3->begin(); + + for (; it != ite; ++it, ++itr, ++addIt) { + *it = vect_sp(linalg_traits::row(itr), l2, + typename linalg_traits::storage_type(), + typename linalg_traits::storage_type()) + *addIt; + } + } + + tbbHelper_mult_add_by_row(L2 const* l2, L2 const* l3) : my_l2(l2), my_l3(l3) { + // Intentionally left empty. 
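// Editorial sketch (not part of the patch): the TBB helpers above parallelize a row-wise
// multiply-add, result = A*x + b, by splitting the row range across tasks. A minimal
// stand-alone illustration of the same idea, assuming the TBB headers are available;
// the CsrMatrix type below is hypothetical and only serves these sketches, it is not
// part of gmm++ or Storm.
#include <cstddef>
#include <vector>
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>

struct CsrMatrix {
    std::vector<std::size_t> rowStart;  // size = numRows + 1, offsets into column/value
    std::vector<std::size_t> column;    // size = numEntries
    std::vector<double> value;          // size = numEntries
};

void multAddParallelSketch(CsrMatrix const& a, std::vector<double> const& x,
                           std::vector<double> const& b, std::vector<double>& result) {
    std::size_t const numRows = a.rowStart.size() - 1;
    tbb::parallel_for(tbb::blocked_range<std::size_t>(0, numRows),
                      [&](tbb::blocked_range<std::size_t> const& rows) {
                          for (std::size_t i = rows.begin(); i != rows.end(); ++i) {
                              double sum = b[i];  // start from the additive term
                              for (std::size_t k = a.rowStart[i]; k < a.rowStart[i + 1]; ++k) {
                                  sum += a.value[k] * x[a.column[k]];
                              }
                              result[i] = sum;  // tasks write disjoint rows, so no locking is needed
                          }
                      });
}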
+ } + }; + + template + void mult_by_row_parallel(const L1& l1, const L2& l2, L3& l3, abstract_dense) { + tbb::parallel_for(forward_range_mult::iterator, typename linalg_traits::const_row_iterator>(it, ite, itr), tbbHelper_mult_by_row(&l2)); + } + + template + void mult_add_by_row_parallel(const L1& l1, const L2& l2, const L3& l3, L4& l4, abstract_dense) { + tbb::parallel_for(forward_range_mult::iterator, typename linalg_traits::const_row_iterator>(it, ite, itr), tbbHelper_mult_add_by_row(&l2, &l3)); + } #endif template void mult_by_row(const L1& l1, const L2& l2, L3& l3, abstract_dense) { typename linalg_traits::iterator it=vect_begin(l3), ite=vect_end(l3); auto itr = mat_row_const_begin(l1); -#ifdef STORM_HAVE_INTELTBB - tbb::parallel_for(forward_range_mult::iterator, typename linalg_traits::const_row_iterator>(it, ite, itr), tbbHelper_mult_by_row(&l2)); -#else - for (; it != ite; ++it, ++itr) - *it = vect_sp(linalg_traits::row(itr), l2, - typename linalg_traits::storage_type(), - typename linalg_traits::storage_type()); -#endif + for (; it != ite; ++it, ++itr) + *it = vect_sp(linalg_traits::row(itr), l2, + typename linalg_traits::storage_type(), + typename linalg_traits::storage_type()); } + template + void mult_by_row_bwd(const L1& l1, const L2& l2, L3& l3, abstract_dense) { + typename linalg_traits::iterator target_it=vect_end(l3) - 1, target_ite=vect_begin(l3) - 1; + typename linalg_traits::const_row_iterator + itr = mat_row_const_end(l1) - 1; + for (; target_it != target_ite; --target_it, --itr) + *target_it = vect_sp(linalg_traits::row(itr), l2); + } + template void mult_by_col(const L1& l1, const L2& l2, L3& l3, abstract_dense) { clear(l3); @@ -1753,6 +1796,12 @@ namespace gmm { void mult_spec(const L1& l1, const L2& l2, L3& l3, row_major) { mult_by_row(l1, l2, l3, typename linalg_traits::storage_type()); } +#ifdef STORM_HAVE_INTELTBB + template inline + void mult_parallel_spec(const L1& l1, const L2& l2, L3& l3, row_major) + { mult_by_row_parallel(l1, l2, l3, typename linalg_traits::storage_type()); } +#endif + template inline void mult_spec(const L1& l1, const L2& l2, L3& l3, col_major) { mult_by_col(l1, l2, l3, typename linalg_traits::storage_type()); } @@ -1828,6 +1877,28 @@ namespace gmm { linalg_traits::sub_orientation>::potype()); } } + +#ifdef STORM_HAVE_INTELTBB + /** Multiply-accumulate. 
l4 = l1*l2 + l3; */ + template inline + void mult_add_parallel(const L1& l1, const L2& l2, const L3& l3, L4& l4) { + size_type m = mat_nrows(l1), n = mat_ncols(l1); + if (!m || !n) return; + GMM_ASSERT2(n==vect_size(l2) && m==vect_size(l3) && vect_size(l3) == vect_size(l4), "dimensions mismatch"); + if (!same_origin(l2, l4) && !same_origin(l3, l4) && !same_origin(l2, l3)) { + mult_add_parallel_spec(l1, l2, l3, l4, typename principal_orientation_type::sub_orientation>::potype()); + } else { + GMM_WARNING2("Warning, Multiple temporaries are used for mult\n"); + typename temporary_vector::vector_type l2tmp(vect_size(l2)); + copy(l2, l2tmp); + typename temporary_vector::vector_type l3tmp(vect_size(l3)); + copy(l3, l3tmp); + mult_add_parallel_spec(l1, l2tmp, l3tmp, l4, typename principal_orientation_type::sub_orientation>::potype()); + } + } +#endif ///@cond DOXY_SHOW_ALL_FUNCTIONS template inline @@ -1868,10 +1939,20 @@ namespace gmm { typename linalg_traits::iterator target_it=vect_begin(l4), target_ite=vect_end(l4); typename linalg_traits::const_row_iterator itr = mat_row_const_begin(l1); - for (; add_it != add_ite; ++add_it, ++target_it, ++itr) - *target_it = vect_sp(linalg_traits::row(itr), l2) + *add_it; + for (; add_it != add_ite; ++add_it, ++target_it, ++itr) + *target_it = vect_sp(linalg_traits::row(itr), l2) + *add_it; } + template + void mult_add_by_row_bwd(const L1& l1, const L2& l2, const L3& l3, L4& l4, abstract_dense) { + typename linalg_traits::const_iterator add_it=vect_end(l3) - 1, add_ite=vect_begin(l3) - 1; + typename linalg_traits::iterator target_it=vect_end(l4) - 1, target_ite=vect_begin(l4) - 1; + typename linalg_traits::const_row_iterator + itr = mat_row_const_end(l1) - 1; + for (; add_it != add_ite; --add_it, --target_it, --itr) + *target_it = vect_sp(linalg_traits::row(itr), l2) + *add_it; + } + template void mult_add_by_col(const L1& l1, const L2& l2, L3& l3, abstract_dense) { size_type nc = mat_ncols(l1); @@ -1903,6 +1984,12 @@ namespace gmm { void mult_add_spec(const L1& l1, const L2& l2, const L3& l3, L4& l4, row_major) { mult_add_by_row(l1, l2, l3, l4, typename linalg_traits::storage_type()); } +#ifdef STORM_HAVE_INTELTBB + template inline + void mult_add_parallel_spec(const L1& l1, const L2& l2, const L3& l3, L4& l4, row_major) + { mult_add_by_row_parallel(l1, l2, l3, l4, typename linalg_traits::storage_type()); } +#endif + template inline void mult_add_spec(const L1& l1, const L2& l2, L3& l3, col_major) { mult_add_by_col(l1, l2, l3, typename linalg_traits::storage_type()); } diff --git a/src/storm/adapters/GmmxxAdapter.cpp b/src/storm/adapters/GmmxxAdapter.cpp new file mode 100644 index 000000000..63769cd34 --- /dev/null +++ b/src/storm/adapters/GmmxxAdapter.cpp @@ -0,0 +1,167 @@ +#include "storm/adapters/GmmxxAdapter.h" + +#include + +#include "storm/adapters/RationalNumberAdapter.h" +#include "storm/adapters/RationalFunctionAdapter.h" + +#include "storm/utility/constants.h" +#include "storm/exceptions/NotSupportedException.h" + +namespace storm { + namespace adapters { + + template + std::unique_ptr> GmmxxAdapter::toGmmxxSparseMatrix(storm::storage::SparseMatrix const& matrix) { + uint_fast64_t realNonZeros = matrix.getEntryCount(); + STORM_LOG_DEBUG("Converting " << matrix.getRowCount() << "x" << matrix.getColumnCount() << " matrix with " << realNonZeros << " non-zeros to gmm++ format."); + + // Prepare the resulting matrix. 
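// Editorial sketch (not part of the patch): mult_by_row_bwd / mult_add_by_row_bwd above
// sweep the rows from last to first and may write into the input vector itself, which is
// how multAddGaussSeidelBackward uses them. Once row i has been processed, x[i] already
// holds the new value when the rows below it (smaller indices) are evaluated; that is the
// "Gauss-Seidel style" update the commit title refers to. A minimal stand-alone version on
// the hypothetical CsrMatrix from the first sketch, assuming a square matrix as asserted
// in multAddGaussSeidelBackward:
void multAddGaussSeidelBackwardSketch(CsrMatrix const& a, std::vector<double>& x,
                                      std::vector<double> const* b) {
    std::size_t const numRows = a.rowStart.size() - 1;
    for (std::size_t i = numRows; i-- > 0;) {          // rows n-1, n-2, ..., 0
        double sum = b ? (*b)[i] : 0.0;
        for (std::size_t k = a.rowStart[i]; k < a.rowStart[i + 1]; ++k) {
            sum += a.value[k] * x[a.column[k]];        // columns > i already carry updated values
        }
        x[i] = sum;                                    // in-place write makes the sweep Gauss-Seidel style
    }
}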
+ std::unique_ptr> result(new gmm::csr_matrix(matrix.getRowCount(), matrix.getColumnCount())); + + // Copy Row Indications + std::copy(matrix.rowIndications.begin(), matrix.rowIndications.end(), result->jc.begin()); + + // Copy columns and values. + std::vector values; + values.reserve(matrix.getEntryCount()); + + // To match the correct vector type for gmm, we create the vector with the exact same type. + decltype(result->ir) columns; + columns.reserve(matrix.getEntryCount()); + + for (auto const& entry : matrix) { + columns.emplace_back(entry.getColumn()); + values.emplace_back(entry.getValue()); + } + + std::swap(result->ir, columns); + std::swap(result->pr, values); + + STORM_LOG_DEBUG("Done converting matrix to gmm++ format."); + + return result; + } + + template + void GmmxxMultiplier::multAdd(gmm::csr_matrix const& matrix, std::vector const& x, std::vector const* b, std::vector& result) { + if (b) { + gmm::mult_add(matrix, x, *b, result); + } else { + gmm::mult(matrix, x, result); + } + } + + template + void GmmxxMultiplier::multAddGaussSeidelBackward(gmm::csr_matrix const& matrix, std::vector& x, std::vector const* b) { + STORM_LOG_ASSERT(matrix.nr == matrix.nc, "Expecting square matrix."); + if (b) { + gmm::mult_add_by_row_bwd(matrix, x, *b, x, gmm::abstract_dense()); + } else { + gmm::mult_by_row_bwd(matrix, x, x, gmm::abstract_dense()); + } + } + + template + void GmmxxMultiplier::multAddReduce(storm::solver::OptimizationDirection const& dir, std::vector const& rowGroupIndices, gmm::csr_matrix const& matrix, std::vector const& x, std::vector const* b, std::vector& result, std::vector* choices) { + std::vector* target = &result; + std::unique_ptr> temporary; + if (&x == &result) { + STORM_LOG_WARN("Using temporary in 'multAddReduce'."); + temporary = std::make_unique>(x.size()); + target = temporary.get(); + } + + multAddReduceHelper(dir, rowGroupIndices, matrix, x, b, *target, choices); + } + + template + void GmmxxMultiplier::multAddReduceGaussSeidel(storm::solver::OptimizationDirection const& dir, std::vector const& rowGroupIndices, gmm::csr_matrix const& matrix, std::vector& x, std::vector const* b, std::vector* choices) { + multAddReduceHelper(dir, rowGroupIndices, matrix, x, b, x, choices); + } + + template + void GmmxxMultiplier::multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector const& rowGroupIndices, gmm::csr_matrix const& matrix, std::vector const& x, std::vector const* b, std::vector& result, std::vector* choices) { + typedef std::vector VectorType; + typedef gmm::csr_matrix MatrixType; + + typename gmm::linalg_traits::const_iterator add_it, add_ite; + if (b) { + add_it = gmm::vect_end(*b) - 1; + add_ite = gmm::vect_begin(*b) - 1; + } + typename gmm::linalg_traits::iterator target_it = gmm::vect_end(result) - 1; + typename gmm::linalg_traits::const_row_iterator itr = mat_row_const_end(matrix) - 1; + typename std::vector::iterator choice_it; + if (choices) { + choice_it = choices->end() - 1; + } + + uint64_t choice; + for (auto row_group_it = rowGroupIndices.end() - 2, row_group_ite = rowGroupIndices.begin() - 1; row_group_it != row_group_ite; --row_group_it, --choice_it, --target_it) { + T currentValue = b ? 
*add_it : storm::utility::zero(); + currentValue += vect_sp(gmm::linalg_traits::row(itr), x); + + if (choices) { + choice = *(row_group_it + 1) - 1 - *row_group_it; + *choice_it = choice; + } + + --itr; + if (b) { + --add_it; + } + + for (uint64_t row = *row_group_it + 1, rowEnd = *(row_group_it + 1); row < rowEnd; ++row, --itr) { + T newValue = b ? *add_it : storm::utility::zero(); + newValue += vect_sp(gmm::linalg_traits::row(itr), x); + + if (choices) { + --choice; + } + + if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) { + currentValue = newValue; + if (choices) { + *choice_it = choice; + } + } + if (b) { + --add_it; + } + } + + // Write back final value. + *target_it = currentValue; + } + } + + template<> + void GmmxxMultiplier::multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector const& rowGroupIndices, gmm::csr_matrix const& matrix, std::vector const& x, std::vector const* b, std::vector& result, std::vector* choices) { + STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Operation not supported for this data type."); + } + +#ifdef STORM_HAVE_INTELTBB + template + void GmmxxMultiplier::multAddParallel(gmm::csr_matrix const& matrix, std::vector const& x, std::vector const* b, std::vector& result) { + if (b) { + gmm::mult_add_parallel(matrix, x, *b, result); + } else { + gmm::mult_parallel(matrix, x, result); + } + } +#endif + + template class GmmxxAdapter; + template class GmmxxMultiplier; + +#ifdef STORM_HAVE_CARL + template class GmmxxAdapter; + template class GmmxxAdapter; + + template class GmmxxMultiplier; + template class GmmxxMultiplier; +#endif + + } +} diff --git a/src/storm/adapters/GmmxxAdapter.h b/src/storm/adapters/GmmxxAdapter.h index dd14ddc4d..442b2575f 100644 --- a/src/storm/adapters/GmmxxAdapter.h +++ b/src/storm/adapters/GmmxxAdapter.h @@ -1,7 +1,5 @@ -#ifndef STORM_ADAPTERS_GMMXXADAPTER_H_ -#define STORM_ADAPTERS_GMMXXADAPTER_H_ +#pragma once -#include #include #include "storm/utility/gmm.h" @@ -11,49 +9,34 @@ #include "storm/utility/macros.h" namespace storm { - namespace adapters { + template class GmmxxAdapter { public: /*! * Converts a sparse matrix into a sparse matrix in the gmm++ format. * @return A pointer to a row-major sparse matrix in gmm++ format. */ - template - static std::unique_ptr> toGmmxxSparseMatrix(storm::storage::SparseMatrix const& matrix) { - uint_fast64_t realNonZeros = matrix.getEntryCount(); - STORM_LOG_DEBUG("Converting " << matrix.getRowCount() << "x" << matrix.getColumnCount() << " matrix with " << realNonZeros << " non-zeros to gmm++ format."); - - // Prepare the resulting matrix. - std::unique_ptr> result(new gmm::csr_matrix(matrix.getRowCount(), matrix.getColumnCount())); - - // Copy Row Indications - std::copy(matrix.rowIndications.begin(), matrix.rowIndications.end(), result->jc.begin()); - - // Copy columns and values. - std::vector values; - values.reserve(matrix.getEntryCount()); - - // To match the correct vector type for gmm, we create the vector with the exact same type. 
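// Editorial sketch (not part of the patch): multAddReduceHelper above fuses the
// matrix-vector multiplication with the min/max reduction over each row group and
// optionally records which row of a group attained the optimum. A simplified
// forward-iterating version of the same computation on the hypothetical CsrMatrix from
// the first sketch; the Direction enum is a stand-in for storm::solver::OptimizationDirection,
// and non-empty row groups are assumed (the backward iteration in the patch only matters
// for the in-place Gauss-Seidel variant).
#include <cstdint>
#include <limits>

enum class Direction { Minimize, Maximize };

void multAddReduceSketch(Direction dir, std::vector<std::uint64_t> const& rowGroupIndices,
                         CsrMatrix const& a, std::vector<double> const& x,
                         std::vector<double> const* b, std::vector<double>& result,
                         std::vector<std::uint64_t>* choices) {
    for (std::uint64_t group = 0; group + 1 < rowGroupIndices.size(); ++group) {
        double best = dir == Direction::Minimize ? std::numeric_limits<double>::infinity()
                                                 : -std::numeric_limits<double>::infinity();
        std::uint64_t bestChoice = 0;
        for (std::uint64_t row = rowGroupIndices[group]; row < rowGroupIndices[group + 1]; ++row) {
            double value = b ? (*b)[row] : 0.0;
            for (std::size_t k = a.rowStart[row]; k < a.rowStart[row + 1]; ++k) {
                value += a.value[k] * x[a.column[k]];
            }
            if ((dir == Direction::Minimize && value < best) ||
                (dir == Direction::Maximize && value > best)) {
                best = value;
                bestChoice = row - rowGroupIndices[group];  // choice index relative to the group
            }
        }
        result[group] = best;
        if (choices) {
            (*choices)[group] = bestChoice;
        }
    }
}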
- decltype(result->ir) columns; - columns.reserve(matrix.getEntryCount()); - - for (auto const& entry : matrix) { - columns.emplace_back(entry.getColumn()); - values.emplace_back(entry.getValue()); - } - - std::swap(result->ir, columns); - std::swap(result->pr, values); - - STORM_LOG_DEBUG("Done converting matrix to gmm++ format."); - - return result; - } + static std::unique_ptr> toGmmxxSparseMatrix(storm::storage::SparseMatrix const& matrix); }; - } // namespace adapters -} // namespace storm + template + class GmmxxMultiplier { + public: + static void multAdd(gmm::csr_matrix const& matrix, std::vector const& x, std::vector const* b, std::vector& result); + static void multAddGaussSeidelBackward(gmm::csr_matrix const& matrix, std::vector& x, std::vector const* b); -#endif /* STORM_ADAPTERS_GMMXXADAPTER_H_ */ + static void multAddReduce(storm::solver::OptimizationDirection const& dir, std::vector const& rowGroupIndices, gmm::csr_matrix const& matrix, std::vector const& x, std::vector const* b, std::vector& result, std::vector* choices = nullptr); + static void multAddReduceGaussSeidel(storm::solver::OptimizationDirection const& dir, std::vector const& rowGroupIndices, gmm::csr_matrix const& matrix, std::vector& x, std::vector const* b, std::vector* choices = nullptr); + +#ifdef STORM_HAVE_INTELTBB + static void multAddParallel(gmm::csr_matrix const& matrix, std::vector const& x, std::vector const& b, std::vector& result); +#endif + + private: + static void multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector const& rowGroupIndices, gmm::csr_matrix const& matrix, std::vector const& x, std::vector const* b, std::vector& result, std::vector* choices = nullptr); + }; + + } +} diff --git a/src/storm/solver/GmmxxLinearEquationSolver.cpp b/src/storm/solver/GmmxxLinearEquationSolver.cpp index 6e017ef46..26cf7d44a 100644 --- a/src/storm/solver/GmmxxLinearEquationSolver.cpp +++ b/src/storm/solver/GmmxxLinearEquationSolver.cpp @@ -110,13 +110,13 @@ namespace storm { template void GmmxxLinearEquationSolver::setMatrix(storm::storage::SparseMatrix const& A) { - gmmxxA = storm::adapters::GmmxxAdapter::toGmmxxSparseMatrix(A); + gmmxxA = storm::adapters::GmmxxAdapter::toGmmxxSparseMatrix(A); clearCache(); } template void GmmxxLinearEquationSolver::setMatrix(storm::storage::SparseMatrix&& A) { - gmmxxA = storm::adapters::GmmxxAdapter::toGmmxxSparseMatrix(A); + gmmxxA = storm::adapters::GmmxxAdapter::toGmmxxSparseMatrix(A); clearCache(); } @@ -187,17 +187,33 @@ namespace storm { template void GmmxxLinearEquationSolver::multiply(std::vector& x, std::vector const* b, std::vector& result) const { - if (b) { - gmm::mult_add(*gmmxxA, x, *b, result); - } else { - gmm::mult(*gmmxxA, x, result); - } + storm::adapters::GmmxxMultiplier::multAdd(*gmmxxA, x, b, result); - if(!this->isCachingEnabled()) { + if (!this->isCachingEnabled()) { clearCache(); } } + template + void GmmxxLinearEquationSolver::multiplyAndReduce(OptimizationDirection const& dir, std::vector const& rowGroupIndices, std::vector& x, std::vector const* b, std::vector& result, std::vector* choices) const { + storm::adapters::GmmxxMultiplier::multAddReduce(dir, rowGroupIndices, *gmmxxA, x, b, result, choices); + } + + template + bool GmmxxLinearEquationSolver::supportsGaussSeidelMultiplication() const { + return true; + } + + template + void GmmxxLinearEquationSolver::multiplyGaussSeidel(std::vector& x, std::vector const* b) const { + storm::adapters::GmmxxMultiplier::multAddGaussSeidelBackward(*gmmxxA, x, b); + } + + 
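// Editorial sketch (not part of the patch): from a caller's perspective the new adapter
// and multiplier are used roughly as follows. The <double> template arguments are an
// assumption about the value-type parameter of GmmxxAdapter / GmmxxMultiplier /
// SparseMatrix, which the flattened diff does not show explicitly.
#include <vector>
#include "storm/adapters/GmmxxAdapter.h"
#include "storm/storage/SparseMatrix.h"

void gaussSeidelSweepSketch(storm::storage::SparseMatrix<double> const& stormMatrix,
                            std::vector<double>& x, std::vector<double> const& b) {
    // Convert once; in GmmxxLinearEquationSolver this conversion is cached by setMatrix().
    auto gmmMatrix = storm::adapters::GmmxxAdapter<double>::toGmmxxSparseMatrix(stormMatrix);
    // In-place x = A*x + b, sweeping rows from last to first; no separate result vector is needed.
    storm::adapters::GmmxxMultiplier<double>::multAddGaussSeidelBackward(*gmmMatrix, x, &b);
}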
template + void GmmxxLinearEquationSolver::multiplyAndReduceGaussSeidel(OptimizationDirection const& dir, std::vector const& rowGroupIndices, std::vector& x, std::vector const* b, std::vector* choices) const { + storm::adapters::GmmxxMultiplier::multAddReduceGaussSeidel(dir, rowGroupIndices, *gmmxxA, x, b, choices); + } + template void GmmxxLinearEquationSolver::setSettings(GmmxxLinearEquationSolverSettings const& newSettings) { settings = newSettings; diff --git a/src/storm/solver/GmmxxLinearEquationSolver.h b/src/storm/solver/GmmxxLinearEquationSolver.h index 988b444c5..fe301de12 100644 --- a/src/storm/solver/GmmxxLinearEquationSolver.h +++ b/src/storm/solver/GmmxxLinearEquationSolver.h @@ -87,6 +87,10 @@ namespace storm { virtual bool solveEquations(std::vector& x, std::vector const& b) const override; virtual void multiply(std::vector& x, std::vector const* b, std::vector& result) const override; + virtual void multiplyAndReduce(OptimizationDirection const& dir, std::vector const& rowGroupIndices, std::vector& x, std::vector const* b, std::vector& result, std::vector* choices = nullptr) const override; + virtual bool supportsGaussSeidelMultiplication() const override; + virtual void multiplyGaussSeidel(std::vector& x, std::vector const* b) const override; + virtual void multiplyAndReduceGaussSeidel(OptimizationDirection const& dir, std::vector const& rowGroupIndices, std::vector& x, std::vector const* b, std::vector* choices = nullptr) const override; void setSettings(GmmxxLinearEquationSolverSettings const& newSettings); GmmxxLinearEquationSolverSettings const& getSettings() const; diff --git a/src/storm/solver/StandardMinMaxLinearEquationSolver.cpp b/src/storm/solver/StandardMinMaxLinearEquationSolver.cpp index 4e2eabf03..03ba63cd9 100644 --- a/src/storm/solver/StandardMinMaxLinearEquationSolver.cpp +++ b/src/storm/solver/StandardMinMaxLinearEquationSolver.cpp @@ -48,17 +48,13 @@ namespace storm { linEqSolverA->setCachingEnabled(true); } - if (!auxiliaryRowVector) { - auxiliaryRowVector = std::make_unique>(A->getRowCount()); + if (!auxiliaryRowGroupVector) { + auxiliaryRowGroupVector = std::make_unique>(this->A->getRowGroupCount()); } - std::vector& multiplyResult = *auxiliaryRowVector; for (uint64_t i = 0; i < n; ++i) { - linEqSolverA->multiply(x, b, multiplyResult); - - // Reduce the vector x' by applying min/max for all non-deterministic choices as given by the topmost - // element of the min/max operator stack. - storm::utility::vector::reduceVectorMinOrMax(dir, multiplyResult, x, this->A->getRowGroupIndices()); + linEqSolverA->multiplyAndReduce(dir, this->A->getRowGroupIndices(), x, b, *auxiliaryRowGroupVector); + std::swap(x, *auxiliaryRowGroupVector); } if (!this->isCachingEnabled()) { diff --git a/src/storm/solver/StandardMinMaxLinearEquationSolver.h b/src/storm/solver/StandardMinMaxLinearEquationSolver.h index 4529b2dc1..a8517109e 100644 --- a/src/storm/solver/StandardMinMaxLinearEquationSolver.h +++ b/src/storm/solver/StandardMinMaxLinearEquationSolver.h @@ -27,6 +27,7 @@ namespace storm { // possibly cached data mutable std::unique_ptr> linEqSolverA; mutable std::unique_ptr> auxiliaryRowVector; // A.rowCount() entries + mutable std::unique_ptr> auxiliaryRowGroupVector; // A.rowCount() entries /// The factory used to obtain linear equation solvers. 
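// Editorial sketch (not part of the patch): the min-max solver loop above now lets the
// linear equation solver fuse the multiplication with the min/max reduction, writing into
// an auxiliary vector with one entry per row group and swapping it with x instead of
// reducing a full row-sized vector afterwards. A stand-alone version of the same iteration
// scheme, reusing Direction, CsrMatrix, and multAddReduceSketch from the earlier sketches
// and assuming x has one entry per row group:
#include <utility>

void repeatedMultiplyAndReduceSketch(Direction dir, std::vector<std::uint64_t> const& rowGroupIndices,
                                     CsrMatrix const& a, std::vector<double>& x,
                                     std::vector<double> const* b, std::uint64_t n) {
    std::vector<double> auxiliary(rowGroupIndices.size() - 1);  // one entry per row group
    for (std::uint64_t i = 0; i < n; ++i) {
        multAddReduceSketch(dir, rowGroupIndices, a, x, b, auxiliary, nullptr);
        std::swap(x, auxiliary);  // avoids copying the freshly reduced values back into x
    }
}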
std::unique_ptr> linearEquationSolverFactory; diff --git a/src/storm/storage/SparseMatrix.cpp b/src/storm/storage/SparseMatrix.cpp index e966ff80e..39c99a8e0 100644 --- a/src/storm/storage/SparseMatrix.cpp +++ b/src/storm/storage/SparseMatrix.cpp @@ -1497,7 +1497,7 @@ namespace storm { if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) { currentValue = newValue; if (choices) { - *choiceIt = *rowIt - *rowGroupIt; + *choiceIt = std::distance(rowIndications.begin(), rowIt) - *rowGroupIt; } } if (summand) { @@ -1538,7 +1538,7 @@ namespace storm { currentValue += elementIt->getValue() * vector[elementIt->getColumn()]; } if (choices) { - *choiceIt = 0; + *choiceIt = std::distance(rowIndications.begin(), rowIt) - *rowGroupIt; } --rowIt; @@ -1546,7 +1546,7 @@ namespace storm { --summandIt; } - for (; std::distance(rowIndications.begin(), rowIt) >= static_cast(*rowGroupIt); --rowIt) { + for (uint64_t i = *rowGroupIt + 1, end = *(rowGroupIt + 1); i < end; --rowIt, ++i) { ValueType newValue = summand ? *summandIt : storm::utility::zero(); for (auto elementIte = this->begin() + *rowIt - 1; elementIt != elementIte; --elementIt) { newValue += elementIt->getValue() * vector[elementIt->getColumn()]; @@ -1555,7 +1555,7 @@ namespace storm { if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) { currentValue = newValue; if (choices) { - *choiceIt = *rowIt - *rowGroupIt; + *choiceIt = std::distance(rowIndications.begin(), rowIt) - *rowGroupIt; } } if (summand) { @@ -1617,7 +1617,7 @@ namespace storm { if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) { currentValue = newValue; if (choices) { - *choiceIt = *rowIt - *rowGroupIt; + *choiceIt = std::distance(rowIndications.begin(), rowIt) - *rowGroupIt; } } } diff --git a/src/storm/storage/SparseMatrix.h b/src/storm/storage/SparseMatrix.h index 1dd1c19fe..1045848f4 100644 --- a/src/storm/storage/SparseMatrix.h +++ b/src/storm/storage/SparseMatrix.h @@ -19,6 +19,7 @@ // Forward declaration for adapter classes. namespace storm { namespace adapters { + template class GmmxxAdapter; class EigenAdapter; class StormAdapter; @@ -315,7 +316,7 @@ namespace storm { class SparseMatrix { public: // Declare adapter classes as friends to use internal data. 
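// Editorial sketch (not part of the patch): the SparseMatrix changes above replace
// "*rowIt - *rowGroupIt" by "std::distance(rowIndications.begin(), rowIt) - *rowGroupIt"
// when recording the optimal choice. The iterator rowIt points into rowIndications, so
// dereferencing it yields the offset of the row's first entry in the column/value arrays,
// not the row index; the row index has to be recovered from the iterator's position.
// A minimal illustration with hypothetical names:
#include <cstdint>
#include <iterator>
#include <vector>

std::uint64_t choiceIndexSketch(std::vector<std::uint64_t> const& rowIndications,
                                std::vector<std::uint64_t>::const_iterator rowIt,
                                std::uint64_t groupStartRow) {
    // *rowIt would be the position of the row's first entry, which is not what we want here.
    std::uint64_t row = std::distance(rowIndications.begin(), rowIt);  // actual row index
    return row - groupStartRow;  // choice = offset of the row within its row group
}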
- friend class storm::adapters::GmmxxAdapter; + friend class storm::adapters::GmmxxAdapter; friend class storm::adapters::EigenAdapter; friend class storm::adapters::StormAdapter; friend class storm::solver::TopologicalValueIterationMinMaxLinearEquationSolver; diff --git a/src/test/storm/modelchecker/GmmxxHybridMdpPrctlModelCheckerTest.cpp b/src/test/storm/modelchecker/GmmxxHybridMdpPrctlModelCheckerTest.cpp index 203730667..663e615cc 100644 --- a/src/test/storm/modelchecker/GmmxxHybridMdpPrctlModelCheckerTest.cpp +++ b/src/test/storm/modelchecker/GmmxxHybridMdpPrctlModelCheckerTest.cpp @@ -116,8 +116,8 @@ TEST(GmmxxHybridMdpPrctlModelCheckerTest, Dice_Cudd) { result->filter(storm::modelchecker::SymbolicQualitativeCheckResult(model->getReachableStates(), model->getInitialStates())); storm::modelchecker::HybridQuantitativeCheckResult& quantitativeResult8 = result->asHybridQuantitativeCheckResult(); - EXPECT_NEAR(7.3333294987678528, quantitativeResult8.getMin(), storm::settings::getModule().getPrecision()); - EXPECT_NEAR(7.3333294987678528, quantitativeResult8.getMax(), storm::settings::getModule().getPrecision()); + EXPECT_NEAR(7.3333328887820244, quantitativeResult8.getMin(), storm::settings::getModule().getPrecision()); + EXPECT_NEAR(7.3333328887820244, quantitativeResult8.getMax(), storm::settings::getModule().getPrecision()); } TEST(GmmxxHybridMdpPrctlModelCheckerTest, Dice_Sylvan) { @@ -214,8 +214,8 @@ TEST(GmmxxHybridMdpPrctlModelCheckerTest, Dice_Sylvan) { result->filter(storm::modelchecker::SymbolicQualitativeCheckResult(model->getReachableStates(), model->getInitialStates())); storm::modelchecker::HybridQuantitativeCheckResult& quantitativeResult8 = result->asHybridQuantitativeCheckResult(); - EXPECT_NEAR(7.3333294987678528, quantitativeResult8.getMin(), storm::settings::getModule().getPrecision()); - EXPECT_NEAR(7.3333294987678528, quantitativeResult8.getMax(), storm::settings::getModule().getPrecision()); + EXPECT_NEAR(7.3333328887820244, quantitativeResult8.getMin(), storm::settings::getModule().getPrecision()); + EXPECT_NEAR(7.3333328887820244, quantitativeResult8.getMax(), storm::settings::getModule().getPrecision()); } TEST(GmmxxHybridMdpPrctlModelCheckerTest, AsynchronousLeader_Cudd) { @@ -294,8 +294,8 @@ TEST(GmmxxHybridMdpPrctlModelCheckerTest, AsynchronousLeader_Cudd) { result->filter(storm::modelchecker::SymbolicQualitativeCheckResult(model->getReachableStates(), model->getInitialStates())); storm::modelchecker::HybridQuantitativeCheckResult& quantitativeResult6 = result->asHybridQuantitativeCheckResult(); - EXPECT_NEAR(4.2856904354441401, quantitativeResult6.getMin(), storm::settings::getModule().getPrecision()); - EXPECT_NEAR(4.2856904354441401, quantitativeResult6.getMax(), storm::settings::getModule().getPrecision()); + EXPECT_NEAR(4.2857120959008661, quantitativeResult6.getMin(), storm::settings::getModule().getPrecision()); + EXPECT_NEAR(4.2857120959008661, quantitativeResult6.getMax(), storm::settings::getModule().getPrecision()); } TEST(GmmxxHybridMdpPrctlModelCheckerTest, AsynchronousLeader_Sylvan) { @@ -374,7 +374,7 @@ TEST(GmmxxHybridMdpPrctlModelCheckerTest, AsynchronousLeader_Sylvan) { result->filter(storm::modelchecker::SymbolicQualitativeCheckResult(model->getReachableStates(), model->getInitialStates())); storm::modelchecker::HybridQuantitativeCheckResult& quantitativeResult6 = result->asHybridQuantitativeCheckResult(); - EXPECT_NEAR(4.2856904354441401, quantitativeResult6.getMin(), storm::settings::getModule().getPrecision()); - 
EXPECT_NEAR(4.2856904354441401, quantitativeResult6.getMax(), storm::settings::getModule().getPrecision()); + EXPECT_NEAR(4.2857120959008661, quantitativeResult6.getMin(), storm::settings::getModule().getPrecision()); + EXPECT_NEAR(4.2857120959008661, quantitativeResult6.getMax(), storm::settings::getModule().getPrecision()); } diff --git a/src/test/storm/modelchecker/GmmxxMdpPrctlModelCheckerTest.cpp b/src/test/storm/modelchecker/GmmxxMdpPrctlModelCheckerTest.cpp index f0858e2ca..1725ff795 100644 --- a/src/test/storm/modelchecker/GmmxxMdpPrctlModelCheckerTest.cpp +++ b/src/test/storm/modelchecker/GmmxxMdpPrctlModelCheckerTest.cpp @@ -86,7 +86,7 @@ TEST(GmmxxMdpPrctlModelCheckerTest, Dice) { result = checker.check(*formula); storm::modelchecker::ExplicitQuantitativeCheckResult& quantitativeResult8 = result->asExplicitQuantitativeCheckResult(); - EXPECT_NEAR(7.333329499, quantitativeResult8[0], storm::settings::getModule().getPrecision()); + EXPECT_NEAR(7.3333328887820244, quantitativeResult8[0], storm::settings::getModule().getPrecision()); abstractModel = storm::parser::AutoParser<>::parseModel(STORM_TEST_RESOURCES_DIR "/tra/two_dice.tra", STORM_TEST_RESOURCES_DIR "/lab/two_dice.lab", STORM_TEST_RESOURCES_DIR "/rew/two_dice.flip.state.rew", ""); @@ -108,7 +108,7 @@ TEST(GmmxxMdpPrctlModelCheckerTest, Dice) { result = stateRewardModelChecker.check(*formula); storm::modelchecker::ExplicitQuantitativeCheckResult& quantitativeResult10 = result->asExplicitQuantitativeCheckResult(); - EXPECT_NEAR(7.333329499, quantitativeResult10[0], storm::settings::getModule().getPrecision()); + EXPECT_NEAR(7.3333328887820244, quantitativeResult10[0], storm::settings::getModule().getPrecision()); abstractModel = storm::parser::AutoParser<>::parseModel(STORM_TEST_RESOURCES_DIR "/tra/two_dice.tra", STORM_TEST_RESOURCES_DIR "/lab/two_dice.lab", STORM_TEST_RESOURCES_DIR "/rew/two_dice.flip.state.rew", STORM_TEST_RESOURCES_DIR "/rew/two_dice.flip.trans.rew"); @@ -130,7 +130,7 @@ TEST(GmmxxMdpPrctlModelCheckerTest, Dice) { result = stateAndTransitionRewardModelChecker.check(*formula); storm::modelchecker::ExplicitQuantitativeCheckResult& quantitativeResult12 = result->asExplicitQuantitativeCheckResult(); - EXPECT_NEAR(14.666658998, quantitativeResult12[0], storm::settings::getModule().getPrecision()); + EXPECT_NEAR(14.666665777564049, quantitativeResult12[0], storm::settings::getModule().getPrecision()); } TEST(GmmxxMdpPrctlModelCheckerTest, AsynchronousLeader) { @@ -188,7 +188,7 @@ TEST(GmmxxMdpPrctlModelCheckerTest, AsynchronousLeader) { result = checker.check(*formula); storm::modelchecker::ExplicitQuantitativeCheckResult& quantitativeResult6 = result->asExplicitQuantitativeCheckResult(); - EXPECT_NEAR(4.285689611, quantitativeResult6[0], storm::settings::getModule().getPrecision()); + EXPECT_NEAR(4.2857120959008661, quantitativeResult6[0], storm::settings::getModule().getPrecision()); } TEST(GmmxxMdpPrctlModelCheckerTest, SchedulerGeneration) {