Extraction and update of TBB-parallelized stuff

Branch: tempestpy_adaptions
Author: dehnert, 7 years ago
Commit: c5134c364f
21 changed files:

  1. resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h (84)
  2. src/storm/adapters/GmmxxAdapter.cpp (116)
  3. src/storm/adapters/GmmxxAdapter.h (19)
  4. src/storm/settings/modules/CoreSettings.cpp (16)
  5. src/storm/settings/modules/CoreSettings.h (11)
  6. src/storm/solver/GmmxxLinearEquationSolver.cpp (11)
  7. src/storm/solver/GmmxxLinearEquationSolver.h (8)
  8. src/storm/solver/GmmxxMultiplier.cpp (221)
  9. src/storm/solver/GmmxxMultiplier.h (31)
  10. src/storm/solver/LinearEquationSolver.cpp (2)
  11. src/storm/solver/LinearEquationSolver.h (5)
  12. src/storm/solver/NativeLinearEquationSolver.cpp (19)
  13. src/storm/solver/NativeLinearEquationSolver.h (7)
  14. src/storm/solver/NativeMultiplier.cpp (101)
  15. src/storm/solver/NativeMultiplier.h (31)
  16. src/storm/solver/TopologicalMinMaxLinearEquationSolver.cpp (2)
  17. src/storm/storage/SparseMatrix.cpp (250)
  18. src/storm/storage/SparseMatrix.h (42)
  19. src/storm/utility/VectorHelper.cpp (57)
  20. src/storm/utility/VectorHelper.h (25)
  21. src/storm/utility/vector.h (198)

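Note on the overall change: the TBB-parallelized multiplication kernels are pulled out of GmmxxAdapter and SparseMatrix into dedicated GmmxxMultiplier / NativeMultiplier classes that derive from the new storm::utility::VectorHelper and check this->parallelize() to choose between the sequential and the TBB code path; the switch is exposed through the new core setting enable-tbb. The following is a minimal, self-contained sketch of that dispatch shape only; it is not code from the commit, the class and method names are illustrative, and just the STORM_HAVE_INTELTBB guard and the blocked_range grain size of 10 are taken from the diffs below.

    #include <cstddef>
    #include <vector>

    #ifdef STORM_HAVE_INTELTBB
    #include <tbb/blocked_range.h>
    #include <tbb/parallel_for.h>
    #endif

    // Illustrative stand-in for the extracted multiplier classes: one entry point that
    // dispatches to a sequential loop or a TBB parallel_for, depending on a flag that
    // the real classes obtain from VectorHelper::parallelize().
    class MultiplierSketch {
    public:
        explicit MultiplierSketch(bool parallelize) : useParallel(parallelize) {}

        // result[i] = 2 * x[i]; the real kernels compute matrix-vector products instead.
        void apply(std::vector<double> const& x, std::vector<double>& result) const {
            result.resize(x.size());
    #ifdef STORM_HAVE_INTELTBB
            if (useParallel) {
                tbb::parallel_for(tbb::blocked_range<std::size_t>(0, x.size(), 10),
                                  [&](tbb::blocked_range<std::size_t> const& range) {
                                      for (std::size_t i = range.begin(); i != range.end(); ++i) {
                                          result[i] = 2.0 * x[i];
                                      }
                                  });
                return;
            }
    #endif
            for (std::size_t i = 0; i < x.size(); ++i) {
                result[i] = 2.0 * x[i];
            }
        }

    private:
        bool useParallel;
    };
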
resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h (84)

@@ -1706,7 +1706,7 @@ namespace gmm {
{}
};
template <typename L1, typename L2, typename L3>
template <typename L1, typename L2, typename L3, typename L4>
class tbbHelper_mult_add_by_row {
L2 const* my_l2;
L3 const* my_l3;
@@ -1714,6 +1714,7 @@ namespace gmm {
// Typedefs for Iterator Types
typedef typename linalg_traits<L3>::iterator frm_IT1;
typedef typename linalg_traits<L1>::const_row_iterator frm_IT2;
typedef typename linalg_traits<L4>::const_iterator frm_IT3;
public:
void operator()( const forward_range_mult<frm_IT1, frm_IT2>& r ) const {
@@ -1722,7 +1723,7 @@ namespace gmm {
frm_IT1 it = r.begin();
frm_IT1 ite = r.end();
frm_IT2 itr = r.begin_row();
frm_IT1 addIt = my_l3->begin();
frm_IT3 addIt = my_l3->begin();
for (; it != ite; ++it, ++itr, ++addIt) {
*it = vect_sp(linalg_traits<L1>::row(itr), l2,
@@ -1736,14 +1737,65 @@ namespace gmm {
}
};
template<typename L1, typename L2, typename L3>
class TbbMultFunctor {
public:
TbbMultFunctor(L1 const& l1, L2 const& l2, L3& l3) : l1(l1), l2(l2), l3(l3) {
// Intentionally left empty.
}
void operator()(tbb::blocked_range<unsigned long> const& range) const {
auto itr = mat_row_const_begin(l1) + range.begin();
auto l2it = l2.begin() + range.begin();
auto l3it = l3.begin() + range.begin();
auto l3ite = l3.begin() + range.end();
for (; l3it != l3ite; ++l3it, ++l2it, ++itr) {
*l3it = vect_sp(linalg_traits<L1>::row(itr), l2, typename linalg_traits<L1>::storage_type(), typename linalg_traits<L2>::storage_type());
}
}
private:
L1 const& l1;
L2 const& l2;
L3& l3;
};
template <typename L1, typename L2, typename L3>
void mult_by_row_parallel(const L1& l1, const L2& l2, L3& l3, abstract_dense) {
tbb::parallel_for(forward_range_mult<typename linalg_traits<L3>::iterator, typename linalg_traits<L1>::const_row_iterator>(it, ite, itr), tbbHelper_mult_by_row<L1, L2, L3>(&l2));
tbb::parallel_for(tbb::blocked_range<unsigned long>(0, vect_size(l3), 10), TbbMultFunctor<L1, L2, L3>(l1, l2, l3));
}
template<typename L1, typename L2, typename L3, typename L4>
class TbbMultAddFunctor {
public:
TbbMultAddFunctor(L1 const& l1, L2 const& l2, L3 const& l3, L4& l4) : l1(l1), l2(l2), l3(l3), l4(l4) {
// Intentionally left empty.
}
void operator()(tbb::blocked_range<unsigned long> const& range) const {
auto itr = mat_row_const_begin(l1) + range.begin();
auto l2it = l2.begin() + range.begin();
auto l3it = l3.begin() + range.begin();
auto l4it = l4.begin() + range.begin();
auto l4ite = l4.begin() + range.end();
for (; l4it != l4ite; ++l4it, ++l3it, ++l2it, ++itr) {
*l4it = vect_sp(linalg_traits<L1>::row(itr), l2, typename linalg_traits<L1>::storage_type(), typename linalg_traits<L2>::storage_type()) + *l3it;
}
}
private:
L1 const& l1;
L2 const& l2;
L3 const& l3;
L4& l4;
};
template <typename L1, typename L2, typename L3, typename L4>
void mult_add_by_row_parallel(const L1& l1, const L2& l2, const L3& l3, L4& l4, abstract_dense) {
tbb::parallel_for(forward_range_mult<typename linalg_traits<L3>::iterator, typename linalg_traits<L1>::const_row_iterator>(it, ite, itr), tbbHelper_mult_add_by_row<L1, L2, L3, L4>(&l2, &l3));
tbb::parallel_for(tbb::blocked_range<unsigned long>(0, vect_size(l4), 10), TbbMultAddFunctor<L1, L2, L3, L4>(l1, l2, l3, l4));
}
#endif
@@ -1879,6 +1931,24 @@ namespace gmm {
}
#ifdef STORM_HAVE_INTELTBB
/** Multiply. l3 = l1*l2; */
template <typename L1, typename L2, typename L3> inline
void mult_parallel(const L1& l1, const L2& l2, L3& l3) {
size_type m = mat_nrows(l1), n = mat_ncols(l1);
if (!m || !n) return;
GMM_ASSERT2(n==vect_size(l2), "dimensions mismatch");
if (!same_origin(l2, l3)) {
mult_parallel_spec(l1, l2, l3, typename principal_orientation_type<typename
linalg_traits<L1>::sub_orientation>::potype());
} else {
GMM_WARNING2("Warning, temporaries are used for mult\n");
typename temporary_vector<L2>::vector_type l2tmp(vect_size(l2));
copy(l2, l2tmp);
mult_parallel_spec(l1, l2tmp, l3, typename principal_orientation_type<typename
linalg_traits<L1>::sub_orientation>::potype());
}
}
/** Multiply-accumulate. l4 = l1*l2 + l3; */
template <typename L1, typename L2, typename L3, typename L4> inline
void mult_add_parallel(const L1& l1, const L2& l2, const L3& l3, L4& l4) {
@@ -1886,16 +1956,14 @@ namespace gmm {
if (!m || !n) return;
GMM_ASSERT2(n==vect_size(l2) && m==vect_size(l3) && vect_size(l3) == vect_size(l4), "dimensions mismatch");
if (!same_origin(l2, l4) && !same_origin(l3, l4) && !same_origin(l2, l3)) {
mult_add_parallel_spec(l1, l2, l3, l4, typename principal_orientation_type<typename
linalg_traits<L1>::sub_orientation>::potype());
mult_add_parallel_spec(l1, l2, l3, l4, typename principal_orientation_type<typename linalg_traits<L1>::sub_orientation>::potype());
} else {
GMM_WARNING2("Warning, Multiple temporaries are used for mult\n");
typename temporary_vector<L2>::vector_type l2tmp(vect_size(l2));
copy(l2, l2tmp);
typename temporary_vector<L3>::vector_type l3tmp(vect_size(l3));
copy(l3, l3tmp);
mult_add_parallel_spec(l1, l2tmp, l3tmp, l4, typename principal_orientation_type<typename
linalg_traits<L1>::sub_orientation>::potype());
mult_add_parallel_spec(l1, l2tmp, l3tmp, l4, typename principal_orientation_type<typename linalg_traits<L1>::sub_orientation>::potype());
}
}
#endif

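For orientation, a hedged usage sketch of the new gmm::mult_parallel / gmm::mult_add_parallel entry points added above, assuming the patched gmm 5.2 headers from resources/3rdparty are on the include path, STORM_HAVE_INTELTBB is defined by the build, and a TBB runtime is available; the matrix values are arbitrary.

    #include <vector>

    #include "gmm/gmm.h"   // patched gmm 5.2 shipped with Storm

    int main() {
        // Assemble a small 2x2 matrix in a writable format and compress it to CSR.
        gmm::row_matrix<gmm::wsvector<double>> assembly(2, 2);
        assembly(0, 0) = 0.5;
        assembly(0, 1) = 0.5;
        assembly(1, 1) = 1.0;
        gmm::csr_matrix<double> matrix;
        gmm::copy(assembly, matrix);

        std::vector<double> x = {1.0, 2.0};
        std::vector<double> b = {0.1, 0.2};
        std::vector<double> result(2);

    #ifdef STORM_HAVE_INTELTBB
        gmm::mult_parallel(matrix, x, result);           // result = matrix * x
        gmm::mult_add_parallel(matrix, x, b, result);    // result = matrix * x + b
    #else
        gmm::mult(matrix, x, result);
        gmm::mult_add(matrix, x, b, result);
    #endif
        return 0;
    }
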
src/storm/adapters/GmmxxAdapter.cpp (116)

@@ -5,8 +5,7 @@
#include "storm/adapters/RationalNumberAdapter.h"
#include "storm/adapters/RationalFunctionAdapter.h"
#include "storm/utility/constants.h"
#include "storm/exceptions/NotSupportedException.h"
#include "storm/utility/macros.h"
namespace storm {
namespace adapters {
@@ -43,124 +42,11 @@ namespace storm {
return result;
}
template<typename T>
void GmmxxMultiplier<T>::multAdd(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result) {
if (b) {
gmm::mult_add(matrix, x, *b, result);
} else {
gmm::mult(matrix, x, result);
}
}
template<typename T>
void GmmxxMultiplier<T>::multAddGaussSeidelBackward(gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b) {
STORM_LOG_ASSERT(matrix.nr == matrix.nc, "Expecting square matrix.");
if (b) {
gmm::mult_add_by_row_bwd(matrix, x, *b, x, gmm::abstract_dense());
} else {
gmm::mult_by_row_bwd(matrix, x, x, gmm::abstract_dense());
}
}
template<typename T>
void GmmxxMultiplier<T>::multAddReduce(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices) {
std::vector<T>* target = &result;
std::unique_ptr<std::vector<T>> temporary;
if (&x == &result) {
STORM_LOG_WARN("Using temporary in 'multAddReduce'.");
temporary = std::make_unique<std::vector<T>>(x.size());
target = temporary.get();
}
multAddReduceHelper(dir, rowGroupIndices, matrix, x, b, *target, choices);
}
template<typename T>
void GmmxxMultiplier<T>::multAddReduceGaussSeidel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b, std::vector<uint64_t>* choices) {
multAddReduceHelper(dir, rowGroupIndices, matrix, x, b, x, choices);
}
template<typename T>
void GmmxxMultiplier<T>::multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices) {
typedef std::vector<T> VectorType;
typedef gmm::csr_matrix<T> MatrixType;
typename gmm::linalg_traits<VectorType>::const_iterator add_it, add_ite;
if (b) {
add_it = gmm::vect_end(*b) - 1;
add_ite = gmm::vect_begin(*b) - 1;
}
typename gmm::linalg_traits<VectorType>::iterator target_it = gmm::vect_end(result) - 1;
typename gmm::linalg_traits<MatrixType>::const_row_iterator itr = mat_row_const_end(matrix) - 1;
typename std::vector<uint64_t>::iterator choice_it;
if (choices) {
choice_it = choices->end() - 1;
}
uint64_t choice;
for (auto row_group_it = rowGroupIndices.end() - 2, row_group_ite = rowGroupIndices.begin() - 1; row_group_it != row_group_ite; --row_group_it, --choice_it, --target_it) {
T currentValue = b ? *add_it : storm::utility::zero<T>();
currentValue += vect_sp(gmm::linalg_traits<MatrixType>::row(itr), x);
if (choices) {
choice = *(row_group_it + 1) - 1 - *row_group_it;
*choice_it = choice;
}
--itr;
if (b) {
--add_it;
}
for (uint64_t row = *row_group_it + 1, rowEnd = *(row_group_it + 1); row < rowEnd; ++row, --itr) {
T newValue = b ? *add_it : storm::utility::zero<T>();
newValue += vect_sp(gmm::linalg_traits<MatrixType>::row(itr), x);
if (choices) {
--choice;
}
if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) {
currentValue = newValue;
if (choices) {
*choice_it = choice;
}
}
if (b) {
--add_it;
}
}
// Write back final value.
*target_it = currentValue;
}
}
template<>
void GmmxxMultiplier<storm::RationalFunction>::multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<storm::RationalFunction> const& matrix, std::vector<storm::RationalFunction> const& x, std::vector<storm::RationalFunction> const* b, std::vector<storm::RationalFunction>& result, std::vector<uint64_t>* choices) {
STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Operation not supported for this data type.");
}
#ifdef STORM_HAVE_INTELTBB
template<typename T>
void GmmxxMultiplier<T>::multAddParallel(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result) {
if (b) {
gmm::mult_add_parallel(matrix, x, *b, result);
} else {
gmm::mult_parallel(matrix, x, result);
}
}
#endif
template class GmmxxAdapter<double>;
template class GmmxxMultiplier<double>;
#ifdef STORM_HAVE_CARL
template class GmmxxAdapter<storm::RationalNumber>;
template class GmmxxAdapter<storm::RationalFunction>;
template class GmmxxMultiplier<storm::RationalNumber>;
template class GmmxxMultiplier<storm::RationalFunction>;
#endif
}

src/storm/adapters/GmmxxAdapter.h (19)

@@ -6,8 +6,6 @@
#include "storm/storage/SparseMatrix.h"
#include "storm/utility/macros.h"
namespace storm {
namespace adapters {
@@ -21,22 +19,5 @@ namespace storm {
static std::unique_ptr<gmm::csr_matrix<T>> toGmmxxSparseMatrix(storm::storage::SparseMatrix<T> const& matrix);
};
template<class T>
class GmmxxMultiplier {
public:
static void multAdd(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result);
static void multAddGaussSeidelBackward(gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b);
static void multAddReduce(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices = nullptr);
static void multAddReduceGaussSeidel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b, std::vector<uint64_t>* choices = nullptr);
#ifdef STORM_HAVE_INTELTBB
static void multAddParallel(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const& b, std::vector<T>& result);
#endif
private:
static void multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices = nullptr);
};
}
}

src/storm/settings/modules/CoreSettings.cpp (16)

@@ -12,6 +12,7 @@
#include "storm/utility/macros.h"
#include "storm/exceptions/IllegalArgumentValueException.h"
#include "storm/exceptions/InvalidOptionException.h"
namespace storm {
namespace settings {
@@ -31,6 +32,8 @@ namespace storm {
const std::string CoreSettings::engineOptionShortName = "e";
const std::string CoreSettings::ddLibraryOptionName = "ddlib";
const std::string CoreSettings::cudaOptionName = "cuda";
const std::string CoreSettings::intelTbbOptionName = "enable-tbb";
const std::string CoreSettings::intelTbbOptionShortName = "tbb";
CoreSettings::CoreSettings() : ModuleSettings(moduleName), engine(CoreSettings::Engine::Sparse) {
this->addOption(storm::settings::OptionBuilder(moduleName, counterexampleOptionName, false, "Generates a counterexample for the given PRCTL formulas if not satisfied by the model.").setShortName(counterexampleOptionShortName).build());
@@ -56,7 +59,9 @@ namespace storm {
this->addOption(storm::settings::OptionBuilder(moduleName, smtSolverOptionName, false, "Sets which SMT solver is preferred.")
.addArgument(storm::settings::ArgumentBuilder::createStringArgument("name", "The name of an SMT solver.").addValidatorString(ArgumentValidatorFactory::createMultipleChoiceValidator(smtSolvers)).setDefaultValueString("z3").build()).build());
this->addOption(storm::settings::OptionBuilder(moduleName, statisticsOptionName, false, "Sets whether to display statistics if available.").setShortName(statisticsOptionShortName).build());
this->addOption(storm::settings::OptionBuilder(moduleName, cudaOptionName, false, "Sets whether to use CUDA.").build());
this->addOption(storm::settings::OptionBuilder(moduleName, intelTbbOptionName, false, "Sets whether to use Intel TBB (if Storm was built with support for TBB).").setShortName(intelTbbOptionShortName).build());
}
bool CoreSettings::isCounterexampleSet() const {
@@ -128,7 +133,11 @@ namespace storm {
return this->getOption(statisticsOptionName).getHasOptionBeenSet();
}
bool CoreSettings::isCudaSet() const {
bool CoreSettings::isUseIntelTbbSet() const {
return this->getOption(intelTbbOptionName).getHasOptionBeenSet();
}
bool CoreSettings::isUseCudaSet() const {
return this->getOption(cudaOptionName).getHasOptionBeenSet();
}
@@ -159,7 +168,12 @@ namespace storm {
}
bool CoreSettings::check() const {
#ifdef STORM_HAVE_INTELTBB
return true;
#else
STORM_LOG_WARN_COND(!isUseIntelTbbSet(), "Enabling TBB is not supported in this version of Storm as it was not built with support for it.");
return true;
#endif
}
} // namespace modules

src/storm/settings/modules/CoreSettings.h (11)

@@ -116,12 +116,19 @@ namespace storm {
*/
bool isShowStatisticsSet() const;
/*!
* Retrieves whether the option to use Intel TBB is set.
*
* @return True iff the option was set.
*/
bool isUseIntelTbbSet() const;
/*!
* Retrieves whether the option to use CUDA is set.
*
* @return True iff the option was set.
*/
bool isCudaSet() const;
bool isUseCudaSet() const;
/*!
* Retrieves the selected engine.
@@ -157,6 +164,8 @@ namespace storm {
static const std::string engineOptionName;
static const std::string engineOptionShortName;
static const std::string ddLibraryOptionName;
static const std::string intelTbbOptionName;
static const std::string intelTbbOptionShortName;
static const std::string cudaOptionName;
};

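The new switch is registered on the core module as enable-tbb (short name tbb) and queried through isUseIntelTbbSet(). A short sketch of reading it, assuming Storm's settings modules have been registered and the command line parsed, in the same style as the isUseCudaSet() call shown further down in TopologicalMinMaxLinearEquationSolver.cpp.

    #include "storm/settings/SettingsManager.h"
    #include "storm/settings/modules/CoreSettings.h"

    // True iff the user enabled TBB-based parallelization on the command line.
    bool tbbRequested() {
        return storm::settings::getModule<storm::settings::modules::CoreSettings>().isUseIntelTbbSet();
    }
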
src/storm/solver/GmmxxLinearEquationSolver.cpp (11)

@@ -4,6 +4,9 @@
#include <utility>
#include "storm/adapters/GmmxxAdapter.h"
#include "storm/solver/GmmxxMultiplier.h"
#include "storm/settings/SettingsManager.h"
#include "storm/utility/vector.h"
#include "storm/utility/constants.h"
@@ -187,7 +190,7 @@ namespace storm {
template<typename ValueType>
void GmmxxLinearEquationSolver<ValueType>::multiply(std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const {
storm::adapters::GmmxxMultiplier<ValueType>::multAdd(*gmmxxA, x, b, result);
multiplier.multAdd(*gmmxxA, x, b, result);
if (!this->isCachingEnabled()) {
clearCache();
@@ -196,7 +199,7 @@ namespace storm {
template<typename ValueType>
void GmmxxLinearEquationSolver<ValueType>::multiplyAndReduce(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const {
storm::adapters::GmmxxMultiplier<ValueType>::multAddReduce(dir, rowGroupIndices, *gmmxxA, x, b, result, choices);
multiplier.multAddReduce(dir, rowGroupIndices, *gmmxxA, x, b, result, choices);
}
template<typename ValueType>
@@ -206,12 +209,12 @@ namespace storm {
template<typename ValueType>
void GmmxxLinearEquationSolver<ValueType>::multiplyGaussSeidel(std::vector<ValueType>& x, std::vector<ValueType> const* b) const {
storm::adapters::GmmxxMultiplier<ValueType>::multAddGaussSeidelBackward(*gmmxxA, x, b);
multiplier.multAddGaussSeidelBackward(*gmmxxA, x, b);
}
template<typename ValueType>
void GmmxxLinearEquationSolver<ValueType>::multiplyAndReduceGaussSeidel(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<uint64_t>* choices) const {
storm::adapters::GmmxxMultiplier<ValueType>::multAddReduceGaussSeidel(dir, rowGroupIndices, *gmmxxA, x, b, choices);
multiplier.multAddReduceGaussSeidel(dir, rowGroupIndices, *gmmxxA, x, b, choices);
}
template<typename ValueType>

src/storm/solver/GmmxxLinearEquationSolver.h (8)

@@ -5,7 +5,9 @@
#include "storm/utility/gmm.h"
#include "LinearEquationSolver.h"
#include "storm/solver/GmmxxMultiplier.h"
#include "storm/solver/LinearEquationSolver.h"
namespace storm {
namespace solver {
@@ -95,7 +97,6 @@ namespace storm {
void setSettings(GmmxxLinearEquationSolverSettings<ValueType> const& newSettings);
GmmxxLinearEquationSolverSettings<ValueType> const& getSettings() const;
virtual void clearCache() const override;
private:
@@ -108,6 +109,9 @@ namespace storm {
// The settings used by the solver.
GmmxxLinearEquationSolverSettings<ValueType> settings;
// A multiplier object used to dispatch the multiplication calls.
GmmxxMultiplier<ValueType> multiplier;
// cached data obtained during solving
mutable std::unique_ptr<gmm::ilu_precond<gmm::csr_matrix<ValueType>>> iluPreconditioner;
mutable std::unique_ptr<gmm::diagonal_precond<gmm::csr_matrix<ValueType>>> diagonalPreconditioner;

src/storm/solver/GmmxxMultiplier.cpp (221)

@@ -0,0 +1,221 @@
#include "storm/solver/GmmxxMultiplier.h"
#include "storm/adapters/RationalNumberAdapter.h"
#include "storm/adapters/RationalFunctionAdapter.h"
#include "storm/utility/constants.h"
#include "storm/exceptions/NotSupportedException.h"
#include "storm/utility/macros.h"
namespace storm {
namespace solver {
template<typename T>
GmmxxMultiplier<T>::GmmxxMultiplier() : storm::utility::VectorHelper<T>() {
// Intentionally left empty.
}
template<typename T>
void GmmxxMultiplier<T>::multAdd(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result) const {
if (this->parallelize()) {
multAddParallel(matrix, x, b, result);
} else {
if (b) {
gmm::mult_add(matrix, x, *b, result);
} else {
gmm::mult(matrix, x, result);
}
}
}
template<typename T>
void GmmxxMultiplier<T>::multAddGaussSeidelBackward(gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b) const {
STORM_LOG_ASSERT(matrix.nr == matrix.nc, "Expecting square matrix.");
if (b) {
gmm::mult_add_by_row_bwd(matrix, x, *b, x, gmm::abstract_dense());
} else {
gmm::mult_by_row_bwd(matrix, x, x, gmm::abstract_dense());
}
}
template<typename T>
void GmmxxMultiplier<T>::multAddReduce(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices) const {
std::vector<T>* target = &result;
std::unique_ptr<std::vector<T>> temporary;
if (&x == &result) {
STORM_LOG_WARN("Using temporary in 'multAddReduce'.");
temporary = std::make_unique<std::vector<T>>(x.size());
target = temporary.get();
}
if (this->parallelize()) {
multAddReduceParallel(dir, rowGroupIndices, matrix, x, b, *target, choices);
} else {
multAddReduceHelper(dir, rowGroupIndices, matrix, x, b, *target, choices);
}
}
template<typename T>
void GmmxxMultiplier<T>::multAddReduceGaussSeidel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b, std::vector<uint64_t>* choices) const {
multAddReduceHelper(dir, rowGroupIndices, matrix, x, b, x, choices);
}
template<typename T>
void GmmxxMultiplier<T>::multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices) const {
typedef std::vector<T> VectorType;
typedef gmm::csr_matrix<T> MatrixType;
typename gmm::linalg_traits<VectorType>::const_iterator add_it, add_ite;
if (b) {
add_it = gmm::vect_end(*b) - 1;
add_ite = gmm::vect_begin(*b) - 1;
}
typename gmm::linalg_traits<VectorType>::iterator target_it = gmm::vect_end(result) - 1;
typename gmm::linalg_traits<MatrixType>::const_row_iterator itr = mat_row_const_end(matrix) - 1;
typename std::vector<uint64_t>::iterator choice_it;
if (choices) {
choice_it = choices->end() - 1;
}
uint64_t choice;
for (auto row_group_it = rowGroupIndices.end() - 2, row_group_ite = rowGroupIndices.begin() - 1; row_group_it != row_group_ite; --row_group_it, --choice_it, --target_it) {
T currentValue = b ? *add_it : storm::utility::zero<T>();
currentValue += vect_sp(gmm::linalg_traits<MatrixType>::row(itr), x);
if (choices) {
choice = *(row_group_it + 1) - 1 - *row_group_it;
*choice_it = choice;
}
--itr;
if (b) {
--add_it;
}
for (uint64_t row = *row_group_it + 1, rowEnd = *(row_group_it + 1); row < rowEnd; ++row, --itr) {
T newValue = b ? *add_it : storm::utility::zero<T>();
newValue += vect_sp(gmm::linalg_traits<MatrixType>::row(itr), x);
if (choices) {
--choice;
}
if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) {
currentValue = newValue;
if (choices) {
*choice_it = choice;
}
}
if (b) {
--add_it;
}
}
// Write back final value.
*target_it = currentValue;
}
}
template<>
void GmmxxMultiplier<storm::RationalFunction>::multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<storm::RationalFunction> const& matrix, std::vector<storm::RationalFunction> const& x, std::vector<storm::RationalFunction> const* b, std::vector<storm::RationalFunction>& result, std::vector<uint64_t>* choices) const {
STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Operation not supported for this data type.");
}
template<typename T>
void GmmxxMultiplier<T>::multAddParallel(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result) const {
#ifdef STORM_HAVE_INTELTBB
if (b) {
gmm::mult_add_parallel(matrix, x, *b, result);
} else {
gmm::mult_parallel(matrix, x, result);
}
#else
STORM_LOG_WARN("Storm was built without support for Intel TBB, defaulting to sequential version.");
multAdd(matrix, x, b, result);
#endif
}
template<typename T>
class TbbMultAddReduceFunctor {
public:
TbbMultAddReduceFunctor(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices) : dir(dir), rowGroupIndices(rowGroupIndices), matrix(matrix), x(x), b(b), result(result), choices(choices) {
// Intentionally left empty.
}
void operator()(tbb::blocked_range<unsigned long> const& range) const {
auto groupIt = rowGroupIndices.begin() + range.begin();
auto groupIte = rowGroupIndices.begin() + range.end();
auto itr = mat_row_const_begin(matrix) + *groupIt;
typename std::vector<T>::const_iterator bIt;
if (b) {
bIt = b->begin() + *groupIt;
}
typename std::vector<uint64_t>::iterator choiceIt;
if (choices) {
choiceIt = choices->begin() + range.begin();
}
auto resultIt = result.begin() + range.begin();
for (; groupIt != groupIte; ++groupIt, ++resultIt, ++choiceIt) {
T currentValue = vect_sp(gmm::linalg_traits<gmm::csr_matrix<T>>::row(itr), x, typename gmm::linalg_traits<gmm::csr_matrix<T>>::storage_type(), typename gmm::linalg_traits<std::vector<T>>::storage_type());
if (b) {
currentValue += *bIt;
++bIt;
}
if (choices) {
*choiceIt = 0;
}
++itr;
for (auto itre = mat_row_const_begin(matrix) + *(groupIt + 1); itr != itre; ++itr) {
T newValue = vect_sp(gmm::linalg_traits<gmm::csr_matrix<T>>::row(itr), x, typename gmm::linalg_traits<gmm::csr_matrix<T>>::storage_type(), typename gmm::linalg_traits<std::vector<T>>::storage_type());
if (b) {
newValue += *bIt;
++bIt;
}
if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) {
currentValue = newValue;
if (choices) {
*choiceIt = std::distance(mat_row_const_begin(matrix), itr) - *groupIt;
}
}
}
*resultIt = currentValue;
}
}
private:
storm::solver::OptimizationDirection dir;
std::vector<uint64_t> const& rowGroupIndices;
gmm::csr_matrix<T> const& matrix;
std::vector<T> const& x;
std::vector<T> const* b;
std::vector<T>& result;
std::vector<uint64_t>* choices;
};
template<typename T>
void GmmxxMultiplier<T>::multAddReduceParallel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices) const {
tbb::parallel_for(tbb::blocked_range<unsigned long>(0, rowGroupIndices.size() - 1, 10), TbbMultAddReduceFunctor<T>(dir, rowGroupIndices, matrix, x, b, result, choices));
}
template<>
void GmmxxMultiplier<storm::RationalFunction>::multAddReduceParallel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<storm::RationalFunction> const& matrix, std::vector<storm::RationalFunction> const& x, std::vector<storm::RationalFunction> const* b, std::vector<storm::RationalFunction>& result, std::vector<uint64_t>* choices) const {
STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "This operation is not supported.");
}
template class GmmxxMultiplier<double>;
#ifdef STORM_HAVE_CARL
template class GmmxxMultiplier<storm::RationalNumber>;
template class GmmxxMultiplier<storm::RationalFunction>;
#endif
}
}

src/storm/solver/GmmxxMultiplier.h (31)

@@ -0,0 +1,31 @@
#pragma once
#include "storm/utility/VectorHelper.h"
#include "storm/adapters/GmmxxAdapter.h"
#include "storm-config.h"
namespace storm {
namespace solver {
template<class T>
class GmmxxMultiplier : public storm::utility::VectorHelper<T> {
public:
GmmxxMultiplier();
void multAdd(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result) const;
void multAddGaussSeidelBackward(gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b) const;
void multAddReduce(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices = nullptr) const;
void multAddReduceGaussSeidel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T>& x, std::vector<T> const* b, std::vector<uint64_t>* choices = nullptr) const;
void multAddParallel(gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result) const;
void multAddReduceParallel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices = nullptr) const;
private:
void multAddReduceHelper(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, gmm::csr_matrix<T> const& matrix, std::vector<T> const& x, std::vector<T> const* b, std::vector<T>& result, std::vector<uint64_t>* choices = nullptr) const;
};
}
}

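Combined with the GmmxxAdapter declarations above, the intended use of the new GmmxxMultiplier looks roughly as follows. This is a sketch only: it assumes an already constructed storm::storage::SparseMatrix<double> and initialized settings (the parallelize() check inherited from VectorHelper reads them).

    #include <memory>
    #include <vector>

    #include "storm/adapters/GmmxxAdapter.h"
    #include "storm/solver/GmmxxMultiplier.h"

    // result = matrix * x + b, dispatched to gmm::mult_add or gmm::mult_add_parallel internally.
    void multiplyOnce(storm::storage::SparseMatrix<double> const& matrix,
                      std::vector<double> const& x,
                      std::vector<double> const& b,
                      std::vector<double>& result) {
        std::unique_ptr<gmm::csr_matrix<double>> gmmMatrix = storm::adapters::GmmxxAdapter<double>::toGmmxxSparseMatrix(matrix);
        storm::solver::GmmxxMultiplier<double> multiplier;
        multiplier.multAdd(*gmmMatrix, x, &b, result);
    }
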
src/storm/solver/LinearEquationSolver.cpp (2)

@@ -69,7 +69,7 @@ namespace storm {
setCachingEnabled(true);
this->multiply(x, b, *cachedRowVector);
storm::utility::vector::reduceVectorMinOrMax(dir, *cachedRowVector, result, rowGroupIndices, choices);
vectorHelper.reduceVector(dir, *cachedRowVector, result, rowGroupIndices, choices);
// restore the old caching setting
setCachingEnabled(cachingWasEnabled);

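The call above swaps storm::utility::vector::reduceVectorMinOrMax for vectorHelper.reduceVector; the VectorHelper implementation itself (VectorHelper.cpp/.h, listed but not shown in this excerpt) also gains a TBB path. As a reference for the intended semantics only, a sequential sketch with illustrative names:

    #include <cstdint>
    #include <vector>

    #include "storm/solver/OptimizationDirection.h"

    // Sequential reference semantics (illustrative, not Storm code): for every row group g,
    // target[g] becomes the minimum or maximum of source over [rowGroupIndices[g], rowGroupIndices[g+1]).
    template<typename ValueType>
    void reduceRowGroupsReference(storm::solver::OptimizationDirection dir,
                                  std::vector<ValueType> const& source,
                                  std::vector<ValueType>& target,
                                  std::vector<uint64_t> const& rowGroupIndices) {
        for (uint64_t group = 0; group + 1 < rowGroupIndices.size(); ++group) {
            ValueType best = source[rowGroupIndices[group]];
            for (uint64_t row = rowGroupIndices[group] + 1; row < rowGroupIndices[group + 1]; ++row) {
                bool better = (dir == storm::solver::OptimizationDirection::Minimize) ? (source[row] < best) : (source[row] > best);
                if (better) {
                    best = source[row];
                }
            }
            target[group] = best;
        }
    }
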
src/storm/solver/LinearEquationSolver.h (5)

@@ -8,6 +8,8 @@
#include "storm/solver/MultiplicationStyle.h"
#include "storm/solver/OptimizationDirection.h"
#include "storm/utility/VectorHelper.h"
#include "storm/storage/SparseMatrix.h"
namespace storm {
@@ -171,6 +173,9 @@ namespace storm {
/// Whether some of the generated data during solver calls should be cached.
mutable bool cachingEnabled;
/// An object that can be used to reduce vectors.
storm::utility::VectorHelper<ValueType> vectorHelper;
};
template<typename ValueType>

src/storm/solver/NativeLinearEquationSolver.cpp (19)

@@ -174,7 +174,8 @@ namespace storm {
while (!converged && iterationCount < this->getSettings().getMaximalNumberOfIterations() && !(this->hasCustomTerminationCondition() && this->getTerminationCondition().terminateNow(*currentX))) {
// Compute D^-1 * (b - LU * x) and store result in nextX.
jacobiLU.multiplyWithVector(*currentX, *nextX);
multiplier.multAdd(jacobiLU, *currentX, nullptr, *nextX);
storm::utility::vector::subtractVectors(b, *nextX, *nextX);
storm::utility::vector::multiplyVectorsPointwise(jacobiD, *nextX, *nextX);
@@ -292,7 +293,7 @@ namespace storm {
// Create a vector that always holds Ax.
std::vector<ValueType> currentAx(x.size());
walkerChaeData->matrix.multiplyWithVector(*currentX, currentAx);
multiplier.multAdd(walkerChaeData->matrix, *currentX, nullptr, currentAx);
// (3) Perform iterations until convergence.
bool converged = false;
@@ -302,7 +303,7 @@ namespace storm {
walkerChaeData->matrix.performWalkerChaeStep(*currentX, walkerChaeData->columnSums, walkerChaeData->b, currentAx, *nextX);
// Compute new Ax.
walkerChaeData->matrix.multiplyWithVector(*nextX, currentAx);
multiplier.multAdd(walkerChaeData->matrix, *nextX, nullptr, currentAx);
// Check for convergence.
converged = storm::utility::vector::computeSquaredNorm2Difference(currentAx, walkerChaeData->b) <= squaredErrorBound;
@@ -355,14 +356,14 @@ namespace storm {
template<typename ValueType>
void NativeLinearEquationSolver<ValueType>::multiply(std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const {
if (&x != &result) {
A->multiplyWithVector(x, result, b);
multiplier.multAdd(*A, x, b, result);
} else {
// If the two vectors are aliases, we need to create a temporary.
if (!this->cachedRowVector) {
this->cachedRowVector = std::make_unique<std::vector<ValueType>>(getMatrixRowCount());
}
A->multiplyWithVector(x, *this->cachedRowVector, b);
multiplier.multAdd(*A, x, b, *this->cachedRowVector);
result.swap(*this->cachedRowVector);
if (!this->isCachingEnabled()) {
@@ -374,14 +375,14 @@ namespace storm {
template<typename ValueType>
void NativeLinearEquationSolver<ValueType>::multiplyAndReduce(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const {
if (&x != &result) {
A->multiplyAndReduce(dir, rowGroupIndices, x, b, result, choices);
multiplier.multAddReduce(dir, rowGroupIndices, *A, x, b, result, choices);
} else {
// If the two vectors are aliases, we need to create a temporary.
if (!this->cachedRowVector) {
this->cachedRowVector = std::make_unique<std::vector<ValueType>>(getMatrixRowCount());
}
this->A->multiplyAndReduce(dir, rowGroupIndices, x, b, *this->cachedRowVector, choices);
multiplier.multAddReduce(dir, rowGroupIndices, *A, x, b, *this->cachedRowVector, choices);
result.swap(*this->cachedRowVector);
if (!this->isCachingEnabled()) {
@@ -398,12 +399,12 @@ namespace storm {
template<typename ValueType>
void NativeLinearEquationSolver<ValueType>::multiplyGaussSeidel(std::vector<ValueType>& x, std::vector<ValueType> const* b) const {
STORM_LOG_ASSERT(this->A->getRowCount() == this->A->getColumnCount(), "This function is only applicable for square matrices.");
A->multiplyWithVector(x, x, b, true, storm::storage::SparseMatrix<ValueType>::MultiplicationDirection::Backward);
multiplier.multAddGaussSeidelBackward(*A, x, b);
}
template<typename ValueType>
void NativeLinearEquationSolver<ValueType>::multiplyAndReduceGaussSeidel(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<uint_fast64_t>* choices) const {
A->multiplyAndReduce(dir, rowGroupIndices, x, b, x, choices, true, storm::storage::SparseMatrix<ValueType>::MultiplicationDirection::Backward);
multiplier.multAddReduceGaussSeidelBackward(dir, rowGroupIndices, *A, x, b, choices);
}
template<typename ValueType>

src/storm/solver/NativeLinearEquationSolver.h (7)

@@ -3,7 +3,9 @@
#include <ostream>
#include "LinearEquationSolver.h"
#include "storm/solver/LinearEquationSolver.h"
#include "storm/solver/NativeMultiplier.h"
namespace storm {
namespace solver {
@@ -80,6 +82,9 @@ namespace storm {
// The settings used by the solver.
NativeLinearEquationSolverSettings<ValueType> settings;
// An object to dispatch all multiplication operations.
NativeMultiplier<ValueType> multiplier;
// cached auxiliary data
mutable std::unique_ptr<std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>>> jacobiDecomposition;

src/storm/solver/NativeMultiplier.cpp (101)

@@ -0,0 +1,101 @@
#include "storm/solver/NativeMultiplier.h"
#include "storm-config.h"
#include "storm/storage/SparseMatrix.h"
#include "storm/adapters/RationalNumberAdapter.h"
#include "storm/adapters/RationalFunctionAdapter.h"
#include "storm/utility/macros.h"
namespace storm {
namespace solver {
template<typename ValueType>
NativeMultiplier<ValueType>::NativeMultiplier() : storm::utility::VectorHelper<ValueType>() {
// Intentionally left empty.
}
template<typename ValueType>
void NativeMultiplier<ValueType>::multAdd(storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const {
std::vector<ValueType>* target = &result;
std::unique_ptr<std::vector<ValueType>> temporary;
if (&x == &result) {
STORM_LOG_WARN("Using temporary in 'multAdd'.");
temporary = std::make_unique<std::vector<ValueType>>(x.size());
target = temporary.get();
}
if (this->parallelize()) {
multAddParallel(matrix, x, b, result);
} else {
matrix.multiplyWithVector(x, result, b);
}
if (target == temporary.get()) {
std::swap(result, *temporary);
}
}
template<typename ValueType>
void NativeMultiplier<ValueType>::multAddGaussSeidelBackward(storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType>& x, std::vector<ValueType> const* b) const {
matrix.multiplyWithVectorBackward(x, x, b);
}
template<typename ValueType>
void NativeMultiplier<ValueType>::multAddReduce(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint64_t>* choices) const {
std::vector<ValueType>* target = &result;
std::unique_ptr<std::vector<ValueType>> temporary;
if (&x == &result) {
STORM_LOG_WARN("Using temporary in 'multAddReduce'.");
temporary = std::make_unique<std::vector<ValueType>>(x.size());
target = temporary.get();
}
if (this->parallelize()) {
multAddReduceParallel(dir, rowGroupIndices, matrix, x, b, *target, choices);
} else {
matrix.multiplyAndReduce(dir, rowGroupIndices, x, b, *target, choices);
}
if (target == temporary.get()) {
std::swap(result, *temporary);
}
}
template<typename ValueType>
void NativeMultiplier<ValueType>::multAddReduceGaussSeidelBackward(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<uint64_t>* choices) const {
matrix.multiplyAndReduceBackward(dir, rowGroupIndices, x, b, x, choices);
}
template<typename ValueType>
void NativeMultiplier<ValueType>::multAddParallel(storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const {
#ifdef STORM_HAVE_INTELTBB
matrix.multiplyWithVectorParallel(x, result, b);
#else
STORM_LOG_WARN("Storm was built without support for Intel TBB, defaulting to sequential version.");
multAdd(matrix, x, b, result);
#endif
}
template<typename ValueType>
void NativeMultiplier<ValueType>::multAddReduceParallel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint64_t>* choices) const {
#ifdef STORM_HAVE_INTELTBB
matrix.multiplyAndReduceParallel(dir, rowGroupIndices, x, b, result, choices);
#else
STORM_LOG_WARN("Storm was built without support for Intel TBB, defaulting to sequential version.");
multAddReduce(dir, rowGroupIndices, matrix, x, b, result, choices);
#endif
}
template class NativeMultiplier<double>;
#ifdef STORM_HAVE_CARL
template class NativeMultiplier<storm::RationalNumber>;
template class NativeMultiplier<storm::RationalFunction>;
#endif
}
}

src/storm/solver/NativeMultiplier.h (31)

@@ -0,0 +1,31 @@
#pragma once
#include "storm/utility/VectorHelper.h"
#include "storm/solver/OptimizationDirection.h"
namespace storm {
namespace storage {
template<typename ValueType>
class SparseMatrix;
}
namespace solver {
template<typename ValueType>
class NativeMultiplier : public storm::utility::VectorHelper<ValueType> {
public:
NativeMultiplier();
void multAdd(storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const;
void multAddGaussSeidelBackward(storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType>& x, std::vector<ValueType> const* b) const;
void multAddReduce(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint64_t>* choices = nullptr) const;
void multAddReduceGaussSeidelBackward(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<uint64_t>* choices = nullptr) const;
void multAddParallel(storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const;
void multAddReduceParallel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, storm::storage::SparseMatrix<ValueType> const& matrix, std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint64_t>* choices = nullptr) const;
};
}
}

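A hedged end-to-end sketch for NativeMultiplier::multAddReduce on a tiny matrix with two row groups. The SparseMatrixBuilder calls are the usual Storm construction idiom and are not part of this diff, so treat the exact builder arguments as illustrative; settings must be initialized for the inherited parallelize() check.

    #include <cstdint>
    #include <vector>

    #include "storm/solver/NativeMultiplier.h"
    #include "storm/solver/OptimizationDirection.h"
    #include "storm/storage/SparseMatrix.h"

    void smallReduceExample() {
        // 4 rows in 2 row groups (two choices per state), 2 columns. Builder arguments are illustrative.
        storm::storage::SparseMatrixBuilder<double> builder(0, 0, 0, false, true, 0);
        builder.newRowGroup(0);
        builder.addNextValue(0, 0, 1.0);
        builder.addNextValue(1, 1, 1.0);
        builder.newRowGroup(2);
        builder.addNextValue(2, 0, 0.5);
        builder.addNextValue(2, 1, 0.5);
        builder.addNextValue(3, 1, 1.0);
        storm::storage::SparseMatrix<double> matrix = builder.build();

        std::vector<double> x = {1.0, 2.0};
        std::vector<double> result(2);
        std::vector<uint64_t> choices(2);
        std::vector<uint64_t> rowGroupIndices(matrix.getRowGroupIndices().begin(), matrix.getRowGroupIndices().end());

        // For each row group, take the maximum over its rows of (row * x) and remember the chosen row offset.
        storm::solver::NativeMultiplier<double> multiplier;
        multiplier.multAddReduce(storm::solver::OptimizationDirection::Maximize, rowGroupIndices, matrix, x, nullptr, result, &choices);
    }
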
src/storm/solver/TopologicalMinMaxLinearEquationSolver.cpp (2)

@@ -25,7 +25,7 @@ namespace storm {
template<typename ValueType>
TopologicalMinMaxLinearEquationSolver<ValueType>::TopologicalMinMaxLinearEquationSolver(double precision, uint_fast64_t maximalNumberOfIterations, bool relative) : precision(precision), maximalNumberOfIterations(maximalNumberOfIterations), relative(relative) {
// Get the settings object to customize solving.
this->enableCuda = storm::settings::getModule<storm::settings::modules::CoreSettings>().isCudaSet();
this->enableCuda = storm::settings::getModule<storm::settings::modules::CoreSettings>().isUseCudaSet();
#ifdef STORM_HAVE_CUDA
STORM_LOG_INFO_COND(this->enableCuda, "Option CUDA was not set, but the topological value iteration solver will use it anyways.");
#endif

src/storm/storage/SparseMatrix.cpp (250)

@@ -1304,36 +1304,23 @@ namespace storm {
}
template<typename ValueType>
void SparseMatrix<ValueType>::multiplyWithVector(std::vector<ValueType> const& vector, std::vector<ValueType>& result, std::vector<value_type> const* summand, bool allowAliasing, MultiplicationDirection const& multiplicationDirection) const {
void SparseMatrix<ValueType>::multiplyWithVector(std::vector<ValueType> const& vector, std::vector<ValueType>& result, std::vector<value_type> const* summand) const {
// If the vector and the result are aliases and this is not set to be allowed, we need a temporary vector.
std::vector<ValueType>* target;
std::vector<ValueType> temporary;
bool vectorsAliased = &vector == &result;
if (!allowAliasing && vectorsAliased) {
STORM_LOG_WARN("Vectors are aliased but are not allowed to be. Using temporary, which is potentially slow.");
if (&vector == &result) {
STORM_LOG_WARN("Vectors are aliased. Using temporary, which is potentially slow.");
temporary = std::vector<ValueType>(vector.size());
target = &temporary;
STORM_LOG_WARN_COND(multiplicationDirection != MultiplicationDirection::DontCare, "Not specifying multiplication direction for aliased vectors may yield unexpected results.");
} else {
target = &result;
}
STORM_LOG_WARN_COND(vectorsAliased || multiplicationDirection == MultiplicationDirection::DontCare, "Setting a multiplication direction for unaliased vectors. Check whether this is intended.");
this->multiplyWithVectorForward(vector, *target, summand);
#ifdef STORM_HAVE_INTELTBB
bool useParallel = !allowAliasing && multiplicationDirection == MultiplicationDirection::DontCare && this->getNonzeroEntryCount() > 10000;
if (useParallel) {
this->multiplyWithVectorParallel(vector, *target, summand);
} else {
#endif
if (multiplicationDirection == MultiplicationDirection::Forward || (multiplicationDirection == MultiplicationDirection::DontCare && !vectorsAliased)) {
this->multiplyWithVectorForward(vector, *target, summand);
} else {
this->multiplyWithVectorBackward(vector, *target, summand);
}
#ifdef STORM_HAVE_INTELTBB
if (target == &temporary) {
std::swap(result, *target);
}
#endif
}
template<typename ValueType>
@@ -1389,6 +1376,47 @@ namespace storm {
}
#ifdef STORM_HAVE_INTELTBB
template <typename ValueType>
class TbbMultAddFunctor {
public:
typedef typename storm::storage::SparseMatrix<ValueType>::index_type index_type;
typedef typename storm::storage::SparseMatrix<ValueType>::value_type value_type;
typedef typename storm::storage::SparseMatrix<ValueType>::const_iterator const_iterator;
TbbMultAddFunctor(std::vector<MatrixEntry<index_type, value_type>> const& columnsAndEntries, std::vector<uint64_t> const& rowIndications, std::vector<ValueType> const& x, std::vector<ValueType>& result, std::vector<value_type> const* summand) : columnsAndEntries(columnsAndEntries), rowIndications(rowIndications), x(x), result(result), summand(summand) {
// Intentionally left empty.
}
void operator()(tbb::blocked_range<index_type> const& range) const {
index_type startRow = range.begin();
index_type endRow = range.end();
typename std::vector<index_type>::const_iterator rowIterator = rowIndications.begin() + startRow;
const_iterator it = columnsAndEntries.begin() + *rowIterator;
const_iterator ite;
typename std::vector<ValueType>::iterator resultIterator = result.begin() + startRow;
typename std::vector<ValueType>::iterator resultIteratorEnd = result.begin() + endRow;
typename std::vector<ValueType>::const_iterator summandIterator;
if (summand) {
summandIterator = summand->begin() + startRow;
}
for (; resultIterator != resultIteratorEnd; ++rowIterator, ++resultIterator, ++summandIterator) {
*resultIterator = summand ? *summandIterator : storm::utility::zero<ValueType>();
for (ite = columnsAndEntries.begin() + *(rowIterator + 1); it != ite; ++it) {
*resultIterator += it->getValue() * x[it->getColumn()];
}
}
}
private:
std::vector<MatrixEntry<index_type, value_type>> const& columnsAndEntries;
std::vector<uint64_t> const& rowIndications;
std::vector<ValueType> const& x;
std::vector<ValueType>& result;
std::vector<value_type> const* summand;
};
template<typename ValueType>
void SparseMatrix<ValueType>::multiplyWithVectorParallel(std::vector<ValueType> const& vector, std::vector<ValueType>& result, std::vector<value_type> const* summand) const {
if (&vector == &result) {
@@ -1397,34 +1425,7 @@ namespace storm {
multiplyWithVectorParallel(vector, tmpVector);
result = std::move(tmpVector);
} else {
tbb::parallel_for(tbb::blocked_range<index_type>(0, result.size(), 10),
[&] (tbb::blocked_range<index_type> const& range) {
index_type startRow = range.begin();
index_type endRow = range.end();
const_iterator it = this->begin(startRow);
const_iterator ite;
std::vector<index_type>::const_iterator rowIterator = this->rowIndications.begin() + startRow;
std::vector<index_type>::const_iterator rowIteratorEnd = this->rowIndications.begin() + endRow;
typename std::vector<ValueType>::iterator resultIterator = result.begin() + startRow;
typename std::vector<ValueType>::iterator resultIteratorEnd = result.begin() + endRow;
typename std::vector<ValueType>::const_iterator summandIterator;
if (summand) {
summandIterator = summand->begin() + startRow;
}
for (; resultIterator != resultIteratorEnd; ++rowIterator, ++resultIterator) {
if (summand) {
*resultIterator = *summandIterator;
++summandIterator;
} else {
*resultIterator = storm::utility::zero<ValueType>();
}
for (ite = this->begin() + *(rowIterator + 1); it != ite; ++it) {
*resultIterator += it->getValue() * vector[it->getColumn()];
}
}
});
tbb::parallel_for(tbb::blocked_range<index_type>(0, result.size(), 10), TbbMultAddFunctor<ValueType>(columnsAndValues, rowIndications, vector, result, summand));
}
}
#endif
@@ -1560,7 +1561,7 @@ namespace storm {
#ifdef STORM_HAVE_CARL
template<>
void SparseMatrix<storm::RationalFunction>::multiplyAndReduceForward(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<storm::RationalFunction> const& vector, std::vector<storm::RationalFunction> const* b, std::vector<storm::RationalFunction>& result, std::vector<uint_fast64_t>* choices) const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "This operation is not supported.");
STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "This operation is not supported.");
}
#endif
@@ -1618,96 +1619,111 @@ namespace storm {
#ifdef STORM_HAVE_CARL
template<>
void SparseMatrix<storm::RationalFunction>::multiplyAndReduceBackward(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<storm::RationalFunction> const& vector, std::vector<storm::RationalFunction> const* b, std::vector<storm::RationalFunction>& result, std::vector<uint_fast64_t>* choices) const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "This operation is not supported.");
STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "This operation is not supported.");
}
#endif
#ifdef STORM_HAVE_INTELTBB
template <typename ValueType>
class TbbMultAddReduceFunctor {
public:
typedef typename storm::storage::SparseMatrix<ValueType>::index_type index_type;
typedef typename storm::storage::SparseMatrix<ValueType>::value_type value_type;
typedef typename storm::storage::SparseMatrix<ValueType>::const_iterator const_iterator;
TbbMultAddReduceFunctor(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<MatrixEntry<index_type, value_type>> const& columnsAndEntries, std::vector<uint64_t> const& rowIndications, std::vector<ValueType> const& x, std::vector<ValueType>& result, std::vector<value_type> const* summand, std::vector<uint_fast64_t>* choices) : dir(dir), rowGroupIndices(rowGroupIndices), columnsAndEntries(columnsAndEntries), rowIndications(rowIndications), x(x), result(result), summand(summand), choices(choices) {
// Intentionally left empty.
}
void operator()(tbb::blocked_range<index_type> const& range) const {
auto groupIt = rowGroupIndices.begin() + range.begin();
auto groupIte = rowGroupIndices.begin() + range.end();
auto rowIt = rowIndications.begin() + *groupIt;
auto elementIt = columnsAndEntries.begin() + *rowIt;
typename std::vector<ValueType>::const_iterator summandIt;
if (summand) {
summandIt = summand->begin() + *groupIt;
}
typename std::vector<uint_fast64_t>::iterator choiceIt;
if (choices) {
choiceIt = choices->begin() + range.begin();
}
auto resultIt = result.begin() + range.begin();
for (; groupIt != groupIte; ++groupIt, ++resultIt, ++choiceIt) {
ValueType currentValue = summand ? *summandIt : storm::utility::zero<ValueType>();
for (auto elementIte = columnsAndEntries.begin() + *(rowIt + 1); elementIt != elementIte; ++elementIt) {
currentValue += elementIt->getValue() * x[elementIt->getColumn()];
}
if (choices) {
*choiceIt = 0;
}
++rowIt;
++summandIt;
for (; static_cast<uint_fast64_t>(std::distance(rowIndications.begin(), rowIt)) < *(groupIt + 1); ++rowIt, ++summandIt) {
ValueType newValue = summand ? *summandIt : storm::utility::zero<ValueType>();
for (auto elementIte = columnsAndEntries.begin() + *(rowIt + 1); elementIt != elementIte; ++elementIt) {
newValue += elementIt->getValue() * x[elementIt->getColumn()];
}
if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) {
currentValue = newValue;
if (choices) {
*choiceIt = std::distance(rowIndications.begin(), rowIt) - *groupIt;
}
}
}
// Finally write value to target vector.
*resultIt = currentValue;
}
}
private:
OptimizationDirection dir;
std::vector<uint64_t> const& rowGroupIndices;
std::vector<MatrixEntry<index_type, value_type>> const& columnsAndEntries;
std::vector<uint64_t> const& rowIndications;
std::vector<ValueType> const& x;
std::vector<ValueType>& result;
std::vector<value_type> const* summand;
std::vector<uint_fast64_t>* choices;
};
template<typename ValueType>
void SparseMatrix<ValueType>::multiplyAndReduceParallel(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType> const& vector, std::vector<ValueType> const* summand, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const {
tbb::parallel_for(tbb::blocked_range<index_type>(0, rowGroupIndices.size() - 1, 10),
[&] (tbb::blocked_range<index_type> const& range) {
index_type startRowGroup = range.begin();
index_type endRowGroup = range.end();
auto rowGroupIt = rowGroupIndices.begin() + startRowGroup;
auto rowIt = rowIndications.begin() + startRowGroup;
auto elementIt = this->begin(*rowIt);
typename std::vector<ValueType>::const_iterator summandIt;
if (summand) {
summandIt = summand->begin();
}
typename std::vector<uint_fast64_t>::iterator choiceIt;
if (choices) {
choiceIt = choices->begin() + startRowGroup;
}
for (auto resultIt = result.begin() + startRowGroup, resultIte = result.begin() + endRow; resultIt != resultIte; ++resultIt, ++choiceIt, ++rowGroupIt) {
ValueType currentValue = summand ? *summandIt : storm::utility::zero<ValueType>();
for (auto elementIte = this->begin() + *(rowIt + 1); elementIt != elementIte; ++elementIt) {
currentValue += elementIt->getValue() * x[elementIt->getColumn()];
}
if (choices) {
*choicesIt = 0;
}
++rowIt;
++summandIt;
for (; *rowIt < *(rowGroupIt + 1); ++rowIt) {
ValueType newValue = summand ? *summandIt : storm::utility::zero<ValueType>();
for (auto elementIte = this->begin() + *(rowIt + 1); elementIt != elementIte; ++elementIt) {
newValue += elementIt->getValue() * x[elementIt->getColumn()];
}
if ((dir == OptimizationDirection::Minimize && newValue < currentValue) || (dir == OptimizationDirection::Maximize && newValue > currentValue)) {
currentValue = newValue;
if (choices) {
*choiceIt = std::distance(rowIndications.begin(), rowIt) - *rowGroupIt;
}
}
}
// Finally write value to target vector.
*resultIt = currentValue;
}
});
tbb::parallel_for(tbb::blocked_range<index_type>(0, rowGroupIndices.size() - 1, 10), TbbMultAddReduceFunctor<ValueType>(dir, rowGroupIndices, columnsAndValues, rowIndications, vector, result, summand, choices));
}
#ifdef STORM_HAVE_CARL
template<>
void SparseMatrix<storm::RationalFunction>::multiplyAndReduceParallel(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<storm::RationalFunction> const& vector, std::vector<storm::RationalFunction> const* summand, std::vector<storm::RationalFunction>& result, std::vector<uint_fast64_t>* choices) const {
STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "This operation is not supported.");
}
#endif
#endif
template<typename ValueType>
void SparseMatrix<ValueType>::multiplyAndReduce(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType> const& vector, std::vector<ValueType> const* summand, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices, bool allowAliasing, MultiplicationDirection const& multiplicationDirection) const {
void SparseMatrix<ValueType>::multiplyAndReduce(OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType> const& vector, std::vector<ValueType> const* summand, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const {
// If the vector and the result are aliases and this is not set to be allowed, we need and temporary vector.
// If the vector and the result are aliases, we need a temporary vector.
std::vector<ValueType>* target;
std::vector<ValueType> temporary;
bool vectorsAliased = &vector == &result;
if (!allowAliasing && vectorsAliased) {
if (&vector == &result) {
STORM_LOG_WARN("Vectors are aliased but are not allowed to be. Using temporary, which is potentially slow.");
temporary = std::vector<ValueType>(vector.size());
target = &temporary;
STORM_LOG_WARN_COND(multiplicationDirection != MultiplicationDirection::DontCare, "Not specifying multiplication direction for aliased vectors may yield unexpected results.");
} else {
target = &result;
}
STORM_LOG_WARN_COND(vectorsAliased || multiplicationDirection == MultiplicationDirection::DontCare, "Setting a multiplication direction for unaliased vectors. Check whether this is intended.");
this->multiplyAndReduceForward(dir, rowGroupIndices, vector, summand, *target, choices);
#ifdef STORM_HAVE_INTELTBB
bool useParallel = !vectorsAliased && multiplicationDirection == MultiplicationDirection::DontCare && this->getNonzeroEntryCount() > 10000;
if (useParallel) {
multiplyAndReduceParallel(dir, rowGroupIndices, vector, summand, *target, choices);
} else {
#endif
if (multiplicationDirection == MultiplicationDirection::Forward || (multiplicationDirection == MultiplicationDirection::DontCare && !vectorsAliased)) {
multiplyAndReduceForward(dir, rowGroupIndices, vector, summand, *target, choices);
} else {
multiplyAndReduceBackward(dir, rowGroupIndices, vector, summand, *target, choices);
}
#ifdef STORM_HAVE_INTELTBB
}
#endif
if (target == &temporary) {
std::swap(temporary, result);
}
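Note (illustrative, not part of the diff): the aliasing guard above redirects the computation into a temporary and swaps it into result at the end, so that entries which were already overwritten are never read again. A minimal standalone sketch of that pattern, with hypothetical names and a doubling operation standing in for the actual row-times-vector work:
#include <utility>
#include <vector>
void multiplyLike(std::vector<double> const& x, std::vector<double>& result) {
    std::vector<double> temporary;
    std::vector<double>* target = &result;
    if (&x == &result) {
        // Aliased: write into a temporary instead of the (shared) result vector.
        temporary.resize(x.size());
        target = &temporary;
    }
    for (std::size_t i = 0; i < x.size(); ++i) {
        (*target)[i] = 2.0 * x[i];
    }
    if (target == &temporary) {
        // Move the temporary into the caller-visible result.
        std::swap(temporary, result);
    }
}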

42
src/storm/storage/SparseMatrix.h

@ -776,23 +776,21 @@ namespace storm {
template<typename OtherValueType, typename ResultValueType = OtherValueType>
std::vector<ResultValueType> getPointwiseProductRowSumVector(storm::storage::SparseMatrix<OtherValueType> const& otherMatrix) const;
enum class MultiplicationDirection {
Forward, Backward, DontCare
};
/*!
* Multiplies the matrix with the given vector and writes the result to the given result vector.
*
* @param vector The vector with which to multiply the matrix.
* @param result The vector that is supposed to hold the result of the multiplication after the operation.
* @param summand If given, this summand will be added to the result of the multiplication.
* @param allowAliasing If set, the vector and the result vector may be identical, in which case the multiplication
* reuses values that have already been updated during this multiplication (as in Gauss-Seidel).
* @param multiplicationDirection The direction in which to perform the multiplication. If the vector and the
* result vector are aliased, the direction makes a difference, because different values are reused.
* @return The product of the matrix and the given vector as the content of the given result vector.
*/
void multiplyWithVector(std::vector<value_type> const& vector, std::vector<value_type>& result, std::vector<value_type> const* summand = nullptr, bool allowAliasing = false, MultiplicationDirection const& multiplicationDirection = MultiplicationDirection::DontCare) const;
void multiplyWithVector(std::vector<value_type> const& vector, std::vector<value_type>& result, std::vector<value_type> const* summand = nullptr) const;
void multiplyWithVectorForward(std::vector<value_type> const& vector, std::vector<value_type>& result, std::vector<value_type> const* summand = nullptr) const;
void multiplyWithVectorBackward(std::vector<value_type> const& vector, std::vector<value_type>& result, std::vector<value_type> const* summand = nullptr) const;
#ifdef STORM_HAVE_INTELTBB
void multiplyWithVectorParallel(std::vector<value_type> const& vector, std::vector<value_type>& result, std::vector<value_type> const* summand = nullptr) const;
#endif
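A hedged usage sketch for the simplified interface declared above: result is overwritten with A * x plus the optional summand. Matrix construction is omitted; 'matrix' is assumed to be an already built storm::storage::SparseMatrix<double>.
// Assumes 'matrix' is an already built storm::storage::SparseMatrix<double>.
std::vector<double> x(matrix.getColumnCount(), 1.0);
std::vector<double> b(matrix.getRowCount(), 0.5);
std::vector<double> result(matrix.getRowCount());
matrix.multiplyWithVector(x, result, &b);   // result = A * x + b
matrix.multiplyWithVector(x, result);       // result = A * x (summand defaults to nullptr)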
/*!
* Multiplies the matrix with the given vector, reduces it according to the given direction and writes
@ -804,13 +802,15 @@ namespace storm {
* @param summand If given, this summand will be added to the result of the multiplication.
* @param result The vector that is supposed to hold the result of the multiplication after the operation.
* @param choices If given, the choices made in the reduction process will be written to this vector.
* @param allowAliasing If set, the vector and the result vector may be identical, in which case the multiplication
* reuses values that have already been updated during this multiplication (as in Gauss-Seidel).
* @param multiplicationDirection The direction in which to perform the multiplication. If the vector and the
* result vector are aliased, the direction makes a difference, because different values are reused.
* @return The product of the matrix and the given vector as the content of the given result vector.
* @return The resulting vector as the content of the given result vector.
*/
void multiplyAndReduce(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType> const& vector, std::vector<ValueType> const* summand, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices, bool allowAliasing = false, MultiplicationDirection const& multiplicationDirection = MultiplicationDirection::DontCare) const;
void multiplyAndReduce(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType> const& vector, std::vector<ValueType> const* summand, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const;
void multiplyAndReduceForward(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType> const& vector, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const;
void multiplyAndReduceBackward(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType> const& vector, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const;
#ifdef STORM_HAVE_INTELTBB
void multiplyAndReduceParallel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType> const& vector, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const;
#endif
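Likewise, a hedged sketch of a single multiply-and-reduce step as used in value iteration for MDPs, against the simplified signature added above. 'matrix' is assumed to be a storm::storage::SparseMatrix<double> whose row grouping matches 'groups'; choices receives, per group, the index of the selected row relative to the start of that group.
// Two row groups: rows {0, 1} belong to group 0, row {2} to group 1.
std::vector<uint64_t> groups = {0, 2, 3};
std::vector<double> x(matrix.getColumnCount(), 0.0);
std::vector<double> result(groups.size() - 1);
std::vector<uint_fast64_t> choices(groups.size() - 1);
// For each group, take the minimum over its rows of (row * x); no summand here.
matrix.multiplyAndReduce(storm::solver::OptimizationDirection::Minimize, groups, x, nullptr, result, &choices);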
/*!
* Multiplies a single row of the matrix with the given vector and returns the result
@ -1074,18 +1074,6 @@ namespace storm {
*/
SparseMatrix getSubmatrix(storm::storage::BitVector const& rowGroupConstraint, storm::storage::BitVector const& columnConstraint, std::vector<index_type> const& rowGroupIndices, bool insertDiagonalEntries = false) const;
void multiplyWithVectorForward(std::vector<value_type> const& vector, std::vector<value_type>& result, std::vector<value_type> const* summand = nullptr) const;
void multiplyWithVectorBackward(std::vector<value_type> const& vector, std::vector<value_type>& result, std::vector<value_type> const* summand = nullptr) const;
#ifdef STORM_HAVE_INTELTBB
void multiplyWithVectorParallel(std::vector<value_type> const& vector, std::vector<value_type>& result, std::vector<value_type> const* summand = nullptr) const;
#endif
void multiplyAndReduceForward(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType> const& vector, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const;
void multiplyAndReduceBackward(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType> const& vector, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const;
#ifdef STORM_HAVE_INTELTBB
void multiplyAndReduceParallel(storm::solver::OptimizationDirection const& dir, std::vector<uint64_t> const& rowGroupIndices, std::vector<ValueType> const& vector, std::vector<ValueType> const* b, std::vector<ValueType>& result, std::vector<uint_fast64_t>* choices) const;
#endif
// The number of rows of the matrix.
index_type rowCount;

57
src/storm/utility/VectorHelper.cpp

@ -0,0 +1,57 @@
#include "storm/utility/VectorHelper.h"
#include "storm/settings/SettingsManager.h"
#include "storm/settings/modules/CoreSettings.h"
#include "storm/adapters/RationalNumberAdapter.h"
#include "storm/adapters/RationalFunctionAdapter.h"
#include "storm-config.h"
#include "storm/utility/vector.h"
#include "storm/utility/macros.h"
#include "storm/exceptions/InvalidSettingsException.h"
#include "storm/exceptions/NotSupportedException.h"
namespace storm {
namespace utility {
template<typename ValueType>
VectorHelper<ValueType>::VectorHelper() : doParallelize(storm::settings::getModule<storm::settings::modules::CoreSettings>().isUseIntelTbbSet()) {
#ifndef STORM_HAVE_INTELTBB
STORM_LOG_THROW(!doParallelize, storm::exceptions::InvalidSettingsException, "Cannot parallelize without TBB.");
#endif
}
template<typename ValueType>
bool VectorHelper<ValueType>::parallelize() const {
return doParallelize;
}
template<typename ValueType>
void VectorHelper<ValueType>::reduceVector(storm::solver::OptimizationDirection dir, std::vector<ValueType> const& source, std::vector<ValueType>& target, std::vector<uint_fast64_t> const& rowGrouping, std::vector<uint_fast64_t>* choices) const {
#ifdef STORM_HAVE_INTELTBB
if (this->parallelize()) {
storm::utility::vector::reduceVectorMinOrMaxParallel(dir, source, target, rowGrouping, choices);
} else {
storm::utility::vector::reduceVectorMinOrMax(dir, source, target, rowGrouping, choices);
}
#else
storm::utility::vector::reduceVectorMinOrMax(dir, source, target, rowGrouping, choices);
#endif
}
template<>
void VectorHelper<storm::RationalFunction>::reduceVector(storm::solver::OptimizationDirection dir, std::vector<storm::RationalFunction> const& source, std::vector<storm::RationalFunction>& target, std::vector<uint_fast64_t> const& rowGrouping, std::vector<uint_fast64_t>* choices) const {
STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "This operation is not supported.");
}
template class VectorHelper<double>;
#ifdef STORM_HAVE_CARL
template class VectorHelper<storm::RationalNumber>;
template class VectorHelper<storm::RationalFunction>;
#endif
}
}

25
src/storm/utility/VectorHelper.h

@ -0,0 +1,25 @@
#pragma once
#include <vector>
#include <cstdint>
#include "storm/solver/OptimizationDirection.h"
namespace storm {
namespace utility {
template<typename ValueType>
class VectorHelper {
public:
VectorHelper();
void reduceVector(storm::solver::OptimizationDirection dir, std::vector<ValueType> const& source, std::vector<ValueType>& target, std::vector<uint_fast64_t> const& rowGrouping, std::vector<uint_fast64_t>* choices = nullptr) const;
bool parallelize() const;
private:
// A flag that stores whether parallelization is to be used.
bool doParallelize;
};
}
}
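A hedged sketch of how the new helper is meant to be used: the constructor captures the TBB flag from CoreSettings once, and reduceVector then dispatches to the parallel or the sequential reduction accordingly. This assumes Storm's settings modules have already been initialized.
#include "storm/utility/VectorHelper.h"
storm::utility::VectorHelper<double> helper;           // captures isUseIntelTbbSet() at construction
std::vector<double> source = {1.0, 3.0, 2.0, 0.5};
std::vector<uint_fast64_t> rowGrouping = {0, 2, 4};    // two groups: rows {0, 1} and rows {2, 3}
std::vector<double> target(rowGrouping.size() - 1);
helper.reduceVector(storm::solver::OptimizationDirection::Maximize, source, target, rowGrouping);
// target == {3.0, 2.0}, computed in parallel iff the TBB setting is enabled.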

198
src/storm/utility/vector.h

@ -309,7 +309,21 @@ namespace storm {
*/
template<class InValueType1, class InValueType2, class OutValueType, class Operation>
void applyPointwiseTernary(std::vector<InValueType1> const& firstOperand, std::vector<InValueType2> const& secondOperand, std::vector<OutValueType>& target, Operation f = Operation()) {
auto firstIt = firstOperand.begin();
auto firstIte = firstOperand.end();
auto secondIt = secondOperand.begin();
auto targetIt = target.begin();
while (firstIt != firstIte) {
*targetIt = f(*firstIt, *secondIt, *targetIt);
++targetIt;
++firstIt;
++secondIt;
}
}
#ifdef STORM_HAVE_INTELTBB
template<class InValueType1, class InValueType2, class OutValueType, class Operation>
void applyPointwiseTernaryParallel(std::vector<InValueType1> const& firstOperand, std::vector<InValueType2> const& secondOperand, std::vector<OutValueType>& target, Operation f = Operation()) {
tbb::parallel_for(tbb::blocked_range<uint_fast64_t>(0, target.size()),
[&](tbb::blocked_range<uint_fast64_t> const& range) {
auto firstIt = firstOperand.begin() + range.begin();
@ -323,19 +337,8 @@ namespace storm {
++secondIt;
}
});
#else
auto firstIt = firstOperand.begin();
auto firstIte = firstOperand.end();
auto secondIt = secondOperand.begin();
auto targetIt = target.begin();
while (firstIt != firstIte) {
*targetIt = f(*firstIt, *secondIt, *targetIt);
++targetIt;
++firstIt;
++secondIt;
}
#endif
}
#endif
/*!
* Applies the given operation pointwise on the two given vectors and writes the result to the third vector.
@ -347,15 +350,19 @@ namespace storm {
*/
template<class InValueType1, class InValueType2, class OutValueType, class Operation>
void applyPointwise(std::vector<InValueType1> const& firstOperand, std::vector<InValueType2> const& secondOperand, std::vector<OutValueType>& target, Operation f = Operation()) {
std::transform(firstOperand.begin(), firstOperand.end(), secondOperand.begin(), target.begin(), f);
}
#ifdef STORM_HAVE_INTELTBB
template<class InValueType1, class InValueType2, class OutValueType, class Operation>
void applyPointwiseParallel(std::vector<InValueType1> const& firstOperand, std::vector<InValueType2> const& secondOperand, std::vector<OutValueType>& target, Operation f = Operation()) {
tbb::parallel_for(tbb::blocked_range<uint_fast64_t>(0, target.size()),
[&](tbb::blocked_range<uint_fast64_t> const& range) {
std::transform(firstOperand.begin() + range.begin(), firstOperand.begin() + range.end(), secondOperand.begin() + range.begin(), target.begin() + range.begin(), f);
});
#else
std::transform(firstOperand.begin(), firstOperand.end(), secondOperand.begin(), target.begin(), f);
#endif
}
#endif
/*!
* Applies the given function pointwise on the given vector.
@ -366,15 +373,18 @@ namespace storm {
*/
template<class InValueType, class OutValueType, class Operation>
void applyPointwise(std::vector<InValueType> const& operand, std::vector<OutValueType>& target, Operation f = Operation()) {
std::transform(operand.begin(), operand.end(), target.begin(), f);
}
#ifdef STORM_HAVE_INTELTBB
template<class InValueType, class OutValueType, class Operation>
void applyPointwiseParallel(std::vector<InValueType> const& operand, std::vector<OutValueType>& target, Operation f = Operation()) {
tbb::parallel_for(tbb::blocked_range<uint_fast64_t>(0, target.size()),
[&](tbb::blocked_range<uint_fast64_t> const& range) {
std::transform(operand.begin() + range.begin(), operand.begin() + range.end(), target.begin() + range.begin(), f);
});
#else
std::transform(operand.begin(), operand.end(), target.begin(), f);
#endif
}
#endif
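The parallel variants above mirror their sequential counterparts one-to-one and are only compiled when STORM_HAVE_INTELTBB is defined, so callers choose explicitly instead of relying on a hidden #ifdef inside a single function. A hedged usage sketch; the element-wise functor is arbitrary:
#include <functional>
#include <vector>
std::vector<double> a = {1.0, 2.0, 3.0};
std::vector<double> b = {4.0, 5.0, 6.0};
std::vector<double> c(a.size());
#ifdef STORM_HAVE_INTELTBB
storm::utility::vector::applyPointwiseParallel(a, b, c, std::plus<double>());
#else
storm::utility::vector::applyPointwise(a, b, c, std::plus<double>());
#endif
// c == {5.0, 7.0, 9.0}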
/*!
* Adds the two given vectors and writes the result to the target vector.
@ -593,6 +603,73 @@ namespace storm {
return current;
}
#ifdef STORM_HAVE_INTELTBB
template<class T, class Filter>
class TbbReduceVectorFunctor {
public:
TbbReduceVectorFunctor(std::vector<T> const& source, std::vector<T>& target, std::vector<uint_fast64_t> const& rowGrouping, std::vector<uint_fast64_t>* choices, Filter const& f) : source(source), target(target), rowGrouping(rowGrouping), choices(choices), f(f) {
// Intentionally left empty.
}
void operator()(tbb::blocked_range<uint64_t> const& range) const {
uint_fast64_t startRow = range.begin();
uint_fast64_t endRow = range.end();
typename std::vector<T>::iterator targetIt = target.begin() + startRow;
typename std::vector<T>::iterator targetIte = target.begin() + endRow;
typename std::vector<uint_fast64_t>::const_iterator rowGroupingIt = rowGrouping.begin() + startRow;
typename std::vector<T>::const_iterator sourceIt = source.begin() + *rowGroupingIt;
typename std::vector<T>::const_iterator sourceIte;
typename std::vector<uint_fast64_t>::iterator choiceIt;
uint_fast64_t localChoice;
if (choices != nullptr) {
choiceIt = choices->begin() + startRow;
}
for (; targetIt != targetIte; ++targetIt, ++rowGroupingIt) {
// Only do work if the row group is not empty.
if (*rowGroupingIt != *(rowGroupingIt + 1)) {
*targetIt = *sourceIt;
++sourceIt;
localChoice = 1;
if (choices != nullptr) {
*choiceIt = 0;
}
for (sourceIte = source.begin() + *(rowGroupingIt + 1); sourceIt != sourceIte; ++sourceIt, ++localChoice) {
if (f(*sourceIt, *targetIt)) {
*targetIt = *sourceIt;
if (choices != nullptr) {
*choiceIt = localChoice;
}
}
}
if (choices != nullptr) {
++choiceIt;
}
} else {
// Compensate for the 'wrong' move forward in the loop header.
--targetIt;
// Record dummy choice.
if (choices != nullptr) {
*choiceIt = 0;
++choiceIt;
}
}
}
}
private:
std::vector<T> const& source;
std::vector<T>& target;
std::vector<uint_fast64_t> const& rowGrouping;
std::vector<uint_fast64_t>* choices;
Filter const& f;
};
#endif
/*!
* Reduces the given source vector by selecting an element according to the given filter out of each row group.
*
@ -606,58 +683,6 @@ namespace storm {
template<class T, class Filter>
void reduceVector(std::vector<T> const& source, std::vector<T>& target, std::vector<uint_fast64_t> const& rowGrouping, std::vector<uint_fast64_t>* choices) {
Filter f;
#ifdef STORM_HAVE_INTELTBB
tbb::parallel_for(tbb::blocked_range<uint_fast64_t>(0, target.size()),
[&](tbb::blocked_range<uint_fast64_t> const& range) {
uint_fast64_t startRow = range.begin();
uint_fast64_t endRow = range.end();
typename std::vector<T>::iterator targetIt = target.begin() + startRow;
typename std::vector<T>::iterator targetIte = target.begin() + endRow;
typename std::vector<uint_fast64_t>::const_iterator rowGroupingIt = rowGrouping.begin() + startRow;
typename std::vector<T>::const_iterator sourceIt = source.begin() + *rowGroupingIt;
typename std::vector<T>::const_iterator sourceIte;
typename std::vector<uint_fast64_t>::iterator choiceIt;
uint_fast64_t localChoice;
if (choices != nullptr) {
choiceIt = choices->begin();
}
for (; targetIt != targetIte; ++targetIt, ++rowGroupingIt) {
// Only do work if the row group is not empty.
if (*rowGroupingIt != *(rowGroupingIt + 1)) {
*targetIt = *sourceIt;
++sourceIt;
localChoice = 1;
if (choices != nullptr) {
*choiceIt = 0;
}
for (sourceIte = source.begin() + *(rowGroupingIt + 1); sourceIt != sourceIte; ++sourceIt, ++localChoice) {
if (f(*sourceIt, *targetIt)) {
*targetIt = *sourceIt;
if (choices != nullptr) {
*choiceIt = localChoice;
}
}
}
if (choices != nullptr) {
++choiceIt;
}
} else {
// Compensate for the 'wrong' move forward in the loop header.
--targetIt;
// Record dummy choice.
if (choices != nullptr) {
*choiceIt = 0;
++choiceIt;
}
}
}
});
#else
typename std::vector<T>::iterator targetIt = target.begin();
typename std::vector<T>::iterator targetIte = target.end();
typename std::vector<uint_fast64_t>::const_iterator rowGroupingIt = rowGrouping.begin();
@ -700,10 +725,14 @@ namespace storm {
}
}
}
#endif
}
#ifdef STORM_HAVE_INTELTBB
template<class T, class Filter>
void reduceVectorParallel(std::vector<T> const& source, std::vector<T>& target, std::vector<uint_fast64_t> const& rowGrouping, std::vector<uint_fast64_t>* choices) {
tbb::parallel_for(tbb::blocked_range<uint64_t>(0, target.size()), TbbReduceVectorFunctor<T, Filter>(source, target, rowGrouping, choices, Filter()));
}
#endif
/*!
* Reduces the given source vector by selecting the smallest element out of each row group.
@ -718,6 +747,13 @@ namespace storm {
reduceVector<T, std::less<T>>(source, target, rowGrouping, choices);
}
#ifdef STORM_HAVE_INTELTBB
template<class T>
void reduceVectorMinParallel(std::vector<T> const& source, std::vector<T>& target, std::vector<uint_fast64_t> const& rowGrouping, std::vector<uint_fast64_t>* choices = nullptr) {
reduceVector<T, std::less<T>>(source, target, rowGrouping, choices);
}
#endif
/*!
* Reduces the given source vector by selecting the largest element out of each row group.
*
@ -731,6 +767,13 @@ namespace storm {
reduceVector<T, std::greater<T>>(source, target, rowGrouping, choices);
}
#ifdef STORM_HAVE_INTELTBB
template<class T>
void reduceVectorMaxParallel(std::vector<T> const& source, std::vector<T>& target, std::vector<uint_fast64_t> const& rowGrouping, std::vector<uint_fast64_t>* choices = nullptr) {
reduceVector<T, std::greater<T>>(source, target, rowGrouping, choices);
}
#endif
/*!
* Reduces the given source vector by selecting either the smallest or the largest out of each row group.
*
@ -749,6 +792,17 @@ namespace storm {
}
}
#ifdef STORM_HAVE_INTELTBB
template<class T>
void reduceVectorMinOrMaxParallel(storm::solver::OptimizationDirection dir, std::vector<T> const& source, std::vector<T>& target, std::vector<uint_fast64_t> const& rowGrouping, std::vector<uint_fast64_t>* choices = nullptr) {
if (dir == storm::solver::OptimizationDirection::Minimize) {
reduceVectorMinParallel(source, target, rowGrouping, choices);
} else {
reduceVectorMaxParallel(source, target, rowGrouping, choices);
}
}
#endif
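One detail worth spelling out for both the sequential and the parallel reduction: the entries written to choices are indices relative to the start of the respective row group, not absolute row indices, and empty groups record a dummy choice of 0. A hedged worked example using the sequential wrapper:
std::vector<double> source = {1.0, 3.0, 2.0, 0.5, 4.0};
std::vector<uint_fast64_t> rowGrouping = {0, 2, 5};      // group 0: rows 0-1, group 1: rows 2-4
std::vector<double> target(rowGrouping.size() - 1);
std::vector<uint_fast64_t> choices(rowGrouping.size() - 1);
storm::utility::vector::reduceVectorMinOrMax(storm::solver::OptimizationDirection::Maximize, source, target, rowGrouping, &choices);
// target  == {3.0, 4.0}
// choices == {1, 2}: row 1 within group 0, row 4 (offset 2 within group 1).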
/*!
* Compares the given elements and determines whether they are equal modulo the given precision. The provided flag
* additionally specifies whether the error is computed in relative or absolute terms.