//g++ -O3 -g0 -DNDEBUG sparse_product.cpp -I.. -I/home/gael/Coding/LinearAlgebra/mtl4/ -DDENSITY=0.005 -DSIZE=10000 && ./a.out
//g++ -O3 -g0 -DNDEBUG sparse_product.cpp -I.. -I/home/gael/Coding/LinearAlgebra/mtl4/ -DDENSITY=0.05 -DSIZE=2000 && ./a.out
// -DNOGMM -DNOMTL -DCSPARSE
// -I /home/gael/Coding/LinearAlgebra/CSparse/Include/ /home/gael/Coding/LinearAlgebra/CSparse/Lib/libcsparse.a

// standard headers used directly in this file
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

#ifndef SIZE
#define SIZE 100000
#endif

#ifndef NBPERROW
#define NBPERROW 24
#endif

#ifndef REPEAT
#define REPEAT 2
#endif

#ifndef NBTRIES
#define NBTRIES 2
#endif

#ifndef KK
#define KK 10
#endif

#ifndef NOGOOGLE
#define EIGEN_GOOGLEHASH_SUPPORT
#include <google/sparse_hash_map>
#endif

#include "BenchSparseUtil.h"

#define CHECK_MEM
// #define CHECK_MEM std/**/::cout << "check mem\n"; getchar();
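
// BENCH(X): reset the timer, then run the statement(s) X REPEAT times per try,
// for NBTRIES tries, between timer.start()/timer.stop(); the measured time is
// read back through timer.value() right after the macro.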
#define BENCH(X) \
  timer.reset(); \
  for (int _j=0; _j<NBTRIES; ++_j) { \
    timer.start(); \
    for (int _k=0; _k<REPEAT; ++_k) { \
      X \
    } timer.stop(); }

typedef std::vector<Vector2i> Coordinates;
typedef std::vector<float> Values;

EIGEN_DONT_INLINE Scalar* setinnerrand_eigen(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_dynamic(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_compact(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_sumeq(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_gnu_hash(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_google_dense(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_google_sparse(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_scipy(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_ublas_mapped(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_ublas_coord(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_ublas_compressed(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_ublas_genvec(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_mtl(const Coordinates& coords, const Values& vals);
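
// Each benchmark function declared above fills a SIZE x SIZE sparse matrix
// from the same list of (coordinate, value) pairs, most of them accumulating
// duplicate entries; most return the address of one coefficient so the
// compiler cannot discard the work.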

int main(int argc, char *argv[])
{
  int rows = SIZE;
  int cols = SIZE;
  bool fullyrand = true;

  BenchTimer timer;
  Coordinates coords;
  Values values;
  if(fullyrand)
  {
    Coordinates pool;
    pool.reserve(cols*NBPERROW);
    std::cerr << "fill pool" << "\n";
    for (int i=0; i<cols*NBPERROW; )
    {
//       DynamicSparseMatrix<int> stencil(SIZE,SIZE);
      Vector2i ij(internal::random<int>(0,rows-1),internal::random<int>(0,cols-1));
//       if(stencil.coeffRef(ij.x(), ij.y())==0)
      {
//         stencil.coeffRef(ij.x(), ij.y()) = 1;
        pool.push_back(ij);
      }
      ++i;
    }
    std::cerr << "pool ok" << "\n";
    int n = cols*NBPERROW*KK;
    coords.reserve(n);
    values.reserve(n);
    for (int i=0; i<n; ++i)
    {
      // pick a random coordinate from the pool; internal::random<int>(a,b) is
      // inclusive, so the upper bound is pool.size()-1
      int k = internal::random<int>(0, int(pool.size())-1);
      coords.push_back(pool[k]);
      values.push_back(internal::random<Scalar>());
    }
  }
  else
  {
    for (int j=0; j<cols; ++j)
    for (int i=0; i<NBPERROW; ++i)
    {
      coords.push_back(Vector2i(internal::random<int>(0,rows-1),j));
      values.push_back(internal::random<Scalar>());
    }
  }
  std::cout << "nnz = " << coords.size() << "\n";
  CHECK_MEM

  // dense matrices
#ifdef DENSEMATRIX
  {
    BENCH(setrand_eigen_dense(coords,values);)
    std::cout << "Eigen Dense\t" << timer.value() << "\n";
  }
#endif

  // eigen sparse matrices
//   if (!fullyrand)
//   {
//     BENCH(setinnerrand_eigen(coords,values);)
//     std::cout << "Eigen fillrand\t" << timer.value() << "\n";
//   }
  {
    BENCH(setrand_eigen_dynamic(coords,values);)
    std::cout << "Eigen dynamic\t" << timer.value() << "\n";
  }
//   {
//     BENCH(setrand_eigen_compact(coords,values);)
//     std::cout << "Eigen compact\t" << timer.value() << "\n";
//   }
  {
    BENCH(setrand_eigen_sumeq(coords,values);)
    std::cout << "Eigen sumeq\t" << timer.value() << "\n";
  }
  {
//     BENCH(setrand_eigen_gnu_hash(coords,values);)
//     std::cout << "Eigen std::map\t" << timer.value() << "\n";
  }
  {
    BENCH(setrand_scipy(coords,values);)
    std::cout << "scipy\t" << timer.value() << "\n";
  }
#ifndef NOGOOGLE
  {
    BENCH(setrand_eigen_google_dense(coords,values);)
    std::cout << "Eigen google dense\t" << timer.value() << "\n";
  }
  {
    BENCH(setrand_eigen_google_sparse(coords,values);)
    std::cout << "Eigen google sparse\t" << timer.value() << "\n";
  }
#endif

#ifndef NOUBLAS
  {
//     BENCH(setrand_ublas_mapped(coords,values);)
//     std::cout << "ublas mapped\t" << timer.value() << "\n";
  }
  {
    BENCH(setrand_ublas_genvec(coords,values);)
    std::cout << "ublas vecofvec\t" << timer.value() << "\n";
  }
  /*{
    timer.reset();
    timer.start();
    for (int k=0; k<REPEAT; ++k)
      setrand_ublas_compressed(coords,values);
    timer.stop();
    std::cout << "ublas comp\t" << timer.value() << "\n";
  }
  {
    timer.reset();
    timer.start();
    for (int k=0; k<REPEAT; ++k)
      setrand_ublas_coord(coords,values);
    timer.stop();
    std::cout << "ublas coord\t" << timer.value() << "\n";
  }*/
#endif

  // MTL4
#ifndef NOMTL
  {
    BENCH(setrand_mtl(coords,values);)
    std::cout << "MTL\t" << timer.value() << "\n";
  }
#endif

  return 0;
}
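
// Direct insertion into a compressed SparseMatrix via insert(); this is cheap
// only when coefficients arrive in an order compatible with the storage,
// which is why the corresponding benchmark in main() is commented out for the
// fully random case.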
EIGEN_DONT_INLINE Scalar* setinnerrand_eigen(const Coordinates& coords, const Values& vals)
{
  using namespace StormEigen;
  SparseMatrix<Scalar> mat(SIZE,SIZE);
  //mat.startFill(2000000/*coords.size()*/);
  for (int i=0; i<coords.size(); ++i)
  {
    mat.insert(coords[i].x(), coords[i].y()) = vals[i];
  }
  mat.finalize();
  CHECK_MEM;
  return 0;
}
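
// Accumulate coefficients in random order through DynamicSparseMatrix, whose
// coeffRef() performs a sorted insertion within each inner vector.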
EIGEN_DONT_INLINE Scalar* setrand_eigen_dynamic(const Coordinates& coords, const Values& vals)
{
  using namespace StormEigen;
  DynamicSparseMatrix<Scalar> mat(SIZE,SIZE);
  mat.reserve(coords.size()/10);
  for (int i=0; i<coords.size(); ++i)
  {
    mat.coeffRef(coords[i].x(), coords[i].y()) += vals[i];
  }
  mat.finalize();
  CHECK_MEM;
  return &mat.coeffRef(coords[0].x(), coords[0].y());
}
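
// Split the coordinate list into KK chunks, build each chunk in a temporary
// DynamicSparseMatrix, and accumulate the chunks with operator+=.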
EIGEN_DONT_INLINE Scalar* setrand_eigen_sumeq(const Coordinates& coords, const Values& vals)
{
  using namespace StormEigen;
  int n = coords.size()/KK;
  DynamicSparseMatrix<Scalar> mat(SIZE,SIZE);
  for (int j=0; j<KK; ++j)
  {
    DynamicSparseMatrix<Scalar> aux(SIZE,SIZE);
    mat.reserve(n);
    for (int i=j*n; i<(j+1)*n; ++i)
    {
      aux.insert(coords[i].x(), coords[i].y()) += vals[i];
    }
    aux.finalize();
    mat += aux;
  }
  return &mat.coeffRef(coords[0].x(), coords[0].y());
}
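
// Same coeffRef()-based accumulation as setrand_eigen_dynamic, but the result
// is finally copied into a compressed SparseMatrix.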
EIGEN_DONT_INLINE Scalar* setrand_eigen_compact(const Coordinates& coords, const Values& vals)
{
  using namespace StormEigen;
  DynamicSparseMatrix<Scalar> setter(SIZE,SIZE);
  setter.reserve(coords.size()/10);
  for (int i=0; i<coords.size(); ++i)
  {
    setter.coeffRef(coords[i].x(), coords[i].y()) += vals[i];
  }
  SparseMatrix<Scalar> mat = setter;
  CHECK_MEM;
  return &mat.coeffRef(coords[0].x(), coords[0].y());
}
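
// RandomSetter buffers the coefficients in intermediate associative
// containers (here std::map via StdMapTraits) and writes them back to the
// SparseMatrix when the setter goes out of scope.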
EIGEN_DONT_INLINE Scalar* setrand_eigen_gnu_hash(const Coordinates& coords, const Values& vals)
{
  using namespace StormEigen;
  SparseMatrix<Scalar> mat(SIZE,SIZE);
  {
    RandomSetter<SparseMatrix<Scalar>, StdMapTraits > setter(mat);
    for (int i=0; i<coords.size(); ++i)
    {
      setter(coords[i].x(), coords[i].y()) += vals[i];
    }
    CHECK_MEM;
  }
  return &mat.coeffRef(coords[0].x(), coords[0].y());
}
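
// Same RandomSetter-based scheme, backed by Google's dense_hash_map and
// sparse_hash_map respectively.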
#ifndef NOGOOGLE
EIGEN_DONT_INLINE Scalar* setrand_eigen_google_dense(const Coordinates& coords, const Values& vals)
{
  using namespace StormEigen;
  SparseMatrix<Scalar> mat(SIZE,SIZE);
  {
    RandomSetter<SparseMatrix<Scalar>, GoogleDenseHashMapTraits> setter(mat);
    for (int i=0; i<coords.size(); ++i)
      setter(coords[i].x(), coords[i].y()) += vals[i];
    CHECK_MEM;
  }
  return &mat.coeffRef(coords[0].x(), coords[0].y());
}

EIGEN_DONT_INLINE Scalar* setrand_eigen_google_sparse(const Coordinates& coords, const Values& vals)
{
  using namespace StormEigen;
  SparseMatrix<Scalar> mat(SIZE,SIZE);
  {
    RandomSetter<SparseMatrix<Scalar>, GoogleSparseHashMapTraits> setter(mat);
    for (int i=0; i<coords.size(); ++i)
      setter(coords[i].x(), coords[i].y()) += vals[i];
    CHECK_MEM;
  }
  return &mat.coeffRef(coords[0].x(), coords[0].y());
}
#endif
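
// The three helpers below follow the same approach as scipy.sparse's
// sparsetools: coo_tocsr() converts a list of (i,j,x) triplets into CSR
// arrays (Bp/Bj/Bx), csr_sort_indices() sorts the column indices within each
// row, and csr_sum_duplicates() collapses repeated entries.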
template <class T>
void coo_tocsr(const int n_row,
               const int n_col,
               const int nnz,
               const Coordinates& Aij,
               const Values& Ax,
               int Bp[],
               int Bj[],
               T Bx[])
{
  //compute number of non-zero entries per row of A coo_tocsr
  std::fill(Bp, Bp + n_row, 0);

  for (int n = 0; n < nnz; n++){
    Bp[Aij[n].x()]++;
  }

  //cumsum the nnz per row to get Bp[]
  for(int i = 0, cumsum = 0; i < n_row; i++){
    int temp = Bp[i];
    Bp[i] = cumsum;
    cumsum += temp;
  }
  Bp[n_row] = nnz;

  //write Aj,Ax into Bj,Bx
  for(int n = 0; n < nnz; n++){
    int row  = Aij[n].x();
    int dest = Bp[row];

    Bj[dest] = Aij[n].y();
    Bx[dest] = Ax[n];

    Bp[row]++;
  }

  for(int i = 0, last = 0; i <= n_row; i++){
    int temp = Bp[i];
    Bp[i] = last;
    last  = temp;
  }

  //now Bp,Bj,Bx form a CSR representation (with possible duplicates)
}
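
// In-place sort of the column indices (and matching values) of each CSR row,
// using a temporary vector of (index,value) pairs.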
template< class T1, class T2 >
bool kv_pair_less(const std::pair<T1,T2>& x, const std::pair<T1,T2>& y){
  return x.first < y.first;
}

template<class I, class T>
void csr_sort_indices(const I n_row,
                      const I Ap[],
                            I Aj[],
                            T Ax[])
{
  std::vector< std::pair<I,T> > temp;

  for(I i = 0; i < n_row; i++){
    I row_start = Ap[i];
    I row_end   = Ap[i+1];

    temp.clear();

    for(I jj = row_start; jj < row_end; jj++){
      temp.push_back(std::make_pair(Aj[jj],Ax[jj]));
    }

    std::sort(temp.begin(),temp.end(),kv_pair_less<I,T>);

    for(I jj = row_start, n = 0; jj < row_end; jj++, n++){
      Aj[jj] = temp[n].first;
      Ax[jj] = temp[n].second;
    }
  }
}
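
// Merge consecutive entries with the same column index within each row
// (requires sorted indices) and compact Ap/Aj/Ax in place; the new number of
// nonzeros ends up in Ap[n_row].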
template <class I, class T>
void csr_sum_duplicates(const I n_row,
                        const I n_col,
                              I Ap[],
                              I Aj[],
                              T Ax[])
{
  I nnz = 0;
  I row_end = 0;
  for(I i = 0; i < n_row; i++){
    I jj = row_end;
    row_end = Ap[i+1];
    while( jj < row_end ){
      I j = Aj[jj];
      T x = Ax[jj];
      jj++;
      while( jj < row_end && Aj[jj] == j ){
        x += Ax[jj];
        jj++;
      }
      Aj[nnz] = j;
      Ax[nnz] = x;
      nnz++;
    }
    Ap[i+1] = nnz;
  }
}
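
// scipy-style assembly: write the triplets straight into the matrix's
// internal compressed-storage arrays, then sort the inner indices and sum the
// duplicate entries, mimicking scipy's coo_matrix -> csr_matrix conversion.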
EIGEN_DONT_INLINE Scalar* setrand_scipy(const Coordinates& coords, const Values& vals)
{
  using namespace StormEigen;
  SparseMatrix<Scalar> mat(SIZE,SIZE);
  mat.resizeNonZeros(coords.size());
//   std::cerr << "setrand_scipy...\n";
  coo_tocsr<Scalar>(SIZE,SIZE, coords.size(), coords, vals, mat._outerIndexPtr(), mat._innerIndexPtr(), mat._valuePtr());
//   std::cerr << "coo_tocsr ok\n";

  csr_sort_indices(SIZE, mat._outerIndexPtr(), mat._innerIndexPtr(), mat._valuePtr());
  csr_sum_duplicates(SIZE, SIZE, mat._outerIndexPtr(), mat._innerIndexPtr(), mat._valuePtr());

  mat.resizeNonZeros(mat._outerIndexPtr()[SIZE]);

  return &mat.coeffRef(coords[0].x(), coords[0].y());
}
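
// boost::numeric::ublas variants: build an intermediate container that
// supports random insertion, then convert it to a compressed_matrix.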
#ifndef NOUBLAS
EIGEN_DONT_INLINE Scalar* setrand_ublas_mapped(const Coordinates& coords, const Values& vals)
{
  using namespace boost;
  using namespace boost::numeric;
  using namespace boost::numeric::ublas;
  mapped_matrix<Scalar> aux(SIZE,SIZE);
  for (int i=0; i<coords.size(); ++i)
  {
    aux(coords[i].x(), coords[i].y()) += vals[i];
  }
  CHECK_MEM;
  compressed_matrix<Scalar> mat(aux);
  return 0;// &mat(coords[0].x(), coords[0].y());
}

/*EIGEN_DONT_INLINE Scalar* setrand_ublas_coord(const Coordinates& coords, const Values& vals)
{
  using namespace boost;
  using namespace boost::numeric;
  using namespace boost::numeric::ublas;
  coordinate_matrix<Scalar> aux(SIZE,SIZE);
  for (int i=0; i<coords.size(); ++i)
  {
    aux(coords[i].x(), coords[i].y()) = vals[i];
  }
  compressed_matrix<Scalar> mat(aux);
  return 0;//&mat(coords[0].x(), coords[0].y());
}
EIGEN_DONT_INLINE Scalar* setrand_ublas_compressed(const Coordinates& coords, const Values& vals)
{
  using namespace boost;
  using namespace boost::numeric;
  using namespace boost::numeric::ublas;
  compressed_matrix<Scalar> mat(SIZE,SIZE);
  for (int i=0; i<coords.size(); ++i)
  {
    mat(coords[i].x(), coords[i].y()) = vals[i];
  }
  return 0;//&mat(coords[0].x(), coords[0].y());
}*/
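
// generalized_vector_of_vector stores one sparse (coordinate) vector per row;
// the result is converted to a compressed_matrix at the end.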
EIGEN_DONT_INLINE Scalar* setrand_ublas_genvec(const Coordinates& coords, const Values& vals)
{
  using namespace boost;
  using namespace boost::numeric;
  using namespace boost::numeric::ublas;

//   ublas::vector<coordinate_vector<Scalar> > foo;
  generalized_vector_of_vector<Scalar, row_major, ublas::vector<coordinate_vector<Scalar> > > aux(SIZE,SIZE);
  for (int i=0; i<coords.size(); ++i)
  {
    aux(coords[i].x(), coords[i].y()) += vals[i];
  }
  CHECK_MEM;
  compressed_matrix<Scalar,row_major> mat(aux);
  return 0;//&mat(coords[0].x(), coords[0].y());
}
#endif

#ifndef NOMTL
EIGEN_DONT_INLINE Scalar* setrand_mtl(const Coordinates& coords, const Values& vals);
#endif