  1. /* -*- c++ -*- (enables emacs c++ mode) */
  2. /*===========================================================================
  3. Copyright (C) 2002-2012 Yves Renard
  4. This file is a part of GETFEM++
  5. Getfem++ is free software; you can redistribute it and/or modify it
  6. under the terms of the GNU Lesser General Public License as published
  7. by the Free Software Foundation; either version 3 of the License, or
  8. (at your option) any later version along with the GCC Runtime Library
  9. Exception either version 3.1 or (at your option) any later version.
  10. This program is distributed in the hope that it will be useful, but
  11. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  12. or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
  13. License and GCC Runtime Library Exception for more details.
  14. You should have received a copy of the GNU Lesser General Public License
  15. along with this program; if not, write to the Free Software Foundation,
  16. Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
  17. As a special exception, you may use this file as it is a part of a free
  18. software library without restriction. Specifically, if other files
  19. instantiate templates or use macros or inline functions from this file,
  20. or you compile this file and link it with other files to produce an
  21. executable, this file does not by itself cause the resulting executable
  22. to be covered by the GNU Lesser General Public License. This exception
  23. does not however invalidate any other reasons why the executable file
  24. might be covered by the GNU Lesser General Public License.
  25. ===========================================================================*/
  26. /**@file gmm_solver_Schwarz_additive.h
  27. @author Yves Renard <Yves.Renard@insa-lyon.fr>
  28. @author Michel Fournie <fournie@mip.ups-tlse.fr>
  29. @date October 13, 2002.
  30. */
  31. #ifndef GMM_SOLVERS_SCHWARZ_ADDITIVE_H__
  32. #define GMM_SOLVERS_SCHWARZ_ADDITIVE_H__
  33. #include "gmm_kernel.h"
  34. #include "gmm_superlu_interface.h"
  35. #include "gmm_solver_cg.h"
  36. #include "gmm_solver_gmres.h"
  37. #include "gmm_solver_bicgstab.h"
  38. #include "gmm_solver_qmr.h"
  39. namespace gmm {
  40. /* ******************************************************************** */
  41. /* Additive Schwarz interfaced local solvers */
  42. /* ******************************************************************** */
  43. struct using_cg {};
  44. struct using_gmres {};
  45. struct using_bicgstab {};
  46. struct using_qmr {};
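/* These empty tag types select, at compile time, which method is used for
   the subdomain solves and for the global solve: an instance (for example
   gmm::using_gmres()) is passed as the local_solver / global_solver argument
   of additive_schwarz further below.  When GMM_USES_SUPERLU is defined, the
   additional tag using_superlu (declared below) selects a direct local
   factorization instead. */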
  47. template <typename P, typename local_solver, typename Matrix>
  48. struct actual_precond {
  49. typedef P APrecond;
  50. static const APrecond &transform(const P &PP) { return PP; }
  51. };
  52. template <typename Matrix1, typename Precond, typename Vector>
  53. void AS_local_solve(using_cg, const Matrix1 &A, Vector &x, const Vector &b,
  54. const Precond &P, iteration &iter)
  55. { cg(A, x, b, P, iter); }
  56. template <typename Matrix1, typename Precond, typename Vector>
  57. void AS_local_solve(using_gmres, const Matrix1 &A, Vector &x,
  58. const Vector &b, const Precond &P, iteration &iter)
  59. { gmres(A, x, b, P, 100, iter); }
  60. template <typename Matrix1, typename Precond, typename Vector>
  61. void AS_local_solve(using_bicgstab, const Matrix1 &A, Vector &x,
  62. const Vector &b, const Precond &P, iteration &iter)
  63. { bicgstab(A, x, b, P, iter); }
  64. template <typename Matrix1, typename Precond, typename Vector>
  65. void AS_local_solve(using_qmr, const Matrix1 &A, Vector &x,
  66. const Vector &b, const Precond &P, iteration &iter)
  67. { qmr(A, x, b, P, iter); }
  68. #if defined(GMM_USES_SUPERLU)
  69. struct using_superlu {};
  70. template <typename P, typename Matrix>
  71. struct actual_precond<P, using_superlu, Matrix> {
  72. typedef typename linalg_traits<Matrix>::value_type value_type;
  73. typedef SuperLU_factor<value_type> APrecond;
  74. template <typename PR>
  75. static APrecond transform(const PR &) { return APrecond(); }
  76. static const APrecond &transform(const APrecond &PP) { return PP; }
  77. };
  78. template <typename Matrix1, typename Precond, typename Vector>
  79. void AS_local_solve(using_superlu, const Matrix1 &, Vector &x,
  80. const Vector &b, const Precond &P, iteration &iter)
  81. { P.solve(x, b); iter.set_iteration(1); }
  82. #endif
  83. /* ******************************************************************** */
  84. /* Additive Schwarz Linear system */
  85. /* ******************************************************************** */
  86. template <typename Matrix1, typename Matrix2, typename Precond,
  87. typename local_solver>
  88. struct add_schwarz_mat{
  89. typedef typename linalg_traits<Matrix1>::value_type value_type;
  90. const Matrix1 *A;
  91. const std::vector<Matrix2> *vB;
  92. std::vector<Matrix2> vAloc;
  93. mutable iteration iter;
  94. double residual;
  95. mutable size_type itebilan;
  96. mutable std::vector<std::vector<value_type> > gi, fi;
  97. std::vector<typename actual_precond<Precond, local_solver,
  98. Matrix1>::APrecond> precond1;
  99. void init(const Matrix1 &A_, const std::vector<Matrix2> &vB_,
  100. iteration iter_, const Precond &P, double residual_);
  101. add_schwarz_mat(void) {}
  102. add_schwarz_mat(const Matrix1 &A_, const std::vector<Matrix2> &vB_,
  103. iteration iter_, const Precond &P, double residual_)
  104. { init(A_, vB_, iter_, P, residual_); }
  105. };
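/* add_schwarz_mat stores the global matrix A, the subdomain restriction
   matrices Bi (the entries of vB), the local matrices vAloc[i] = Bi^T A Bi
   and their local preconditioners/factorizations.  The mult() overloads
   further below apply the additive Schwarz preconditioned operator
   q = sum_i Bi vAloc[i]^{-1} Bi^T A p, so that this object can be handed to
   any of the interfaced global Krylov solvers. */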
  106. template <typename Matrix1, typename Matrix2, typename Precond,
  107. typename local_solver>
  108. void add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver>::init(
  109. const Matrix1 &A_, const std::vector<Matrix2> &vB_,
  110. iteration iter_, const Precond &P, double residual_) {
  111. vB = &vB_; A = &A_; iter = iter_;
  112. residual = residual_;
  113. size_type nb_sub = vB->size();
  114. vAloc.resize(nb_sub);
  115. gi.resize(nb_sub); fi.resize(nb_sub);
  116. precond1.resize(nb_sub);
  117. std::fill(precond1.begin(), precond1.end(),
  118. actual_precond<Precond, local_solver, Matrix1>::transform(P));
  119. itebilan = 0;
120. if (iter.get_noisy()) cout << "Init for subdomain ";
  121. #ifdef GMM_USES_MPI
  122. int size,tranche,borne_sup,borne_inf,rank,tag1=11,tag2=12,tag3=13,sizepr = 0;
  123. // int tab[4];
  124. double t_ref,t_final;
  125. MPI_Status status;
  126. t_ref=MPI_Wtime();
  127. MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  128. MPI_Comm_size(MPI_COMM_WORLD, &size);
  129. tranche=nb_sub/size;
  130. borne_inf=rank*tranche;
  131. borne_sup=(rank+1)*tranche;
  132. // if (rank==size-1) borne_sup = nb_sub;
  133. cout << "Nombre de sous domaines " << borne_sup - borne_inf << endl;
  134. int sizeA = mat_nrows(*A);
  135. gmm::csr_matrix<value_type> Acsr(sizeA, sizeA), Acsrtemp(sizeA, sizeA);
  136. gmm::copy(gmm::eff_matrix(*A), Acsr);
  137. int next = (rank + 1) % size;
  138. int previous = (rank + size - 1) % size;
139. // Communication of local information on a ring pattern.
140. // Each process receives Nproc-1 contributions.
  141. for (int nproc = 0; nproc < size; ++nproc) {
  142. for (size_type i = size_type(borne_inf); i < size_type(borne_sup); ++i) {
  143. // for (size_type i = 0; i < nb_sub/size; ++i) {
  144. // for (size_type i = 0; i < nb_sub; ++i) {
  145. // size_type i=(rank+size*(j-1)+nb_sub)%nb_sub;
  146. cout << "Sous domaines " << i << " : " << mat_ncols((*vB)[i]) << endl;
  147. #else
  148. for (size_type i = 0; i < nb_sub; ++i) {
  149. #endif
  150. if (iter.get_noisy()) cout << i << " " << std::flush;
  151. Matrix2 Maux(mat_ncols((*vB)[i]), mat_nrows((*vB)[i]));
  152. #ifdef GMM_USES_MPI
  153. Matrix2 Maux2(mat_ncols((*vB)[i]), mat_ncols((*vB)[i]));
  154. if (nproc == 0) {
  155. gmm::resize(vAloc[i], mat_ncols((*vB)[i]), mat_ncols((*vB)[i]));
  156. gmm::clear(vAloc[i]);
  157. }
  158. gmm::mult(gmm::transposed((*vB)[i]), Acsr, Maux);
  159. gmm::mult(Maux, (*vB)[i], Maux2);
  160. gmm::add(Maux2, vAloc[i]);
  161. #else
  162. gmm::resize(vAloc[i], mat_ncols((*vB)[i]), mat_ncols((*vB)[i]));
  163. gmm::mult(gmm::transposed((*vB)[i]), *A, Maux);
  164. gmm::mult(Maux, (*vB)[i], vAloc[i]);
  165. #endif
  166. #ifdef GMM_USES_MPI
  167. if (nproc == size - 1 ) {
  168. #endif
  169. precond1[i].build_with(vAloc[i]);
  170. gmm::resize(fi[i], mat_ncols((*vB)[i]));
  171. gmm::resize(gi[i], mat_ncols((*vB)[i]));
  172. #ifdef GMM_USES_MPI
  173. }
  174. #else
  175. }
  176. #endif
  177. #ifdef GMM_USES_MPI
  178. }
  179. if (nproc != size - 1) {
  180. MPI_Sendrecv(Acsr.jc, sizeA+1, MPI_INT, next, tag2,
  181. Acsrtemp.jc, sizeA+1,MPI_INT,previous,tag2,
  182. MPI_COMM_WORLD,&status);
  183. if (Acsrtemp.jc[sizeA] > size_type(sizepr)) {
  184. sizepr = Acsrtemp.jc[sizeA];
  185. delete[] Acsrtemp.pr; delete[] Acsrtemp.ir;
  186. Acsrtemp.pr = new value_type[sizepr];
  187. Acsrtemp.ir = new unsigned int[sizepr];
  188. }
  189. MPI_Sendrecv(Acsr.ir, Acsr.jc[sizeA], MPI_INT, next, tag1,
  190. Acsrtemp.ir, Acsrtemp.jc[sizeA],MPI_INT,previous,tag1,
  191. MPI_COMM_WORLD,&status);
  192. MPI_Sendrecv(Acsr.pr, Acsr.jc[sizeA], mpi_type(value_type()), next, tag3,
  193. Acsrtemp.pr, Acsrtemp.jc[sizeA],mpi_type(value_type()),previous,tag3,
  194. MPI_COMM_WORLD,&status);
  195. gmm::copy(Acsrtemp, Acsr);
  196. }
  197. }
  198. t_final=MPI_Wtime();
  199. cout<<"temps boucle precond "<< t_final-t_ref<<endl;
  200. #endif
  201. if (iter.get_noisy()) cout << "\n";
  202. }
  203. template <typename Matrix1, typename Matrix2, typename Precond,
  204. typename Vector2, typename Vector3, typename local_solver>
  205. void mult(const add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver> &M,
  206. const Vector2 &p, Vector3 &q) {
  207. size_type itebilan = 0;
  208. #ifdef GMM_USES_MPI
  209. static double tmult_tot = 0.0;
  210. double t_ref = MPI_Wtime();
  211. #endif
  212. // cout << "tmult AS begin " << endl;
  213. mult(*(M.A), p, q);
  214. #ifdef GMM_USES_MPI
  215. tmult_tot += MPI_Wtime()-t_ref;
  216. cout << "tmult_tot = " << tmult_tot << endl;
  217. #endif
  218. std::vector<double> qbis(gmm::vect_size(q));
  219. std::vector<double> qter(gmm::vect_size(q));
  220. #ifdef GMM_USES_MPI
  221. // MPI_Status status;
  222. // MPI_Request request,request1;
  223. // int tag=111;
  224. int size,tranche,borne_sup,borne_inf,rank;
  225. size_type nb_sub=M.fi.size();
  226. MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  227. MPI_Comm_size(MPI_COMM_WORLD, &size);
  228. tranche=nb_sub/size;
  229. borne_inf=rank*tranche;
  230. borne_sup=(rank+1)*tranche;
  231. // if (rank==size-1) borne_sup=nb_sub;
  232. // int next = (rank + 1) % size;
  233. // int previous = (rank + size - 1) % size;
  234. t_ref = MPI_Wtime();
  235. for (size_type i = size_type(borne_inf); i < size_type(borne_sup); ++i)
  236. // for (size_type i = 0; i < nb_sub/size; ++i)
  237. // for (size_type j = 0; j < nb_sub; ++j)
  238. #else
  239. for (size_type i = 0; i < M.fi.size(); ++i)
  240. #endif
  241. {
  242. #ifdef GMM_USES_MPI
  243. // size_type i=j; // (rank+size*(j-1)+nb_sub)%nb_sub;
  244. #endif
  245. gmm::mult(gmm::transposed((*(M.vB))[i]), q, M.fi[i]);
  246. M.iter.init();
  247. AS_local_solve(local_solver(), (M.vAloc)[i], (M.gi)[i],
  248. (M.fi)[i],(M.precond1)[i],M.iter);
  249. itebilan = std::max(itebilan, M.iter.get_iteration());
  250. }
  251. #ifdef GMM_USES_MPI
  252. cout << "First AS loop time " << MPI_Wtime() - t_ref << endl;
  253. #endif
  254. gmm::clear(q);
  255. #ifdef GMM_USES_MPI
  256. t_ref = MPI_Wtime();
  257. // for (size_type j = 0; j < nb_sub; ++j)
  258. for (size_type i = size_type(borne_inf); i < size_type(borne_sup); ++i)
  259. #else
  260. for (size_type i = 0; i < M.gi.size(); ++i)
  261. #endif
  262. {
  263. #ifdef GMM_USES_MPI
  264. // size_type i=j; // (rank+size*(j-1)+nb_sub)%nb_sub;
  265. // gmm::mult((*(M.vB))[i], M.gi[i], qbis,qbis);
  266. gmm::mult((*(M.vB))[i], M.gi[i], qter);
  267. add(qter,qbis,qbis);
  268. #else
  269. gmm::mult((*(M.vB))[i], M.gi[i], q, q);
  270. #endif
  271. }
  272. #ifdef GMM_USES_MPI
273. // WARNING: this add is needed only if you use the ring pattern below,
274. // i.e. only with an explicit ring-pattern communication.
  275. // add(qbis,q,q);
  276. cout << "Second AS loop time " << MPI_Wtime() - t_ref << endl;
  277. #endif
  278. #ifdef GMM_USES_MPI
  279. // int tag1=11;
  280. static double t_tot = 0.0;
  281. double t_final;
  282. t_ref=MPI_Wtime();
  283. // int next = (rank + 1) % size;
  284. // int previous = (rank + size - 1) % size;
285. // Communication of local information on a ring pattern.
286. // Each process receives Nproc-1 contributions.
  287. // if (size > 1) {
  288. // for (int nproc = 0; nproc < size-1; ++nproc)
  289. // {
  290. // MPI_Sendrecv(&(qbis[0]), gmm::vect_size(q), MPI_DOUBLE, next, tag1,
  291. // &(qter[0]), gmm::vect_size(q),MPI_DOUBLE,previous,tag1,
  292. // MPI_COMM_WORLD,&status);
  293. // gmm::copy(qter, qbis);
  294. // add(qbis,q,q);
  295. // }
  296. // }
  297. MPI_Allreduce(&(qbis[0]), &(q[0]),gmm::vect_size(q), MPI_DOUBLE,
  298. MPI_SUM,MPI_COMM_WORLD);
  299. t_final=MPI_Wtime();
  300. t_tot += t_final-t_ref;
  301. cout<<"["<< rank<<"] temps reduce Resol "<< t_final-t_ref << " t_tot = " << t_tot << endl;
  302. #endif
  303. if (M.iter.get_noisy() > 0) cout << "itebloc = " << itebilan << endl;
  304. M.itebilan += itebilan;
  305. M.iter.set_resmax((M.iter.get_resmax() + M.residual) * 0.5);
  306. }
  307. template <typename Matrix1, typename Matrix2, typename Precond,
  308. typename Vector2, typename Vector3, typename local_solver>
  309. void mult(const add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver> &M,
  310. const Vector2 &p, const Vector3 &q) {
  311. mult(M, p, const_cast<Vector3 &>(q));
  312. }
  313. template <typename Matrix1, typename Matrix2, typename Precond,
  314. typename Vector2, typename Vector3, typename Vector4,
  315. typename local_solver>
  316. void mult(const add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver> &M,
  317. const Vector2 &p, const Vector3 &p2, Vector4 &q)
  318. { mult(M, p, q); add(p2, q); }
  319. template <typename Matrix1, typename Matrix2, typename Precond,
  320. typename Vector2, typename Vector3, typename Vector4,
  321. typename local_solver>
  322. void mult(const add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver> &M,
  323. const Vector2 &p, const Vector3 &p2, const Vector4 &q)
  324. { mult(M, p, const_cast<Vector4 &>(q)); add(p2, q); }
  325. /* ******************************************************************** */
  326. /* Additive Schwarz interfaced global solvers */
  327. /* ******************************************************************** */
  328. template <typename ASM_type, typename Vect>
  329. void AS_global_solve(using_cg, const ASM_type &ASM, Vect &x,
  330. const Vect &b, iteration &iter)
  331. { cg(ASM, x, b, *(ASM.A), identity_matrix(), iter); }
  332. template <typename ASM_type, typename Vect>
  333. void AS_global_solve(using_gmres, const ASM_type &ASM, Vect &x,
  334. const Vect &b, iteration &iter)
  335. { gmres(ASM, x, b, identity_matrix(), 100, iter); }
  336. template <typename ASM_type, typename Vect>
  337. void AS_global_solve(using_bicgstab, const ASM_type &ASM, Vect &x,
  338. const Vect &b, iteration &iter)
  339. { bicgstab(ASM, x, b, identity_matrix(), iter); }
  340. template <typename ASM_type, typename Vect>
  341. void AS_global_solve(using_qmr,const ASM_type &ASM, Vect &x,
  342. const Vect &b, iteration &iter)
  343. { qmr(ASM, x, b, identity_matrix(), iter); }
  344. #if defined(GMM_USES_SUPERLU)
  345. template <typename ASM_type, typename Vect>
  346. void AS_global_solve(using_superlu, const ASM_type &, Vect &,
  347. const Vect &, iteration &) {
  348. GMM_ASSERT1(false, "You cannot use SuperLU as "
  349. "global solver in additive Schwarz meethod");
  350. }
  351. #endif
  352. /* ******************************************************************** */
  353. /* Linear Additive Schwarz method */
  354. /* ******************************************************************** */
  355. /* ref : Domain decomposition algorithms for the p-version finite */
  356. /* element method for elliptic problems, Luca F. Pavarino, */
  357. /* PhD thesis, Courant Institute of Mathematical Sciences, 1992. */
  358. /* ******************************************************************** */
359. /** Function to call if the ASM matrix is precomputed for successive solves
  360. * with the same system.
  361. */
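/* Usage sketch for repeated solves with the same matrix (schematic only; A,
   vB, u, f and a preconditioner P providing build_with() are assumed to be
   set up by the caller, with concrete types as in the driver function further
   below):

     gmm::iteration outer_it(1E-8), local_it(1E-8);
     local_it.reduce_noisy(); local_it.set_maxiter(gmm::size_type(-1));
     gmm::add_schwarz_mat<Matrix1, Matrix2, Precond, gmm::using_cg>
       ASM(A, vB, local_it, P, outer_it.get_resmax());
     gmm::additive_schwarz(ASM, u, f, outer_it, gmm::using_cg());   // first rhs
     outer_it.init();
     gmm::additive_schwarz(ASM, u, f2, outer_it, gmm::using_cg());  // second rhs
*/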
  362. template <typename Matrix1, typename Matrix2,
  363. typename Vector2, typename Vector3, typename Precond,
  364. typename local_solver, typename global_solver>
  365. void additive_schwarz(
  366. add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver> &ASM, Vector3 &u,
  367. const Vector2 &f, iteration &iter, const global_solver&) {
  368. typedef typename linalg_traits<Matrix1>::value_type value_type;
  369. size_type nb_sub = ASM.vB->size(), nb_dof = gmm::vect_size(f);
  370. ASM.itebilan = 0;
  371. std::vector<value_type> g(nb_dof);
  372. std::vector<value_type> gbis(nb_dof);
  373. #ifdef GMM_USES_MPI
  374. double t_init=MPI_Wtime();
  375. int size,tranche,borne_sup,borne_inf,rank;
  376. MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  377. MPI_Comm_size(MPI_COMM_WORLD, &size);
  378. tranche=nb_sub/size;
  379. borne_inf=rank*tranche;
  380. borne_sup=(rank+1)*tranche;
  381. // if (rank==size-1) borne_sup=nb_sub*size;
  382. for (size_type i = size_type(borne_inf); i < size_type(borne_sup); ++i)
  383. // for (size_type i = 0; i < nb_sub/size; ++i)
  384. // for (size_type j = 0; j < nb_sub; ++j)
  385. // for (size_type i = rank; i < nb_sub; i+=size)
  386. #else
  387. for (size_type i = 0; i < nb_sub; ++i)
  388. #endif
  389. {
  390. #ifdef GMM_USES_MPI
  391. // size_type i=j; // (rank+size*(j-1)+nb_sub)%nb_sub;
  392. #endif
  393. gmm::mult(gmm::transposed((*(ASM.vB))[i]), f, ASM.fi[i]);
  394. ASM.iter.init();
  395. AS_local_solve(local_solver(), ASM.vAloc[i], ASM.gi[i], ASM.fi[i],
  396. ASM.precond1[i], ASM.iter);
  397. ASM.itebilan = std::max(ASM.itebilan, ASM.iter.get_iteration());
  398. #ifdef GMM_USES_MPI
  399. gmm::mult((*(ASM.vB))[i], ASM.gi[i], gbis,gbis);
  400. #else
  401. gmm::mult((*(ASM.vB))[i], ASM.gi[i], g, g);
  402. #endif
  403. }
  404. #ifdef GMM_USES_MPI
  405. cout<<"temps boucle init "<< MPI_Wtime()-t_init<<endl;
  406. double t_ref,t_final;
  407. t_ref=MPI_Wtime();
  408. MPI_Allreduce(&(gbis[0]), &(g[0]),gmm::vect_size(g), MPI_DOUBLE,
  409. MPI_SUM,MPI_COMM_WORLD);
  410. t_final=MPI_Wtime();
  411. cout<<"temps reduce init "<< t_final-t_ref<<endl;
  412. #endif
  413. #ifdef GMM_USES_MPI
  414. t_ref=MPI_Wtime();
  415. cout<<"begin global AS"<<endl;
  416. #endif
  417. AS_global_solve(global_solver(), ASM, u, g, iter);
  418. #ifdef GMM_USES_MPI
  419. t_final=MPI_Wtime();
  420. cout<<"temps AS Global Solve "<< t_final-t_ref<<endl;
  421. #endif
  422. if (iter.get_noisy())
  423. cout << "Total number of internal iterations : " << ASM.itebilan << endl;
  424. }
  425. /** Global function. Compute the ASM matrix and call the previous function.
426. * The ASM matrix represents the preconditioned linear system.
  427. */
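/* Usage sketch (schematic; the assembly of A, f and of the restriction
   matrices vB -- each Bi has one row per global dof and one column per
   subdomain dof -- is left to the caller, and the concrete types below are
   only an illustration):

     size_t n = ...;                                        // global dof count
     gmm::row_matrix< gmm::rsvector<double> > A(n, n);      // assembled system
     std::vector<double> u(n), f(n);                        // unknown and rhs
     std::vector< gmm::row_matrix< gmm::rsvector<double> > > vB;  // one Bi per subdomain
     gmm::iteration iter(1E-8);
     // Direct local solves via SuperLU (needs GMM_USES_SUPERLU); the
     // preconditioner argument is then ignored.  With an iterative local
     // solver such as gmm::using_cg(), pass instead a preconditioner
     // providing build_with() (e.g. gmm::ildlt_precond).
     gmm::additive_schwarz(A, u, f, gmm::identity_matrix(), vB, iter,
                           gmm::using_superlu(), gmm::using_cg());
*/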
  428. template <typename Matrix1, typename Matrix2,
  429. typename Vector2, typename Vector3, typename Precond,
  430. typename local_solver, typename global_solver>
  431. void additive_schwarz(const Matrix1 &A, Vector3 &u,
  432. const Vector2 &f, const Precond &P,
  433. const std::vector<Matrix2> &vB,
  434. iteration &iter, local_solver,
  435. global_solver) {
  436. iter.set_rhsnorm(vect_norm2(f));
  437. if (iter.get_rhsnorm() == 0.0) { gmm::clear(u); return; }
  438. iteration iter2 = iter; iter2.reduce_noisy();
  439. iter2.set_maxiter(size_type(-1));
  440. add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver>
  441. ASM(A, vB, iter2, P, iter.get_resmax());
  442. additive_schwarz(ASM, u, f, iter, global_solver());
  443. }
  444. /* ******************************************************************** */
  445. /* Sequential Non-Linear Additive Schwarz method */
  446. /* ******************************************************************** */
447. /* ref : Nonlinearly Preconditioned Inexact Newton Algorithms, */
448. /* Xiao-Chuan Cai, David E. Keyes, */
449. /* SIAM J. Sci. Comput. 24: pp. 183-200. */
  450. /* ******************************************************************** */
  451. template <typename Matrixt, typename MatrixBi>
  452. class NewtonAS_struct {
  453. public :
  454. typedef Matrixt tangent_matrix_type;
  455. typedef MatrixBi B_matrix_type;
  456. typedef typename linalg_traits<Matrixt>::value_type value_type;
  457. typedef std::vector<value_type> Vector;
  458. virtual size_type size(void) = 0;
  459. virtual const std::vector<MatrixBi> &get_vB() = 0;
  460. virtual void compute_F(Vector &f, Vector &x) = 0;
  461. virtual void compute_tangent_matrix(Matrixt &M, Vector &x) = 0;
  462. // compute Bi^T grad(F(X)) Bi
  463. virtual void compute_sub_tangent_matrix(Matrixt &Mloc, Vector &x,
  464. size_type i) = 0;
  465. // compute Bi^T F(X)
  466. virtual void compute_sub_F(Vector &fi, Vector &x, size_type i) = 0;
  467. virtual ~NewtonAS_struct() {}
  468. };
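/* A minimal sketch of a problem description derived from NewtonAS_struct
   (schematic only: Matrixt, MatrixBi and the elided assembly code are
   placeholders to be supplied by the caller):

     struct my_problem : public gmm::NewtonAS_struct<Matrixt, MatrixBi> {
       std::vector<MatrixBi> vB;                     // subdomain restrictions
       size_type size(void) { return gmm::mat_nrows(vB[0]); }
       const std::vector<MatrixBi> &get_vB() { return vB; }
       void compute_F(Vector &f, Vector &x) { ... assemble F(x) into f ... }
       void compute_tangent_matrix(Matrixt &M, Vector &x) { ... grad F(x) ... }
       void compute_sub_tangent_matrix(Matrixt &Mloc, Vector &x, size_type i)
       { ... Bi^T grad(F(x)) Bi ... }
       void compute_sub_F(Vector &fi, Vector &x, size_type i)
       { ... Bi^T F(x) ... }
     };

   It is then passed to Newton_additive_Schwarz (defined below), e.g.

     my_problem pb;            // ... fill pb.vB and the problem data ...
     std::vector<double> u(pb.size());
     gmm::iteration iter(1E-8);
     gmm::Newton_additive_Schwarz(pb, u, iter, P,
                                  gmm::using_superlu(), gmm::using_cg());

   where P is a local preconditioner (it is ignored when the local solver is
   gmm::using_superlu()). */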
  469. template <typename Matrixt, typename MatrixBi>
  470. struct AS_exact_gradient {
  471. const std::vector<MatrixBi> &vB;
  472. std::vector<Matrixt> vM;
  473. std::vector<Matrixt> vMloc;
  474. void init(void) {
  475. for (size_type i = 0; i < vB.size(); ++i) {
  476. Matrixt aux(gmm::mat_ncols(vB[i]), gmm::mat_ncols(vM[i]));
  477. gmm::resize(vMloc[i], gmm::mat_ncols(vB[i]), gmm::mat_ncols(vB[i]));
  478. gmm::mult(gmm::transposed(vB[i]), vM[i], aux);
  479. gmm::mult(aux, vB[i], vMloc[i]);
  480. }
  481. }
  482. AS_exact_gradient(const std::vector<MatrixBi> &vB_) : vB(vB_) {
  483. vM.resize(vB.size()); vMloc.resize(vB.size());
  484. for (size_type i = 0; i < vB.size(); ++i) {
  485. gmm::resize(vM[i], gmm::mat_nrows(vB[i]), gmm::mat_nrows(vB[i]));
  486. }
  487. }
  488. };
  489. template <typename Matrixt, typename MatrixBi,
  490. typename Vector2, typename Vector3>
  491. void mult(const AS_exact_gradient<Matrixt, MatrixBi> &M,
  492. const Vector2 &p, Vector3 &q) {
  493. gmm::clear(q);
  494. typedef typename gmm::linalg_traits<Vector3>::value_type T;
  495. std::vector<T> v(gmm::vect_size(p)), w, x;
  496. for (size_type i = 0; i < M.vB.size(); ++i) {
  497. w.resize(gmm::mat_ncols(M.vB[i]));
  498. x.resize(gmm::mat_ncols(M.vB[i]));
  499. gmm::mult(M.vM[i], p, v);
  500. gmm::mult(gmm::transposed(M.vB[i]), v, w);
  501. double rcond;
  502. SuperLU_solve(M.vMloc[i], x, w, rcond);
  503. // gmm::iteration iter(1E-10, 0, 100000);
  504. //gmm::gmres(M.vMloc[i], x, w, gmm::identity_matrix(), 50, iter);
  505. gmm::mult_add(M.vB[i], x, q);
  506. }
  507. }
  508. template <typename Matrixt, typename MatrixBi,
  509. typename Vector2, typename Vector3>
  510. void mult(const AS_exact_gradient<Matrixt, MatrixBi> &M,
  511. const Vector2 &p, const Vector3 &q) {
  512. mult(M, p, const_cast<Vector3 &>(q));
  513. }
  514. template <typename Matrixt, typename MatrixBi,
  515. typename Vector2, typename Vector3, typename Vector4>
  516. void mult(const AS_exact_gradient<Matrixt, MatrixBi> &M,
  517. const Vector2 &p, const Vector3 &p2, Vector4 &q)
  518. { mult(M, p, q); add(p2, q); }
  519. template <typename Matrixt, typename MatrixBi,
  520. typename Vector2, typename Vector3, typename Vector4>
  521. void mult(const AS_exact_gradient<Matrixt, MatrixBi> &M,
  522. const Vector2 &p, const Vector3 &p2, const Vector4 &q)
  523. { mult(M, p, const_cast<Vector4 &>(q)); add(p2, q); }
  524. struct S_default_newton_line_search {
  525. double conv_alpha, conv_r;
  526. size_t it, itmax, glob_it;
  527. double alpha, alpha_old, alpha_mult, first_res, alpha_max_ratio;
  528. double alpha_min_ratio, alpha_min;
  529. size_type count, count_pat;
  530. bool max_ratio_reached;
  531. double alpha_max_ratio_reached, r_max_ratio_reached;
  532. size_type it_max_ratio_reached;
  533. double converged_value(void) { return conv_alpha; };
  534. double converged_residual(void) { return conv_r; };
  535. virtual void init_search(double r, size_t git, double = 0.0) {
  536. alpha_min_ratio = 0.9;
  537. alpha_min = 1e-10;
  538. alpha_max_ratio = 10.0;
  539. alpha_mult = 0.25;
  540. itmax = size_type(-1);
  541. glob_it = git; if (git <= 1) count_pat = 0;
  542. conv_alpha = alpha = alpha_old = 1.;
  543. conv_r = first_res = r; it = 0;
  544. count = 0;
  545. max_ratio_reached = false;
  546. }
  547. virtual double next_try(void) {
  548. alpha_old = alpha;
  549. if (alpha >= 0.4) alpha *= 0.5; else alpha *= alpha_mult; ++it;
  550. return alpha_old;
  551. }
  552. virtual bool is_converged(double r, double = 0.0) {
  553. // cout << "r = " << r << " alpha = " << alpha / alpha_mult << " count_pat = " << count_pat << endl;
  554. if (!max_ratio_reached && r < first_res * alpha_max_ratio) {
  555. alpha_max_ratio_reached = alpha_old; r_max_ratio_reached = r;
  556. it_max_ratio_reached = it; max_ratio_reached = true;
  557. }
  558. if (max_ratio_reached && r < r_max_ratio_reached * 0.5
  559. && r > first_res * 1.1 && it <= it_max_ratio_reached+1) {
  560. alpha_max_ratio_reached = alpha_old; r_max_ratio_reached = r;
  561. it_max_ratio_reached = it;
  562. }
  563. if (count == 0 || r < conv_r)
  564. { conv_r = r; conv_alpha = alpha_old; count = 1; }
  565. if (conv_r < first_res) ++count;
  566. if (r < first_res * alpha_min_ratio)
567. { count_pat = 0; return true; }
  568. if (count >= 5 || (alpha < alpha_min && max_ratio_reached)) {
  569. if (conv_r < first_res * 0.99) count_pat = 0;
  570. if (/*gmm::random() * 50. < -log(conv_alpha)-4.0 ||*/ count_pat >= 3)
  571. { conv_r=r_max_ratio_reached; conv_alpha=alpha_max_ratio_reached; }
  572. if (conv_r >= first_res * 0.9999) count_pat++;
  573. return true;
  574. }
  575. return false;
  576. }
  577. S_default_newton_line_search(void) { count_pat = 0; }
  578. };
  579. template <typename Matrixt, typename MatrixBi, typename Vector,
  580. typename Precond, typename local_solver, typename global_solver>
  581. void Newton_additive_Schwarz(NewtonAS_struct<Matrixt, MatrixBi> &NS,
  582. const Vector &u_,
  583. iteration &iter, const Precond &P,
  584. local_solver, global_solver) {
  585. Vector &u = const_cast<Vector &>(u_);
  586. typedef typename linalg_traits<Vector>::value_type value_type;
  587. typedef typename number_traits<value_type>::magnitude_type mtype;
  588. typedef actual_precond<Precond, local_solver, Matrixt> chgt_precond;
  589. double residual = iter.get_resmax();
  590. S_default_newton_line_search internal_ls;
  591. S_default_newton_line_search external_ls;
  592. typename chgt_precond::APrecond PP = chgt_precond::transform(P);
  593. iter.set_rhsnorm(mtype(1));
  594. iteration iternc(iter);
  595. iternc.reduce_noisy(); iternc.set_maxiter(size_type(-1));
  596. iteration iter2(iternc);
  597. iteration iter3(iter2); iter3.reduce_noisy();
  598. iteration iter4(iter3);
  599. iternc.set_name("Local Newton");
  600. iter2.set_name("Linear System for Global Newton");
  601. iternc.set_resmax(residual/100.0);
  602. iter3.set_resmax(residual/10000.0);
  603. iter2.set_resmax(residual/1000.0);
  604. iter4.set_resmax(residual/1000.0);
  605. std::vector<value_type> rhs(NS.size()), x(NS.size()), d(NS.size());
  606. std::vector<value_type> xi, xii, fi, di;
  607. std::vector< std::vector<value_type> > vx(NS.get_vB().size());
  608. for (size_type i = 0; i < NS.get_vB().size(); ++i) // for exact gradient
  609. vx[i].resize(NS.size()); // for exact gradient
  610. Matrixt Mloc, M(NS.size(), NS.size());
  611. NS.compute_F(rhs, u);
  612. mtype act_res=gmm::vect_norm2(rhs), act_res_new(0), precond_res = act_res;
  613. mtype alpha;
  614. while(!iter.finished(std::min(act_res, precond_res))) {
  615. for (int SOR_step = 0; SOR_step >= 0; --SOR_step) {
  616. gmm::clear(rhs);
  617. for (size_type isd = 0; isd < NS.get_vB().size(); ++isd) {
  618. const MatrixBi &Bi = (NS.get_vB())[isd];
  619. size_type si = mat_ncols(Bi);
  620. gmm::resize(Mloc, si, si);
  621. xi.resize(si); xii.resize(si); fi.resize(si); di.resize(si);
  622. iternc.init();
  623. iternc.set_maxiter(30); // ?
  624. if (iternc.get_noisy())
  625. cout << "Non-linear local problem " << isd << endl;
  626. gmm::clear(xi);
  627. gmm::copy(u, x);
  628. NS.compute_sub_F(fi, x, isd); gmm::scale(fi, value_type(-1));
  629. mtype r = gmm::vect_norm2(fi), r_t(r);
  630. if (r > value_type(0)) {
  631. iternc.set_rhsnorm(std::max(r, mtype(1)));
  632. while(!iternc.finished(r)) {
  633. NS.compute_sub_tangent_matrix(Mloc, x, isd);
  634. PP.build_with(Mloc);
  635. iter3.init();
  636. AS_local_solve(local_solver(), Mloc, di, fi, PP, iter3);
  637. internal_ls.init_search(r, iternc.get_iteration());
  638. do {
  639. alpha = internal_ls.next_try();
  640. gmm::add(xi, gmm::scaled(di, -alpha), xii);
  641. gmm::mult(Bi, gmm::scaled(xii, -1.0), u, x);
  642. NS.compute_sub_F(fi, x, isd); gmm::scale(fi, value_type(-1));
  643. r_t = gmm::vect_norm2(fi);
  644. } while (!internal_ls.is_converged(r_t));
  645. if (alpha != internal_ls.converged_value()) {
  646. alpha = internal_ls.converged_value();
  647. gmm::add(xi, gmm::scaled(di, -alpha), xii);
  648. gmm::mult(Bi, gmm::scaled(xii, -1.0), u, x);
  649. NS.compute_sub_F(fi, x, isd); gmm::scale(fi, value_type(-1));
  650. r_t = gmm::vect_norm2(fi);
  651. }
  652. gmm::copy(x, vx[isd]); // for exact gradient
  653. if (iternc.get_noisy()) cout << "(step=" << alpha << ")\t";
  654. ++iternc; r = r_t; gmm::copy(xii, xi);
  655. }
  656. if (SOR_step) gmm::mult(Bi, gmm::scaled(xii, -1.0), u, u);
  657. gmm::mult(Bi, gmm::scaled(xii, -1.0), rhs, rhs);
  658. }
  659. }
  660. precond_res = gmm::vect_norm2(rhs);
  661. if (SOR_step) cout << "SOR step residual = " << precond_res << endl;
  662. if (precond_res < residual) break;
  663. cout << "Precond residual = " << precond_res << endl;
  664. }
  665. iter2.init();
  666. // solving linear system for the global Newton method
  667. if (0) {
  668. NS.compute_tangent_matrix(M, u);
  669. add_schwarz_mat<Matrixt, MatrixBi, Precond, local_solver>
  670. ASM(M, NS.get_vB(), iter4, P, iter.get_resmax());
  671. AS_global_solve(global_solver(), ASM, d, rhs, iter2);
  672. }
  673. else { // for exact gradient
  674. AS_exact_gradient<Matrixt, MatrixBi> eg(NS.get_vB());
  675. for (size_type i = 0; i < NS.get_vB().size(); ++i) {
  676. NS.compute_tangent_matrix(eg.vM[i], vx[i]);
  677. }
  678. eg.init();
  679. gmres(eg, d, rhs, gmm::identity_matrix(), 50, iter2);
  680. }
  681. // gmm::add(gmm::scaled(rhs, 0.1), u); ++iter;
  682. external_ls.init_search(act_res, iter.get_iteration());
  683. do {
  684. alpha = external_ls.next_try();
  685. gmm::add(gmm::scaled(d, alpha), u, x);
  686. NS.compute_F(rhs, x);
  687. act_res_new = gmm::vect_norm2(rhs);
  688. } while (!external_ls.is_converged(act_res_new));
  689. if (alpha != external_ls.converged_value()) {
  690. alpha = external_ls.converged_value();
  691. gmm::add(gmm::scaled(d, alpha), u, x);
  692. NS.compute_F(rhs, x);
  693. act_res_new = gmm::vect_norm2(rhs);
  694. }
  695. if (iter.get_noisy() > 1) cout << endl;
  696. act_res = act_res_new;
  697. if (iter.get_noisy()) cout << "(step=" << alpha << ")\t unprecond res = " << act_res << " ";
  698. ++iter; gmm::copy(x, u);
  699. }
  700. }
  701. }
  702. #endif // GMM_SOLVERS_SCHWARZ_ADDITIVE_H__