You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

805 lines
29 KiB

  1. /* -*- c++ -*- (enables emacs c++ mode) */
  2. /*===========================================================================
  3. Copyright (C) 2002-2017 Yves Renard
  4. This file is a part of GetFEM++
  5. GetFEM++ is free software; you can redistribute it and/or modify it
  6. under the terms of the GNU Lesser General Public License as published
  7. by the Free Software Foundation; either version 3 of the License, or
  8. (at your option) any later version along with the GCC Runtime Library
  9. Exception either version 3.1 or (at your option) any later version.
  10. This program is distributed in the hope that it will be useful, but
  11. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  12. or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
  13. License and GCC Runtime Library Exception for more details.
  14. You should have received a copy of the GNU Lesser General Public License
  15. along with this program; if not, write to the Free Software Foundation,
  16. Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
  17. As a special exception, you may use this file as it is a part of a free
  18. software library without restriction. Specifically, if other files
  19. instantiate templates or use macros or inline functions from this file,
  20. or you compile this file and link it with other files to produce an
  21. executable, this file does not by itself cause the resulting executable
  22. to be covered by the GNU Lesser General Public License. This exception
  23. does not however invalidate any other reasons why the executable file
  24. might be covered by the GNU Lesser General Public License.
  25. ===========================================================================*/
  26. /**@file gmm_solver_Schwarz_additive.h
  27. @author Yves Renard <Yves.Renard@insa-lyon.fr>
  28. @author Michel Fournie <fournie@mip.ups-tlse.fr>
  29. @date October 13, 2002.
  30. */
  31. #ifndef GMM_SOLVERS_SCHWARZ_ADDITIVE_H__
  32. #define GMM_SOLVERS_SCHWARZ_ADDITIVE_H__
  33. #include "gmm_kernel.h"
  34. #include "gmm_superlu_interface.h"
  35. #include "gmm_solver_cg.h"
  36. #include "gmm_solver_gmres.h"
  37. #include "gmm_solver_bicgstab.h"
  38. #include "gmm_solver_qmr.h"
  39. namespace gmm {
  40. /* ******************************************************************** */
  41. /* Additive Schwarz interfaced local solvers */
  42. /* ******************************************************************** */
// Tag types used to select, at compile time, which iterative Krylov
// solver is applied to the local subdomain problems (and to the global
// preconditioned system) in the additive Schwarz method below.
struct using_cg {};        // conjugate gradient
struct using_gmres {};     // restarted GMRES
struct using_bicgstab {};  // BiCGSTAB
struct using_qmr {};       // quasi-minimal residual
// Maps the user-supplied preconditioner type P to the preconditioner
// actually used with a given local solver.  This generic case forwards
// P unchanged; a specialization below substitutes a SuperLU
// factorization when the local solver is SuperLU.
template <typename P, typename local_solver, typename Matrix>
struct actual_precond {
  typedef P APrecond;
  // Identity transformation: hand back the caller's preconditioner.
  static const APrecond &transform(const P &PP) { return PP; }
};
// Local solve dispatchers: the first (tag) argument selects the method
// applied to the subdomain system A x = b with preconditioner P.
template <typename Matrix1, typename Precond, typename Vector>
void AS_local_solve(using_cg, const Matrix1 &A, Vector &x, const Vector &b,
                    const Precond &P, iteration &iter)
{ cg(A, x, b, P, iter); }
// Local solve with restarted GMRES (restart parameter fixed at 100).
template <typename Matrix1, typename Precond, typename Vector>
void AS_local_solve(using_gmres, const Matrix1 &A, Vector &x,
                    const Vector &b, const Precond &P, iteration &iter)
{ gmres(A, x, b, P, 100, iter); }
// Local solve with BiCGSTAB.
template <typename Matrix1, typename Precond, typename Vector>
void AS_local_solve(using_bicgstab, const Matrix1 &A, Vector &x,
                    const Vector &b, const Precond &P, iteration &iter)
{ bicgstab(A, x, b, P, iter); }
// Local solve with QMR.
template <typename Matrix1, typename Precond, typename Vector>
void AS_local_solve(using_qmr, const Matrix1 &A, Vector &x,
                    const Vector &b, const Precond &P, iteration &iter)
{ qmr(A, x, b, P, iter); }
#if defined(GMM_USES_SUPERLU)
// Tag selecting SuperLU as a (direct) local solver.
struct using_superlu {};

// When SuperLU is the local solver, the "preconditioner" is really a
// complete LU factorization of the local matrix; whatever P the caller
// passed is discarded and replaced by a SuperLU_factor object.
template <typename P, typename Matrix>
struct actual_precond<P, using_superlu, Matrix> {
  typedef typename linalg_traits<Matrix>::value_type value_type;
  typedef SuperLU_factor<value_type> APrecond;
  template <typename PR>
  static APrecond transform(const PR &) { return APrecond(); }
  static const APrecond &transform(const APrecond &PP) { return PP; }
};

// Local "solve" with SuperLU: a single forward/backward substitution
// with the factorization stored in P; the matrix argument is unused.
template <typename Matrix1, typename Precond, typename Vector>
void AS_local_solve(using_superlu, const Matrix1 &, Vector &x,
                    const Vector &b, const Precond &P, iteration &iter)
{ P.solve(x, b); iter.set_iteration(1); }
#endif
  83. /* ******************************************************************** */
  84. /* Additive Schwarz Linear system */
  85. /* ******************************************************************** */
/* The preconditioned operator of the additive Schwarz method.  Stores
 * the global matrix A, the subdomain matrices Bi (vB), the local
 * matrices vAloc[i] = Bi^T A Bi together with a preconditioner for
 * each, and the work vectors/statistics used when mult() applies the
 * operator.  A and vB are referenced, not owned. */
template <typename Matrix1, typename Matrix2, typename Precond,
          typename local_solver>
struct add_schwarz_mat{
  typedef typename linalg_traits<Matrix1>::value_type value_type;

  const Matrix1 *A;                 // global system matrix (not owned)
  const std::vector<Matrix2> *vB;   // subdomain matrices Bi (not owned)
  std::vector<Matrix2> vAloc;       // local matrices Bi^T A Bi
  mutable iteration iter;           // iteration control for local solves
  double residual;                  // target residual of the outer solve
  mutable size_type itebilan;       // max local iteration count observed
  mutable std::vector<std::vector<value_type> > gi, fi;  // local sol / rhs
  std::vector<typename actual_precond<Precond, local_solver,
                                      Matrix1>::APrecond> precond1;

  void init(const Matrix1 &A_, const std::vector<Matrix2> &vB_,
            iteration iter_, const Precond &P, double residual_);
  add_schwarz_mat(void) {}
  add_schwarz_mat(const Matrix1 &A_, const std::vector<Matrix2> &vB_,
                  iteration iter_, const Precond &P, double residual_)
  { init(A_, vB_, iter_, P, residual_); }
};
// Builds the additive Schwarz operator: assembles each local matrix
// vAloc[i] = Bi^T A Bi, builds a preconditioner (or factorization) for
// it, and sizes the local work vectors fi (local rhs) and gi (local
// solution).  Under MPI, each rank handles the subdomain slice
// [borne_inf, borne_sup) and the CSR slices of A are circulated on a
// ring so every rank can assemble complete local matrices.
template <typename Matrix1, typename Matrix2, typename Precond,
          typename local_solver>
void add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver>::init(
    const Matrix1 &A_, const std::vector<Matrix2> &vB_,
    iteration iter_, const Precond &P, double residual_) {
  vB = &vB_; A = &A_; iter = iter_;
  residual = residual_;

  size_type nb_sub = vB->size();
  vAloc.resize(nb_sub);
  gi.resize(nb_sub); fi.resize(nb_sub);
  precond1.resize(nb_sub);
  std::fill(precond1.begin(), precond1.end(),
            actual_precond<Precond, local_solver, Matrix1>::transform(P));
  itebilan = 0;
  if (iter.get_noisy()) cout << "Init pour sub dom ";
#ifdef GMM_USES_MPI
  int size,tranche,borne_sup,borne_inf,rank,tag1=11,tag2=12,tag3=13,sizepr = 0;
  // int tab[4];
  double t_ref,t_final;
  MPI_Status status;
  t_ref=MPI_Wtime();
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  tranche=nb_sub/size;            // subdomains per rank (remainder ignored)
  borne_inf=rank*tranche;
  borne_sup=(rank+1)*tranche;
  // if (rank==size-1) borne_sup = nb_sub;
  cout << "Nombre de sous domaines " << borne_sup - borne_inf << endl;
  int sizeA = mat_nrows(*A);
  // Acsr holds the slice of A currently on this rank; Acsrtemp receives
  // the previous rank's slice during the ring exchange.
  gmm::csr_matrix<value_type> Acsr(sizeA, sizeA), Acsrtemp(sizeA, sizeA);
  gmm::copy(gmm::eff_matrix(*A), Acsr);
  int next = (rank + 1) % size;
  int previous = (rank + size - 1) % size;
  //communication of local information on ring pattern
  //Each process receive Nproc-1 contributions
  for (int nproc = 0; nproc < size; ++nproc) {
    for (size_type i = size_type(borne_inf); i < size_type(borne_sup); ++i) {
      // for (size_type i = 0; i < nb_sub/size; ++i) {
      // for (size_type i = 0; i < nb_sub; ++i) {
      // size_type i=(rank+size*(j-1)+nb_sub)%nb_sub;
      cout << "Sous domaines " << i << " : " << mat_ncols((*vB)[i]) << endl;
#else
  for (size_type i = 0; i < nb_sub; ++i) {
#endif
      if (iter.get_noisy()) cout << i << " " << std::flush;
      Matrix2 Maux(mat_ncols((*vB)[i]), mat_nrows((*vB)[i]));
#ifdef GMM_USES_MPI
      // Accumulate Bi^T * (current slice of A) * Bi over the ring passes.
      Matrix2 Maux2(mat_ncols((*vB)[i]), mat_ncols((*vB)[i]));
      if (nproc == 0) {
        gmm::resize(vAloc[i], mat_ncols((*vB)[i]), mat_ncols((*vB)[i]));
        gmm::clear(vAloc[i]);
      }
      gmm::mult(gmm::transposed((*vB)[i]), Acsr, Maux);
      gmm::mult(Maux, (*vB)[i], Maux2);
      gmm::add(Maux2, vAloc[i]);
#else
      // Local matrix: vAloc[i] = Bi^T A Bi.
      gmm::resize(vAloc[i], mat_ncols((*vB)[i]), mat_ncols((*vB)[i]));
      gmm::mult(gmm::transposed((*vB)[i]), *A, Maux);
      gmm::mult(Maux, (*vB)[i], vAloc[i]);
#endif
#ifdef GMM_USES_MPI
      if (nproc == size - 1 ) {   // last ring pass: vAloc[i] is complete
#endif
        precond1[i].build_with(vAloc[i]);
        gmm::resize(fi[i], mat_ncols((*vB)[i]));
        gmm::resize(gi[i], mat_ncols((*vB)[i]));
#ifdef GMM_USES_MPI
      }
#else
  }
#endif
#ifdef GMM_USES_MPI
    }
    if (nproc != size - 1) {
      // Ring exchange of the CSR slice of A: column pointers first, then
      // (after growing the receive buffers if needed) indices and values.
      MPI_Sendrecv(&(Acsr.jc[0]), sizeA+1, MPI_INT, next, tag2,
                   &(Acsrtemp.jc[0]), sizeA+1, MPI_INT, previous, tag2,
                   MPI_COMM_WORLD, &status);
      if (Acsrtemp.jc[sizeA] > size_type(sizepr)) {
        sizepr = Acsrtemp.jc[sizeA];
        gmm::resize(Acsrtemp.pr, sizepr);
        gmm::resize(Acsrtemp.ir, sizepr);
      }
      MPI_Sendrecv(&(Acsr.ir[0]), Acsr.jc[sizeA], MPI_INT, next, tag1,
                   &(Acsrtemp.ir[0]), Acsrtemp.jc[sizeA], MPI_INT, previous, tag1,
                   MPI_COMM_WORLD, &status);
      MPI_Sendrecv(&(Acsr.pr[0]), Acsr.jc[sizeA], mpi_type(value_type()), next, tag3,
                   &(Acsrtemp.pr[0]), Acsrtemp.jc[sizeA], mpi_type(value_type()), previous, tag3,
                   MPI_COMM_WORLD, &status);
      gmm::copy(Acsrtemp, Acsr);
    }
  }
  t_final=MPI_Wtime();
  cout<<"temps boucle precond "<< t_final-t_ref<<endl;
#endif
  if (iter.get_noisy()) cout << "\n";
}
// Application of the additive Schwarz preconditioned operator:
//   q = sum_i Bi (Bi^T A Bi)^{-1} Bi^T (A p)
// The local inverses are applied approximately by AS_local_solve with
// the preconditioners built in init().  itebilan records the largest
// local iteration count seen during this application.
template <typename Matrix1, typename Matrix2, typename Precond,
          typename Vector2, typename Vector3, typename local_solver>
void mult(const add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver> &M,
          const Vector2 &p, Vector3 &q) {
  size_type itebilan = 0;
#ifdef GMM_USES_MPI
  static double tmult_tot = 0.0;
  double t_ref = MPI_Wtime();
#endif
  // cout << "tmult AS begin " << endl;
  mult(*(M.A), p, q);   // q = A p
#ifdef GMM_USES_MPI
  tmult_tot += MPI_Wtime()-t_ref;
  cout << "tmult_tot = " << tmult_tot << endl;
#endif
  std::vector<double> qbis(gmm::vect_size(q));
  std::vector<double> qter(gmm::vect_size(q));
#ifdef GMM_USES_MPI
  // MPI_Status status;
  // MPI_Request request,request1;
  // int tag=111;
  int size,tranche,borne_sup,borne_inf,rank;
  size_type nb_sub=M.fi.size();
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  tranche=nb_sub/size;            // subdomains per rank (remainder ignored)
  borne_inf=rank*tranche;
  borne_sup=(rank+1)*tranche;
  // if (rank==size-1) borne_sup=nb_sub;
  // int next = (rank + 1) % size;
  // int previous = (rank + size - 1) % size;
  t_ref = MPI_Wtime();
  for (size_type i = size_type(borne_inf); i < size_type(borne_sup); ++i)
  // for (size_type i = 0; i < nb_sub/size; ++i)
  // for (size_type j = 0; j < nb_sub; ++j)
#else
  for (size_type i = 0; i < M.fi.size(); ++i)
#endif
  {
#ifdef GMM_USES_MPI
    // size_type i=j; // (rank+size*(j-1)+nb_sub)%nb_sub;
#endif
    // Restrict to subdomain i and solve the local system approximately.
    gmm::mult(gmm::transposed((*(M.vB))[i]), q, M.fi[i]);   // fi = Bi^T q
    M.iter.init();
    AS_local_solve(local_solver(), (M.vAloc)[i], (M.gi)[i],
                   (M.fi)[i],(M.precond1)[i],M.iter);
    itebilan = std::max(itebilan, M.iter.get_iteration());
  }
#ifdef GMM_USES_MPI
  cout << "First AS loop time " << MPI_Wtime() - t_ref << endl;
#endif
  // Second pass: prolongate the local corrections and sum them into q.
  gmm::clear(q);
#ifdef GMM_USES_MPI
  t_ref = MPI_Wtime();
  // for (size_type j = 0; j < nb_sub; ++j)
  for (size_type i = size_type(borne_inf); i < size_type(borne_sup); ++i)
#else
  for (size_type i = 0; i < M.gi.size(); ++i)
#endif
  {
#ifdef GMM_USES_MPI
    // size_type i=j; // (rank+size*(j-1)+nb_sub)%nb_sub;
    // gmm::mult((*(M.vB))[i], M.gi[i], qbis,qbis);
    gmm::mult((*(M.vB))[i], M.gi[i], qter);
    add(qter,qbis,qbis);          // qbis accumulates this rank's share
#else
    gmm::mult((*(M.vB))[i], M.gi[i], q, q);   // q += Bi gi
#endif
  }
#ifdef GMM_USES_MPI
  //WARNING this add only if you use the ring pattern below
  // need to do this below if using a n explicit ring pattern communication
  // add(qbis,q,q);
  cout << "Second AS loop time " << MPI_Wtime() - t_ref << endl;
#endif
#ifdef GMM_USES_MPI
  // int tag1=11;
  static double t_tot = 0.0;
  double t_final;
  t_ref=MPI_Wtime();
  // int next = (rank + 1) % size;
  // int previous = (rank + size - 1) % size;
  //communication of local information on ring pattern
  //Each process receive Nproc-1 contributions
  // if (size > 1) {
  // for (int nproc = 0; nproc < size-1; ++nproc)
  // {
  // MPI_Sendrecv(&(qbis[0]), gmm::vect_size(q), MPI_DOUBLE, next, tag1,
  // &(qter[0]), gmm::vect_size(q),MPI_DOUBLE,previous,tag1,
  // MPI_COMM_WORLD,&status);
  // gmm::copy(qter, qbis);
  // add(qbis,q,q);
  // }
  // }
  // Sum the per-rank partial results into q on every process.
  MPI_Allreduce(&(qbis[0]), &(q[0]),gmm::vect_size(q), MPI_DOUBLE,
                MPI_SUM,MPI_COMM_WORLD);
  t_final=MPI_Wtime();
  t_tot += t_final-t_ref;
  cout<<"["<< rank<<"] temps reduce Resol "<< t_final-t_ref << " t_tot = " << t_tot << endl;
#endif
  if (M.iter.get_noisy() > 0) cout << "itebloc = " << itebilan << endl;
  M.itebilan += itebilan;
  // Relax the local solve tolerance towards the outer target residual.
  M.iter.set_resmax((M.iter.get_resmax() + M.residual) * 0.5);
}
// Overload for the case where the output vector is deduced as const
// (e.g. gmm sub-vector/temporary views): cast away constness, forward.
template <typename Matrix1, typename Matrix2, typename Precond,
          typename Vector2, typename Vector3, typename local_solver>
void mult(const add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver> &M,
          const Vector2 &p, const Vector3 &q) {
  mult(M, p, const_cast<Vector3 &>(q));
}
// Four-argument form used by generic Krylov codes: q = M * p + p2.
template <typename Matrix1, typename Matrix2, typename Precond,
          typename Vector2, typename Vector3, typename Vector4,
          typename local_solver>
void mult(const add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver> &M,
          const Vector2 &p, const Vector3 &p2, Vector4 &q)
{ mult(M, p, q); add(p2, q); }
// Same as above for a const-deduced output vector: q = M * p + p2.
template <typename Matrix1, typename Matrix2, typename Precond,
          typename Vector2, typename Vector3, typename Vector4,
          typename local_solver>
void mult(const add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver> &M,
          const Vector2 &p, const Vector3 &p2, const Vector4 &q)
{ mult(M, p, const_cast<Vector4 &>(q)); add(p2, q); }
  324. /* ******************************************************************** */
  325. /* Additive Schwarz interfaced global solvers */
  326. /* ******************************************************************** */
// Dispatch of the outer (global) solve on the additive Schwarz
// operator ASM, selected by the tag argument.  Note that for CG the
// original matrix *ASM.A is passed as the auxiliary matrix argument of
// gmm::cg, while the other methods use the identity as preconditioner.
template <typename ASM_type, typename Vect>
void AS_global_solve(using_cg, const ASM_type &ASM, Vect &x,
                     const Vect &b, iteration &iter)
{ cg(ASM, x, b, *(ASM.A), identity_matrix(), iter); }

template <typename ASM_type, typename Vect>
void AS_global_solve(using_gmres, const ASM_type &ASM, Vect &x,
                     const Vect &b, iteration &iter)
{ gmres(ASM, x, b, identity_matrix(), 100, iter); }   // restart = 100

template <typename ASM_type, typename Vect>
void AS_global_solve(using_bicgstab, const ASM_type &ASM, Vect &x,
                     const Vect &b, iteration &iter)
{ bicgstab(ASM, x, b, identity_matrix(), iter); }

template <typename ASM_type, typename Vect>
void AS_global_solve(using_qmr,const ASM_type &ASM, Vect &x,
                     const Vect &b, iteration &iter)
{ qmr(ASM, x, b, identity_matrix(), iter); }
  343. #if defined(GMM_USES_SUPERLU)
  344. template <typename ASM_type, typename Vect>
  345. void AS_global_solve(using_superlu, const ASM_type &, Vect &,
  346. const Vect &, iteration &) {
  347. GMM_ASSERT1(false, "You cannot use SuperLU as "
  348. "global solver in additive Schwarz meethod");
  349. }
  350. #endif
  351. /* ******************************************************************** */
  352. /* Linear Additive Schwarz method */
  353. /* ******************************************************************** */
  354. /* ref : Domain decomposition algorithms for the p-version finite */
  355. /* element method for elliptic problems, Luca F. Pavarino, */
  356. /* PhD thesis, Courant Institute of Mathematical Sciences, 1992. */
  357. /* ******************************************************************** */
  358. /** Function to call if the ASM matrix is precomputed for successive solve
  359. * with the same system.
  360. */
template <typename Matrix1, typename Matrix2,
          typename Vector2, typename Vector3, typename Precond,
          typename local_solver, typename global_solver>
void additive_schwarz(
    add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver> &ASM, Vector3 &u,
    const Vector2 &f, iteration &iter, const global_solver&) {
  typedef typename linalg_traits<Matrix1>::value_type value_type;

  size_type nb_sub = ASM.vB->size(), nb_dof = gmm::vect_size(f);
  ASM.itebilan = 0;
  // g receives the preconditioned rhs  sum_i Bi Aloc_i^{-1} Bi^T f;
  // gbis holds this rank's partial sum in the MPI case.
  std::vector<value_type> g(nb_dof);
  std::vector<value_type> gbis(nb_dof);
#ifdef GMM_USES_MPI
  double t_init=MPI_Wtime();
  int size,tranche,borne_sup,borne_inf,rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  tranche=nb_sub/size;            // subdomains per rank (remainder ignored)
  borne_inf=rank*tranche;
  borne_sup=(rank+1)*tranche;
  // if (rank==size-1) borne_sup=nb_sub*size;
  for (size_type i = size_type(borne_inf); i < size_type(borne_sup); ++i)
  // for (size_type i = 0; i < nb_sub/size; ++i)
  // for (size_type j = 0; j < nb_sub; ++j)
  // for (size_type i = rank; i < nb_sub; i+=size)
#else
  for (size_type i = 0; i < nb_sub; ++i)
#endif
  {
#ifdef GMM_USES_MPI
    // size_type i=j; // (rank+size*(j-1)+nb_sub)%nb_sub;
#endif
    gmm::mult(gmm::transposed((*(ASM.vB))[i]), f, ASM.fi[i]);  // fi = Bi^T f
    ASM.iter.init();
    AS_local_solve(local_solver(), ASM.vAloc[i], ASM.gi[i], ASM.fi[i],
                   ASM.precond1[i], ASM.iter);
    ASM.itebilan = std::max(ASM.itebilan, ASM.iter.get_iteration());
#ifdef GMM_USES_MPI
    gmm::mult((*(ASM.vB))[i], ASM.gi[i], gbis,gbis);   // gbis += Bi gi
#else
    gmm::mult((*(ASM.vB))[i], ASM.gi[i], g, g);        // g += Bi gi
#endif
  }
#ifdef GMM_USES_MPI
  cout<<"temps boucle init "<< MPI_Wtime()-t_init<<endl;
  double t_ref,t_final;
  t_ref=MPI_Wtime();
  // Sum the per-rank partial preconditioned rhs into g everywhere.
  MPI_Allreduce(&(gbis[0]), &(g[0]),gmm::vect_size(g), MPI_DOUBLE,
                MPI_SUM,MPI_COMM_WORLD);
  t_final=MPI_Wtime();
  cout<<"temps reduce init "<< t_final-t_ref<<endl;
#endif
#ifdef GMM_USES_MPI
  t_ref=MPI_Wtime();
  cout<<"begin global AS"<<endl;
#endif
  // Solve the preconditioned system M u = g with the chosen global
  // Krylov method; mult() above applies the operator M.
  AS_global_solve(global_solver(), ASM, u, g, iter);
#ifdef GMM_USES_MPI
  t_final=MPI_Wtime();
  cout<<"temps AS Global Solve "<< t_final-t_ref<<endl;
#endif
  if (iter.get_noisy())
    cout << "Total number of internal iterations : " << ASM.itebilan << endl;
}
  424. /** Global function. Compute the ASM matrix and call the previous function.
  425. * The ASM matrix represent the preconditionned linear system.
  426. */
  427. template <typename Matrix1, typename Matrix2,
  428. typename Vector2, typename Vector3, typename Precond,
  429. typename local_solver, typename global_solver>
  430. void additive_schwarz(const Matrix1 &A, Vector3 &u,
  431. const Vector2 &f, const Precond &P,
  432. const std::vector<Matrix2> &vB,
  433. iteration &iter, local_solver,
  434. global_solver) {
  435. iter.set_rhsnorm(vect_norm2(f));
  436. if (iter.get_rhsnorm() == 0.0) { gmm::clear(u); return; }
  437. iteration iter2 = iter; iter2.reduce_noisy();
  438. iter2.set_maxiter(size_type(-1));
  439. add_schwarz_mat<Matrix1, Matrix2, Precond, local_solver>
  440. ASM(A, vB, iter2, P, iter.get_resmax());
  441. additive_schwarz(ASM, u, f, iter, global_solver());
  442. }
  443. /* ******************************************************************** */
  444. /* Sequential Non-Linear Additive Schwarz method */
  445. /* ******************************************************************** */
  446. /* ref : Nonlinearly Preconditionned Inexact Newton Algorithms, */
  447. /* Xiao-Chuan Cai, David E. Keyes, */
  448. /* SIAM J. Sci. Comp. 24: p183-200. l */
  449. /* ******************************************************************** */
// Abstract interface a user implements to describe the nonlinear
// problem F(x) = 0 and its subdomain restrictions for the nonlinear
// additive Schwarz solver below.
template <typename Matrixt, typename MatrixBi>
class NewtonAS_struct {
public :
  typedef Matrixt tangent_matrix_type;
  typedef MatrixBi B_matrix_type;
  typedef typename linalg_traits<Matrixt>::value_type value_type;
  typedef std::vector<value_type> Vector;

  // Number of global unknowns.
  virtual size_type size(void) = 0;
  // Subdomain matrices Bi.
  virtual const std::vector<MatrixBi> &get_vB() = 0;
  // f <- F(x).
  virtual void compute_F(Vector &f, Vector &x) = 0;
  // M <- grad F(x) (global tangent matrix).
  virtual void compute_tangent_matrix(Matrixt &M, Vector &x) = 0;
  // compute Bi^T grad(F(X)) Bi
  virtual void compute_sub_tangent_matrix(Matrixt &Mloc, Vector &x,
                                          size_type i) = 0;
  // compute Bi^T F(X)
  virtual void compute_sub_F(Vector &fi, Vector &x, size_type i) = 0;

  virtual ~NewtonAS_struct() {}
};
// Operator used for the "exact gradient" variant of the nonlinear
// additive Schwarz method: stores, for each subdomain i, a global
// tangent matrix vM[i] and its local projection
// vMloc[i] = Bi^T vM[i] Bi.
template <typename Matrixt, typename MatrixBi>
struct AS_exact_gradient {
  const std::vector<MatrixBi> &vB;   // subdomain matrices (not owned)
  std::vector<Matrixt> vM;           // per-subdomain global tangent matrices
  std::vector<Matrixt> vMloc;        // projections Bi^T vM[i] Bi

  // (Re)computes every vMloc[i] from the current vM; call after the
  // caller has filled the vM matrices.
  void init(void) {
    for (size_type i = 0; i < vB.size(); ++i) {
      Matrixt aux(gmm::mat_ncols(vB[i]), gmm::mat_ncols(vM[i]));
      gmm::resize(vMloc[i], gmm::mat_ncols(vB[i]), gmm::mat_ncols(vB[i]));
      gmm::mult(gmm::transposed(vB[i]), vM[i], aux);
      gmm::mult(aux, vB[i], vMloc[i]);
    }
  }
  // Sizes vM[i] as nrows(Bi) x nrows(Bi); contents are filled later.
  AS_exact_gradient(const std::vector<MatrixBi> &vB_) : vB(vB_) {
    vM.resize(vB.size()); vMloc.resize(vB.size());
    for (size_type i = 0; i < vB.size(); ++i) {
      gmm::resize(vM[i], gmm::mat_nrows(vB[i]), gmm::mat_nrows(vB[i]));
    }
  }
};
// Application of the exact-gradient operator:
//   q = sum_i Bi (Bi^T vM[i] Bi)^{-1} Bi^T (vM[i] p)
// The local systems are solved directly with SuperLU.
// NOTE(review): SuperLU_solve is called unconditionally here, unlike
// the GMM_USES_SUPERLU-guarded code above — presumably this path is
// only compiled with SuperLU support enabled; confirm.
template <typename Matrixt, typename MatrixBi,
          typename Vector2, typename Vector3>
void mult(const AS_exact_gradient<Matrixt, MatrixBi> &M,
          const Vector2 &p, Vector3 &q) {
  gmm::clear(q);
  typedef typename gmm::linalg_traits<Vector3>::value_type T;
  std::vector<T> v(gmm::vect_size(p)), w, x;
  for (size_type i = 0; i < M.vB.size(); ++i) {
    w.resize(gmm::mat_ncols(M.vB[i]));
    x.resize(gmm::mat_ncols(M.vB[i]));
    gmm::mult(M.vM[i], p, v);                   // v = vM[i] p
    gmm::mult(gmm::transposed(M.vB[i]), v, w);  // w = Bi^T v
    double rcond;
    SuperLU_solve(M.vMloc[i], x, w, rcond);     // x = vMloc[i]^{-1} w
    // gmm::iteration iter(1E-10, 0, 100000);
    //gmm::gmres(M.vMloc[i], x, w, gmm::identity_matrix(), 50, iter);
    gmm::mult_add(M.vB[i], x, q);               // q += Bi x
  }
}
// Overload for a const-deduced output vector (gmm temporaries/views).
template <typename Matrixt, typename MatrixBi,
          typename Vector2, typename Vector3>
void mult(const AS_exact_gradient<Matrixt, MatrixBi> &M,
          const Vector2 &p, const Vector3 &q) {
  mult(M, p, const_cast<Vector3 &>(q));
}
// Four-argument form: q = M * p + p2.
template <typename Matrixt, typename MatrixBi,
          typename Vector2, typename Vector3, typename Vector4>
void mult(const AS_exact_gradient<Matrixt, MatrixBi> &M,
          const Vector2 &p, const Vector3 &p2, Vector4 &q)
{ mult(M, p, q); add(p2, q); }
// Same as above for a const-deduced output vector: q = M * p + p2.
template <typename Matrixt, typename MatrixBi,
          typename Vector2, typename Vector3, typename Vector4>
void mult(const AS_exact_gradient<Matrixt, MatrixBi> &M,
          const Vector2 &p, const Vector3 &p2, const Vector4 &q)
{ mult(M, p, const_cast<Vector4 &>(q)); add(p2, q); }
  523. struct S_default_newton_line_search {
  524. double conv_alpha, conv_r;
  525. size_t it, itmax, glob_it;
  526. double alpha, alpha_old, alpha_mult, first_res, alpha_max_ratio;
  527. double alpha_min_ratio, alpha_min;
  528. size_type count, count_pat;
  529. bool max_ratio_reached;
  530. double alpha_max_ratio_reached, r_max_ratio_reached;
  531. size_type it_max_ratio_reached;
  532. double converged_value(void) { return conv_alpha; };
  533. double converged_residual(void) { return conv_r; };
  534. virtual void init_search(double r, size_t git, double = 0.0) {
  535. alpha_min_ratio = 0.9;
  536. alpha_min = 1e-10;
  537. alpha_max_ratio = 10.0;
  538. alpha_mult = 0.25;
  539. itmax = size_type(-1);
  540. glob_it = git; if (git <= 1) count_pat = 0;
  541. conv_alpha = alpha = alpha_old = 1.;
  542. conv_r = first_res = r; it = 0;
  543. count = 0;
  544. max_ratio_reached = false;
  545. }
  546. virtual double next_try(void) {
  547. alpha_old = alpha;
  548. if (alpha >= 0.4) alpha *= 0.5; else alpha *= alpha_mult; ++it;
  549. return alpha_old;
  550. }
  551. virtual bool is_converged(double r, double = 0.0) {
  552. // cout << "r = " << r << " alpha = " << alpha / alpha_mult << " count_pat = " << count_pat << endl;
  553. if (!max_ratio_reached && r < first_res * alpha_max_ratio) {
  554. alpha_max_ratio_reached = alpha_old; r_max_ratio_reached = r;
  555. it_max_ratio_reached = it; max_ratio_reached = true;
  556. }
  557. if (max_ratio_reached && r < r_max_ratio_reached * 0.5
  558. && r > first_res * 1.1 && it <= it_max_ratio_reached+1) {
  559. alpha_max_ratio_reached = alpha_old; r_max_ratio_reached = r;
  560. it_max_ratio_reached = it;
  561. }
  562. if (count == 0 || r < conv_r)
  563. { conv_r = r; conv_alpha = alpha_old; count = 1; }
  564. if (conv_r < first_res) ++count;
  565. if (r < first_res * alpha_min_ratio)
  566. { count_pat = 0; return true; }
  567. if (count >= 5 || (alpha < alpha_min && max_ratio_reached)) {
  568. if (conv_r < first_res * 0.99) count_pat = 0;
  569. if (/*gmm::random() * 50. < -log(conv_alpha)-4.0 ||*/ count_pat >= 3)
  570. { conv_r=r_max_ratio_reached; conv_alpha=alpha_max_ratio_reached; }
  571. if (conv_r >= first_res * 0.9999) count_pat++;
  572. return true;
  573. }
  574. return false;
  575. }
  576. S_default_newton_line_search(void) { count_pat = 0; }
  577. };
// Nonlinearly preconditioned inexact Newton (cf. Cai & Keyes reference
// above).  Each outer iteration:
//  (1) approximately solves each subdomain nonlinear problem by a local
//      Newton method with backtracking line search, accumulating the
//      preconditioned residual in rhs;
//  (2) solves a global linear system for the Newton direction d (the
//      exact-gradient branch with GMRES is the one currently enabled);
//  (3) applies a global line search along d and updates u.
// u_ is const-qualified only for interface reasons: it is updated
// in place.
template <typename Matrixt, typename MatrixBi, typename Vector,
          typename Precond, typename local_solver, typename global_solver>
void Newton_additive_Schwarz(NewtonAS_struct<Matrixt, MatrixBi> &NS,
                             const Vector &u_,
                             iteration &iter, const Precond &P,
                             local_solver, global_solver) {
  Vector &u = const_cast<Vector &>(u_);
  typedef typename linalg_traits<Vector>::value_type value_type;
  typedef typename number_traits<value_type>::magnitude_type mtype;
  typedef actual_precond<Precond, local_solver, Matrixt> chgt_precond;

  double residual = iter.get_resmax();

  S_default_newton_line_search internal_ls;  // for the local Newton steps
  S_default_newton_line_search external_ls;  // for the global Newton step

  typename chgt_precond::APrecond PP = chgt_precond::transform(P);
  iter.set_rhsnorm(mtype(1));
  // Nested iteration controls with progressively tighter tolerances:
  // iternc: local Newton loops; iter2: global linear system;
  // iter3: local linear solves; iter4: optional ASM branch below.
  iteration iternc(iter);
  iternc.reduce_noisy(); iternc.set_maxiter(size_type(-1));
  iteration iter2(iternc);
  iteration iter3(iter2); iter3.reduce_noisy();
  iteration iter4(iter3);
  iternc.set_name("Local Newton");
  iter2.set_name("Linear System for Global Newton");
  iternc.set_resmax(residual/100.0);
  iter3.set_resmax(residual/10000.0);
  iter2.set_resmax(residual/1000.0);
  iter4.set_resmax(residual/1000.0);
  std::vector<value_type> rhs(NS.size()), x(NS.size()), d(NS.size());
  std::vector<value_type> xi, xii, fi, di;   // per-subdomain work vectors

  std::vector< std::vector<value_type> > vx(NS.get_vB().size());
  for (size_type i = 0; i < NS.get_vB().size(); ++i) // for exact gradient
    vx[i].resize(NS.size()); // for exact gradient

  Matrixt Mloc, M(NS.size(), NS.size());
  NS.compute_F(rhs, u);
  mtype act_res=gmm::vect_norm2(rhs), act_res_new(0), precond_res = act_res;
  mtype alpha;

  while(!iter.finished(std::min(act_res, precond_res))) {
    // Single pass (SOR_step counts down from 0): local nonlinear solves.
    for (int SOR_step = 0; SOR_step >= 0; --SOR_step) {
      gmm::clear(rhs);
      for (size_type isd = 0; isd < NS.get_vB().size(); ++isd) {
        const MatrixBi &Bi = (NS.get_vB())[isd];
        size_type si = mat_ncols(Bi);
        gmm::resize(Mloc, si, si);
        xi.resize(si); xii.resize(si); fi.resize(si); di.resize(si);

        iternc.init();
        iternc.set_maxiter(30); // ?
        if (iternc.get_noisy())
          cout << "Non-linear local problem " << isd << endl;
        gmm::clear(xi);
        gmm::copy(u, x);
        // fi = -Bi^T F(x): local residual for the Newton iteration.
        NS.compute_sub_F(fi, x, isd); gmm::scale(fi, value_type(-1));
        mtype r = gmm::vect_norm2(fi), r_t(r);
        if (r > value_type(0)) {
          iternc.set_rhsnorm(std::max(r, mtype(1)));
          while(!iternc.finished(r)) {
            // One local Newton step: solve Mloc di = fi, then backtrack.
            NS.compute_sub_tangent_matrix(Mloc, x, isd);
            PP.build_with(Mloc);
            iter3.init();
            AS_local_solve(local_solver(), Mloc, di, fi, PP, iter3);

            internal_ls.init_search(r, iternc.get_iteration());
            do {
              alpha = internal_ls.next_try();
              gmm::add(xi, gmm::scaled(di, -alpha), xii);
              gmm::mult(Bi, gmm::scaled(xii, -1.0), u, x);  // x = u - Bi xii
              NS.compute_sub_F(fi, x, isd); gmm::scale(fi, value_type(-1));
              r_t = gmm::vect_norm2(fi);
            } while (!internal_ls.is_converged(r_t));

            if (alpha != internal_ls.converged_value()) {
              // Re-evaluate at the step the line search actually retained.
              alpha = internal_ls.converged_value();
              gmm::add(xi, gmm::scaled(di, -alpha), xii);
              gmm::mult(Bi, gmm::scaled(xii, -1.0), u, x);
              NS.compute_sub_F(fi, x, isd); gmm::scale(fi, value_type(-1));
              r_t = gmm::vect_norm2(fi);
            }
            gmm::copy(x, vx[isd]); // for exact gradient

            if (iternc.get_noisy()) cout << "(step=" << alpha << ")\t";
            ++iternc; r = r_t; gmm::copy(xii, xi);
          }
          if (SOR_step) gmm::mult(Bi, gmm::scaled(xii, -1.0), u, u);
          gmm::mult(Bi, gmm::scaled(xii, -1.0), rhs, rhs);  // accumulate -Bi xii
        }
      }
      precond_res = gmm::vect_norm2(rhs);
      if (SOR_step) cout << "SOR step residual = " << precond_res << endl;
      if (precond_res < residual) break;
      cout << "Precond residual = " << precond_res << endl;
    }

    iter2.init();
    // solving linear system for the global Newton method
    if (0) {   // disabled branch: additive Schwarz on the global tangent
      NS.compute_tangent_matrix(M, u);
      add_schwarz_mat<Matrixt, MatrixBi, Precond, local_solver>
        ASM(M, NS.get_vB(), iter4, P, iter.get_resmax());
      AS_global_solve(global_solver(), ASM, d, rhs, iter2);
    }
    else { // for exact gradient
      AS_exact_gradient<Matrixt, MatrixBi> eg(NS.get_vB());
      for (size_type i = 0; i < NS.get_vB().size(); ++i) {
        NS.compute_tangent_matrix(eg.vM[i], vx[i]);
      }
      eg.init();
      gmres(eg, d, rhs, gmm::identity_matrix(), 50, iter2);
    }
    // gmm::add(gmm::scaled(rhs, 0.1), u); ++iter;

    // Global line search along the Newton direction d.
    external_ls.init_search(act_res, iter.get_iteration());
    do {
      alpha = external_ls.next_try();
      gmm::add(gmm::scaled(d, alpha), u, x);
      NS.compute_F(rhs, x);
      act_res_new = gmm::vect_norm2(rhs);
    } while (!external_ls.is_converged(act_res_new));

    if (alpha != external_ls.converged_value()) {
      // Re-evaluate at the retained step before committing it.
      alpha = external_ls.converged_value();
      gmm::add(gmm::scaled(d, alpha), u, x);
      NS.compute_F(rhs, x);
      act_res_new = gmm::vect_norm2(rhs);
    }
    if (iter.get_noisy() > 1) cout << endl;
    act_res = act_res_new;
    if (iter.get_noisy()) cout << "(step=" << alpha << ")\t unprecond res = " << act_res << " ";
    ++iter; gmm::copy(x, u);
  }
}
  700. }
  701. #endif // GMM_SOLVERS_SCHWARZ_ADDITIVE_H__