/**CFile***********************************************************************

  FileName    [cuddMatMult.c]

  PackageName [cudd]

  Synopsis    [Matrix multiplication functions.]

  Description [External procedures included in this module:
                <ul>
                <li> Cudd_addMatrixMultiply()
                <li> Cudd_addTimesPlus()
                <li> Cudd_addTriangle()
                <li> Cudd_addOuterSum()
                </ul>
        Static procedures included in this module:
                <ul>
                <li> addMMRecur()
                <li> addTriangleRecur()
                <li> cuddAddOuterSumRecur()
                </ul>]

  Author      [Fabio Somenzi]

  Copyright   [Copyright (c) 1995-2012, Regents of the University of Colorado
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

  Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.

  Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in the
  documentation and/or other materials provided with the distribution.

  Neither the name of the University of Colorado nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.]

******************************************************************************/

#include "util.h"
#include "cuddInt.h"

/*---------------------------------------------------------------------------*/
/* Constant declarations */
/*---------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------*/
/* Structure declarations */
/*---------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------*/
/* Type declarations */
/*---------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------*/
/* Variable declarations */
/*---------------------------------------------------------------------------*/

#ifndef lint
static char rcsid[] DD_UNUSED = "$Id: cuddMatMult.c,v 1.18 2012/02/05 01:07:19 fabio Exp $";
#endif

/*---------------------------------------------------------------------------*/
/* Macro declarations */
/*---------------------------------------------------------------------------*/

/**AutomaticStart*************************************************************/

/*---------------------------------------------------------------------------*/
/* Static function prototypes */
/*---------------------------------------------------------------------------*/

static DdNode * addMMRecur (DdManager *dd, DdNode *A, DdNode *B, int topP, int *vars);
static DdNode * addTriangleRecur (DdManager *dd, DdNode *f, DdNode *g, int *vars, DdNode *cube);
static DdNode * cuddAddOuterSumRecur (DdManager *dd, DdNode *M, DdNode *r, DdNode *c);

/**AutomaticEnd***************************************************************/

/*---------------------------------------------------------------------------*/
/* Definition of exported functions */
/*---------------------------------------------------------------------------*/

/**Function********************************************************************

  Synopsis    [Calculates the product of two matrices represented as
  ADDs.]

  Description [Calculates the product of two matrices, A and B,
  represented as ADDs.  This procedure implements the quasiring
  multiplication algorithm.  A is assumed to depend on variables x (rows)
  and z (columns).  B is assumed to depend on variables z (rows) and y
  (columns).  The product of A and B then depends on x (rows) and y
  (columns).  Only the z variables have to be explicitly identified;
  they are the "summation" variables.  Returns a pointer to the
  result if successful; NULL otherwise.]

  SideEffects [None]

  SeeAlso     [Cudd_addTimesPlus Cudd_addTriangle Cudd_bddAndAbstract]

******************************************************************************/
DdNode *
Cudd_addMatrixMultiply(
  DdManager * dd,
  DdNode * A,
  DdNode * B,
  DdNode ** z,
  int  nz)
{
    int i, nvars, *vars;
    DdNode *res;

    /* Array vars says what variables are "summation" variables. */
    nvars = dd->size;
    vars = ALLOC(int,nvars);
    if (vars == NULL) {
        dd->errorCode = CUDD_MEMORY_OUT;
        return(NULL);
    }
    for (i = 0; i < nvars; i++) {
        vars[i] = 0;
    }
    for (i = 0; i < nz; i++) {
        vars[z[i]->index] = 1;
    }

    do {
        dd->reordered = 0;
        res = addMMRecur(dd,A,B,-1,vars);
    } while (dd->reordered == 1);
    FREE(vars);
    return(res);

} /* end of Cudd_addMatrixMultiply */

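/* Illustrative usage sketch (added note, not part of the original module):
** multiplying two matrices encoded as ADDs.  The manager dd, the operands
** A (over x and z variables) and B (over z and y variables), the array of
** summation variables zvars, and the count nz are assumed to be supplied
** by the caller; the function name is hypothetical.  Guarded out so the
** library source is unaffected.
*/
#if 0
static DdNode *
exampleMatrixMultiply(DdManager *dd, DdNode *A, DdNode *B,
                      DdNode **zvars, int nz)
{
    /* Sum out the z variables: result(x,y) = sum_z A(x,z) * B(z,y). */
    DdNode *product = Cudd_addMatrixMultiply(dd, A, B, zvars, nz);
    if (product == NULL) return(NULL);  /* e.g. CUDD_MEMORY_OUT */
    Cudd_Ref(product);                  /* returned node is unreferenced */
    return(product);                    /* caller must eventually deref */
}
#endif
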
/**Function********************************************************************

  Synopsis    [Calculates the product of two matrices represented as
  ADDs.]

  Description [Calculates the product of two matrices, A and B,
  represented as ADDs, using the CMU matrix-by-matrix multiplication
  procedure of Clarke et al.  Matrix A has x's as row variables and z's
  as column variables, while matrix B has z's as row variables and y's
  as column variables.  Returns the pointer to the result if successful;
  NULL otherwise.  The resulting matrix has x's as row variables and y's
  as column variables.]

  SideEffects [None]

  SeeAlso     [Cudd_addMatrixMultiply]

******************************************************************************/
DdNode *
Cudd_addTimesPlus(
  DdManager * dd,
  DdNode * A,
  DdNode * B,
  DdNode ** z,
  int  nz)
{
    DdNode *w, *cube, *tmp, *res;
    int i;

    tmp = Cudd_addApply(dd,Cudd_addTimes,A,B);
    if (tmp == NULL) return(NULL);
    Cudd_Ref(tmp);
    Cudd_Ref(cube = DD_ONE(dd));
    for (i = nz-1; i >= 0; i--) {
        w = Cudd_addIte(dd,z[i],cube,DD_ZERO(dd));
        if (w == NULL) {
            Cudd_RecursiveDeref(dd,tmp);
            return(NULL);
        }
        Cudd_Ref(w);
        Cudd_RecursiveDeref(dd,cube);
        cube = w;
    }
    res = Cudd_addExistAbstract(dd,tmp,cube);
    if (res == NULL) {
        Cudd_RecursiveDeref(dd,tmp);
        Cudd_RecursiveDeref(dd,cube);
        return(NULL);
    }
    Cudd_Ref(res);
    Cudd_RecursiveDeref(dd,cube);
    Cudd_RecursiveDeref(dd,tmp);
    Cudd_Deref(res);
    return(res);

} /* end of Cudd_addTimesPlus */

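/* Illustrative sketch (added note, not part of the original module):
** Cudd_addMatrixMultiply and Cudd_addTimesPlus compute the same matrix
** product by different algorithms, so one can cross-check the other.
** Because ADDs are canonical, equality is pointer equality; the check is
** meaningful when products and sums are exact (e.g. small integer entries),
** since rounding could otherwise differ between summation orders.  The
** function name is hypothetical and the operands come from the caller.
*/
#if 0
static int
exampleCrossCheck(DdManager *dd, DdNode *A, DdNode *B, DdNode **z, int nz)
{
    DdNode *p1, *p2;
    int equal;

    p1 = Cudd_addMatrixMultiply(dd, A, B, z, nz);
    if (p1 == NULL) return(0);
    Cudd_Ref(p1);
    p2 = Cudd_addTimesPlus(dd, A, B, z, nz);
    if (p2 == NULL) { Cudd_RecursiveDeref(dd, p1); return(0); }
    Cudd_Ref(p2);
    equal = (p1 == p2);         /* canonical ADDs: same function, same node */
    Cudd_RecursiveDeref(dd, p1);
    Cudd_RecursiveDeref(dd, p2);
    return(equal);
}
#endif
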
/**Function********************************************************************

  Synopsis    [Performs the triangulation step for the shortest path
  computation.]

  Description [Implements the semiring multiplication algorithm used in
  the triangulation step for the shortest path computation.  f
  is assumed to depend on variables x (rows) and z (columns).  g is
  assumed to depend on variables z (rows) and y (columns).  The product
  of f and g then depends on x (rows) and y (columns).  Only the z
  variables have to be explicitly identified; they are the
  "abstraction" variables.  Returns a pointer to the result if
  successful; NULL otherwise.]

  SideEffects [None]

  SeeAlso     [Cudd_addMatrixMultiply Cudd_bddAndAbstract]

******************************************************************************/
DdNode *
Cudd_addTriangle(
  DdManager * dd,
  DdNode * f,
  DdNode * g,
  DdNode ** z,
  int  nz)
{
    int i, nvars, *vars;
    DdNode *res, *cube;

    nvars = dd->size;
    vars = ALLOC(int, nvars);
    if (vars == NULL) {
        dd->errorCode = CUDD_MEMORY_OUT;
        return(NULL);
    }
    for (i = 0; i < nvars; i++) vars[i] = -1;
    for (i = 0; i < nz; i++) vars[z[i]->index] = i;
    cube = Cudd_addComputeCube(dd, z, NULL, nz);
    if (cube == NULL) {
        FREE(vars);
        return(NULL);
    }
    cuddRef(cube);

    do {
        dd->reordered = 0;
        res = addTriangleRecur(dd, f, g, vars, cube);
    } while (dd->reordered == 1);
    if (res != NULL) cuddRef(res);
    Cudd_RecursiveDeref(dd,cube);
    if (res != NULL) cuddDeref(res);
    FREE(vars);
    return(res);

} /* end of Cudd_addTriangle */

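/* Illustrative usage sketch (added note, not part of the original module):
** one triangulation step of a shortest-path computation in the (min,+)
** semiring.  dist is assumed to be an ADD of arc lengths over row variables
** x and column variables z, through the same lengths over z (rows) and y
** (columns); z/nz identify the abstracted variables.  The function name is
** hypothetical.
*/
#if 0
static DdNode *
exampleTriangulate(DdManager *dd, DdNode *dist, DdNode *through,
                   DdNode **z, int nz)
{
    /* step(x,y) = min over z of ( dist(x,z) + through(z,y) ) */
    DdNode *step = Cudd_addTriangle(dd, dist, through, z, nz);
    if (step == NULL) return(NULL);
    Cudd_Ref(step);
    return(step);
}
#endif
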
/**Function********************************************************************

  Synopsis    [Takes the minimum of a matrix and the outer sum of two vectors.]

  Description [Takes the pointwise minimum of a matrix and the outer
  sum of two vectors.  This procedure is used in the Floyd-Warshall
  all-pair shortest path algorithm.  Returns a pointer to the result if
  successful; NULL otherwise.]

  SideEffects [None]

  SeeAlso     []

******************************************************************************/
DdNode *
Cudd_addOuterSum(
  DdManager *dd,
  DdNode *M,
  DdNode *r,
  DdNode *c)
{
    DdNode *res;

    do {
        dd->reordered = 0;
        res = cuddAddOuterSumRecur(dd, M, r, c);
    } while (dd->reordered == 1);
    return(res);

} /* end of Cudd_addOuterSum */

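/* Illustrative sketch (added note, not part of the original module): one
** relaxation step of an ADD-based Floyd-Warshall iteration.  D is the
** current distance matrix; colK and rowK are assumed to be the k-th column
** (a vector over the row variables) and the k-th row (a vector over the
** column variables) of D, extracted by the caller, e.g. by cofactoring D
** with respect to the pivot index.  The function name is hypothetical, and
** since the outer sum is symmetric in its two vector arguments, their order
** does not affect the result.
*/
#if 0
static DdNode *
exampleRelaxStep(DdManager *dd, DdNode *D, DdNode *colK, DdNode *rowK)
{
    /* newD(i,j) = min( D(i,j), colK(i) + rowK(j) ) */
    DdNode *newD = Cudd_addOuterSum(dd, D, colK, rowK);
    if (newD == NULL) return(NULL);
    Cudd_Ref(newD);
    return(newD);
}
#endif
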
/*---------------------------------------------------------------------------*/
/* Definition of internal functions */
/*---------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------*/
/* Definition of static functions */
/*---------------------------------------------------------------------------*/

/**Function********************************************************************

  Synopsis    [Performs the recursive step of Cudd_addMatrixMultiply.]

  Description [Performs the recursive step of Cudd_addMatrixMultiply.
  Returns a pointer to the result if successful; NULL otherwise.]

  SideEffects [None]

******************************************************************************/
static DdNode *
addMMRecur(
  DdManager * dd,
  DdNode * A,
  DdNode * B,
  int  topP,
  int * vars)
{
    DdNode *zero,
           *At,         /* positive cofactor of first operand */
           *Ae,         /* negative cofactor of first operand */
           *Bt,         /* positive cofactor of second operand */
           *Be,         /* negative cofactor of second operand */
           *t,          /* positive cofactor of result */
           *e,          /* negative cofactor of result */
           *scaled,     /* scaled result */
           *add_scale,  /* ADD representing the scaling factor */
           *res;
    int i;              /* loop index */
    double scale;       /* scaling factor */
    int index;          /* index of the top variable */
    CUDD_VALUE_TYPE value;
    unsigned int topA, topB, topV;
    DD_CTFP cacheOp;

    statLine(dd);
    zero = DD_ZERO(dd);

    if (A == zero || B == zero) {
        return(zero);
    }

    if (cuddIsConstant(A) && cuddIsConstant(B)) {
        /* Compute the scaling factor.  It is 2^k, where k is the
        ** number of summation variables below the current variable.
        ** Indeed, these constants represent blocks of 2^k identical
        ** constant values in both A and B.
        */
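        /* Worked example (added note, illustrative only): with two summation
        ** variables below this level and leaves cuddV(A) = 3, cuddV(B) = 5,
        ** the dot product sums 3*5 over the 2^2 = 4 assignments to those z
        ** variables, so the constant returned here is 15 * 4 = 60.
        */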
        value = cuddV(A) * cuddV(B);
        for (i = 0; i < dd->size; i++) {
            if (vars[i]) {
                if (dd->perm[i] > topP) {
                    value *= (CUDD_VALUE_TYPE) 2;
                }
            }
        }
        res = cuddUniqueConst(dd, value);
        return(res);
    }

    /* Standardize to increase cache efficiency.  Clearly, A*B != B*A
    ** in matrix multiplication.  However, which matrix is which is
    ** determined by the variables appearing in the ADDs and not by
    ** which one is passed as first argument.
    */
    if (A > B) {
        DdNode *tmp = A;
        A = B;
        B = tmp;
    }

    topA = cuddI(dd,A->index); topB = cuddI(dd,B->index);
    topV = ddMin(topA,topB);

    cacheOp = (DD_CTFP) addMMRecur;
    res = cuddCacheLookup2(dd,cacheOp,A,B);
    if (res != NULL) {
        /* If the result is 0, there is no need to normalize.
        ** Otherwise we count the number of z variables between
        ** the current depth and the top of the ADDs.  These are
        ** the missing variables that determine the size of the
        ** constant blocks.
        */
        if (res == zero) return(res);
        scale = 1.0;
        for (i = 0; i < dd->size; i++) {
            if (vars[i]) {
                if (dd->perm[i] > topP && (unsigned) dd->perm[i] < topV) {
                    scale *= 2;
                }
            }
        }
        if (scale > 1.0) {
            cuddRef(res);
            add_scale = cuddUniqueConst(dd,(CUDD_VALUE_TYPE)scale);
            if (add_scale == NULL) {
                Cudd_RecursiveDeref(dd, res);
                return(NULL);
            }
            cuddRef(add_scale);
            scaled = cuddAddApplyRecur(dd,Cudd_addTimes,res,add_scale);
            if (scaled == NULL) {
                Cudd_RecursiveDeref(dd, add_scale);
                Cudd_RecursiveDeref(dd, res);
                return(NULL);
            }
            cuddRef(scaled);
            Cudd_RecursiveDeref(dd, add_scale);
            Cudd_RecursiveDeref(dd, res);
            res = scaled;
            cuddDeref(res);
        }
        return(res);
    }

    /* compute the cofactors */
    if (topV == topA) {
        At = cuddT(A);
        Ae = cuddE(A);
    } else {
        At = Ae = A;
    }
    if (topV == topB) {
        Bt = cuddT(B);
        Be = cuddE(B);
    } else {
        Bt = Be = B;
    }

    t = addMMRecur(dd, At, Bt, (int)topV, vars);
    if (t == NULL) return(NULL);
    cuddRef(t);
    e = addMMRecur(dd, Ae, Be, (int)topV, vars);
    if (e == NULL) {
        Cudd_RecursiveDeref(dd, t);
        return(NULL);
    }
    cuddRef(e);

    index = dd->invperm[topV];
    if (vars[index] == 0) {
        /* We have split on either the rows of A or the columns
        ** of B.  We just need to connect the two subresults,
        ** which correspond to two submatrices of the result.
        */
        res = (t == e) ? t : cuddUniqueInter(dd,index,t,e);
        if (res == NULL) {
            Cudd_RecursiveDeref(dd, t);
            Cudd_RecursiveDeref(dd, e);
            return(NULL);
        }
        cuddRef(res);
        cuddDeref(t);
        cuddDeref(e);
    } else {
        /* We have simultaneously split on the columns of A and
        ** the rows of B.  The two subresults must be added.
        */
        res = cuddAddApplyRecur(dd,Cudd_addPlus,t,e);
        if (res == NULL) {
            Cudd_RecursiveDeref(dd, t);
            Cudd_RecursiveDeref(dd, e);
            return(NULL);
        }
        cuddRef(res);
        Cudd_RecursiveDeref(dd, t);
        Cudd_RecursiveDeref(dd, e);
    }

    cuddCacheInsert2(dd,cacheOp,A,B,res);

    /* We have computed (and stored in the computed table) a minimal
    ** result; that is, a result that assumes no summation variables
    ** between the current depth of the recursion and its top
    ** variable.  We now take into account the z variables by properly
    ** scaling the result.
    */
    if (res != zero) {
        scale = 1.0;
        for (i = 0; i < dd->size; i++) {
            if (vars[i]) {
                if (dd->perm[i] > topP && (unsigned) dd->perm[i] < topV) {
                    scale *= 2;
                }
            }
        }
        if (scale > 1.0) {
            add_scale = cuddUniqueConst(dd,(CUDD_VALUE_TYPE)scale);
            if (add_scale == NULL) {
                Cudd_RecursiveDeref(dd, res);
                return(NULL);
            }
            cuddRef(add_scale);
            scaled = cuddAddApplyRecur(dd,Cudd_addTimes,res,add_scale);
            if (scaled == NULL) {
                Cudd_RecursiveDeref(dd, res);
                Cudd_RecursiveDeref(dd, add_scale);
                return(NULL);
            }
            cuddRef(scaled);
            Cudd_RecursiveDeref(dd, add_scale);
            Cudd_RecursiveDeref(dd, res);
            res = scaled;
        }
    }
    cuddDeref(res);
    return(res);

} /* end of addMMRecur */

/**Function********************************************************************

  Synopsis    [Performs the recursive step of Cudd_addTriangle.]

  Description [Performs the recursive step of Cudd_addTriangle.  Returns
  a pointer to the result if successful; NULL otherwise.]

  SideEffects [None]

******************************************************************************/
static DdNode *
addTriangleRecur(
  DdManager * dd,
  DdNode * f,
  DdNode * g,
  int * vars,
  DdNode *cube)
{
    DdNode *fv, *fvn, *gv, *gvn, *t, *e, *res;
    CUDD_VALUE_TYPE value;
    int top, topf, topg, index;

    statLine(dd);
    if (f == DD_PLUS_INFINITY(dd) || g == DD_PLUS_INFINITY(dd)) {
        return(DD_PLUS_INFINITY(dd));
    }

    if (cuddIsConstant(f) && cuddIsConstant(g)) {
        value = cuddV(f) + cuddV(g);
        res = cuddUniqueConst(dd, value);
        return(res);
    }
    if (f < g) {
        DdNode *tmp = f;
        f = g;
        g = tmp;
    }

    if (f->ref != 1 || g->ref != 1) {
        res = cuddCacheLookup(dd, DD_ADD_TRIANGLE_TAG, f, g, cube);
        if (res != NULL) {
            return(res);
        }
    }

    topf = cuddI(dd,f->index); topg = cuddI(dd,g->index);
    top = ddMin(topf,topg);
    if (top == topf) {fv = cuddT(f); fvn = cuddE(f);} else {fv = fvn = f;}
    if (top == topg) {gv = cuddT(g); gvn = cuddE(g);} else {gv = gvn = g;}

    t = addTriangleRecur(dd, fv, gv, vars, cube);
    if (t == NULL) return(NULL);
    cuddRef(t);
    e = addTriangleRecur(dd, fvn, gvn, vars, cube);
    if (e == NULL) {
        Cudd_RecursiveDeref(dd, t);
        return(NULL);
    }
    cuddRef(e);

    index = dd->invperm[top];
    if (vars[index] < 0) {
        res = (t == e) ? t : cuddUniqueInter(dd,index,t,e);
        if (res == NULL) {
            Cudd_RecursiveDeref(dd, t);
            Cudd_RecursiveDeref(dd, e);
            return(NULL);
        }
        cuddDeref(t);
        cuddDeref(e);
    } else {
        res = cuddAddApplyRecur(dd,Cudd_addMinimum,t,e);
        if (res == NULL) {
            Cudd_RecursiveDeref(dd, t);
            Cudd_RecursiveDeref(dd, e);
            return(NULL);
        }
        cuddRef(res);
        Cudd_RecursiveDeref(dd, t);
        Cudd_RecursiveDeref(dd, e);
        cuddDeref(res);
    }

    if (f->ref != 1 || g->ref != 1) {
        cuddCacheInsert(dd, DD_ADD_TRIANGLE_TAG, f, g, cube, res);
    }
    return(res);

} /* end of addTriangleRecur */

/**Function********************************************************************

  Synopsis    [Performs the recursive step of Cudd_addOuterSum.]

  Description [Performs the recursive step of Cudd_addOuterSum.
  Returns a pointer to the result if successful; NULL otherwise.]

  SideEffects [None]

  SeeAlso     []

******************************************************************************/
static DdNode *
cuddAddOuterSumRecur(
  DdManager *dd,
  DdNode *M,
  DdNode *r,
  DdNode *c)
{
    DdNode *P, *R, *Mt, *Me, *rt, *re, *ct, *ce, *Rt, *Re;
    int topM, topc, topr;
    int v, index;

    statLine(dd);
    /* Check special cases. */
    if (r == DD_PLUS_INFINITY(dd) || c == DD_PLUS_INFINITY(dd)) return(M);

    if (cuddIsConstant(c) && cuddIsConstant(r)) {
        R = cuddUniqueConst(dd,Cudd_V(c)+Cudd_V(r));
        cuddRef(R);
        if (cuddIsConstant(M)) {
            if (cuddV(R) <= cuddV(M)) {
                cuddDeref(R);
                return(R);
            } else {
                Cudd_RecursiveDeref(dd,R);
                return(M);
            }
        } else {
            P = Cudd_addApply(dd,Cudd_addMinimum,R,M);
            cuddRef(P);
            Cudd_RecursiveDeref(dd,R);
            cuddDeref(P);
            return(P);
        }
    }

    /* Check the cache. */
    R = cuddCacheLookup(dd,DD_ADD_OUT_SUM_TAG,M,r,c);
    if (R != NULL) return(R);

    topM = cuddI(dd,M->index); topr = cuddI(dd,r->index);
    topc = cuddI(dd,c->index);
    v = ddMin(topM,ddMin(topr,topc));

    /* Compute cofactors. */
    if (topM == v) { Mt = cuddT(M); Me = cuddE(M); } else { Mt = Me = M; }
    if (topr == v) { rt = cuddT(r); re = cuddE(r); } else { rt = re = r; }
    if (topc == v) { ct = cuddT(c); ce = cuddE(c); } else { ct = ce = c; }

    /* Recursively solve. */
    Rt = cuddAddOuterSumRecur(dd,Mt,rt,ct);
    if (Rt == NULL) return(NULL);
    cuddRef(Rt);
    Re = cuddAddOuterSumRecur(dd,Me,re,ce);
    if (Re == NULL) {
        Cudd_RecursiveDeref(dd, Rt);
        return(NULL);
    }
    cuddRef(Re);

    index = dd->invperm[v];
    R = (Rt == Re) ? Rt : cuddUniqueInter(dd,index,Rt,Re);
    if (R == NULL) {
        Cudd_RecursiveDeref(dd, Rt);
        Cudd_RecursiveDeref(dd, Re);
        return(NULL);
    }
    cuddDeref(Rt);
    cuddDeref(Re);

    /* Store the result in the cache. */
    cuddCacheInsert(dd,DD_ADD_OUT_SUM_TAG,M,r,c,R);
    return(R);

} /* end of cuddAddOuterSumRecur */