The source code and dockerfile for the GSW2024 AI Lab.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
This repo is archived. You can view files and clone it, but cannot push or open issues/pull-requests.

2326 lines
56 KiB

2 months ago
  1. /**
  2. @file
  3. @ingroup cudd
  4. @brief Generalized cofactors for BDDs and ADDs.
  5. @author Fabio Somenzi
  6. @copyright@parblock
  7. Copyright (c) 1995-2015, Regents of the University of Colorado
  8. All rights reserved.
  9. Redistribution and use in source and binary forms, with or without
  10. modification, are permitted provided that the following conditions
  11. are met:
  12. Redistributions of source code must retain the above copyright
  13. notice, this list of conditions and the following disclaimer.
  14. Redistributions in binary form must reproduce the above copyright
  15. notice, this list of conditions and the following disclaimer in the
  16. documentation and/or other materials provided with the distribution.
  17. Neither the name of the University of Colorado nor the names of its
  18. contributors may be used to endorse or promote products derived from
  19. this software without specific prior written permission.
  20. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  21. "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  22. LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  23. FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  24. COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  25. INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  26. BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  27. LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  28. CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  29. LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
  30. ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  31. POSSIBILITY OF SUCH DAMAGE.
  32. @endparblock
  33. */
  34. #include "util.h"
  35. #include "cuddInt.h"
  36. /*---------------------------------------------------------------------------*/
  37. /* Constant declarations */
  38. /*---------------------------------------------------------------------------*/
  39. /* Codes for edge markings in Cudd_bddLICompaction. The codes are defined
  40. ** so that they can be bitwise ORed to implement the code priority scheme.
  41. */
  42. #define DD_LIC_DC 0
  43. #define DD_LIC_1 1
  44. #define DD_LIC_0 2
  45. #define DD_LIC_NL 3
  46. /*---------------------------------------------------------------------------*/
  47. /* Stucture declarations */
  48. /*---------------------------------------------------------------------------*/
  49. /*---------------------------------------------------------------------------*/
  50. /* Type declarations */
  51. /*---------------------------------------------------------------------------*/
/** Key for the cache used in the edge marking phase.
**  Pairs the two operands of a marking subproblem (presumably the
**  function being minimized and the care set, matching the arguments
**  of cuddBddLICMarkEdges -- confirm against its callers). */
typedef struct MarkCacheKey {
    DdNode *f;  /* first operand of the cached subproblem */
    DdNode *c;  /* second operand (constraint) */
} MarkCacheKey;
  57. /*---------------------------------------------------------------------------*/
  58. /* Variable declarations */
  59. /*---------------------------------------------------------------------------*/
  60. /*---------------------------------------------------------------------------*/
  61. /* Macro declarations */
  62. /*---------------------------------------------------------------------------*/
  63. /** \cond */
  64. /*---------------------------------------------------------------------------*/
  65. /* Static function prototypes */
  66. /*---------------------------------------------------------------------------*/
  67. static int cuddBddConstrainDecomp (DdManager *dd, DdNode *f, DdNode **decomp);
  68. static DdNode * cuddBddCharToVect (DdManager *dd, DdNode *f, DdNode *x);
  69. static int cuddBddLICMarkEdges (DdManager *dd, DdNode *f, DdNode *c, st_table *table, st_table *cache);
  70. static DdNode * cuddBddLICBuildResult (DdManager *dd, DdNode *f, st_table *cache, st_table *table);
  71. static int MarkCacheHash (void const *ptr, int modulus);
  72. static int MarkCacheCompare (const void *ptr1, const void *ptr2);
  73. static enum st_retval MarkCacheCleanUp (void *key, void *value, void *arg);
  74. static DdNode * cuddBddSqueeze (DdManager *dd, DdNode *l, DdNode *u);
  75. static DdNode * cuddBddInterpolate (DdManager * dd, DdNode * l, DdNode * u);
  76. /** \endcond */
  77. /*---------------------------------------------------------------------------*/
  78. /* Definition of exported functions */
  79. /*---------------------------------------------------------------------------*/
  80. /**
  81. @brief Computes f constrain c.
  82. @details Computes f constrain c (f @ c).
  83. Uses a canonical form: (f' @ c) = (f @ c)'. (Note: this is not true
  84. for c.) List of special cases:
  85. <ul>
  86. <li> f @ 0 = 0
  87. <li> f @ 1 = f
  88. <li> 0 @ c = 0
  89. <li> 1 @ c = 1
  90. <li> f @ f = 1
  91. <li> f @ f'= 0
  92. </ul>
  93. Note that if F=(f1,...,fn) and reordering takes place while computing F @ c,
  94. then the image restriction property (Img(F,c) = Img(F @ c)) is lost.
  95. @return a pointer to the result if successful; NULL otherwise.
  96. @sideeffect None
  97. @see Cudd_bddRestrict Cudd_addConstrain
  98. */
  99. DdNode *
  100. Cudd_bddConstrain(
  101. DdManager * dd,
  102. DdNode * f,
  103. DdNode * c)
  104. {
  105. DdNode *res;
  106. do {
  107. dd->reordered = 0;
  108. res = cuddBddConstrainRecur(dd,f,c);
  109. } while (dd->reordered == 1);
  110. if (dd->errorCode == CUDD_TIMEOUT_EXPIRED && dd->timeoutHandler) {
  111. dd->timeoutHandler(dd, dd->tohArg);
  112. }
  113. return(res);
  114. } /* end of Cudd_bddConstrain */
/**
  @brief %BDD restrict according to Coudert and Madre's algorithm
  (ICCAD90).

  @details If application of restrict results in a %BDD larger than the
  input %BDD, the input %BDD is returned.

  @return the restricted %BDD if successful; otherwise NULL.

  @sideeffect None

  @see Cudd_bddConstrain Cudd_addRestrict

*/
DdNode *
Cudd_bddRestrict(
  DdManager * dd,
  DdNode * f,
  DdNode * c)
{
    DdNode *suppF, *suppC, *commonSupport;
    DdNode *cplus, *res;
    int retval;
    int sizeF, sizeRes;

    /* Check terminal cases here to avoid computing supports in trivial cases.
    ** This also allows us not to check later for the case c == 0, in which
    ** there is no common support. */
    if (c == Cudd_Not(DD_ONE(dd))) return(Cudd_Not(DD_ONE(dd)));
    if (Cudd_IsConstantInt(f)) return(f);
    if (f == c) return(DD_ONE(dd));
    if (f == Cudd_Not(c)) return(Cudd_Not(DD_ONE(dd)));
    /* Check if supports intersect. */
    retval = Cudd_ClassifySupport(dd,f,c,&commonSupport,&suppF,&suppC);
    if (retval == 0) {
        return(NULL);
    }
    cuddRef(commonSupport); cuddRef(suppF); cuddRef(suppC);
    /* suppF (variables only in f) is not needed; release it at once. */
    Cudd_IterDerefBdd(dd,suppF);
    if (commonSupport == DD_ONE(dd)) {
        /* Disjoint supports: restrict cannot simplify f. */
        Cudd_IterDerefBdd(dd,commonSupport);
        Cudd_IterDerefBdd(dd,suppC);
        return(f);
    }
    Cudd_IterDerefBdd(dd,commonSupport);
    /* Abstract from c the variables that do not appear in f. */
    cplus = Cudd_bddExistAbstract(dd, c, suppC);
    if (cplus == NULL) {
        Cudd_IterDerefBdd(dd,suppC);
        return(NULL);
    }
    cuddRef(cplus);
    Cudd_IterDerefBdd(dd,suppC);
    /* Restart the recursion if dynamic reordering strikes. */
    do {
        dd->reordered = 0;
        res = cuddBddRestrictRecur(dd, f, cplus);
    } while (dd->reordered == 1);
    if (res == NULL) {
        Cudd_IterDerefBdd(dd,cplus);
        if (dd->errorCode == CUDD_TIMEOUT_EXPIRED && dd->timeoutHandler) {
            dd->timeoutHandler(dd, dd->tohArg);
        }
        return(NULL);
    }
    cuddRef(res);
    Cudd_IterDerefBdd(dd,cplus);
    /* Make restrict safe by returning the smaller of the input and the
    ** result. */
    sizeF = Cudd_DagSize(f);
    sizeRes = Cudd_DagSize(res);
    if (sizeF <= sizeRes) {
        Cudd_IterDerefBdd(dd, res);
        return(f);
    } else {
        /* Hand back res unreferenced, per the package convention that
        ** the caller references the returned node. */
        cuddDeref(res);
        return(res);
    }

} /* end of Cudd_bddRestrict */
  187. /**
  188. @brief Computes f non-polluting-and g.
  189. @details The non-polluting AND of f and g is a hybrid of AND and
  190. Restrict. From Restrict, this operation takes the idea of
  191. existentially quantifying the top variable of the second operand if
  192. it does not appear in the first. Therefore, the variables that
  193. appear in the result also appear in f. For the rest, the function
  194. behaves like AND. Since the two operands play different roles,
  195. non-polluting AND is not commutative.
  196. @return a pointer to the result if successful; NULL otherwise.
  197. @sideeffect None
  198. @see Cudd_bddConstrain Cudd_bddRestrict
  199. */
  200. DdNode *
  201. Cudd_bddNPAnd(
  202. DdManager * dd,
  203. DdNode * f,
  204. DdNode * g)
  205. {
  206. DdNode *res;
  207. do {
  208. dd->reordered = 0;
  209. res = cuddBddNPAndRecur(dd,f,g);
  210. } while (dd->reordered == 1);
  211. if (dd->errorCode == CUDD_TIMEOUT_EXPIRED && dd->timeoutHandler) {
  212. dd->timeoutHandler(dd, dd->tohArg);
  213. }
  214. return(res);
  215. } /* end of Cudd_bddNPAnd */
  216. /**
  217. @brief Computes f constrain c for ADDs.
  218. @details Computes f constrain c (f @ c), for f an %ADD and c a 0-1
  219. %ADD. List of special cases:
  220. <ul>
  221. <li> F @ 0 = 0
  222. <li> F @ 1 = F
  223. <li> 0 @ c = 0
  224. <li> 1 @ c = 1
  225. <li> F @ F = 1
  226. </ul>
  227. @return a pointer to the result if successful; NULL otherwise.
  228. @sideeffect None
  229. @see Cudd_bddConstrain
  230. */
  231. DdNode *
  232. Cudd_addConstrain(
  233. DdManager * dd,
  234. DdNode * f,
  235. DdNode * c)
  236. {
  237. DdNode *res;
  238. do {
  239. dd->reordered = 0;
  240. res = cuddAddConstrainRecur(dd,f,c);
  241. } while (dd->reordered == 1);
  242. if (dd->errorCode == CUDD_TIMEOUT_EXPIRED && dd->timeoutHandler) {
  243. dd->timeoutHandler(dd, dd->tohArg);
  244. }
  245. return(res);
  246. } /* end of Cudd_addConstrain */
/**
  @brief %BDD conjunctive decomposition as in McMillan's CAV96 paper.

  @details The decomposition is canonical only for a given variable
  order. If canonicity is required, variable ordering must be disabled
  after the decomposition has been computed. The components of the
  solution have their reference counts already incremented (unlike the
  results of most other functions in the package).

  @return an array with one entry for each %BDD variable in the manager
  if successful; otherwise NULL.

  @sideeffect None

  @see Cudd_bddConstrain Cudd_bddExistAbstract

*/
DdNode **
Cudd_bddConstrainDecomp(
  DdManager * dd,
  DdNode * f)
{
    DdNode **decomp;
    int res;
    int i;

    /* Create and initialize decomposition array. */
    decomp = ALLOC(DdNode *,dd->size);
    if (decomp == NULL) {
        dd->errorCode = CUDD_MEMORY_OUT;
        return(NULL);
    }
    for (i = 0; i < dd->size; i++) {
        decomp[i] = NULL;
    }
    do {
        dd->reordered = 0;
        /* Clean up the decomposition array in case reordering took place:
        ** components built in an aborted attempt must be released before
        ** the recursion is restarted. */
        for (i = 0; i < dd->size; i++) {
            if (decomp[i] != NULL) {
                Cudd_IterDerefBdd(dd, decomp[i]);
                decomp[i] = NULL;
            }
        }
        res = cuddBddConstrainDecomp(dd,f,decomp);
    } while (dd->reordered == 1);
    if (res == 0) {
        FREE(decomp);
        if (dd->errorCode == CUDD_TIMEOUT_EXPIRED && dd->timeoutHandler) {
            dd->timeoutHandler(dd, dd->tohArg);
        }
        return(NULL);
    }
    /* Missing components are constant ones. */
    for (i = 0; i < dd->size; i++) {
        if (decomp[i] == NULL) {
            decomp[i] = DD_ONE(dd);
            cuddRef(decomp[i]);  /* returned components carry a reference */
        }
    }
    return(decomp);

} /* end of Cudd_bddConstrainDecomp */
  303. /**
  304. @brief %ADD restrict according to Coudert and Madre's algorithm
  305. (ICCAD90).
  306. @details If application of restrict results in an %ADD larger than
  307. the input %ADD, the input %ADD is returned.
  308. @return the restricted %ADD if successful; otherwise NULL.
  309. @sideeffect None
  310. @see Cudd_addConstrain Cudd_bddRestrict
  311. */
  312. DdNode *
  313. Cudd_addRestrict(
  314. DdManager * dd,
  315. DdNode * f,
  316. DdNode * c)
  317. {
  318. DdNode *supp_f, *supp_c;
  319. DdNode *res, *commonSupport;
  320. int intersection;
  321. int sizeF, sizeRes;
  322. /* Check if supports intersect. */
  323. supp_f = Cudd_Support(dd, f);
  324. if (supp_f == NULL) {
  325. return(NULL);
  326. }
  327. cuddRef(supp_f);
  328. supp_c = Cudd_Support(dd, c);
  329. if (supp_c == NULL) {
  330. Cudd_RecursiveDeref(dd,supp_f);
  331. return(NULL);
  332. }
  333. cuddRef(supp_c);
  334. commonSupport = Cudd_bddLiteralSetIntersection(dd, supp_f, supp_c);
  335. if (commonSupport == NULL) {
  336. Cudd_RecursiveDeref(dd,supp_f);
  337. Cudd_RecursiveDeref(dd,supp_c);
  338. return(NULL);
  339. }
  340. cuddRef(commonSupport);
  341. Cudd_RecursiveDeref(dd,supp_f);
  342. Cudd_RecursiveDeref(dd,supp_c);
  343. intersection = commonSupport != DD_ONE(dd);
  344. Cudd_RecursiveDeref(dd,commonSupport);
  345. if (intersection) {
  346. do {
  347. dd->reordered = 0;
  348. res = cuddAddRestrictRecur(dd, f, c);
  349. } while (dd->reordered == 1);
  350. if (res == 0) {
  351. if (dd->errorCode == CUDD_TIMEOUT_EXPIRED && dd->timeoutHandler) {
  352. dd->timeoutHandler(dd, dd->tohArg);
  353. }
  354. return(f);
  355. }
  356. sizeF = Cudd_DagSize(f);
  357. sizeRes = Cudd_DagSize(res);
  358. if (sizeF <= sizeRes) {
  359. cuddRef(res);
  360. Cudd_RecursiveDeref(dd, res);
  361. return(f);
  362. } else {
  363. return(res);
  364. }
  365. } else {
  366. return(f);
  367. }
  368. } /* end of Cudd_addRestrict */
/**
  @brief Computes a vector of BDDs whose image equals a non-zero function.

  @details
  The result depends on the variable order. The i-th component of the vector
  depends only on the first i variables in the order. Each %BDD in the vector
  is not larger than the %BDD of the given characteristic function. This
  function is based on the description of char-to-vect in "Verification of
  Sequential Machines Using Boolean Functional Vectors" by O. Coudert, C.
  Berthet and J. C. Madre.

  @return a pointer to an array containing the result if successful;
  NULL otherwise. The size of the array equals the number of
  variables in the manager. The components of the solution have their
  reference counts already incremented (unlike the results of most
  other functions in the package).

  @sideeffect None

  @see Cudd_bddConstrain

*/
DdNode **
Cudd_bddCharToVect(
  DdManager * dd,
  DdNode * f)
{
    int i, j;
    DdNode **vect;
    DdNode *res = NULL;

    /* No vector exists for the constant zero function. */
    if (f == Cudd_Not(DD_ONE(dd))) return(NULL);
    vect = ALLOC(DdNode *, dd->size);
    if (vect == NULL) {
        dd->errorCode = CUDD_MEMORY_OUT;
        return(NULL);
    }
    do {
        dd->reordered = 0;
        /* Build one component per variable, in current order. */
        for (i = 0; i < dd->size; i++) {
            res = cuddBddCharToVect(dd,f,dd->vars[dd->invperm[i]]);
            if (res == NULL) {
                /* Clean up the vector array in case reordering took place. */
                for (j = 0; j < i; j++) {
                    Cudd_IterDerefBdd(dd, vect[dd->invperm[j]]);
                }
                break;
            }
            cuddRef(res);  /* components are returned referenced */
            vect[dd->invperm[i]] = res;
        }
    } while (dd->reordered == 1);
    if (res == NULL) {
        FREE(vect);
        if (dd->errorCode == CUDD_TIMEOUT_EXPIRED && dd->timeoutHandler) {
            dd->timeoutHandler(dd, dd->tohArg);
        }
        return(NULL);
    }
    return(vect);

} /* end of Cudd_bddCharToVect */
  424. /**
  425. @brief Performs safe minimization of a %BDD.
  426. @details Given the %BDD `f` of a function to be minimized and a %BDD
  427. `c` representing the care set, Cudd_bddLICompaction produces the
  428. %BDD of a function that agrees with `f` wherever `c` is 1. Safe
  429. minimization means that the size of the result is guaranteed not to
  430. exceed the size of `f`. This function is based on the DAC97 paper by
  431. Hong et al..
  432. @return a pointer to the result if successful; NULL otherwise.
  433. @sideeffect None
  434. @see Cudd_bddRestrict
  435. */
  436. DdNode *
  437. Cudd_bddLICompaction(
  438. DdManager * dd /**< manager */,
  439. DdNode * f /**< function to be minimized */,
  440. DdNode * c /**< constraint (care set) */)
  441. {
  442. DdNode *res;
  443. do {
  444. dd->reordered = 0;
  445. res = cuddBddLICompaction(dd,f,c);
  446. } while (dd->reordered == 1);
  447. if (dd->errorCode == CUDD_TIMEOUT_EXPIRED && dd->timeoutHandler) {
  448. dd->timeoutHandler(dd, dd->tohArg);
  449. }
  450. return(res);
  451. } /* end of Cudd_bddLICompaction */
/**
  @brief Finds a small %BDD in a function interval.

  @details Given BDDs `l` and `u`, representing the lower bound and
  upper bound of a function interval, Cudd_bddSqueeze produces the
  %BDD of a function within the interval with a small %BDD.

  @return a pointer to the result if successful; NULL otherwise.

  @sideeffect None

  @see Cudd_bddRestrict Cudd_bddLICompaction

*/
DdNode *
Cudd_bddSqueeze(
  DdManager * dd /**< manager */,
  DdNode * l /**< lower bound */,
  DdNode * u /**< upper bound */)
{
    DdNode *res;
    int sizeRes, sizeL, sizeU;

    do {
        dd->reordered = 0;
        res = cuddBddSqueeze(dd,l,u);
    } while (dd->reordered == 1);
    if (res == NULL) {
        if (dd->errorCode == CUDD_TIMEOUT_EXPIRED && dd->timeoutHandler) {
            dd->timeoutHandler(dd, dd->tohArg);
        }
        return(NULL);
    }
    /* We now compare the result with the bounds and return the smallest.
    ** We first compare to u, so that in case l == 0 and u == 1, we return
    ** 0 as in other minimization algorithms. */
    sizeRes = Cudd_DagSize(res);
    sizeU = Cudd_DagSize(u);
    if (sizeU <= sizeRes) {
        /* res is unreferenced; ref then recursively deref to dispose of it. */
        cuddRef(res);
        Cudd_IterDerefBdd(dd,res);
        res = u;
        sizeRes = sizeU;
    }
    sizeL = Cudd_DagSize(l);
    if (sizeL <= sizeRes) {
        /* Same disposal idiom; note res may already be u here, in which
        ** case the ref/deref pair is a no-op on the returned bound. */
        cuddRef(res);
        Cudd_IterDerefBdd(dd,res);
        res = l;
    }
    return(res);

} /* end of Cudd_bddSqueeze */
  498. /**
  499. @brief Finds an interpolant of two functions.
  500. @details Given BDDs `l` and `u`, representing the lower bound and
  501. upper bound of a function interval, Cudd_bddInterpolate produces the
  502. %BDD of a function within the interval that only depends on the
  503. variables common to `l` and `u`.
  504. The approach is based on quantification as in Cudd_bddRestrict().
  505. The function assumes that `l` implies `u`, but does not check whether
  506. that's true.
  507. @return a pointer to the result if successful; NULL otherwise.
  508. @sideeffect None
  509. @see Cudd_bddRestrict Cudd_bddSqueeze
  510. */
  511. DdNode *
  512. Cudd_bddInterpolate(
  513. DdManager * dd /**< manager */,
  514. DdNode * l /**< lower bound */,
  515. DdNode * u /**< upper bound */)
  516. {
  517. DdNode *res;
  518. do {
  519. dd->reordered = 0;
  520. res = cuddBddInterpolate(dd,l,u);
  521. } while (dd->reordered == 1);
  522. if (dd->errorCode == CUDD_TIMEOUT_EXPIRED && dd->timeoutHandler) {
  523. dd->timeoutHandler(dd, dd->tohArg);
  524. }
  525. return(res);
  526. } /* end of Cudd_bddInterpolate */
/**
  @brief Finds a small %BDD that agrees with `f` over `c`.

  @return a pointer to the result if successful; NULL otherwise.

  @sideeffect None

  @see Cudd_bddRestrict Cudd_bddLICompaction Cudd_bddSqueeze

*/
DdNode *
Cudd_bddMinimize(
  DdManager * dd,
  DdNode * f,
  DdNode * c)
{
    DdNode *cplus, *res;

    /* Trivial cases. */
    if (c == Cudd_Not(DD_ONE(dd))) return(c);
    if (Cudd_IsConstantInt(f)) return(f);
    if (f == c) return(DD_ONE(dd));
    if (f == Cudd_Not(c)) return(Cudd_Not(DD_ONE(dd)));

    /* Over-approximate the care set; any superset of c is still a
    ** valid care set for LI compaction. */
    cplus = Cudd_RemapOverApprox(dd,c,0,0,1.0);
    if (cplus == NULL) return(NULL);
    cuddRef(cplus);
    res = Cudd_bddLICompaction(dd,f,cplus);
    if (res == NULL) {
        Cudd_IterDerefBdd(dd,cplus);
        return(NULL);
    }
    /* Protect res while cplus is released, then return it unreferenced
    ** per the package convention. */
    cuddRef(res);
    Cudd_IterDerefBdd(dd,cplus);
    cuddDeref(res);
    return(res);

} /* end of Cudd_bddMinimize */
/**
  @brief Find a dense subset of %BDD `f`.

  @details Density is the ratio of number of minterms to number of
  nodes. Uses several techniques in series. It is more expensive than
  other subsetting procedures, but often produces better results. See
  Cudd_SubsetShortPaths for a description of the threshold and nvars
  parameters.

  @return a pointer to the result if successful; NULL otherwise.

  @sideeffect None

  @see Cudd_RemapUnderApprox Cudd_SubsetShortPaths
  Cudd_SubsetHeavyBranch Cudd_bddSqueeze

*/
DdNode *
Cudd_SubsetCompress(
  DdManager * dd /**< manager */,
  DdNode * f /**< %BDD whose subset is sought */,
  int nvars /**< number of variables in the support of f */,
  int threshold /**< maximum number of nodes in the subset */)
{
    DdNode *res, *tmp1, *tmp2;

    /* Stage 1: short-path subsetting. */
    tmp1 = Cudd_SubsetShortPaths(dd, f, nvars, threshold, 0);
    if (tmp1 == NULL) return(NULL);
    cuddRef(tmp1);
    /* Stage 2: remap-based under-approximation of the stage-1 result. */
    tmp2 = Cudd_RemapUnderApprox(dd,tmp1,nvars,0,0.95);
    if (tmp2 == NULL) {
        Cudd_IterDerefBdd(dd,tmp1);
        return(NULL);
    }
    cuddRef(tmp2);
    Cudd_IterDerefBdd(dd,tmp1);
    /* Stage 3: squeeze a small BDD in the interval [tmp2, f]. */
    res = Cudd_bddSqueeze(dd,tmp2,f);
    if (res == NULL) {
        Cudd_IterDerefBdd(dd,tmp2);
        return(NULL);
    }
    cuddRef(res);
    Cudd_IterDerefBdd(dd,tmp2);
    cuddDeref(res);  /* returned unreferenced, per package convention */
    return(res);

} /* end of Cudd_SubsetCompress */
  597. /**
  598. @brief Find a dense superset of %BDD `f`.
  599. @details Density is the ratio of number of minterms to number of
  600. nodes. Uses several techniques in series. It is more expensive than
  601. other supersetting procedures, but often produces better
  602. results. See Cudd_SupersetShortPaths for a description of the
  603. threshold and nvars parameters.
  604. @return a pointer to the result if successful; NULL otherwise.
  605. @sideeffect None
  606. @see Cudd_SubsetCompress Cudd_SupersetRemap Cudd_SupersetShortPaths
  607. Cudd_SupersetHeavyBranch Cudd_bddSqueeze
  608. */
  609. DdNode *
  610. Cudd_SupersetCompress(
  611. DdManager * dd /**< manager */,
  612. DdNode * f /**< %BDD whose superset is sought */,
  613. int nvars /**< number of variables in the support of f */,
  614. int threshold /**< maximum number of nodes in the superset */)
  615. {
  616. DdNode *subset;
  617. subset = Cudd_SubsetCompress(dd, Cudd_Not(f),nvars,threshold);
  618. return(Cudd_NotCond(subset, (subset != NULL)));
  619. } /* end of Cudd_SupersetCompress */
  620. /*---------------------------------------------------------------------------*/
  621. /* Definition of internal functions */
  622. /*---------------------------------------------------------------------------*/
/**
  @brief Performs the recursive step of Cudd_bddConstrain.

  @return a pointer to the result if successful; NULL otherwise.

  @sideeffect None

  @see Cudd_bddConstrain

*/
DdNode *
cuddBddConstrainRecur(
  DdManager * dd,
  DdNode * f,
  DdNode * c)
{
    DdNode *Fv, *Fnv, *Cv, *Cnv, *t, *e, *r;
    DdNode *one, *zero;
    int topf, topc;
    unsigned int index;
    int comple = 0;

    statLine(dd);
    one = DD_ONE(dd);
    zero = Cudd_Not(one);

    /* Trivial cases. */
    if (c == one) return(f);
    if (c == zero) return(zero);
    if (Cudd_IsConstantInt(f)) return(f);
    if (f == c) return(one);
    if (f == Cudd_Not(c)) return(zero);

    /* Make canonical to increase the utilization of the cache.
    ** Exploits (f' @ c) = (f @ c)': work on the regular f and
    ** complement the result at the end if needed. */
    if (Cudd_IsComplement(f)) {
        f = Cudd_Not(f);
        comple = 1;
    }
    /* Now f is a regular pointer to a non-constant node; c is also
    ** non-constant, but may be complemented.
    */

    /* Check the cache. */
    r = cuddCacheLookup2(dd, Cudd_bddConstrain, f, c);
    if (r != NULL) {
        return(Cudd_NotCond(r,comple));
    }

    checkWhetherToGiveUp(dd);

    /* Recursive step. Cofactor whichever operand(s) have the top
    ** variable; the other operand is unchanged in both branches. */
    topf = dd->perm[f->index];
    topc = dd->perm[Cudd_Regular(c)->index];
    if (topf <= topc) {
        index = f->index;
        Fv = cuddT(f); Fnv = cuddE(f);
    } else {
        index = Cudd_Regular(c)->index;
        Fv = Fnv = f;
    }
    if (topc <= topf) {
        Cv = cuddT(Cudd_Regular(c)); Cnv = cuddE(Cudd_Regular(c));
        if (Cudd_IsComplement(c)) {
            Cv = Cudd_Not(Cv);
            Cnv = Cudd_Not(Cnv);
        }
    } else {
        Cv = Cnv = c;
    }

    if (!Cudd_IsConstantInt(Cv)) {
        t = cuddBddConstrainRecur(dd, Fv, Cv);
        if (t == NULL)
            return(NULL);
    } else if (Cv == one) {
        t = Fv;
    } else { /* Cv == zero: return Fnv @ Cnv */
        if (Cnv == one) {
            r = Fnv;
        } else {
            r = cuddBddConstrainRecur(dd, Fnv, Cnv);
            if (r == NULL)
                return(NULL);
        }
        return(Cudd_NotCond(r,comple));
    }
    cuddRef(t);

    if (!Cudd_IsConstantInt(Cnv)) {
        e = cuddBddConstrainRecur(dd, Fnv, Cnv);
        if (e == NULL) {
            Cudd_IterDerefBdd(dd, t);
            return(NULL);
        }
    } else if (Cnv == one) {
        e = Fnv;
    } else { /* Cnv == zero: return Fv @ Cv previously computed */
        cuddDeref(t);
        return(Cudd_NotCond(t,comple));
    }
    cuddRef(e);

    /* Build the result node; keep the then-child regular so the
    ** resulting BDD stays canonical. */
    if (Cudd_IsComplement(t)) {
        t = Cudd_Not(t);
        e = Cudd_Not(e);
        r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
        if (r == NULL) {
            Cudd_IterDerefBdd(dd, e);
            Cudd_IterDerefBdd(dd, t);
            return(NULL);
        }
        r = Cudd_Not(r);
    } else {
        r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
        if (r == NULL) {
            Cudd_IterDerefBdd(dd, e);
            Cudd_IterDerefBdd(dd, t);
            return(NULL);
        }
    }
    cuddDeref(t);
    cuddDeref(e);

    cuddCacheInsert2(dd, Cudd_bddConstrain, f, c, r);
    return(Cudd_NotCond(r,comple));

} /* end of cuddBddConstrainRecur */
/**
  @brief Performs the recursive step of Cudd_bddRestrict.

  @return the restricted %BDD if successful; otherwise NULL.

  @sideeffect None

  @see Cudd_bddRestrict

*/
DdNode *
cuddBddRestrictRecur(
  DdManager * dd,
  DdNode * f,
  DdNode * c)
{
    DdNode *Fv, *Fnv, *Cv, *Cnv, *t, *e, *r, *one, *zero;
    int topf, topc;
    unsigned int index;
    int comple = 0;

    statLine(dd);
    one = DD_ONE(dd);
    zero = Cudd_Not(one);

    /* Trivial cases */
    if (c == one) return(f);
    if (c == zero) return(zero);
    if (Cudd_IsConstantInt(f)) return(f);
    if (f == c) return(one);
    if (f == Cudd_Not(c)) return(zero);

    /* Make canonical to increase the utilization of the cache.
    ** Uses (f' restrict c) = (f restrict c)'. */
    if (Cudd_IsComplement(f)) {
        f = Cudd_Not(f);
        comple = 1;
    }
    /* Now f is a regular pointer to a non-constant node; c is also
    ** non-constant, but may be complemented.
    */

    /* Check the cache. */
    r = cuddCacheLookup2(dd, Cudd_bddRestrict, f, c);
    if (r != NULL) {
        return(Cudd_NotCond(r,comple));
    }

    checkWhetherToGiveUp(dd);

    topf = dd->perm[f->index];
    topc = dd->perm[Cudd_Regular(c)->index];
    if (topc < topf) { /* abstract top variable from c */
        DdNode *d, *s1, *s2;

        /* Find complements of cofactors of c. */
        if (Cudd_IsComplement(c)) {
            s1 = cuddT(Cudd_Regular(c));
            s2 = cuddE(Cudd_Regular(c));
        } else {
            s1 = Cudd_Not(cuddT(c));
            s2 = Cudd_Not(cuddE(c));
        }
        /* Take the OR by applying DeMorgan: ex(c) = (c_T' AND c_E')'. */
        d = cuddBddAndRecur(dd, s1, s2);
        if (d == NULL) return(NULL);
        d = Cudd_Not(d);
        cuddRef(d);
        r = cuddBddRestrictRecur(dd, f, d);
        if (r == NULL) {
            Cudd_IterDerefBdd(dd, d);
            return(NULL);
        }
        cuddRef(r);
        Cudd_IterDerefBdd(dd, d);
        /* Cache under the original (f,c), not the abstracted care set. */
        cuddCacheInsert2(dd, Cudd_bddRestrict, f, c, r);
        cuddDeref(r);
        return(Cudd_NotCond(r,comple));
    }

    /* Recursive step. Here topf <= topc. */
    index = f->index;
    Fv = cuddT(f); Fnv = cuddE(f);
    if (topc == topf) {
        Cv = cuddT(Cudd_Regular(c)); Cnv = cuddE(Cudd_Regular(c));
        if (Cudd_IsComplement(c)) {
            Cv = Cudd_Not(Cv);
            Cnv = Cudd_Not(Cnv);
        }
    } else {
        Cv = Cnv = c;
    }

    if (!Cudd_IsConstantInt(Cv)) {
        t = cuddBddRestrictRecur(dd, Fv, Cv);
        if (t == NULL) return(NULL);
    } else if (Cv == one) {
        t = Fv;
    } else { /* Cv == zero: return(Fnv @ Cnv) */
        if (Cnv == one) {
            r = Fnv;
        } else {
            r = cuddBddRestrictRecur(dd, Fnv, Cnv);
            if (r == NULL) return(NULL);
        }
        return(Cudd_NotCond(r,comple));
    }
    cuddRef(t);

    if (!Cudd_IsConstantInt(Cnv)) {
        e = cuddBddRestrictRecur(dd, Fnv, Cnv);
        if (e == NULL) {
            Cudd_IterDerefBdd(dd, t);
            return(NULL);
        }
    } else if (Cnv == one) {
        e = Fnv;
    } else { /* Cnv == zero: return (Fv @ Cv) previously computed */
        cuddDeref(t);
        return(Cudd_NotCond(t,comple));
    }
    cuddRef(e);

    /* Build the result node; keep the then-child regular so the
    ** resulting BDD stays canonical. */
    if (Cudd_IsComplement(t)) {
        t = Cudd_Not(t);
        e = Cudd_Not(e);
        r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
        if (r == NULL) {
            Cudd_IterDerefBdd(dd, e);
            Cudd_IterDerefBdd(dd, t);
            return(NULL);
        }
        r = Cudd_Not(r);
    } else {
        r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
        if (r == NULL) {
            Cudd_IterDerefBdd(dd, e);
            Cudd_IterDerefBdd(dd, t);
            return(NULL);
        }
    }
    cuddDeref(t);
    cuddDeref(e);

    cuddCacheInsert2(dd, Cudd_bddRestrict, f, c, r);
    return(Cudd_NotCond(r,comple));

} /* end of cuddBddRestrictRecur */
  865. /**
  866. @brief Implements the recursive step of Cudd_bddAnd.
  867. @return a pointer to the result is successful; NULL otherwise.
  868. @sideeffect None
  869. @see Cudd_bddNPAnd
  870. */
DdNode *
cuddBddNPAndRecur(
  DdManager * manager,
  DdNode * f,
  DdNode * g)
{
    DdNode *F, *ft, *fe, *G, *gt, *ge;
    DdNode *one, *r, *t, *e;
    int topf, topg;
    unsigned int index;

    statLine(manager);
    one = DD_ONE(manager);

    /* Terminal cases. */
    F = Cudd_Regular(f);
    G = Cudd_Regular(g);
    if (F == G) {
        /* Same node: f*f = f (here one, since f implies itself);
        ** f * !f = zero. */
        if (f == g) return(one);
        else return(Cudd_Not(one));
    }
    if (G == one) {
        /* g constant: g == one yields f; g == zero yields zero (== g). */
        if (g == one) return(f);
        else return(g);
    }
    if (F == one) {
        /* f constant (one or zero): result is f itself. */
        return(f);
    }

    /* At this point f and g are not constant. */
    /* Check cache.  The cache is bypassed when both operands have
    ** reference count 1: such nodes are unlikely to be looked up again,
    ** so caching them would only pollute the computed table. */
    if (F->ref != 1 || G->ref != 1) {
        r = cuddCacheLookup2(manager, Cudd_bddNPAnd, f, g);
        if (r != NULL) return(r);
    }

    checkWhetherToGiveUp(manager);

    /* Here we can skip the use of cuddI, because the operands are known
    ** to be non-constant.
    */
    topf = manager->perm[F->index];
    topg = manager->perm[G->index];

    if (topg < topf) { /* abstract top variable from g */
        DdNode *d;

        /* Find complements of cofactors of g. */
        if (Cudd_IsComplement(g)) {
            gt = cuddT(G);
            ge = cuddE(G);
        } else {
            gt = Cudd_Not(cuddT(g));
            ge = Cudd_Not(cuddE(g));
        }
        /* Take the OR by applying DeMorgan: OR(gt,ge) = NOT(AND(!gt,!ge)).
        ** This existentially quantifies g's top variable out of g. */
        d = cuddBddAndRecur(manager, gt, ge);
        if (d == NULL) return(NULL);
        d = Cudd_Not(d);
        cuddRef(d);
        r = cuddBddNPAndRecur(manager, f, d);
        if (r == NULL) {
            Cudd_IterDerefBdd(manager, d);
            return(NULL);
        }
        cuddRef(r);
        Cudd_IterDerefBdd(manager, d);
        /* Cache under the original operands, not the abstracted d. */
        cuddCacheInsert2(manager, Cudd_bddNPAnd, f, g, r);
        cuddDeref(r);
        return(r);
    }

    /* Compute cofactors.  Cofactors of a complemented node are the
    ** complements of the children of its regular version. */
    index = F->index;
    ft = cuddT(F);
    fe = cuddE(F);
    if (Cudd_IsComplement(f)) {
        ft = Cudd_Not(ft);
        fe = Cudd_Not(fe);
    }

    if (topg == topf) {
        gt = cuddT(G);
        ge = cuddE(G);
        if (Cudd_IsComplement(g)) {
            gt = Cudd_Not(gt);
            ge = Cudd_Not(ge);
        }
    } else {
        /* g's top variable is below f's: g is constant w.r.t. it. */
        gt = ge = g;
    }

    /* Below the top variable of g the result is a plain conjunction. */
    t = cuddBddAndRecur(manager, ft, gt);
    if (t == NULL) return(NULL);
    cuddRef(t);

    e = cuddBddAndRecur(manager, fe, ge);
    if (e == NULL) {
        Cudd_IterDerefBdd(manager, t);
        return(NULL);
    }
    cuddRef(e);

    if (t == e) {
        r = t;
    } else {
        /* Keep the then-child regular to maintain BDD canonicity. */
        if (Cudd_IsComplement(t)) {
            r = cuddUniqueInter(manager,(int)index,Cudd_Not(t),Cudd_Not(e));
            if (r == NULL) {
                Cudd_IterDerefBdd(manager, t);
                Cudd_IterDerefBdd(manager, e);
                return(NULL);
            }
            r = Cudd_Not(r);
        } else {
            r = cuddUniqueInter(manager,(int)index,t,e);
            if (r == NULL) {
                Cudd_IterDerefBdd(manager, t);
                Cudd_IterDerefBdd(manager, e);
                return(NULL);
            }
        }
    }
    cuddDeref(e);
    cuddDeref(t);
    /* Mirror the lookup policy above: only cache shared operands. */
    if (F->ref != 1 || G->ref != 1)
        cuddCacheInsert2(manager, Cudd_bddNPAnd, f, g, r);
    return(r);
} /* end of cuddBddNPAndRecur */
  988. /**
  989. @brief Performs the recursive step of Cudd_addConstrain.
  990. @return a pointer to the result if successful; NULL otherwise.
  991. @sideeffect None
  992. @see Cudd_addConstrain
  993. */
DdNode *
cuddAddConstrainRecur(
  DdManager * dd,
  DdNode * f,
  DdNode * c)
{
    DdNode *Fv, *Fnv, *Cv, *Cnv, *t, *e, *r;
    DdNode *one, *zero;
    int topf, topc;
    unsigned int index;

    statLine(dd);
    one = DD_ONE(dd);
    zero = DD_ZERO(dd);

    /* Trivial cases. */
    if (c == one) return(f);
    if (c == zero) return(zero);
    if (cuddIsConstant(f)) return(f);
    if (f == c) return(one);

    /* Now f and c are non-constant. */

    /* Check the cache. */
    r = cuddCacheLookup2(dd, Cudd_addConstrain, f, c);
    if (r != NULL) {
        return(r);
    }

    checkWhetherToGiveUp(dd);

    /* Recursive step.  Split on the top variable of f and c.  When a
    ** function does not depend on the splitting variable, both of its
    ** cofactors are the function itself. */
    topf = dd->perm[f->index];
    topc = dd->perm[c->index];
    if (topf <= topc) {
        index = f->index;
        Fv = cuddT(f); Fnv = cuddE(f);
    } else {
        index = c->index;
        Fv = Fnv = f;
    }
    if (topc <= topf) {
        Cv = cuddT(c); Cnv = cuddE(c);
    } else {
        Cv = Cnv = c;
    }

    if (!cuddIsConstant(Cv)) {
        t = cuddAddConstrainRecur(dd, Fv, Cv);
        if (t == NULL)
            return(NULL);
    } else if (Cv == one) {
        t = Fv;
    } else { /* Cv == zero: return Fnv @ Cnv */
        /* The positive branch of the care set is empty, so the whole
        ** result collapses to the else branch (constrain maps the
        ** don't-care half onto the cared-for half). */
        if (Cnv == one) {
            r = Fnv;
        } else {
            r = cuddAddConstrainRecur(dd, Fnv, Cnv);
            if (r == NULL)
                return(NULL);
        }
        return(r);
    }
    cuddRef(t);

    if (!cuddIsConstant(Cnv)) {
        e = cuddAddConstrainRecur(dd, Fnv, Cnv);
        if (e == NULL) {
            Cudd_RecursiveDeref(dd, t);
            return(NULL);
        }
    } else if (Cnv == one) {
        e = Fnv;
    } else { /* Cnv == zero: return Fv @ Cv previously computed */
        cuddDeref(t);
        return(t);
    }
    cuddRef(e);

    r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
    if (r == NULL) {
        Cudd_RecursiveDeref(dd, e);
        Cudd_RecursiveDeref(dd, t);
        return(NULL);
    }
    cuddDeref(t);
    cuddDeref(e);

    cuddCacheInsert2(dd, Cudd_addConstrain, f, c, r);
    return(r);
} /* end of cuddAddConstrainRecur */
  1075. /**
  1076. @brief Performs the recursive step of Cudd_addRestrict.
  1077. @return the restricted %ADD if successful; otherwise NULL.
  1078. @sideeffect None
  1079. @see Cudd_addRestrict
  1080. */
DdNode *
cuddAddRestrictRecur(
  DdManager * dd,
  DdNode * f,
  DdNode * c)
{
    DdNode *Fv, *Fnv, *Cv, *Cnv, *t, *e, *r, *one, *zero;
    int topf, topc;
    unsigned int index;

    statLine(dd);
    one = DD_ONE(dd);
    zero = DD_ZERO(dd);

    /* Trivial cases */
    if (c == one) return(f);
    if (c == zero) return(zero);
    if (cuddIsConstant(f)) return(f);
    if (f == c) return(one);

    /* Now f and c are non-constant. */

    /* Check the cache. */
    r = cuddCacheLookup2(dd, Cudd_addRestrict, f, c);
    if (r != NULL) {
        return(r);
    }

    checkWhetherToGiveUp(dd);

    topf = dd->perm[f->index];
    topc = dd->perm[c->index];
    if (topc < topf) { /* abstract top variable from c */
        DdNode *d, *s1, *s2;

        /* Find cofactors of c.  ADDs have no complement arcs, so the
        ** children can be used directly. */
        s1 = cuddT(c);
        s2 = cuddE(c);
        /* Take the OR of the cofactors: this existentially abstracts
        ** c's top variable, which f does not depend on.  (Unlike the
        ** BDD version, no DeMorgan trick is needed for ADDs.) */
        d = cuddAddApplyRecur(dd, Cudd_addOr, s1, s2);
        if (d == NULL) return(NULL);
        cuddRef(d);
        r = cuddAddRestrictRecur(dd, f, d);
        if (r == NULL) {
            Cudd_RecursiveDeref(dd, d);
            return(NULL);
        }
        cuddRef(r);
        Cudd_RecursiveDeref(dd, d);
        /* Cache under the original care set c, not the abstracted d. */
        cuddCacheInsert2(dd, Cudd_addRestrict, f, c, r);
        cuddDeref(r);
        return(r);
    }

    /* Recursive step. Here topf <= topc. */
    index = f->index;
    Fv = cuddT(f); Fnv = cuddE(f);
    if (topc == topf) {
        Cv = cuddT(c); Cnv = cuddE(c);
    } else {
        Cv = Cnv = c;
    }

    if (!Cudd_IsConstantInt(Cv)) {
        t = cuddAddRestrictRecur(dd, Fv, Cv);
        if (t == NULL) return(NULL);
    } else if (Cv == one) {
        t = Fv;
    } else { /* Cv == zero: return(Fnv @ Cnv) */
        /* Positive care branch is empty: result is the else branch. */
        if (Cnv == one) {
            r = Fnv;
        } else {
            r = cuddAddRestrictRecur(dd, Fnv, Cnv);
            if (r == NULL) return(NULL);
        }
        return(r);
    }
    cuddRef(t);

    if (!cuddIsConstant(Cnv)) {
        e = cuddAddRestrictRecur(dd, Fnv, Cnv);
        if (e == NULL) {
            Cudd_RecursiveDeref(dd, t);
            return(NULL);
        }
    } else if (Cnv == one) {
        e = Fnv;
    } else { /* Cnv == zero: return (Fv @ Cv) previously computed */
        cuddDeref(t);
        return(t);
    }
    cuddRef(e);

    r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
    if (r == NULL) {
        Cudd_RecursiveDeref(dd, e);
        Cudd_RecursiveDeref(dd, t);
        return(NULL);
    }
    cuddDeref(t);
    cuddDeref(e);

    cuddCacheInsert2(dd, Cudd_addRestrict, f, c, r);
    return(r);
} /* end of cuddAddRestrictRecur */
  1174. /**
  1175. @brief Performs safe minimization of a %BDD.
  1176. @details Given the %BDD `f` of a function to be minimized and a %BDD
  1177. `c` representing the care set, Cudd_bddLICompaction produces the
  1178. %BDD of a function that agrees with `f` wherever `c` is 1. Safe
  1179. minimization means that the size of the result is guaranteed not to
  1180. exceed the size of `f`. This function is based on the DAC97 paper by
  1181. Hong et al..
  1182. @return a pointer to the result if successful; NULL otherwise.
  1183. @sideeffect None
  1184. @see Cudd_bddLICompaction
  1185. */
DdNode *
cuddBddLICompaction(
  DdManager * dd /**< manager */,
  DdNode * f /**< function to be minimized */,
  DdNode * c /**< constraint (care set) */)
{
    st_table *marktable, *markcache, *buildcache;
    DdNode *res, *zero;

    zero = Cudd_Not(DD_ONE(dd));
    /* Empty care set: every function agrees with f there; return zero. */
    if (c == zero) return(zero);

    /* We need to use local caches for both steps of this operation.
    ** The results of the edge marking step are only valid as long as the
    ** edge markings themselves are available. However, the edge markings
    ** are lost at the end of one invocation of Cudd_bddLICompaction.
    ** Hence, the cache entries for the edge marking step must be
    ** invalidated at the end of this function.
    ** For the result of the building step we argue as follows. The result
    ** for a node and a given constrain depends on the BDD in which the node
    ** appears. Hence, the same node and constrain may give different results
    ** in successive invocations.
    */
    /* marktable: node -> edge markings; shared by both phases. */
    marktable = st_init_table(st_ptrcmp,st_ptrhash);
    if (marktable == NULL) {
        return(NULL);
    }
    /* markcache: (f,c) pair -> marking; keys are heap-allocated and must
    ** be freed via MarkCacheCleanUp before the table is destroyed. */
    markcache = st_init_table(MarkCacheCompare,MarkCacheHash);
    if (markcache == NULL) {
        st_free_table(marktable);
        return(NULL);
    }
    /* Phase 1: mark the edges of f according to the care set. */
    if (cuddBddLICMarkEdges(dd,f,c,marktable,markcache) == CUDD_OUT_OF_MEM) {
        st_foreach(markcache, MarkCacheCleanUp, NULL);
        st_free_table(marktable);
        st_free_table(markcache);
        return(NULL);
    }
    /* The edge-marking cache is invalid past this point; free its keys
    ** and the table itself.  The markings in marktable remain valid. */
    st_foreach(markcache, MarkCacheCleanUp, NULL);
    st_free_table(markcache);
    /* Phase 2: build the minimized BDD from the markings. */
    buildcache = st_init_table(st_ptrcmp,st_ptrhash);
    if (buildcache == NULL) {
        st_free_table(marktable);
        return(NULL);
    }
    res = cuddBddLICBuildResult(dd,f,buildcache,marktable);
    st_free_table(buildcache);
    st_free_table(marktable);
    return(res);
} /* end of cuddBddLICompaction */
  1234. /*---------------------------------------------------------------------------*/
  1235. /* Definition of static functions */
  1236. /*---------------------------------------------------------------------------*/
  1237. /**
  1238. @brief Performs the recursive step of Cudd_bddConstrainDecomp.
@return 1 if successful; 0 otherwise. The components f super (i) of the decomposition are stored in the decomp array.
  1240. @sideeffect None
  1241. @see Cudd_bddConstrainDecomp
  1242. */
static int
cuddBddConstrainDecomp(
  DdManager * dd,
  DdNode * f,
  DdNode ** decomp)
{
    DdNode *F, *fv, *fvn;
    DdNode *fAbs;
    DdNode *result;
    int ok;

    /* Constant functions contribute no components: recursion bottoms out. */
    if (Cudd_IsConstantInt(f)) return(1);
    /* Compute complements of cofactors. */
    F = Cudd_Regular(f);
    fv = cuddT(F);
    fvn = cuddE(F);
    if (F == f) {
        fv = Cudd_Not(fv);
        fvn = Cudd_Not(fvn);
    }
    /* Compute abstraction of top variable: NOT(AND of complemented
    ** cofactors) is the OR of the cofactors, i.e., existential
    ** quantification of the top variable (DeMorgan). */
    fAbs = cuddBddAndRecur(dd, fv, fvn);
    if (fAbs == NULL) {
        return(0);
    }
    cuddRef(fAbs);
    fAbs = Cudd_Not(fAbs);
    /* Recursively find the next abstraction and the components of the
    ** decomposition. */
    ok = cuddBddConstrainDecomp(dd, fAbs, decomp);
    if (ok == 0) {
        Cudd_IterDerefBdd(dd,fAbs);
        return(0);
    }
    /* Compute the component of the decomposition corresponding to the
    ** top variable and store it in the decomposition array. */
    result = cuddBddConstrainRecur(dd, f, fAbs);
    if (result == NULL) {
        Cudd_IterDerefBdd(dd,fAbs);
        return(0);
    }
    cuddRef(result);
    /* The stored component keeps its reference; the caller owns it. */
    decomp[F->index] = result;
    Cudd_IterDerefBdd(dd, fAbs);
    return(1);
} /* end of cuddBddConstrainDecomp */
  1288. /**
  1289. @brief Performs the recursive step of Cudd_bddCharToVect.
  1290. @details This function maintains the invariant that f is non-zero.
  1291. @return the i-th component of the vector if successful; otherwise NULL.
  1292. @sideeffect None
  1293. @see Cudd_bddCharToVect
  1294. */
static DdNode *
cuddBddCharToVect(
  DdManager * dd,
  DdNode * f,
  DdNode * x)
{
    int topf;
    int level;
    int comple;
    DdNode *one, *zero, *res, *F, *fT, *fE, *T, *E;

    statLine(dd);
    /* Check the cache. */
    res = cuddCacheLookup2(dd, cuddBddCharToVect, f, x);
    if (res != NULL) {
        return(res);
    }

    checkWhetherToGiveUp(dd);

    F = Cudd_Regular(f);

    topf = cuddI(dd,F->index);
    level = dd->perm[x->index];
    /* f does not constrain x: the identity x is a valid component. */
    if (topf > level) return(x);

    one = DD_ONE(dd);
    zero = Cudd_Not(one);

    comple = F != f;
    fT = Cudd_NotCond(cuddT(F),comple);
    fE = Cudd_NotCond(cuddE(F),comple);

    if (topf == level) {
        /* x is the top variable of f.  A zero cofactor forces the value
        ** of x; otherwise x remains free (invariant: f is non-zero). */
        if (fT == zero) return(zero);
        if (fE == zero) return(one);
        return(x);
    }

    /* Here topf < level.  A zero cofactor means f collapses to the other
    ** branch, so recur on that branch alone. */
    if (fT == zero) return(cuddBddCharToVect(dd, fE, x));
    if (fE == zero) return(cuddBddCharToVect(dd, fT, x));

    T = cuddBddCharToVect(dd, fT, x);
    if (T == NULL) {
        return(NULL);
    }
    cuddRef(T);
    E = cuddBddCharToVect(dd, fE, x);
    if (E == NULL) {
        Cudd_IterDerefBdd(dd,T);
        return(NULL);
    }
    cuddRef(E);
    /* Recombine under the top variable of f. */
    res = cuddBddIteRecur(dd, dd->vars[F->index], T, E);
    if (res == NULL) {
        Cudd_IterDerefBdd(dd,T);
        Cudd_IterDerefBdd(dd,E);
        return(NULL);
    }
    cuddDeref(T);
    cuddDeref(E);
    cuddCacheInsert2(dd, cuddBddCharToVect, f, x, res);
    return(res);
} /* end of cuddBddCharToVect */
  1351. /**
  1352. @brief Performs the edge marking step of Cudd_bddLICompaction.
  1353. @return the LUB of the markings of the two outgoing edges of
  1354. <code>f</code> if successful; otherwise CUDD_OUT_OF_MEM.
  1355. @sideeffect None
  1356. @see Cudd_bddLICompaction cuddBddLICBuildResult
  1357. */
static int
cuddBddLICMarkEdges(
  DdManager * dd,
  DdNode * f,
  DdNode * c,
  st_table * table,
  st_table * cache)
{
    DdNode *Fv, *Fnv, *Cv, *Cnv;
    DdNode *one, *zero;
    int topf, topc;
    int comple;
    int resT, resE, res, retval;
    void **slot;
    MarkCacheKey *key;

    one = DD_ONE(dd);
    zero = Cudd_Not(one);

    /* Terminal cases. */
    if (c == zero) return(DD_LIC_DC);
    if (f == one) return(DD_LIC_1);
    if (f == zero) return(DD_LIC_0);

    /* Make canonical to increase the utilization of the cache. */
    comple = Cudd_IsComplement(f);
    f = Cudd_Regular(f);
    /* Now f is a regular pointer to a non-constant node; c may be
    ** constant, or it may be complemented.
    */

    /* Check the cache.  The key is heap-allocated because the cache
    ** table stores (f,c) pairs; it is freed on every path that does not
    ** insert it into the cache. */
    key = ALLOC(MarkCacheKey, 1);
    if (key == NULL) {
        dd->errorCode = CUDD_MEMORY_OUT;
        return(CUDD_OUT_OF_MEM);
    }
    key->f = f; key->c = c;
    if (st_lookup_int(cache, key, &res)) {
        FREE(key);
        /* Cached markings refer to the regular f; swap 0/1 for the
        ** complemented pointer (DC is self-dual). */
        if (comple) {
            if (res == DD_LIC_0) res = DD_LIC_1;
            else if (res == DD_LIC_1) res = DD_LIC_0;
        }
        return(res);
    }

    /* Recursive step. */
    topf = dd->perm[f->index];
    topc = cuddI(dd,Cudd_Regular(c)->index);
    if (topf <= topc) {
        Fv = cuddT(f); Fnv = cuddE(f);
    } else {
        Fv = Fnv = f;
    }
    if (topc <= topf) {
        /* We know that c is not constant because f is not. */
        Cv = cuddT(Cudd_Regular(c)); Cnv = cuddE(Cudd_Regular(c));
        if (Cudd_IsComplement(c)) {
            Cv = Cudd_Not(Cv);
            Cnv = Cudd_Not(Cnv);
        }
    } else {
        Cv = Cnv = c;
    }

    resT = cuddBddLICMarkEdges(dd, Fv, Cv, table, cache);
    if (resT == CUDD_OUT_OF_MEM) {
        FREE(key);
        return(CUDD_OUT_OF_MEM);
    }
    resE = cuddBddLICMarkEdges(dd, Fnv, Cnv, table, cache);
    if (resE == CUDD_OUT_OF_MEM) {
        FREE(key);
        return(CUDD_OUT_OF_MEM);
    }

    /* Update edge markings.  Markings are only recorded when f's top
    ** variable is actually split on; then-edge marking lives in the
    ** high two bits of the slot, else-edge marking in the low two. */
    if (topf <= topc) {
        retval = st_find_or_add(table, f, &slot);
        if (retval == 0) {
            *slot = (void **) (ptrint)((resT << 2) | resE);
        } else if (retval == 1) {
            /* Node already seen: accumulate (LUB) the new markings. */
            *slot = (void **) (ptrint)((int)((ptrint) *slot) | (resT << 2) | resE);
        } else {
            FREE(key);
            return(CUDD_OUT_OF_MEM);
        }
    }

    /* Cache result.  On success the cache takes ownership of key. */
    res = resT | resE;
    if (st_insert(cache, key, (void *)(ptrint)res) == ST_OUT_OF_MEM) {
        FREE(key);
        return(CUDD_OUT_OF_MEM);
    }

    /* Take into account possible complementation. */
    if (comple) {
        if (res == DD_LIC_0) res = DD_LIC_1;
        else if (res == DD_LIC_1) res = DD_LIC_0;
    }
    return(res);
} /* end of cuddBddLICMarkEdges */
  1453. /**
  1454. @brief Builds the result of Cudd_bddLICompaction.
  1455. @return a pointer to the minimized %BDD if successful; otherwise NULL.
  1456. @sideeffect None
  1457. @see Cudd_bddLICompaction cuddBddLICMarkEdges
  1458. */
static DdNode *
cuddBddLICBuildResult(
  DdManager * dd,
  DdNode * f,
  st_table * cache,
  st_table * table)
{
    DdNode *Fv, *Fnv, *r, *t, *e;
    DdNode *one, *zero;
    unsigned int index;
    int comple;
    int markT, markE, markings;

    one = DD_ONE(dd);
    zero = Cudd_Not(one);

    if (Cudd_IsConstantInt(f)) return(f);
    /* Make canonical to increase the utilization of the cache. */
    comple = Cudd_IsComplement(f);
    f = Cudd_Regular(f);

    /* Check the cache. */
    if (st_lookup(cache, f, (void **) &r)) {
        return(Cudd_NotCond(r,comple));
    }

    /* Retrieve the edge markings recorded by cuddBddLICMarkEdges;
    ** missing markings indicate an internal error. */
    if (st_lookup_int(table, f, &markings) == 0)
        return(NULL);
    /* Then-edge marking in the high two bits, else-edge in the low two. */
    markT = markings >> 2;
    markE = markings & 3;

    index = f->index;
    Fv = cuddT(f); Fnv = cuddE(f);

    /* NL (no leaf) means the subgraph must be rebuilt recursively;
    ** markings of 1 or 0 let the whole branch collapse to a constant. */
    if (markT == DD_LIC_NL) {
        t = cuddBddLICBuildResult(dd,Fv,cache,table);
        if (t == NULL) {
            return(NULL);
        }
    } else if (markT == DD_LIC_1) {
        t = one;
    } else {
        t = zero;
    }
    cuddRef(t);
    if (markE == DD_LIC_NL) {
        e = cuddBddLICBuildResult(dd,Fnv,cache,table);
        if (e == NULL) {
            Cudd_IterDerefBdd(dd,t);
            return(NULL);
        }
    } else if (markE == DD_LIC_1) {
        e = one;
    } else {
        e = zero;
    }
    cuddRef(e);

    /* If exactly one branch is don't-care, drop the variable and keep
    ** the other branch; otherwise build the node, keeping the then
    ** child regular for canonicity. */
    if (markT == DD_LIC_DC && markE != DD_LIC_DC) {
        r = e;
    } else if (markT != DD_LIC_DC && markE == DD_LIC_DC) {
        r = t;
    } else {
        if (Cudd_IsComplement(t)) {
            t = Cudd_Not(t);
            e = Cudd_Not(e);
            r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
            if (r == NULL) {
                Cudd_IterDerefBdd(dd, e);
                Cudd_IterDerefBdd(dd, t);
                return(NULL);
            }
            r = Cudd_Not(r);
        } else {
            r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
            if (r == NULL) {
                Cudd_IterDerefBdd(dd, e);
                Cudd_IterDerefBdd(dd, t);
                return(NULL);
            }
        }
    }
    cuddDeref(t);
    cuddDeref(e);

    if (st_insert(cache, f, r) == ST_OUT_OF_MEM) {
        cuddRef(r);
        Cudd_IterDerefBdd(dd,r);
        return(NULL);
    }
    return(Cudd_NotCond(r,comple));
} /* end of cuddBddLICBuildResult */
  1544. /**
  1545. @brief Hash function for the computed table of cuddBddLICMarkEdges.
  1546. @return the bucket number.
  1547. @sideeffect None
  1548. @see Cudd_bddLICompaction
  1549. */
  1550. static int
  1551. MarkCacheHash(
  1552. void const * ptr,
  1553. int modulus)
  1554. {
  1555. int val = 0;
  1556. MarkCacheKey const *entry = (MarkCacheKey const *) ptr;
  1557. val = (int) (ptrint) entry->f;
  1558. val = val * 997 + (int) (ptrint) entry->c;
  1559. return ((val < 0) ? -val : val) % modulus;
  1560. } /* end of MarkCacheHash */
  1561. /**
  1562. @brief Comparison function for the computed table of
  1563. cuddBddLICMarkEdges.
  1564. @return 0 if the two nodes of the key are equal; 1 otherwise.
  1565. @sideeffect None
  1566. @see Cudd_bddLICompaction
  1567. */
  1568. static int
  1569. MarkCacheCompare(
  1570. const void * ptr1,
  1571. const void * ptr2)
  1572. {
  1573. MarkCacheKey const *entry1 = (MarkCacheKey const *) ptr1;
  1574. MarkCacheKey const *entry2 = (MarkCacheKey const *) ptr2;
  1575. return((entry1->f != entry2->f) || (entry1->c != entry2->c));
  1576. } /* end of MarkCacheCompare */
  1577. /**
  1578. @brief Frees memory associated with computed table of
  1579. cuddBddLICMarkEdges.
  1580. @return ST_CONTINUE.
  1581. @sideeffect None
  1582. @see Cudd_bddLICompaction
  1583. */
  1584. static enum st_retval
  1585. MarkCacheCleanUp(
  1586. void * key,
  1587. void * value,
  1588. void * arg)
  1589. {
  1590. MarkCacheKey *entry = (MarkCacheKey *) key;
  1591. (void) value; /* avoid warning */
  1592. (void) arg; /* avoid warning */
  1593. FREE(entry);
  1594. return ST_CONTINUE;
  1595. } /* end of MarkCacheCleanUp */
  1596. /**
  1597. @brief Performs the recursive step of Cudd_bddSqueeze.
  1598. @details This procedure exploits the fact that if we complement and
  1599. swap the bounds of the interval we obtain a valid solution by taking
  1600. the complement of the solution to the original problem. Therefore,
  1601. we can enforce the condition that the upper bound is always regular.
  1602. @return a pointer to the result if successful; NULL otherwise.
  1603. @sideeffect None
  1604. @see Cudd_bddSqueeze
  1605. */
static DdNode *
cuddBddSqueeze(
  DdManager * dd,
  DdNode * l,
  DdNode * u)
{
    DdNode *one, *zero, *r, *lt, *le, *ut, *ue, *t, *e;
#if 0
    DdNode *ar;
#endif
    int comple = 0;
    int topu, topl;
    unsigned int index;

    statLine(dd);
    if (l == u) {
        return(l);
    }
    one = DD_ONE(dd);
    zero = Cudd_Not(one);
    /* The only case when l == zero && u == one is at the top level,
    ** where returning either one or zero is OK. In all other cases
    ** the procedure will detect such a case and will perform
    ** remapping. Therefore the order in which we test l and u at this
    ** point is immaterial. */
    if (l == zero) return(l);
    if (u == one) return(u);

    /* Make canonical to increase the utilization of the cache.
    ** Complementing and swapping the bounds of the interval yields a
    ** problem whose solution is the complement of the original one. */
    if (Cudd_IsComplement(u)) {
        DdNode *temp;
        temp = Cudd_Not(l);
        l = Cudd_Not(u);
        u = temp;
        comple = 1;
    }
    /* At this point u is regular and non-constant; l is non-constant, but
    ** may be complemented. */

    /* Here we could check the relative sizes. */

    /* Check the cache. */
    r = cuddCacheLookup2(dd, Cudd_bddSqueeze, l, u);
    if (r != NULL) {
        return(Cudd_NotCond(r,comple));
    }

    checkWhetherToGiveUp(dd);

    /* Recursive step.  Split on the top variable of the two bounds. */
    topu = dd->perm[u->index];
    topl = dd->perm[Cudd_Regular(l)->index];
    if (topu <= topl) {
        index = u->index;
        ut = cuddT(u); ue = cuddE(u);
    } else {
        index = Cudd_Regular(l)->index;
        ut = ue = u;
    }
    if (topl <= topu) {
        lt = cuddT(Cudd_Regular(l)); le = cuddE(Cudd_Regular(l));
        if (Cudd_IsComplement(l)) {
            lt = Cudd_Not(lt);
            le = Cudd_Not(le);
        }
    } else {
        lt = le = l;
    }

    /* If one interval is contained in the other, use the smaller
    ** interval. This corresponds to one-sided matching. */
    if ((lt == zero || Cudd_bddLeq(dd,lt,le)) &&
        (ut == one  || Cudd_bddLeq(dd,ue,ut))) { /* remap */
        r = cuddBddSqueeze(dd, le, ue);
        if (r == NULL)
            return(NULL);
        return(Cudd_NotCond(r,comple));
    } else if ((le == zero || Cudd_bddLeq(dd,le,lt)) &&
               (ue == one  || Cudd_bddLeq(dd,ut,ue))) { /* remap */
        r = cuddBddSqueeze(dd, lt, ut);
        if (r == NULL)
            return(NULL);
        return(Cudd_NotCond(r,comple));
    } else if ((le == zero || Cudd_bddLeq(dd,le,Cudd_Not(ut))) &&
               (ue == one  || Cudd_bddLeq(dd,Cudd_Not(lt),ue))) { /* c-remap */
        /* One cofactor interval is contained in the complement of the
        ** other: solve one branch and use its complement for the other,
        ** building a node of the form (index, t, !t). */
        t = cuddBddSqueeze(dd, lt, ut);
        cuddRef(t);
        if (Cudd_IsComplement(t)) {
            r = cuddUniqueInter(dd, index, Cudd_Not(t), t);
            if (r == NULL) {
                Cudd_IterDerefBdd(dd, t);
                return(NULL);
            }
            r = Cudd_Not(r);
        } else {
            r = cuddUniqueInter(dd, index, t, Cudd_Not(t));
            if (r == NULL) {
                Cudd_IterDerefBdd(dd, t);
                return(NULL);
            }
        }
        cuddDeref(t);
        if (r == NULL)
            return(NULL);
        cuddCacheInsert2(dd, Cudd_bddSqueeze, l, u, r);
        return(Cudd_NotCond(r,comple));
    } else if ((lt == zero || Cudd_bddLeq(dd,lt,Cudd_Not(ue))) &&
               (ut == one  || Cudd_bddLeq(dd,Cudd_Not(le),ut))) { /* c-remap */
        /* Symmetric case: solve the else branch and complement it for
        ** the then branch. */
        e = cuddBddSqueeze(dd, le, ue);
        cuddRef(e);
        if (Cudd_IsComplement(e)) {
            r = cuddUniqueInter(dd, index, Cudd_Not(e), e);
            if (r == NULL) {
                Cudd_IterDerefBdd(dd, e);
                return(NULL);
            }
        } else {
            r = cuddUniqueInter(dd, index, e, Cudd_Not(e));
            if (r == NULL) {
                Cudd_IterDerefBdd(dd, e);
                return(NULL);
            }
            r = Cudd_Not(r);
        }
        cuddDeref(e);
        if (r == NULL)
            return(NULL);
        cuddCacheInsert2(dd, Cudd_bddSqueeze, l, u, r);
        return(Cudd_NotCond(r,comple));
    }

#if 0
    /* If the two intervals intersect, take a solution from
    ** the intersection of the intervals. This guarantees that the
    ** splitting variable will not appear in the result.
    ** This approach corresponds to two-sided matching, and is very
    ** expensive. */
    if (Cudd_bddLeq(dd,lt,ue) && Cudd_bddLeq(dd,le,ut)) {
        DdNode *au, *al;
        au = cuddBddAndRecur(dd,ut,ue);
        if (au == NULL)
            return(NULL);
        cuddRef(au);
        al = cuddBddAndRecur(dd,Cudd_Not(lt),Cudd_Not(le));
        if (al == NULL) {
            Cudd_IterDerefBdd(dd,au);
            return(NULL);
        }
        cuddRef(al);
        al = Cudd_Not(al);
        ar = cuddBddSqueeze(dd, al, au);
        if (ar == NULL) {
            Cudd_IterDerefBdd(dd,au);
            Cudd_IterDerefBdd(dd,al);
            return(NULL);
        }
        cuddRef(ar);
        Cudd_IterDerefBdd(dd,au);
        Cudd_IterDerefBdd(dd,al);
    } else {
        ar = NULL;
    }
#endif

    /* General case: recur on both cofactor intervals and rebuild the
    ** node, keeping the then child regular for canonicity. */
    t = cuddBddSqueeze(dd, lt, ut);
    if (t == NULL) {
        return(NULL);
    }
    cuddRef(t);
    e = cuddBddSqueeze(dd, le, ue);
    if (e == NULL) {
        Cudd_IterDerefBdd(dd,t);
        return(NULL);
    }
    cuddRef(e);

    if (Cudd_IsComplement(t)) {
        t = Cudd_Not(t);
        e = Cudd_Not(e);
        r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
        if (r == NULL) {
            Cudd_IterDerefBdd(dd, e);
            Cudd_IterDerefBdd(dd, t);
            return(NULL);
        }
        r = Cudd_Not(r);
    } else {
        r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
        if (r == NULL) {
            Cudd_IterDerefBdd(dd, e);
            Cudd_IterDerefBdd(dd, t);
            return(NULL);
        }
    }
    cuddDeref(t);
    cuddDeref(e);

#if 0
    /* Check whether there is a result obtained by abstraction and whether
    ** it is better than the one obtained by recursion. */
    cuddRef(r);
    if (ar != NULL) {
        if (Cudd_DagSize(ar) <= Cudd_DagSize(r)) {
            Cudd_IterDerefBdd(dd, r);
            r = ar;
        } else {
            Cudd_IterDerefBdd(dd, ar);
        }
    }
    cuddDeref(r);
#endif

    cuddCacheInsert2(dd, Cudd_bddSqueeze, l, u, r);
    return(Cudd_NotCond(r,comple));
} /* end of cuddBddSqueeze */
  1809. /**
  1810. @brief Performs the recursive step of Cudd_bddInterpolate.
  1811. @details This procedure exploits the fact that if we complement and
  1812. swap the bounds of the interval we obtain a valid solution by taking
  1813. the complement of the solution to the original problem. Therefore,
  1814. we can enforce the condition that the upper bound is always regular.
  1815. @return a pointer to the result if successful; NULL otherwise.
  1816. @sideeffect None
  1817. @see Cudd_bddInterpolate
  1818. */
static DdNode *
cuddBddInterpolate(
  DdManager * dd,
  DdNode * l,
  DdNode * u)
{
    DdNode *one, *zero, *r, *lt, *le, *ut, *ue, *t, *e;
#if 0
    DdNode *ar;
#endif
    int comple = 0;
    int topu, topl;
    unsigned int index;

    statLine(dd);
    if (l == u) {
        return(l);
    }
    one = DD_ONE(dd);
    zero = Cudd_Not(one);
    if (l == zero) return(l);
    if (u == one) return(u);

    /* Make canonical to increase the utilization of the cache.
    ** Complementing and swapping the bounds of the interval yields a
    ** problem whose solution is the complement of the original one. */
    if (Cudd_IsComplement(u)) {
        DdNode *temp;
        temp = Cudd_Not(l);
        l = Cudd_Not(u);
        u = temp;
        comple = 1;
    }
    /* At this point u is regular and non-constant; l is non-constant, but
    ** may be complemented. */

    /* Check the cache. */
    r = cuddCacheLookup2(dd, Cudd_bddInterpolate, l, u);
    if (r != NULL) {
        return(Cudd_NotCond(r,comple));
    }

    checkWhetherToGiveUp(dd);

    /* Recursive step. */
    topu = dd->perm[u->index];
    topl = dd->perm[Cudd_Regular(l)->index];
    if (topu < topl) {
        /* Universally quantify top variable from upper bound: the AND of
        ** the cofactors tightens u without excluding any valid result. */
        DdNode *qu;
        ut = cuddT(u); ue = cuddE(u);
        qu = cuddBddAndRecur(dd, ut, ue);
        if (qu == NULL) return(NULL);
        cuddRef(qu);
        r = cuddBddInterpolate(dd, l, qu);
        if (r == NULL) {
            Cudd_IterDerefBdd(dd, qu);
            return(NULL);
        }
        cuddRef(r);
        Cudd_IterDerefBdd(dd, qu);
        /* Cache under the original bounds, not the quantified one. */
        cuddCacheInsert2(dd, Cudd_bddInterpolate, l, u, r);
        cuddDeref(r);
        return(Cudd_NotCond(r, comple));
    } else if (topl < topu) {
        /* Existentially quantify top variable from lower bound. */
        DdNode *ql;
        /* Find complements of cofactors of l. */
        if (Cudd_IsComplement(l)) {
            lt = cuddT(Cudd_Regular(l));
            le = cuddE(Cudd_Regular(l));
        } else {
            lt = Cudd_Not(cuddT(l));
            le = Cudd_Not(cuddE(l));
        }
        /* Disjoin cofactors by applying DeMorgan. */
        ql = cuddBddAndRecur(dd, lt, le);
        if (ql == NULL) return (NULL);
        cuddRef(ql);
        ql = Cudd_Not(ql);
        r = cuddBddInterpolate(dd, ql, u);
        if (r == NULL) {
            Cudd_IterDerefBdd(dd, ql);
            return(NULL);
        }
        cuddRef(r);
        Cudd_IterDerefBdd(dd, ql);
        cuddCacheInsert2(dd, Cudd_bddInterpolate, l, u, r);
        cuddDeref(r);
        return(Cudd_NotCond(r, comple));
    }

    /* Both bounds depend on the top variable: split and recur. */
    index = u->index;
    ut = cuddT(u); ue = cuddE(u);
    lt = cuddT(Cudd_Regular(l)); le = cuddE(Cudd_Regular(l));
    if (Cudd_IsComplement(l)) {
        lt = Cudd_Not(lt);
        le = Cudd_Not(le);
    }

    t = cuddBddInterpolate(dd, lt, ut);
    if (t == NULL) {
        return(NULL);
    }
    cuddRef(t);
    e = cuddBddInterpolate(dd, le, ue);
    if (e == NULL) {
        Cudd_IterDerefBdd(dd,t);
        return(NULL);
    }
    cuddRef(e);

    /* Rebuild the node, keeping the then child regular for canonicity. */
    if (Cudd_IsComplement(t)) {
        t = Cudd_Not(t);
        e = Cudd_Not(e);
        r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
        if (r == NULL) {
            Cudd_IterDerefBdd(dd, e);
            Cudd_IterDerefBdd(dd, t);
            return(NULL);
        }
        r = Cudd_Not(r);
    } else {
        r = (t == e) ? t : cuddUniqueInter(dd, index, t, e);
        if (r == NULL) {
            Cudd_IterDerefBdd(dd, e);
            Cudd_IterDerefBdd(dd, t);
            return(NULL);
        }
    }
    cuddDeref(t);
    cuddDeref(e);

    cuddCacheInsert2(dd, Cudd_bddInterpolate, l, u, r);
    return(Cudd_NotCond(r,comple));
} /* end of cuddBddInterpolate */