You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2560 lines
82 KiB

  1. /*
  2. * Copyright 2011-2014 Formal Methods and Tools, University of Twente
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <sylvan_config.h>
  17. #include <assert.h>
  18. #include <inttypes.h>
  19. #include <math.h>
  20. #include <pthread.h>
  21. #include <stdint.h>
  22. #include <stdio.h>
  23. #include <stdlib.h>
  24. #include <string.h>
  25. #include <avl.h>
  26. #include <refs.h>
  27. #include <sha2.h>
  28. #include <sylvan.h>
  29. #include <sylvan_common.h>
  30. /**
  31. * MDD node structure
  32. */
  33. typedef struct __attribute__((packed)) mddnode {
  34. uint64_t a, b;
  35. } * mddnode_t; // 16 bytes
  36. // RmRR RRRR RRRR VVVV | VVVV DcDD DDDD DDDD (little endian - in memory)
  37. // VVVV RRRR RRRR RRRm | DDDD DDDD DDDc VVVV (big endian)
  38. // Ensure our mddnode is 16 bytes
  39. typedef char __lddmc_check_mddnode_t_is_16_bytes[(sizeof(struct mddnode)==16) ? 1 : -1];
  40. static inline uint32_t __attribute__((unused))
  41. mddnode_getvalue(mddnode_t n)
  42. {
  43. return *(uint32_t*)((uint8_t*)n+6);
  44. }
  45. static inline uint8_t __attribute__((unused))
  46. mddnode_getmark(mddnode_t n)
  47. {
  48. return n->a & 1;
  49. }
  50. static inline uint8_t __attribute__((unused))
  51. mddnode_getcopy(mddnode_t n)
  52. {
  53. return n->b & 0x10000 ? 1 : 0;
  54. }
  55. static inline uint64_t __attribute__((unused))
  56. mddnode_getright(mddnode_t n)
  57. {
  58. return (n->a & 0x0000ffffffffffff) >> 1;
  59. }
  60. static inline uint64_t __attribute__((unused))
  61. mddnode_getdown(mddnode_t n)
  62. {
  63. return n->b >> 17;
  64. }
  65. static inline void __attribute__((unused))
  66. mddnode_setvalue(mddnode_t n, uint32_t value)
  67. {
  68. *(uint32_t*)((uint8_t*)n+6) = value;
  69. }
  70. static inline void __attribute__((unused))
  71. mddnode_setmark(mddnode_t n, uint8_t mark)
  72. {
  73. n->a = (n->a & 0xfffffffffffffffe) | (mark ? 1 : 0);
  74. }
  75. static inline void __attribute__((unused))
  76. mddnode_setright(mddnode_t n, uint64_t right)
  77. {
  78. n->a = (n->a & 0xffff000000000001) | (right << 1);
  79. }
  80. static inline void __attribute__((unused))
  81. mddnode_setdown(mddnode_t n, uint64_t down)
  82. {
  83. n->b = (n->b & 0x000000000001ffff) | (down << 16);
  84. }
  85. static inline void __attribute__((unused))
  86. mddnode_make(mddnode_t n, uint32_t value, uint64_t right, uint64_t down)
  87. {
  88. n->a = right << 1;
  89. n->b = down << 17;
  90. *(uint32_t*)((uint8_t*)n+6) = value;
  91. }
  92. static inline void __attribute__((unused))
  93. mddnode_makecopy(mddnode_t n, uint64_t right, uint64_t down)
  94. {
  95. n->a = right << 1;
  96. n->b = ((down << 1) | 1) << 16;
  97. }
  98. #define GETNODE(mdd) ((mddnode_t)llmsset_index_to_ptr(nodes, mdd))
  99. /**
  100. * Implementation of garbage collection
  101. */
/* Recursively mark MDD nodes as 'in use' (gc mark phase).
 * Right edges are marked in parallel (SPAWN) while the down edge is followed
 * directly (CALL). llmsset_mark presumably returns nonzero only the first
 * time a node is marked, so each node is visited once -- TODO confirm. */
VOID_TASK_IMPL_1(lddmc_gc_mark_rec, MDD, mdd)
{
    // terminals are not stored in the nodes table; nothing to mark
    if (mdd <= lddmc_true) return;
    if (llmsset_mark(nodes, mdd)) {
        mddnode_t n = GETNODE(mdd);
        SPAWN(lddmc_gc_mark_rec, mddnode_getright(n));
        CALL(lddmc_gc_mark_rec, mddnode_getdown(n));
        SYNC(lddmc_gc_mark_rec);
    }
}
  113. /**
  114. * External references
  115. */
  116. refs_table_t mdd_refs;
  117. MDD
  118. lddmc_ref(MDD a)
  119. {
  120. if (a == lddmc_true || a == lddmc_false) return a;
  121. refs_up(&mdd_refs, a);
  122. return a;
  123. }
  124. void
  125. lddmc_deref(MDD a)
  126. {
  127. if (a == lddmc_true || a == lddmc_false) return;
  128. refs_down(&mdd_refs, a);
  129. }
/* Return the number of entries in the external reference table. */
size_t
lddmc_count_refs()
{
    return refs_count(&mdd_refs);
}
/* Called during garbage collection: mark every externally referenced MDD. */
VOID_TASK_0(lddmc_gc_mark_external_refs)
{
    // iterate through refs hash table, mark all found
    size_t count=0;
    uint64_t *it = refs_iter(&mdd_refs, 0, mdd_refs.refs_size);
    while (it != NULL) {
        // refs_next advances <it> (NULL at the end) and yields the current entry
        SPAWN(lddmc_gc_mark_rec, refs_next(&mdd_refs, &it, mdd_refs.refs_size));
        count++;
    }
    // wait for all spawned mark tasks
    while (count--) {
        SYNC(lddmc_gc_mark_rec);
    }
}
  149. /* Infrastructure for internal markings */
  150. DECLARE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
  151. VOID_TASK_0(lddmc_refs_mark_task)
  152. {
  153. LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
  154. size_t i, j=0;
  155. for (i=0; i<lddmc_refs_key->r_count; i++) {
  156. if (j >= 40) {
  157. while (j--) SYNC(lddmc_gc_mark_rec);
  158. j=0;
  159. }
  160. SPAWN(lddmc_gc_mark_rec, lddmc_refs_key->results[i]);
  161. j++;
  162. }
  163. for (i=0; i<lddmc_refs_key->s_count; i++) {
  164. Task *t = lddmc_refs_key->spawns[i];
  165. if (!TASK_IS_STOLEN(t)) break;
  166. if (TASK_IS_COMPLETED(t)) {
  167. if (j >= 40) {
  168. while (j--) SYNC(lddmc_gc_mark_rec);
  169. j=0;
  170. }
  171. SPAWN(lddmc_gc_mark_rec, *(BDD*)TASK_RESULT(t));
  172. j++;
  173. }
  174. }
  175. while (j--) SYNC(lddmc_gc_mark_rec);
  176. }
/* Run the per-worker internal-reference mark task in every Lace worker. */
VOID_TASK_0(lddmc_refs_mark)
{
    TOGETHER(lddmc_refs_mark_task);
}
  181. VOID_TASK_0(lddmc_refs_init_task)
  182. {
  183. lddmc_refs_internal_t s = (lddmc_refs_internal_t)malloc(sizeof(struct lddmc_refs_internal));
  184. s->r_size = 128;
  185. s->r_count = 0;
  186. s->s_size = 128;
  187. s->s_count = 0;
  188. s->results = (BDD*)malloc(sizeof(BDD) * 128);
  189. s->spawns = (Task**)malloc(sizeof(Task*) * 128);
  190. SET_THREAD_LOCAL(lddmc_refs_key, s);
  191. }
/* Initialize the internal reference infrastructure: create the thread-local
 * key, allocate per-worker stacks, and register the mark callback with gc. */
VOID_TASK_0(lddmc_refs_init)
{
    INIT_THREAD_LOCAL(lddmc_refs_key);
    TOGETHER(lddmc_refs_init_task);
    sylvan_gc_add_mark(10, TASK(lddmc_refs_mark));
}
  198. /**
  199. * Initialize and quit functions
  200. */
/* Release the external reference table (registered via sylvan_register_quit). */
static void
lddmc_quit()
{
    refs_free(&mdd_refs);
}
  206. void
  207. sylvan_init_ldd()
  208. {
  209. sylvan_register_quit(lddmc_quit);
  210. sylvan_gc_add_mark(10, TASK(lddmc_gc_mark_external_refs));
  211. // Sanity check
  212. if (sizeof(struct mddnode) != 16) {
  213. fprintf(stderr, "Invalid size of mdd nodes: %ld\n", sizeof(struct mddnode));
  214. exit(1);
  215. }
  216. refs_create(&mdd_refs, 1024);
  217. LACE_ME;
  218. CALL(lddmc_refs_init);
  219. }
  220. /**
  221. * Primitives
  222. */
  223. MDD
  224. lddmc_makenode(uint32_t value, MDD ifeq, MDD ifneq)
  225. {
  226. if (ifeq == lddmc_false) return ifneq;
  227. // check if correct (should be false, or next in value)
  228. assert(ifneq != lddmc_true);
  229. if (ifneq != lddmc_false) assert(value < mddnode_getvalue(GETNODE(ifneq)));
  230. struct mddnode n;
  231. mddnode_make(&n, value, ifneq, ifeq);
  232. int created;
  233. uint64_t index = llmsset_lookup(nodes, n.a, n.b, &created);
  234. if (index == 0) {
  235. lddmc_refs_push(ifeq);
  236. lddmc_refs_push(ifneq);
  237. LACE_ME;
  238. sylvan_gc();
  239. lddmc_refs_pop(1);
  240. index = llmsset_lookup(nodes, n.a, n.b, &created);
  241. if (index == 0) {
  242. fprintf(stderr, "MDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes));
  243. exit(1);
  244. }
  245. }
  246. if (created) sylvan_stats_count(LDD_NODES_CREATED);
  247. else sylvan_stats_count(LDD_NODES_REUSED);
  248. return (MDD)index;
  249. }
  250. MDD
  251. lddmc_make_copynode(MDD ifeq, MDD ifneq)
  252. {
  253. struct mddnode n;
  254. mddnode_makecopy(&n, ifneq, ifeq);
  255. int created;
  256. uint64_t index = llmsset_lookup(nodes, n.a, n.b, &created);
  257. if (index == 0) {
  258. lddmc_refs_push(ifeq);
  259. lddmc_refs_push(ifneq);
  260. LACE_ME;
  261. sylvan_gc();
  262. lddmc_refs_pop(1);
  263. index = llmsset_lookup(nodes, n.a, n.b, &created);
  264. if (index == 0) {
  265. fprintf(stderr, "MDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes));
  266. exit(1);
  267. }
  268. }
  269. if (created) sylvan_stats_count(LDD_NODES_CREATED);
  270. else sylvan_stats_count(LDD_NODES_REUSED);
  271. return (MDD)index;
  272. }
  273. MDD
  274. lddmc_extendnode(MDD mdd, uint32_t value, MDD ifeq)
  275. {
  276. if (mdd <= lddmc_true) return lddmc_makenode(value, ifeq, mdd);
  277. mddnode_t n = GETNODE(mdd);
  278. if (mddnode_getcopy(n)) return lddmc_make_copynode(mddnode_getdown(n), lddmc_extendnode(mddnode_getright(n), value, ifeq));
  279. uint32_t n_value = mddnode_getvalue(n);
  280. if (n_value < value) return lddmc_makenode(n_value, mddnode_getdown(n), lddmc_extendnode(mddnode_getright(n), value, ifeq));
  281. if (n_value == value) return lddmc_makenode(value, ifeq, mddnode_getright(n));
  282. /* (n_value > value) */ return lddmc_makenode(value, ifeq, mdd);
  283. }
/* Return the value of the root node of <mdd> (must be a non-terminal). */
uint32_t
lddmc_getvalue(MDD mdd)
{
    return mddnode_getvalue(GETNODE(mdd));
}

/* Return the down ("value matches") edge of the root node of <mdd>. */
MDD
lddmc_getdown(MDD mdd)
{
    return mddnode_getdown(GETNODE(mdd));
}

/* Return the right ("try next value") edge of the root node of <mdd>. */
MDD
lddmc_getright(MDD mdd)
{
    return mddnode_getright(GETNODE(mdd));
}
  299. MDD
  300. lddmc_follow(MDD mdd, uint32_t value)
  301. {
  302. for (;;) {
  303. if (mdd <= lddmc_true) return mdd;
  304. const mddnode_t n = GETNODE(mdd);
  305. if (!mddnode_getcopy(n)) {
  306. const uint32_t v = mddnode_getvalue(n);
  307. if (v == value) return mddnode_getdown(n);
  308. if (v > value) return lddmc_false;
  309. }
  310. mdd = mddnode_getright(n);
  311. }
  312. }
  313. int
  314. lddmc_iscopy(MDD mdd)
  315. {
  316. if (mdd <= lddmc_true) return 0;
  317. mddnode_t n = GETNODE(mdd);
  318. return mddnode_getcopy(n) ? 1 : 0;
  319. }
  320. MDD
  321. lddmc_followcopy(MDD mdd)
  322. {
  323. if (mdd <= lddmc_true) return lddmc_false;
  324. mddnode_t n = GETNODE(mdd);
  325. if (mddnode_getcopy(n)) return mddnode_getdown(n);
  326. else return lddmc_false;
  327. }
  328. /**
  329. * MDD operations
  330. */
  331. static inline int
  332. match_ldds(MDD *one, MDD *two)
  333. {
  334. MDD m1 = *one, m2 = *two;
  335. if (m1 == lddmc_false || m2 == lddmc_false) return 0;
  336. mddnode_t n1 = GETNODE(m1), n2 = GETNODE(m2);
  337. uint32_t v1 = mddnode_getvalue(n1), v2 = mddnode_getvalue(n2);
  338. while (v1 != v2) {
  339. if (v1 < v2) {
  340. m1 = mddnode_getright(n1);
  341. if (m1 == lddmc_false) return 0;
  342. n1 = GETNODE(m1);
  343. v1 = mddnode_getvalue(n1);
  344. } else if (v1 > v2) {
  345. m2 = mddnode_getright(n2);
  346. if (m2 == lddmc_false) return 0;
  347. n2 = GETNODE(m2);
  348. v2 = mddnode_getvalue(n2);
  349. }
  350. }
  351. *one = m1;
  352. *two = m2;
  353. return 1;
  354. }
/* Compute the union of a and b; both must encode vector sets of equal depth. */
TASK_IMPL_2(MDD, lddmc_union, MDD, a, MDD, b)
{
    /* Terminal cases */
    if (a == b) return a;
    if (a == lddmc_false) return b;
    if (b == lddmc_false) return a;
    assert(a != lddmc_true && b != lddmc_true); // expecting same length

    /* Test gc */
    sylvan_gc_test();

    sylvan_stats_count(LDD_UNION);

    /* Improve cache behavior: union is commutative, so normalize operand order */
    if (a < b) { MDD tmp=b; b=a; a=tmp; }

    /* Access cache */
    MDD result;
    if (cache_get3(CACHE_MDD_UNION, a, b, 0, &result)) {
        sylvan_stats_count(LDD_UNION_CACHED);
        return result;
    }

    /* Get nodes */
    mddnode_t na = GETNODE(a);
    mddnode_t nb = GETNODE(b);
    const int na_copy = mddnode_getcopy(na) ? 1 : 0;
    const int nb_copy = mddnode_getcopy(nb) ? 1 : 0;
    const uint32_t na_value = mddnode_getvalue(na);
    const uint32_t nb_value = mddnode_getvalue(nb);

    /* Perform recursive calculation */
    if (na_copy && nb_copy) {
        // both roots are copy nodes: merge their down and right edges pairwise
        lddmc_refs_spawn(SPAWN(lddmc_union, mddnode_getdown(na), mddnode_getdown(nb)));
        MDD right = CALL(lddmc_union, mddnode_getright(na), mddnode_getright(nb));
        lddmc_refs_push(right); // keep <right> alive across a possible gc in SYNC
        MDD down = lddmc_refs_sync(SYNC(lddmc_union));
        lddmc_refs_pop(1);
        result = lddmc_make_copynode(down, right);
    } else if (na_copy) {
        // copy node sorts first: keep its down edge, merge b into a's right-list
        MDD right = CALL(lddmc_union, mddnode_getright(na), b);
        result = lddmc_make_copynode(mddnode_getdown(na), right);
    } else if (nb_copy) {
        MDD right = CALL(lddmc_union, a, mddnode_getright(nb));
        result = lddmc_make_copynode(mddnode_getdown(nb), right);
    } else if (na_value < nb_value) {
        // a's value is only in a: keep its down edge, continue right
        MDD right = CALL(lddmc_union, mddnode_getright(na), b);
        result = lddmc_makenode(na_value, mddnode_getdown(na), right);
    } else if (na_value == nb_value) {
        // same value: union below (in parallel) and to the right
        lddmc_refs_spawn(SPAWN(lddmc_union, mddnode_getdown(na), mddnode_getdown(nb)));
        MDD right = CALL(lddmc_union, mddnode_getright(na), mddnode_getright(nb));
        lddmc_refs_push(right); // keep <right> alive across a possible gc in SYNC
        MDD down = lddmc_refs_sync(SYNC(lddmc_union));
        lddmc_refs_pop(1);
        result = lddmc_makenode(na_value, down, right);
    } else /* na_value > nb_value */ {
        MDD right = CALL(lddmc_union, a, mddnode_getright(nb));
        result = lddmc_makenode(nb_value, mddnode_getdown(nb), right);
    }

    /* Write to cache */
    if (cache_put3(CACHE_MDD_UNION, a, b, 0, result)) sylvan_stats_count(LDD_UNION_CACHEDPUT);
    return result;
}
/* Compute set difference a \ b; both must encode vector sets of equal depth. */
TASK_IMPL_2(MDD, lddmc_minus, MDD, a, MDD, b)
{
    /* Terminal cases */
    if (a == b) return lddmc_false;
    if (a == lddmc_false) return lddmc_false;
    if (b == lddmc_false) return a;
    assert(b != lddmc_true);
    assert(a != lddmc_true); // Universe is unknown!! // Possibly depth issue?

    /* Test gc */
    sylvan_gc_test();

    sylvan_stats_count(LDD_MINUS);

    /* Access cache */
    MDD result;
    if (cache_get3(CACHE_MDD_MINUS, a, b, 0, &result)) {
        sylvan_stats_count(LDD_MINUS_CACHED);
        return result;
    }

    /* Get nodes */
    mddnode_t na = GETNODE(a);
    mddnode_t nb = GETNODE(b);
    uint32_t na_value = mddnode_getvalue(na);
    uint32_t nb_value = mddnode_getvalue(nb);

    /* Perform recursive calculation */
    if (na_value < nb_value) {
        // a's value does not occur in b at this level: keep its subtree intact
        MDD right = CALL(lddmc_minus, mddnode_getright(na), b);
        result = lddmc_makenode(na_value, mddnode_getdown(na), right);
    } else if (na_value == nb_value) {
        // same value: subtract right-lists in parallel with the down edges
        lddmc_refs_spawn(SPAWN(lddmc_minus, mddnode_getright(na), mddnode_getright(nb)));
        MDD down = CALL(lddmc_minus, mddnode_getdown(na), mddnode_getdown(nb));
        lddmc_refs_push(down); // protect <down> against gc during SYNC
        MDD right = lddmc_refs_sync(SYNC(lddmc_minus));
        lddmc_refs_pop(1);
        result = lddmc_makenode(na_value, down, right);
    } else /* na_value > nb_value */ {
        // b's value does not occur in a: skip it
        result = CALL(lddmc_minus, a, mddnode_getright(nb));
    }

    /* Write to cache */
    if (cache_put3(CACHE_MDD_MINUS, a, b, 0, result)) sylvan_stats_count(LDD_MINUS_CACHEDPUT);
    return result;
}
  452. /* result: a plus b; res2: b minus a */
  453. TASK_IMPL_3(MDD, lddmc_zip, MDD, a, MDD, b, MDD*, res2)
  454. {
  455. /* Terminal cases */
  456. if (a == b) {
  457. *res2 = lddmc_false;
  458. return a;
  459. }
  460. if (a == lddmc_false) {
  461. *res2 = b;
  462. return b;
  463. }
  464. if (b == lddmc_false) {
  465. *res2 = lddmc_false;
  466. return a;
  467. }
  468. assert(a != lddmc_true && b != lddmc_true); // expecting same length
  469. /* Test gc */
  470. sylvan_gc_test();
  471. /* Maybe not the ideal way */
  472. sylvan_stats_count(LDD_ZIP);
  473. /* Access cache */
  474. MDD result;
  475. if (cache_get3(CACHE_MDD_UNION, a, b, 0, &result) &&
  476. cache_get3(CACHE_MDD_MINUS, b, a, 0, res2)) {
  477. sylvan_stats_count(LDD_ZIP);
  478. return result;
  479. }
  480. /* Get nodes */
  481. mddnode_t na = GETNODE(a);
  482. mddnode_t nb = GETNODE(b);
  483. uint32_t na_value = mddnode_getvalue(na);
  484. uint32_t nb_value = mddnode_getvalue(nb);
  485. /* Perform recursive calculation */
  486. if (na_value < nb_value) {
  487. MDD right = CALL(lddmc_zip, mddnode_getright(na), b, res2);
  488. result = lddmc_makenode(na_value, mddnode_getdown(na), right);
  489. } else if (na_value == nb_value) {
  490. MDD down2, right2;
  491. lddmc_refs_spawn(SPAWN(lddmc_zip, mddnode_getdown(na), mddnode_getdown(nb), &down2));
  492. MDD right = CALL(lddmc_zip, mddnode_getright(na), mddnode_getright(nb), &right2);
  493. lddmc_refs_push(right);
  494. lddmc_refs_push(right2);
  495. MDD down = lddmc_refs_sync(SYNC(lddmc_zip));
  496. lddmc_refs_pop(2);
  497. result = lddmc_makenode(na_value, down, right);
  498. *res2 = lddmc_makenode(na_value, down2, right2);
  499. } else /* na_value > nb_value */ {
  500. MDD right2;
  501. MDD right = CALL(lddmc_zip, a, mddnode_getright(nb), &right2);
  502. result = lddmc_makenode(nb_value, mddnode_getdown(nb), right);
  503. *res2 = lddmc_makenode(nb_value, mddnode_getdown(nb), right2);
  504. }
  505. /* Write to cache */
  506. int c1 = cache_put3(CACHE_MDD_UNION, a, b, 0, result);
  507. int c2 = cache_put3(CACHE_MDD_MINUS, b, a, 0, *res2);
  508. if (c1 && c2) sylvan_stats_count(LDD_ZIP_CACHEDPUT);
  509. return result;
  510. }
/* Compute the intersection of a and b; both must have the same depth. */
TASK_IMPL_2(MDD, lddmc_intersect, MDD, a, MDD, b)
{
    /* Terminal cases */
    if (a == b) return a;
    if (a == lddmc_false || b == lddmc_false) return lddmc_false;
    assert(a != lddmc_true && b != lddmc_true);

    /* Test gc */
    sylvan_gc_test();

    sylvan_stats_count(LDD_INTERSECT);

    /* Get nodes */
    mddnode_t na = GETNODE(a);
    mddnode_t nb = GETNODE(b);
    uint32_t na_value = mddnode_getvalue(na);
    uint32_t nb_value = mddnode_getvalue(nb);

    /* Skip nodes if possible: advance the side with the smaller value along
     * its (sorted) right-list until both agree, since only common values can
     * contribute to the intersection */
    while (na_value != nb_value) {
        if (na_value < nb_value) {
            a = mddnode_getright(na);
            if (a == lddmc_false) return lddmc_false;
            na = GETNODE(a);
            na_value = mddnode_getvalue(na);
        }
        if (nb_value < na_value) {
            b = mddnode_getright(nb);
            if (b == lddmc_false) return lddmc_false;
            nb = GETNODE(b);
            nb_value = mddnode_getvalue(nb);
        }
    }

    /* Access cache (after skipping, so equivalent calls share one cache entry) */
    MDD result;
    if (cache_get3(CACHE_MDD_INTERSECT, a, b, 0, &result)) {
        sylvan_stats_count(LDD_INTERSECT_CACHED);
        return result;
    }

    /* Perform recursive calculation */
    lddmc_refs_spawn(SPAWN(lddmc_intersect, mddnode_getright(na), mddnode_getright(nb)));
    MDD down = CALL(lddmc_intersect, mddnode_getdown(na), mddnode_getdown(nb));
    lddmc_refs_push(down); // protect <down> against gc during SYNC
    MDD right = lddmc_refs_sync(SYNC(lddmc_intersect));
    lddmc_refs_pop(1);
    result = lddmc_makenode(na_value, down, right);

    /* Write to cache */
    if (cache_put3(CACHE_MDD_INTERSECT, a, b, 0, result)) sylvan_stats_count(LDD_INTERSECT_CACHEDPUT);
    return result;
}
// proj: -1 (rest 0), 0 (no match), 1 (match)
/* Restrict <a> to vectors that agree with <b> on the levels where the
 * projection <proj> has value 1; levels with 0 are kept unconditionally,
 * and -1 terminates the projection (the remainder of <a> is kept). */
TASK_IMPL_3(MDD, lddmc_match, MDD, a, MDD, b, MDD, proj)
{
    if (a == b) return a;
    if (a == lddmc_false || b == lddmc_false) return lddmc_false;

    mddnode_t p_node = GETNODE(proj);
    uint32_t p_val = mddnode_getvalue(p_node);
    if (p_val == (uint32_t)-1) return a; // projection ended: keep the rest of a

    assert(a != lddmc_true);
    if (p_val == 1) assert(b != lddmc_true);

    /* Test gc */
    sylvan_gc_test();

    /* Skip nodes if possible: align a and b on a common value at match levels */
    if (p_val == 1) {
        if (!match_ldds(&a, &b)) return lddmc_false;
    }

    sylvan_stats_count(LDD_MATCH);

    /* Access cache */
    MDD result;
    if (cache_get3(CACHE_MDD_MATCH, a, b, proj, &result)) {
        sylvan_stats_count(LDD_MATCH_CACHED);
        return result;
    }

    /* Perform recursive calculation */
    mddnode_t na = GETNODE(a);
    MDD down;
    if (p_val == 1) {
        // match level: descend in both a and b
        mddnode_t nb = GETNODE(b);
        /* right = */ lddmc_refs_spawn(SPAWN(lddmc_match, mddnode_getright(na), mddnode_getright(nb), proj));
        down = CALL(lddmc_match, mddnode_getdown(na), mddnode_getdown(nb), mddnode_getdown(p_node));
    } else {
        // non-match level: b stays at this level
        /* right = */ lddmc_refs_spawn(SPAWN(lddmc_match, mddnode_getright(na), b, proj));
        down = CALL(lddmc_match, mddnode_getdown(na), b, mddnode_getdown(p_node));
    }
    lddmc_refs_push(down); // protect <down> against gc during SYNC
    MDD right = lddmc_refs_sync(SYNC(lddmc_match));
    lddmc_refs_pop(1);
    result = lddmc_makenode(mddnode_getvalue(na), down, right);

    /* Write to cache */
    if (cache_put3(CACHE_MDD_MATCH, a, b, proj, result)) sylvan_stats_count(LDD_MATCH_CACHEDPUT);
    return result;
}
/* Helper: prefix value <val> to the result of relprod(set, rel, proj),
 * producing the singleton right-list <val> -> relprod(...). */
TASK_4(MDD, lddmc_relprod_help, uint32_t, val, MDD, set, MDD, rel, MDD, proj)
{
    return lddmc_makenode(val, CALL(lddmc_relprod, set, rel, proj), lddmc_false);
}
  603. // meta: -1 (end; rest not in rel), 0 (not in rel), 1 (read), 2 (write), 3 (only-read), 4 (only-write)
  604. TASK_IMPL_3(MDD, lddmc_relprod, MDD, set, MDD, rel, MDD, meta)
  605. {
  606. if (set == lddmc_false) return lddmc_false;
  607. if (rel == lddmc_false) return lddmc_false;
  608. mddnode_t n_meta = GETNODE(meta);
  609. uint32_t m_val = mddnode_getvalue(n_meta);
  610. if (m_val == (uint32_t)-1) return set;
  611. if (m_val != 0) assert(set != lddmc_true && rel != lddmc_true);
  612. /* Skip nodes if possible */
  613. if (!mddnode_getcopy(GETNODE(rel))) {
  614. if (m_val == 1 || m_val == 3) {
  615. if (!match_ldds(&set, &rel)) return lddmc_false;
  616. }
  617. }
  618. /* Test gc */
  619. sylvan_gc_test();
  620. sylvan_stats_count(LDD_RELPROD);
  621. /* Access cache */
  622. MDD result;
  623. if (cache_get3(CACHE_MDD_RELPROD, set, rel, meta, &result)) {
  624. sylvan_stats_count(LDD_RELPROD_CACHED);
  625. return result;
  626. }
  627. mddnode_t n_set = GETNODE(set);
  628. mddnode_t n_rel = GETNODE(rel);
  629. /* Recursive operations */
  630. if (m_val == 0) { // not in rel
  631. lddmc_refs_spawn(SPAWN(lddmc_relprod, mddnode_getright(n_set), rel, meta));
  632. MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta));
  633. lddmc_refs_push(down);
  634. MDD right = lddmc_refs_sync(SYNC(lddmc_relprod));
  635. lddmc_refs_pop(1);
  636. result = lddmc_makenode(mddnode_getvalue(n_set), down, right);
  637. } else if (m_val == 1) { // read
  638. // read layer: if not copy, then set&rel are already matched
  639. lddmc_refs_spawn(SPAWN(lddmc_relprod, set, mddnode_getright(n_rel), meta)); // spawn next read in list
  640. // for this read, either it is copy ('for all') or it is normal match
  641. if (mddnode_getcopy(n_rel)) {
  642. // spawn for every value to copy (set)
  643. int count = 0;
  644. for (;;) {
  645. // stay same level of set (for write)
  646. lddmc_refs_spawn(SPAWN(lddmc_relprod, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta)));
  647. count++;
  648. set = mddnode_getright(n_set);
  649. if (set == lddmc_false) break;
  650. n_set = GETNODE(set);
  651. }
  652. // sync+union (one by one)
  653. result = lddmc_false;
  654. while (count--) {
  655. lddmc_refs_push(result);
  656. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod));
  657. lddmc_refs_push(result2);
  658. result = CALL(lddmc_union, result, result2);
  659. lddmc_refs_pop(2);
  660. }
  661. } else {
  662. // stay same level of set (for write)
  663. result = CALL(lddmc_relprod, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta));
  664. }
  665. lddmc_refs_push(result);
  666. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod)); // sync next read in list
  667. lddmc_refs_push(result2);
  668. result = CALL(lddmc_union, result, result2);
  669. lddmc_refs_pop(2);
  670. } else if (m_val == 3) { // only-read
  671. if (mddnode_getcopy(n_rel)) {
  672. // copy on read ('for any value')
  673. // result = union(result_with_copy, result_without_copy)
  674. lddmc_refs_spawn(SPAWN(lddmc_relprod, set, mddnode_getright(n_rel), meta)); // spawn without_copy
  675. // spawn for every value to copy (set)
  676. int count = 0;
  677. for (;;) {
  678. lddmc_refs_spawn(SPAWN(lddmc_relprod_help, mddnode_getvalue(n_set), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta)));
  679. count++;
  680. set = mddnode_getright(n_set);
  681. if (set == lddmc_false) break;
  682. n_set = GETNODE(set);
  683. }
  684. // sync+union (one by one)
  685. result = lddmc_false;
  686. while (count--) {
  687. lddmc_refs_push(result);
  688. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_help));
  689. lddmc_refs_push(result2);
  690. result = CALL(lddmc_union, result, result2);
  691. lddmc_refs_pop(2);
  692. }
  693. // add result from without_copy
  694. lddmc_refs_push(result);
  695. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod));
  696. lddmc_refs_push(result2);
  697. result = CALL(lddmc_union, result, result2);
  698. lddmc_refs_pop(2);
  699. } else {
  700. // only-read, without copy
  701. lddmc_refs_spawn(SPAWN(lddmc_relprod, mddnode_getright(n_set), mddnode_getright(n_rel), meta));
  702. MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta));
  703. lddmc_refs_push(down);
  704. MDD right = lddmc_refs_sync(SYNC(lddmc_relprod));
  705. lddmc_refs_pop(1);
  706. result = lddmc_makenode(mddnode_getvalue(n_set), down, right);
  707. }
  708. } else if (m_val == 2 || m_val == 4) { // write, only-write
  709. if (m_val == 4) {
  710. // only-write, so we need to include 'for all variables'
  711. lddmc_refs_spawn(SPAWN(lddmc_relprod, mddnode_getright(n_set), rel, meta)); // next in set
  712. }
  713. // spawn for every value to write (rel)
  714. int count = 0;
  715. for (;;) {
  716. uint32_t value;
  717. if (mddnode_getcopy(n_rel)) value = mddnode_getvalue(n_set);
  718. else value = mddnode_getvalue(n_rel);
  719. lddmc_refs_spawn(SPAWN(lddmc_relprod_help, value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta)));
  720. count++;
  721. rel = mddnode_getright(n_rel);
  722. if (rel == lddmc_false) break;
  723. n_rel = GETNODE(rel);
  724. }
  725. // sync+union (one by one)
  726. result = lddmc_false;
  727. while (count--) {
  728. lddmc_refs_push(result);
  729. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_help));
  730. lddmc_refs_push(result2);
  731. result = CALL(lddmc_union, result, result2);
  732. lddmc_refs_pop(2);
  733. }
  734. if (m_val == 4) {
  735. // sync+union with other variables
  736. lddmc_refs_push(result);
  737. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod));
  738. lddmc_refs_push(result2);
  739. result = CALL(lddmc_union, result, result2);
  740. lddmc_refs_pop(2);
  741. }
  742. }
  743. /* Write to cache */
  744. if (cache_put3(CACHE_MDD_RELPROD, set, rel, meta, result)) sylvan_stats_count(LDD_RELPROD_CACHEDPUT);
  745. return result;
  746. }
/* Helper: prefix value <val> to the result of relprod_union(set, rel, proj, un),
 * producing the singleton right-list <val> -> relprod_union(...). */
TASK_5(MDD, lddmc_relprod_union_help, uint32_t, val, MDD, set, MDD, rel, MDD, proj, MDD, un)
{
    return lddmc_makenode(val, CALL(lddmc_relprod_union, set, rel, proj, un), lddmc_false);
}
  751. // meta: -1 (end; rest not in rel), 0 (not in rel), 1 (read), 2 (write), 3 (only-read), 4 (only-write)
  752. TASK_IMPL_4(MDD, lddmc_relprod_union, MDD, set, MDD, rel, MDD, meta, MDD, un)
  753. {
  754. if (set == lddmc_false) return un;
  755. if (rel == lddmc_false) return un;
  756. if (un == lddmc_false) return CALL(lddmc_relprod, set, rel, meta);
  757. mddnode_t n_meta = GETNODE(meta);
  758. uint32_t m_val = mddnode_getvalue(n_meta);
  759. if (m_val == (uint32_t)-1) return CALL(lddmc_union, set, un);
  760. // check depths (this triggers on logic error)
  761. if (m_val != 0) assert(set != lddmc_true && rel != lddmc_true && un != lddmc_true);
  762. /* Skip nodes if possible */
  763. if (!mddnode_getcopy(GETNODE(rel))) {
  764. if (m_val == 1 || m_val == 3) {
  765. if (!match_ldds(&set, &rel)) return un;
  766. }
  767. }
  768. mddnode_t n_set = GETNODE(set);
  769. mddnode_t n_rel = GETNODE(rel);
  770. mddnode_t n_un = GETNODE(un);
  771. // in some cases, we know un.value < result.value
  772. if (m_val == 0 || m_val == 3) {
  773. // if m_val == 0, no read/write, then un.value < set.value?
  774. // if m_val == 3, only read (write same), then un.value < set.value?
  775. uint32_t set_value = mddnode_getvalue(n_set);
  776. uint32_t un_value = mddnode_getvalue(n_un);
  777. if (un_value < set_value) {
  778. MDD right = CALL(lddmc_relprod_union, set, rel, meta, mddnode_getright(n_un));
  779. if (right == mddnode_getright(n_un)) return un;
  780. else return lddmc_makenode(mddnode_getvalue(n_un), mddnode_getdown(n_un), right);
  781. }
  782. } else if (m_val == 2 || m_val == 4) {
  783. // if we write, then we only know for certain that un.value < result.value if
  784. // the root of rel is not a copy node
  785. if (!mddnode_getcopy(n_rel)) {
  786. uint32_t rel_value = mddnode_getvalue(n_rel);
  787. uint32_t un_value = mddnode_getvalue(n_un);
  788. if (un_value < rel_value) {
  789. MDD right = CALL(lddmc_relprod_union, set, rel, meta, mddnode_getright(n_un));
  790. if (right == mddnode_getright(n_un)) return un;
  791. else return lddmc_makenode(mddnode_getvalue(n_un), mddnode_getdown(n_un), right);
  792. }
  793. }
  794. }
  795. /* Test gc */
  796. sylvan_gc_test();
  797. sylvan_stats_count(LDD_RELPROD_UNION);
  798. /* Access cache */
  799. MDD result;
  800. if (cache_get4(CACHE_MDD_RELPROD, set, rel, meta, un, &result)) {
  801. sylvan_stats_count(LDD_RELPROD_UNION_CACHED);
  802. return result;
  803. }
  804. /* Recursive operations */
  805. if (m_val == 0) { // not in rel
  806. uint32_t set_value = mddnode_getvalue(n_set);
  807. uint32_t un_value = mddnode_getvalue(n_un);
  808. // set_value > un_value already checked above
  809. if (set_value < un_value) {
  810. lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), rel, meta, un));
  811. // going down, we don't need _union, since un does not contain this subtree
  812. MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta));
  813. lddmc_refs_push(down);
  814. MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union));
  815. lddmc_refs_pop(1);
  816. if (down == lddmc_false) result = right;
  817. else result = lddmc_makenode(mddnode_getvalue(n_set), down, right);
  818. } else /* set_value == un_value */ {
  819. lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), rel, meta, mddnode_getright(n_un)));
  820. MDD down = CALL(lddmc_relprod_union, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta), mddnode_getdown(n_un));
  821. lddmc_refs_push(down);
  822. MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union));
  823. lddmc_refs_pop(1);
  824. if (right == mddnode_getright(n_un) && down == mddnode_getdown(n_un)) result = un;
  825. else result = lddmc_makenode(mddnode_getvalue(n_set), down, right);
  826. }
  827. } else if (m_val == 1) { // read
  828. // read layer: if not copy, then set&rel are already matched
  829. lddmc_refs_spawn(SPAWN(lddmc_relprod_union, set, mddnode_getright(n_rel), meta, un)); // spawn next read in list
  830. // for this read, either it is copy ('for all') or it is normal match
  831. if (mddnode_getcopy(n_rel)) {
  832. // spawn for every value in set (copy = for all)
  833. int count = 0;
  834. for (;;) {
  835. // stay same level of set and un (for write)
  836. lddmc_refs_spawn(SPAWN(lddmc_relprod_union, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), un));
  837. count++;
  838. set = mddnode_getright(n_set);
  839. if (set == lddmc_false) break;
  840. n_set = GETNODE(set);
  841. }
  842. // sync+union (one by one)
  843. result = lddmc_false;
  844. while (count--) {
  845. lddmc_refs_push(result);
  846. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union));
  847. lddmc_refs_push(result2);
  848. result = CALL(lddmc_union, result, result2);
  849. lddmc_refs_pop(2);
  850. }
  851. } else {
  852. // stay same level of set and un (for write)
  853. result = CALL(lddmc_relprod_union, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), un);
  854. }
  855. lddmc_refs_push(result);
  856. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union)); // sync next read in list
  857. lddmc_refs_push(result2);
  858. result = CALL(lddmc_union, result, result2);
  859. lddmc_refs_pop(2);
  860. } else if (m_val == 3) { // only-read
  861. // un < set already checked above
  862. if (mddnode_getcopy(n_rel)) {
  863. // copy on read ('for any value')
  864. // result = union(result_with_copy, result_without_copy)
  865. lddmc_refs_spawn(SPAWN(lddmc_relprod_union, set, mddnode_getright(n_rel), meta, un)); // spawn without_copy
  866. // spawn for every value to copy (set)
  867. int count = 0;
  868. result = lddmc_false;
  869. for (;;) {
  870. uint32_t set_value = mddnode_getvalue(n_set);
  871. uint32_t un_value = mddnode_getvalue(n_un);
  872. if (un_value < set_value) {
  873. // this is a bit tricky
  874. // the result of this will simply be "un_value, mddnode_getdown(n_un), false" which is intended
  875. lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, un_value, lddmc_false, lddmc_false, mddnode_getdown(n_meta), mddnode_getdown(n_un)));
  876. count++;
  877. un = mddnode_getright(n_un);
  878. if (un == lddmc_false) {
  879. result = CALL(lddmc_relprod, set, rel, meta);
  880. break;
  881. }
  882. n_un = GETNODE(un);
  883. } else if (un_value > set_value) {
  884. // tricky again. the result of this is a normal relprod
  885. lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, set_value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), lddmc_false));
  886. count++;
  887. set = mddnode_getright(n_set);
  888. if (set == lddmc_false) {
  889. result = un;
  890. break;
  891. }
  892. n_set = GETNODE(set);
  893. } else /* un_value == set_value */ {
  894. lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, set_value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_un)));
  895. count++;
  896. set = mddnode_getright(n_set);
  897. un = mddnode_getright(n_un);
  898. if (set == lddmc_false) {
  899. result = un;
  900. break;
  901. } else if (un == lddmc_false) {
  902. result = CALL(lddmc_relprod, set, rel, meta);
  903. break;
  904. }
  905. n_set = GETNODE(set);
  906. n_un = GETNODE(un);
  907. }
  908. }
  909. // sync+union (one by one)
  910. while (count--) {
  911. lddmc_refs_push(result);
  912. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union_help));
  913. lddmc_refs_push(result2);
  914. result = CALL(lddmc_union, result, result2);
  915. lddmc_refs_pop(2);
  916. }
  917. // add result from without_copy
  918. lddmc_refs_push(result);
  919. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union));
  920. lddmc_refs_push(result2);
  921. result = CALL(lddmc_union, result, result2);
  922. lddmc_refs_pop(2);
  923. } else {
  924. // only-read, not a copy node
  925. uint32_t set_value = mddnode_getvalue(n_set);
  926. uint32_t un_value = mddnode_getvalue(n_un);
  927. // already did un_value < set_value
  928. if (un_value > set_value) {
  929. lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), mddnode_getright(n_rel), meta, un));
  930. MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta));
  931. lddmc_refs_push(down);
  932. MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union));
  933. lddmc_refs_pop(1);
  934. result = lddmc_makenode(mddnode_getvalue(n_set), down, right);
  935. } else /* un_value == set_value */ {
  936. lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), mddnode_getright(n_rel), meta, mddnode_getright(n_un)));
  937. MDD down = CALL(lddmc_relprod_union, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_un));
  938. lddmc_refs_push(down);
  939. MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union));
  940. lddmc_refs_pop(1);
  941. result = lddmc_makenode(mddnode_getvalue(n_set), down, right);
  942. }
  943. }
  944. } else if (m_val == 2 || m_val == 4) { // write, only-write
  945. if (m_val == 4) {
  946. // only-write, so we need to include 'for all variables'
  947. lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), rel, meta, un)); // next in set
  948. }
  949. // spawn for every value to write (rel)
  950. int count = 0;
  951. for (;;) {
  952. uint32_t value;
  953. if (mddnode_getcopy(n_rel)) value = mddnode_getvalue(n_set);
  954. else value = mddnode_getvalue(n_rel);
  955. uint32_t un_value = mddnode_getvalue(n_un);
  956. if (un_value < value) {
  957. // the result of this will simply be "un_value, mddnode_getdown(n_un), false" which is intended
  958. lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, un_value, lddmc_false, lddmc_false, mddnode_getdown(n_meta), mddnode_getdown(n_un)));
  959. count++;
  960. un = mddnode_getright(n_un);
  961. if (un == lddmc_false) {
  962. result = CALL(lddmc_relprod, set, rel, meta);
  963. break;
  964. }
  965. n_un = GETNODE(un);
  966. } else if (un_value > value) {
  967. lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), lddmc_false));
  968. count++;
  969. rel = mddnode_getright(n_rel);
  970. if (rel == lddmc_false) {
  971. result = un;
  972. break;
  973. }
  974. n_rel = GETNODE(rel);
  975. } else /* un_value == value */ {
  976. lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_un)));
  977. count++;
  978. rel = mddnode_getright(n_rel);
  979. un = mddnode_getright(n_un);
  980. if (rel == lddmc_false) {
  981. result = un;
  982. break;
  983. } else if (un == lddmc_false) {
  984. result = CALL(lddmc_relprod, set, rel, meta);
  985. break;
  986. }
  987. n_rel = GETNODE(rel);
  988. n_un = GETNODE(un);
  989. }
  990. }
  991. // sync+union (one by one)
  992. while (count--) {
  993. lddmc_refs_push(result);
  994. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union_help));
  995. lddmc_refs_push(result2);
  996. result = CALL(lddmc_union, result, result2);
  997. lddmc_refs_pop(2);
  998. }
  999. if (m_val == 4) {
  1000. // sync+union with other variables
  1001. lddmc_refs_push(result);
  1002. MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union));
  1003. lddmc_refs_push(result2);
  1004. result = CALL(lddmc_union, result, result2);
  1005. lddmc_refs_pop(2);
  1006. }
  1007. }
  1008. /* Write to cache */
  1009. if (cache_put4(CACHE_MDD_RELPROD, set, rel, meta, un, result)) sylvan_stats_count(LDD_RELPROD_UNION_CACHEDPUT);
  1010. return result;
  1011. }
  1012. TASK_5(MDD, lddmc_relprev_help, uint32_t, val, MDD, set, MDD, rel, MDD, proj, MDD, uni)
  1013. {
  1014. return lddmc_makenode(val, CALL(lddmc_relprev, set, rel, proj, uni), lddmc_false);
  1015. }
  1016. /**
  1017. * Calculate all predecessors to a in uni according to rel[meta]
  1018. * <meta> follows the same semantics as relprod
  1019. * i.e. 0 (not in rel), 1 (read), 2 (write), 3 (only-read), 4 (only-write), -1 (end; rest=0)
  1020. */
TASK_IMPL_4(MDD, lddmc_relprev, MDD, set, MDD, rel, MDD, meta, MDD, uni)
{
    /* Terminal cases: an empty set, relation or universe has no predecessors. */
    if (set == lddmc_false) return lddmc_false;
    if (rel == lddmc_false) return lddmc_false;
    if (uni == lddmc_false) return lddmc_false;
    mddnode_t n_meta = GETNODE(meta);
    uint32_t m_val = mddnode_getvalue(n_meta);
    if (m_val == (uint32_t)-1) {
        /* End of meta: remaining variables are untouched by rel;
         * predecessors are simply the set restricted to the universe. */
        if (set == uni) return set;
        else return lddmc_intersect(set, uni);
    }
    /* Below the 'end' marker all operands must still be proper nodes. */
    if (m_val != 0) assert(set != lddmc_true && rel != lddmc_true && uni != lddmc_true);
    /* Skip nodes if possible */
    if (m_val == 0) {
        // not in rel: match set and uni ('intersect')
        if (!match_ldds(&set, &uni)) return lddmc_false;
    } else if (mddnode_getcopy(GETNODE(rel))) {
        // read+copy: no matching (pre is everything in uni)
        // write+copy: no matching (match after split: set and uni)
        // only-read+copy: match set and uni
        // only-write+copy: no matching (match after split: set and uni)
        if (m_val == 3) {
            if (!match_ldds(&set, &uni)) return lddmc_false;
        }
    } else if (m_val == 1) {
        // read: match uni and rel
        if (!match_ldds(&uni, &rel)) return lddmc_false;
    } else if (m_val == 2) {
        // write: match set and rel
        if (!match_ldds(&set, &rel)) return lddmc_false;
    } else if (m_val == 3) {
        // only-read: match uni and set and rel
        // three-way advance: repeatedly move whichever list is behind until
        // all three front values coincide, or some list runs out.
        mddnode_t n_set = GETNODE(set);
        mddnode_t n_rel = GETNODE(rel);
        mddnode_t n_uni = GETNODE(uni);
        uint32_t n_set_value = mddnode_getvalue(n_set);
        uint32_t n_rel_value = mddnode_getvalue(n_rel);
        uint32_t n_uni_value = mddnode_getvalue(n_uni);
        while (n_uni_value != n_rel_value || n_rel_value != n_set_value) {
            if (n_uni_value < n_rel_value || n_uni_value < n_set_value) {
                uni = mddnode_getright(n_uni);
                if (uni == lddmc_false) return lddmc_false;
                n_uni = GETNODE(uni);
                n_uni_value = mddnode_getvalue(n_uni);
            }
            if (n_set_value < n_rel_value || n_set_value < n_uni_value) {
                set = mddnode_getright(n_set);
                if (set == lddmc_false) return lddmc_false;
                n_set = GETNODE(set);
                n_set_value = mddnode_getvalue(n_set);
            }
            if (n_rel_value < n_set_value || n_rel_value < n_uni_value) {
                rel = mddnode_getright(n_rel);
                if (rel == lddmc_false) return lddmc_false;
                n_rel = GETNODE(rel);
                n_rel_value = mddnode_getvalue(n_rel);
            }
        }
    } else if (m_val == 4) {
        // only-write: match set and rel (then use whole universe)
        if (!match_ldds(&set, &rel)) return lddmc_false;
    }
    /* Test gc */
    sylvan_gc_test();
    sylvan_stats_count(LDD_RELPREV);
    /* Access cache */
    MDD result;
    if (cache_get4(CACHE_MDD_RELPREV, set, rel, meta, uni, &result)) {
        sylvan_stats_count(LDD_RELPREV_CACHED);
        return result;
    }
    /* Re-fetch nodes: set/rel/uni may have advanced during matching above. */
    mddnode_t n_set = GETNODE(set);
    mddnode_t n_rel = GETNODE(rel);
    mddnode_t n_uni = GETNODE(uni);
    /* Recursive operations */
    if (m_val == 0) { // not in rel
        // m_val == 0 : not in rel (intersection set and universe)
        lddmc_refs_spawn(SPAWN(lddmc_relprev, mddnode_getright(n_set), rel, meta, mddnode_getright(n_uni)));
        MDD down = CALL(lddmc_relprev, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta), mddnode_getdown(n_uni));
        lddmc_refs_push(down);
        MDD right = lddmc_refs_sync(SYNC(lddmc_relprev));
        lddmc_refs_pop(1);
        result = lddmc_makenode(mddnode_getvalue(n_set), down, right);
    } else if (m_val == 1) { // read level
        // result value is in case of copy: everything in uni!
        // result value is in case of not-copy: match uni and rel!
        lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni)); // next in rel
        if (mddnode_getcopy(n_rel)) {
            // result is everything in uni
            // spawn for every value to have been read (uni)
            int count = 0;
            for (;;) {
                lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), uni));
                count++;
                uni = mddnode_getright(n_uni);
                if (uni == lddmc_false) break;
                n_uni = GETNODE(uni);
            }
            // sync+union (one by one)
            // NOTE: tasks are synced in LIFO order; union is commutative so
            // the order does not affect the result.
            result = lddmc_false;
            while (count--) {
                lddmc_refs_push(result);
                MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help));
                lddmc_refs_push(result2);
                result = CALL(lddmc_union, result, result2);
                lddmc_refs_pop(2);
            }
        } else {
            // already matched
            MDD down = CALL(lddmc_relprev, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), uni);
            result = lddmc_makenode(mddnode_getvalue(n_uni), down, lddmc_false);
        }
        lddmc_refs_push(result);
        MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev)); // next in rel
        lddmc_refs_push(result2);
        result = CALL(lddmc_union, result, result2);
        lddmc_refs_pop(2);
    } else if (m_val == 3) { // only-read level
        // result value is in case of copy: match set and uni! (already done first match)
        // result value is in case of not-copy: match set and uni and rel!
        lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni)); // next in rel
        if (mddnode_getcopy(n_rel)) {
            // spawn for every matching set+uni
            int count = 0;
            for (;;) {
                lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni)));
                count++;
                uni = mddnode_getright(n_uni);
                if (!match_ldds(&set, &uni)) break;
                n_set = GETNODE(set);
                n_uni = GETNODE(uni);
            }
            // sync+union (one by one)
            result = lddmc_false;
            while (count--) {
                lddmc_refs_push(result);
                MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help));
                lddmc_refs_push(result2);
                result = CALL(lddmc_union, result, result2);
                lddmc_refs_pop(2);
            }
        } else {
            // already matched
            MDD down = CALL(lddmc_relprev, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni));
            result = lddmc_makenode(mddnode_getvalue(n_uni), down, lddmc_false);
        }
        lddmc_refs_push(result);
        MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev));
        lddmc_refs_push(result2);
        result = CALL(lddmc_union, result, result2);
        lddmc_refs_pop(2);
    } else if (m_val == 2) { // write level
        // note: the read level has already matched the uni that was read.
        // write+copy: only for the one set equal to uni...
        // write: match set and rel (already done)
        lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni));
        if (mddnode_getcopy(n_rel)) {
            // 'copy' on a write level means the value written equals the value
            // read; only the subtree of set under uni's value contributes.
            MDD down = lddmc_follow(set, mddnode_getvalue(n_uni));
            if (down != lddmc_false) {
                result = CALL(lddmc_relprev, down, mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni));
            } else {
                result = lddmc_false;
            }
        } else {
            result = CALL(lddmc_relprev, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni));
        }
        lddmc_refs_push(result);
        MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev));
        lddmc_refs_push(result2);
        result = CALL(lddmc_union, result, result2);
        lddmc_refs_pop(2);
    } else if (m_val == 4) { // only-write level
        // only-write+copy: match set and uni after spawn
        // only-write: match set and rel (already done)
        lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni));
        if (mddnode_getcopy(n_rel)) {
            // spawn for every matching set+uni
            int count = 0;
            for (;;) {
                if (!match_ldds(&set, &uni)) break;
                n_set = GETNODE(set);
                n_uni = GETNODE(uni);
                lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni)));
                count++;
                uni = mddnode_getright(n_uni);
            }
            // sync+union (one by one)
            result = lddmc_false;
            while (count--) {
                lddmc_refs_push(result);
                MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help));
                lddmc_refs_push(result2);
                result = CALL(lddmc_union, result, result2);
                lddmc_refs_pop(2);
            }
        } else {
            // no copy: the written value says nothing about the previous value,
            // so spawn for every value in universe!!
            int count = 0;
            for (;;) {
                lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni)));
                count++;
                uni = mddnode_getright(n_uni);
                if (uni == lddmc_false) break;
                n_uni = GETNODE(uni);
            }
            // sync+union (one by one)
            result = lddmc_false;
            while (count--) {
                lddmc_refs_push(result);
                MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help));
                lddmc_refs_push(result2);
                result = CALL(lddmc_union, result, result2);
                lddmc_refs_pop(2);
            }
        }
        lddmc_refs_push(result);
        MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev));
        lddmc_refs_push(result2);
        result = CALL(lddmc_union, result, result2);
        lddmc_refs_pop(2);
    }
    /* Write to cache */
    if (cache_put4(CACHE_MDD_RELPREV, set, rel, meta, uni, result)) sylvan_stats_count(LDD_RELPREV_CACHEDPUT);
    return result;
}
  1246. // Same 'proj' as project. So: proj: -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep)
/**
 * Relational join of <a> and <b>, where <a_proj>/<b_proj> describe which
 * levels of the full domain each operand covers.
 * Same 'proj' encoding as lddmc_project:
 *   -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep)
 */
TASK_IMPL_4(MDD, lddmc_join, MDD, a, MDD, b, MDD, a_proj, MDD, b_proj)
{
    if (a == lddmc_false || b == lddmc_false) return lddmc_false;
    /* Test gc */
    sylvan_gc_test();
    mddnode_t n_a_proj = GETNODE(a_proj);
    mddnode_t n_b_proj = GETNODE(b_proj);
    uint32_t a_proj_val = mddnode_getvalue(n_a_proj);
    uint32_t b_proj_val = mddnode_getvalue(n_b_proj);
    // Skip levels present in neither operand.
    while (a_proj_val == 0 && b_proj_val == 0) {
        a_proj = mddnode_getdown(n_a_proj);
        b_proj = mddnode_getdown(n_b_proj);
        n_a_proj = GETNODE(a_proj);
        n_b_proj = GETNODE(b_proj);
        a_proj_val = mddnode_getvalue(n_a_proj);
        b_proj_val = mddnode_getvalue(n_b_proj);
    }
    if (a_proj_val == (uint32_t)-2) return b; // no a left
    if (b_proj_val == (uint32_t)-2) return a; // no b left
    if (a_proj_val == (uint32_t)-1 && b_proj_val == (uint32_t)-1) return CALL(lddmc_intersect, a, b);
    // At this point, only proj_val {-1, 0, 1}; max one with -1; max one with 0.
    const int keep_a = a_proj_val != 0;
    const int keep_b = b_proj_val != 0;
    if (keep_a && keep_b) {
        // If both 'keep', then match values
        if (!match_ldds(&a, &b)) return lddmc_false;
    }
    sylvan_stats_count(LDD_JOIN);
    /* Access cache */
    MDD result;
    if (cache_get4(CACHE_MDD_JOIN, a, b, a_proj, b_proj, &result)) {
        sylvan_stats_count(LDD_JOIN_CACHED);
        return result;
    }
    /* Perform recursive calculation */
    const mddnode_t na = GETNODE(a);
    const mddnode_t nb = GETNODE(b);
    uint32_t val;
    MDD down;
    // Make copies (for cache): a_proj/b_proj are advanced below for the
    // downward recursion, but the cache entry must use the current values.
    MDD _a_proj = a_proj, _b_proj = b_proj;
    if (keep_a) {
        if (keep_b) {
            // Both keep this level (values already matched above).
            val = mddnode_getvalue(nb);
            lddmc_refs_spawn(SPAWN(lddmc_join, mddnode_getright(na), mddnode_getright(nb), a_proj, b_proj));
            // Only advance a proj when it is a node (-1 means 'keep rest').
            if (a_proj_val != (uint32_t)-1) a_proj = mddnode_getdown(n_a_proj);
            if (b_proj_val != (uint32_t)-1) b_proj = mddnode_getdown(n_b_proj);
            down = CALL(lddmc_join, mddnode_getdown(na), mddnode_getdown(nb), a_proj, b_proj);
        } else {
            // Only a covers this level; b stays at the current node.
            val = mddnode_getvalue(na);
            lddmc_refs_spawn(SPAWN(lddmc_join, mddnode_getright(na), b, a_proj, b_proj));
            if (a_proj_val != (uint32_t)-1) a_proj = mddnode_getdown(n_a_proj);
            if (b_proj_val != (uint32_t)-1) b_proj = mddnode_getdown(n_b_proj);
            down = CALL(lddmc_join, mddnode_getdown(na), b, a_proj, b_proj);
        }
    } else {
        // Only b covers this level; a stays at the current node.
        val = mddnode_getvalue(nb);
        lddmc_refs_spawn(SPAWN(lddmc_join, a, mddnode_getright(nb), a_proj, b_proj));
        if (a_proj_val != (uint32_t)-1) a_proj = mddnode_getdown(n_a_proj);
        if (b_proj_val != (uint32_t)-1) b_proj = mddnode_getdown(n_b_proj);
        down = CALL(lddmc_join, a, mddnode_getdown(nb), a_proj, b_proj);
    }
    lddmc_refs_push(down);
    MDD right = lddmc_refs_sync(SYNC(lddmc_join));
    lddmc_refs_pop(1);
    result = lddmc_makenode(val, down, right);
    /* Write to cache (with the un-advanced proj copies) */
    if (cache_put4(CACHE_MDD_JOIN, a, b, _a_proj, _b_proj, result)) sylvan_stats_count(LDD_JOIN_CACHEDPUT);
    return result;
}
  1317. // so: proj: -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep)
/**
 * Existential projection of <mdd> onto the levels selected by <proj>.
 * proj encoding: -2 (end; quantify rest), -1 (end; keep rest),
 *                 0 (quantify this level), 1 (keep this level).
 */
TASK_IMPL_2(MDD, lddmc_project, const MDD, mdd, const MDD, proj)
{
    if (mdd == lddmc_false) return lddmc_false; // projection of empty is empty
    if (mdd == lddmc_true) return lddmc_true; // projection of universe is universe...
    mddnode_t p_node = GETNODE(proj);
    uint32_t p_val = mddnode_getvalue(p_node);
    if (p_val == (uint32_t)-1) return mdd;
    if (p_val == (uint32_t)-2) return lddmc_true; // because we always end with true.
    sylvan_gc_test();
    sylvan_stats_count(LDD_PROJECT);
    MDD result;
    if (cache_get3(CACHE_MDD_PROJECT, mdd, proj, 0, &result)) {
        sylvan_stats_count(LDD_PROJECT_CACHED);
        return result;
    }
    mddnode_t n = GETNODE(mdd);
    if (p_val == 1) { // keep
        // Keep this level: recurse right (same level) in parallel with down.
        lddmc_refs_spawn(SPAWN(lddmc_project, mddnode_getright(n), proj));
        MDD down = CALL(lddmc_project, mddnode_getdown(n), mddnode_getdown(p_node));
        lddmc_refs_push(down);
        MDD right = lddmc_refs_sync(SYNC(lddmc_project));
        lddmc_refs_pop(1);
        result = lddmc_makenode(mddnode_getvalue(n), down, right);
    } else { // quantify
        if (mddnode_getdown(n) == lddmc_true) { // assume lowest level
            result = lddmc_true;
        } else {
            // Quantify: project every child, then union the results.
            int count = 0;
            MDD p_down = mddnode_getdown(p_node), _mdd=mdd;
            while (1) {
                lddmc_refs_spawn(SPAWN(lddmc_project, mddnode_getdown(n), p_down));
                count++;
                _mdd = mddnode_getright(n);
                assert(_mdd != lddmc_true);
                if (_mdd == lddmc_false) break;
                n = GETNODE(_mdd);
            }
            // Sync the spawned tasks one by one (LIFO) and union them.
            result = lddmc_false;
            while (count--) {
                lddmc_refs_push(result);
                MDD down = lddmc_refs_sync(SYNC(lddmc_project));
                lddmc_refs_push(down);
                result = CALL(lddmc_union, result, down);
                lddmc_refs_pop(2);
            }
        }
    }
    if (cache_put3(CACHE_MDD_PROJECT, mdd, proj, 0, result)) sylvan_stats_count(LDD_PROJECT_CACHEDPUT);
    return result;
}
  1368. // so: proj: -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep)
/**
 * Like lddmc_project, but subtract <avoid> from the result on the fly.
 * proj encoding: -2 (end; quantify rest), -1 (end; keep rest),
 *                 0 (quantify this level), 1 (keep this level).
 */
TASK_IMPL_3(MDD, lddmc_project_minus, const MDD, mdd, const MDD, proj, MDD, avoid)
{
    // This implementation assumed "avoid" has correct depth
    if (avoid == lddmc_true) return lddmc_false;
    if (mdd == avoid) return lddmc_false;
    if (mdd == lddmc_false) return lddmc_false; // projection of empty is empty
    if (mdd == lddmc_true) return lddmc_true; // avoid != lddmc_true
    mddnode_t p_node = GETNODE(proj);
    uint32_t p_val = mddnode_getvalue(p_node);
    if (p_val == (uint32_t)-1) return lddmc_minus(mdd, avoid);
    if (p_val == (uint32_t)-2) return lddmc_true;
    sylvan_gc_test();
    sylvan_stats_count(LDD_PROJECT_MINUS);
    MDD result;
    // NOTE: shares CACHE_MDD_PROJECT with lddmc_project; the avoid operand
    // (0 for plain project) keeps the entries distinct.
    if (cache_get3(CACHE_MDD_PROJECT, mdd, proj, avoid, &result)) {
        sylvan_stats_count(LDD_PROJECT_MINUS_CACHED);
        return result;
    }
    mddnode_t n = GETNODE(mdd);
    if (p_val == 1) { // keep
        // move 'avoid' until it matches (value lists are sorted)
        uint32_t val = mddnode_getvalue(n);
        MDD a_down = lddmc_false;
        while (avoid != lddmc_false) {
            mddnode_t a_node = GETNODE(avoid);
            uint32_t a_val = mddnode_getvalue(a_node);
            if (a_val > val) {
                break;
            } else if (a_val == val) {
                a_down = mddnode_getdown(a_node);
                break;
            }
            avoid = mddnode_getright(a_node);
        }
        lddmc_refs_spawn(SPAWN(lddmc_project_minus, mddnode_getright(n), proj, avoid));
        MDD down = CALL(lddmc_project_minus, mddnode_getdown(n), mddnode_getdown(p_node), a_down);
        lddmc_refs_push(down);
        MDD right = lddmc_refs_sync(SYNC(lddmc_project_minus));
        lddmc_refs_pop(1);
        result = lddmc_makenode(val, down, right);
    } else { // quantify
        if (mddnode_getdown(n) == lddmc_true) { // assume lowest level
            result = lddmc_true;
        } else {
            // Quantify: project every child (minus avoid), then union.
            int count = 0;
            MDD p_down = mddnode_getdown(p_node), _mdd=mdd;
            while (1) {
                lddmc_refs_spawn(SPAWN(lddmc_project_minus, mddnode_getdown(n), p_down, avoid));
                count++;
                _mdd = mddnode_getright(n);
                assert(_mdd != lddmc_true);
                if (_mdd == lddmc_false) break;
                n = GETNODE(_mdd);
            }
            result = lddmc_false;
            while (count--) {
                lddmc_refs_push(result);
                MDD down = lddmc_refs_sync(SYNC(lddmc_project_minus));
                lddmc_refs_push(down);
                result = CALL(lddmc_union, result, down);
                lddmc_refs_pop(2);
            }
        }
    }
    if (cache_put3(CACHE_MDD_PROJECT, mdd, proj, avoid, result)) sylvan_stats_count(LDD_PROJECT_MINUS_CACHEDPUT);
    return result;
}
  1436. MDD
  1437. lddmc_union_cube(MDD a, uint32_t* values, size_t count)
  1438. {
  1439. if (a == lddmc_false) return lddmc_cube(values, count);
  1440. if (a == lddmc_true) {
  1441. assert(count == 0);
  1442. return lddmc_true;
  1443. }
  1444. assert(count != 0);
  1445. mddnode_t na = GETNODE(a);
  1446. uint32_t na_value = mddnode_getvalue(na);
  1447. /* Only create a new node if something actually changed */
  1448. if (na_value < *values) {
  1449. MDD right = lddmc_union_cube(mddnode_getright(na), values, count);
  1450. if (right == mddnode_getright(na)) return a; // no actual change
  1451. return lddmc_makenode(na_value, mddnode_getdown(na), right);
  1452. } else if (na_value == *values) {
  1453. MDD down = lddmc_union_cube(mddnode_getdown(na), values+1, count-1);
  1454. if (down == mddnode_getdown(na)) return a; // no actual change
  1455. return lddmc_makenode(na_value, down, mddnode_getright(na));
  1456. } else /* na_value > *values */ {
  1457. return lddmc_makenode(*values, lddmc_cube(values+1, count-1), a);
  1458. }
  1459. }
  1460. MDD
  1461. lddmc_union_cube_copy(MDD a, uint32_t* values, int* copy, size_t count)
  1462. {
  1463. if (a == lddmc_false) return lddmc_cube_copy(values, copy, count);
  1464. if (a == lddmc_true) {
  1465. assert(count == 0);
  1466. return lddmc_true;
  1467. }
  1468. assert(count != 0);
  1469. mddnode_t na = GETNODE(a);
  1470. /* Only create a new node if something actually changed */
  1471. int na_copy = mddnode_getcopy(na);
  1472. if (na_copy && *copy) {
  1473. MDD down = lddmc_union_cube_copy(mddnode_getdown(na), values+1, copy+1, count-1);
  1474. if (down == mddnode_getdown(na)) return a; // no actual change
  1475. return lddmc_make_copynode(down, mddnode_getright(na));
  1476. } else if (na_copy) {
  1477. MDD right = lddmc_union_cube_copy(mddnode_getright(na), values, copy, count);
  1478. if (right == mddnode_getright(na)) return a; // no actual change
  1479. return lddmc_make_copynode(mddnode_getdown(na), right);
  1480. } else if (*copy) {
  1481. return lddmc_make_copynode(lddmc_cube_copy(values+1, copy+1, count-1), a);
  1482. }
  1483. uint32_t na_value = mddnode_getvalue(na);
  1484. if (na_value < *values) {
  1485. MDD right = lddmc_union_cube_copy(mddnode_getright(na), values, copy, count);
  1486. if (right == mddnode_getright(na)) return a; // no actual change
  1487. return lddmc_makenode(na_value, mddnode_getdown(na), right);
  1488. } else if (na_value == *values) {
  1489. MDD down = lddmc_union_cube_copy(mddnode_getdown(na), values+1, copy+1, count-1);
  1490. if (down == mddnode_getdown(na)) return a; // no actual change
  1491. return lddmc_makenode(na_value, down, mddnode_getright(na));
  1492. } else /* na_value > *values */ {
  1493. return lddmc_makenode(*values, lddmc_cube_copy(values+1, copy+1, count-1), a);
  1494. }
  1495. }
  1496. int
  1497. lddmc_member_cube(MDD a, uint32_t* values, size_t count)
  1498. {
  1499. while (1) {
  1500. if (a == lddmc_false) return 0;
  1501. if (a == lddmc_true) return 1;
  1502. assert(count > 0); // size mismatch
  1503. a = lddmc_follow(a, *values);
  1504. values++;
  1505. count--;
  1506. }
  1507. }
  1508. int
  1509. lddmc_member_cube_copy(MDD a, uint32_t* values, int* copy, size_t count)
  1510. {
  1511. while (1) {
  1512. if (a == lddmc_false) return 0;
  1513. if (a == lddmc_true) return 1;
  1514. assert(count > 0); // size mismatch
  1515. if (*copy) a = lddmc_followcopy(a);
  1516. else a = lddmc_follow(a, *values);
  1517. values++;
  1518. count--;
  1519. }
  1520. }
  1521. MDD
  1522. lddmc_cube(uint32_t* values, size_t count)
  1523. {
  1524. if (count == 0) return lddmc_true;
  1525. return lddmc_makenode(*values, lddmc_cube(values+1, count-1), lddmc_false);
  1526. }
  1527. MDD
  1528. lddmc_cube_copy(uint32_t* values, int* copy, size_t count)
  1529. {
  1530. if (count == 0) return lddmc_true;
  1531. if (*copy) return lddmc_make_copynode(lddmc_cube_copy(values+1, copy+1, count-1), lddmc_false);
  1532. else return lddmc_makenode(*values, lddmc_cube_copy(values+1, copy+1, count-1), lddmc_false);
  1533. }
  1534. /**
  1535. * Count number of nodes for each level
  1536. */
  1537. static void
  1538. lddmc_nodecount_levels_mark(MDD mdd, size_t *variables)
  1539. {
  1540. if (mdd <= lddmc_true) return;
  1541. mddnode_t n = GETNODE(mdd);
  1542. if (!mddnode_getmark(n)) {
  1543. mddnode_setmark(n, 1);
  1544. (*variables) += 1;
  1545. lddmc_nodecount_levels_mark(mddnode_getright(n), variables);
  1546. lddmc_nodecount_levels_mark(mddnode_getdown(n), variables+1);
  1547. }
  1548. }
  1549. static void
  1550. lddmc_nodecount_levels_unmark(MDD mdd)
  1551. {
  1552. if (mdd <= lddmc_true) return;
  1553. mddnode_t n = GETNODE(mdd);
  1554. if (mddnode_getmark(n)) {
  1555. mddnode_setmark(n, 0);
  1556. lddmc_nodecount_levels_unmark(mddnode_getright(n));
  1557. lddmc_nodecount_levels_unmark(mddnode_getdown(n));
  1558. }
  1559. }
/**
 * Count the number of unique MDD nodes per level.
 * <variables> points to an array of counters (one per level); counter i is
 * incremented once for each distinct node at depth i.
 * Not reentrant: uses the shared mark bits in the node table.
 */
void
lddmc_nodecount_levels(MDD mdd, size_t *variables)
{
    // Mark phase does the counting; unmark restores the mark bits.
    lddmc_nodecount_levels_mark(mdd, variables);
    lddmc_nodecount_levels_unmark(mdd);
}
  1566. /**
  1567. * Count number of nodes in MDD
  1568. */
  1569. static size_t
  1570. lddmc_nodecount_mark(MDD mdd)
  1571. {
  1572. if (mdd <= lddmc_true) return 0;
  1573. mddnode_t n = GETNODE(mdd);
  1574. if (mddnode_getmark(n)) return 0;
  1575. mddnode_setmark(n, 1);
  1576. return 1 + lddmc_nodecount_mark(mddnode_getdown(n)) + lddmc_nodecount_mark(mddnode_getright(n));
  1577. }
  1578. static void
  1579. lddmc_nodecount_unmark(MDD mdd)
  1580. {
  1581. if (mdd <= lddmc_true) return;
  1582. mddnode_t n = GETNODE(mdd);
  1583. if (mddnode_getmark(n)) {
  1584. mddnode_setmark(n, 0);
  1585. lddmc_nodecount_unmark(mddnode_getright(n));
  1586. lddmc_nodecount_unmark(mddnode_getdown(n));
  1587. }
  1588. }
  1589. size_t
  1590. lddmc_nodecount(MDD mdd)
  1591. {
  1592. size_t result = lddmc_nodecount_mark(mdd);
  1593. lddmc_nodecount_unmark(mdd);
  1594. return result;
  1595. }
  1596. /**
  1597. * CALCULATE NUMBER OF VAR ASSIGNMENTS THAT YIELD TRUE
  1598. */
/**
 * Count the number of satisfying assignments, returned as a double.
 * Terminals: false contributes 0, true contributes 1; for internal nodes the
 * count is satcount(down) + satcount(right), computed in parallel.
 * Results are memoized in the operation cache; the union reinterprets the
 * double's bit pattern as a uint64_t so it fits a 64-bit cache slot
 * (type punning through a union, valid in C).
 */
TASK_IMPL_1(lddmc_satcount_double_t, lddmc_satcount_cached, MDD, mdd)
{
    if (mdd == lddmc_false) return 0.0;
    if (mdd == lddmc_true) return 1.0;
    /* Perhaps execute garbage collection */
    sylvan_gc_test();
    union {
        lddmc_satcount_double_t d;
        uint64_t s;
    } hack;
    sylvan_stats_count(LDD_SATCOUNT);
    /* Cache hit: the stored 64-bit word is the double's bit pattern. */
    if (cache_get3(CACHE_MDD_SATCOUNT, mdd, 0, 0, &hack.s)) {
        sylvan_stats_count(LDD_SATCOUNT_CACHED);
        return hack.d;
    }
    mddnode_t n = GETNODE(mdd);
    /* Count the down and right branches in parallel, then sum. */
    SPAWN(lddmc_satcount_cached, mddnode_getdown(n));
    lddmc_satcount_double_t right = CALL(lddmc_satcount_cached, mddnode_getright(n));
    hack.d = right + SYNC(lddmc_satcount_cached);
    if (cache_put3(CACHE_MDD_SATCOUNT, mdd, 0, 0, hack.s)) sylvan_stats_count(LDD_SATCOUNT_CACHEDPUT);
    return hack.d;
}
/**
 * Count the number of satisfying assignments, returned as a long double for
 * extra precision. Same recursion as lddmc_satcount_cached, but a long double
 * needs two 64-bit cache slots (SATCOUNTL1/SATCOUNTL2); the result is cached
 * only when both halves are stored, and a hit requires both halves present.
 * NOTE(review): assumes sizeof(long double) <= 16 bytes — holds on common
 * x86 ABIs; verify on other targets.
 */
TASK_IMPL_1(long double, lddmc_satcount, MDD, mdd)
{
    if (mdd == lddmc_false) return 0.0;
    if (mdd == lddmc_true) return 1.0;
    /* Perhaps execute garbage collection */
    sylvan_gc_test();
    sylvan_stats_count(LDD_SATCOUNTL);
    /* Type-pun the long double into two 64-bit words for the cache. */
    union {
        long double d;
        struct {
            uint64_t s1;
            uint64_t s2;
        } s;
    } hack;
    if (cache_get3(CACHE_MDD_SATCOUNTL1, mdd, 0, 0, &hack.s.s1) &&
        cache_get3(CACHE_MDD_SATCOUNTL2, mdd, 0, 0, &hack.s.s2)) {
        sylvan_stats_count(LDD_SATCOUNTL_CACHED);
        return hack.d;
    }
    mddnode_t n = GETNODE(mdd);
    /* Count the down and right branches in parallel, then sum. */
    SPAWN(lddmc_satcount, mddnode_getdown(n));
    long double right = CALL(lddmc_satcount, mddnode_getright(n));
    hack.d = right + SYNC(lddmc_satcount);
    int c1 = cache_put3(CACHE_MDD_SATCOUNTL1, mdd, 0, 0, hack.s.s1);
    int c2 = cache_put3(CACHE_MDD_SATCOUNTL2, mdd, 0, 0, hack.s.s2);
    if (c1 && c2) sylvan_stats_count(LDD_SATCOUNTL_CACHEDPUT);
    return hack.d;
}
/**
 * For every complete assignment (path to true) in <mdd>, call <cb> with the
 * collected values; union all MDDs returned by the callbacks into one result.
 * values/count hold the prefix of values accumulated so far on the way down.
 */
TASK_IMPL_5(MDD, lddmc_collect, MDD, mdd, lddmc_collect_cb, cb, void*, context, uint32_t*, values, size_t, count)
{
    if (mdd == lddmc_false) return lddmc_false;
    if (mdd == lddmc_true) {
        /* Complete assignment collected: hand it to the callback. */
        return WRAP(cb, values, count, context);
    }
    mddnode_t n = GETNODE(mdd);
    /* Process the right sibling in parallel (same value prefix). */
    lddmc_refs_spawn(SPAWN(lddmc_collect, mddnode_getright(n), cb, context, values, count));
    /* Extend the prefix with this node's value for the down branch.
     * NOTE(review): VLA on the stack; MDD depth bounds stack usage. */
    uint32_t newvalues[count+1];
    if (count > 0) memcpy(newvalues, values, sizeof(uint32_t)*count);
    newvalues[count] = mddnode_getvalue(n);
    MDD down = CALL(lddmc_collect, mddnode_getdown(n), cb, context, newvalues, count+1);
    if (down == lddmc_false) {
        /* Nothing from the down branch; result is whatever right produced. */
        MDD result = lddmc_refs_sync(SYNC(lddmc_collect));
        return result;
    }
    /* Keep down externally referenced across SYNC (GC may run during it). */
    lddmc_refs_push(down);
    MDD right = lddmc_refs_sync(SYNC(lddmc_collect));
    if (right == lddmc_false) {
        lddmc_refs_pop(1);
        return down;
    } else {
        lddmc_refs_push(right);
        MDD result = CALL(lddmc_union, down, right);
        lddmc_refs_pop(2);
        return result;
    }
}
/*
 * Sequential enumeration helper: records this node's value at values[count],
 * then recurses down (one level deeper) and right (same level, overwriting
 * the slot). The caller allocates <values> to the full depth of the MDD.
 */
VOID_TASK_5(_lddmc_sat_all_nopar, MDD, mdd, lddmc_enum_cb, cb, void*, context, uint32_t*, values, size_t, count)
{
    if (mdd == lddmc_false) return;
    if (mdd == lddmc_true) {
        /* One complete assignment collected: report it. */
        WRAP(cb, values, count, context);
        return;
    }
    mddnode_t n = GETNODE(mdd);
    values[count] = mddnode_getvalue(n);
    CALL(_lddmc_sat_all_nopar, mddnode_getdown(n), cb, context, values, count+1);
    CALL(_lddmc_sat_all_nopar, mddnode_getright(n), cb, context, values, count);
}
/*
 * Enumerate all satisfying assignments sequentially, invoking <cb> once per
 * assignment. Allocates a single values buffer sized to the MDD depth.
 */
VOID_TASK_IMPL_3(lddmc_sat_all_nopar, MDD, mdd, lddmc_enum_cb, cb, void*, context)
{
    // determine depth
    size_t count=0;
    MDD _mdd = mdd;
    while (_mdd > lddmc_true) {
        /* Follow down-edges; every path is assumed to have the same depth,
         * so the first path suffices (see lddmc_test_ismdd). */
        _mdd = mddnode_getdown(GETNODE(_mdd));
        assert(_mdd != lddmc_false);
        count++;
    }
    /* NOTE(review): VLA sized by MDD depth — assumes bounded depth. */
    uint32_t values[count];
    CALL(_lddmc_sat_all_nopar, mdd, cb, context, values, 0);
}
/*
 * Enumerate all satisfying assignments in parallel: the right sibling is
 * handled by a spawned task while this task extends the prefix and descends.
 * Each branch gets its own copy of the prefix (newvalues), so the callback
 * may be invoked concurrently from different workers — NOTE(review): the
 * callback is presumably required to be thread-safe; confirm with callers.
 */
VOID_TASK_IMPL_5(lddmc_sat_all_par, MDD, mdd, lddmc_enum_cb, cb, void*, context, uint32_t*, values, size_t, count)
{
    if (mdd == lddmc_false) return;
    if (mdd == lddmc_true) {
        WRAP(cb, values, count, context);
        return;
    }
    mddnode_t n = GETNODE(mdd);
    SPAWN(lddmc_sat_all_par, mddnode_getright(n), cb, context, values, count);
    /* Private copy of the prefix, extended with this node's value. */
    uint32_t newvalues[count+1];
    if (count > 0) memcpy(newvalues, values, sizeof(uint32_t)*count);
    newvalues[count] = mddnode_getvalue(n);
    CALL(lddmc_sat_all_par, mddnode_getdown(n), cb, context, newvalues, count+1);
    SYNC(lddmc_sat_all_par);
}
  1717. struct lddmc_match_sat_info
  1718. {
  1719. MDD mdd;
  1720. MDD match;
  1721. MDD proj;
  1722. size_t count;
  1723. uint32_t values[0];
  1724. };
// Enumerate assignments of <mdd> that match <match> on the projected levels.
// proj values per level: -1 = no further projection (rest of levels are
// unconstrained), 0 = level not matched, 1 = level must match.
VOID_TASK_3(lddmc_match_sat, struct lddmc_match_sat_info *, info, lddmc_enum_cb, cb, void*, context)
{
    MDD a = info->mdd, b = info->match, proj = info->proj;
    if (a == lddmc_false || b == lddmc_false) return;
    if (a == lddmc_true) {
        /* Both exhausted: report the collected assignment. */
        assert(b == lddmc_true);
        WRAP(cb, info->values, info->count, context);
        return;
    }
    mddnode_t p_node = GETNODE(proj);
    uint32_t p_val = mddnode_getvalue(p_node);
    if (p_val == (uint32_t)-1) {
        /* Projection ended: the remaining levels are unconstrained. */
        assert(b == lddmc_true);
        CALL(lddmc_sat_all_par, a, cb, context, info->values, info->count);
        return;
    }
    /* Get nodes */
    mddnode_t na = GETNODE(a);
    mddnode_t nb = GETNODE(b);
    uint32_t na_value = mddnode_getvalue(na);
    uint32_t nb_value = mddnode_getvalue(nb);
    /* Skip nodes if possible: on a matched level, advance along the right
     * chains (both sorted by value) until the values agree — a merge step. */
    if (p_val == 1) {
        while (na_value != nb_value) {
            if (na_value < nb_value) {
                a = mddnode_getright(na);
                if (a == lddmc_false) return;
                na = GETNODE(a);
                na_value = mddnode_getvalue(na);
            }
            if (nb_value < na_value) {
                b = mddnode_getright(nb);
                if (b == lddmc_false) return;
                nb = GETNODE(b);
                nb_value = mddnode_getvalue(nb);
            }
        }
    }
    /* Stack-allocate child invocation records: ri continues right on the same
     * level (same prefix), di descends with the prefix extended by na_value. */
    struct lddmc_match_sat_info *ri = (struct lddmc_match_sat_info*)alloca(sizeof(struct lddmc_match_sat_info)+sizeof(uint32_t[info->count]));
    struct lddmc_match_sat_info *di = (struct lddmc_match_sat_info*)alloca(sizeof(struct lddmc_match_sat_info)+sizeof(uint32_t[info->count+1]));
    ri->mdd = mddnode_getright(na);
    di->mdd = mddnode_getdown(na);
    ri->match = b;
    di->match = mddnode_getdown(nb);
    ri->proj = proj;
    di->proj = mddnode_getdown(p_node);
    ri->count = info->count;
    di->count = info->count+1;
    if (ri->count > 0) memcpy(ri->values, info->values, sizeof(uint32_t[info->count]));
    if (di->count > 0) memcpy(di->values, info->values, sizeof(uint32_t[info->count]));
    di->values[info->count] = na_value;
    SPAWN(lddmc_match_sat, ri, cb, context);
    CALL(lddmc_match_sat, di, cb, context);
    SYNC(lddmc_match_sat);
}
  1781. VOID_TASK_IMPL_5(lddmc_match_sat_par, MDD, mdd, MDD, match, MDD, proj, lddmc_enum_cb, cb, void*, context)
  1782. {
  1783. struct lddmc_match_sat_info i;
  1784. i.mdd = mdd;
  1785. i.match = match;
  1786. i.proj = proj;
  1787. i.count = 0;
  1788. CALL(lddmc_match_sat, &i, cb, context);
  1789. }
  1790. int
  1791. lddmc_sat_one(MDD mdd, uint32_t* values, size_t count)
  1792. {
  1793. if (mdd == lddmc_false) return 0;
  1794. if (mdd == lddmc_true) return 1;
  1795. assert(count != 0);
  1796. mddnode_t n = GETNODE(mdd);
  1797. *values = mddnode_getvalue(n);
  1798. return lddmc_sat_one(mddnode_getdown(n), values+1, count-1);
  1799. }
  1800. MDD
  1801. lddmc_sat_one_mdd(MDD mdd)
  1802. {
  1803. if (mdd == lddmc_false) return lddmc_false;
  1804. if (mdd == lddmc_true) return lddmc_true;
  1805. mddnode_t n = GETNODE(mdd);
  1806. MDD down = lddmc_sat_one_mdd(mddnode_getdown(n));
  1807. return lddmc_makenode(mddnode_getvalue(n), down, lddmc_false);
  1808. }
  1809. TASK_IMPL_4(MDD, lddmc_compose, MDD, mdd, lddmc_compose_cb, cb, void*, context, int, depth)
  1810. {
  1811. if (depth == 0 || mdd == lddmc_false || mdd == lddmc_true) {
  1812. return WRAP(cb, mdd, context);
  1813. } else {
  1814. mddnode_t n = GETNODE(mdd);
  1815. lddmc_refs_spawn(SPAWN(lddmc_compose, mddnode_getright(n), cb, context, depth));
  1816. MDD down = lddmc_compose(mddnode_getdown(n), cb, context, depth-1);
  1817. lddmc_refs_push(down);
  1818. MDD right = lddmc_refs_sync(SYNC(lddmc_compose));
  1819. lddmc_refs_pop(1);
  1820. return lddmc_makenode(mddnode_getvalue(n), down, right);
  1821. }
  1822. }
/*
 * Generic sequential visitor: pre-callback decides whether to descend (return
 * 0 to prune), init_context prepares per-child contexts (flag 1 = down child,
 * 0 = right sibling), post-callback fires after both children are visited.
 * NOTE(review): GETNODE is applied to mdd without a terminal check — the
 * visit_pre callback is presumably expected to return 0 for terminals;
 * confirm with callers.
 */
VOID_TASK_IMPL_4(lddmc_visit_seq, MDD, mdd, lddmc_visit_callbacks_t*, cbs, size_t, ctx_size, void*, context)
{
    if (WRAP(cbs->lddmc_visit_pre, mdd, context) == 0) return;
    /* Stack-allocated per-child contexts of caller-specified size. */
    void* context_down = alloca(ctx_size);
    void* context_right = alloca(ctx_size);
    WRAP(cbs->lddmc_visit_init_context, context_down, context, 1);
    WRAP(cbs->lddmc_visit_init_context, context_right, context, 0);
    CALL(lddmc_visit_seq, mddnode_getdown(GETNODE(mdd)), cbs, ctx_size, context_down);
    CALL(lddmc_visit_seq, mddnode_getright(GETNODE(mdd)), cbs, ctx_size, context_right);
    WRAP(cbs->lddmc_visit_post, mdd, context);
}
/*
 * Parallel variant of lddmc_visit_seq: the down child is visited by a spawned
 * task while this task visits the right sibling; post fires after both join.
 * NOTE(review): callbacks may run concurrently — presumably they must be
 * thread-safe; confirm with callers.
 */
VOID_TASK_IMPL_4(lddmc_visit_par, MDD, mdd, lddmc_visit_callbacks_t*, cbs, size_t, ctx_size, void*, context)
{
    if (WRAP(cbs->lddmc_visit_pre, mdd, context) == 0) return;
    /* Stack-allocated per-child contexts of caller-specified size. */
    void* context_down = alloca(ctx_size);
    void* context_right = alloca(ctx_size);
    WRAP(cbs->lddmc_visit_init_context, context_down, context, 1);
    WRAP(cbs->lddmc_visit_init_context, context_right, context, 0);
    SPAWN(lddmc_visit_par, mddnode_getdown(GETNODE(mdd)), cbs, ctx_size, context_down);
    CALL(lddmc_visit_par, mddnode_getright(GETNODE(mdd)), cbs, ctx_size, context_right);
    SYNC(lddmc_visit_par);
    WRAP(cbs->lddmc_visit_post, mdd, context);
}
  1846. /**
  1847. * GENERIC MARK/UNMARK METHODS
  1848. */
  1849. static inline int
  1850. lddmc_mark(mddnode_t node)
  1851. {
  1852. if (mddnode_getmark(node)) return 0;
  1853. mddnode_setmark(node, 1);
  1854. return 1;
  1855. }
  1856. static inline int
  1857. lddmc_unmark(mddnode_t node)
  1858. {
  1859. if (mddnode_getmark(node)) {
  1860. mddnode_setmark(node, 0);
  1861. return 1;
  1862. } else {
  1863. return 0;
  1864. }
  1865. }
  1866. static void
  1867. lddmc_unmark_rec(mddnode_t node)
  1868. {
  1869. if (lddmc_unmark(node)) {
  1870. MDD node_right = mddnode_getright(node);
  1871. if (node_right > lddmc_true) lddmc_unmark_rec(GETNODE(node_right));
  1872. MDD node_down = mddnode_getdown(node);
  1873. if (node_down > lddmc_true) lddmc_unmark_rec(GETNODE(node_down));
  1874. }
  1875. }
  1876. /*************
  1877. * DOT OUTPUT
  1878. *************/
/*
 * Print one MDD "record" node (the whole right-chain as a single dot record
 * with one port per value), then recurse on the down-edges of every node in
 * the chain and print the corresponding edges. Only the head of the chain is
 * marked; right siblings are printed as ports of the same record.
 */
static void
lddmc_fprintdot_rec(FILE* out, MDD mdd)
{
    // assert(mdd > lddmc_true);
    // check mark
    mddnode_t n = GETNODE(mdd);
    if (mddnode_getmark(n)) return;
    mddnode_setmark(n, 1);
    // print the node
    uint32_t val = mddnode_getvalue(n);
    fprintf(out, "%" PRIu64 " [shape=record, label=\"", mdd);
    /* Copy nodes get a '*' port named "c"; value nodes a port per value. */
    if (mddnode_getcopy(n)) fprintf(out, "<c> *");
    else fprintf(out, "<%u> %u", val, val);
    MDD right = mddnode_getright(n);
    while (right != lddmc_false) {
        mddnode_t n2 = GETNODE(right);
        uint32_t val2 = mddnode_getvalue(n2);
        fprintf(out, "|<%u> %u", val2, val2);
        right = mddnode_getright(n2);
        // assert(right != lddmc_true);
    }
    fprintf(out, "\"];\n");
    // recurse and print the edges
    /* Walk the right-chain again: recurse into each down-edge and print an
     * edge from this record's port to the head port of the child record. */
    for (;;) {
        MDD down = mddnode_getdown(n);
        // assert(down != lddmc_false);
        if (down > lddmc_true) {
            lddmc_fprintdot_rec(out, down);
            if (mddnode_getcopy(n)) {
                fprintf(out, "%" PRIu64 ":c -> ", mdd);
            } else {
                fprintf(out, "%" PRIu64 ":%u -> ", mdd, mddnode_getvalue(n));
            }
            if (mddnode_getcopy(GETNODE(down))) {
                fprintf(out, "%" PRIu64 ":c [style=solid];\n", down);
            } else {
                fprintf(out, "%" PRIu64 ":%u [style=solid];\n", down, mddnode_getvalue(GETNODE(down)));
            }
        }
        MDD right = mddnode_getright(n);
        if (right == lddmc_false) break;
        n = GETNODE(right);
    }
}
/*
 * Clear the marks set by lddmc_fprintdot_rec. Mirrors its marking scheme:
 * only the head of each right-chain carries a mark, so the head's mark is
 * cleared once and the recursion then walks every down-edge of the chain.
 */
static void
lddmc_fprintdot_unmark(MDD mdd)
{
    if (mdd <= lddmc_true) return;
    mddnode_t n = GETNODE(mdd);
    if (mddnode_getmark(n)) {
        mddnode_setmark(n, 0);
        for (;;) {
            lddmc_fprintdot_unmark(mddnode_getdown(n));
            mdd = mddnode_getright(n);
            if (mdd == lddmc_false) return;
            n = GETNODE(mdd);
        }
    }
}
  1938. void
  1939. lddmc_fprintdot(FILE *out, MDD mdd)
  1940. {
  1941. fprintf(out, "digraph \"DD\" {\n");
  1942. fprintf(out, "graph [dpi = 300];\n");
  1943. fprintf(out, "center = true;\n");
  1944. fprintf(out, "edge [dir = forward];\n");
  1945. // Special case: false
  1946. if (mdd == lddmc_false) {
  1947. fprintf(out, "0 [shape=record, label=\"False\"];\n");
  1948. fprintf(out, "}\n");
  1949. return;
  1950. }
  1951. // Special case: true
  1952. if (mdd == lddmc_true) {
  1953. fprintf(out, "1 [shape=record, label=\"True\"];\n");
  1954. fprintf(out, "}\n");
  1955. return;
  1956. }
  1957. lddmc_fprintdot_rec(out, mdd);
  1958. lddmc_fprintdot_unmark(mdd);
  1959. fprintf(out, "}\n");
  1960. }
  1961. void
  1962. lddmc_printdot(MDD mdd)
  1963. {
  1964. lddmc_fprintdot(stdout, mdd);
  1965. }
  1966. /**
  1967. * Some debug stuff
  1968. */
  1969. void
  1970. lddmc_fprint(FILE *f, MDD mdd)
  1971. {
  1972. lddmc_serialize_reset();
  1973. size_t v = lddmc_serialize_add(mdd);
  1974. fprintf(f, "%zu,", v);
  1975. lddmc_serialize_totext(f);
  1976. }
  1977. void
  1978. lddmc_print(MDD mdd)
  1979. {
  1980. lddmc_fprint(stdout, mdd);
  1981. }
  1982. /**
  1983. * SERIALIZATION
  1984. */
/* One serialization entry: maps an MDD node to its assigned sequence number
 * (numbers start at 2; 0 and 1 are the terminals). */
struct lddmc_ser {
    MDD mdd;          /* node index in the unique table */
    size_t assigned;  /* sequence number assigned during serialization */
};
// Define a AVL tree type with prefix 'lddmc_ser' holding
// nodes of struct lddmc_ser with the following compare() function...
AVL(lddmc_ser, struct lddmc_ser)
{
    /* Order by MDD index: supports mdd -> assigned lookups. */
    if (left->mdd > right->mdd) return 1;
    if (left->mdd < right->mdd) return -1;
    return 0;
}
// Define a AVL tree type with prefix 'lddmc_ser_reversed' holding
// nodes of struct lddmc_ser with the following compare() function...
AVL(lddmc_ser_reversed, struct lddmc_ser)
{
    /* Order by assigned number: supports assigned -> mdd lookups and
     * in-order iteration in assignment order. */
    if (left->assigned > right->assigned) return 1;
    if (left->assigned < right->assigned) return -1;
    return 0;
}
// Initially, both sets are empty
static avl_node_t *lddmc_ser_set = NULL;           // mdd -> assigned
static avl_node_t *lddmc_ser_reversed_set = NULL;  // assigned -> mdd
// Start counting (assigning numbers to MDDs) at 2
// (0 and 1 are reserved for the terminals lddmc_false / lddmc_true)
static volatile size_t lddmc_ser_counter = 2;
static size_t lddmc_ser_done = 0;  // entries already written by lddmc_serialize_tofile
// Given a MDD, assign unique numbers to all nodes
/* Post-order assignment guarantees children get smaller numbers than their
 * parent, so entries can later be written to file in topological order. */
static size_t
lddmc_serialize_assign_rec(MDD mdd)
{
    /* Terminals serialize as themselves (0 and 1). */
    if (mdd <= lddmc_true) return mdd;
    mddnode_t n = GETNODE(mdd);
    struct lddmc_ser s, *ss;
    s.mdd = mdd;
    ss = lddmc_ser_search(lddmc_ser_set, &s);
    if (ss == NULL) {
        // assign dummy value
        s.assigned = 0;
        ss = lddmc_ser_put(&lddmc_ser_set, &s, NULL);
        // first assign recursively
        lddmc_serialize_assign_rec(mddnode_getright(n));
        lddmc_serialize_assign_rec(mddnode_getdown(n));
        // assign real value
        ss->assigned = lddmc_ser_counter++;
        // put a copy in the reversed table
        lddmc_ser_reversed_insert(&lddmc_ser_reversed_set, ss);
    }
    return ss->assigned;
}
  2034. size_t
  2035. lddmc_serialize_add(MDD mdd)
  2036. {
  2037. return lddmc_serialize_assign_rec(mdd);
  2038. }
  2039. void
  2040. lddmc_serialize_reset()
  2041. {
  2042. lddmc_ser_free(&lddmc_ser_set);
  2043. lddmc_ser_free(&lddmc_ser_reversed_set);
  2044. lddmc_ser_counter = 2;
  2045. lddmc_ser_done = 0;
  2046. }
  2047. size_t
  2048. lddmc_serialize_get(MDD mdd)
  2049. {
  2050. if (mdd <= lddmc_true) return mdd;
  2051. struct lddmc_ser s, *ss;
  2052. s.mdd = mdd;
  2053. ss = lddmc_ser_search(lddmc_ser_set, &s);
  2054. assert(ss != NULL);
  2055. return ss->assigned;
  2056. }
  2057. MDD
  2058. lddmc_serialize_get_reversed(size_t value)
  2059. {
  2060. if ((MDD)value <= lddmc_true) return (MDD)value;
  2061. struct lddmc_ser s, *ss;
  2062. s.assigned = value;
  2063. ss = lddmc_ser_reversed_search(lddmc_ser_reversed_set, &s);
  2064. assert(ss != NULL);
  2065. return ss->mdd;
  2066. }
  2067. void
  2068. lddmc_serialize_totext(FILE *out)
  2069. {
  2070. avl_iter_t *it = lddmc_ser_reversed_iter(lddmc_ser_reversed_set);
  2071. struct lddmc_ser *s;
  2072. fprintf(out, "[");
  2073. while ((s=lddmc_ser_reversed_iter_next(it))) {
  2074. MDD mdd = s->mdd;
  2075. mddnode_t n = GETNODE(mdd);
  2076. fprintf(out, "(%zu,v=%u,d=%zu,r=%zu),", s->assigned,
  2077. mddnode_getvalue(n),
  2078. lddmc_serialize_get(mddnode_getdown(n)),
  2079. lddmc_serialize_get(mddnode_getright(n)));
  2080. }
  2081. fprintf(out, "]");
  2082. lddmc_ser_reversed_iter_free(it);
  2083. }
/*
 * Incrementally write serialized nodes to <out>: first the number of new
 * entries since the previous call, then one struct mddnode per new entry
 * with child pointers rewritten as assigned sequence numbers.
 * NOTE(review): fwrite return values are not checked — short writes are
 * silently ignored; consider verifying the item counts.
 */
void
lddmc_serialize_tofile(FILE *out)
{
    size_t count = avl_count(lddmc_ser_reversed_set);
    assert(count >= lddmc_ser_done);
    assert(count == lddmc_ser_counter-2);
    /* Only entries added since the last call are written. */
    count -= lddmc_ser_done;
    fwrite(&count, sizeof(size_t), 1, out);
    struct lddmc_ser *s;
    avl_iter_t *it = lddmc_ser_reversed_iter(lddmc_ser_reversed_set);
    /* Skip already written entries */
    size_t index = 0;
    while (index < lddmc_ser_done && (s=lddmc_ser_reversed_iter_next(it))) {
        assert(s->assigned == index+2);
        index++;
    }
    while ((s=lddmc_ser_reversed_iter_next(it))) {
        assert(s->assigned == index+2);
        index++;
        mddnode_t n = GETNODE(s->mdd);
        struct mddnode node;
        /* Rebuild the node with children replaced by their assigned numbers. */
        uint64_t right = lddmc_serialize_get(mddnode_getright(n));
        uint64_t down = lddmc_serialize_get(mddnode_getdown(n));
        if (mddnode_getcopy(n)) mddnode_makecopy(&node, right, down);
        else mddnode_make(&node, mddnode_getvalue(n), right, down);
        /* Children were assigned earlier, so their numbers precede ours. */
        assert(right <= index);
        assert(down <= index);
        fwrite(&node, sizeof(struct mddnode), 1, out);
    }
    lddmc_ser_done = lddmc_ser_counter-2;
    lddmc_ser_reversed_iter_free(it);
}
/*
 * Read serialized nodes from <in> (format written by lddmc_serialize_tofile)
 * and rebuild them in the unique table, extending both lookup tables.
 * Child references in the file are sequence numbers of earlier entries.
 * NOTE(review): on format errors this exits the process; the TODO below
 * asks for proper error returns instead.
 */
void
lddmc_serialize_fromfile(FILE *in)
{
    size_t count, i;
    if (fread(&count, sizeof(size_t), 1, in) != 1) {
        // TODO FIXME return error
        printf("sylvan_serialize_fromfile: file format error, giving up\n");
        exit(-1);
    }
    for (i=1; i<=count; i++) {
        struct mddnode node;
        if (fread(&node, sizeof(struct mddnode), 1, in) != 1) {
            // TODO FIXME return error
            printf("sylvan_serialize_fromfile: file format error, giving up\n");
            exit(-1);
        }
        /* Children must refer to entries already read (topological order). */
        assert(mddnode_getright(&node) <= lddmc_ser_done+1);
        assert(mddnode_getdown(&node) <= lddmc_ser_done+1);
        MDD right = lddmc_serialize_get_reversed(mddnode_getright(&node));
        MDD down = lddmc_serialize_get_reversed(mddnode_getdown(&node));
        struct lddmc_ser s;
        if (mddnode_getcopy(&node)) s.mdd = lddmc_make_copynode(down, right);
        else s.mdd = lddmc_makenode(mddnode_getvalue(&node), down, right);
        s.assigned = lddmc_ser_done+2; // starts at 0 but we want 2-based...
        lddmc_ser_done++;
        lddmc_ser_insert(&lddmc_ser_set, &s);
        lddmc_ser_reversed_insert(&lddmc_ser_reversed_set, &s);
    }
}
  2145. static void
  2146. lddmc_sha2_rec(MDD mdd, SHA256_CTX *ctx)
  2147. {
  2148. if (mdd <= lddmc_true) {
  2149. SHA256_Update(ctx, (void*)&mdd, sizeof(uint64_t));
  2150. return;
  2151. }
  2152. mddnode_t node = GETNODE(mdd);
  2153. if (lddmc_mark(node)) {
  2154. uint32_t val = mddnode_getvalue(node);
  2155. SHA256_Update(ctx, (void*)&val, sizeof(uint32_t));
  2156. lddmc_sha2_rec(mddnode_getdown(node), ctx);
  2157. lddmc_sha2_rec(mddnode_getright(node), ctx);
  2158. }
  2159. }
  2160. void
  2161. lddmc_printsha(MDD mdd)
  2162. {
  2163. lddmc_fprintsha(stdout, mdd);
  2164. }
  2165. void
  2166. lddmc_fprintsha(FILE *out, MDD mdd)
  2167. {
  2168. char buf[80];
  2169. lddmc_getsha(mdd, buf);
  2170. fprintf(out, "%s", buf);
  2171. }
  2172. void
  2173. lddmc_getsha(MDD mdd, char *target)
  2174. {
  2175. SHA256_CTX ctx;
  2176. SHA256_Init(&ctx);
  2177. lddmc_sha2_rec(mdd, &ctx);
  2178. if (mdd > lddmc_true) lddmc_unmark_rec(GETNODE(mdd));
  2179. SHA256_End(&ctx, target);
  2180. }
  2181. #ifndef NDEBUG
  2182. size_t
  2183. lddmc_test_ismdd(MDD mdd)
  2184. {
  2185. if (mdd == lddmc_true) return 1;
  2186. if (mdd == lddmc_false) return 0;
  2187. int first = 1;
  2188. size_t depth = 0;
  2189. if (mdd != lddmc_false) {
  2190. mddnode_t n = GETNODE(mdd);
  2191. if (mddnode_getcopy(n)) {
  2192. mdd = mddnode_getright(n);
  2193. depth = lddmc_test_ismdd(mddnode_getdown(n));
  2194. assert(depth >= 1);
  2195. }
  2196. }
  2197. uint32_t value = 0;
  2198. while (mdd != lddmc_false) {
  2199. assert(llmsset_is_marked(nodes, mdd));
  2200. mddnode_t n = GETNODE(mdd);
  2201. uint32_t next_value = mddnode_getvalue(n);
  2202. assert(mddnode_getcopy(n) == 0);
  2203. if (first) {
  2204. first = 0;
  2205. depth = lddmc_test_ismdd(mddnode_getdown(n));
  2206. assert(depth >= 1);
  2207. } else {
  2208. assert(value < next_value);
  2209. assert(depth == lddmc_test_ismdd(mddnode_getdown(n)));
  2210. }
  2211. value = next_value;
  2212. mdd = mddnode_getright(n);
  2213. }
  2214. return 1 + depth;
  2215. }
  2216. #endif