// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Jacob <benoitjacob@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include <iostream>
#include <cstdint>
#include <cstdio>  // for sscanf
#include <cstdlib>
#include <vector>
#include <algorithm>
#include <fstream>
#include <string>
#include <cmath>
#include <cassert>
#include <cstring>
#include <memory>

#include <Eigen/Core>

using namespace std;

const int default_precision = 4;

// see --only-cubic-sizes
bool only_cubic_sizes = false;

// see --dump-tables
bool dump_tables = false;
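
// Returns log2(x) for a power-of-two x (floor(log2(x)) otherwise, 0 for x == 0).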
uint8_t log2_pot(size_t x) {
  size_t l = 0;
  while (x >>= 1) l++;
  return l;
}
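
// Packs a (k, m, n) triple of power-of-two sizes into 12 bits:
// 4 bits of log2 per dimension, with k in the high nibble and n in the low nibble.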
uint16_t compact_size_triple(size_t k, size_t m, size_t n)
{
  return (log2_pot(k) << 8) | (log2_pot(m) << 4) | log2_pot(n);
}

// just a helper to store a triple of K,M,N sizes for matrix product
struct size_triple_t
{
  uint16_t k, m, n;
  size_triple_t() : k(0), m(0), n(0) {}
  size_triple_t(size_t _k, size_t _m, size_t _n) : k(_k), m(_m), n(_n) {}
  size_triple_t(const size_triple_t& o) : k(o.k), m(o.m), n(o.n) {}
  size_triple_t(uint16_t compact)
  {
    k = 1 << ((compact & 0xf00) >> 8);
    m = 1 << ((compact & 0x0f0) >> 4);
    n = 1 << ((compact & 0x00f) >> 0);
  }
  bool is_cubic() const { return k == m && m == n; }
};

ostream& operator<<(ostream& s, const size_triple_t& t)
{
  return s << "(" << t.k << ", " << t.m << ", " << t.n << ")";
}
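
// One raw measurement parsed from an input file: the product size (compact 12-bit
// form), the block size (compact form for POT measurements, an explicit triple for
// default-size measurements), and the measured GFlop/s.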
struct inputfile_entry_t
{
  uint16_t product_size;
  uint16_t pot_block_size;
  size_triple_t nonpot_block_size;
  float gflops;
};
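
// Parses one output file of benchmark-blocking-sizes. The file type is detected
// from its "BEGIN MEASUREMENTS ..." header line: either measurements over all
// power-of-two (POT) block sizes, or measurements with the default block sizes.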
struct inputfile_t
{
  enum class type_t {
    unknown,
    all_pot_sizes,
    default_sizes
  };

  string filename;
  vector<inputfile_entry_t> entries;
  type_t type;

  inputfile_t(const string& fname)
    : filename(fname)
    , type(type_t::unknown)
  {
    ifstream stream(filename);
    if (!stream.is_open()) {
      cerr << "couldn't open input file: " << filename << endl;
      exit(1);
    }
    string line;
    while (getline(stream, line)) {
      if (line.empty()) continue;
      if (line.find("BEGIN MEASUREMENTS ALL POT SIZES") == 0) {
        if (type != type_t::unknown) {
          cerr << "Input file " << filename << " contains redundant BEGIN MEASUREMENTS lines";
          exit(1);
        }
        type = type_t::all_pot_sizes;
        continue;
      }
      if (line.find("BEGIN MEASUREMENTS DEFAULT SIZES") == 0) {
        if (type != type_t::unknown) {
          cerr << "Input file " << filename << " contains redundant BEGIN MEASUREMENTS lines";
          exit(1);
        }
        type = type_t::default_sizes;
        continue;
      }

      if (type == type_t::unknown) {
        continue;
      }
      switch(type) {
        case type_t::all_pot_sizes: {
          unsigned int product_size, block_size;
          float gflops;
          int sscanf_result =
            sscanf(line.c_str(), "%x %x %f",
                   &product_size,
                   &block_size,
                   &gflops);
          if (3 != sscanf_result ||
              !product_size ||
              product_size > 0xfff ||
              !block_size ||
              block_size > 0xfff ||
              !isfinite(gflops))
          {
            cerr << "ill-formed input file: " << filename << endl;
            cerr << "offending line:" << endl << line << endl;
            exit(1);
          }
          if (only_cubic_sizes && !size_triple_t(product_size).is_cubic()) {
            continue;
          }
          inputfile_entry_t entry;
          entry.product_size = uint16_t(product_size);
          entry.pot_block_size = uint16_t(block_size);
          entry.gflops = gflops;
          entries.push_back(entry);
          break;
        }
        case type_t::default_sizes: {
          unsigned int product_size;
          float gflops;
          int bk, bm, bn;
          int sscanf_result =
            sscanf(line.c_str(), "%x default(%d, %d, %d) %f",
                   &product_size,
                   &bk, &bm, &bn,
                   &gflops);
          if (5 != sscanf_result ||
              !product_size ||
              product_size > 0xfff ||
              !isfinite(gflops))
          {
            cerr << "ill-formed input file: " << filename << endl;
            cerr << "offending line:" << endl << line << endl;
            exit(1);
          }
          if (only_cubic_sizes && !size_triple_t(product_size).is_cubic()) {
            continue;
          }
          inputfile_entry_t entry;
          entry.product_size = uint16_t(product_size);
          entry.pot_block_size = 0;
          entry.nonpot_block_size = size_triple_t(bk, bm, bn);
          entry.gflops = gflops;
          entries.push_back(entry);
          break;
        }
        default:
          break;
      }
    }
    stream.close();
    if (type == type_t::unknown) {
      cerr << "Unrecognized input file " << filename << endl;
      exit(1);
    }
    if (entries.empty()) {
      cerr << "didn't find any measurements in input file: " << filename << endl;
      exit(1);
    }
  }
};
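
// A measurement after preprocessing: GFlop/s are replaced by an 'efficiency' in
// [0, 1], relative to the best-performing block size for the same product size
// within the same file.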
struct preprocessed_inputfile_entry_t
{
  uint16_t product_size;
  uint16_t block_size;
  float efficiency;
};

bool lower_efficiency(const preprocessed_inputfile_entry_t& e1, const preprocessed_inputfile_entry_t& e2)
{
  return e1.efficiency < e2.efficiency;
}
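
// Preprocesses an all-POT-sizes input file: for each product size, the gflops of
// every block size are normalized by the best gflops seen for that product size,
// yielding per-entry efficiencies. Entries must be grouped by product size.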
struct preprocessed_inputfile_t
{
  string filename;
  vector<preprocessed_inputfile_entry_t> entries;

  preprocessed_inputfile_t(const inputfile_t& inputfile)
    : filename(inputfile.filename)
  {
    if (inputfile.type != inputfile_t::type_t::all_pot_sizes) {
      abort();
    }
    auto it = inputfile.entries.begin();
    auto it_first_with_given_product_size = it;
    while (it != inputfile.entries.end()) {
      ++it;
      if (it == inputfile.entries.end() ||
          it->product_size != it_first_with_given_product_size->product_size)
      {
        import_input_file_range_one_product_size(it_first_with_given_product_size, it);
        it_first_with_given_product_size = it;
      }
    }
  }

private:
  void import_input_file_range_one_product_size(
         const vector<inputfile_entry_t>::const_iterator& begin,
         const vector<inputfile_entry_t>::const_iterator& end)
  {
    uint16_t product_size = begin->product_size;
    float max_gflops = 0.0f;
    for (auto it = begin; it != end; ++it) {
      if (it->product_size != product_size) {
        cerr << "Unexpected ordering of entries in " << filename << endl;
        cerr << "(Expected all entries for product size " << hex << product_size << dec << " to be grouped)" << endl;
        exit(1);
      }
      max_gflops = max(max_gflops, it->gflops);
    }
    for (auto it = begin; it != end; ++it) {
      preprocessed_inputfile_entry_t entry;
      entry.product_size = it->product_size;
      entry.block_size = it->pot_block_size;
      entry.efficiency = it->gflops / max_gflops;
      entries.push_back(entry);
    }
  }
};
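
// All files must list exactly the same (product size, block size) pairs in the
// same order; this lets the code below index entries by position across files.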
void check_all_files_in_same_exact_order(
       const vector<preprocessed_inputfile_t>& preprocessed_inputfiles)
{
  if (preprocessed_inputfiles.empty()) {
    return;
  }

  const preprocessed_inputfile_t& first_file = preprocessed_inputfiles[0];
  const size_t num_entries = first_file.entries.size();

  for (size_t i = 0; i < preprocessed_inputfiles.size(); i++) {
    if (preprocessed_inputfiles[i].entries.size() != num_entries) {
      cerr << "these files have different number of entries: "
           << preprocessed_inputfiles[i].filename
           << " and "
           << first_file.filename
           << endl;
      exit(1);
    }
  }

  for (size_t entry_index = 0; entry_index < num_entries; entry_index++) {
    const uint16_t entry_product_size = first_file.entries[entry_index].product_size;
    const uint16_t entry_block_size = first_file.entries[entry_index].block_size;
    for (size_t file_index = 0; file_index < preprocessed_inputfiles.size(); file_index++) {
      const preprocessed_inputfile_t& cur_file = preprocessed_inputfiles[file_index];
      if (cur_file.entries[entry_index].product_size != entry_product_size ||
          cur_file.entries[entry_index].block_size != entry_block_size)
      {
        cerr << "entries not in same order between these files: "
             << first_file.filename
             << " and "
             << cur_file.filename
             << endl;
        exit(1);
      }
    }
  }
}
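
// Efficiency of a subset of files: for each product size, take the block size that
// maximizes the worst-case (minimum over the subset's files) efficiency, then return
// the minimum of that value over all product sizes. In other words, how well a single
// shared block-size choice per product size can serve every file in the subset.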
float efficiency_of_subset(
        const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
        const vector<size_t>& subset)
{
  if (subset.size() <= 1) {
    return 1.0f;
  }

  const preprocessed_inputfile_t& first_file = preprocessed_inputfiles[subset[0]];
  const size_t num_entries = first_file.entries.size();
  float efficiency = 1.0f;
  size_t entry_index = 0;
  size_t first_entry_index_with_this_product_size = 0;
  uint16_t product_size = first_file.entries[0].product_size;
  while (entry_index < num_entries) {
    ++entry_index;
    if (entry_index == num_entries ||
        first_file.entries[entry_index].product_size != product_size)
    {
      float efficiency_this_product_size = 0.0f;
      for (size_t e = first_entry_index_with_this_product_size; e < entry_index; e++) {
        float efficiency_this_entry = 1.0f;
        for (auto i = subset.begin(); i != subset.end(); ++i) {
          efficiency_this_entry = min(efficiency_this_entry, preprocessed_inputfiles[*i].entries[e].efficiency);
        }
        efficiency_this_product_size = max(efficiency_this_product_size, efficiency_this_entry);
      }
      efficiency = min(efficiency, efficiency_this_product_size);
      if (entry_index < num_entries) {
        first_entry_index_with_this_product_size = entry_index;
        product_size = first_file.entries[entry_index].product_size;
      }
    }
  }

  return efficiency;
}
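
// Emits on stdout a C++ 'LookupTable' struct: for every measured product size, the
// POT block size (in compact form) that maximizes the worst-case efficiency over the
// given subset of files. Refuses to run with --only-cubic-sizes, since the table must
// cover all K x M x N combinations.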
void dump_table_for_subset(
        const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
        const vector<size_t>& subset)
{
  const preprocessed_inputfile_t& first_file = preprocessed_inputfiles[subset[0]];
  const size_t num_entries = first_file.entries.size();
  size_t entry_index = 0;
  size_t first_entry_index_with_this_product_size = 0;
  uint16_t product_size = first_file.entries[0].product_size;
  size_t i = 0;
  size_triple_t min_product_size(first_file.entries.front().product_size);
  size_triple_t max_product_size(first_file.entries.back().product_size);
  if (!min_product_size.is_cubic() || !max_product_size.is_cubic()) {
    abort();
  }
  if (only_cubic_sizes) {
    cerr << "Can't generate tables with --only-cubic-sizes." << endl;
    abort();
  }
  cout << "struct LookupTable {" << endl;
  cout << " static const size_t BaseSize = " << min_product_size.k << ";" << endl;
  const size_t NumSizes = log2_pot(max_product_size.k / min_product_size.k) + 1;
  const size_t TableSize = NumSizes * NumSizes * NumSizes;
  cout << " static const size_t NumSizes = " << NumSizes << ";" << endl;
  cout << " static const unsigned short* Data() {" << endl;
  cout << " static const unsigned short data[" << TableSize << "] = {";
  while (entry_index < num_entries) {
    ++entry_index;
    if (entry_index == num_entries ||
        first_file.entries[entry_index].product_size != product_size)
    {
      float best_efficiency_this_product_size = 0.0f;
      uint16_t best_block_size_this_product_size = 0;
      for (size_t e = first_entry_index_with_this_product_size; e < entry_index; e++) {
        float efficiency_this_entry = 1.0f;
        for (auto i = subset.begin(); i != subset.end(); ++i) {
          efficiency_this_entry = min(efficiency_this_entry, preprocessed_inputfiles[*i].entries[e].efficiency);
        }
        if (efficiency_this_entry > best_efficiency_this_product_size) {
          best_efficiency_this_product_size = efficiency_this_entry;
          best_block_size_this_product_size = first_file.entries[e].block_size;
        }
      }
      if ((i++) % NumSizes) {
        cout << " ";
      } else {
        cout << endl << " ";
      }
      cout << "0x" << hex << best_block_size_this_product_size << dec;
      if (entry_index < num_entries) {
        cout << ",";
        first_entry_index_with_this_product_size = entry_index;
        product_size = first_file.entries[entry_index].product_size;
      }
    }
  }
  if (i != TableSize) {
    cerr << endl << "Wrote " << i << " table entries, expected " << TableSize << endl;
    abort();
  }
  cout << endl << " };" << endl;
  cout << " return data;" << endl;
  cout << " }" << endl;
  cout << "};" << endl;
}
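
// Efficiency of a partition: the worst efficiency_of_subset() over its subsets.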
float efficiency_of_partition(
        const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
        const vector<vector<size_t>>& partition)
{
  float efficiency = 1.0f;
  for (auto s = partition.begin(); s != partition.end(); ++s) {
    efficiency = min(efficiency, efficiency_of_subset(preprocessed_inputfiles, *s));
  }
  return efficiency;
}
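
// The next three helpers enumerate all subsets of a given size of
// {0, ..., set_size-1}, as sorted index vectors, in lexicographic order.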
void make_first_subset(size_t subset_size, vector<size_t>& out_subset, size_t set_size)
{
  assert(subset_size >= 1 && subset_size <= set_size);
  out_subset.resize(subset_size);
  for (size_t i = 0; i < subset_size; i++) {
    out_subset[i] = i;
  }
}

bool is_last_subset(const vector<size_t>& subset, size_t set_size)
{
  return subset[0] == set_size - subset.size();
}

void next_subset(vector<size_t>& inout_subset, size_t set_size)
{
  if (is_last_subset(inout_subset, set_size)) {
    cerr << "iterating past the last subset" << endl;
    abort();
  }
  size_t i = 1;
  while (inout_subset[inout_subset.size() - i] == set_size - i) {
    i++;
    assert(i <= inout_subset.size());
  }
  size_t first_index_to_change = inout_subset.size() - i;
  inout_subset[first_index_to_change]++;
  size_t p = inout_subset[first_index_to_change];
  for (size_t j = first_index_to_change + 1; j < inout_subset.size(); j++) {
    inout_subset[j] = ++p;
  }
}
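
// Limits for the exhaustive part of the search below: subsets are only enumerated
// exhaustively while their count C(n, p) stays under number_of_subsets_limit, but
// subsets of size up to always_search_subsets_of_size_at_least are always tried.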
const size_t number_of_subsets_limit = 100;
const size_t always_search_subsets_of_size_at_least = 2;

bool is_number_of_subsets_feasible(size_t n, size_t p)
{
  assert(n>0 && p>0 && p<=n);
  uint64_t numerator = 1, denominator = 1;
  for (size_t i = 0; i < p; i++) {
    numerator *= n - i;
    denominator *= i + 1;
    if (numerator > denominator * number_of_subsets_limit) {
      return false;
    }
  }
  return true;
}
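
// Largest subset size p such that enumerating all p-subsets of n items remains
// feasible, but never less than always_search_subsets_of_size_at_least (and at most n-1).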
size_t max_feasible_subset_size(size_t n)
{
  assert(n > 0);
  const size_t minresult = min<size_t>(n-1, always_search_subsets_of_size_at_least);
  for (size_t p = 1; p <= n - 1; p++) {
    if (!is_number_of_subsets_feasible(n, p+1)) {
      return max(p, minresult);
    }
  }
  return n - 1;
}
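
// Greedily grows out_subset by moving files over from inout_remainder, one at a time,
// for as long as the subset's efficiency stays above required_efficiency_to_beat.
// At each step, candidate subsets of decreasing size are tried exhaustively (as long
// as enumeration stays feasible) to narrow down which single file to add next.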
void find_subset_with_efficiency_higher_than(
       const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
       float required_efficiency_to_beat,
       vector<size_t>& inout_remainder,
       vector<size_t>& out_subset)
{
  out_subset.resize(0);

  if (required_efficiency_to_beat >= 1.0f) {
    cerr << "can't beat efficiency 1." << endl;
    abort();
  }

  while (!inout_remainder.empty()) {

    vector<size_t> candidate_indices(inout_remainder.size());
    for (size_t i = 0; i < candidate_indices.size(); i++) {
      candidate_indices[i] = i;
    }

    size_t candidate_indices_subset_size = max_feasible_subset_size(candidate_indices.size());
    while (candidate_indices_subset_size >= 1) {
      vector<size_t> candidate_indices_subset;
      make_first_subset(candidate_indices_subset_size,
                        candidate_indices_subset,
                        candidate_indices.size());

      vector<size_t> best_candidate_indices_subset;
      float best_efficiency = 0.0f;
      vector<size_t> trial_subset = out_subset;
      trial_subset.resize(out_subset.size() + candidate_indices_subset_size);
      while (true)
      {
        for (size_t i = 0; i < candidate_indices_subset_size; i++) {
          trial_subset[out_subset.size() + i] = inout_remainder[candidate_indices_subset[i]];
        }

        float trial_efficiency = efficiency_of_subset(preprocessed_inputfiles, trial_subset);
        if (trial_efficiency > best_efficiency) {
          best_efficiency = trial_efficiency;
          best_candidate_indices_subset = candidate_indices_subset;
        }
        if (is_last_subset(candidate_indices_subset, candidate_indices.size())) {
          break;
        }
        next_subset(candidate_indices_subset, candidate_indices.size());
      }

      if (best_efficiency > required_efficiency_to_beat) {
        for (size_t i = 0; i < best_candidate_indices_subset.size(); i++) {
          candidate_indices[i] = candidate_indices[best_candidate_indices_subset[i]];
        }
        candidate_indices.resize(best_candidate_indices_subset.size());
      }

      candidate_indices_subset_size--;
    }

    size_t candidate_index = candidate_indices[0];
    auto candidate_iterator = inout_remainder.begin() + candidate_index;
    vector<size_t> trial_subset = out_subset;

    trial_subset.push_back(*candidate_iterator);
    float trial_efficiency = efficiency_of_subset(preprocessed_inputfiles, trial_subset);
    if (trial_efficiency > required_efficiency_to_beat) {
      out_subset.push_back(*candidate_iterator);
      inout_remainder.erase(candidate_iterator);
    } else {
      break;
    }
  }
}
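
// Partitions all input files into subsets, each with efficiency above
// required_efficiency_to_beat, by repeatedly carving out one subset at a time.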
void find_partition_with_efficiency_higher_than(
       const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
       float required_efficiency_to_beat,
       vector<vector<size_t>>& out_partition)
{
  out_partition.resize(0);

  vector<size_t> remainder;
  for (size_t i = 0; i < preprocessed_inputfiles.size(); i++) {
    remainder.push_back(i);
  }

  while (!remainder.empty()) {
    vector<size_t> new_subset;
    find_subset_with_efficiency_higher_than(
      preprocessed_inputfiles,
      required_efficiency_to_beat,
      remainder,
      new_subset);
    out_partition.push_back(new_subset);
  }
}

void print_partition(
       const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
       const vector<vector<size_t>>& partition)
{
  float efficiency = efficiency_of_partition(preprocessed_inputfiles, partition);
  cout << "Partition into " << partition.size() << " subsets for " << efficiency * 100.0f << "% efficiency" << endl;
  for (auto subset = partition.begin(); subset != partition.end(); ++subset) {
    cout << " Subset " << (subset - partition.begin())
         << ", efficiency " << efficiency_of_subset(preprocessed_inputfiles, *subset) * 100.0f << "%:"
         << endl;
    for (auto file = subset->begin(); file != subset->end(); ++file) {
      cout << " " << preprocessed_inputfiles[*file].filename << endl;
    }
    if (dump_tables) {
      cout << " Table:" << endl;
      dump_table_for_subset(preprocessed_inputfiles, *subset);
    }
  }
  cout << endl;
}
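
// Base class for the command-line actions below ("partition", "evaluate-defaults").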
struct action_t
{
  virtual const char* invokation_name() const { abort(); return nullptr; }
  virtual void run(const vector<string>&) const { abort(); }
  virtual ~action_t() {}
};

struct partition_action_t : action_t
{
  virtual const char* invokation_name() const override { return "partition"; }
  virtual void run(const vector<string>& input_filenames) const override
  {
    vector<preprocessed_inputfile_t> preprocessed_inputfiles;

    if (input_filenames.empty()) {
      cerr << "The " << invokation_name() << " action needs a list of input files." << endl;
      exit(1);
    }

    for (auto it = input_filenames.begin(); it != input_filenames.end(); ++it) {
      inputfile_t inputfile(*it);
      switch (inputfile.type) {
        case inputfile_t::type_t::all_pot_sizes:
          preprocessed_inputfiles.emplace_back(inputfile);
          break;
        case inputfile_t::type_t::default_sizes:
          cerr << "The " << invokation_name() << " action only uses measurements for all pot sizes, and "
               << "has no use for " << *it << " which contains measurements for default sizes." << endl;
          exit(1);
          break;
        default:
          cerr << "Unrecognized input file: " << *it << endl;
          exit(1);
      }
    }

    check_all_files_in_same_exact_order(preprocessed_inputfiles);

    float required_efficiency_to_beat = 0.0f;
    vector<vector<vector<size_t>>> partitions;
    cerr << "searching for partitions...\r" << flush;
    while (true)
    {
      vector<vector<size_t>> partition;
      find_partition_with_efficiency_higher_than(
        preprocessed_inputfiles,
        required_efficiency_to_beat,
        partition);
      float actual_efficiency = efficiency_of_partition(preprocessed_inputfiles, partition);
      cerr << "partition " << preprocessed_inputfiles.size() << " files into " << partition.size()
           << " subsets for " << 100.0f * actual_efficiency
           << " % efficiency"
           << " \r" << flush;
      partitions.push_back(partition);
      if (partition.size() == preprocessed_inputfiles.size() || actual_efficiency == 1.0f) {
        break;
      }
      required_efficiency_to_beat = actual_efficiency;
    }
    cerr << " " << endl;
    while (true) {
      bool repeat = false;
      for (size_t i = 0; i < partitions.size() - 1; i++) {
        if (partitions[i].size() >= partitions[i+1].size()) {
          partitions.erase(partitions.begin() + i);
          repeat = true;
          break;
        }
      }
      if (!repeat) {
        break;
      }
    }
    for (auto it = partitions.begin(); it != partitions.end(); ++it) {
      print_partition(preprocessed_inputfiles, *it);
    }
  }
};
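
// Compares the performance measured with the default block sizes against the best
// performance over all POT block sizes, for each product size.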
struct evaluate_defaults_action_t : action_t
{
  struct results_entry_t {
    uint16_t product_size;
    size_triple_t default_block_size;
    uint16_t best_pot_block_size;
    float default_gflops;
    float best_pot_gflops;
    float default_efficiency;
  };
  friend ostream& operator<<(ostream& s, const results_entry_t& entry)
  {
    return s
      << "Product size " << size_triple_t(entry.product_size)
      << ": default block size " << entry.default_block_size
      << " -> " << entry.default_gflops
      << " GFlop/s = " << entry.default_efficiency * 100.0f << " %"
      << " of best POT block size " << size_triple_t(entry.best_pot_block_size)
      << " -> " << entry.best_pot_gflops
      << " GFlop/s" << dec;
  }
  static bool lower_efficiency(const results_entry_t& e1, const results_entry_t& e2) {
    return e1.default_efficiency < e2.default_efficiency;
  }
  virtual const char* invokation_name() const override { return "evaluate-defaults"; }
  void show_usage_and_exit() const
  {
    cerr << "usage: " << invokation_name() << " default-sizes-data all-pot-sizes-data" << endl;
    cerr << "checks how well the performance with default sizes compares to the best "
         << "performance measured over all POT sizes." << endl;
    exit(1);
  }
  virtual void run(const vector<string>& input_filenames) const override
  {
    if (input_filenames.size() != 2) {
      show_usage_and_exit();
    }
    inputfile_t inputfile_default_sizes(input_filenames[0]);
    inputfile_t inputfile_all_pot_sizes(input_filenames[1]);
    if (inputfile_default_sizes.type != inputfile_t::type_t::default_sizes) {
      cerr << inputfile_default_sizes.filename << " is not an input file with default sizes." << endl;
      show_usage_and_exit();
    }
    if (inputfile_all_pot_sizes.type != inputfile_t::type_t::all_pot_sizes) {
      cerr << inputfile_all_pot_sizes.filename << " is not an input file with all POT sizes." << endl;
      show_usage_and_exit();
    }
    vector<results_entry_t> results;
    vector<results_entry_t> cubic_results;

    uint16_t product_size = 0;
    auto it_all_pot_sizes = inputfile_all_pot_sizes.entries.begin();
    for (auto it_default_sizes = inputfile_default_sizes.entries.begin();
         it_default_sizes != inputfile_default_sizes.entries.end();
         ++it_default_sizes)
    {
      if (it_default_sizes->product_size == product_size) {
        continue;
      }
      product_size = it_default_sizes->product_size;
      while (it_all_pot_sizes != inputfile_all_pot_sizes.entries.end() &&
             it_all_pot_sizes->product_size != product_size)
      {
        ++it_all_pot_sizes;
      }
      if (it_all_pot_sizes == inputfile_all_pot_sizes.entries.end()) {
        break;
      }
      uint16_t best_pot_block_size = 0;
      float best_pot_gflops = 0;
      for (auto it = it_all_pot_sizes;
           it != inputfile_all_pot_sizes.entries.end() && it->product_size == product_size;
           ++it)
      {
        if (it->gflops > best_pot_gflops) {
          best_pot_gflops = it->gflops;
          best_pot_block_size = it->pot_block_size;
        }
      }
      results_entry_t entry;
      entry.product_size = product_size;
      entry.default_block_size = it_default_sizes->nonpot_block_size;
      entry.best_pot_block_size = best_pot_block_size;
      entry.default_gflops = it_default_sizes->gflops;
      entry.best_pot_gflops = best_pot_gflops;
      entry.default_efficiency = entry.default_gflops / entry.best_pot_gflops;
      results.push_back(entry);

      size_triple_t t(product_size);
      if (t.k == t.m && t.m == t.n) {
        cubic_results.push_back(entry);
      }
    }

    cout << "All results:" << endl;
    for (auto it = results.begin(); it != results.end(); ++it) {
      cout << *it << endl;
    }
    cout << endl;

    sort(results.begin(), results.end(), lower_efficiency);

    const size_t n = min<size_t>(20, results.size());
    cout << n << " worst results:" << endl;
    for (size_t i = 0; i < n; i++) {
      cout << results[i] << endl;
    }
    cout << endl;

    cout << "cubic results:" << endl;
    for (auto it = cubic_results.begin(); it != cubic_results.end(); ++it) {
      cout << *it << endl;
    }
    cout << endl;

    sort(cubic_results.begin(), cubic_results.end(), lower_efficiency);

    cout.precision(2);
    vector<float> a = {0.5f, 0.20f, 0.10f, 0.05f, 0.02f, 0.01f};
    for (auto it = a.begin(); it != a.end(); ++it) {
      size_t n = min(results.size() - 1, size_t(*it * results.size()));
      cout << (100.0f * n / (results.size() - 1))
           << " % of product sizes have default efficiency <= "
           << 100.0f * results[n].default_efficiency << " %" << endl;
    }
    cout.precision(default_precision);
  }
};

void show_usage_and_exit(int argc, char* argv[],
                         const vector<unique_ptr<action_t>>& available_actions)
{
  cerr << "usage: " << argv[0] << " <action> [options...] <input files...>" << endl;
  cerr << "available actions:" << endl;
  for (auto it = available_actions.begin(); it != available_actions.end(); ++it) {
    cerr << " " << (*it)->invokation_name() << endl;
  }
  cerr << "the input files should each contain an output of benchmark-blocking-sizes" << endl;
  exit(1);
}

int main(int argc, char* argv[])
{
  cout.precision(default_precision);
  cerr.precision(default_precision);

  vector<unique_ptr<action_t>> available_actions;
  available_actions.emplace_back(new partition_action_t);
  available_actions.emplace_back(new evaluate_defaults_action_t);

  vector<string> input_filenames;

  action_t* action = nullptr;

  if (argc < 2) {
    show_usage_and_exit(argc, argv, available_actions);
  }
  for (int i = 1; i < argc; i++) {
    bool arg_handled = false;
    // Step 1. Try to match action invokation names.
    for (auto it = available_actions.begin(); it != available_actions.end(); ++it) {
      if (!strcmp(argv[i], (*it)->invokation_name())) {
        if (!action) {
          action = it->get();
          arg_handled = true;
          break;
        } else {
          cerr << "can't specify more than one action!" << endl;
          show_usage_and_exit(argc, argv, available_actions);
        }
      }
    }
    if (arg_handled) {
      continue;
    }
    // Step 2. Try to match option names.
    if (argv[i][0] == '-') {
      if (!strcmp(argv[i], "--only-cubic-sizes")) {
        only_cubic_sizes = true;
        arg_handled = true;
      }
      if (!strcmp(argv[i], "--dump-tables")) {
        dump_tables = true;
        arg_handled = true;
      }
      if (!arg_handled) {
        cerr << "Unrecognized option: " << argv[i] << endl;
        show_usage_and_exit(argc, argv, available_actions);
      }
    }
    if (arg_handled) {
      continue;
    }
    // Step 3. Default to interpreting args as input filenames.
    input_filenames.emplace_back(argv[i]);
  }

  if (dump_tables && only_cubic_sizes) {
    cerr << "Incompatible options: --only-cubic-sizes and --dump-tables." << endl;
    show_usage_and_exit(argc, argv, available_actions);
  }

  if (!action) {
    show_usage_and_exit(argc, argv, available_actions);
  }

  action->run(input_filenames);
}