"""Benchmark storm-dft on a set of example DFT files and compare results.

For every benchmark the script checks two properties (expected time and
probability) against the hard-coded expected values below.
"""

import argparse
import os
import os.path
import re
import subprocess
import sys
import time

# Paths are resolved relative to the current working directory, so the
# script must be started from the repository root.
DIR = os.getcwd()
STORM_PATH = os.path.join(DIR, "build/src/storm-dft")
EXAMPLE_DIR = os.path.join(DIR, "examples/dft/")

# (benchmark name, parametric?, [expected time, expected probability]).
# Parametric benchmarks carry rational-function strings instead of floats.
benchmarks = [
    ("and", False, [3, 1]),
    ("and_param", True, ["(4*x^2+2*x+1)/((x) * (2*x+1))", "1"]),
    ("cardiac", False, [8597.360004, 1]),
    ("cas", False, [0.859736, 1]),
    ("cm2", False, [0.256272, 1]),
    #("cm4", False, [0.338225, 1]), # big
    ("cps", False, ["inf", 0.333333]),
    #("deathegg", False, [24.642857, 1]), # contains fdep to gate
    #("fdep", False, [0.666667, 1]), # contains fdep to two elements
    ("fdep2", False, [2, 1]),
    ("fdep3", False, [2.5, 1]),
    #("ftpp_complex", False, [0, 1]), # Compute
    #("ftpp_large", False, [0, 1]), # Compute
    #("ftpp_standard", False, [0, 1]), # Compute
    ("mdcs", False, [2.85414, 1]),
    ("mdcs2", False, [2.85414, 1]),
    ("mp", False, [1.66667, 1]),
    ("or", False, [1, 1]),
    ("pand", False, ["inf", 0.666667]),
    ("pand_param", True, ["-1", "(x)/(y+x)"]),
    ("pdep", False, [2.66667, 1]),
    #("pdep2", False, [0, 1]), #Compute # contains pdep to two elements
    ("pdep3", False, [2.79167, 1]),
    ("spare", False, [3.53846, 1]),
    ("spare2", False, [1.86957, 1]),
    ("spare3", False, [1.27273, 1]),
    ("spare4", False, [4.8459, 1]),
    ("spare5", False, [2.66667, 1]),
    ("spare6", False, [1.4, 1]),
    ("spare7", False, [3.67333, 1]),
    ("symmetry", False, [4.16667, 1]),
    ("symmetry2", False, [3.06111, 1]),
    ("tripple_and1", False, [4.16667, 1]),
    ("tripple_and2", False, [3.66667, 1]),
    ("tripple_and2_c", False, [3.6667, 1]),
    ("tripple_and_c", False, [4.16667, 1]),
    ("tripple_or", False, [0.5, 1]),
    ("tripple_or2", False, [0.666667, 1]),
    ("tripple_or2_c", False, [0.66667, 1]),
    ("tripple_or_c", False, [0.5, 1]),
    ("tripple_pand", False, ["inf", 0.0416667]),
    ("tripple_pand2", False, ["inf", 0.166667]),
    ("tripple_pand2_c", False, ["inf", 0.166667]),
    ("tripple_pand_c", False, ["inf", 0.0416667]),
    ("voting", False, [1.66667, 1]),
    ("voting2", False, [0.588235, 1])
]


def run_storm_dft(filename, prop, parametric, quiet):
    """Run storm-dft on *filename* with property *prop* and return the result.

    :param filename: benchmark name; resolved to EXAMPLE_DIR/<name>.dft.
    :param prop: property flag passed to storm-dft (e.g. '--probability').
    :param parametric: if True, add '--parametric' to the invocation.
    :param quiet: suppress echoing of the tool output.
    :returns: the text between 'Result: [' and ']' in the output, or None
        if no result line was found.
    """
    dft_file = os.path.join(EXAMPLE_DIR, filename + ".dft")
    args = [STORM_PATH,
            dft_file,
            prop]
    if parametric:
        args.append('--parametric')

    output = run_tool(args, quiet)
    # Get result
    match = re.search(r'Result: \[(.*)\]', output)
    if not match:
        return None

    return match.group(1)


def run_tool(args, quiet=False):
    """
    Executes a process,
    :returns: the last non-empty line of `stdout`
    """
    # Context manager guarantees the pipe is closed and the child reaped
    # (the original leaked the Popen object).
    with subprocess.Popen(args, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT) as pipe:
        result = ""
        # Sentinel must be b"": stdout yields bytes, so the original ""
        # sentinel never matched and the loop only ended via a secondary
        # poll() check, busy-spinning between child EOF and process exit.
        for raw_line in iter(pipe.stdout.readline, b""):
            output = raw_line.decode(encoding='UTF-8').rstrip()
            if output != "":
                if not quiet:
                    print("\t * " + output)
                result = output
    return result


def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True if *a* and *b* are equal within the given tolerances.

    Mirrors math.isclose (PEP 485); the early equality test also makes
    identical infinities compare equal ('inf' expected results).
    """
    if a == b:
        return True
    return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Benchmarking DFTs via Storm')
    parser.add_argument('--debuglevel', type=int, default=0, help='the debug level (0=silent, 1=print benchmarks, 2=print output from storm')
    args = parser.parse_args()
    count = 0
    correct = 0
    # Index 0 of each expected-result pair is the expected time,
    # index 1 the probability — matched by position below.
    properties = ['--expectedtime', '--probability']
    start = time.time()
    for index, prop in enumerate(properties):
        for (benchmark, parametric, result_original) in benchmarks:
            expected_result = result_original[index]
            # Run benchmark and check result
            count += 1
            if args.debuglevel > 0:
                print("Running '{}' with property '{}'".format(benchmark, prop))
            result = run_storm_dft(benchmark, prop, parametric, args.debuglevel < 2)
            if result is None:
                print("Error occurred on example '{}' with property '{}'".format(benchmark, prop))
                continue

            if not parametric:
                # Float comparison with relative tolerance.
                result = float(result)
                if not isclose(result, float(expected_result), rel_tol=1e-05):
                    print("Wrong result on example '{}' with property '{}': result: {}, Expected: {}".format(benchmark, prop, result, expected_result))
                else:
                    correct += 1
            else:
                # Parametric results are compared as exact strings.
                if result != expected_result:
                    print("Wrong result on example '{}' with property '{}': result: {}, Expected: {}".format(benchmark, prop, result, expected_result))
                else:
                    correct += 1
    end = time.time()
    print("Correct results for {} of {} DFT checks in {}s".format(correct, count, end-start))