Mavo
9 years ago
1 changed files with 0 additions and 136 deletions
-
136benchmark_dft.py
@ -1,136 +0,0 @@ |
|||
import argparse
import math
import os
import os.path
import re
import subprocess
import sys
import time
|||
|
|||
# Paths are resolved relative to the current working directory: the script
# is expected to be launched from the Storm source root.
DIR=os.getcwd()
# Location of the storm-dft binary inside the build tree.
STORM_PATH=os.path.join(DIR, "build/src/storm-dft")
# Directory containing the .dft example files used as benchmark inputs.
EXAMPLE_DIR=os.path.join(DIR, "examples/dft/")

# Benchmark instances as tuples (name, parametric, expected_results):
#   name             - basename of the .dft file in EXAMPLE_DIR
#   parametric       - True if the model is parametric (results compared as
#                      rational-function strings instead of floats)
#   expected_results - expected values for the '--expectedtime' and
#                      '--probability' properties, in that order (must match
#                      the `properties` list in the __main__ driver)
# Commented-out entries are known-problematic or long-running cases.
benchmarks = [
    ("and", False, [3, 1]),
    ("and_param", True, ["(4*x^2+2*x+1)/((x) * (2*x+1))", "1"]),
    ("cardiac", False, [8597.360004, 1]),
    ("cas", False, [0.859736, 1]),
    ("cm2", False, [0.256272, 1]),
    #("cm4", False, [0.338225, 1]), # big
    ("cps", False, ["inf", 0.333333]),
    #("deathegg", False, [24.642857, 1]), # contains fdep to gate
    #("fdep", False, [0.666667, 1]), # contains fdep to two elements
    ("fdep2", False, [2, 1]),
    ("fdep3", False, [2.5, 1]),
    #("ftpp_complex", False, [0, 1]), # Compute
    #("ftpp_large", False, [0, 1]), # Compute
    #("ftpp_standard", False, [0, 1]), # Compute
    ("mdcs", False, [2.85414, 1]),
    ("mdcs2", False, [2.85414, 1]),
    ("mp", False, [1.66667, 1]),
    ("or", False, [1, 1]),
    ("pand", False, ["inf", 0.666667]),
    ("pand_param", True, ["-1", "(x)/(y+x)"]),
    ("pdep", False, [2.66667, 1]),
    #("pdep2", False, [0, 1]), #Compute # contains pdep to two elements
    ("pdep3", False, [2.79167, 1]),
    ("spare", False, [3.53846, 1]),
    ("spare2", False, [1.86957, 1]),
    ("spare3", False, [1.27273, 1]),
    ("spare4", False, [4.8459, 1]),
    ("spare5", False, [2.66667, 1]),
    ("spare6", False, [1.4, 1]),
    ("spare7", False, [3.67333, 1]),
    ("symmetry", False, [4.16667, 1]),
    ("symmetry2", False, [3.06111, 1]),
    ("tripple_and1", False, [4.16667, 1]),
    ("tripple_and2", False, [3.66667, 1]),
    ("tripple_and2_c", False, [3.6667, 1]),
    ("tripple_and_c", False, [4.16667, 1]),
    ("tripple_or", False, [0.5, 1]),
    ("tripple_or2", False, [0.666667, 1]),
    ("tripple_or2_c", False, [0.66667, 1]),
    ("tripple_or_c", False, [0.5, 1]),
    ("tripple_pand", False, ["inf", 0.0416667]),
    ("tripple_pand2", False, ["inf", 0.166667]),
    ("tripple_pand2_c", False, ["inf", 0.166667]),
    ("tripple_pand_c", False, ["inf", 0.0416667]),
    ("voting", False, [1.66667, 1]),
    ("voting2", False, [0.588235, 1])
]
|||
|
|||
def run_storm_dft(filename, prop, parametric, quiet):
    """Run storm-dft on the given example and return the parsed result.

    :param filename: basename of the .dft file inside EXAMPLE_DIR.
    :param prop: property switch passed to storm-dft (e.g. '--probability').
    :param parametric: when True, pass '--parametric' to the tool.
    :param quiet: suppress echoing of the tool output when True.
    :returns: the textual result captured from 'Result: [...]', or None
              if no result line was found in the output.
    """
    arguments = [STORM_PATH,
                 os.path.join(EXAMPLE_DIR, filename + ".dft"),
                 prop]
    if parametric:
        arguments.append('--parametric')

    tool_output = run_tool(arguments, quiet)

    # Extract the value between 'Result: [' and ']'.
    matched = re.search(r'Result: \[(.*)\]', tool_output)
    return matched.group(1) if matched else None
|||
|
|||
def run_tool(args, quiet=False):
    """Execute a process and return the last non-empty line it printed.

    stderr is merged into stdout. Each non-empty output line is echoed
    (prefixed with "\t * ") unless `quiet` is True.

    :param args: argument vector (program plus arguments) for subprocess.
    :param quiet: suppress echoing of the tool output when True.
    :returns: the last non-empty output line, or "" if the tool printed nothing.
    """
    pipe = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    result = ""
    # The pipe yields bytes, so the sentinel must be b"": the previous ""
    # sentinel never matched, and the loop relied on a secondary poll()
    # check that could busy-spin between stream EOF and process exit.
    for line in iter(pipe.stdout.readline, b""):
        output = line.decode(encoding='UTF-8').rstrip()
        if output != "":
            if not quiet:
                print("\t * " + output)
            result = output
    pipe.stdout.close()
    # Reap the child so it does not linger as a zombie process.
    pipe.wait()
    return result
|||
|
|||
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True if a and b are approximately equal.

    Delegates to math.isclose, which implements the same symmetric
    relative/absolute tolerance test but handles infinities correctly:
    the previous hand-rolled formula reported inf as "close" to any
    finite value, because rel_tol * inf == inf and abs(inf - x) <= inf.

    :param a: first value to compare.
    :param b: second value to compare.
    :param rel_tol: maximum allowed relative difference.
    :param abs_tol: minimum absolute tolerance (useful near zero).
    :returns: True if the values are considered close.
    """
    return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
|||
|
|||
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Benchmarking DFTs via Storm')
    parser.add_argument('--debuglevel', type=int, default=0, help='the debug level (0=silent, 1=print benchmarks, 2=print output from storm')
    args = parser.parse_args()

    total = 0
    passed = 0
    # Property switches; their order must match the expected-result lists
    # in `benchmarks` (expected time first, then probability).
    properties = ['--expectedtime', '--probability']
    start = time.time()
    for index, prop in enumerate(properties):
        for benchmark, parametric, expected_results in benchmarks:
            expected = expected_results[index]
            total += 1
            if args.debuglevel > 0:
                print("Running '{}' with property '{}'".format(benchmark, prop))
            # Tool output is echoed only at debug level 2.
            result = run_storm_dft(benchmark, prop, parametric, args.debuglevel < 2)
            if result is None:
                print("Error occurred on example '{}' with property '{}'".format(benchmark, prop))
                continue

            if parametric:
                # Parametric results are compared as rational-function strings.
                matches = result == expected
            else:
                # Concrete results are compared numerically with tolerance.
                result = float(result)
                matches = isclose(result, float(expected), rel_tol=1e-05)

            if matches:
                passed += 1
            else:
                print("Wrong result on example '{}' with property '{}': result: {}, Expected: {}".format(benchmark, prop, result, expected))
    end = time.time()
    print("Correct results for {} of {} DFT checks in {}s".format(passed, total, end-start))
Write
Preview
Loading…
Cancel
Save
Reference in new issue