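# Iterative safety analysis of an Atari Skiing policy:
# rank the states of a PRISM MDP with the tempest/storm binary, cluster the
# highly ranked states with DBSCAN, replay representative states in ALE under
# a Sample Factory policy, and feed the resulting safe/unsafe clusters back
# into the PRISM model as labels for the next iteration.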
import sys
import operator
from os import listdir, system
import subprocess
import re
from collections import defaultdict
from random import randrange

from ale_py import ALEInterface, SDL_SUPPORT, Action
from PIL import Image
from matplotlib import pyplot as plt
import cv2
import pickle
import queue
from dataclasses import dataclass, field
from sklearn.cluster import KMeans, DBSCAN
from enum import Enum
from copy import deepcopy
import numpy as np

import logging
logger = logging.getLogger(__name__)
#import readchar

from sample_factory.algo.utils.tensor_dict import TensorDict
from query_sample_factory_checkpoint import SampleFactoryNNQueryWrapper

import time

tempest_binary = "/home/spranger/projects/tempest-devel/ranking_release/bin/storm"
rom_file = "/home/spranger/research/Skiing/env/lib/python3.10/site-packages/AutoROM/roms/skiing.bin"
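# Coarse wall-clock timing helpers based on a module-global start time.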
def tic():
    import time
    global startTime_for_tictoc
    startTime_for_tictoc = time.time()

def toc():
    import time
    if 'startTime_for_tictoc' in globals():
        return time.time() - startTime_for_tictoc

class Verdict(Enum):
    INCONCLUSIVE = 1
    GOOD = 2
    BAD = 3

verdict_to_color_map = {Verdict.BAD: "200,0,0", Verdict.INCONCLUSIVE: "40,40,200", Verdict.GOOD: "00,200,100"}

def convert(tuples):
    return dict(tuples)

@dataclass(frozen=True)
class State:
    x: int
    y: int
    ski_position: int
    #velocity: int

def default_value():
    return {'action': None, 'choiceValue': None}

@dataclass(frozen=True)
class StateValue:
    ranking: float
    choices: dict = field(default_factory=default_value)
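# Run a shell command via os.system, echoing it into list_of_exec for later
# inspection.  Note that this shadows Python's built-in exec().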
def exec(command, verbose=True):
    if verbose: print(f"Executing {command}")
    system(f"echo {command} >> list_of_exec")
    return system(command)

num_tests_per_cluster = 50
factor_tests_per_cluster = 0.2
num_ski_positions = 8

def input_to_action(char):
    if char == "0":
        return Action.NOOP
    if char == "1":
        return Action.RIGHT
    if char == "2":
        return Action.LEFT
    if char == "3":
        return "reset"
    if char == "4":
        return "set_x"
    if char == "5":
        return "set_vel"
    if char in ["w", "a", "s", "d"]:
        return char
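# Save the observation stack of one test run as numbered PNGs under
# images/testing_<experiment_id>/<verdict>_<testDir>_<n>; runs shorter than
# 20 frames are flagged as potentially spurious.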
def saveObservations(observations, verdict, testDir):
    testDir = f"images/testing_{experiment_id}/{verdict.name}_{testDir}_{len(observations)}"
    if len(observations) < 20:
        logger.warning(f"Potentially spurious test case for {testDir}")
        testDir = f"{testDir}_pot_spurious"
    exec(f"mkdir {testDir}", verbose=False)
    for i, obs in enumerate(observations):
        img = Image.fromarray(obs)
        img.save(f"{testDir}/{i:003}.png")
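# Number of LEFT/RIGHT presses needed to steer the agent into each of the
# eight discrete ski positions.
#
# run_single_test below restores the recorded RAM snapshot for row y, steers
# into the requested ski position, writes the x coordinate (RAM address 25),
# and then lets the Sample Factory policy play for `duration` frames, querying
# it every fourth frame on a stack of the last four 84x84 grayscale
# observations.  The verdict is BAD if the agent gets stuck (the RAM byte at
# address 14, tracked here as speed, stays at zero), GOOD otherwise.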
ski_position_counter = {1: (Action.LEFT, 40), 2: (Action.LEFT, 35), 3: (Action.LEFT, 30), 4: (Action.LEFT, 10), 5: (Action.NOOP, 1), 6: (Action.RIGHT, 10), 7: (Action.RIGHT, 30), 8: (Action.RIGHT, 40)}

#def run_single_test(ale, nn_wrapper, x, y, ski_position, velocity, duration=50):
def run_single_test(ale, nn_wrapper, x, y, ski_position, duration=50):
    #print(f"Running Test from x: {x:04}, y: {y:04}, ski_position: {ski_position}", end="")
    #testDir = f"{x}_{y}_{ski_position}_{velocity}"
    testDir = f"{x}_{y}_{ski_position}"
    for i, r in enumerate(ramDICT[y]):
        ale.setRAM(i, r)
    ski_position_setting = ski_position_counter[ski_position]
    for i in range(0, ski_position_setting[1]):
        ale.act(ski_position_setting[0])
    ale.setRAM(14, 0)
    ale.setRAM(25, x)
    ale.setRAM(14, 180)  # TODO

    all_obs = list()
    speed_list = list()
    resized_obs = cv2.resize(ale.getScreenGrayscale(), (84, 84), interpolation=cv2.INTER_AREA)
    for i in range(0, 4):
        all_obs.append(resized_obs)
    for i in range(0, duration - 4):
        resized_obs = cv2.resize(ale.getScreenGrayscale(), (84, 84), interpolation=cv2.INTER_AREA)
        all_obs.append(resized_obs)
        if i % 4 == 0:
            stack_tensor = TensorDict({"obs": np.array(all_obs[-4:])})
            action = nn_wrapper.query(stack_tensor)
            ale.act(input_to_action(str(action)))
        else:
            ale.act(input_to_action(str(action)))
        speed_list.append(ale.getRAM()[14])
        if len(speed_list) > 15 and sum(speed_list[-6:-1]) == 0:
            saveObservations(all_obs, Verdict.BAD, testDir)
            return Verdict.BAD
    saveObservations(all_obs, Verdict.GOOD, testDir)
    return Verdict.GOOD
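# Invoke the tempest/storm binary on the current PRISM file to rank states
# against 'Rmax=? [C <= 1000]' and archive the generated action_ranking file
# for this iteration.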
def computeStateRanking(mdp_file, iteration):
    logger.info("Computing state ranking")
    tic()
    try:
        command = f"{tempest_binary} --prism {mdp_file} --buildchoicelab --buildstateval --build-all-labels --prop 'Rmax=? [C <= 1000]'"
        result = subprocess.run(command, shell=True, check=True)
        print(result)
    except Exception as e:
        print(e)
        sys.exit(-1)
    exec(f"mv action_ranking action_ranking_{iteration:03}")
    logger.info(f"Computing state ranking - DONE: took {toc()} seconds")
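# Parse the action_ranking output line by line: keep only states with a
# ranking value above 0.1 and record their per-action (left/right/noop)
# choice values.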
def fillStateRanking(file_name, match=""):
    logger.info(f"Parsing state ranking, {file_name}")
    tic()
    state_ranking = dict()
    try:
        with open(file_name, "r") as f:
            file_content = f.readlines()
            for line in file_content:
                if "move=0" not in line: continue
                ranking_value = float(re.search(r"Value:([+-]?(\d*\.\d+)|\d+)", line)[0].replace("Value:", ""))
                if ranking_value <= 0.1:
                    continue
                stateMapping = convert(re.findall(r"([a-zA-Z_]*[a-zA-Z])=(\d+)?", line))
                #print("stateMapping", stateMapping)
                choices = convert(re.findall(r"[a-zA-Z_]*(left|right|noop)[a-zA-Z_]*:(-?\d+\.?\d*)", line))
                choices = {key: float(value) for (key, value) in choices.items()}
                #print("choices", choices)
                #print("ranking_value", ranking_value)
                #state = State(int(stateMapping["x"]), int(stateMapping["y"]), int(stateMapping["ski_position"]), int(stateMapping["velocity"]))
                state = State(int(stateMapping["x"]), int(stateMapping["y"]), int(stateMapping["ski_position"]))
                value = StateValue(ranking_value, choices)
                state_ranking[state] = value
        logger.info(f"Parsing state ranking - DONE: took {toc()} seconds")
        return state_ranking
    except EnvironmentError:
        print("Ranking file not available. Exiting.")
        toc()
        sys.exit(1)
    except:
        toc()
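# The helpers below turn clusters of concrete states back into PRISM formulas:
# clusterFormula groups a cluster by ski_position and y and compresses runs of
# consecutive x values into interval constraints.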
def createDisjunction(formulas):
    return " | ".join(formulas)

def clusterFormula(cluster):
    formula = ""
    #states = [(s[0].x, s[0].y, s[0].ski_position, s[0].velocity) for s in cluster]
    states = [(s[0].x, s[0].y, s[0].ski_position) for s in cluster]
    skiPositionGroup = defaultdict(list)
    for item in states:
        skiPositionGroup[item[2]].append(item)

    first = True
    #todo add velocity here
    for skiPosition, group in skiPositionGroup.items():
        formula += f"ski_position={skiPosition} & ("
        yPosGroup = defaultdict(list)
        for item in group:
            yPosGroup[item[1]].append(item)
        for y, y_group in yPosGroup.items():
            if first:
                first = False
            else:
                formula += " | "
            sorted_y_group = sorted(y_group, key=lambda s: s[0])
            formula += f"( y={y} & ("
            current_x_min = sorted_y_group[0][0]
            current_x = sorted_y_group[0][0]
            x_ranges = list()
            for state in sorted_y_group[1:-1]:
                if state[0] - current_x == 1:
                    current_x = state[0]
                else:
                    x_ranges.append(f" ({current_x_min}<= x & x<={current_x})")
                    current_x_min = state[0]
                    current_x = state[0]
            x_ranges.append(f" ({current_x_min}<= x & x<={sorted_y_group[-1][0]})")
            formula += " | ".join(x_ranges)
            formula += ") )"
        formula += ")"
    return formula
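# Combine the per-cluster formulas pairwise so the final Safe/Unsafe formula
# is a balanced tree of disjunctions instead of one long chain.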
def createBalancedDisjunction(indices, name):
    #logger.info(f"Creating balanced disjunction for {len(indices)} ({indices}) formulas")
    if len(indices) == 0:
        return f"formula {name} = false;\n"
    else:
        while len(indices) > 1:
            # Pair up neighbouring entries; an odd leftover is carried over as-is.
            indices_tmp = [f"({indices[2*i]} | {indices[2*i+1]})" for i in range(0, len(indices) // 2)]
            if len(indices) % 2 == 1:
                indices_tmp.append(indices[-1])
            indices = indices_tmp
        disjunction = f"formula {name} = " + " ".join(indices) + ";\n"
        return disjunction

def createUnsafeFormula(clusters):
    label = "label \"Unsafe\" = Unsafe;\n"
    formulas = ""
    indices = list()
    for i, cluster in enumerate(clusters):
        formulas += f"formula Unsafe_{i} = {clusterFormula(cluster)};\n"
        indices.append(f"Unsafe_{i}")
    return formulas + "\n" + createBalancedDisjunction(indices, "Unsafe") + label

def createSafeFormula(clusters):
    label = "label \"Safe\" = Safe;\n"
    formulas = ""
    indices = list()
    for i, cluster in enumerate(clusters):
        formulas += f"formula Safe_{i} = {clusterFormula(cluster)};\n"
        indices.append(f"Safe_{i}")
    return formulas + "\n" + createBalancedDisjunction(indices, "Safe") + label
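# Copy the formula-free base model and append the freshly generated Safe and
# Unsafe formulas and labels, producing the PRISM file for this iteration.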
def updatePrismFile(newFile, iteration, safeStates, unsafeStates):
    logger.info("Creating next prism file")
    tic()
    initFile = f"{newFile}_no_formulas.prism"
    newFile = f"{newFile}_{iteration:03}.prism"
    exec(f"cp {initFile} {newFile}", verbose=False)
    with open(newFile, "a") as prism:
        prism.write(createSafeFormula(safeStates))
        prism.write(createUnsafeFormula(unsafeStates))
    logger.info(f"Creating next prism file - DONE: took {toc()} seconds")
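# Global setup: initialise ALE, load the Skiing ROM, load the recorded RAM
# snapshots per y position (all_positions_v2.pickle), and instantiate the
# Sample Factory policy wrapper.  Each run writes into its own
# images/testing_<experiment_id> directory keyed by the current timestamp.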
ale = ALEInterface()

#if SDL_SUPPORT:
#    ale.setBool("sound", True)
#    ale.setBool("display_screen", True)

# Load the ROM file
ale.loadROM(rom_file)

with open('all_positions_v2.pickle', 'rb') as handle:
    ramDICT = pickle.load(handle)
y_ram_setting = 60
x = 70

nn_wrapper = SampleFactoryNNQueryWrapper()

experiment_id = int(time.time())
init_mdp = "velocity_safety"
exec(f"mkdir -p images/testing_{experiment_id}", verbose=False)

markerSize = 1
imagesDir = f"images/testing_{experiment_id}"
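# Visualisation helpers: states are drawn onto one copy of a scaled-down
# course image per ski position using ImageMagick (convert), and the eight
# per-position images are then tiled into a single overview per iteration
# (montage) and opened with sxiv.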
def drawOntoSkiPosImage(states, color, target_prefix="cluster_", alpha_factor=1.0):
    markerList = {ski_position: list() for ski_position in range(1, num_ski_positions + 1)}
    for state in states:
        s = state[0]
        #marker = f"-fill 'rgba({color}, {alpha_factor * state[1].ranking})' -draw 'rectangle {s.x-markerSize},{s.y-markerSize} {s.x+markerSize},{s.y+markerSize} '"
        marker = f"-fill 'rgba({color}, {alpha_factor * state[1].ranking})' -draw 'point {s.x},{s.y} '"
        markerList[s.ski_position].append(marker)
    for pos, marker in markerList.items():
        command = f"convert {imagesDir}/{target_prefix}_{pos:02}_individual.png {' '.join(marker)} {imagesDir}/{target_prefix}_{pos:02}_individual.png"
        exec(command, verbose=False)

def concatImages(prefix, iteration):
    exec(f"montage {imagesDir}/{prefix}_*_individual.png -geometry +0+0 -tile x1 {imagesDir}/{prefix}_{iteration}.png", verbose=False)
    exec(f"sxiv {imagesDir}/{prefix}_{iteration}.png&", verbose=False)

def drawStatesOntoTiledImage(states, color, target, source="images/1_full_scaled_down.png", alpha_factor=1.0):
    """
    Useful to draw a set of states, e.g. a single cluster
    """
    markerList = {1: list(), 2: list(), 3: list(), 4: list(), 5: list(), 6: list(), 7: list(), 8: list()}
    logger.info(f"Drawing {len(states)} states onto {target}")
    tic()
    for state in states:
        s = state[0]
        marker = f"-fill 'rgba({color}, {alpha_factor * state[1].ranking})' -draw 'rectangle {s.x-markerSize},{s.y-markerSize} {s.x+markerSize},{s.y+markerSize} '"
        markerList[s.ski_position].append(marker)
    for pos, marker in markerList.items():
        command = f"convert {source} {' '.join(marker)} {imagesDir}/{target}_{pos:02}_individual.png"
        exec(command, verbose=False)
    exec(f"montage {imagesDir}/{target}_*_individual.png -geometry +0+0 -tile x1 {imagesDir}/{target}.png", verbose=False)
    logger.info(f"Drawing {len(states)} states onto {target} - Done: took {toc()} seconds")

def drawClusters(clusterDict, target, iteration, alpha_factor=1.0):
    for ski_position in range(1, num_ski_positions + 1):
        source = "images/1_full_scaled_down.png"
        exec(f"cp {source} {imagesDir}/{target}_{ski_position:02}_individual.png", verbose=False)
    for _, clusterStates in clusterDict.items():
        color = f"{np.random.choice(range(256))}, {np.random.choice(range(256))}, {np.random.choice(range(256))}"
        drawOntoSkiPosImage(clusterStates, color, target, alpha_factor=alpha_factor)
    concatImages(target, iteration)

def drawResult(clusterDict, target, iteration):
    for ski_position in range(1, num_ski_positions + 1):
        source = "images/1_full_scaled_down.png"
        exec(f"cp {source} {imagesDir}/{target}_{ski_position:02}_individual.png")
    for _, (clusterStates, result) in clusterDict.items():
        color = "100,100,100"
        if result == Verdict.GOOD:
            color = "0,200,0"
        elif result == Verdict.BAD:
            color = "200,0,0"
        drawOntoSkiPosImage(clusterStates, color, target, alpha_factor=0.7)
    concatImages(target, iteration)

def _init_logger():
    logger = logging.getLogger('main')
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('[%(levelname)s] %(module)s - %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
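# Cluster the ranked states with DBSCAN (eps=15).  ski_position is scaled by
# 30 so that states with different ski positions are kept in separate
# clusters; DBSCAN noise points (label -1) are dropped.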
def clusterImportantStates(ranking, iteration):
    logger.info(f"Starting to cluster {len(ranking)} states into clusters")
    tic()
    #states = [[s[0].x, s[0].y, s[0].ski_position * 10, s[0].velocity * 10, s[1].ranking] for s in ranking]
    states = [[s[0].x, s[0].y, s[0].ski_position * 30, s[1].ranking] for s in ranking]
    #kmeans = KMeans(n_clusters, random_state=0, n_init="auto").fit(states)
    dbscan = DBSCAN(eps=15).fit(states)
    labels = dbscan.labels_
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    logger.info(f"Starting to cluster {len(ranking)} states into clusters - DONE: took {toc()} seconds with {n_clusters} clusters")
    clusterDict = {i: list() for i in range(0, n_clusters)}
    for i, state in enumerate(ranking):
        if labels[i] == -1: continue
        clusterDict[labels[i]].append(state)
    drawClusters(clusterDict, "clusters", iteration)
    return clusterDict
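# Main refinement loop: write the PRISM file with the current Safe/Unsafe
# labels, rank and cluster its states, test a representative state of each
# cluster in ALE, and record the whole cluster as safe or unsafe based on the
# verdict before starting the next iteration.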
if __name__ == '__main__':
    _init_logger()
    logger = logging.getLogger('main')
    logger.info("Starting")
    n_clusters = 40
    testAll = False

    safeStates = list()
    unsafeStates = list()
    iteration = 0
    while True:
        updatePrismFile(init_mdp, iteration, safeStates, unsafeStates)
        computeStateRanking(f"{init_mdp}_{iteration:03}.prism", iteration)
        ranking = fillStateRanking(f"action_ranking_{iteration:03}")
        sorted_ranking = sorted((x for x in ranking.items() if x[1].ranking > 0.1), key=lambda x: x[1].ranking)
        clusters = clusterImportantStates(sorted_ranking, iteration)

        if testAll: failingPerCluster = {i: list() for i in range(0, n_clusters)}
        clusterResult = dict()
        for id, cluster in clusters.items():
            num_tests = int(factor_tests_per_cluster * len(cluster))
            num_tests = 1
            logger.info(f"Testing {num_tests} states (from {len(cluster)} states) from cluster {id}")
            randomStates = np.random.choice(len(cluster), num_tests, replace=False)
            randomStates = [cluster[i] for i in randomStates]

            verdictGood = True
            for state in randomStates:
                x = state[0].x
                y = state[0].y
                ski_pos = state[0].ski_position
                #velocity = state[0].velocity
                result = run_single_test(ale, nn_wrapper, x, y, ski_pos, duration=50)
                #result = run_single_test(ale, nn_wrapper, x, y, ski_pos, velocity, duration=50)
                result = Verdict.BAD  # TODO REMOVE ME!!!!!!!!!!!!!!
                if result == Verdict.BAD:
                    if testAll:
                        failingPerCluster[id].append(state)
                    else:
                        clusterResult[id] = (cluster, Verdict.BAD)
                        verdictGood = False
                        unsafeStates.append(cluster)
                        break
            if verdictGood:
                clusterResult[id] = (cluster, Verdict.GOOD)
                safeStates.append(cluster)
        logger.info(f"Iteration: {iteration:03} -\tSafe Results : {sum([len(c) for c in safeStates])} -\tUnsafe Results:{sum([len(c) for c in unsafeStates])}")
        if testAll: drawClusters(failingPerCluster, "failing", iteration)
        drawResult(clusterResult, "result", iteration)
        iteration += 1