import gym
from PIL import Image
from copy import deepcopy
import numpy as np
from matplotlib import pyplot as plt
import readchar

env = gym.make("ALE/Skiing-v5", render_mode="human")
observation, info = env.reset()

y = 40                  # rough running estimate of the vertical distance travelled
standstillcounter = 0   # consecutive frames spent fully sideways (ski position 1 or 14)


def update_y(y, ski_position):
    """Estimate the downhill progress made this frame from the current ski position."""
    global standstillcounter
    if ski_position in [6, 7, 8, 9]:        # pointing (almost) straight downhill
        standstillcounter = 0
        y_update = 16
    elif ski_position in [4, 5, 10, 11]:
        standstillcounter = 0
        y_update = 12
    elif ski_position in [2, 3, 12, 13]:
        standstillcounter = 0
        y_update = 8
    elif ski_position in [1, 14] and standstillcounter >= 5:
        # fully sideways for several frames: the skier has effectively stopped
        if standstillcounter >= 8:
            print("!!!!!!!!!! no more x updates!!!!!!!!!!!")
        y_update = 0
    elif ski_position in [1, 14]:
        y_update = 4

    if ski_position in [1, 14]:
        standstillcounter += 1

    return y_update


def update_ski_position(ski_position, action):
    """Track the skier's orientation (an integer from 1 to 14) from the chosen action."""
    if action == 1:
        return min(ski_position + 1, 14)
    elif action == 2:
        return max(ski_position - 1, 1)
    return ski_position     # NOOP or any other action: orientation unchanged


approx_x_coordinate = 80    # rough starting guess for the skier's horizontal position
ski_position = 8            # start pointing roughly straight downhill

for _ in range(1000000):
    action = env.action_space.sample()  # random placeholder action, overridden by the key press below
    key = readchar.readchar()                             # block until a key is pressed
    action = int(key) if key in ("0", "1", "2") else 0    # 0 = NOOP, 1 = RIGHT, 2 = LEFT
    ski_position = update_ski_position(ski_position, action)
    y_update = update_y(y, ski_position)
    y += y_update

    # Approximate the skier's x coordinate as the mean column index of the pixels
    # whose green-channel value is 92, and report how far it moved since last frame.
    old_x = deepcopy(approx_x_coordinate)
    approx_x_coordinate = int(np.mean(np.where(observation[:, :, 1] == 92)[1]))
    print(f"Action: {action},\tski position: {ski_position},\ty_update: {y_update},\ty: {y},\tx: {approx_x_coordinate},\tx_update: {approx_x_coordinate - old_x}")

    observation, reward, terminated, truncated, info = env.step(action)
    if terminated or truncated:
        observation, info = env.reset()

    # Advance a few extra frames with NOOP so one key press covers several environment steps.
    observation, reward, terminated, truncated, info = env.step(0)
    observation, reward, terminated, truncated, info = env.step(0)
    observation, reward, terminated, truncated, info = env.step(0)
    observation, reward, terminated, truncated, info = env.step(0)

env.close()