The source code and dockerfile for the GSW2024 AI Lab.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
This repo is archived. You can view files and clone it, but cannot push or open issues/pull-requests.

167 lines
5.2 KiB

3 months ago
  1. from __future__ import annotations
  2. from operator import add
  3. from gymnasium.spaces import Discrete
  4. from minigrid.core.grid import Grid
  5. from minigrid.core.mission import MissionSpace
  6. from minigrid.core.world_object import Ball, Goal
  7. from minigrid.minigrid_env import MiniGridEnv
  8. class DynamicObstaclesEnv(MiniGridEnv):
  9. """
  10. ## Description
  11. This environment is an empty room with moving obstacles.
  12. The goal of the agent is to reach the green goal square without colliding
  13. with any obstacle. A large penalty is subtracted if the agent collides with
  14. an obstacle and the episode finishes. This environment is useful to test
  15. Dynamic Obstacle Avoidance for mobile robots with Reinforcement Learning in
  16. Partial Observability.
  17. ## Mission Space
  18. "get to the green goal square"
  19. ## Action Space
  20. | Num | Name | Action |
  21. |-----|--------------|--------------|
  22. | 0 | left | Turn left |
  23. | 1 | right | Turn right |
  24. | 2 | forward | Move forward |
  25. | 3 | pickup | Unused |
  26. | 4 | drop | Unused |
  27. | 5 | toggle | Unused |
  28. | 6 | done | Unused |
  29. ## Observation Encoding
  30. - Each tile is encoded as a 3 dimensional tuple:
  31. `(OBJECT_IDX, COLOR_IDX, STATE)`
  32. - `OBJECT_TO_IDX` and `COLOR_TO_IDX` mapping can be found in
  33. [minigrid/minigrid.py](minigrid/minigrid.py)
  34. - `STATE` refers to the door state with 0=open, 1=closed and 2=locked
  35. ## Rewards
  36. A reward of '1 - 0.9 * (step_count / max_steps)' is given for success, and '0' for failure. A '-1' penalty is
  37. subtracted if the agent collides with an obstacle.
  38. ## Termination
  39. The episode ends if any one of the following conditions is met:
  40. 1. The agent reaches the goal.
  41. 2. The agent collides with an obstacle.
  42. 3. Timeout (see `max_steps`).
  43. ## Registered Configurations
  44. - `MiniGrid-Dynamic-Obstacles-5x5-v0`
  45. - `MiniGrid-Dynamic-Obstacles-Random-5x5-v0`
  46. - `MiniGrid-Dynamic-Obstacles-6x6-v0`
  47. - `MiniGrid-Dynamic-Obstacles-Random-6x6-v0`
  48. - `MiniGrid-Dynamic-Obstacles-8x8-v0`
  49. - `MiniGrid-Dynamic-Obstacles-16x16-v0`
  50. """
  51. def __init__(
  52. self,
  53. size=8,
  54. agent_start_pos=(1, 1),
  55. agent_start_dir=0,
  56. n_obstacles=4,
  57. max_steps: int | None = None,
  58. **kwargs,
  59. ):
  60. self.agent_start_pos = agent_start_pos
  61. self.agent_start_dir = agent_start_dir
  62. # Reduce obstacles if there are too many
  63. if n_obstacles <= size / 2 + 1:
  64. self.n_obstacles = int(n_obstacles)
  65. else:
  66. self.n_obstacles = int(size / 2)
  67. mission_space = MissionSpace(mission_func=self._gen_mission)
  68. if max_steps is None:
  69. max_steps = 4 * size**2
  70. super().__init__(
  71. mission_space=mission_space,
  72. grid_size=size,
  73. # Set this to True for maximum speed
  74. see_through_walls=True,
  75. max_steps=max_steps,
  76. **kwargs,
  77. )
  78. # Allow only 3 actions permitted: left, right, forward
  79. self.action_space = Discrete(self.actions.forward + 1)
  80. self.reward_range = (-1, 1)
  81. @staticmethod
  82. def _gen_mission():
  83. return "get to the green goal square"
  84. def _gen_grid(self, width, height):
  85. # Create an empty grid
  86. self.grid = Grid(width, height)
  87. # Generate the surrounding walls
  88. self.grid.wall_rect(0, 0, width, height)
  89. # Place a goal square in the bottom-right corner
  90. self.grid.set(width - 2, height - 2, Goal())
  91. # Place the agent
  92. if self.agent_start_pos is not None:
  93. self.agent_pos = self.agent_start_pos
  94. self.agent_dir = self.agent_start_dir
  95. else:
  96. self.place_agent()
  97. # Place obstacles
  98. self.obstacles = []
  99. for i_obst in range(self.n_obstacles):
  100. self.obstacles.append(Ball())
  101. self.place_obj(self.obstacles[i_obst], max_tries=100)
  102. self.mission = "get to the green goal square"
  103. def step(self, action):
  104. # Invalid action
  105. if action >= self.action_space.n:
  106. action = 0
  107. # Check if there is an obstacle in front of the agent
  108. front_cell = self.grid.get(*self.front_pos)
  109. not_clear = front_cell and front_cell.type != "goal"
  110. # Update obstacle positions
  111. for i_obst in range(len(self.obstacles)):
  112. old_pos = self.obstacles[i_obst].cur_pos
  113. top = tuple(map(add, old_pos, (-1, -1)))
  114. try:
  115. self.place_obj(
  116. self.obstacles[i_obst], top=top, size=(3, 3), max_tries=100
  117. )
  118. self.grid.set(old_pos[0], old_pos[1], None)
  119. except Exception:
  120. pass
  121. # Update the agent's position/direction
  122. obs, reward, terminated, truncated, info = super().step(action)
  123. # If the agent tried to walk over an obstacle or wall
  124. if action == self.actions.forward and not_clear:
  125. reward = -1
  126. terminated = True
  127. return obs, reward, terminated, truncated, info
  128. return obs, reward, terminated, truncated, info