8 changed files with 272 additions and 110 deletions

- 48 examples/shields/rl/11_minigridrl.py
- 9 examples/shields/rl/13_minigridsb.py
- 42 examples/shields/rl/14_train_eval.py
- 118 examples/shields/rl/15_train_eval_tune.py
- 19 examples/shields/rl/ShieldHandlers.py
- 67 examples/shields/rl/Wrappers.py
- 61 examples/shields/rl/callbacks.py
- 6 examples/shields/rl/helpers.py
examples/shields/rl/15_train_eval_tune.py
@@ -0,0 +1,118 @@

import gymnasium as gym

import minigrid
# import numpy as np

# import ray
from ray.tune import register_env
from ray import tune, air
from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.algorithms.dqn.dqn import DQNConfig
# from ray.rllib.algorithms.callbacks import DefaultCallbacks
from ray.tune.logger import pretty_print, TBXLogger, TBXLoggerCallback, DEFAULT_LOGGERS, UnifiedLogger
from ray.rllib.models import ModelCatalog


from TorchActionMaskModel import TorchActionMaskModel
from Wrappers import OneHotShieldingWrapper, MiniGridShieldingWrapper
from helpers import parse_arguments, create_log_dir, ShieldingConfig
from ShieldHandlers import MiniGridShieldHandler, create_shield_query

from callbacks import MyCallbacks

import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter


# Env creator passed to RLlib's register_env: builds an (optionally shielded) MiniGrid env.
def shielding_env_creater(config):
    name = config.get("name", "MiniGrid-LavaCrossingS9N1-v0")
    framestack = config.get("framestack", 4)
    args = config.get("args", None)
    # give each rollout worker its own grid/prism files to avoid clashes
    args.grid_path = F"{args.grid_path}_{config.worker_index}.txt"
    args.prism_path = F"{args.prism_path}_{config.worker_index}.prism"

    shielding = config.get("shielding", False)

    # if shielding:
    #     assert(False)

    shield_creator = MiniGridShieldHandler(args.grid_path, args.grid_to_prism_binary_path, args.prism_path, args.formula)

    env = gym.make(name)
    env = MiniGridShieldingWrapper(env, shield_creator=shield_creator, shield_query_creator=create_shield_query, mask_actions=shielding)

    env = OneHotShieldingWrapper(env,
                        config.vector_index if hasattr(config, "vector_index") else 0,
                        framestack=framestack
                        )

    return env


def register_minigrid_shielding_env(args):
    env_name = "mini-grid-shielding"
    register_env(env_name, shielding_env_creater)

    ModelCatalog.register_custom_model(
        "shielding_model",
        TorchActionMaskModel
    )

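As a quick sanity check outside of an RLlib worker, the creator can be exercised directly with an EnvContext. This is only a sketch, not part of this patch: it assumes args comes from parse_arguments and that the grid-to-prism toolchain behind MiniGridShieldHandler is available; smoke_test_env is a hypothetical helper.

# Sketch only (not in this patch): build the env once, here without action masking.
from ray.rllib.env.env_context import EnvContext

def smoke_test_env(args):
    env_config = {"name": args.env, "args": args, "shielding": False}
    ctx = EnvContext(env_config, worker_index=0)
    env = shielding_env_creater(ctx)
    obs, info = env.reset()
    print(env.action_space, env.observation_space)
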
def ppo(args):
    register_minigrid_shielding_env(args)

    config = (PPOConfig()
        .rollouts(num_rollout_workers=args.workers)
        .resources(num_gpus=0)
        .environment(env="mini-grid-shielding",
                     env_config={"name": args.env, "args": args, "shielding": args.shielding is ShieldingConfig.Full or args.shielding is ShieldingConfig.Training})
        .framework("torch")
        .callbacks(MyCallbacks)
        .evaluation(evaluation_config={
                        "evaluation_interval": 1,
                        "evaluation_duration": 10,
                        "evaluation_num_workers": 1,
                        "env": "mini-grid-shielding",
                        "env_config": {"name": args.env, "args": args, "shielding": args.shielding is ShieldingConfig.Full or args.shielding is ShieldingConfig.Evaluation}})
        .rl_module(_enable_rl_module_api=False)
        .debugging(logger_config={
            "type": UnifiedLogger,
            "logdir": create_log_dir(args)
        })
        .training(_enable_learner_api=False, model={
            "custom_model": "shielding_model"
        }))

    tuner = tune.Tuner("PPO",
                       run_config=air.RunConfig(
                           stop={"episode_reward_mean": 50},
                           checkpoint_config=air.CheckpointConfig(checkpoint_at_end=True),
                           storage_path=F"{create_log_dir(args)}-tuner"
                       ),
                       param_space=config,)

    tuner.fit()

    iterations = args.iterations
    print(config.to_dict())
    # starts a second, separate training run via the legacy tune.run API
    tune.run("PPO", config=config)

    # print(episode_reward_mean)
    # writer.add_scalar("evaluation/episode_reward", episode_reward_mean, i)

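For inspecting what the Tuner run produced, a minimal sketch follows. It assumes the standard RLlib result layout, in which each custom metric recorded by MyCallbacks is aggregated into *_mean/*_min/*_max entries; it is not part of this patch.

# Sketch only: keep the return value of tuner.fit() instead of discarding it.
results = tuner.fit()
best = results.get_best_result(metric="episode_reward_mean", mode="max")
print(best.metrics["episode_reward_mean"])
print(best.metrics["custom_metrics"]["ran_into_lava_mean"])
print(best.checkpoint)
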
def main():
    import argparse
    args = parse_arguments(argparse)

    ppo(args)


if __name__ == '__main__':
    main()

examples/shields/rl/callbacks.py
@@ -0,0 +1,61 @@

from typing import Dict

from ray.rllib.policy import Policy
from ray.rllib.utils.typing import PolicyID

from ray.rllib.algorithms.algorithm import Algorithm
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.evaluation import RolloutWorker
from ray.rllib.evaluation.episode import Episode
from ray.rllib.evaluation.episode_v2 import EpisodeV2

from ray.rllib.algorithms.callbacks import DefaultCallbacks, make_multi_callbacks


class MyCallbacks(DefaultCallbacks):
    def on_episode_start(self, *, worker: RolloutWorker, base_env: BaseEnv, policies: Dict[PolicyID, Policy], episode: Episode | EpisodeV2, env_index: int | None = None, **kwargs) -> None:
        # print(F"Episode started, environment: {base_env.get_sub_environments()}")
        env = base_env.get_sub_environments()[0]
        episode.user_data["count"] = 0
        episode.user_data["ran_into_lava"] = []
        episode.user_data["goals_reached"] = []
        episode.hist_data["ran_into_lava"] = []
        episode.hist_data["goals_reached"] = []
        # print("On episode start print")
        # print(env.printGrid())
        # print(worker)
        # print(env.action_space.n)
        # print(env.actions)
        # print(env.mission)
        # print(env.observation_space)
        # img = env.get_frame()
        # plt.imshow(img)
        # plt.show()

    def on_episode_step(self, *, worker: RolloutWorker, base_env: BaseEnv, policies: Dict[PolicyID, Policy] | None = None, episode: Episode | EpisodeV2, env_index: int | None = None, **kwargs) -> None:
        episode.user_data["count"] = episode.user_data["count"] + 1
        env = base_env.get_sub_environments()[0]
        # print(env.printGrid())

    def on_episode_end(self, *, worker: RolloutWorker, base_env: BaseEnv, policies: Dict[PolicyID, Policy], episode: Episode | EpisodeV2 | Exception, env_index: int | None = None, **kwargs) -> None:
        # print(F"Episode end, environment: {base_env.get_sub_environments()}")
        env = base_env.get_sub_environments()[0]
        # the tile the agent ends on tells us whether it reached the goal or stepped into lava
        agent_tile = env.grid.get(env.agent_pos[0], env.agent_pos[1])

        episode.user_data["goals_reached"].append(agent_tile is not None and agent_tile.type == "goal")
        episode.user_data["ran_into_lava"].append(agent_tile is not None and agent_tile.type == "lava")
        episode.custom_metrics["reached_goal"] = agent_tile is not None and agent_tile.type == "goal"
        episode.custom_metrics["ran_into_lava"] = agent_tile is not None and agent_tile.type == "lava"
        # print("On episode end print")
        # print(env.printGrid())
        episode.hist_data["goals_reached"] = episode.user_data["goals_reached"]
        episode.hist_data["ran_into_lava"] = episode.user_data["ran_into_lava"]

    def on_evaluate_start(self, *, algorithm: Algorithm, **kwargs) -> None:
        print("Evaluate Start")

    def on_evaluate_end(self, *, algorithm: Algorithm, evaluation_metrics: dict, **kwargs) -> None:
        print("Evaluate End")

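make_multi_callbacks is imported above but not used yet. A sketch of how it could chain MyCallbacks with a second callbacks class follows; LoggingCallbacks is hypothetical and only illustrates the pattern.

# Sketch only (not in this patch): combine several DefaultCallbacks subclasses into one.
class LoggingCallbacks(DefaultCallbacks):
    def on_train_result(self, *, algorithm: Algorithm, result: dict, **kwargs) -> None:
        print(result.get("episode_reward_mean"), result.get("custom_metrics"))

CombinedCallbacks = make_multi_callbacks([MyCallbacks, LoggingCallbacks])
# An AlgorithmConfig would then pass .callbacks(CombinedCallbacks) instead of .callbacks(MyCallbacks).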