
fixed typo, removed ws

refactoring
sp committed 12 months ago
commit cc39cca0ab
examples/shields/rl/15_train_eval_tune.py (59 changed lines)
@@ -19,23 +19,23 @@ from shieldhandlers import MiniGridShieldHandler, create_shield_query
 from torch.utils.tensorboard import SummaryWriter
 from callbacks import MyCallbacks
 
 def shielding_env_creater(config):
     name = config.get("name", "MiniGrid-LavaCrossingS9N3-v0")
     framestack = config.get("framestack", 4)
     args = config.get("args", None)
     args.grid_path = F"{args.expname}_{args.grid_path}_{config.worker_index}.txt"
-    args.prism_path = F"{args.expname}_{args.prism_path}_{config.worker_index}.prism"
-    shielding = config.get("shielding", False)
-    shield_creator = MiniGridShieldHandler(grid_file=args.grid_path,
+    args.prism_path = F"{args.expname}_{args.prism_path}_{config.worker_index}.prism"
+    shielding = config.get("shielding", False)
+    shield_creator = MiniGridShieldHandler(grid_file=args.grid_path,
                                            grid_to_prism_path=args.grid_to_prism_binary_path,
                                            prism_path=args.prism_path,
                                            formula=args.formula,
                                            shield_value=args.shield_value,
                                            prism_config=args.prism_config,
                                            shield_comparision=args.shield_comparision)
     prob_forward = args.prob_forward
     prob_direct = args.prob_direct
     prob_next = args.prob_next
@@ -47,8 +47,8 @@ def shielding_env_creater(config):
         config.vector_index if hasattr(config, "vector_index") else 0,
         framestack=framestack
     )
 
     return env
@@ -57,10 +57,10 @@ def register_minigrid_shielding_env(args):
     register_env(env_name, shielding_env_creater)
 
     ModelCatalog.register_custom_model(
-        "shielding_model",
+        "shielding_model",
         TorchActionMaskModel
     )
 
 def trial_name_creator(trial : Trial):
     return "trial"
@@ -78,7 +78,7 @@ def ppo(args):
             "shielding": args.shielding is ShieldingConfig.Full or args.shielding is ShieldingConfig.Training,
         },)
         .framework("torch")
-        .callbacks(MyCallbacks, ShieldInfoCallback(logdir, [1,12])
+        .callbacks(MyCallbacks, ShieldInfoCallback(logdir, [1,12]))
         .evaluation(evaluation_config={
             "evaluation_interval": 1,
             "evaluation_duration": 10,
@@ -106,25 +106,24 @@ def ppo(args):
         ),
         run_config=air.RunConfig(
                 stop = {"episode_reward_mean": 94,
-                        "timesteps_total": args.steps,},
+                        "timesteps_total": args.steps,},
                 checkpoint_config=air.CheckpointConfig(checkpoint_at_end=True,
-                                                       num_to_keep=1,
+                                                       num_to_keep=1,
                                                        checkpoint_score_attribute="episode_reward_mean",
                 ),
                 storage_path=F"{logdir}",
                 name=test_name(args),
-            )
-            ,
+            ),
         param_space=config,)
 
     results = tuner.fit()
     best_result = results.get_best_result()
 
     import pprint
     metrics_to_print = [
         "episode_reward_mean",
         "episode_reward_max",
@@ -134,14 +133,14 @@ def ppo(args):
     pprint.pprint({k: v for k, v in best_result.metrics.items() if k in metrics_to_print})
 
     # algo = Algorithm.from_checkpoint(best_result.checkpoint)
     # eval_log_dir = F"{logdir}-eval"
     # writer = SummaryWriter(log_dir=eval_log_dir)
     # csv_logger = CSVLogger(config=config, logdir=eval_log_dir)
     # for i in range(args.evaluations):
     #     eval_result = algo.evaluate()
     #     print(pretty_print(eval_result))
@@ -149,23 +148,23 @@ def ppo(args):
     # #     logger.on_result(eval_result)
     #     csv_logger.on_result(eval_result)
     #     evaluation = eval_result['evaluation']
     #     epsiode_reward_mean = evaluation['episode_reward_mean']
     #     episode_len_mean = evaluation['episode_len_mean']
     #     print(epsiode_reward_mean)
     #     writer.add_scalar("evaluation/episode_reward_mean", epsiode_reward_mean, i)
     #     writer.add_scalar("evaluation/episode_len_mean", episode_len_mean, i)
 
 def main():
     ray.init(num_cpus=3)
     import argparse
     args = parse_arguments(argparse)
     ppo(args)
     ray.shutdown()
 
 if __name__ == '__main__':
-    main()
+    main()
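
Note on the diff: the worker_index suffix in shielding_env_creater gives each RLlib rollout worker its own grid/PRISM file pair, so parallel shield construction never races on a shared path. A minimal sketch of that pattern, assuming Ray RLlib's EnvContext (which exposes worker_index); the env name and file names here are hypothetical placeholders, not the ones used in this repository:

from ray.tune.registry import register_env
from ray.rllib.env.env_context import EnvContext
import gymnasium as gym

def demo_env_creator(config: EnvContext):
    # Suffix the shield input/output paths with the worker index so
    # that concurrently created workers write to distinct files.
    worker = getattr(config, "worker_index", 0)
    grid_path = f"demo_grid_{worker}.txt"      # hypothetical file name
    prism_path = f"demo_model_{worker}.prism"  # hypothetical file name
    print(f"worker {worker} writes {grid_path} and {prism_path}")
    return gym.make(config.get("name", "CartPole-v1"))

register_env("demo-shielding-env", demo_env_creator)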