# dqn_eval.py
import random
from typing import Callable
import gymnasium as gym
import numpy as np
import torch


def evaluate(
model_path: str,
make_env: Callable,
env_id: str,
    eval_episodes: int,
run_name: str,
    Model: Callable[..., torch.nn.Module],
device: torch.device = torch.device("cpu"),
epsilon: float = 0.05,
capture_video: bool = True
):
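    """Roll out a trained Q-network and collect episodic returns.

    `Model` is the Q-network class, instantiated against the vectorized env;
    `model_path` points to a saved state_dict. Actions are chosen
    epsilon-greedily. Returns a list of `eval_episodes` episodic returns.
    """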
envs = gym.vector.SyncVectorEnv([make_env(env_id, 0, 0, capture_video, run_name)])
model = Model(envs).to(device)
model.load_state_dict(torch.load(model_path, map_location=device))
model.eval()
obs, _ = envs.reset()
episodic_returns = []
    while len(episodic_returns) < eval_episodes:
        # Epsilon-greedy: act randomly with probability `epsilon`, else greedily.
        if random.random() < epsilon:
            actions = np.array([envs.single_action_space.sample() for _ in range(envs.num_envs)])
        else:
            # Greedy action from the Q-network; gradients are unnecessary at eval time.
            with torch.no_grad():
                q_values = model(torch.Tensor(obs).to(device))
                actions = torch.argmax(q_values, dim=1).cpu().numpy()
next_obs, _, _, _, infos = envs.step(actions)
if "final_info" in infos:
for info in infos["final_info"]:
if "episode" not in info:
continue
print(f"eval_episode={len(episodic_returns)}, episodic_return={info['episode']['r']}")
episodic_returns += [info["episode"]["r"]]
obs = next_obs
return episodic_returns


if __name__ == "__main__":
from huggingface_hub import hf_hub_download
from dqn_atari import QNetwork, make_env
model_path = hf_hub_download(repo_id="cleanrl/CartPole-v1-dqn-seed1", filename="dqn.cleanrl_model")
# model_path = ".pth"
    episodic_returns = evaluate(
model_path,
make_env,
"CartPole-v1",
        eval_episodes=10,  # evaluate 10 episodes; 0 would skip the loop entirely
run_name=f"eval",
Model=QNetwork,
device="cpu",
capture_video=False
)
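
    # Follow-up sketch (an addition, not part of the upstream script): summarize
    # the collected returns with a simple mean/std report.
    returns = np.array(episodic_returns).ravel()
    print(f"evaluated {len(returns)} episodes: mean return {returns.mean():.2f} +/- {returns.std():.2f}")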