From 7e95325ee986fbeec30ff4e1120803b442a82924 Mon Sep 17 00:00:00 2001
From: Teddy Koker
Date: Tue, 24 Nov 2020 17:59:31 -0500
Subject: [PATCH 1/4] Upgrade DQN to use .log

---
 pl_bolts/models/rl/dqn_model.py | 16 +++-------------
 1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/pl_bolts/models/rl/dqn_model.py b/pl_bolts/models/rl/dqn_model.py
index bbaffff922..7adab32204 100644
--- a/pl_bolts/models/rl/dqn_model.py
+++ b/pl_bolts/models/rl/dqn_model.py
@@ -288,28 +288,18 @@ def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], _) -> OrderedD
         if self.global_step % self.sync_rate == 0:
             self.target_net.load_state_dict(self.net.state_dict())
 
-        log = {
+        self.log_dict({
             "total_reward": self.total_rewards[-1],
             "avg_reward": self.avg_rewards,
             "train_loss": loss,
             "episodes": self.done_episodes,
             "episode_steps": self.total_episode_steps[-1]
-        }
-        status = {
-            "steps": self.global_step,
-            "avg_reward": self.avg_rewards,
-            "total_reward": self.total_rewards[-1],
-            "episodes": self.done_episodes,
-            "episode_steps": self.total_episode_steps[-1],
-            "epsilon": self.agent.epsilon,
-        }
+        })
 
         return OrderedDict(
             {
                 "loss": loss,
                 "avg_reward": self.avg_rewards,
-                "log": log,
-                "progress_bar": status,
             }
         )
 
@@ -323,7 +313,7 @@ def test_epoch_end(self, outputs) -> Dict[str, torch.Tensor]:
         """Log the avg of the test results"""
         rewards = [x["test_reward"] for x in outputs]
         avg_reward = sum(rewards) / len(rewards)
-        tensorboard_logs = {"avg_test_reward": avg_reward}
+        self.log("avg_test_reward", avg_reward)
         return {"avg_test_reward": avg_reward, "log": tensorboard_logs}
 
     def configure_optimizers(self) -> List[Optimizer]:

From 175b7cd664c16abfe5ac0c3db60289388d22b8f9 Mon Sep 17 00:00:00 2001
From: Teddy Koker
Date: Tue, 24 Nov 2020 18:02:35 -0500
Subject: [PATCH 2/4] remove unused

---
 pl_bolts/models/rl/dqn_model.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pl_bolts/models/rl/dqn_model.py b/pl_bolts/models/rl/dqn_model.py
index 7adab32204..2b90475343 100644
--- a/pl_bolts/models/rl/dqn_model.py
+++ b/pl_bolts/models/rl/dqn_model.py
@@ -313,8 +313,8 @@ def test_epoch_end(self, outputs) -> Dict[str, torch.Tensor]:
         """Log the avg of the test results"""
         rewards = [x["test_reward"] for x in outputs]
         avg_reward = sum(rewards) / len(rewards)
-        self.log("avg_test_reward", avg_reward)
-        return {"avg_test_reward": avg_reward, "log": tensorboard_logs}
+        self.log("avg_test_reward",avg_reward)
+        return {"avg_test_reward": avg_reward}
 
     def configure_optimizers(self) -> List[Optimizer]:
         """ Initialize Adam optimizer"""

From b334c49c3d691591f273ce4d1162ce791adc1fdb Mon Sep 17 00:00:00 2001
From: Teddy Koker
Date: Tue, 24 Nov 2020 18:03:39 -0500
Subject: [PATCH 3/4] pep8

---
 pl_bolts/models/rl/dqn_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pl_bolts/models/rl/dqn_model.py b/pl_bolts/models/rl/dqn_model.py
index 2b90475343..6f2460216e 100644
--- a/pl_bolts/models/rl/dqn_model.py
+++ b/pl_bolts/models/rl/dqn_model.py
@@ -313,7 +313,7 @@ def test_epoch_end(self, outputs) -> Dict[str, torch.Tensor]:
         """Log the avg of the test results"""
         rewards = [x["test_reward"] for x in outputs]
         avg_reward = sum(rewards) / len(rewards)
-        self.log("avg_test_reward",avg_reward)
+        self.log("avg_test_reward", avg_reward)
         return {"avg_test_reward": avg_reward}
 
     def configure_optimizers(self) -> List[Optimizer]:

From 817453044f2c2065acec547199e782d47ec7a77e Mon Sep 17 00:00:00 2001
From: Teddy Koker
Date: Tue, 24 Nov 2020 18:11:55 -0500
Subject: [PATCH 4/4] fixed other dqn

---
 pl_bolts/models/rl/double_dqn_model.py | 14 ++------------
 pl_bolts/models/rl/per_dqn_model.py    | 14 ++------------
 2 files changed, 4 insertions(+), 24 deletions(-)

diff --git a/pl_bolts/models/rl/double_dqn_model.py b/pl_bolts/models/rl/double_dqn_model.py
index 284c328f2d..150ea14dd9 100644
--- a/pl_bolts/models/rl/double_dqn_model.py
+++ b/pl_bolts/models/rl/double_dqn_model.py
@@ -65,27 +65,17 @@ def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], _) -> OrderedD
         if self.global_step % self.sync_rate == 0:
             self.target_net.load_state_dict(self.net.state_dict())
 
-        log = {
+        self.log_dict({
             "total_reward": self.total_rewards[-1],
             "avg_reward": self.avg_rewards,
             "train_loss": loss,
             # "episodes": self.total_episode_steps,
-        }
-        status = {
-            "steps": self.global_step,
-            "avg_reward": self.avg_rewards,
-            "total_reward": self.total_rewards[-1],
-            "episodes": self.done_episodes,
-            # "episode_steps": self.episode_steps,
-            "epsilon": self.agent.epsilon,
-        }
+        })
 
         return OrderedDict(
             {
                 "loss": loss,
                 "avg_reward": self.avg_rewards,
-                "log": log,
-                "progress_bar": status,
             }
         )
 
diff --git a/pl_bolts/models/rl/per_dqn_model.py b/pl_bolts/models/rl/per_dqn_model.py
index 69fe61bbe9..ec8636265e 100644
--- a/pl_bolts/models/rl/per_dqn_model.py
+++ b/pl_bolts/models/rl/per_dqn_model.py
@@ -130,27 +130,17 @@ def training_step(self, batch, _) -> OrderedDict:
         if self.global_step % self.sync_rate == 0:
             self.target_net.load_state_dict(self.net.state_dict())
 
-        log = {
+        self.log_dict({
             "total_reward": self.total_rewards[-1],
             "avg_reward": self.avg_rewards,
             "train_loss": loss,
             # "episodes": self.total_episode_steps,
-        }
-        status = {
-            "steps": self.global_step,
-            "avg_reward": self.avg_rewards,
-            "total_reward": self.total_rewards[-1],
-            "episodes": self.done_episodes,
-            # "episode_steps": self.episode_steps,
-            "epsilon": self.agent.epsilon,
-        }
+        })
 
         return OrderedDict(
             {
                 "loss": loss,
                 "avg_reward": self.avg_rewards,
-                "log": log,
-                "progress_bar": status,
             }
         )
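
For reference, below is a minimal sketch of the logging pattern these patches move to: calling self.log / self.log_dict inside the step instead of returning "log" and "progress_bar" dictionaries. The toy module and metric names are illustrative only, not taken from pl_bolts; the sketch assumes a recent (>= 1.0) PyTorch Lightning API.

    from collections import OrderedDict

    import torch
    import pytorch_lightning as pl


    class ToyModel(pl.LightningModule):
        """Illustrative module, not part of the patch series."""

        def __init__(self):
            super().__init__()
            self.layer = torch.nn.Linear(4, 1)

        def training_step(self, batch, batch_idx):
            x, y = batch
            loss = torch.nn.functional.mse_loss(self.layer(x), y)

            # New-style logging: metrics go through self.log_dict / self.log
            # rather than being returned under "log" / "progress_bar" keys.
            self.log_dict({"train_loss": loss})
            # prog_bar=True takes over the role of the old "progress_bar" dict.
            self.log("avg_reward_example", loss, prog_bar=True)

            # The step result only needs the loss now.
            return OrderedDict({"loss": loss})

        def configure_optimizers(self):
            return torch.optim.Adam(self.parameters(), lr=1e-3)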