{"cells": [{"cell_type": "markdown", "id": "636e51e3", "metadata": {"papermill": {"duration": 0.003219, "end_time": "2025-04-03T20:56:52.409834", "exception": false, "start_time": "2025-04-03T20:56:52.406615", "status": "completed"}, "tags": []}, "source": ["\n", "# How to train a Deep Q Network\n", "\n", "* **Author:** Lightning.ai\n", "* **License:** CC BY-SA\n", "* **Generated:** 2025-04-03T20:56:45.942071\n", "\n", "Main takeaways:\n", "\n", "1. RL has the same flow as previous models we have seen, with a few additions\n", "2. Handle unsupervised learning by using an IterableDataset where the dataset itself is constantly updated during training\n", "3. Each training step carries has the agent taking an action in the environment and storing the experience in the IterableDataset\n", "\n", "\n", "---\n", "Open in [{height=\"20px\" width=\"117px\"}](https://colab.research.google.com/github/PytorchLightning/lightning-tutorials/blob/publication/.notebooks/lightning_examples/reinforce-learning-DQN.ipynb)\n", "\n", "Give us a \u2b50 [on Github](https://www.github.com/Lightning-AI/lightning/)\n", "| Check out [the documentation](https://lightning.ai/docs/)\n", "| Join us [on Discord](https://discord.com/invite/tfXFetEZxv)"]}, {"cell_type": "markdown", "id": "dbd8f0c4", "metadata": {"papermill": {"duration": 0.002193, "end_time": "2025-04-03T20:56:52.414457", "exception": false, "start_time": "2025-04-03T20:56:52.412264", "status": "completed"}, "tags": []}, "source": ["## Setup\n", "This notebook requires some packages besides pytorch-lightning."]}, {"cell_type": "code", "execution_count": 1, "id": "524907c8", "metadata": {"colab": {}, "colab_type": "code", "execution": {"iopub.execute_input": "2025-04-03T20:56:52.421115Z", "iopub.status.busy": "2025-04-03T20:56:52.420090Z", "iopub.status.idle": "2025-04-03T20:56:53.613832Z", "shell.execute_reply": "2025-04-03T20:56:53.612362Z"}, "id": "LfrJLKPFyhsK", "lines_to_next_cell": 0, "papermill": {"duration": 1.199272, "end_time": "2025-04-03T20:56:53.616049", "exception": false, "start_time": "2025-04-03T20:56:52.416777", "status": "completed"}, "tags": []}, "outputs": [{"name": "stdout", "output_type": "stream", "text": ["\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable.It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\r\n", "\u001b[0m"]}, {"name": "stdout", "output_type": "stream", "text": ["\r\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m25.0.1\u001b[0m\r\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\r\n"]}], "source": ["! 
pip install --quiet \"matplotlib\" \"numpy <2.0\" \"pandas\" \"gym <0.24\" \"pygame\" \"torch ==2.1.*\" \"pytorch-lightning >=2.0,<2.6\" \"torchvision ==0.16.*\" \"seaborn\" \"torchmetrics>=1.0, <1.8\""]}, {"cell_type": "code", "execution_count": 2, "id": "5cb2e377", "metadata": {"execution": {"iopub.execute_input": "2025-04-03T20:56:53.623508Z", "iopub.status.busy": "2025-04-03T20:56:53.622995Z", "iopub.status.idle": "2025-04-03T20:56:56.688851Z", "shell.execute_reply": "2025-04-03T20:56:56.687594Z"}, "papermill": {"duration": 3.072217, "end_time": "2025-04-03T20:56:56.691347", "exception": false, "start_time": "2025-04-03T20:56:53.619130", "status": "completed"}, "tags": []}, "outputs": [{"name": "stderr", "output_type": "stream", "text": ["/tmp/ipykernel_691/94481177.py:10: DeprecationWarning: Importing display from IPython.core.display is deprecated since IPython 7.14, please import from IPython display\n", " from IPython.core.display import display\n"]}], "source": ["import os\n", "from collections import OrderedDict, deque, namedtuple\n", "from typing import Iterator, List, Tuple\n", "\n", "import gym\n", "import numpy as np\n", "import pandas as pd\n", "import seaborn as sn\n", "import torch\n", "from IPython.core.display import display\n", "from pytorch_lightning import LightningModule, Trainer\n", "from pytorch_lightning.loggers import CSVLogger\n", "from torch import Tensor, nn\n", "from torch.optim import Adam, Optimizer\n", "from torch.utils.data import DataLoader\n", "from torch.utils.data.dataset import IterableDataset\n", "\n", "PATH_DATASETS = os.environ.get(\"PATH_DATASETS\", \".\")"]}, {"cell_type": "code", "execution_count": 3, "id": "cfca6d08", "metadata": {"execution": {"iopub.execute_input": "2025-04-03T20:56:56.703664Z", "iopub.status.busy": "2025-04-03T20:56:56.703320Z", "iopub.status.idle": "2025-04-03T20:56:56.708746Z", "shell.execute_reply": "2025-04-03T20:56:56.707918Z"}, "papermill": {"duration": 0.013678, "end_time": "2025-04-03T20:56:56.710685", "exception": false, "start_time": "2025-04-03T20:56:56.697007", "status": "completed"}, "tags": []}, "outputs": [], "source": ["class DQN(nn.Module):\n", " def __init__(self, obs_size: int, n_actions: int, hidden_size: int = 128):\n", " \"\"\"Simple MLP network.\n", "\n", " Args:\n", " obs_size: observation/state size of the environment\n", " n_actions: number of discrete actions available in the environment\n", " hidden_size: size of hidden layers\n", "\n", " \"\"\"\n", " super().__init__()\n", " self.net = nn.Sequential(\n", " nn.Linear(obs_size, hidden_size),\n", " nn.ReLU(),\n", " nn.Linear(hidden_size, n_actions),\n", " )\n", "\n", " def forward(self, x):\n", " return self.net(x.float())"]}, {"cell_type": "markdown", "id": "f090e220", "metadata": {"papermill": {"duration": 0.005196, "end_time": "2025-04-03T20:56:56.721152", "exception": false, "start_time": "2025-04-03T20:56:56.715956", "status": "completed"}, "tags": []}, "source": ["### Memory"]}, {"cell_type": "code", "execution_count": 4, "id": "be2418d3", "metadata": {"execution": {"iopub.execute_input": "2025-04-03T20:56:56.732885Z", "iopub.status.busy": "2025-04-03T20:56:56.732707Z", "iopub.status.idle": "2025-04-03T20:56:56.736672Z", "shell.execute_reply": "2025-04-03T20:56:56.735812Z"}, "papermill": {"duration": 0.012112, "end_time": "2025-04-03T20:56:56.738605", "exception": false, "start_time": "2025-04-03T20:56:56.726493", "status": "completed"}, "tags": []}, "outputs": [], "source": ["\n", "# Named tuple for 
storing experience steps gathered in training\n", "Experience = namedtuple(\n", " \"Experience\",\n", " field_names=[\"state\", \"action\", \"reward\", \"done\", \"new_state\"],\n", ")"]}, {"cell_type": "code", "execution_count": 5, "id": "3014214f", "metadata": {"execution": {"iopub.execute_input": "2025-04-03T20:56:56.750285Z", "iopub.status.busy": "2025-04-03T20:56:56.750100Z", "iopub.status.idle": "2025-04-03T20:56:56.755893Z", "shell.execute_reply": "2025-04-03T20:56:56.755080Z"}, "papermill": {"duration": 0.013985, "end_time": "2025-04-03T20:56:56.757962", "exception": false, "start_time": "2025-04-03T20:56:56.743977", "status": "completed"}, "tags": []}, "outputs": [], "source": ["class ReplayBuffer:\n", " \"\"\"Replay Buffer for storing past experiences, allowing the agent to learn from them.\n", "\n", " Args:\n", " capacity: size of the buffer\n", "\n", " \"\"\"\n", "\n", " def __init__(self, capacity: int) -> None:\n", " self.buffer = deque(maxlen=capacity)\n", "\n", " def __len__(self) -> int:\n", " return len(self.buffer)\n", "\n", " def append(self, experience: Experience) -> None:\n", " \"\"\"Add experience to the buffer.\n", "\n", " Args:\n", " experience: tuple (state, action, reward, done, new_state)\n", "\n", " \"\"\"\n", " self.buffer.append(experience)\n", "\n", " def sample(self, batch_size: int) -> Tuple:\n", " indices = np.random.choice(len(self.buffer), batch_size, replace=False)\n", " states, actions, rewards, dones, next_states = zip(*(self.buffer[idx] for idx in indices))\n", "\n", " return (\n", " np.array(states),\n", " np.array(actions),\n", " np.array(rewards, dtype=np.float32),\n", " np.array(dones, dtype=bool),\n", " np.array(next_states),\n", " )"]}, {"cell_type": "code", "execution_count": 6, "id": "ede3d32f", "metadata": {"execution": {"iopub.execute_input": "2025-04-03T20:56:56.769741Z", "iopub.status.busy": "2025-04-03T20:56:56.769570Z", "iopub.status.idle": "2025-04-03T20:56:56.774513Z", "shell.execute_reply": "2025-04-03T20:56:56.773659Z"}, "lines_to_next_cell": 2, "papermill": {"duration": 0.013058, "end_time": "2025-04-03T20:56:56.776402", "exception": false, "start_time": "2025-04-03T20:56:56.763344", "status": "completed"}, "tags": []}, "outputs": [], "source": ["class RLDataset(IterableDataset):\n", " \"\"\"Iterable Dataset containing the ReplayBuffer which will be updated with new experiences during training.\n", "\n", " Args:\n", " buffer: replay buffer\n", " sample_size: number of experiences to sample at a time\n", "\n", " \"\"\"\n", "\n", " def __init__(self, buffer: ReplayBuffer, sample_size: int = 200) -> None:\n", " self.buffer = buffer\n", " self.sample_size = sample_size\n", "\n", " def __iter__(self) -> Iterator[Tuple]:\n", " states, actions, rewards, dones, new_states = self.buffer.sample(self.sample_size)\n", " for i in range(len(dones)):\n", " yield states[i], actions[i], rewards[i], dones[i], new_states[i]"]}, {"cell_type": "markdown", "id": "1cf9079f", "metadata": {"lines_to_next_cell": 2, "papermill": {"duration": 0.005379, "end_time": "2025-04-03T20:56:56.788563", "exception": false, "start_time": "2025-04-03T20:56:56.783184", "status": "completed"}, "tags": []}, "source": ["### Agent"]}, {"cell_type": "code", "execution_count": 7, "id": "18e9ceb8", "metadata": {"execution": {"iopub.execute_input": "2025-04-03T20:56:56.798798Z", "iopub.status.busy": "2025-04-03T20:56:56.798619Z", "iopub.status.idle": "2025-04-03T20:56:56.807393Z", "shell.execute_reply": "2025-04-03T20:56:56.806464Z"}, "lines_to_next_cell": 2, "papermill": 
{"duration": 0.014878, "end_time": "2025-04-03T20:56:56.808877", "exception": false, "start_time": "2025-04-03T20:56:56.793999", "status": "completed"}, "tags": []}, "outputs": [], "source": ["class Agent:\n", " def __init__(self, env: gym.Env, replay_buffer: ReplayBuffer) -> None:\n", " \"\"\"Base Agent class handling the interaction with the environment.\n", "\n", " Args:\n", " env: training environment\n", " replay_buffer: replay buffer storing experiences\n", "\n", " \"\"\"\n", " self.env = env\n", " self.replay_buffer = replay_buffer\n", " self.reset()\n", " self.state = self.env.reset()\n", "\n", " def reset(self) -> None:\n", " \"\"\"Resents the environment and updates the state.\"\"\"\n", " self.state = self.env.reset()\n", "\n", " def get_action(self, net: nn.Module, epsilon: float, device: str) -> int:\n", " \"\"\"Using the given network, decide what action to carry out using an epsilon-greedy policy.\n", "\n", " Args:\n", " net: DQN network\n", " epsilon: value to determine likelihood of taking a random action\n", " device: current device\n", "\n", " Returns:\n", " action\n", "\n", " \"\"\"\n", " if np.random.random() < epsilon:\n", " action = self.env.action_space.sample()\n", " else:\n", " state = torch.tensor([self.state])\n", "\n", " if device not in [\"cpu\"]:\n", " state = state.cuda(device)\n", "\n", " q_values = net(state)\n", " _, action = torch.max(q_values, dim=1)\n", " action = int(action.item())\n", "\n", " return action\n", "\n", " @torch.no_grad()\n", " def play_step(\n", " self,\n", " net: nn.Module,\n", " epsilon: float = 0.0,\n", " device: str = \"cpu\",\n", " ) -> Tuple[float, bool]:\n", " \"\"\"Carries out a single interaction step between the agent and the environment.\n", "\n", " Args:\n", " net: DQN network\n", " epsilon: value to determine likelihood of taking a random action\n", " device: current device\n", "\n", " Returns:\n", " reward, done\n", "\n", " \"\"\"\n", " action = self.get_action(net, epsilon, device)\n", "\n", " # do step in the environment\n", " # So, in the deprecated version of gym, the env.step() has 4 values unpacked which is\n", " # obs, reward, done, info = env.step(action)\n", " # In the latest version of gym, the step() function returns back an additional variable which is truncated.\n", " # obs, reward, terminated, truncated, info = env.step(action)\n", " new_state, reward, done, _ = self.env.step(action)\n", "\n", " exp = Experience(self.state, action, reward, done, new_state)\n", "\n", " self.replay_buffer.append(exp)\n", "\n", " self.state = new_state\n", " if done:\n", " self.reset()\n", " return reward, done"]}, {"cell_type": "markdown", "id": "02cee7cd", "metadata": {"lines_to_next_cell": 2, "papermill": {"duration": 0.003465, "end_time": "2025-04-03T20:56:56.815922", "exception": false, "start_time": "2025-04-03T20:56:56.812457", "status": "completed"}, "tags": []}, "source": ["### DQN Lightning Module"]}, {"cell_type": "code", "execution_count": 8, "id": "d86c159b", "metadata": {"execution": {"iopub.execute_input": "2025-04-03T20:56:56.823740Z", "iopub.status.busy": "2025-04-03T20:56:56.823561Z", "iopub.status.idle": "2025-04-03T20:56:56.837412Z", "shell.execute_reply": "2025-04-03T20:56:56.836469Z"}, "papermill": {"duration": 0.019402, "end_time": "2025-04-03T20:56:56.838748", "exception": false, "start_time": "2025-04-03T20:56:56.819346", "status": "completed"}, "tags": []}, "outputs": [], "source": ["class DQNLightning(LightningModule):\n", " def __init__(\n", " self,\n", " batch_size: int = 16,\n", " lr: float = 
1e-2,\n", " env: str = \"CartPole-v0\",\n", " gamma: float = 0.99,\n", " sync_rate: int = 10,\n", " replay_size: int = 1000,\n", " warm_start_size: int = 1000,\n", " eps_last_frame: int = 1000,\n", " eps_start: float = 1.0,\n", " eps_end: float = 0.01,\n", " episode_length: int = 200,\n", " warm_start_steps: int = 1000,\n", " ) -> None:\n", " \"\"\"Basic DQN Model.\n", "\n", " Args:\n", " batch_size: size of the batches\")\n", " lr: learning rate\n", " env: gym environment tag\n", " gamma: discount factor\n", " sync_rate: how many frames do we update the target network\n", " replay_size: capacity of the replay buffer\n", " warm_start_size: how many samples do we use to fill our buffer at the start of training\n", " eps_last_frame: what frame should epsilon stop decaying\n", " eps_start: starting value of epsilon\n", " eps_end: final value of epsilon\n", " episode_length: max length of an episode\n", " warm_start_steps: max episode reward in the environment\n", "\n", " \"\"\"\n", " super().__init__()\n", " self.save_hyperparameters()\n", "\n", " self.env = gym.make(self.hparams.env)\n", " obs_size = self.env.observation_space.shape[0]\n", " n_actions = self.env.action_space.n\n", "\n", " self.net = DQN(obs_size, n_actions)\n", " self.target_net = DQN(obs_size, n_actions)\n", "\n", " self.buffer = ReplayBuffer(self.hparams.replay_size)\n", " self.agent = Agent(self.env, self.buffer)\n", " self.total_reward = 0\n", " self.episode_reward = 0\n", " self.populate(self.hparams.warm_start_steps)\n", "\n", " def populate(self, steps: int = 1000) -> None:\n", " \"\"\"Carries out several random steps through the environment to initially fill up the replay buffer with\n", " experiences.\n", "\n", " Args:\n", " steps: number of random steps to populate the buffer with\n", "\n", " \"\"\"\n", " for _ in range(steps):\n", " self.agent.play_step(self.net, epsilon=1.0)\n", "\n", " def forward(self, x: Tensor) -> Tensor:\n", " \"\"\"Passes in a state x through the network and gets the q_values of each action as an output.\n", "\n", " Args:\n", " x: environment state\n", "\n", " Returns:\n", " q values\n", "\n", " \"\"\"\n", " output = self.net(x)\n", " return output\n", "\n", " def dqn_mse_loss(self, batch: Tuple[Tensor, Tensor]) -> Tensor:\n", " \"\"\"Calculates the mse loss using a mini batch from the replay buffer.\n", "\n", " Args:\n", " batch: current mini batch of replay data\n", "\n", " Returns:\n", " loss\n", "\n", " \"\"\"\n", " states, actions, rewards, dones, next_states = batch\n", "\n", " state_action_values = self.net(states).gather(1, actions.long().unsqueeze(-1)).squeeze(-1)\n", "\n", " with torch.no_grad():\n", " next_state_values = self.target_net(next_states).max(1)[0]\n", " next_state_values[dones] = 0.0\n", " next_state_values = next_state_values.detach()\n", "\n", " expected_state_action_values = next_state_values * self.hparams.gamma + rewards\n", "\n", " return nn.MSELoss()(state_action_values, expected_state_action_values)\n", "\n", " def get_epsilon(self, start: int, end: int, frames: int) -> float:\n", " if self.global_step > frames:\n", " return end\n", " return start - (self.global_step / frames) * (start - end)\n", "\n", " def training_step(self, batch: Tuple[Tensor, Tensor], nb_batch) -> OrderedDict:\n", " \"\"\"Carries out a single step through the environment to update the replay buffer. 
Then calculates loss based on\n", " the minibatch received.\n", "\n", " Args:\n", " batch: current mini batch of replay data\n", " nb_batch: batch number\n", "\n", " Returns:\n", " Training loss\n", "\n", " \"\"\"\n", " device = self.get_device(batch)\n", " epsilon = self.get_epsilon(self.hparams.eps_start, self.hparams.eps_end, self.hparams.eps_last_frame)\n", " self.log(\"epsilon\", epsilon)\n", "\n", " # step through environment with agent\n", " reward, done = self.agent.play_step(self.net, epsilon, device)\n", " self.episode_reward += reward\n", " self.log(\"episode reward\", self.episode_reward)\n", "\n", " # calculates training loss\n", " loss = self.dqn_mse_loss(batch)\n", "\n", " if done:\n", " self.total_reward = self.episode_reward\n", " self.episode_reward = 0\n", "\n", " # Sync target network with the online network (hard copy every sync_rate steps)\n", " if self.global_step % self.hparams.sync_rate == 0:\n", " self.target_net.load_state_dict(self.net.state_dict())\n", "\n", " self.log_dict(\n", " {\n", " \"reward\": reward,\n", " \"train_loss\": loss,\n", " }\n", " )\n", " self.log(\"total_reward\", self.total_reward, prog_bar=True)\n", " self.log(\"steps\", self.global_step, logger=False, prog_bar=True)\n", "\n", " return loss\n", "\n", " def configure_optimizers(self) -> Optimizer:\n", " \"\"\"Initialize Adam optimizer.\"\"\"\n", " optimizer = Adam(self.net.parameters(), lr=self.hparams.lr)\n", " return optimizer\n", "\n", " def __dataloader(self) -> DataLoader:\n", " \"\"\"Initialize the Replay Buffer dataset used for retrieving experiences.\"\"\"\n", " dataset = RLDataset(self.buffer, self.hparams.episode_length)\n", " dataloader = DataLoader(\n", " dataset=dataset,\n", " batch_size=self.hparams.batch_size,\n", " )\n", " return dataloader\n", "\n", " def train_dataloader(self) -> DataLoader:\n", " \"\"\"Get train loader.\"\"\"\n", " return self.__dataloader()\n", "\n", " def get_device(self, batch) -> str:\n", " \"\"\"Retrieve device currently being used by minibatch.\"\"\"\n", " return batch[0].device.index if self.on_gpu else \"cpu\""]}, {"cell_type": "markdown", "id": "d5c381bd", "metadata": {"papermill": {"duration": 0.003454, "end_time": "2025-04-03T20:56:56.845705", "exception": false, "start_time": "2025-04-03T20:56:56.842251", "status": "completed"}, "tags": []}, "source": ["### Trainer"]}, {"cell_type": "code", "execution_count": 9, "id": "f555ca49", "metadata": {"execution": {"iopub.execute_input": "2025-04-03T20:56:56.853662Z", "iopub.status.busy": "2025-04-03T20:56:56.853486Z", "iopub.status.idle": "2025-04-03T20:57:10.926026Z", "shell.execute_reply": "2025-04-03T20:57:10.925162Z"}, "papermill": {"duration": 14.079211, "end_time": "2025-04-03T20:57:10.928450", "exception": false, "start_time": "2025-04-03T20:56:56.849239", "status": "completed"}, "tags": []}, "outputs": [{"name": "stderr", "output_type": "stream", "text": ["/usr/local/lib/python3.10/dist-packages/gym/envs/registration.py:505: UserWarning: \u001b[33mWARN: The environment CartPole-v0 is out of date. You should consider upgrading to version `v1` with the environment ID `CartPole-v1`.\u001b[0m\n", " logger.warn(\n", "/usr/local/lib/python3.10/dist-packages/pygame/pkgdata.py:25: DeprecationWarning: pkg_resources is deprecated as an API. 
See https://setuptools.pypa.io/en/latest/pkg_resources.html\n", " from pkg_resources import resource_stream, resource_exists\n"]}, {"name": "stderr", "output_type": "stream", "text": ["GPU available: True (cuda), used: True\n"]}, {"name": "stderr", "output_type": "stream", "text": ["TPU available: False, using: 0 TPU cores\n"]}, {"name": "stderr", "output_type": "stream", "text": ["HPU available: False, using: 0 HPUs\n"]}, {"name": "stderr", "output_type": "stream", "text": ["You are using a CUDA device ('NVIDIA GeForce RTX 3090') that has Tensor Cores. To properly utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off precision for performance. For more details, read https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision\n"]}, {"name": "stderr", "output_type": "stream", "text": ["LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"]}, {"name": "stderr", "output_type": "stream", "text": ["\n", " | Name | Type | Params | Mode \n", "--------------------------------------------\n", "0 | net | DQN | 898 | train\n", "1 | target_net | DQN | 898 | train\n", "--------------------------------------------\n", "1.8 K Trainable params\n", "0 Non-trainable params\n", "1.8 K Total params\n", "0.007 Total estimated model params size (MB)\n", "10 Modules in train mode\n", "0 Modules in eval mode\n"]}, {"name": "stderr", "output_type": "stream", "text": ["/usr/local/lib/python3.10/dist-packages/pytorch_lightning/trainer/connectors/data_connector.py:424: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=63` in the `DataLoader` to improve performance.\n", "/usr/local/lib/python3.10/dist-packages/torch/utils/data/_utils/collate.py:175: DeprecationWarning: In future, it will be an error for 'np.bool_' scalars to be interpreted as an index\n", " return torch.as_tensor(batch)\n"]}, {"data": {"application/vnd.jupyter.widget-view+json": {"model_id": "57c3adb365fb4547a97b0ef3312d023e", "version_major": 2, "version_minor": 0}, "text/plain": ["Training: | | 0/? [00:00, ?it/s]"]}, "metadata": {}, "output_type": "display_data"}, {"name": "stderr", "output_type": "stream", "text": ["/tmp/ipykernel_691/3381455415.py:34: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. 
(Triggered internally at ../torch/csrc/utils/tensor_new.cpp:261.)\n", " state = torch.tensor([self.state])\n"]}, {"name": "stderr", "output_type": "stream", "text": ["`Trainer.fit` stopped: `max_epochs=150` reached.\n"]}], "source": ["\n", "model = DQNLightning()\n", "\n", "trainer = Trainer(\n", " accelerator=\"auto\",\n", " devices=1 if torch.cuda.is_available() else None, # limiting to 1 device for iPython runs\n", " max_epochs=150,\n", " val_check_interval=50,\n", " logger=CSVLogger(save_dir=\"logs/\"),\n", ")\n", "\n", "trainer.fit(model)"]}, {"cell_type": "code", "execution_count": 10, "id": "a3303256", "metadata": {"execution": {"iopub.execute_input": "2025-04-03T20:57:10.945930Z", "iopub.status.busy": "2025-04-03T20:57:10.945650Z", "iopub.status.idle": "2025-04-03T20:57:11.263740Z", "shell.execute_reply": "2025-04-03T20:57:11.262839Z"}, "papermill": {"duration": 0.328739, "end_time": "2025-04-03T20:57:11.265920", "exception": false, "start_time": "2025-04-03T20:57:10.937181", "status": "completed"}, "tags": []}, "outputs": [{"data": {"text/html": ["
\n", " | episode reward | \n", "epsilon | \n", "reward | \n", "total_reward | \n", "train_loss | \n", "
---|---|---|---|---|---|
epoch | \n", "\n", " | \n", " | \n", " | \n", " | \n", " |
3 | \n", "5.0 | \n", "0.95149 | \n", "1.0 | \n", "35.0 | \n", "2.484817 | \n", "
7 | \n", "6.0 | \n", "0.90199 | \n", "1.0 | \n", "14.0 | \n", "14.151343 | \n", "
11 | \n", "3.0 | \n", "0.85249 | \n", "1.0 | \n", "12.0 | \n", "1.501582 | \n", "
15 | \n", "15.0 | \n", "0.80299 | \n", "1.0 | \n", "29.0 | \n", "12.476444 | \n", "
19 | \n", "1.0 | \n", "0.75349 | \n", "1.0 | \n", "15.0 | \n", "66.397606 | \n", "