batch_ADAM.py
from pytorch_lightning.utilities.types import STEP_OUTPUT
from torch.utils.data import DataLoader
from torch.optim import Adam

from framework.optimization.base import Optimization
from examples.models.types import Model


class AdamBatchOptimization(Optimization):
    def __init__(self,
                 model: Model,            # The model to optimize
                 data: dict,              # The dictionary of Datasets defined in the previous 'Data' section
                 num_workers: int,        # Number of workers for the DataLoader
                 batch_size: int,         # Batch size
                 lr: float,               # Learning rate
                 pin_memory: bool = True  # Flag to enable memory pinning
                 ):
        super().__init__()
        self.model = model
        self.data = data
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.lr = lr
        self.pin_memory = pin_memory

    def log_counters(self) -> None:
        # Log every counter tracked by the base Optimization class
        for counter, value in self.counters.items():
            self.log(counter, value)

    # This overrides pl.LightningModule.train_dataloader, which is used by the Trainer
    def train_dataloader(self):
        return DataLoader(self.data['train'],
                          batch_size=self.batch_size,
                          num_workers=self.num_workers,
                          shuffle=True,
                          pin_memory=self.pin_memory)

    def training_step(self, data, data_idx) -> STEP_OUTPUT:
        # Delegate the loss computation to the model and log each loss term
        loss_items = self.model.compute_loss(data, data_idx)
        for name, value in loss_items.items():
            self.log('Train/%s' % name, value)
        self.counters['iteration'] += 1
        self.log_counters()
        return loss_items

    def configure_optimizers(self):
        # Plain Adam over all of the model's parameters
        return Adam(self.model.parameters(), lr=self.lr)
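
A minimal usage sketch, assuming the framework package is importable and that Optimization is a pl.LightningModule subclass (as the train_dataloader comment above suggests). ToyModel and the random TensorDataset below are illustrative stand-ins, not part of this file; only the two members the file actually uses (parameters() and compute_loss()) are assumed for the Model interface.

import torch
import pytorch_lightning as pl
from torch import nn
from torch.utils.data import TensorDataset

# Toy stand-in for a Model; the real type from examples.models.types may require more.
class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(8, 1)

    def compute_loss(self, data, data_idx):
        x, y = data
        pred = self.linear(x)
        # Lightning expects a 'loss' key in the dict returned from training_step
        return {'loss': nn.functional.mse_loss(pred, y)}

datasets = {'train': TensorDataset(torch.randn(64, 8), torch.randn(64, 1))}

optimization = AdamBatchOptimization(model=ToyModel(),
                                     data=datasets,
                                     num_workers=0,
                                     batch_size=16,
                                     lr=1e-3)

trainer = pl.Trainer(max_epochs=1)
trainer.fit(optimization)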