
Commit 3a58937

Borda authored and williamFalcon committed

rename variables nb -> num (#567)

* rename nb -> num
* flake8
* batch_nb, epoch_nb, gpu_nb, split_nb
* add _num deprecations

1 parent 63717e8 · commit 3a58937

30 files changed: +326 −290 lines
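The last bullet of the commit message, "add _num deprecations", implies the old `nb_*` spellings keep working for a while after the rename. As a rough illustration only — the mixin name, property mechanics, and warning text below are assumptions, not code taken from this commit — such a shim might look like:

```python
import warnings


class DeprecatedNbAttributesMixin:
    """Hypothetical backward-compatibility shim: reads of an old `nb_*`
    attribute forward to the renamed one while emitting a DeprecationWarning."""

    @property
    def nb_gpu_nodes(self):
        # warn, then delegate to the renamed attribute
        # (new name per the multi-node examples further down)
        warnings.warn("`nb_gpu_nodes` is deprecated, use `num_nodes` instead.",
                      DeprecationWarning)
        return self.num_nodes
```

A Trainer inheriting such a mixin would still answer `trainer.nb_gpu_nodes` while steering users toward `num_nodes`.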

README.md (+10 −10)

````diff
@@ -96,15 +96,15 @@ To use lightning do 2 things:
     def forward(self, x):
         return torch.relu(self.l1(x.view(x.size(0), -1)))

-    def training_step(self, batch, batch_nb):
+    def training_step(self, batch, batch_idx):
         # REQUIRED
         x, y = batch
         y_hat = self.forward(x)
         loss = F.cross_entropy(y_hat, y)
         tensorboard_logs = {'train_loss': loss}
         return {'loss': loss, 'log': tensorboard_logs}

-    def validation_step(self, batch, batch_nb):
+    def validation_step(self, batch, batch_idx):
         # OPTIONAL
         x, y = batch
         y_hat = self.forward(x)
@@ -154,16 +154,16 @@ use something other than tensorboard).
 Here are more advanced examples
 ```python
 # train on cpu using only 10% of the data (for demo purposes)
-trainer = Trainer(max_nb_epochs=1, train_percent_check=0.1)
+trainer = Trainer(max_num_epochs=1, train_percent_check=0.1)

 # train on 4 gpus (lightning chooses GPUs for you)
-# trainer = Trainer(max_nb_epochs=1, gpus=4, distributed_backend='ddp')
+# trainer = Trainer(max_num_epochs=1, gpus=4, distributed_backend='ddp')

 # train on 4 gpus (you choose GPUs)
-# trainer = Trainer(max_nb_epochs=1, gpus=[0, 1, 3, 7], distributed_backend='ddp')
+# trainer = Trainer(max_num_epochs=1, gpus=[0, 1, 3, 7], distributed_backend='ddp')

 # train on 32 gpus across 4 nodes (make sure to submit appropriate SLURM job)
-# trainer = Trainer(max_nb_epochs=1, gpus=8, nb_gpu_nodes=4, distributed_backend='ddp')
+# trainer = Trainer(max_num_epochs=1, gpus=8, num_gpu_nodes=4, distributed_backend='ddp')

 # train (1 epoch only here for demo)
 trainer.fit(model)
@@ -187,10 +187,10 @@ You define the blue parts using the LightningModule interface:

 ```python
 # what to do in the training loop
-def training_step(self, batch, batch_nb):
+def training_step(self, batch, batch_idx):

 # what to do in the validation loop
-def validation_step(self, batch, batch_nb):
+def validation_step(self, batch, batch_idx):

 # how to aggregate validation_step outputs
 def validation_end(self, outputs):
@@ -205,7 +205,7 @@ def test_dataloader():

 ```python
 # define what happens for training here
-def training_step(self, batch, batch_nb):
+def training_step(self, batch, batch_idx):
     x, y = batch

     # define your own forward and loss calculation
@@ -232,7 +232,7 @@ def training_step(self, batch, batch_nb):

 ```python
 # define what happens for validation here
-def validation_step(self, batch, batch_nb):
+def validation_step(self, batch, batch_idx):
     x, y = batch

     # or as basic as a CNN classification
````

docs/source/intro.md (+2 −2)

````diff
@@ -16,7 +16,7 @@ class BERT(pl.LightningModule):
         elif model_name == 'my_cool_version':
             self.net = MyCoolVersion()

-    def training_step(self, batch, batch_nb):
+    def training_step(self, batch, batch_idx):
         if self.task == 'standard_bert':
             # do standard bert training with self.net...
             # return loss
@@ -35,7 +35,7 @@ class CoolerNotBERT(pl.LightningModule):
     def __init__(self):
         self.net = ...

-    def training_step(self, batch, batch_nb):
+    def training_step(self, batch, batch_idx):
         # do some other cool task
         # return loss
 ```
````

pl_examples/domain_templates/gan.py (+3 −3)

````diff
@@ -90,12 +90,12 @@ def forward(self, z):
     def adversarial_loss(self, y_hat, y):
         return F.binary_cross_entropy(y_hat, y)

-    def training_step(self, batch, batch_nb, optimizer_i):
+    def training_step(self, batch, batch_idx, optimizer_idx):
         imgs, _ = batch
         self.last_imgs = imgs

         # train generator
-        if optimizer_i == 0:
+        if optimizer_idx == 0:
             # sample noise
             z = torch.randn(imgs.shape[0], self.hparams.latent_dim)

@@ -125,7 +125,7 @@ def training_step(self, batch, batch_nb, optimizer_i):
             return output

         # train discriminator
-        if optimizer_i == 1:
+        if optimizer_idx == 1:
             # Measure discriminator's ability to classify real from generated samples

             # how well can it label as real?
````
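The extra `optimizer_idx` argument in the hunk above only exists because the GAN template configures two optimizers. A minimal sketch of that pairing — the class name, layer sizes, and learning rates are placeholders for illustration, not taken from the commit — looks like this:

```python
import torch
import torch.nn as nn


class GANOptimizersSketch(nn.Module):  # stand-in for the pl.LightningModule in the template
    def __init__(self):
        super().__init__()
        self.generator = nn.Linear(100, 784)      # placeholder generator
        self.discriminator = nn.Linear(784, 1)    # placeholder discriminator

    def configure_optimizers(self):
        # Returning two optimizers is what makes Lightning call
        # training_step(batch, batch_idx, optimizer_idx) with
        # optimizer_idx == 0 for the generator and 1 for the discriminator.
        opt_g = torch.optim.Adam(self.generator.parameters(), lr=2e-4)
        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=2e-4)
        return [opt_g, opt_d]
```

The order of the returned list is what determines which `if optimizer_idx == ...` branch runs on a given call.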

pl_examples/full_examples/imagenet/imagenet_example.py (+1 −1)

````diff
@@ -234,7 +234,7 @@ def main(hparams):
     trainer = pl.Trainer(
         default_save_path=hparams.save_path,
         gpus=hparams.gpus,
-        max_nb_epochs=hparams.epochs,
+        max_num_epochs=hparams.epochs,
         distributed_backend=hparams.distributed_backend,
         use_amp=hparams.use_16bit
     )
````

pl_examples/multi_node_examples/multi_node_ddp2_demo.py (+1 −1)

````diff
@@ -31,7 +31,7 @@ def main(hparams):
     # ------------------------
     trainer = Trainer(
         gpus=2,
-        nb_gpu_nodes=2,
+        num_nodes=2,
         distributed_backend='ddp2'
     )

````

pl_examples/multi_node_examples/multi_node_ddp_demo.py (+1 −1)

````diff
@@ -31,7 +31,7 @@ def main(hparams):
     # ------------------------
     trainer = Trainer(
         gpus=2,
-        nb_gpu_nodes=2,
+        num_nodes=2,
         distributed_backend='ddp'
     )

````

pytorch_lightning/core/__init__.py (+3 −3)

````diff
@@ -34,13 +34,13 @@ def __init__(self):
     def forward(self, x):
         return torch.relu(self.l1(x.view(x.size(0), -1)))

-    def training_step(self, batch, batch_nb):
+    def training_step(self, batch, batch_idx):
         # REQUIRED
         x, y = batch
         y_hat = self.forward(x)
         return {'loss': F.cross_entropy(y_hat, y)}

-    def validation_step(self, batch, batch_nb):
+    def validation_step(self, batch, batch_idx):
         # OPTIONAL
         x, y = batch
         y_hat = self.forward(x)
@@ -51,7 +51,7 @@ def validation_end(self, outputs):
         avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
         return {'avg_val_loss': avg_loss}

-    def test_step(self, batch, batch_nb):
+    def test_step(self, batch, batch_idx):
         # OPTIONAL
         x, y = batch
         y_hat = self.forward(x)
````

pytorch_lightning/core/lightning.py (+32 −32)

````diff
@@ -109,7 +109,7 @@ def training_step(self, *args, **kwargs):
         """return loss, dict with metrics for tqdm

         :param batch: The output of your dataloader. A tensor, tuple or list
-        :param int batch_nb: Integer displaying which batch this is
+        :param int batch_idx: Integer displaying which batch this is
         :return: dict with loss key and optional log, progress keys
          if implementing training_step, return whatever you need in that step:
             - loss -> tensor scalar [REQUIRED]
@@ -124,7 +124,7 @@ def training_step(self, *args, **kwargs):

         .. code-block:: python

-            def training_step(self, batch, batch_nb):
+            def training_step(self, batch, batch_idx):
                 x, y, z = batch

                 # implement your own
@@ -150,7 +150,7 @@ def training_step(self, batch, batch_nb):
         .. code-block:: python

             # Multiple optimizers (ie: GANs)
-            def training_step(self, batch, batch_nb, optimizer_idx):
+            def training_step(self, batch, batch_idx, optimizer_idx):
                 if optimizer_idx == 0:
                     # do training_step with encoder
                 if optimizer_idx == 1:
@@ -163,7 +163,7 @@ def training_step(self, batch, batch_nb, optimizer_idx):
         .. code-block:: python

             # Truncated back-propagation through time
-            def training_step(self, batch, batch_nb, hiddens):
+            def training_step(self, batch, batch_idx, hiddens):
                 # hiddens are the hiddens from the previous truncated backprop step

         You can also return a -1 instead of a dict to stop the current loop. This is useful
@@ -192,9 +192,9 @@ def training_end(self, *args, **kwargs):
         .. code-block:: python

             # WITHOUT training_end
-            # if used in DP or DDP2, this batch is 1/nb_gpus large
-            def training_step(self, batch, batch_nb):
-                # batch is 1/nb_gpus big
+            # if used in DP or DDP2, this batch is 1/num_gpus large
+            def training_step(self, batch, batch_idx):
+                # batch is 1/num_gpus big
                 x, y = batch

                 out = self.forward(x)
@@ -204,8 +204,8 @@ def training_step(self, batch, batch_nb):

             # --------------
             # with training_end to do softmax over the full batch
-            def training_step(self, batch, batch_nb):
-                # batch is 1/nb_gpus big
+            def training_step(self, batch, batch_idx):
+                # batch is 1/num_gpus big
                 x, y = batch

                 out = self.forward(x)
@@ -225,7 +225,7 @@ def training_end(self, outputs):
         .. code-block:: python

             # Multiple optimizers (ie: GANs)
-            def training_step(self, batch, batch_nb, optimizer_idx):
+            def training_step(self, batch, batch_idx, optimizer_idx):
                 if optimizer_idx == 0:
                     # do training_step with encoder
                 if optimizer_idx == 1:
@@ -237,7 +237,7 @@ def training_step(self, batch, batch_nb, optimizer_idx):
         .. code-block:: python

             # Truncated back-propagation through time
-            def training_step(self, batch, batch_nb, hiddens):
+            def training_step(self, batch, batch_idx, hiddens):
                 # hiddens are the hiddens from the previous truncated backprop step

         You can also return a -1 instead of a dict to stop the current loop. This is useful if you want to
@@ -249,17 +249,17 @@ def validation_step(self, *args, **kwargs):
         """return whatever outputs will need to be aggregated in validation_end

         :param batch: The output of your dataloader. A tensor, tuple or list
-        :param int batch_nb: Integer displaying which batch this is
+        :param int batch_idx: Integer displaying which batch this is
         :param int dataloader_idx: Integer displaying which dataloader this is (only if multiple val datasets used)
         :return dict: Dict or OrderedDict - passed to the validation_end step

         .. code-block:: python

             # if you have one val dataloader:
-            def validation_step(self, batch, batch_nb)
+            def validation_step(self, batch, batch_idx)

             # if you have multiple val dataloaders:
-            def validation_step(self, batch, batch_nb, dataloader_idxdx)
+            def validation_step(self, batch, batch_idx, dataloader_idxdx)

         If you don't need to validate you don't need to implement this method.
         In this step you'd normally generate examples or calculate anything of interest such as accuracy.
@@ -275,7 +275,7 @@ def validation_step(self, batch, batch_nb, dataloader_idxdx)
         .. code-block:: python

             # CASE 1: A single validation dataset
-            def validation_step(self, batch, batch_nb):
+            def validation_step(self, batch, batch_idx):
                 x, y = batch

                 # implement your own
@@ -307,7 +307,7 @@ def validation_step(self, batch, batch_nb):
         .. code-block:: python

             # CASE 2: multiple validation datasets
-            def validation_step(self, batch, batch_nb, dataset_idx):
+            def validation_step(self, batch, batch_idx, dataset_idx):
                 # dataset_idx tells you which dataset this is.

         The `dataset_idx` corresponds to the order of datasets returned in `val_dataloader`.
@@ -318,17 +318,17 @@ def test_step(self, *args, **kwargs):
         """return whatever outputs will need to be aggregated in test_end

         :param batch: The output of your dataloader. A tensor, tuple or list
-        :param int batch_nb: Integer displaying which batch this is
+        :param int batch_idx: Integer displaying which batch this is
         :param int dataloader_idx: Integer displaying which dataloader this is (only if multiple test datasets used)
         :return dict: Dict or OrderedDict with metrics to display in progress bar. All keys must be tensors.

         .. code-block:: python

             # if you have one test dataloader:
-            def test_step(self, batch, batch_nb)
+            def test_step(self, batch, batch_idx)

             # if you have multiple test dataloaders:
-            def test_step(self, batch, batch_nb, dataloader_idxdx)
+            def test_step(self, batch, batch_idx, dataloader_idxdx)


         **OPTIONAL**
@@ -348,7 +348,7 @@ def test_step(self, batch, batch_nb, dataloader_idxdx)
         .. code-block:: python

             # CASE 1: A single test dataset
-            def test_step(self, batch, batch_nb):
+            def test_step(self, batch, batch_idx):
                 x, y = batch

                 # implement your own
@@ -375,7 +375,7 @@ def test_step(self, batch, batch_nb):
         .. code-block:: python

             # CASE 2: multiple test datasets
-            def test_step(self, batch, batch_nb, dataset_idx):
+            def test_step(self, batch, batch_idx, dataset_idx):
                 # dataset_idx tells you which dataset this is.


@@ -694,13 +694,13 @@ def configure_optimizers(self):
         """
         raise NotImplementedError

-    def optimizer_step(self, epoch_nb, batch_nb, optimizer, optimizer_i, second_order_closure=None):
+    def optimizer_step(self, epoch_idx, batch_idx, optimizer, optimizer_idx, second_order_closure=None):
         """Do something instead of the standard optimizer behavior

-        :param int epoch_nb:
-        :param int batch_nb:
+        :param int epoch_idx:
+        :param int batch_idx:
         :param optimizer:
-        :param optimizer_i:
+        :param optimizer_idx:
         :param second_order_closure: closure for second order methods
         :return:

@@ -712,21 +712,21 @@ def optimizer_step(self, epoch_nb, batch_nb, optimizer, optimizer_i, second_orde
         .. code-block:: python

             # DEFAULT
-            def optimizer_step(self, current_epoch, batch_nb, optimizer, optimizer_i, second_order_closure=None):
+            def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):
                 optimizer.step()
                 optimizer.zero_grad()

             # Alternating schedule for optimizer steps (ie: GANs)
-            def optimizer_step(self, current_epoch, batch_nb, optimizer, optimizer_i, second_order_closure=None):
+            def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):
                 # update generator opt every 2 steps
-                if optimizer_i == 0:
-                    if batch_nb % 2 == 0 :
+                if optimizer_idx == 0:
+                    if batch_idx % 2 == 0 :
                         optimizer.step()
                         optimizer.zero_grad()

                 # update discriminator opt every 4 steps
-                if optimizer_i == 1:
-                    if batch_nb % 4 == 0 :
+                if optimizer_idx == 1:
+                    if batch_idx % 4 == 0 :
                         optimizer.step()
                         optimizer.zero_grad()

@@ -739,7 +739,7 @@ def optimizer_step(self, current_epoch, batch_nb, optimizer, optimizer_i, second
         .. code-block:: python

             # learning rate warm-up
-            def optimizer_step(self, current_epoch, batch_nb, optimizer, optimizer_i, second_order_closure=None):
+            def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):
                 # warm up lr
                 if self.trainer.global_step < 500:
                     lr_scale = min(1., float(self.trainer.global_step + 1) / 500.)
````
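The `validation_step` docstring above also covers the case of several validation dataloaders, which is what adds the extra index argument. For orientation, a self-contained sketch under the new names — the random tensors, layer size, loader contents, and returned keys are invented for illustration, not part of the commit — could look like:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset


class MultiValSketch(nn.Module):  # stand-in for a pl.LightningModule
    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(28 * 28, 10)

    def forward(self, x):
        return self.l1(x.view(x.size(0), -1))

    def val_dataloader(self):
        # Returning a list of loaders is what triggers the extra index argument.
        clean = TensorDataset(torch.randn(64, 1, 28, 28), torch.randint(0, 10, (64,)))
        noisy = TensorDataset(torch.randn(64, 1, 28, 28), torch.randint(0, 10, (64,)))
        return [DataLoader(clean, batch_size=32), DataLoader(noisy, batch_size=32)]

    def validation_step(self, batch, batch_idx, dataloader_idx):
        x, y = batch
        y_hat = self.forward(x)
        # Record which loader the loss came from so validation_end can aggregate per dataset.
        return {'val_loss': F.cross_entropy(y_hat, y), 'dataloader_idx': dataloader_idx}
```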
