Remove error when test dataloader used in test #1495

Merged: 32 commits, Apr 16, 2020
Changes shown from 29 of 32 commits

Commits (32):
9262acc  remove error when test dataloader used in test (williamFalcon, Apr 15, 2020)
b433577  remove error when test dataloader used in test (williamFalcon, Apr 15, 2020)
ecc7d2a  remove error when test dataloader used in test (williamFalcon, Apr 15, 2020)
4bbf9a7  remove error when test dataloader used in test (williamFalcon, Apr 15, 2020)
11404ca  remove error when test dataloader used in test (williamFalcon, Apr 15, 2020)
788cb01  remove error when test dataloader used in test (williamFalcon, Apr 15, 2020)
8bf9b4d  fix lost model reference (williamFalcon, Apr 15, 2020)
5b57c54  remove error when test dataloader used in test (williamFalcon, Apr 15, 2020)
168c96c  fix lost model reference (williamFalcon, Apr 15, 2020)
1211b57  moved optimizer types (williamFalcon, Apr 15, 2020)
7eb08e6  moved optimizer types (williamFalcon, Apr 15, 2020)
27b435f  moved optimizer types (williamFalcon, Apr 15, 2020)
39b9cfb  moved optimizer types (williamFalcon, Apr 15, 2020)
77be73d  moved optimizer types (williamFalcon, Apr 15, 2020)
86f681c  moved optimizer types (williamFalcon, Apr 15, 2020)
a027eda  moved optimizer types (williamFalcon, Apr 15, 2020)
03c26af  moved optimizer types (williamFalcon, Apr 15, 2020)
9839cf3  added tests for warning (williamFalcon, Apr 15, 2020)
329f887  fix lost model reference (williamFalcon, Apr 15, 2020)
77b98e5  fix lost model reference (williamFalcon, Apr 15, 2020)
f51523e  added tests for warning (williamFalcon, Apr 15, 2020)
8aa5b8d  added tests for warning (williamFalcon, Apr 15, 2020)
5cba21d  refactoring (Borda, Apr 15, 2020)
5555b41  refactoring (Borda, Apr 15, 2020)
7dfcb8f  fix imports (Borda, Apr 15, 2020)
9748952  refactoring (Borda, Apr 15, 2020)
ca64314  fix imports (Borda, Apr 15, 2020)
9275762  refactoring (Borda, Apr 15, 2020)
686aa34  fix tests (Borda, Apr 15, 2020)
b9626de  fix mnist (Borda, Apr 15, 2020)
0460e23  flake8 (Borda, Apr 15, 2020)
e2bb08d  review (Borda, Apr 15, 2020)
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -111,6 +111,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

### Removed

- Removed test for no test dataloader in .fit ([#1495](https://github.com/PyTorchLightning/pytorch-lightning/pull/1495))
Suggested change (review comment): format `.fit()` as code:
- Removed test for no test dataloader in `.fit()` ([#1495](https://github.com/PyTorchLightning/pytorch-lightning/pull/1495))

- Removed duplicated module `pytorch_lightning.utilities.arg_parse` for loading CLI arguments ([#1167](https://github.com/PyTorchLightning/pytorch-lightning/issues/1167))
- Removed wandb logger's `finalize` method ([#1193](https://github.com/PyTorchLightning/pytorch-lightning/pull/1193))
- Dropped `torchvision` dependency in tests and added own MNIST dataset class instead ([#986](https://github.com/PyTorchLightning/pytorch-lightning/issues/986))
10 changes: 5 additions & 5 deletions benchmarks/test_trainer_parity.py
@@ -11,7 +11,7 @@
import tests.base.utils as tutils

from pytorch_lightning import Trainer, LightningModule
-from tests.base.datasets import TestingMNIST
+from tests.base.datasets import TrialMNIST


class ParityMNIST(LightningModule):
@@ -42,10 +42,10 @@ def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.02)

    def train_dataloader(self):
-        return DataLoader(TestingMNIST(train=True,
-                                       download=True,
-                                       num_samples=500,
-                                       digits=list(range(5))),
+        return DataLoader(TrialMNIST(train=True,
+                                     download=True,
+                                     num_samples=500,
+                                     digits=list(range(5))),
                          batch_size=128)


6 changes: 2 additions & 4 deletions pytorch_lightning/trainer/evaluation_loop.py
@@ -418,10 +418,8 @@ def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test_mode:
        # make dataloader_idx arg in validation_step optional
        args = [batch, batch_idx]

-        if test_mode and len(self.test_dataloaders) > 1:
-            args.append(dataloader_idx)
-
-        elif not test_mode and len(self.val_dataloaders) > 1:
+        if (test_mode and len(self.test_dataloaders) > 1) \
+                or (not test_mode and len(self.val_dataloaders) > 1):
            args.append(dataloader_idx)

        # handle DP, DDP forward
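For context, a hedged sketch (not part of this diff) of the hook signature the appended index feeds into; `MultiLoaderModel` is an illustrative name, and `forward()` is assumed to be defined elsewhere:

import torch.nn.functional as F
from pytorch_lightning import LightningModule


class MultiLoaderModel(LightningModule):
    def test_step(self, batch, batch_idx, dataloader_idx):
        # dataloader_idx identifies which registered test loader produced
        # this batch; it is only passed when more than one loader exists
        x, y = batch
        return {'test_loss': F.nll_loss(self(x), y)}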
2 changes: 2 additions & 0 deletions pytorch_lightning/trainer/model_hooks.py
@@ -21,6 +21,8 @@ def is_overriden(self, method_name: str, model: LightningModule = None) -> bool:
            return False

        instance_attr = getattr(model, method_name)
+        if not instance_attr:
+            return False
        super_attr = getattr(super_object, method_name)

        # when code pointers are different, it was implemented
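One case this guard appears to address (a hedged example; the class name is illustrative): an attribute that exists but is falsy should not count as an override, and without the early return the code-pointer comparison below would fail on the `None` value:

from pytorch_lightning import LightningModule


class NoTestLoaderModel(LightningModule):
    # the attribute exists but is None, so is_overriden() now returns False
    # instead of attempting to compare __code__ pointers on None
    test_dataloader = None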
32 changes: 24 additions & 8 deletions pytorch_lightning/trainer/trainer.py
@@ -939,10 +939,11 @@ def test(self, model: Optional[LightningModule] = None, test_dataloaders: Option
        self.testing = True

        if test_dataloaders is not None:
-            if model is not None:
-                self.__attach_dataloaders(model, test_dataloaders=test_dataloaders)
-            else:
-                self.__attach_dataloaders(self.model, test_dataloaders=test_dataloaders)
+            self.__attach_dataloaders(model if model else self.model,
+                                      test_dataloaders=test_dataloaders)
+
+            # give proper warnings if user only passed in loader without hooks
+            self.check_testing_model_configuration(model if model else self.model)

        if model is not None:
            self.model = model
@@ -1012,10 +1013,25 @@ def check_model_configuration(self, model: LightningModule):
                'You have defined a `test_dataloader()` and have defined a `test_step()`, you may also want to'
                ' define `test_epoch_end()` for accumulating stats.', RuntimeWarning
            )
-        else:
-            if self.is_overriden('test_step', model):
-                raise MisconfigurationException('You have defined `test_step()`,'
-                                                ' but have not passed in a `test_dataloader()`.')
+
+    def check_testing_model_configuration(self, model: LightningModule):
+
+        has_test_step = self.is_overriden('test_step', model)
+        has_test_epoch_end = self.is_overriden('test_epoch_end', model)
+        gave_test_loader = hasattr(model, 'test_dataloader') and model.test_dataloader()
+
+        if gave_test_loader and not has_test_step:
+            raise MisconfigurationException('You passed in a `test_dataloader` but did not implement `test_step()`')
+
+        if has_test_step and not gave_test_loader:
+            raise MisconfigurationException('You defined `test_step()` but did not implement'
+                                            ' `test_dataloader` nor passed in `.fit(test_dataloaders=...)`.')
+
+        if has_test_step and gave_test_loader and not has_test_epoch_end:
+            rank_zero_warn(
+                'You passed in a `test_dataloader` and have defined a `test_step()`, you may also want to'
+                ' define `test_epoch_end()` for accumulating stats.', RuntimeWarning
+            )


class _PatchDataLoader(object):
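To summarize the behavior the new check enforces, a hedged sketch from the caller's side; `model_without_test_step`, `model_with_test_step_only`, `full_model` and `test_loader` are placeholder names, not objects from this PR:

from pytorch_lightning import Trainer

trainer = Trainer()

# a loader is passed but the model lacks test_step() -> MisconfigurationException
trainer.test(model_without_test_step, test_dataloaders=test_loader)

# test_step() is defined but no loader was defined or passed -> MisconfigurationException
trainer.test(model_with_test_step_only)

# step and loader present but test_epoch_end() missing -> RuntimeWarning, test proceeds
trainer.test(full_model, test_dataloaders=test_loader)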
1 change: 1 addition & 0 deletions tests/base/__init__.py
@@ -3,6 +3,7 @@
import torch

from tests.base.models import TestModelBase, DictHparamsModel
+from tests.base.eval_model_template import EvalModelTemplate
from tests.base.mixins import (
LightEmptyTestStep,
LightValidationStepMixin,
4 changes: 2 additions & 2 deletions tests/base/datasets.py
@@ -111,7 +111,7 @@ def normalize_tensor(tensor: Tensor, mean: float = 0.0, std: float = 1.0) -> Ten
return tensor


-class TestingMNIST(MNIST):
+class TrialMNIST(MNIST):
"""Constrain image dataset

Args:
@@ -127,7 +127,7 @@ class TestingMNIST(MNIST):
        digits: list selected MNIST digits/classes

    Examples:
-        >>> dataset = TestingMNIST(download=True)
+        >>> dataset = TrialMNIST(download=True)
        >>> len(dataset)
        300
        >>> sorted(set([d.item() for d in dataset.targets]))
8 changes: 4 additions & 4 deletions tests/base/debug.py
@@ -3,7 +3,7 @@
from torch.utils.data import DataLoader

import pytorch_lightning as pl
-from tests.base.datasets import TestingMNIST
+from tests.base.datasets import TrialMNIST


# from test_models import assert_ok_test_acc, load_model, \
@@ -42,10 +42,10 @@ def configure_optimizers(self):
        return [torch.optim.Adam(self.parameters(), lr=0.02)]

    def train_dataloader(self):
-        return DataLoader(TestingMNIST(train=True, num_samples=100), batch_size=16)
+        return DataLoader(TrialMNIST(train=True, num_samples=100), batch_size=16)

    def val_dataloader(self):
-        return DataLoader(TestingMNIST(train=False, num_samples=50), batch_size=16)
+        return DataLoader(TrialMNIST(train=False, num_samples=50), batch_size=16)

    def test_dataloader(self):
-        return DataLoader(TestingMNIST(train=False, num_samples=50), batch_size=16)
+        return DataLoader(TrialMNIST(train=False, num_samples=50), batch_size=16)
61 changes: 61 additions & 0 deletions tests/base/eval_model_optimizers.py
@@ -0,0 +1,61 @@
from abc import ABC

from torch import optim


class ConfigureOptimizersPool(ABC):
    def configure_optimizers(self):
        """
        return whatever optimizers we want here.
        :return: list of optimizers
        """
        optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        return optimizer

    def configure_optimizers_empty(self):
        return None

    def configure_optimizers_lbfgs(self):
        """
        return whatever optimizers we want here.
        :return: list of optimizers
        """
        optimizer = optim.LBFGS(self.parameters(), lr=self.hparams.learning_rate)
        return optimizer

    def configure_optimizers_multiple_optimizers(self):
        """
        return whatever optimizers we want here.
        :return: list of optimizers
        """
        # try no scheduler for this model (testing purposes)
        optimizer1 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        optimizer2 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        return optimizer1, optimizer2

    def configure_optimizers_single_scheduler(self):
        optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)
        return [optimizer], [lr_scheduler]

    def configure_optimizers_multiple_schedulers(self):
        optimizer1 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        optimizer2 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, 1, gamma=0.1)
        lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, 1, gamma=0.1)

        return [optimizer1, optimizer2], [lr_scheduler1, lr_scheduler2]

    def configure_optimizers_mixed_scheduling(self):
        optimizer1 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        optimizer2 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, 4, gamma=0.1)
        lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, 1, gamma=0.1)

        return [optimizer1, optimizer2], \
            [{'scheduler': lr_scheduler1, 'interval': 'step'}, lr_scheduler2]

    def configure_optimizers_reduce_lr_on_plateau(self):
        optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer)
        return [optimizer], [lr_scheduler]
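These optimizer variations appear designed to be swapped in per test; a minimal sketch of the presumed pattern, where `model` stands for an instance of the `EvalModelTemplate` introduced in the next file:

from pytorch_lightning import Trainer

# illustrative only: rebind the hook so fit() exercises one variation
model.configure_optimizers = model.configure_optimizers_lbfgs
trainer = Trainer(max_epochs=1)
trainer.fit(model)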
80 changes: 80 additions & 0 deletions tests/base/eval_model_template.py
@@ -0,0 +1,80 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

from tests.base.datasets import TrialMNIST
from pytorch_lightning.core.lightning import LightningModule
from tests.base.eval_model_optimizers import ConfigureOptimizersPool
from tests.base.eval_model_test_dataloaders import TestDataloaderVariations
from tests.base.eval_model_test_epoch_ends import TestEpochEndVariations
from tests.base.eval_model_test_steps import TestStepVariations
from tests.base.eval_model_train_dataloaders import TrainDataloaderVariations
from tests.base.eval_model_train_steps import TrainingStepVariations
from tests.base.eval_model_valid_dataloaders import ValDataloaderVariations
from tests.base.eval_model_valid_epoch_ends import ValidationEpochEndVariations
from tests.base.eval_model_valid_steps import ValidationStepVariations
from tests.base.eval_model_utils import ModelTemplateUtils


class EvalModelTemplate(
    ModelTemplateUtils,
    TrainingStepVariations,
    ValidationStepVariations,
    ValidationEpochEndVariations,
    TestStepVariations,
    TestEpochEndVariations,
    TrainDataloaderVariations,
    ValDataloaderVariations,
    TestDataloaderVariations,
    ConfigureOptimizersPool,
    LightningModule
):
    """
    This template houses all combinations of model configurations we want to test
    """
    def __init__(self, hparams):
        """Pass in parsed HyperOptArgumentParser to the model."""
        # init superclass
        super().__init__()
        self.hparams = hparams

        # if you specify an example input, the summary will show input/output for each layer
        self.example_input_array = torch.rand(5, 28 * 28)

        # build model
        self.__build_model()

    def __build_model(self):
        """
        Simple model for testing
        :return:
        """
        self.c_d1 = nn.Linear(
            in_features=self.hparams.in_features,
            out_features=self.hparams.hidden_dim
        )
        self.c_d1_bn = nn.BatchNorm1d(self.hparams.hidden_dim)
        self.c_d1_drop = nn.Dropout(self.hparams.drop_prob)

        self.c_d2 = nn.Linear(
            in_features=self.hparams.hidden_dim,
            out_features=self.hparams.out_features
        )

    def forward(self, x):
        x = self.c_d1(x)
        x = torch.tanh(x)
        x = self.c_d1_bn(x)
        x = self.c_d1_drop(x)

        x = self.c_d2(x)
        logits = F.log_softmax(x, dim=1)

        return logits

    def loss(self, labels, logits):
        nll = F.nll_loss(logits, labels)
        return nll

    def prepare_data(self):
        _ = TrialMNIST(root=self.hparams.data_root, train=True, download=True)
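For reference, a hedged construction sketch: the template reads `in_features`, `hidden_dim`, `drop_prob`, `out_features`, `learning_rate` and `data_root` from `hparams`, so a plain `Namespace` suffices; the values below are illustrative, not taken from this PR:

from argparse import Namespace

from tests.base import EvalModelTemplate

hparams = Namespace(in_features=28 * 28, hidden_dim=1000, drop_prob=0.2,
                    out_features=10, learning_rate=0.001, data_root='./datasets')
model = EvalModelTemplate(hparams)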
11 changes: 11 additions & 0 deletions tests/base/eval_model_test_dataloaders.py
@@ -0,0 +1,11 @@
from abc import ABC, abstractmethod


class TestDataloaderVariations(ABC):

    @abstractmethod
    def dataloader(self, train: bool):
        """placeholder"""

    def test_dataloader(self):
        return self.dataloader(train=False)
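The abstract `dataloader` hook is expected to be implemented elsewhere (presumably in `ModelTemplateUtils`, which this diff does not show); a minimal sketch of a compatible implementation:

from torch.utils.data import DataLoader

from tests.base.datasets import TrialMNIST


def dataloader(self, train: bool):
    # illustrative only: any method returning a DataLoader satisfies the contract
    return DataLoader(TrialMNIST(train=train, download=True), batch_size=32)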
39 changes: 39 additions & 0 deletions tests/base/eval_model_test_epoch_ends.py
@@ -0,0 +1,39 @@
from abc import ABC

import torch


class TestEpochEndVariations(ABC):

    def test_epoch_end(self, outputs):
        """
        Called at the end of testing to aggregate outputs
        :param outputs: list of individual outputs of each test step
        :return:
        """
        # if returned a scalar from test_step, outputs is a list of tensor scalars
        # we return just the average in this case (if we want)
        # return torch.stack(outputs).mean()
        test_loss_mean = 0
        test_acc_mean = 0
        for output in outputs:
            test_loss = self.get_output_metric(output, 'test_loss')

            # reduce manually when using dp
            if self.trainer.use_dp:
                test_loss = torch.mean(test_loss)
            test_loss_mean += test_loss

            # reduce manually when using dp
            test_acc = self.get_output_metric(output, 'test_acc')
            if self.trainer.use_dp:
                test_acc = torch.mean(test_acc)

            test_acc_mean += test_acc

        test_loss_mean /= len(outputs)
        test_acc_mean /= len(outputs)

        metrics_dict = {'test_loss': test_loss_mean.item(), 'test_acc': test_acc_mean.item()}
        result = {'progress_bar': metrics_dict, 'log': metrics_dict}
        return result
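`get_output_metric` is supplied by a helper mixin outside this diff (presumably `ModelTemplateUtils`); a hedged sketch of the behavior the aggregation loop above relies on:

# illustrative only: fetch a named metric from a test_step output,
# tolerating both a plain dict and a list of dicts (e.g. per dataloader)
def get_output_metric(self, output, name):
    if isinstance(output, dict):
        return output[name]
    return sum(out[name] for out in output) / len(output)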