Commit 6d22bde

Merge remote-tracking branch 'origin/master' into tb_use_gfile
2 parents: 6175d4e + 6ebe0d7


72 files changed: +299, -266 lines

.github/CONTRIBUTING.md (+1, -1)

@@ -155,7 +155,7 @@ formatting errors. In certain cases, a missing blank line or a wrong indent can
 Run these commands
 
 ```bash
-pip install ".[docs]"
+pip install -r requirements/docs.txt
 cd docs
 make html
 ```

.pyrightconfig.json (+1, -1)

@@ -7,7 +7,7 @@
 "pytorch_lightning/__init__.py",
 "pytorch_lightning/callbacks",
 "pytorch_lightning/core",
-"pytorch_lightning/accelerator_backends",
+"pytorch_lightning/accelerators",
 "pytorch_lightning/loggers",
 "pytorch_lightning/logging",
 "pytorch_lightning/metrics",

.run_local_tests.sh (-5)

@@ -6,11 +6,6 @@ export SLURM_LOCALID=0
 
 # use this to run tests
 rm -rf _ckpt_*
-rm -rf ./tests/save_dir*
-rm -rf ./tests/mlruns_*
-rm -rf ./tests/cometruns*
-rm -rf ./tests/wandb*
-rm -rf ./tests/tests/*
 rm -rf ./lightning_logs
 python -m coverage run --source pytorch_lightning -m py.test pytorch_lightning tests pl_examples -v --flake8
 python -m coverage report -m

CHANGELOG.md (+2)

@@ -106,6 +106,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Fixed LR finder and `hparams` compatibility ([#2821](https://github.com/PyTorchLightning/pytorch-lightning/pull/2821))
 
+- Fixed `ModelCheckpoint` not saving the latest information when `save_last=True` ([#2881](https://github.com/PyTorchLightning/pytorch-lightning/pull/2881))
+
 ## [0.8.5] - 2020-07-09
 
 ### Added
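
The `save_last=True` entry above concerns the `ModelCheckpoint` callback; a minimal usage sketch of that option follows (the checkpoint path pattern and the `LitModel` module are placeholders, not taken from this commit):

```python
# Hedged sketch: how save_last=True is typically wired into a Trainer.
# `LitModel` and the filepath pattern are placeholders, not part of this commit.
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint

checkpoint_callback = ModelCheckpoint(
    filepath='checkpoints/{epoch}-{val_loss:.2f}',  # best checkpoints, named by metric
    save_last=True,  # also keep an always-current "last" checkpoint for resuming
)

trainer = Trainer(checkpoint_callback=checkpoint_callback, max_epochs=10)
# trainer.fit(LitModel())  # LitModel: any LightningModule
```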

MANIFEST.in (+1, -1)

@@ -26,7 +26,7 @@ exclude tests
 recursive-exclude docs *
 exclude docs
 recursive-include docs/source/_images/logos/ *
-recursive-include docs/source/_images/general/ pl_overview* tf_* tutorial_*
+recursive-include docs/source/_images/general/ pl_overview* tf_* tutorial_* PTL101_*
 
 # Include the Requirements
 recursive-include requirements *.txt

README.md (+5)

@@ -88,6 +88,11 @@ Once you do this, you can train on multiple-GPUs, TPUs, CPUs and even in 16-bit
 
 Get started with our [QUICK START PAGE](https://pytorch-lightning.readthedocs.io/en/stable/new-project.html)
 
+---
+### [Tune in for our PyTorch Lightning 101 class with William Falcon and Alfredo Canziani! New episodes every week!](https://www.youtube.com/watch?v=DbESHcCoWbM&list=PLaMu-SDt_RB5NUm67hU2pdE75j6KaIOv2)
+[![IMAGE ALT TEXT HERE](docs/source/_images/general/PTL101_youtube_thumbnail.jpg)](https://www.youtube.com/watch?v=DbESHcCoWbM&list=PLaMu-SDt_RB5NUm67hU2pdE75j6KaIOv2)
+---
+
 ## Refactoring your PyTorch code + benefits + full walk-through
 [![Watch the video](docs/source/_images/general/tutorial_cover.jpg)](https://www.youtube.com/watch?v=QHww1JH7IDU)
 

dockers/cuda-extras/Dockerfile (-1)

@@ -39,7 +39,6 @@ RUN apt-get update && \
 && \
 
 # Install AMP
-# TODO: skip this instrall for PT >= 1.6
 bash install_AMP.sh && \
 # Install all requirements
 pip install -r requirements.txt && \

docs/source/conf.py (+1, -1)

@@ -138,7 +138,7 @@
 exclude_patterns = [
     'api/pytorch_lightning.rst',
     'api/pl_examples.*',
-    'api/pytorch_lightning.accelerator_backends.*',
+    'api/pytorch_lightning.accelerators.*',
     'api/modules.rst',
     'PULL_REQUEST_TEMPLATE.md',
 ]

docs/source/index.rst (+1)

@@ -63,6 +63,7 @@ PyTorch Lightning Documentation
    :name: Tutorials
    :caption: Tutorials
 
+   PyTorch Lightning 101 class <https://www.youtube.com/playlist?list=PLaMu-SDt_RB5NUm67hU2pdE75j6KaIOv2>
    From PyTorch to PyTorch Lightning <https://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09>
    Video on how to refactor PyTorch into PyTorch Lightning <https://www.youtube.com/watch?v=QHww1JH7IDU>
 

pl_examples/basic_examples/cpu_template.py (+1, -1)

@@ -4,8 +4,8 @@
 import os
 from argparse import ArgumentParser
 
-from pytorch_lightning import Trainer, seed_everything
 from pl_examples.models.lightning_template import LightningTemplateModel
+from pytorch_lightning import Trainer, seed_everything
 
 seed_everything(234)
 

pl_examples/basic_examples/gpu_template.py (+1, -1)

@@ -4,8 +4,8 @@
 import os
 from argparse import ArgumentParser
 
-from pytorch_lightning import Trainer, seed_everything
 from pl_examples.models.lightning_template import LightningTemplateModel
+from pytorch_lightning import Trainer, seed_everything
 
 seed_everything(234)
 

pl_examples/basic_examples/multi_node_ddp2_demo.py (+1, -1)

@@ -4,8 +4,8 @@
 import os
 from argparse import ArgumentParser
 
-from pytorch_lightning import Trainer, seed_everything
 from pl_examples.models.lightning_template import LightningTemplateModel
+from pytorch_lightning import Trainer, seed_everything
 
 seed_everything(234)
 

pl_examples/domain_templates/computer_vision_fine_tuning.py (+4, -4)

@@ -27,13 +27,10 @@
 from tempfile import TemporaryDirectory
 from typing import Optional, Generator, Union
 
-from torch.nn import Module
-
-import pytorch_lightning as pl
 import torch
 import torch.nn.functional as F
-from pytorch_lightning import _logger as log
 from torch import optim
+from torch.nn import Module
 from torch.optim.lr_scheduler import MultiStepLR
 from torch.optim.optimizer import Optimizer
 from torch.utils.data import DataLoader
@@ -42,6 +39,9 @@
 from torchvision.datasets import ImageFolder
 from torchvision.datasets.utils import download_and_extract_archive
 
+import pytorch_lightning as pl
+from pytorch_lightning import _logger as log
+
 BN_TYPES = (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)
 DATA_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
 

pl_examples/domain_templates/imagenet.py (+1, -1)

@@ -1,9 +1,9 @@
 """
 This example is largely adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py
 """
-from argparse import ArgumentParser, Namespace
 import os
 import random
+from argparse import ArgumentParser, Namespace
 from collections import OrderedDict
 
 import torch

pl_examples/domain_templates/reinforce_learn_Qnet.py (+3, -4)

@@ -16,12 +16,9 @@
 tensorboard --logdir default
 """
 
-import pytorch_lightning as pl
-
-from typing import Tuple, List
-
 import argparse
 from collections import OrderedDict, deque, namedtuple
+from typing import Tuple, List
 
 import gym
 import numpy as np
@@ -32,6 +29,8 @@
 from torch.utils.data import DataLoader
 from torch.utils.data.dataset import IterableDataset
 
+import pytorch_lightning as pl
+
 
 class DQN(nn.Module):
     """

pl_examples/domain_templates/semantic_segmentation.py (+1, -1)

@@ -1,4 +1,5 @@
 import os
+import random
 from argparse import ArgumentParser, Namespace
 
 import numpy as np
@@ -7,7 +8,6 @@
 import torchvision.transforms as transforms
 from PIL import Image
 from torch.utils.data import DataLoader, Dataset
-import random
 
 import pytorch_lightning as pl
 from pl_examples.models.unet import UNet

pl_examples/models/lightning_template.py (-1)

@@ -12,7 +12,6 @@
 from torch.utils.data import DataLoader
 from torchvision.datasets import MNIST
 
-from pytorch_lightning import _logger as log
 from pytorch_lightning.core import LightningModule
 
 
pytorch_lightning/__init__.py (+1, -1)

@@ -1,6 +1,6 @@
 """Root package info."""
 
-__version__ = '0.9.0rc9'
+__version__ = '0.9.0rc11'
 __author__ = 'William Falcon et al.'
 __author_email__ = '[email protected]'
 __license__ = 'Apache-2.0'

pytorch_lightning/accelerator_backends/__init__.py (-7)

This file was deleted.

@@ -0,0 +1,7 @@
+from pytorch_lightning.accelerators.cpu_backend import CPUBackend
+from pytorch_lightning.accelerators.ddp2_backend import DDP2Backend
+from pytorch_lightning.accelerators.ddp_backend import DDPBackend
+from pytorch_lightning.accelerators.ddp_spawn_backend import DDPSpawnBackend
+from pytorch_lightning.accelerators.dp_backend import DataParallelBackend
+from pytorch_lightning.accelerators.gpu_backend import GPUBackend
+from pytorch_lightning.accelerators.tpu_backend import TPUBackend
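
The backend classes listed here live under `pytorch_lightning.accelerators`, so code that still imports from `pytorch_lightning.accelerator_backends` would switch to the new package; a small sketch of the updated imports (the `pick_backend` helper is illustrative only, while the module paths and the `Backend(trainer)` constructor shape come from the diffs in this commit):

```python
# Sketch of imports against the renamed accelerators package; pick_backend is
# a hypothetical helper, the module paths are taken from the diff above.
from pytorch_lightning.accelerators.cpu_backend import CPUBackend
from pytorch_lightning.accelerators.gpu_backend import GPUBackend


def pick_backend(trainer, on_gpu: bool):
    # Backends are constructed with the trainer, as in `def __init__(self, trainer)` below.
    return GPUBackend(trainer) if on_gpu else CPUBackend(trainer)
```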

pytorch_lightning/accelerator_backends/cpu_backend.py → pytorch_lightning/accelerators/cpu_backend.py (+1, -1)

@@ -22,7 +22,7 @@ def __init__(self, trainer):
 
     def setup(self, model):
         # run through amp wrapper
-        if self.trainer.use_amp:
+        if self.trainer.amp_type:
             raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')
 
         # call setup after the ddp process has connected
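
The change from the boolean `use_amp` flag to `self.trainer.amp_type`, here and in the backends below, works because `amp_type` is expected to be either `None` (AMP disabled) or a member of an `AMPType` enum, and enum members are truthy. A rough standalone sketch of that pattern, assuming an enum along these lines (the enum body and `DummyTrainer` are illustrative, not copied from this commit):

```python
# Illustrative sketch of the amp_type pattern; AMPType's exact definition in
# pytorch_lightning.utilities is assumed, and DummyTrainer is a stand-in.
from enum import Enum


class AMPType(Enum):
    APEX = 'apex'      # NVIDIA apex mixed precision
    NATIVE = 'native'  # torch.cuda.amp mixed precision


class DummyTrainer:
    def __init__(self, amp_type=None):
        self.amp_type = amp_type  # None -> AMP off, any AMPType member -> AMP on


trainer = DummyTrainer(amp_type=AMPType.NATIVE)
if trainer.amp_type:                  # truthy for any enum member, replaces `use_amp`
    print('AMP enabled:', trainer.amp_type)
if trainer.amp_type == AMPType.APEX:  # backend-specific branch, as in the diffs below
    print('would wrap the model with apex.amp')
```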

pytorch_lightning/accelerator_backends/ddp2_backend.py → pytorch_lightning/accelerators/ddp2_backend.py (+7, -9)

@@ -13,10 +13,12 @@
 # limitations under the License
 
 import os
+
 import torch
-from pytorch_lightning.utilities import NATIVE_AMP_AVALAIBLE
-from pytorch_lightning.utilities.distributed import rank_zero_only
+
 from pytorch_lightning import _logger as log
+from pytorch_lightning.utilities import AMPType
+from pytorch_lightning.utilities.distributed import rank_zero_only
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 
 try:
@@ -30,9 +32,7 @@
 try:
     from apex import amp
 except ImportError:
-    APEX_AVAILABLE = False
-else:
-    APEX_AVAILABLE = True
+    amp = None
 
 
 class DDP2Backend(object):
@@ -133,10 +133,8 @@ def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0
         # set model properties before going into wrapper
         self.trainer.copy_trainer_model_properties(model)
 
-        # AMP
-        # run through amp wrapper before going to distributed DP
-        # TODO: remove with dropping NVIDIA AMP support
-        if self.trainer.use_amp and not NATIVE_AMP_AVALAIBLE:
+        # AMP - run through amp wrapper before going to distributed DP
+        if self.trainer.amp_type == AMPType.APEX:
             model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)
             self.trainer.optimizers = optimizers
             self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)
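
These backends now bind `amp = None` when apex is not installed, instead of keeping a separate `APEX_AVAILABLE` flag; a standalone sketch of that optional-dependency pattern follows (the `maybe_initialize_apex` helper is hypothetical, though `amp.initialize(...)` is the standard apex entry point):

```python
# Sketch of the guarded apex import used above; apex is optional at runtime.
try:
    from apex import amp  # only importable when NVIDIA apex is installed
except ImportError:
    amp = None


def maybe_initialize_apex(model, optimizers, amp_level='O2'):
    # Hypothetical helper: skip apex entirely when it did not import.
    if amp is None:
        return model, optimizers
    return amp.initialize(model, optimizers, opt_level=amp_level)
```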

pytorch_lightning/accelerator_backends/ddp_backend.py → pytorch_lightning/accelerators/ddp_backend.py (+10, -12)

@@ -13,16 +13,18 @@
 # limitations under the License
 
 import os
-import torch
 import subprocess
 import sys
+from os.path import abspath
 from time import sleep
+from typing import Optional
+
 import numpy as np
-from os.path import abspath
-from pytorch_lightning.utilities import NATIVE_AMP_AVALAIBLE
-from pytorch_lightning.utilities.distributed import rank_zero_only
+import torch
+
 from pytorch_lightning import _logger as log
-from typing import Optional
+from pytorch_lightning.utilities import AMPType
+from pytorch_lightning.utilities.distributed import rank_zero_only
 
 try:
     from hydra.utils import to_absolute_path, get_original_cwd
@@ -35,9 +37,7 @@
 try:
     from apex import amp
 except ImportError:
-    APEX_AVAILABLE = False
-else:
-    APEX_AVAILABLE = True
+    amp = None
 
 
 class DDPBackend(object):
@@ -200,10 +200,8 @@ def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0
         # set model properties before going into wrapper
         self.trainer.copy_trainer_model_properties(model)
 
-        # AMP
-        # run through amp wrapper before going to distributed DP
-        # TODO: remove with dropping NVIDIA AMP support
-        if self.trainer.use_amp and not NATIVE_AMP_AVALAIBLE:
+        # AMP - run through amp wrapper before going to distributed DP
+        if self.trainer.amp_type == AMPType.APEX:
             model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)
             self.trainer.optimizers = optimizers
             self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)

pytorch_lightning/accelerator_backends/ddp_spawn_backend.py → pytorch_lightning/accelerators/ddp_spawn_backend.py (+6, -9)

@@ -12,18 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 
-import os
 import torch
 import torch.multiprocessing as mp
-from pytorch_lightning.utilities.distributed import rank_zero_only
+
 from pytorch_lightning import _logger as log
+from pytorch_lightning.utilities import AMPType
+from pytorch_lightning.utilities.distributed import rank_zero_only
 
 try:
     from apex import amp
 except ImportError:
-    APEX_AVAILABLE = False
-else:
-    APEX_AVAILABLE = True
+    amp = None
 
 
 class DDPSpawnBackend(object):
@@ -133,11 +132,9 @@ def ddp_train(self, process_idx, mp_queue, model
         # set model properties before going into wrapper
         self.trainer.copy_trainer_model_properties(model)
 
-        # AMP
+        # AMP -
         # run through amp wrapper before going to distributed DP
-        # TODO: remove with dropping NVIDIA AMP support
-        native_amp_available = hasattr(torch.cuda, "amp") and hasattr(torch.cuda.amp, "autocast")
-        if self.trainer.use_amp and not native_amp_available:
+        if self.trainer.amp_type == AMPType.APEX:
             model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)
             self.trainer.optimizers = optimizers
             self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)

pytorch_lightning/accelerator_backends/dp_backend.py → pytorch_lightning/accelerators/dp_backend.py (+7, -9)

@@ -13,16 +13,16 @@
 # limitations under the License.
 
 import torch
-from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from pytorch_lightning.overrides.data_parallel import LightningDataParallel
 from torch import optim
 
+from pytorch_lightning.overrides.data_parallel import LightningDataParallel
+from pytorch_lightning.utilities import AMPType
+from pytorch_lightning.utilities.exceptions import MisconfigurationException
+
 try:
     from apex import amp
 except ImportError:
-    APEX_AVAILABLE = False
-else:
-    APEX_AVAILABLE = True
+    amp = None
 
 
 class DataParallelBackend(object):
@@ -49,7 +49,7 @@ def setup(self, model):
         self.model_autocast_original_forward = model.forward
 
         # init half precision
-        if self.trainer.use_amp:
+        if self.trainer.amp_type:
             model = self.__init_half_precision(model)
 
         # init torch data parallel
@@ -69,9 +69,7 @@ def __init_torch_data_parallel(self, model):
         return model
 
     def __init_half_precision(self, model):
-        native_amp_available = hasattr(torch.cuda, "amp") and hasattr(torch.cuda.amp, "autocast")
-
-        if native_amp_available:
+        if self.trainer.amp_type == AMPType.NATIVE:
            self.__init_native_amp(model)
        else:
            model = self.__init_nvidia_apex(model)
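
In `__init_half_precision` the per-call capability probe (`hasattr(torch.cuda, "amp")`) is replaced by a single check against `AMPType.NATIVE`, decided up front by the trainer. A rough sketch of what that dispatch amounts to (helper names and the `GradScaler` wiring are illustrative, not from this commit):

```python
# Illustrative dispatch between native torch.cuda.amp and NVIDIA apex,
# mirroring the AMPType.NATIVE branch above; AMPType is the same toy enum
# as in the earlier sketch, and the return values are an assumption.
import torch
from enum import Enum


class AMPType(Enum):
    APEX = 'apex'
    NATIVE = 'native'


def init_half_precision(model, optimizers, amp_type, amp_level='O2'):
    if amp_type == AMPType.NATIVE:
        # Native path: model stays untouched; training later runs the forward
        # pass under torch.cuda.amp.autocast() and scales gradients.
        scaler = torch.cuda.amp.GradScaler()
        return model, optimizers, scaler
    # Apex path: apex patches model and optimizers in place.
    from apex import amp
    model, optimizers = amp.initialize(model, optimizers, opt_level=amp_level)
    return model, optimizers, None
```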
