diff --git a/pl_examples/basic_examples/lightning_module_template.py b/pl_examples/basic_examples/lightning_module_template.py
index 07ca119651e06..50fba989ea768 100644
--- a/pl_examples/basic_examples/lightning_module_template.py
+++ b/pl_examples/basic_examples/lightning_module_template.py
@@ -1,7 +1,7 @@
 """
 Example template for defining a system
 """
-import logging
+import logging as log
 import os
 from argparse import ArgumentParser
 from collections import OrderedDict
@@ -215,17 +215,17 @@ def __dataloader(self, train):

     @pl.data_loader
     def train_dataloader(self):
-        logging.info('training data loader called')
+        log.info('Training data loader called.')
         return self.__dataloader(train=True)

     @pl.data_loader
     def val_dataloader(self):
-        logging.info('val data loader called')
+        log.info('Validation data loader called.')
         return self.__dataloader(train=False)

     @pl.data_loader
     def test_dataloader(self):
-        logging.info('test data loader called')
+        log.info('Test data loader called.')
         return self.__dataloader(train=False)

     @staticmethod
diff --git a/pytorch_lightning/__init__.py b/pytorch_lightning/__init__.py
index 49dd9591f0dd1..7c04927285868 100644
--- a/pytorch_lightning/__init__.py
+++ b/pytorch_lightning/__init__.py
@@ -25,14 +25,15 @@
 # We are not importing the rest of the scikit during the build
 # process, as it may not be compiled yet
 else:
+    import logging as log
+    log.basicConfig(level=log.INFO)
+
     from .trainer.trainer import Trainer
     from .core.lightning import LightningModule
     from .core.decorators import data_loader
-    import logging

     __all__ = [
         'Trainer',
         'LightningModule',
         'data_loader',
     ]
-    logging.basicConfig(level=logging.INFO)
diff --git a/pytorch_lightning/callbacks/pt_callbacks.py b/pytorch_lightning/callbacks/pt_callbacks.py
index 125e15e1e42c8..1c54f98a9fb9f 100644
--- a/pytorch_lightning/callbacks/pt_callbacks.py
+++ b/pytorch_lightning/callbacks/pt_callbacks.py
@@ -7,8 +7,9 @@
 import os
 import shutil
-import logging
+import logging as log
 import warnings
+
 import numpy as np

 from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel

@@ -113,7 +114,7 @@ def __init__(self, monitor='val_loss',

         if mode not in ['auto', 'min', 'max']:
             if self.verbose > 0:
-                logging.info(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')
+                log.info(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')
             mode = 'auto'

         if mode == 'min':
@@ -175,7 +176,7 @@ def on_epoch_end(self, epoch, logs=None):

     def on_train_end(self, logs=None):
         if self.stopped_epoch > 0 and self.verbose > 0:
-            logging.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping')
+            log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping')


 class ModelCheckpoint(Callback):
@@ -351,7 +352,7 @@ def on_epoch_end(self, epoch, logs=None):
                 else:
                     self.best = max(self.best_k_models.values())
                 if self.verbose > 0:
-                    logging.info(
+                    log.info(
                         f'\nEpoch {epoch:05d}: {self.monitor} reached'
                         f' {current:0.5f} (best {self.best:0.5f}), saving model to'
                         f' {filepath} as top {self.save_top_k}')
@@ -359,13 +360,13 @@ def on_epoch_end(self, epoch, logs=None):

             else:
                 if self.verbose > 0:
-                    logging.info(
+                    log.info(
                         f'\nEpoch {epoch:05d}: {self.monitor}'
                         f' was not in top {self.save_top_k}')

         else:
             if self.verbose > 0:
-                logging.info(f'\nEpoch {epoch:05d}: saving model to {filepath}')
+                log.info(f'\nEpoch {epoch:05d}: saving model to {filepath}')

             self._save_model(filepath)

@@ -417,6 +418,6 @@ def on_epoch_begin(self, epoch, trainer):
 # losses = [10, 9, 8, 8, 6, 4.3, 5, 4.4, 2.8, 2.5]
 # for i, loss in enumerate(losses):
 #     should_stop = c.on_epoch_end(i, logs={'val_loss': loss})
-#     logging.info(loss)
+#     log.info(loss)
 #     if should_stop:
 #         break
diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py
index 20008676543d0..0170b55e36afc 100644
--- a/pytorch_lightning/core/lightning.py
+++ b/pytorch_lightning/core/lightning.py
@@ -1,10 +1,10 @@
 import collections
-import logging
+import logging as log
+import csv
 import os
 import warnings
 from abc import ABC, abstractmethod
 from argparse import Namespace
-import csv

 import torch
 import torch.distributed as dist
@@ -1130,7 +1130,7 @@ def __init__(self, hparams):

     def summarize(self, mode):
         model_summary = ModelSummary(self, mode=mode)
-        logging.info('\n' + model_summary.__str__())
+        log.info('\n' + model_summary.__str__())

     def freeze(self):
         r"""
@@ -1213,7 +1213,7 @@ def on_save_checkpoint(self, checkpoint):

 def load_hparams_from_tags_csv(tags_csv):
     if not os.path.isfile(tags_csv):
-        logging.warning(f'Missing Tags: {tags_csv}.')
+        log.warning(f'Missing Tags: {tags_csv}.')
         return Namespace()

     tags = {}
diff --git a/pytorch_lightning/core/memory.py b/pytorch_lightning/core/memory.py
index 269282d5529c2..a1ae908d36782 100644
--- a/pytorch_lightning/core/memory.py
+++ b/pytorch_lightning/core/memory.py
@@ -3,7 +3,7 @@
 '''

 import gc
-import logging
+import logging as log
 import os
 import subprocess
 from subprocess import PIPE
@@ -214,7 +214,7 @@ def print_mem_stack():  # pragma: no cover
     for obj in gc.get_objects():
         try:
             if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
-                logging.info(type(obj), obj.size())
+                log.info(type(obj), obj.size())
         except Exception:
             pass

diff --git a/pytorch_lightning/loggers/__init__.py b/pytorch_lightning/loggers/__init__.py
new file mode 100644
index 0000000000000..23c4b722229c0
--- /dev/null
+++ b/pytorch_lightning/loggers/__init__.py
@@ -0,0 +1,113 @@
+"""
+Lightning supports most popular logging frameworks (Tensorboard, comet, weights and biases, etc...).
+To use a logger, simply pass it into the trainer.
+
+.. code-block:: python
+
+    from pytorch_lightning import loggers
+
+    # lightning uses tensorboard by default
+    tb_logger = loggers.TensorBoardLogger()
+    trainer = Trainer(logger=tb_logger)
+
+    # or choose from any of the others such as MLFlow, Comet, Neptune, Wandb
+    comet_logger = loggers.CometLogger()
+    trainer = Trainer(logger=comet_logger)
+
+.. note:: All loggers log by default to `os.getcwd()`. To change the path without creating a logger set
+    Trainer(default_save_path='/your/path/to/save/checkpoints')
+
+Custom logger
+-------------
+
+You can implement your own logger by writing a class that inherits from
+`LightningLoggerBase`. Use the `rank_zero_only` decorator to make sure that
+only the first process in DDP training logs data.
+
+.. code-block:: python
+
+    from pytorch_lightning.loggers import LightningLoggerBase, rank_zero_only
+
+    class MyLogger(LightningLoggerBase):
+
+        @rank_zero_only
+        def log_hyperparams(self, params):
+            # params is an argparse.Namespace
+            # your code to record hyperparameters goes here
+            pass
+
+        @rank_zero_only
+        def log_metrics(self, metrics, step):
+            # metrics is a dictionary of metric names and values
+            # your code to record metrics goes here
+            pass
+
+        def save(self):
+            # Optional. Any code necessary to save logger data goes here
+            pass
+
+        @rank_zero_only
+        def finalize(self, status):
+            # Optional. Any code that needs to be run after training
+            # finishes goes here
+
+
+If you write a logger than may be useful to others, please send
+a pull request to add it to Lighting!
+
+Using loggers
+-------------
+
+Call the logger anywhere from your LightningModule by doing:
+
+.. code-block:: python
+
+    def train_step(...):
+        # example
+        self.logger.experiment.whatever_method_summary_writer_supports(...)
+
+    def any_lightning_module_function_or_hook(...):
+        self.logger.experiment.add_histogram(...)
+
+Supported Loggers
+-----------------
+"""
+from os import environ
+
+from .base import LightningLoggerBase, rank_zero_only
+from .tensorboard import TensorBoardLogger
+
+__all__ = ['TensorBoardLogger']
+
+try:
+    # needed to prevent ImportError and duplicated logs.
+    environ["COMET_DISABLE_AUTO_LOGGING"] = "1"
+
+    from .comet import CometLogger
+    __all__.append('CometLogger')
+except ImportError:
+    del environ["COMET_DISABLE_AUTO_LOGGING"]
+
+try:
+    from .mlflow import MLFlowLogger
+    __all__.append('MLFlowLogger')
+except ImportError:
+    pass
+
+try:
+    from .neptune import NeptuneLogger
+    __all__.append('NeptuneLogger')
+except ImportError:
+    pass
+
+try:
+    from .test_tube import TestTubeLogger
+    __all__.append('TestTubeLogger')
+except ImportError:
+    pass
+
+try:
+    from .wandb import WandbLogger
+    __all__.append('WandbLogger')
+except ImportError:
+    pass
diff --git a/pytorch_lightning/logging/base.py b/pytorch_lightning/loggers/base.py
similarity index 100%
rename from pytorch_lightning/logging/base.py
rename to pytorch_lightning/loggers/base.py
diff --git a/pytorch_lightning/logging/comet.py b/pytorch_lightning/loggers/comet.py
similarity index 97%
rename from pytorch_lightning/logging/comet.py
rename to pytorch_lightning/loggers/comet.py
index fbf4f839cda7a..1b5950c15226b 100644
--- a/pytorch_lightning/logging/comet.py
+++ b/pytorch_lightning/loggers/comet.py
@@ -31,7 +31,7 @@ def __init__(self, api_key=None, save_dir=None, workspace=None,
         .. code-block:: python

             # ONLINE MODE
-            from pytorch_lightning.logging import CometLogger
+            from pytorch_lightning.loggers import CometLogger

             # arguments made to CometLogger are passed on to the comet_ml.Experiment class
             comet_logger = CometLogger(
@@ -47,7 +47,7 @@ def __init__(self, api_key=None, save_dir=None, workspace=None,
         .. code-block:: python

             # OFFLINE MODE
-            from pytorch_lightning.logging import CometLogger
+            from pytorch_lightning.loggers import CometLogger

             # arguments made to CometLogger are passed on to the comet_ml.Experiment class
             comet_logger = CometLogger(
diff --git a/pytorch_lightning/logging/comet_logger.py b/pytorch_lightning/loggers/comet_logger.py
similarity index 81%
rename from pytorch_lightning/logging/comet_logger.py
rename to pytorch_lightning/loggers/comet_logger.py
index 4ea0c0714bec9..a5c5a78680031 100644
--- a/pytorch_lightning/logging/comet_logger.py
+++ b/pytorch_lightning/loggers/comet_logger.py
@@ -7,4 +7,4 @@
 warnings.warn("`comet_logger` module has been renamed to `comet` since v0.6.0"
               " and will be removed in v0.8.0", DeprecationWarning)

-from pytorch_lightning.logging.comet import CometLogger  # noqa: E402
+from pytorch_lightning.loggers.comet import CometLogger  # noqa: E402
diff --git a/pytorch_lightning/logging/mlflow.py b/pytorch_lightning/loggers/mlflow.py
similarity index 98%
rename from pytorch_lightning/logging/mlflow.py
rename to pytorch_lightning/loggers/mlflow.py
index 50f4843e0f6c9..652e32f675aae 100644
--- a/pytorch_lightning/logging/mlflow.py
+++ b/pytorch_lightning/loggers/mlflow.py
@@ -3,7 +3,7 @@

 .. code-block:: python

-    from pytorch_lightning.logging import MLFlowLogger
+    from pytorch_lightning.loggers import MLFlowLogger
     mlf_logger = MLFlowLogger(
         experiment_name="default",
         tracking_uri="file:/."
diff --git a/pytorch_lightning/logging/mlflow_logger.py b/pytorch_lightning/loggers/mlflow_logger.py
similarity index 82%
rename from pytorch_lightning/logging/mlflow_logger.py
rename to pytorch_lightning/loggers/mlflow_logger.py
index aa56f5ff7f68d..30993374ee9a1 100644
--- a/pytorch_lightning/logging/mlflow_logger.py
+++ b/pytorch_lightning/loggers/mlflow_logger.py
@@ -7,4 +7,4 @@
 warnings.warn("`mlflow_logger` module has been renamed to `mlflow` since v0.6.0"
               " and will be removed in v0.8.0", DeprecationWarning)

-from pytorch_lightning.logging.mlflow import MLFlowLogger  # noqa: E402
+from pytorch_lightning.loggers.mlflow import MLFlowLogger  # noqa: E402
diff --git a/pytorch_lightning/logging/neptune.py b/pytorch_lightning/loggers/neptune.py
similarity index 97%
rename from pytorch_lightning/logging/neptune.py
rename to pytorch_lightning/loggers/neptune.py
index 7c677962df70e..066ad9156d3b2 100644
--- a/pytorch_lightning/logging/neptune.py
+++ b/pytorch_lightning/loggers/neptune.py
@@ -6,7 +6,7 @@

 .. code-block:: python

-    from pytorch_lightning.logging import NeptuneLogger
+    from pytorch_lightning.loggers import NeptuneLogger

     # arguments made to NeptuneLogger are passed on to the neptune.experiments.Experiment class
     neptune_logger = NeptuneLogger(
@@ -48,7 +48,7 @@ def any_lightning_module_function_or_hook(...):
 from torch import is_tensor

 # from .base import LightningLoggerBase, rank_zero_only
-from pytorch_lightning.logging.base import LightningLoggerBase, rank_zero_only
+from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only

 logger = getLogger(__name__)

@@ -66,7 +66,7 @@ def __init__(self, api_key=None, project_name=None, offline_mode=False,
         .. code-block:: python

             # ONLINE MODE
-            from pytorch_lightning.logging import NeptuneLogger
+            from pytorch_lightning.loggers import NeptuneLogger

             # arguments made to NeptuneLogger are passed on to the neptune.experiments.Experiment class
             neptune_logger = NeptuneLogger(
@@ -81,7 +81,7 @@ def __init__(self, api_key=None, project_name=None, offline_mode=False,
         .. code-block:: python

             # OFFLINE MODE
-            from pytorch_lightning.logging import NeptuneLogger
+            from pytorch_lightning.loggers import NeptuneLogger

             # arguments made to NeptuneLogger are passed on to the neptune.experiments.Experiment class
             neptune_logger = NeptuneLogger(
diff --git a/pytorch_lightning/logging/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py
similarity index 100%
rename from pytorch_lightning/logging/tensorboard.py
rename to pytorch_lightning/loggers/tensorboard.py
diff --git a/pytorch_lightning/logging/test_tube.py b/pytorch_lightning/loggers/test_tube.py
similarity index 98%
rename from pytorch_lightning/logging/test_tube.py
rename to pytorch_lightning/loggers/test_tube.py
index 10e14a502a37b..30509c7d3cf63 100644
--- a/pytorch_lightning/logging/test_tube.py
+++ b/pytorch_lightning/loggers/test_tube.py
@@ -6,7 +6,7 @@

 .. code-block:: python

-    from pytorch_lightning.logging import TestTubeLogger
+    from pytorch_lightning.loggers import TestTubeLogger
     tt_logger = TestTubeLogger(
         save_dir=".",
         name="default",
diff --git a/pytorch_lightning/logging/test_tube_logger.py b/pytorch_lightning/loggers/test_tube_logger.py
similarity index 82%
rename from pytorch_lightning/logging/test_tube_logger.py
rename to pytorch_lightning/loggers/test_tube_logger.py
index b76d526de92c1..bc531c0560643 100644
--- a/pytorch_lightning/logging/test_tube_logger.py
+++ b/pytorch_lightning/loggers/test_tube_logger.py
@@ -7,4 +7,4 @@
 warnings.warn("`test_tube_logger` module has been renamed to `test_tube` since v0.6.0"
               " and will be removed in v0.8.0", DeprecationWarning)

-from pytorch_lightning.logging.test_tube import TestTubeLogger  # noqa: E402
+from pytorch_lightning.loggers.test_tube import TestTubeLogger  # noqa: E402
diff --git a/pytorch_lightning/logging/wandb.py b/pytorch_lightning/loggers/wandb.py
similarity index 98%
rename from pytorch_lightning/logging/wandb.py
rename to pytorch_lightning/loggers/wandb.py
index 6e4307492623a..123b7544c9768 100644
--- a/pytorch_lightning/logging/wandb.py
+++ b/pytorch_lightning/loggers/wandb.py
@@ -25,7 +25,7 @@ class WandbLogger(LightningLoggerBase):
     --------
     .. code-block:: python

-        from pytorch_lightning.logging import WandbLogger
+        from pytorch_lightning.loggers import WandbLogger
         from pytorch_lightning import Trainer

         wandb_logger = WandbLogger()
diff --git a/pytorch_lightning/logging/__init__.py b/pytorch_lightning/logging/__init__.py
index 4850494f338bc..93515eb1eff31 100644
--- a/pytorch_lightning/logging/__init__.py
+++ b/pytorch_lightning/logging/__init__.py
@@ -1,115 +1,13 @@
 """
-Lightning supports most popular logging frameworks (Tensorboard, comet, weights and biases, etc...).
-To use a logger, simply pass it into the trainer.
-
-.. code-block:: python
-
-    from pytorch_lightning import logging
-
-    # lightning uses tensorboard by default
-    tb_logger = logging.TensorBoardLogger()
-    trainer = Trainer(logger=tb_logger)
-
-    # or choose from any of the others such as MLFlow, Comet, Neptune, Wandb
-    comet_logger = logging.CometLogger()
-    trainer = Trainer(logger=comet_logger)
-
-.. note:: All loggers log by default to `os.getcwd()`. To change the path without creating a logger set
-    Trainer(default_save_path='/your/path/to/save/checkpoints')
-
-Custom logger
--------------
-
-You can implement your own logger by writing a class that inherits from
-`LightningLoggerBase`. Use the `rank_zero_only` decorator to make sure that
-only the first process in DDP training logs data.
-
-.. code-block:: python
-
-    from pytorch_lightning.logging import LightningLoggerBase, rank_zero_only
-
-    class MyLogger(LightningLoggerBase):
-
-        @rank_zero_only
-        def log_hyperparams(self, params):
-            # params is an argparse.Namespace
-            # your code to record hyperparameters goes here
-            pass
-
-        @rank_zero_only
-        def log_metrics(self, metrics, step):
-            # metrics is a dictionary of metric names and values
-            # your code to record metrics goes here
-            pass
-
-        def save(self):
-            # Optional. Any code necessary to save logger data goes here
-            pass
-
-        @rank_zero_only
-        def finalize(self, status):
-            # Optional. Any code that needs to be run after training
-            # finishes goes here
-
-
-If you write a logger than may be useful to others, please send
-a pull request to add it to Lighting!
-
-Using loggers
--------------
-
-Call the logger anywhere from your LightningModule by doing:
-
-.. code-block:: python
-
-    def train_step(...):
-        # example
-        self.logger.experiment.whatever_method_summary_writer_supports(...)
-
-    def any_lightning_module_function_or_hook(...):
-        self.logger.experiment.add_histogram(...)
-
-Supported Loggers
------------------
+.. warning:: `logging` package has been renamed to `loggers` since v0.6.1 and will be removed in v0.8.0
 """
-from os import environ
-
-from .base import LightningLoggerBase, rank_zero_only
-from .tensorboard import TensorBoardLogger
-
-loggers = ['TensorBoardLogger']
-
-try:
-    # needed to prevent ImportError and duplicated logs.
-    environ["COMET_DISABLE_AUTO_LOGGING"] = "1"
-
-    from .comet import CometLogger
-    loggers.append('CometLogger')
-except ImportError:
-    del environ["COMET_DISABLE_AUTO_LOGGING"]
-
-try:
-    from .mlflow import MLFlowLogger
-    loggers.append('MLFlowLogger')
-except ImportError:
-    pass
-
-try:
-    from .neptune import NeptuneLogger
-    loggers.append('NeptuneLogger')
-except ImportError:
-    pass
-
-try:
-    from .test_tube import TestTubeLogger
-    loggers.append('TestTubeLogger')
-except ImportError:
-    pass
+
+import warnings
+
+warnings.warn("`logging` package has been renamed to `loggers` since v0.6.1"
+              " and will be removed in v0.8.0", DeprecationWarning)
+
-try:
-    from .wandb import WandbLogger
-    loggers.append('WandbLogger')
-except ImportError:
-    pass
-
-__all__ = loggers
+from pytorch_lightning.loggers import *  # noqa: F403
+from pytorch_lightning.loggers import (  # noqa: E402
+    base, comet, mlflow, neptune, tensorboard, test_tube, wandb
+)
diff --git a/pytorch_lightning/trainer/auto_mix_precision.py b/pytorch_lightning/trainer/auto_mix_precision.py
index 2915f2465fbb9..bd6edd9310116 100644
--- a/pytorch_lightning/trainer/auto_mix_precision.py
+++ b/pytorch_lightning/trainer/auto_mix_precision.py
@@ -7,7 +7,7 @@
     APEX_AVAILABLE = True
 except ImportError:
     APEX_AVAILABLE = False
-import logging
+import logging as log


 class TrainerAMPMixin(ABC):
@@ -15,7 +15,7 @@ class TrainerAMPMixin(ABC):
     def init_amp(self, use_amp):
         self.use_amp = use_amp and APEX_AVAILABLE
         if self.use_amp:
-            logging.info('using 16bit precision')
+            log.info('Using 16bit precision.')

         if use_amp and not APEX_AVAILABLE:  # pragma: no cover
             msg = """
diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py
index 0561a6d40e3ff..b66981b995450 100644
--- a/pytorch_lightning/trainer/distrib_data_parallel.py
+++ b/pytorch_lightning/trainer/distrib_data_parallel.py
@@ -113,7 +113,7 @@ def train_fx(trial_hparams, cluster_manager, _):

 """

-import logging
+import logging as log
 import os
 import re
 import warnings
@@ -205,7 +205,7 @@ def set_distributed_mode(self, distributed_backend, num_gpu_nodes):
                     'or distributed_backend=ddp2'
                 raise MisconfigurationException(w)

-        logging.info(f'gpu available: {torch.cuda.is_available()}, used: {self.on_gpu}')
+        log.info(f'GPU available: {torch.cuda.is_available()}, used: {self.on_gpu}')

     def configure_slurm_ddp(self, num_gpu_nodes):
         self.is_slurm_managing_tasks = False
@@ -253,7 +253,7 @@ def set_nvidia_flags(self, is_slurm_managing_tasks, data_parallel_device_ids):
             gpu_str = ','.join([str(x) for x in data_parallel_device_ids])
             os.environ["CUDA_VISIBLE_DEVICES"] = gpu_str

-        logging.info(f'VISIBLE GPUS: {os.environ["CUDA_VISIBLE_DEVICES"]}')
+        log.info(f'VISIBLE GPUS: {os.environ["CUDA_VISIBLE_DEVICES"]}')

     def ddp_train(self, gpu_idx, model):
         """
diff --git a/pytorch_lightning/trainer/logging.py b/pytorch_lightning/trainer/logging.py
index adc6ec5d9652c..9ff5c3db85076 100644
--- a/pytorch_lightning/trainer/logging.py
+++ b/pytorch_lightning/trainer/logging.py
@@ -3,7 +3,7 @@
 import torch

 from pytorch_lightning.core import memory
-from pytorch_lightning.logging import TensorBoardLogger
+from pytorch_lightning.loggers import TensorBoardLogger


 class TrainerLoggingMixin(ABC):
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index e1c1464c73cf8..5794ba617c99f 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -1,8 +1,7 @@
 import os
 import sys
 import warnings
-import logging
-
+import logging as log
 import torch
 import torch.distributed as dist

@@ -97,7 +96,7 @@ def __init__(

             logger (:class:`.Logger`): Logger for experiment tracking. Example::

-                from pytorch_lightning.logging import TensorBoardLogger
+                from pytorch_lightning.loggers import TensorBoardLogger

                 # default logger used by trainer
                 logger = TensorBoardLogger(
@@ -526,7 +525,7 @@ def __init__(
            Running in fast_dev_run mode: will run a full train,
            val loop using a single batch
            '''
-            logging.info(m)
+            log.info(m)

         # set default save path if user didn't provide one
         self.default_save_path = default_save_path
diff --git a/pytorch_lightning/trainer/training_io.py b/pytorch_lightning/trainer/training_io.py
index 3c489132c739c..2ee77ada4c5c4 100644
--- a/pytorch_lightning/trainer/training_io.py
+++ b/pytorch_lightning/trainer/training_io.py
@@ -39,7 +39,7 @@
 .. code-block:: python

     from pytorch_lightning import Trainer
-    from pytorch_lightning.logging import TestTubeLogger
+    from pytorch_lightning.loggers import TestTubeLogger

     logger = TestTubeLogger(
         save_dir='./savepath',
@@ -89,7 +89,7 @@

 """

-import logging
+import logging as log
 import os
 import re
 import signal
@@ -203,7 +203,7 @@ def restore_state_if_checkpoint_exists(self, model):
         if last_ckpt_name is not None:
             last_ckpt_path = os.path.join(self.checkpoint_callback.filepath, last_ckpt_name)
             self.restore(last_ckpt_path, self.on_gpu)
-            logging.info(f'model and trainer restored from checkpoint: {last_ckpt_path}')
+            log.info(f'Model and Trainer restored from checkpoint: {last_ckpt_path}')
             did_restore = True

         return did_restore
@@ -222,14 +222,14 @@ def register_slurm_signal_handlers(self):
             pass

         if on_slurm:
-            logging.info('set slurm handle signals')
+            log.info('Set SLURM handle signals.')
             signal.signal(signal.SIGUSR1, self.sig_handler)
             signal.signal(signal.SIGTERM, self.term_handler)

     def sig_handler(self, signum, frame):
         if self.proc_rank == 0:
             # save weights
-            logging.info('handling SIGUSR1')
+            log.info('handling SIGUSR1')
             self.hpc_save(self.weights_save_path, self.logger)

             # find job id
@@ -237,21 +237,21 @@ def sig_handler(self, signum, frame):
             cmd = 'scontrol requeue {}'.format(job_id)

             # requeue job
-            logging.info('\nrequeing job {job_id}...')
+            log.info(f'requeing job {job_id}...')
             result = call(cmd, shell=True)

             # print result text
             if result == 0:
-                logging.info('requeued exp {job_id}')
+                log.info(f'requeued exp {job_id}')
             else:
-                logging.info('requeue failed...')
+                log.info('requeue failed...')

             # close experiment to avoid issues
             self.logger.close()

     def term_handler(self, signum, frame):
         # save
-        logging.info("bypassing sigterm")
+        log.info("bypassing sigterm")

 # --------------------
 # MODEL SAVE CHECKPOINT
@@ -461,7 +461,7 @@ def hpc_load(self, folderpath, on_gpu):
         # call model hook
         model.on_hpc_load(checkpoint)

-        logging.info(f'restored hpc model from: {filepath}')
+        log.info(f'restored hpc model from: {filepath}')

     def max_ckpt_in_folder(self, path, name_key='ckpt_'):
         files = os.listdir(path)
diff --git a/pytorch_lightning/trainer/training_tricks.py b/pytorch_lightning/trainer/training_tricks.py
index 227ef245de47e..7fa4059afc3e2 100644
--- a/pytorch_lightning/trainer/training_tricks.py
+++ b/pytorch_lightning/trainer/training_tricks.py
@@ -1,4 +1,4 @@
-import logging
+import logging as log
 from abc import ABC, abstractmethod

 import torch
@@ -27,7 +27,7 @@ def print_nan_gradients(self):
         model = self.get_model()
         for param in model.parameters():
             if (param.grad is not None) and torch.isnan(param.grad.float()).any():
-                logging.info(param, param.grad)
+                log.info(param, param.grad)

     def configure_accumulated_gradients(self, accumulate_grad_batches):
         self.accumulate_grad_batches = None
diff --git a/tests/test_logging.py b/tests/test_logging.py
index a91f6087d9cb2..1b531420c8541 100644
--- a/tests/test_logging.py
+++ b/tests/test_logging.py
@@ -6,10 +6,14 @@

 import tests.utils as tutils
 from pytorch_lightning import Trainer
-from pytorch_lightning.logging import (
+from pytorch_lightning.loggers import (
     LightningLoggerBase,
     rank_zero_only,
     TensorBoardLogger,
+    MLFlowLogger,
+    CometLogger,
+    WandbLogger,
+    NeptuneLogger
 )
 from pytorch_lightning.testing import LightningTestModel
@@ -63,11 +67,6 @@ def test_mlflow_logger(tmpdir):
     """Verify that basic functionality of mlflow logger works."""
     tutils.reset_seed()

-    try:
-        from pytorch_lightning.logging import MLFlowLogger
-    except ModuleNotFoundError:
-        return
-
     hparams = tutils.get_hparams()
     model = LightningTestModel(hparams)

@@ -91,11 +90,6 @@ def test_mlflow_pickle(tmpdir):
     """Verify that pickling trainer with mlflow logger works."""
     tutils.reset_seed()

-    try:
-        from pytorch_lightning.logging import MLFlowLogger
-    except ModuleNotFoundError:
-        return
-
     # hparams = tutils.get_hparams()
     # model = LightningTestModel(hparams)

@@ -123,11 +117,6 @@ def test_comet_logger(tmpdir, monkeypatch):

     tutils.reset_seed()

-    try:
-        from pytorch_lightning.logging import CometLogger
-    except ModuleNotFoundError:
-        return
-
     hparams = tutils.get_hparams()
     model = LightningTestModel(hparams)

@@ -164,11 +153,6 @@ def test_comet_pickle(tmpdir, monkeypatch):

     tutils.reset_seed()

-    try:
-        from pytorch_lightning.logging import CometLogger
-    except ModuleNotFoundError:
-        return
-
     # hparams = tutils.get_hparams()
     # model = LightningTestModel(hparams)

@@ -197,18 +181,14 @@ def test_wandb_logger(tmpdir):
     """Verify that basic functionality of wandb logger works."""
     tutils.reset_seed()

-    from pytorch_lightning.logging import WandbLogger
-
     wandb_dir = os.path.join(tmpdir, "wandb")
-    logger = WandbLogger(save_dir=wandb_dir, anonymous=True)
+    _ = WandbLogger(save_dir=wandb_dir, anonymous=True)


 def test_neptune_logger(tmpdir):
     """Verify that basic functionality of neptune logger works."""
     tutils.reset_seed()

-    from pytorch_lightning.logging import NeptuneLogger
-
     hparams = tutils.get_hparams()
     model = LightningTestModel(hparams)
     logger = NeptuneLogger(offline_mode=True)
@@ -230,7 +210,6 @@ def test_wandb_pickle(tmpdir):
     """Verify that pickling trainer with wandb logger works."""
     tutils.reset_seed()

-    from pytorch_lightning.logging import WandbLogger
     wandb_dir = str(tmpdir)
     logger = WandbLogger(save_dir=wandb_dir, anonymous=True)
     assert logger is not None
@@ -240,8 +219,6 @@ def test_neptune_pickle(tmpdir):
     """Verify that pickling trainer with neptune logger works."""
     tutils.reset_seed()

-    from pytorch_lightning.logging import NeptuneLogger
-
     # hparams = tutils.get_hparams()
     # model = LightningTestModel(hparams)

diff --git a/tests/test_restore_models.py b/tests/test_restore_models.py
index 498b312913380..349e649e94592 100644
--- a/tests/test_restore_models.py
+++ b/tests/test_restore_models.py
@@ -1,4 +1,4 @@
-import logging
+import logging as log
 import os

 import torch
@@ -41,7 +41,7 @@ def test_running_test_pretrained_model_ddp(tmpdir):
     trainer = Trainer(**trainer_options)
     result = trainer.fit(model)

-    logging.info(os.listdir(tutils.get_data_path(logger, path_dir=tmpdir)))
+    log.info(os.listdir(tutils.get_data_path(logger, path_dir=tmpdir)))

     # correct result and ok accuracy
     assert result == 1, 'training failed to complete'
diff --git a/tests/utils.py b/tests/utils.py
index 70ecccb28d7a0..e3f84567dac2c 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -8,7 +8,7 @@
 from pl_examples import LightningTemplateModel
 from pytorch_lightning import Trainer
 from pytorch_lightning.callbacks import ModelCheckpoint
-from pytorch_lightning.logging import TestTubeLogger, TensorBoardLogger
+from pytorch_lightning.loggers import TestTubeLogger, TensorBoardLogger
 from pytorch_lightning.testing import LightningTestModel

 # generate a list of random seeds for each test