diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6fa9ce4f0a9a2..9d7af014edecb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Changed
 
+- Changed `progress_bar_refresh_rate` trainer flag to disable progress bar when set to 0. ([#1108](https://github.com/PyTorchLightning/pytorch-lightning/pull/1108))
 - Enhanced `load_from_checkpoint` to also forward params to the model ([#1307](https://github.com/PyTorchLightning/pytorch-lightning/pull/1307))
 - Updated references to self.forward() to instead use the `__call__` interface. ([#1211](https://github.com/PyTorchLightning/pytorch-lightning/pull/1211))
 - Added option to run without an optimizer by returning `None` from `configure_optimizers`. ([#1279](https://github.com/PyTorchLightning/pytorch-lightning/pull/1279))
@@ -42,6 +43,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 ### Deprecated
 
 - Deprecated Trainer argument `print_nan_grads` ([#1097](https://github.com/PyTorchLightning/pytorch-lightning/pull/1097))
+- Deprecated Trainer argument `show_progress_bar` ([#1108](https://github.com/PyTorchLightning/pytorch-lightning/pull/1108))
 
 ### Removed
 
@@ -70,9 +72,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Added
 
-- Added automatic sampler setup. Depending on DDP or TPU, lightning configures the sampler correctly (user needs to do nothing) ([#926](https://github.com/PyTorchLightning/pytorch-lightning/pull/926))
-- Added `reload_dataloaders_every_epoch=False` flag for trainer. Some users require reloading data every epoch ([#926](https://github.com/PyTorchLightning/pytorch-lightning/pull/926))
-- Added `progress_bar_refresh_rate=50` flag for trainer. Throttle refresh rate on notebooks ([#926](https://github.com/PyTorchLightning/pytorch-lightning/pull/926))
+- Added automatic sampler setup. Depending on DDP or TPU, lightning configures the sampler correctly (user needs to do nothing) ([#926](https://github.com/PyTorchLightning/pytorch-lightning/pull/926))
+- Added `reload_dataloaders_every_epoch=False` flag for trainer. Some users require reloading data every epoch ([#926](https://github.com/PyTorchLightning/pytorch-lightning/pull/926))
+- Added `progress_bar_refresh_rate=50` flag for trainer. Throttle refresh rate on notebooks ([#926](https://github.com/PyTorchLightning/pytorch-lightning/pull/926))
 - Updated governance docs
 - Added a check to ensure that the metric used for early stopping exists before training commences ([#542](https://github.com/PyTorchLightning/pytorch-lightning/pull/542))
 - Added `optimizer_idx` argument to `backward` hook ([#733](https://github.com/PyTorchLightning/pytorch-lightning/pull/733))
@@ -95,7 +97,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Added TPU gradient clipping ([#963](https://github.com/PyTorchLightning/pytorch-lightning/pull/963))
 - Added max/min number of steps in `Trainer` ([#728](https://github.com/PyTorchLightning/pytorch-lightning/pull/728))
-
 
 ### Changed
 
 - Improved `NeptuneLogger` by adding `close_after_fit` argument to allow logging after training([#908](https://github.com/PyTorchLightning/pytorch-lightning/pull/1084))
@@ -107,9 +108,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Freezed models `hparams` as `Namespace` property ([#1029](https://github.com/PyTorchLightning/pytorch-lightning/pull/1029))
 - Dropped `logging` config in package init ([#1015](https://github.com/PyTorchLightning/pytorch-lightning/pull/1015))
 - Renames model steps ([#1051](https://github.com/PyTorchLightning/pytorch-lightning/pull/1051))
-  * `training_end` >> `training_epoch_end`
-  * `validation_end` >> `validation_epoch_end`
-  * `test_end` >> `test_epoch_end`
+  - `training_end` >> `training_epoch_end`
+  - `validation_end` >> `validation_epoch_end`
+  - `test_end` >> `test_epoch_end`
 - Refactor dataloading, supports infinite dataloader ([#955](https://github.com/PyTorchLightning/pytorch-lightning/pull/955))
 - Create single file in `TensorBoardLogger` ([#777](https://github.com/PyTorchLightning/pytorch-lightning/pull/777))
 
@@ -117,7 +118,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Deprecated `pytorch_lightning.logging` ([#767](https://github.com/PyTorchLightning/pytorch-lightning/pull/767))
 - Deprecated `LightningModule.load_from_metrics` in favour of `LightningModule.load_from_checkpoint` ([#995](https://github.com/PyTorchLightning/pytorch-lightning/pull/995), [#1079](https://github.com/PyTorchLightning/pytorch-lightning/pull/1079))
-- Deprecated `@data_loader` decorator ([#926](https://github.com/PyTorchLightning/pytorch-lightning/pull/926))
+- Deprecated `@data_loader` decorator ([#926](https://github.com/PyTorchLightning/pytorch-lightning/pull/926))
 - Deprecated model steps `training_end`, `validation_end` and `test_end` ([#1051](https://github.com/PyTorchLightning/pytorch-lightning/pull/1051), [#1056](https://github.com/PyTorchLightning/pytorch-lightning/pull/1056))
 
 ### Removed
 
@@ -307,9 +308,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Added
 
-- Added the flag `log_gpu_memory` to `Trainer` to deactivate logging of GPU
-memory utilization
-- Added SLURM resubmit functionality (port from test-tube)
+- Added the flag `log_gpu_memory` to `Trainer` to deactivate logging of GPU memory utilization
 - Added optional weight_save_path to trainer to remove the need for a checkpoint_callback when using cluster training
 - Added option to use single gpu per node with `DistributedDataParallel`
 
diff --git a/pytorch_lightning/trainer/__init__.py b/pytorch_lightning/trainer/__init__.py
index 5c75bfc414073..395a21c955d61 100644
--- a/pytorch_lightning/trainer/__init__.py
+++ b/pytorch_lightning/trainer/__init__.py
@@ -646,6 +646,8 @@ def on_train_end(self):
     # default used by the Trainer
     trainer = Trainer(progress_bar_refresh_rate=1)
 
+    # disable progress bar
+    trainer = Trainer(progress_bar_refresh_rate=0)
 
 reload_dataloaders_every_epoch
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -702,12 +704,9 @@ def on_train_end(self):
 show_progress_bar
 ^^^^^^^^^^^^^^^^^
 
-If true shows tqdm progress bar
+.. warning:: .. deprecated:: 0.7.2
 
-Example::
-
-    # default used by the Trainer
-    trainer = Trainer(show_progress_bar=True)
+    Set `progress_bar_refresh_rate` to 0 instead. Will be removed in v0.9.0.
 
 test_percent_check
 ^^^^^^^^^^^^^^^^^^
diff --git a/pytorch_lightning/trainer/deprecated_api.py b/pytorch_lightning/trainer/deprecated_api.py
index 8c4ca8648b5bb..4cf556acd0eba 100644
--- a/pytorch_lightning/trainer/deprecated_api.py
+++ b/pytorch_lightning/trainer/deprecated_api.py
@@ -87,3 +87,22 @@ def nb_sanity_val_steps(self, nb):
                       "`num_sanity_val_steps` since v0.5.0"
                       " and this method will be removed in v0.8.0", DeprecationWarning)
         self.num_sanity_val_steps = nb
+
+
+class TrainerDeprecatedAPITillVer0_9(ABC):
+
+    def __init__(self):
+        super().__init__()  # mixin calls super too
+
+    @property
+    def show_progress_bar(self):
+        """Back compatibility, will be removed in v0.9.0"""
+        warnings.warn("Argument `show_progress_bar` is now set by `progress_bar_refresh_rate` since v0.7.2"
+                      " and this method will be removed in v0.9.0", DeprecationWarning)
+        return self.progress_bar_refresh_rate >= 1
+
+    @show_progress_bar.setter
+    def show_progress_bar(self, tf):
+        """Back compatibility, will be removed in v0.9.0"""
+        warnings.warn("Argument `show_progress_bar` is now set by `progress_bar_refresh_rate` since v0.7.2"
+                      " and this method will be removed in v0.9.0", DeprecationWarning)
diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py
index dc3625e356865..7a7f73bea3d70 100644
--- a/pytorch_lightning/trainer/distrib_data_parallel.py
+++ b/pytorch_lightning/trainer/distrib_data_parallel.py
@@ -281,7 +281,7 @@ def ddp_train(self, gpu_idx, model):
             self.node_rank = 0
 
         # show progressbar only on progress_rank 0
-        self.show_progress_bar = self.show_progress_bar and self.node_rank == 0 and gpu_idx == 0
+        self.progress_bar_refresh_rate = self.progress_bar_refresh_rate if self.node_rank == 0 and gpu_idx == 0 else 0
 
         # determine which process we are and world size
         if self.use_ddp:
diff --git a/pytorch_lightning/trainer/distrib_parts.py b/pytorch_lightning/trainer/distrib_parts.py
index 43e4df038d79c..d5217c895185a 100644
--- a/pytorch_lightning/trainer/distrib_parts.py
+++ b/pytorch_lightning/trainer/distrib_parts.py
@@ -480,7 +480,7 @@ def tpu_train(self, tpu_core_idx, model):
         self.tpu_global_core_rank = xm.get_ordinal()
 
         # avoid duplicating progress bar
-        self.show_progress_bar = self.show_progress_bar and self.tpu_global_core_rank == 0
+        self.progress_bar_refresh_rate = self.progress_bar_refresh_rate if self.tpu_global_core_rank == 0 else 0
 
         # track current tpu
         self.current_tpu_idx = tpu_core_idx
diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py
index bc18958891a3c..7c4f4f238852c 100644
--- a/pytorch_lightning/trainer/evaluation_loop.py
+++ b/pytorch_lightning/trainer/evaluation_loop.py
@@ -163,7 +163,6 @@ class TrainerEvaluationLoopMixin(ABC):
     num_val_batches: int
     fast_dev_run: ...
     process_position: ...
-    show_progress_bar: ...
     process_output: ...
     training_tqdm_dict: ...
     proc_rank: int
@@ -278,7 +277,7 @@ def _evaluate(self, model: LightningModule, dataloaders, max_batches: int, test_
                 dl_outputs.append(output)
 
                 # batch done
-                if batch_idx % self.progress_bar_refresh_rate == 0:
+                if self.progress_bar_refresh_rate >= 1 and batch_idx % self.progress_bar_refresh_rate == 0:
                     if test_mode:
                         self.test_progress_bar.update(self.progress_bar_refresh_rate)
                     else:
@@ -361,7 +360,7 @@ def run_evaluation(self, test_mode: bool = False):
         desc = 'Testing' if test_mode else 'Validating'
         total = max_batches if max_batches != float('inf') else None
         pbar = tqdm(desc=desc, total=total, leave=test_mode, position=position,
-                    disable=not self.show_progress_bar, dynamic_ncols=True, file=sys.stdout)
+                    disable=not self.progress_bar_refresh_rate, dynamic_ncols=True, file=sys.stdout)
         setattr(self, f'{"test" if test_mode else "val"}_progress_bar', pbar)
 
         # run evaluation
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index 005512d213d74..4a3e662fd71a0 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -21,7 +21,8 @@
 from pytorch_lightning.trainer.callback_config import TrainerCallbackConfigMixin
 from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin
 from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
-from pytorch_lightning.trainer.deprecated_api import TrainerDeprecatedAPITillVer0_8
+from pytorch_lightning.trainer.deprecated_api import (TrainerDeprecatedAPITillVer0_8,
+                                                      TrainerDeprecatedAPITillVer0_9)
 from pytorch_lightning.trainer.distrib_data_parallel import TrainerDDPMixin
 from pytorch_lightning.trainer.distrib_parts import TrainerDPMixin, parse_gpu_ids, determine_root_gpu_device
 from pytorch_lightning.trainer.evaluation_loop import TrainerEvaluationLoopMixin
@@ -66,12 +67,13 @@ class Trainer(
     TrainerCallbackConfigMixin,
     TrainerCallbackHookMixin,
     TrainerDeprecatedAPITillVer0_8,
+    TrainerDeprecatedAPITillVer0_9,
 ):
     DEPRECATED_IN_0_8 = (
         'gradient_clip', 'nb_gpu_nodes', 'max_nb_epochs', 'min_nb_epochs',
        'add_row_log_interval', 'nb_sanity_val_steps'
     )
-    DEPRECATED_IN_0_9 = ('use_amp',)
+    DEPRECATED_IN_0_9 = ('use_amp', 'show_progress_bar')
 
     def __init__(
             self,
@@ -86,7 +88,7 @@ def __init__(
             gpus: Optional[Union[List[int], str, int]] = None,
             num_tpu_cores: Optional[int] = None,
             log_gpu_memory: Optional[str] = None,
-            show_progress_bar: bool = True,
+            show_progress_bar=None,  # backward compatible, todo: remove in v0.9.0
             progress_bar_refresh_rate: int = 1,
             overfit_pct: float = 0.0,
             track_grad_norm: int = -1,
@@ -161,9 +163,12 @@ def __init__(
             log_gpu_memory: None, 'min_max', 'all'. Might slow performance
 
-            show_progress_bar: If true shows tqdm progress bar
+            show_progress_bar:
+                .. warning:: .. deprecated:: 0.7.2
+
+                    Set `progress_bar_refresh_rate` to a positive integer to enable. Will be removed in v0.9.0.
 
-            progress_bar_refresh_rate: How often to refresh progress bar (in steps)
+            progress_bar_refresh_rate: How often to refresh progress bar (in steps). Value ``0`` disables progress bar.
 
             overfit_pct: How much of training-, validation-, and test dataset to check.
 
@@ -414,7 +419,9 @@ def __init__(
 
         # can't init progress bar here because starting a new process
         # means the progress_bar won't survive pickling
-        self.show_progress_bar = show_progress_bar
+        # backward compatibility
+        if show_progress_bar is not None:
+            self.show_progress_bar = show_progress_bar
 
         # logging
         self.log_save_interval = log_save_interval
@@ -820,7 +827,7 @@ def run_pretrain_routine(self, model: LightningModule):
             pbar = tqdm(desc='Validation sanity check',
                         total=self.num_sanity_val_steps * len(self.val_dataloaders),
                         leave=False, position=2 * self.process_position,
-                        disable=not self.show_progress_bar, dynamic_ncols=True)
+                        disable=not self.progress_bar_refresh_rate, dynamic_ncols=True)
             self.main_progress_bar = pbar
             # dummy validation progress bar
             self.val_progress_bar = tqdm(disable=True)
diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py
index 9e9f8af3342db..e3a8bb16a7b14 100644
--- a/pytorch_lightning/trainer/training_loop.py
+++ b/pytorch_lightning/trainer/training_loop.py
@@ -623,7 +623,7 @@ def optimizer_closure():
         self.get_model().on_batch_end()
 
         # update progress bar
-        if batch_idx % self.progress_bar_refresh_rate == 0:
+        if self.progress_bar_refresh_rate >= 1 and batch_idx % self.progress_bar_refresh_rate == 0:
             self.main_progress_bar.update(self.progress_bar_refresh_rate)
             self.main_progress_bar.set_postfix(**self.training_tqdm_dict)
 
diff --git a/tests/models/test_amp.py b/tests/models/test_amp.py
index a51ea938bd5bd..66e99ba318deb 100644
--- a/tests/models/test_amp.py
+++ b/tests/models/test_amp.py
@@ -21,7 +21,6 @@ def test_amp_single_gpu(tmpdir):
 
     trainer_options = dict(
         default_save_path=tmpdir,
-        show_progress_bar=True,
         max_epochs=1,
         gpus=1,
         distributed_backend='ddp',
@@ -42,7 +41,6 @@ def test_no_amp_single_gpu(tmpdir):
 
     trainer_options = dict(
         default_save_path=tmpdir,
-        show_progress_bar=True,
         max_epochs=1,
         gpus=1,
         distributed_backend='dp',
@@ -66,7 +64,6 @@ def test_amp_gpu_ddp(tmpdir):
 
     trainer_options = dict(
         default_save_path=tmpdir,
-        show_progress_bar=True,
         max_epochs=1,
         gpus=2,
         distributed_backend='ddp',
@@ -90,7 +87,6 @@ def test_amp_gpu_ddp_slurm_managed(tmpdir):
     model = LightningTestModel(hparams)
 
     trainer_options = dict(
-        show_progress_bar=True,
         max_epochs=1,
         gpus=[0],
         distributed_backend='ddp',
@@ -128,7 +124,7 @@ def test_cpu_model_with_amp(tmpdir):
 
     trainer_options = dict(
         default_save_path=tmpdir,
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         logger=tutils.get_default_testtube_logger(tmpdir),
         max_epochs=1,
         train_percent_check=0.4,
diff --git a/tests/models/test_cpu.py b/tests/models/test_cpu.py
index 5ce2d19567bdd..0d8dcba27cd20 100644
--- a/tests/models/test_cpu.py
+++ b/tests/models/test_cpu.py
@@ -27,7 +27,6 @@ def test_early_stopping_cpu_model(tmpdir):
         gradient_clip_val=1.0,
         overfit_pct=0.20,
         track_grad_norm=2,
-        show_progress_bar=True,
         logger=tutils.get_default_testtube_logger(tmpdir),
         train_percent_check=0.1,
         val_percent_check=0.1,
@@ -48,7 +47,7 @@ def test_lbfgs_cpu_model(tmpdir):
     trainer_options = dict(
         default_save_path=tmpdir,
         max_epochs=2,
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         weights_summary='top',
         train_percent_check=1.0,
         val_percent_check=0.2,
@@ -67,7 +66,7 @@ def test_default_logger_callbacks_cpu_model(tmpdir):
         max_epochs=1,
         gradient_clip_val=1.0,
         overfit_pct=0.20,
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         train_percent_check=0.01,
         val_percent_check=0.01,
     )
@@ -95,7 +94,7 @@ def test_running_test_after_fitting(tmpdir):
 
     trainer_options = dict(
         default_save_path=tmpdir,
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         max_epochs=8,
         train_percent_check=0.4,
         val_percent_check=0.2,
@@ -133,7 +132,7 @@ class CurrentTestModel(LightTrainDataloader, LightTestMixin, TestModelBase):
     checkpoint = tutils.init_checkpoint_callback(logger)
 
     trainer_options = dict(
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         max_epochs=1,
         train_percent_check=0.4,
         val_percent_check=0.2,
@@ -226,7 +225,7 @@ def test_cpu_model(tmpdir):
 
     trainer_options = dict(
         default_save_path=tmpdir,
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         logger=tutils.get_default_testtube_logger(tmpdir),
         max_epochs=1,
         train_percent_check=0.4,
@@ -247,7 +246,7 @@ def test_all_features_cpu_model(tmpdir):
         gradient_clip_val=1.0,
         overfit_pct=0.20,
         track_grad_norm=2,
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         logger=tutils.get_default_testtube_logger(tmpdir),
         accumulate_grad_batches=2,
         max_epochs=1,
@@ -344,7 +343,7 @@ def test_single_gpu_model(tmpdir):
 
     trainer_options = dict(
         default_save_path=tmpdir,
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         max_epochs=1,
         train_percent_check=0.1,
         val_percent_check=0.1,
diff --git a/tests/models/test_gpu.py b/tests/models/test_gpu.py
index 0b821c74a6fd5..8e95680d232d3 100644
--- a/tests/models/test_gpu.py
+++ b/tests/models/test_gpu.py
@@ -27,7 +27,6 @@ def test_multi_gpu_model_ddp2(tmpdir):
     model, hparams = tutils.get_default_model()
     trainer_options = dict(
         default_save_path=tmpdir,
-        show_progress_bar=True,
         max_epochs=1,
         train_percent_check=0.4,
         val_percent_check=0.2,
@@ -49,7 +48,7 @@ def test_multi_gpu_model_ddp(tmpdir):
     model, hparams = tutils.get_default_model()
     trainer_options = dict(
         default_save_path=tmpdir,
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         max_epochs=1,
         train_percent_check=0.4,
         val_percent_check=0.2,
@@ -69,7 +68,7 @@ def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
     model, hparams = tutils.get_default_model()
 
     trainer_options = dict(default_save_path=tmpdir,
-                           show_progress_bar=False,
+                           progress_bar_refresh_rate=0,
                            max_epochs=1,
                            train_percent_check=0.4,
                            val_percent_check=0.2,
@@ -165,7 +164,7 @@ def test_multi_gpu_none_backend(tmpdir):
     model, hparams = tutils.get_default_model()
     trainer_options = dict(
         default_save_path=tmpdir,
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         max_epochs=1,
         train_percent_check=0.1,
         val_percent_check=0.1,
@@ -184,7 +183,7 @@ def test_multi_gpu_model_dp(tmpdir):
     model, hparams = tutils.get_default_model()
     trainer_options = dict(
         default_save_path=tmpdir,
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         distributed_backend='dp',
         max_epochs=1,
         train_percent_check=0.1,
diff --git a/tests/models/test_restore.py b/tests/models/test_restore.py
index 37a04ec53bd21..62a7c0aa22f49 100644
--- a/tests/models/test_restore.py
+++ b/tests/models/test_restore.py
@@ -33,7 +33,7 @@ def test_running_test_pretrained_model_ddp(tmpdir):
     checkpoint = tutils.init_checkpoint_callback(logger)
 
     trainer_options = dict(
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         max_epochs=1,
         train_percent_check=0.4,
         val_percent_check=0.2,
@@ -81,7 +81,7 @@ def test_running_test_pretrained_model(tmpdir):
     checkpoint = tutils.init_checkpoint_callback(logger)
 
     trainer_options = dict(
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         max_epochs=4,
         train_percent_check=0.4,
         val_percent_check=0.2,
@@ -114,7 +114,7 @@ def test_load_model_from_checkpoint(tmpdir):
     model = LightningTestModel(hparams)
 
     trainer_options = dict(
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         max_epochs=2,
         train_percent_check=0.4,
         val_percent_check=0.2,
@@ -165,7 +165,6 @@ def test_running_test_pretrained_model_dp(tmpdir):
     checkpoint = tutils.init_checkpoint_callback(logger)
 
     trainer_options = dict(
-        show_progress_bar=True,
         max_epochs=2,
         train_percent_check=0.4,
         val_percent_check=0.2,
@@ -202,7 +201,6 @@ def test_dp_resume(tmpdir):
     model = LightningTestModel(hparams)
 
     trainer_options = dict(
-        show_progress_bar=True,
         max_epochs=1,
         gpus=2,
         distributed_backend='dp',
@@ -329,7 +327,7 @@ def test_model_saving_loading(tmpdir):
 
 def test_load_model_with_missing_hparams(tmpdir):
     trainer_options = dict(
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         max_epochs=1,
         checkpoint_callback=ModelCheckpoint(tmpdir, save_top_k=-1),
         logger=False,
diff --git a/tests/test_deprecated.py b/tests/test_deprecated.py
index 9bde9437bda06..fd373e9659e04 100644
--- a/tests/test_deprecated.py
+++ b/tests/test_deprecated.py
@@ -48,6 +48,15 @@ def test_tbd_remove_in_v0_8_0_trainer():
             'Wrongly passed deprecated argument "%s" to attribute "%s"' % (attr_old, attr_new)
 
 
+def test_tbd_remove_in_v0_9_0_trainer():
+    # test show_progress_bar set by progress_bar_refresh_rate
+    trainer = Trainer(progress_bar_refresh_rate=0, show_progress_bar=True)
+    assert not getattr(trainer, 'show_progress_bar')
+
+    trainer = Trainer(progress_bar_refresh_rate=50, show_progress_bar=False)
+    assert getattr(trainer, 'show_progress_bar')
+
+
 def test_tbd_remove_in_v0_9_0_module_imports():
     from pytorch_lightning.core.decorators import data_loader  # noqa: F811
 
diff --git a/tests/trainer/test_callbacks.py b/tests/trainer/test_callbacks.py
index ba398320539c7..9bcce6b9e2e87 100644
--- a/tests/trainer/test_callbacks.py
+++ b/tests/trainer/test_callbacks.py
@@ -99,7 +99,7 @@ def on_test_end(self, trainer, pl_module):
         'max_epochs': 1,
         'val_percent_check': 0.1,
         'train_percent_check': 0.2,
-        'show_progress_bar': False
+        'progress_bar_refresh_rate': 0
     }
 
     assert not test_callback.on_init_start_called
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index 923a4f833d7a0..30f65576f27c7 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -332,7 +332,7 @@ def increment_batch(self, _):
     model = _new_model()
     trainer_options = dict(
-        show_progress_bar=False,
+        progress_bar_refresh_rate=0,
         max_epochs=2,
         train_percent_check=0.65,
         val_percent_check=1,