@@ -46,7 +46,7 @@ def setup_common_training_handlers(
     stop_on_nan: bool = True,
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:
     """Helper method to setup trainer with common handlers (it also supports distributed configuration):
 
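A minimal usage sketch (not part of this diff) for the helper touched above, assuming a stub Engine update function; the keyword arguments shown are illustrative:

    from ignite.engine import Engine
    from ignite.contrib.engines.common import setup_common_training_handlers

    # Stub trainer whose update step returns a constant loss, for illustration only.
    trainer = Engine(lambda engine, batch: 0.0)

    # Attaches common handlers (TerminateOnNan, optional CUDA cache clearing, ...) to the trainer.
    setup_common_training_handlers(
        trainer,
        with_pbars=False,   # skip the tqdm progress bar in this sketch
        stop_on_nan=True,
    )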
@@ -149,7 +149,7 @@ def _setup_common_training_handlers(
     stop_on_nan: bool = True,
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:
     if output_path is not None and save_handler is not None:
         raise ValueError(
@@ -237,7 +237,7 @@ def _setup_common_distrib_training_handlers(
     stop_on_nan: bool = True,
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:
 
     _setup_common_training_handlers(
@@ -331,7 +331,7 @@ def setup_tb_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> TensorboardLogger:
     """Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:
 
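A hedged sketch of calling the TensorBoard helper above, assuming a TensorBoard writer backend is installed; the log directory and logging frequency are illustrative, and extra keyword arguments would be forwarded through **kwargs to the underlying logger:

    from ignite.engine import Engine
    from ignite.contrib.engines import common

    # Stub trainer for illustration only.
    trainer = Engine(lambda engine, batch: 0.0)

    # Attaches TensorBoard output handlers to the trainer and returns the logger.
    tb_logger = common.setup_tb_logging(
        "/tmp/tb-logs",       # output_path, illustrative
        trainer,
        log_every_iters=50,
    )
    tb_logger.close()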
@@ -363,7 +363,7 @@ def setup_visdom_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> VisdomLogger:
     """Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:
 
@@ -394,7 +394,7 @@ def setup_mlflow_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> MLflowLogger:
     """Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:
 
@@ -425,7 +425,7 @@ def setup_neptune_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> NeptuneLogger:
     """Method to setup Neptune logging on trainer and a list of evaluators. Logged metrics are:
 
@@ -456,7 +456,7 @@ def setup_wandb_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> WandBLogger:
     """Method to setup WandB logging on trainer and a list of evaluators. Logged metrics are:
 
@@ -487,7 +487,7 @@ def setup_plx_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> PolyaxonLogger:
     """Method to setup Polyaxon logging on trainer and a list of evaluators. Logged metrics are:
 
@@ -518,7 +518,7 @@ def setup_trains_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> TrainsLogger:
     """Method to setup Trains logging on trainer and a list of evaluators. Logged metrics are:
 
@@ -560,7 +560,7 @@ def gen_save_best_models_by_val_score(
     n_saved: int = 3,
     trainer: Optional[Engine] = None,
     tag: str = "val",
-    **kwargs: Any
+    **kwargs: Any,
 ) -> Checkpoint:
     """Method adds a handler to ``evaluator`` to save ``n_saved`` of best models based on the metric
     (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
@@ -619,7 +619,7 @@ def save_best_model_by_val_score(
     n_saved: int = 3,
     trainer: Optional[Engine] = None,
     tag: str = "val",
-    **kwargs: Any
+    **kwargs: Any,
 ) -> Checkpoint:
     """Method adds a handler to ``evaluator`` to save on a disk ``n_saved`` of best models based on the metric
     (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
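Similarly, a minimal sketch for the checkpointing helper above (assumptions: the leading positional parameters are the output directory, evaluator, model and metric name, and an "accuracy" metric would be attached to the evaluator elsewhere):

    import torch.nn as nn
    from ignite.engine import Engine
    from ignite.contrib.engines import common

    model = nn.Linear(2, 2)                        # placeholder model
    trainer = Engine(lambda engine, batch: 0.0)    # stub trainer
    evaluator = Engine(lambda engine, batch: 0.0)  # stub evaluator; metrics attached elsewhere

    # Keeps the n_saved best checkpoints, ranked by evaluator.state.metrics["accuracy"].
    common.save_best_model_by_val_score(
        "/tmp/checkpoints",    # output_path, illustrative
        evaluator,
        model,
        metric_name="accuracy",
        n_saved=2,
        trainer=trainer,
        tag="val",
    )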