Commit 9e4a44a

Author: Jeff Yang
remove old configs leftover from removal of py3.5/py2 (pytorch#1551)

* rm old configs leftover from removal of py3.5
* black format

1 parent: 3c8b781
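
Context for the "black format" half of this commit: a trailing comma after **kwargs in a function signature only became legal syntax in Python 3.6, and black only adds it when every target version is 3.6+, which is why these commas could land only once py3.5 support was dropped. A minimal sketch of the distinction (the function and its arguments are illustrative, not from the repo):

    # Valid on Python >= 3.6 only: on Python <= 3.5 the trailing comma after
    # **spawn_kwargs is a SyntaxError.
    def run(
        nproc_per_node=None,
        stop_iteration=None,
        **spawn_kwargs,  # <- the comma this commit adds throughout
    ):
        return spawn_kwargs

    print(run(nnodes=1))  # {'nnodes': 1}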

File tree: 19 files changed, +31 -214 lines

.travis.yml (-175)

This file was deleted.

conda.recipe/build_and_upload.sh (-1)

@@ -26,5 +26,4 @@ conda config --set anaconda_upload no
 conda build --no-test --output-folder conda_build conda.recipe -c pytorch

 # Upload to Anaconda
-# We could use --all but too much platforms to uploaded
 ls conda_build/*/*.tar.bz2 | xargs -I {} anaconda -v -t $ANACONDA_TOKEN upload -u $UPLOAD_USER {}
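
For reference, a hypothetical Python equivalent of the shell upload loop above (not part of the repo), assuming the same ANACONDA_TOKEN and UPLOAD_USER environment variables are set:

    import glob
    import os
    import subprocess

    # Upload every built conda package to Anaconda Cloud, one file at a time,
    # mirroring: ls conda_build/*/*.tar.bz2 | xargs -I {} anaconda ... upload ... {}
    for pkg in glob.glob("conda_build/*/*.tar.bz2"):
        subprocess.run(
            ["anaconda", "-v", "-t", os.environ["ANACONDA_TOKEN"],
             "upload", "-u", os.environ["UPLOAD_USER"], pkg],
            check=True,  # stop the script if any upload fails
        )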

conda.recipe/meta.yaml (+2 -2)

@@ -15,12 +15,12 @@ build:
 # https://conda.io/docs/user-guide/tasks/build-packages/define-metadata.html#export-runtime-requirements
 requirements:
   build:
-    - python>=3.5
+    - python>=3.6
     - setuptools
     - pytorch>=1.3

   run:
-    - python>=3.5
+    - python>=3.6
     - pytorch>=1.3

 test:

examples/contrib/cifar10/main.py (+1 -1)

@@ -151,7 +151,7 @@ def run(
     nproc_per_node=None,
     stop_iteration=None,
     with_trains=False,
-    **spawn_kwargs
+    **spawn_kwargs,
 ):
     """Main entry to train an model on CIFAR10 dataset.
ignite/contrib/engines/common.py (+12 -12)

@@ -46,7 +46,7 @@ def setup_common_training_handlers(
     stop_on_nan: bool = True,
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:
     """Helper method to setup trainer with common handlers (it also supports distributed configuration):

@@ -149,7 +149,7 @@ def _setup_common_training_handlers(
     stop_on_nan: bool = True,
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:
     if output_path is not None and save_handler is not None:
         raise ValueError(

@@ -237,7 +237,7 @@ def _setup_common_distrib_training_handlers(
     stop_on_nan: bool = True,
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:

     _setup_common_training_handlers(

@@ -331,7 +331,7 @@ def setup_tb_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> TensorboardLogger:
     """Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:

@@ -363,7 +363,7 @@ def setup_visdom_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> VisdomLogger:
     """Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:

@@ -394,7 +394,7 @@ def setup_mlflow_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> MLflowLogger:
     """Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:

@@ -425,7 +425,7 @@ def setup_neptune_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> NeptuneLogger:
     """Method to setup Neptune logging on trainer and a list of evaluators. Logged metrics are:

@@ -456,7 +456,7 @@ def setup_wandb_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> WandBLogger:
     """Method to setup WandB logging on trainer and a list of evaluators. Logged metrics are:

@@ -487,7 +487,7 @@ def setup_plx_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> PolyaxonLogger:
     """Method to setup Polyaxon logging on trainer and a list of evaluators. Logged metrics are:

@@ -518,7 +518,7 @@ def setup_trains_logging(
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
     log_every_iters: int = 100,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> TrainsLogger:
     """Method to setup Trains logging on trainer and a list of evaluators. Logged metrics are:

@@ -560,7 +560,7 @@ def gen_save_best_models_by_val_score(
     n_saved: int = 3,
     trainer: Optional[Engine] = None,
     tag: str = "val",
-    **kwargs: Any
+    **kwargs: Any,
 ) -> Checkpoint:
     """Method adds a handler to ``evaluator`` to save ``n_saved`` of best models based on the metric
     (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).

@@ -619,7 +619,7 @@ def save_best_model_by_val_score(
     n_saved: int = 3,
     trainer: Optional[Engine] = None,
     tag: str = "val",
-    **kwargs: Any
+    **kwargs: Any,
 ) -> Checkpoint:
     """Method adds a handler to ``evaluator`` to save on a disk ``n_saved`` of best models based on the metric
     (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
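
All of the setup_*_logging helpers touched above forward **kwargs to the underlying logger constructor. A minimal usage sketch, assuming trainer and evaluator are existing ignite.engine.Engine objects:

    from ignite.contrib.engines import common

    # Keyword arguments beyond these named ones pass through **kwargs to
    # TensorboardLogger; **kwargs is the parameter gaining the trailing
    # comma in the signatures above.
    tb_logger = common.setup_tb_logging(
        output_path="/tmp/tb_logs",
        trainer=trainer,
        evaluators={"validation": evaluator},
        log_every_iters=100,
    )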

ignite/contrib/handlers/param_scheduler.py (+1 -1)

@@ -579,7 +579,7 @@ def simulate_values(  # type: ignore[override]
     schedulers: List[ParamScheduler],
     durations: List[int],
     param_names: Optional[Union[List[str], Tuple[str]]] = None,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> List[List[int]]:
     """Method to simulate scheduled values during num_events events.

ignite/contrib/handlers/tqdm_logger.py (+1 -1)

@@ -104,7 +104,7 @@ def __init__(
     self,
     persist: bool = False,
     bar_format: str = "{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]",
-    **tqdm_kwargs: Any
+    **tqdm_kwargs: Any,
 ) -> None:

     try:

ignite/contrib/handlers/trains_logger.py (+1 -1)

@@ -638,7 +638,7 @@ def __init__(
     output_uri: Optional[str] = None,
     dirname: Optional[str] = None,
     *args: Any,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:

     self._setup_check_trains(logger, output_uri)

ignite/contrib/handlers/visdom_logger.py (+1 -1)

@@ -145,7 +145,7 @@ def __init__(
     port: Optional[int] = None,
     num_workers: int = 1,
     raise_exceptions: bool = True,
-    **kwargs: Any
+    **kwargs: Any,
 ):
     try:
         import visdom

ignite/distributed/comp_models/horovod.py (+1 -1)

@@ -129,7 +129,7 @@ def spawn(  # type: ignore[override]
     nproc_per_node: int = 1,
     hosts: Optional[str] = None,
     backend: str = HOROVOD,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:
     c1 = "nnodes" in kwargs and kwargs["nnodes"] > 1
     c2 = "node_rank" in kwargs and kwargs["node_rank"] > 0

ignite/distributed/comp_models/native.py (+1 -1)

@@ -283,7 +283,7 @@ def spawn(  # type: ignore[override]
     master_addr: str = "127.0.0.1",
     master_port: int = 2222,
     backend: str = "nccl",
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:
     world_size = nnodes * nproc_per_node

ignite/distributed/comp_models/xla.py (+1 -1)

@@ -113,7 +113,7 @@ def spawn(  # type: ignore[override]
     nnodes: int = 1,
     node_rank: int = 0,
     backend: str = XLA_TPU,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> None:
     if "start_method" not in kwargs:
         kwargs["start_method"] = "fork"

ignite/distributed/launcher.py (+2 -2)

@@ -181,7 +181,7 @@ def __init__(
     node_rank: Optional[int] = None,
     master_addr: Optional[str] = None,
     master_port: Optional[int] = None,
-    **spawn_kwargs: Any
+    **spawn_kwargs: Any,
 ) -> None:
     if backend is not None:
         if backend not in idist.available_backends():

@@ -216,7 +216,7 @@ def _setup_spawn_params(
     node_rank: Optional[int] = None,
     master_addr: Optional[str] = None,
     master_port: Optional[int] = None,
-    **spawn_kwargs: Any
+    **spawn_kwargs: Any,
 ) -> Dict:
     if nproc_per_node < 1:
         raise ValueError(f"Argument nproc_per_node should positive, but given {nproc_per_node}")
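
The Parallel launcher and _setup_spawn_params above are where the **spawn_kwargs from entry points such as the CIFAR10 example ultimately land. A usage sketch of that flow (backend and process count are illustrative):

    import ignite.distributed as idist

    def training(local_rank):
        # Each spawned worker runs this with its own local rank.
        print(f"rank {idist.get_rank()} of {idist.get_world_size()}")

    if __name__ == "__main__":
        # Extra keyword arguments (nnodes, node_rank, ...) would be forwarded
        # through **spawn_kwargs, matching the signatures in this diff.
        with idist.Parallel(backend="gloo", nproc_per_node=2) as parallel:
            parallel.run(training)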
