
Commit d716c42

revamp log api usage method (#5072)
1 parent e0c5cc4 · commit d716c42

35 files changed (+47, -53 lines)
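The diff below changes every call site of _log_api_usage_once from passing the object itself (or a fully qualified string such as "torchvision.ops.nms") to passing an explicit namespace plus name: _log_api_usage_once("ops", "nms") for free functions and _log_api_usage_once("models", self.__class__.__name__) inside nn.Module constructors. As a rough, hypothetical sketch of what such a two-argument helper could look like, assuming it only builds a dotted key and forwards it to PyTorch's built-in usage logger (the real body of _log_api_usage_once lives in torchvision/utils.py and is not shown in this diff):

import torch

def _log_api_usage_once(module: str, name: str) -> None:
    # Assumed sketch, not the actual torchvision implementation:
    # compose a key like "torchvision.ops.nms" or "torchvision.models.ResNet"
    # and hand it to PyTorch's API-usage logging hook.
    torch._C._log_api_usage_once(f"torchvision.{module}.{name}")

# Example call sites matching the pattern applied throughout this commit:
#   _log_api_usage_once("ops", "nms")
#   _log_api_usage_once("models", self.__class__.__name__)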

torchvision/datasets/vision.py (+1 -1)

@@ -35,7 +35,7 @@ def __init__(
         transform: Optional[Callable] = None,
         target_transform: Optional[Callable] = None,
     ) -> None:
-        _log_api_usage_once(self)
+        _log_api_usage_once("datasets", self.__class__.__name__)
         if isinstance(root, torch._six.string_classes):
             root = os.path.expanduser(root)
         self.root = root

torchvision/models/alexnet.py (+1 -1)

@@ -18,7 +18,7 @@
 class AlexNet(nn.Module):
     def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.features = nn.Sequential(
             nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
             nn.ReLU(inplace=True),

torchvision/models/densenet.py (+1 -1)

@@ -163,7 +163,7 @@ def __init__(
     ) -> None:

         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         # First convolution
         self.features = nn.Sequential(

torchvision/models/detection/generalized_rcnn.py (+1 -1)

@@ -27,7 +27,7 @@ class GeneralizedRCNN(nn.Module):

     def __init__(self, backbone: nn.Module, rpn: nn.Module, roi_heads: nn.Module, transform: nn.Module) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.transform = transform
         self.backbone = backbone
         self.rpn = rpn

torchvision/models/detection/retinanet.py (+1 -1)

@@ -337,7 +337,7 @@ def __init__(
         topk_candidates=1000,
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if not hasattr(backbone, "out_channels"):
             raise ValueError(

torchvision/models/detection/ssd.py (+1 -1)

@@ -182,7 +182,7 @@ def __init__(
         positive_fraction: float = 0.25,
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         self.backbone = backbone

torchvision/models/detection/ssdlite.py (+1 -1)

@@ -120,7 +120,7 @@ def __init__(
         min_depth: int = 16,
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         assert not backbone[c4_pos].use_res_connect
         self.features = nn.Sequential(

torchvision/models/efficientnet.py (+1 -1)

@@ -170,7 +170,7 @@ def __init__(
             norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
         """
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")

torchvision/models/googlenet.py (+1 -1)

@@ -39,7 +39,7 @@ def __init__(
         dropout_aux: float = 0.7,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         if blocks is None:
             blocks = [BasicConv2d, Inception, InceptionAux]
         if init_weights is None:

torchvision/models/inception.py (+1 -1)

@@ -37,7 +37,7 @@ def __init__(
         dropout: float = 0.5,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         if inception_blocks is None:
             inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
         if init_weights is None:

torchvision/models/mnasnet.py (+1 -1)

@@ -98,7 +98,7 @@ class MNASNet(torch.nn.Module):

     def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         assert alpha > 0.0
         self.alpha = alpha
         self.num_classes = num_classes

torchvision/models/mobilenetv2.py (+1 -1)

@@ -111,7 +111,7 @@ def __init__(

         """
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if block is None:
             block = InvertedResidual

torchvision/models/mobilenetv3.py (+1 -1)

@@ -151,7 +151,7 @@ def __init__(
             dropout (float): The droupout probability
         """
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")

torchvision/models/optical_flow/raft.py (+1 -1)

@@ -440,7 +440,7 @@ def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block
                 If ``None`` (default), the flow is upsampled using interpolation.
         """
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         self.feature_encoder = feature_encoder
         self.context_encoder = context_encoder

torchvision/models/regnet.py (+1 -1)

@@ -310,7 +310,7 @@ def __init__(
         activation: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if stem_type is None:
             stem_type = SimpleStemIN

torchvision/models/resnet.py (+1 -1)

@@ -174,7 +174,7 @@ def __init__(
         norm_layer: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
         self._norm_layer = norm_layer

torchvision/models/segmentation/_utils.py (+1 -1)

@@ -13,7 +13,7 @@ class _SimpleSegmentationModel(nn.Module):

     def __init__(self, backbone: nn.Module, classifier: nn.Module, aux_classifier: Optional[nn.Module] = None) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.backbone = backbone
         self.classifier = classifier
         self.aux_classifier = aux_classifier

torchvision/models/segmentation/lraspp.py (+1 -1)

@@ -38,7 +38,7 @@ def __init__(
         self, backbone: nn.Module, low_channels: int, high_channels: int, num_classes: int, inter_channels: int = 128
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.backbone = backbone
         self.classifier = LRASPPHead(low_channels, high_channels, num_classes, inter_channels)

torchvision/models/shufflenetv2.py (+1 -1)

@@ -100,7 +100,7 @@ def __init__(
         inverted_residual: Callable[..., nn.Module] = InvertedResidual,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if len(stages_repeats) != 3:
             raise ValueError("expected stages_repeats as list of 3 positive ints")

torchvision/models/squeezenet.py (+1 -1)

@@ -36,7 +36,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
 class SqueezeNet(nn.Module):
     def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float = 0.5) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.num_classes = num_classes
         if version == "1_0":
             self.features = nn.Sequential(

torchvision/models/vgg.py (+1 -1)

@@ -37,7 +37,7 @@ def __init__(
         self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True, dropout: float = 0.5
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.features = features
         self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
         self.classifier = nn.Sequential(

torchvision/models/video/resnet.py (+1 -1)

@@ -209,7 +209,7 @@ def __init__(
             zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.
         """
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.inplanes = 64

         self.stem = stem()

torchvision/ops/boxes.py (+9 -9)

@@ -34,7 +34,7 @@ def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
         Tensor: int64 tensor with the indices of the elements that have been kept
         by NMS, sorted in decreasing order of scores
     """
-    _log_api_usage_once("torchvision.ops.nms")
+    _log_api_usage_once("ops", "nms")
     _assert_has_ops()
     return torch.ops.torchvision.nms(boxes, scores, iou_threshold)

@@ -63,7 +63,7 @@ def batched_nms(
         Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted
         in decreasing order of scores
     """
-    _log_api_usage_once("torchvision.ops.batched_nms")
+    _log_api_usage_once("ops", "batched_nms")
     # Benchmarks that drove the following thresholds are at
     # https://github.com/pytorch/vision/issues/1311#issuecomment-781329339
     if boxes.numel() > (4000 if boxes.device.type == "cpu" else 20000) and not torchvision._is_tracing():

@@ -122,7 +122,7 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
         Tensor[K]: indices of the boxes that have both sides
         larger than min_size
     """
-    _log_api_usage_once("torchvision.ops.remove_small_boxes")
+    _log_api_usage_once("ops", "remove_small_boxes")
     ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
     keep = (ws >= min_size) & (hs >= min_size)
     keep = torch.where(keep)[0]

@@ -141,7 +141,7 @@ def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor:
     Returns:
         Tensor[N, 4]: clipped boxes
     """
-    _log_api_usage_once("torchvision.ops.clip_boxes_to_image")
+    _log_api_usage_once("ops", "clip_boxes_to_image")
     dim = boxes.dim()
     boxes_x = boxes[..., 0::2]
     boxes_y = boxes[..., 1::2]

@@ -182,7 +182,7 @@ def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor:
         Tensor[N, 4]: Boxes into converted format.
     """

-    _log_api_usage_once("torchvision.ops.box_convert")
+    _log_api_usage_once("ops", "box_convert")
     allowed_fmts = ("xyxy", "xywh", "cxcywh")
     if in_fmt not in allowed_fmts or out_fmt not in allowed_fmts:
         raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt")

@@ -232,7 +232,7 @@ def box_area(boxes: Tensor) -> Tensor:
     Returns:
         Tensor[N]: the area for each box
     """
-    _log_api_usage_once("torchvision.ops.box_area")
+    _log_api_usage_once("ops", "box_area")
     boxes = _upcast(boxes)
     return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

@@ -268,7 +268,7 @@ def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
     Returns:
         Tensor[N, M]: the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2
     """
-    _log_api_usage_once("torchvision.ops.box_iou")
+    _log_api_usage_once("ops", "box_iou")
     inter, union = _box_inter_union(boxes1, boxes2)
     iou = inter / union
     return iou

@@ -291,7 +291,7 @@ def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
         for every element in boxes1 and boxes2
     """

-    _log_api_usage_once("torchvision.ops.generalized_box_iou")
+    _log_api_usage_once("ops", "generalized_box_iou")
     # degenerate boxes gives inf / nan results
     # so do an early check
     assert (boxes1[:, 2:] >= boxes1[:, :2]).all()

@@ -323,7 +323,7 @@ def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor:
     Returns:
         Tensor[N, 4]: bounding boxes
     """
-    _log_api_usage_once("torchvision.ops.masks_to_boxes")
+    _log_api_usage_once("ops", "masks_to_boxes")
     if masks.numel() == 0:
         return torch.zeros((0, 4), device=masks.device, dtype=torch.float)

torchvision/ops/deform_conv.py (+1 -1)

@@ -61,7 +61,7 @@ def deform_conv2d(
         >>> torch.Size([4, 5, 8, 8])
     """

-    _log_api_usage_once("torchvision.ops.deform_conv2d")
+    _log_api_usage_once("ops", "deform_conv2d")
     _assert_has_ops()
     out_channels = weight.shape[0]

torchvision/ops/feature_pyramid_network.py (+1 -1)

@@ -77,7 +77,7 @@ def __init__(
         extra_blocks: Optional[ExtraFPNBlock] = None,
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("ops", self.__class__.__name__)
         self.inner_blocks = nn.ModuleList()
         self.layer_blocks = nn.ModuleList()
         for in_channels in in_channels_list:

torchvision/ops/focal_loss.py (+1 -1)

@@ -32,7 +32,7 @@ def sigmoid_focal_loss(
     Returns:
         Loss tensor with the reduction option applied.
     """
-    _log_api_usage_once("torchvision.ops.sigmoid_focal_loss")
+    _log_api_usage_once("ops", "sigmoid_focal_loss")
     p = torch.sigmoid(inputs)
     ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
     p_t = p * targets + (1 - p) * (1 - targets)

torchvision/ops/misc.py (+3 -3)

@@ -61,7 +61,7 @@ def __init__(
             warnings.warn("`n` argument is deprecated and has been renamed `num_features`", DeprecationWarning)
             num_features = n
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("ops", self.__class__.__name__)
         self.eps = eps
         self.register_buffer("weight", torch.ones(num_features))
         self.register_buffer("bias", torch.zeros(num_features))

@@ -155,7 +155,7 @@ def __init__(
         if activation_layer is not None:
             layers.append(activation_layer(inplace=inplace))
         super().__init__(*layers)
-        _log_api_usage_once(self)
+        _log_api_usage_once("ops", self.__class__.__name__)
         self.out_channels = out_channels

@@ -179,7 +179,7 @@ def __init__(
         scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("ops", self.__class__.__name__)
         self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
         self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
         self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)

torchvision/ops/poolers.py (+1 -1)

@@ -276,7 +276,7 @@ def __init__(
         canonical_level: int = 4,
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("ops", self.__class__.__name__)
         if isinstance(output_size, int):
             output_size = (output_size, output_size)
         self.featmap_names = featmap_names

torchvision/ops/ps_roi_align.py (+1 -1)

@@ -43,7 +43,7 @@ def ps_roi_align(
     Returns:
         Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs
     """
-    _log_api_usage_once("torchvision.ops.ps_roi_align")
+    _log_api_usage_once("ops", "ps_roi_align")
     _assert_has_ops()
     check_roi_boxes_shape(boxes)
     rois = boxes

torchvision/ops/ps_roi_pool.py (+1 -1)

@@ -37,7 +37,7 @@ def ps_roi_pool(
     Returns:
         Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs.
     """
-    _log_api_usage_once("torchvision.ops.ps_roi_pool")
+    _log_api_usage_once("ops", "ps_roi_pool")
     _assert_has_ops()
     check_roi_boxes_shape(boxes)
     rois = boxes

torchvision/ops/roi_align.py (+1 -1)

@@ -50,7 +50,7 @@ def roi_align(
     Returns:
         Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
     """
-    _log_api_usage_once("torchvision.ops.roi_align")
+    _log_api_usage_once("ops", "roi_align")
     _assert_has_ops()
     check_roi_boxes_shape(boxes)
     rois = boxes

torchvision/ops/roi_pool.py (+1 -1)

@@ -39,7 +39,7 @@ def roi_pool(
     Returns:
         Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
     """
-    _log_api_usage_once("torchvision.ops.roi_pool")
+    _log_api_usage_once("ops", "roi_pool")
     _assert_has_ops()
     check_roi_boxes_shape(boxes)
     rois = boxes

torchvision/ops/stochastic_depth.py (+1 -1)

@@ -23,7 +23,7 @@ def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True)
     Returns:
         Tensor[N, ...]: The randomly zeroed tensor.
     """
-    _log_api_usage_once("torchvision.ops.stochastic_depth")
+    _log_api_usage_once("ops", "stochastic_depth")
     if p < 0.0 or p > 1.0:
         raise ValueError(f"drop probability has to be between 0 and 1, but got {p}")
     if mode not in ["batch", "row"]:

torchvision/prototype/models/vision_transformer.py (+1 -1)

@@ -140,7 +140,7 @@ def __init__(
         norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         torch._assert(image_size % patch_size == 0, "Input shape indivisible by patch size!")
         self.image_size = image_size
         self.patch_size = patch_size
