@@ -34,7 +34,7 @@ def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
        Tensor: int64 tensor with the indices of the elements that have been kept
        by NMS, sorted in decreasing order of scores
    """
-    _log_api_usage_once("torchvision.ops.nms")
+    _log_api_usage_once("ops", "nms")
    _assert_has_ops()
    return torch.ops.torchvision.nms(boxes, scores, iou_threshold)
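For context, a minimal illustrative call to the function touched above, `torchvision.ops.nms`, with made-up boxes and scores (the expected output is hand-computed from the IoUs):

import torch
from torchvision.ops import nms

# Two heavily overlapping boxes plus one separate box, in (x1, y1, x2, y2) format.
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0], [50.0, 50.0, 60.0, 60.0]])
scores = torch.tensor([0.9, 0.8, 0.7])

# Indices of the boxes kept at an IoU threshold of 0.5, sorted by decreasing score.
keep = nms(boxes, scores, iou_threshold=0.5)
print(keep)  # expected: tensor([0, 2]) -- box 1 is suppressed by the higher-scoring box 0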
@@ -63,7 +63,7 @@ def batched_nms(
        Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted
        in decreasing order of scores
    """
-    _log_api_usage_once("torchvision.ops.batched_nms")
+    _log_api_usage_once("ops", "batched_nms")
    # Benchmarks that drove the following thresholds are at
    # https://github.com/pytorch/vision/issues/1311#issuecomment-781329339
    if boxes.numel() > (4000 if boxes.device.type == "cpu" else 20000) and not torchvision._is_tracing():
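A quick illustrative sketch of `torchvision.ops.batched_nms`, which only suppresses boxes within the same category (the `idxs` values below are made-up class labels):

import torch
from torchvision.ops import batched_nms

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0]])
scores = torch.tensor([0.9, 0.8])
idxs = torch.tensor([0, 1])  # different classes, so the overlap is ignored

keep = batched_nms(boxes, scores, idxs, iou_threshold=0.5)
print(keep)  # expected: tensor([0, 1]) -- both boxes survive because NMS runs per class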
@@ -122,7 +122,7 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
        Tensor[K]: indices of the boxes that have both sides
        larger than min_size
    """
-    _log_api_usage_once("torchvision.ops.remove_small_boxes")
+    _log_api_usage_once("ops", "remove_small_boxes")
    ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
    keep = (ws >= min_size) & (hs >= min_size)
    keep = torch.where(keep)[0]
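For reference, an illustrative call to `torchvision.ops.remove_small_boxes`; only boxes whose width and height are both at least `min_size` are kept:

import torch
from torchvision.ops import remove_small_boxes

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [0.0, 0.0, 2.0, 30.0]])
keep = remove_small_boxes(boxes, min_size=5.0)
print(keep)  # expected: tensor([0]) -- the second box is only 2 units wide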
@@ -141,7 +141,7 @@ def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor:
    Returns:
        Tensor[N, 4]: clipped boxes
    """
-    _log_api_usage_once("torchvision.ops.clip_boxes_to_image")
+    _log_api_usage_once("ops", "clip_boxes_to_image")
    dim = boxes.dim()
    boxes_x = boxes[..., 0::2]
    boxes_y = boxes[..., 1::2]
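An illustrative call to `torchvision.ops.clip_boxes_to_image`; note that `size` is (height, width), so x coordinates are clamped to [0, width] and y coordinates to [0, height]:

import torch
from torchvision.ops import clip_boxes_to_image

boxes = torch.tensor([[-5.0, -5.0, 120.0, 90.0]])
clipped = clip_boxes_to_image(boxes, size=(80, 100))  # image of height 80, width 100
print(clipped)  # expected: tensor([[  0.,   0., 100.,  80.]])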
@@ -182,7 +182,7 @@ def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor:
        Tensor[N, 4]: Boxes into converted format.
    """

-    _log_api_usage_once("torchvision.ops.box_convert")
+    _log_api_usage_once("ops", "box_convert")
    allowed_fmts = ("xyxy", "xywh", "cxcywh")
    if in_fmt not in allowed_fmts or out_fmt not in allowed_fmts:
        raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt")
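An illustrative conversion with `torchvision.ops.box_convert` between two of the allowed formats (the numbers are made up):

import torch
from torchvision.ops import box_convert

# (cx, cy, w, h) -> (x1, y1, x2, y2)
boxes = torch.tensor([[50.0, 50.0, 20.0, 10.0]])
print(box_convert(boxes, in_fmt="cxcywh", out_fmt="xyxy"))
# expected: tensor([[40., 45., 60., 55.]])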
@@ -232,7 +232,7 @@ def box_area(boxes: Tensor) -> Tensor:
    Returns:
        Tensor[N]: the area for each box
    """
-    _log_api_usage_once("torchvision.ops.box_area")
+    _log_api_usage_once("ops", "box_area")
    boxes = _upcast(boxes)
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
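A one-line sanity check of `torchvision.ops.box_area` on made-up xyxy boxes:

import torch
from torchvision.ops import box_area

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [10.0, 10.0, 15.0, 20.0]])
print(box_area(boxes))  # expected: tensor([100., 50.])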
@@ -268,7 +268,7 @@ def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
    Returns:
        Tensor[N, M]: the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2
    """
-    _log_api_usage_once("torchvision.ops.box_iou")
+    _log_api_usage_once("ops", "box_iou")
    inter, union = _box_inter_union(boxes1, boxes2)
    iou = inter / union
    return iou
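An illustrative call to `torchvision.ops.box_iou`, which returns the full N x M matrix of pairwise IoU values (the second value is hand-computed: 25 / 175 ≈ 0.1429):

import torch
from torchvision.ops import box_iou

boxes1 = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
boxes2 = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]])
print(box_iou(boxes1, boxes2))  # expected: tensor([[1.0000, 0.1429]])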
@@ -291,7 +291,7 @@ def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
        for every element in boxes1 and boxes2
    """

-    _log_api_usage_once("torchvision.ops.generalized_box_iou")
+    _log_api_usage_once("ops", "generalized_box_iou")
    # degenerate boxes gives inf / nan results
    # so do an early check
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
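An illustrative call to `torchvision.ops.generalized_box_iou`; unlike plain IoU, GIoU lies in [-1, 1] and becomes negative for disjoint boxes (the value below is hand-computed as 0 - 100/300):

import torch
from torchvision.ops import generalized_box_iou

boxes1 = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
boxes2 = torch.tensor([[20.0, 0.0, 30.0, 10.0]])  # disjoint from boxes1
print(generalized_box_iou(boxes1, boxes2))  # expected: tensor([[-0.3333]])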
@@ -323,7 +323,7 @@ def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor:
    Returns:
        Tensor[N, 4]: bounding boxes
    """
-    _log_api_usage_once("torchvision.ops.masks_to_boxes")
+    _log_api_usage_once("ops", "masks_to_boxes")
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device, dtype=torch.float)
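Finally, an illustrative call to `torchvision.ops.masks_to_boxes`, which returns the tight (x1, y1, x2, y2) box around each binary mask (the expected output is read off the filled region below):

import torch
from torchvision.ops import masks_to_boxes

# One 6x6 mask with a filled region spanning rows 2-3 and columns 1-3.
masks = torch.zeros((1, 6, 6), dtype=torch.bool)
masks[0, 2:4, 1:4] = True
print(masks_to_boxes(masks))  # expected: tensor([[1., 2., 3., 3.]])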