From cf36cfc18d70611e5623fd8aac2b536a71005f94 Mon Sep 17 00:00:00 2001 From: Steven Kou Date: Sat, 18 Jun 2022 20:56:14 +0800 Subject: [PATCH 1/5] Skip box conversion if no boxes are present The `box_convert` function from torchvision expects the input to be a Tensor[N, 4], where N > 0. Should N == 0 and in_fmt != out_fmt, `unbind` will error out on the boxes tensor during the conversion process. The workaround is therefore to skip the box conversion if boxes is an empty tensor. --- src/torchmetrics/detection/mean_ap.py | 3 +- tests/unittests/detection/test_map.py | 74 ++++++++++++++++++++++++++- 2 files changed, 74 insertions(+), 3 deletions(-) diff --git a/src/torchmetrics/detection/mean_ap.py b/src/torchmetrics/detection/mean_ap.py index 8da9679a96a..b86cc24e062 100644 --- a/src/torchmetrics/detection/mean_ap.py +++ b/src/torchmetrics/detection/mean_ap.py @@ -416,7 +416,8 @@ def _get_safe_item_values(self, item: Dict[str, Any]) -> Union[Tensor, Tuple]: if self.iou_type == "bbox": boxes = _fix_empty_tensors(item["boxes"]) - boxes = box_convert(boxes, in_fmt=self.box_format, out_fmt="xyxy") + if boxes.numel() > 0: + boxes = box_convert(boxes, in_fmt=self.box_format, out_fmt="xyxy") return boxes elif self.iou_type == "segm": masks = [] diff --git a/tests/unittests/detection/test_map.py b/tests/unittests/detection/test_map.py index 3fafd61a413..a0f3dcc7a83 100644 --- a/tests/unittests/detection/test_map.py +++ b/tests/unittests/detection/test_map.py @@ -56,7 +56,6 @@ ], ) - _inputs = Input( preds=[ [ @@ -299,7 +298,6 @@ class TestMAP(MetricTester): @pytest.mark.parametrize("ddp", [False, True]) def test_map_bbox(self, compute_on_cpu, ddp): - """Test modular implementation for correctness.""" self.run_class_metric_test( ddp=ddp, @@ -374,6 +372,78 @@ def test_empty_ground_truths(): metric.compute() +@pytest.mark.skipif(_pytest_condition, reason="test requires that torchvision=>0.8.0 is installed") +def test_empty_ground_truths_xywh(): + """Test empty ground truths in xywh format.""" + metric = MeanAveragePrecision(box_format="xywh") + + metric.update( + [ + dict( + boxes=Tensor([[214.1500, 41.2900, 348.2600, 243.7800]]), + scores=Tensor([0.5]), + labels=IntTensor([4]), + ), + ], + [ + dict(boxes=Tensor([]), labels=IntTensor([])), + ], + ) + metric.compute() + + +@pytest.mark.skipif(_pytest_condition, reason="test requires that torchvision=>0.8.0 is installed") +def test_empty_preds_xywh(): + """Test empty predictions in xywh format.""" + metric = MeanAveragePrecision(box_format="xywh") + + metric.update( + [ + dict(boxes=Tensor([]), scores=Tensor([]), labels=IntTensor([])), + ], + [ + dict(boxes=Tensor([[214.1500, 41.2900, 348.2600, 243.7800]]), labels=IntTensor([4])), + ], + ) + metric.compute() + + +@pytest.mark.skipif(_pytest_condition, reason="test requires that torchvision=>0.8.0 is installed") +def test_empty_ground_truths_cxcywh(): + """Test empty ground truths in cxcywh format.""" + metric = MeanAveragePrecision(box_format="cxcywh") + + metric.update( + [ + dict( + boxes=Tensor([[388.2800, 163.1800, 348.2600, 243.7800]]), + scores=Tensor([0.5]), + labels=IntTensor([4]), + ), + ], + [ + dict(boxes=Tensor([]), labels=IntTensor([])), + ], + ) + metric.compute() + + +@pytest.mark.skipif(_pytest_condition, reason="test requires that torchvision=>0.8.0 is installed") +def test_empty_preds_cxcywh(): + """Test empty predictions in cxcywh format.""" + metric = MeanAveragePrecision(box_format="cxcywh") + + metric.update( + [ + dict(boxes=Tensor([]), scores=Tensor([]), labels=IntTensor([])), + 
], + [ + dict(boxes=Tensor([[388.2800, 163.1800, 348.2600, 243.7800]]), labels=IntTensor([4])), + ], + ) + metric.compute() + + _gpu_test_condition = not torch.cuda.is_available() From 9a64bdf56a5f2efcaca75acaf1bd84c9532522f4 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Sun, 19 Jun 2022 19:27:31 +0200 Subject: [PATCH 2/5] , --- tests/unittests/detection/test_map.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/unittests/detection/test_map.py b/tests/unittests/detection/test_map.py index a0f3dcc7a83..97aed66b34d 100644 --- a/tests/unittests/detection/test_map.py +++ b/tests/unittests/detection/test_map.py @@ -541,12 +541,12 @@ def test_segm_iou_empty_gt_mask(): dict( masks=torch.randint(0, 1, (1, 10, 10)).bool(), scores=Tensor([0.5]), - labels=IntTensor([4]), - ), + labels=IntTensor([4]) + ) ], [ - dict(masks=Tensor([]), labels=IntTensor([])), - ], + dict(masks=Tensor([]), labels=IntTensor([])) + ] ) metric.compute() @@ -562,12 +562,12 @@ def test_segm_iou_empty_pred_mask(): dict( masks=torch.BoolTensor([]), scores=Tensor([]), - labels=IntTensor([]), - ), + labels=IntTensor([]) + ) ], [ - dict(masks=torch.randint(0, 1, (1, 10, 10)).bool(), labels=IntTensor([4])), - ], + dict(masks=torch.randint(0, 1, (1, 10, 10)).bool(), labels=IntTensor([4])) + ] ) metric.compute() From ff80d6f19016db5b986c61abcd116419b9fe3cb4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sun, 19 Jun 2022 17:28:06 +0000 Subject: [PATCH 3/5] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/unittests/detection/test_map.py | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/tests/unittests/detection/test_map.py b/tests/unittests/detection/test_map.py index 97aed66b34d..560970cecb2 100644 --- a/tests/unittests/detection/test_map.py +++ b/tests/unittests/detection/test_map.py @@ -537,16 +537,8 @@ def test_segm_iou_empty_gt_mask(): metric = MeanAveragePrecision(iou_type="segm") metric.update( - [ - dict( - masks=torch.randint(0, 1, (1, 10, 10)).bool(), - scores=Tensor([0.5]), - labels=IntTensor([4]) - ) - ], - [ - dict(masks=Tensor([]), labels=IntTensor([])) - ] + [dict(masks=torch.randint(0, 1, (1, 10, 10)).bool(), scores=Tensor([0.5]), labels=IntTensor([4]))], + [dict(masks=Tensor([]), labels=IntTensor([]))], ) metric.compute() @@ -558,16 +550,8 @@ def test_segm_iou_empty_pred_mask(): metric = MeanAveragePrecision(iou_type="segm") metric.update( - [ - dict( - masks=torch.BoolTensor([]), - scores=Tensor([]), - labels=IntTensor([]) - ) - ], - [ - dict(masks=torch.randint(0, 1, (1, 10, 10)).bool(), labels=IntTensor([4])) - ] + [dict(masks=torch.BoolTensor([]), scores=Tensor([]), labels=IntTensor([]))], + [dict(masks=torch.randint(0, 1, (1, 10, 10)).bool(), labels=IntTensor([4]))], ) metric.compute() From 63174ba4d28f4162b1176640de1783a45ad7775a Mon Sep 17 00:00:00 2001 From: Jirka Date: Sun, 19 Jun 2022 19:30:06 +0200 Subject: [PATCH 4/5] simple --- tests/unittests/detection/test_map.py | 60 ++++++--------------------- 1 file changed, 12 insertions(+), 48 deletions(-) diff --git a/tests/unittests/detection/test_map.py b/tests/unittests/detection/test_map.py index 560970cecb2..295d6d76509 100644 --- a/tests/unittests/detection/test_map.py +++ b/tests/unittests/detection/test_map.py @@ -342,12 +342,8 @@ def test_empty_preds(): metric = MeanAveragePrecision() metric.update( - [ - dict(boxes=Tensor([]), 
scores=Tensor([]), labels=IntTensor([])), - ], - [ - dict(boxes=Tensor([[214.1500, 41.2900, 562.4100, 285.0700]]), labels=IntTensor([4])), - ], + [dict(boxes=Tensor([]), scores=Tensor([]), labels=IntTensor([]))], + [dict(boxes=Tensor([[214.1500, 41.2900, 562.4100, 285.0700]]), labels=IntTensor([4]))], ) metric.compute() @@ -358,16 +354,8 @@ def test_empty_ground_truths(): metric = MeanAveragePrecision() metric.update( - [ - dict( - boxes=Tensor([[214.1500, 41.2900, 562.4100, 285.0700]]), - scores=Tensor([0.5]), - labels=IntTensor([4]), - ), - ], - [ - dict(boxes=Tensor([]), labels=IntTensor([])), - ], + [dict(boxes=Tensor([[214.1500, 41.2900, 562.4100, 285.0700]]), scores=Tensor([0.5]), labels=IntTensor([4]))], + [dict(boxes=Tensor([]), labels=IntTensor([]))], ) metric.compute() @@ -378,16 +366,8 @@ def test_empty_ground_truths_xywh(): metric = MeanAveragePrecision(box_format="xywh") metric.update( - [ - dict( - boxes=Tensor([[214.1500, 41.2900, 348.2600, 243.7800]]), - scores=Tensor([0.5]), - labels=IntTensor([4]), - ), - ], - [ - dict(boxes=Tensor([]), labels=IntTensor([])), - ], + [dict(boxes=Tensor([[214.1500, 41.2900, 348.2600, 243.7800]]), scores=Tensor([0.5]), labels=IntTensor([4]))], + [dict(boxes=Tensor([]), labels=IntTensor([]))], ) metric.compute() @@ -398,12 +378,8 @@ def test_empty_preds_xywh(): metric = MeanAveragePrecision(box_format="xywh") metric.update( - [ - dict(boxes=Tensor([]), scores=Tensor([]), labels=IntTensor([])), - ], - [ - dict(boxes=Tensor([[214.1500, 41.2900, 348.2600, 243.7800]]), labels=IntTensor([4])), - ], + [dict(boxes=Tensor([]), scores=Tensor([]), labels=IntTensor([]))], + [dict(boxes=Tensor([[214.1500, 41.2900, 348.2600, 243.7800]]), labels=IntTensor([4]))], ) metric.compute() @@ -414,16 +390,8 @@ def test_empty_ground_truths_cxcywh(): metric = MeanAveragePrecision(box_format="cxcywh") metric.update( - [ - dict( - boxes=Tensor([[388.2800, 163.1800, 348.2600, 243.7800]]), - scores=Tensor([0.5]), - labels=IntTensor([4]), - ), - ], - [ - dict(boxes=Tensor([]), labels=IntTensor([])), - ], + [dict(boxes=Tensor([[388.2800, 163.1800, 348.2600, 243.7800]]), scores=Tensor([0.5]), labels=IntTensor([4]))], + [dict(boxes=Tensor([]), labels=IntTensor([]))], ) metric.compute() @@ -434,12 +402,8 @@ def test_empty_preds_cxcywh(): metric = MeanAveragePrecision(box_format="cxcywh") metric.update( - [ - dict(boxes=Tensor([]), scores=Tensor([]), labels=IntTensor([])), - ], - [ - dict(boxes=Tensor([[388.2800, 163.1800, 348.2600, 243.7800]]), labels=IntTensor([4])), - ], + [dict(boxes=Tensor([]), scores=Tensor([]), labels=IntTensor([]))], + [dict(boxes=Tensor([[388.2800, 163.1800, 348.2600, 243.7800]]), labels=IntTensor([4]))], ) metric.compute() From 1c1f12fb564317b66a75038b18d96a13947bcfdd Mon Sep 17 00:00:00 2001 From: Jirka Date: Mon, 20 Jun 2022 11:13:12 +0200 Subject: [PATCH 5/5] chlog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7da61d6c634..ffce5d81cad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,9 +42,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixed mAP calculation for areas with 0 predictions ([#1080](https://github.com/PyTorchLightning/metrics/pull/1080)) + - Fixed bug where avg precision state and auroc state was not merge when using MetricCollections ([#1086](https://github.com/PyTorchLightning/metrics/pull/1086)) +- Skip box conversion if no boxes are present in `MeanAveragePrecision` ([#1097](https://github.com/PyTorchLightning/metrics/pull/1097)) 
+ + ## [0.9.1] - 2022-06-08 ### Added
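
Note (not part of the patches above): a minimal sketch of the failure mode that PATCH 1/5 guards against, under the assumption that torchvision's `box_convert` behaves as described in that commit message (unbinding the last dimension of the boxes tensor into four coordinates). The guard at the end mirrors the `boxes.numel() > 0` check added to `_get_safe_item_values`; tensor shapes and the printed messages are illustrative only.

import torch
from torchvision.ops import box_convert

# An image with no detections typically arrives as an empty boxes tensor.
boxes = torch.empty((0,))

# Converting an empty tensor between differing formats was observed to fail,
# because the conversion unbinds the last dimension into four coordinates.
try:
    box_convert(boxes, in_fmt="xywh", out_fmt="xyxy")
except Exception as err:
    print(f"box_convert failed on empty input: {err}")

# The workaround from PATCH 1/5: only convert when there is something to convert.
if boxes.numel() > 0:
    boxes = box_convert(boxes, in_fmt="xywh", out_fmt="xyxy")
print(boxes.shape)  # still empty, but no crash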