Fix expected box format by pycoco (#1913)
(cherry picked from commit 0fad881)
SkafteNicki authored and Borda committed Jul 13, 2023
1 parent 3661b25 commit 52d540d
Showing 3 changed files with 55 additions and 17 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -38,6 +38,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Fixed bug related to `MeanMetric` and broadcasting of weights when Nans are present ([#1898](https://github.com/Lightning-AI/torchmetrics/pull/1898))


- Fixed bug related to expected input format of pycoco in `MeanAveragePrecision` ([#1913](https://github.com/Lightning-AI/torchmetrics/pull/1913))


## [1.0.0] - 2023-07-04

### Added
40 changes: 24 additions & 16 deletions src/torchmetrics/detection/mean_ap.py
@@ -145,7 +145,15 @@ class MeanAveragePrecision(Metric):
Args:
box_format:
Input format of given boxes. Supported formats are ``[`xyxy`, `xywh`, `cxcywh`]``.
Input format of given boxes. Supported formats are:
- 'xyxy': boxes are represented via corners, x1, y1 being top left and x2, y2 being bottom right.
- 'xywh': boxes are represented via corner, width and height, x1, y1 being top left, w, h being
width and height. This is the default format used by pycoco, and all input formats will be
converted to it.
- 'cxcywh': boxes are represented via center, width and height, cx, cy being center of box, w, h being
width and height.
iou_type:
Type of input (either masks or bounding-boxes) used for computing IOU.
Supported IOU types are ``["bbox", "segm"]``. If using ``"segm"``, masks should be provided in input.
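For readers unfamiliar with the three conventions, here is a minimal sketch (not part of the diff) showing the same box in each format, using torchvision's `box_convert` — the helper this metric calls internally:

```python
import torch
from torchvision.ops import box_convert

# One box, top-left corner (0.5, 0.5), bottom-right corner (1.0, 1.0).
xyxy = torch.tensor([[0.5, 0.5, 1.0, 1.0]])

# Corner plus width/height -- the layout pycoco expects:
print(box_convert(xyxy, in_fmt="xyxy", out_fmt="xywh"))    # tensor([[0.5000, 0.5000, 0.5000, 0.5000]])

# Center plus width/height:
print(box_convert(xyxy, in_fmt="xyxy", out_fmt="cxcywh"))  # tensor([[0.7500, 0.7500, 0.5000, 0.5000]])
```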
@@ -232,7 +240,7 @@ class MeanAveragePrecision(Metric):

def __init__(
self,
box_format: str = "xyxy",
box_format: Literal["xyxy", "xywh", "cxcywh"] = "xyxy",
iou_type: Literal["bbox", "segm"] = "bbox",
iou_thresholds: Optional[List[float]] = None,
rec_thresholds: Optional[List[float]] = None,
@@ -345,27 +353,27 @@ def compute(self) -> dict:
coco_target.createIndex()
coco_preds.createIndex()

coco_eval = COCOeval(coco_target, coco_preds, iouType=self.iou_type)
coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64)
coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64)
coco_eval.params.maxDets = self.max_detection_thresholds
self.coco_eval = COCOeval(coco_target, coco_preds, iouType=self.iou_type)
self.coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64)
self.coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64)
self.coco_eval.params.maxDets = self.max_detection_thresholds

coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
stats = coco_eval.stats
self.coco_eval.evaluate()
self.coco_eval.accumulate()
self.coco_eval.summarize()
stats = self.coco_eval.stats

# if class mode is enabled, evaluate metrics per class
if self.class_metrics:
map_per_class_list = []
mar_100_per_class_list = []
for class_id in self._get_classes():
coco_eval.params.catIds = [class_id]
self.coco_eval.params.catIds = [class_id]
with contextlib.redirect_stdout(io.StringIO()):
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
class_stats = coco_eval.stats
self.coco_eval.evaluate()
self.coco_eval.accumulate()
self.coco_eval.summarize()
class_stats = self.coco_eval.stats

map_per_class_list.append(torch.tensor([class_stats[0]]))
mar_100_per_class_list.append(torch.tensor([class_stats[8]]))
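A side effect of the `coco_eval` → `self.coco_eval` changes above: the pycocotools evaluator now stays attached to the metric after `compute()`, which the new test below uses to inspect raw IoU values. A minimal usage sketch, built on the same toy boxes as that test:

```python
import torch
from torchmetrics.detection import MeanAveragePrecision

preds = [{"boxes": torch.tensor([[0.5, 0.5, 1.0, 1.0]]),
          "scores": torch.tensor([1.0]),
          "labels": torch.tensor([0])}]
target = [{"boxes": torch.tensor([[0.0, 0.0, 1.0, 1.0]]),
           "labels": torch.tensor([0])}]

metric = MeanAveragePrecision(iou_type="bbox")
metric.update(preds, target)
result = metric.compute()

# The underlying pycocotools evaluator remains inspectable after compute():
print(metric.coco_eval.ious)   # dict: (image_id, category_id) -> IoU array
print(metric.coco_eval.stats)  # the 12 standard COCO summary numbers
```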
@@ -545,7 +553,7 @@ def _get_safe_item_values(self, item: Dict[str, Any]) -> Union[Tensor, Tuple]:
if self.iou_type == "bbox":
boxes = _fix_empty_tensors(item["boxes"])
if boxes.numel() > 0:
boxes = box_convert(boxes, in_fmt=self.box_format, out_fmt="xyxy")
boxes = box_convert(boxes, in_fmt=self.box_format, out_fmt="xywh")
return boxes
if self.iou_type == "segm":
masks = []
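The `out_fmt` switch from `"xyxy"` to `"xywh"` above is the core of the fix: pycocotools expects boxes as `[x, y, width, height]`, so handing it corner-format boxes skewed every IoU. A hand computation (plain Python, independent of the library) reproducing the values asserted in the new test below:

```python
def iou(box_a, box_b):
    """IoU of two boxes given as (x1, y1, x2, y2) corners."""
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)

# Prediction [0.5, 0.5, 1, 1] vs target [0, 0, 1, 1], both read as xyxy corners:
print(round(iou((0.5, 0.5, 1.0, 1.0), (0.0, 0.0, 1.0, 1.0)), 3))  # 0.25

# Pre-fix, pycoco effectively read the xyxy prediction as xywh, i.e. corners
# (0.5, 0.5)..(1.5, 1.5), deflating the IoU:
print(round(iou((0.5, 0.5, 1.5, 1.5), (0.0, 0.0, 1.0, 1.0)), 3))  # 0.143
```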
29 changes: 28 additions & 1 deletion tests/unittests/detection/test_map.py
@@ -140,6 +140,7 @@ def test_map(self, iou_type, iou_thresholds, rec_thresholds, ddp):
"iou_thresholds": iou_thresholds,
"rec_thresholds": rec_thresholds,
"class_metrics": False,
"box_format": "xywh",
},
check_batch=False,
atol=1e-2,
@@ -154,7 +155,7 @@ def test_map_classwise(self, iou_type, ddp):
target=target,
metric_class=MeanAveragePrecision,
reference_metric=partial(_compare_again_coco_fn, iou_type=iou_type, class_metrics=True),
metric_args={"iou_type": iou_type, "class_metrics": True},
metric_args={"box_format": "xywh", "iou_type": iou_type, "class_metrics": True},
check_batch=False,
atol=1e-1,
)
@@ -656,3 +657,29 @@ def test_device_changing():
metric = metric.cpu()
val = metric.compute()
assert isinstance(val, dict)


@pytest.mark.parametrize(
("box_format", "iou_val_expected", "map_val_expected"),
[
("xyxy", 0.25, 1),
("xywh", 0.143, 0.0),
("cxcywh", 0.143, 0.0),
],
)
def test_for_box_format(box_format, iou_val_expected, map_val_expected):
"""Test that only the correct box format lead to a score of 1.
See issue: https://github.com/Lightning-AI/torchmetrics/issues/1908.
"""
predictions = [
{"boxes": torch.tensor([[0.5, 0.5, 1, 1]]), "scores": torch.tensor([1.0]), "labels": torch.tensor([0])}
]

targets = [{"boxes": torch.tensor([[0, 0, 1, 1]]), "labels": torch.tensor([0])}]

metric = MeanAveragePrecision(box_format=box_format, iou_thresholds=[0.2])
metric.update(predictions, targets)
result = metric.compute()
assert result["map"].item() == map_val_expected
assert round(float(metric.coco_eval.ious[(0, 0)]), 3) == iou_val_expected
