From 7b5c9ac0f4b49cfe24ac3bf2f540f17c8f33c2e3 Mon Sep 17 00:00:00 2001 From: MiXaiLL76 Date: Fri, 7 Jun 2024 16:43:49 +0300 Subject: [PATCH 1/3] add faster-coco-eval metric --- mmdet/evaluation/metrics/coco_metric.py | 25 +++++- requirements/runtime.txt | 1 + .../test_metrics/test_coco_metric.py | 81 ++++++++++++++----- 3 files changed, 83 insertions(+), 24 deletions(-) diff --git a/mmdet/evaluation/metrics/coco_metric.py b/mmdet/evaluation/metrics/coco_metric.py index cfdc66e03b9..e97ac68e985 100644 --- a/mmdet/evaluation/metrics/coco_metric.py +++ b/mmdet/evaluation/metrics/coco_metric.py @@ -8,6 +8,8 @@ import numpy as np import torch +from faster_coco_eval import COCO as FasterCOCO +from faster_coco_eval import COCOeval_faster from mmengine.evaluator import BaseMetric from mmengine.fileio import dump, get_local_path, load from mmengine.logging import MMLogger @@ -64,6 +66,7 @@ class CocoMetric(BaseMetric): sort_categories (bool): Whether sort categories in annotations. Only used for `Objects365V1Dataset`. Defaults to False. use_mp_eval (bool): Whether to use mul-processing evaluation + use_faster_coco_eval (bool): Whether to use Faster-COCO-Eval evaluation """ default_prefix: Optional[str] = 'coco' @@ -81,7 +84,8 @@ def __init__(self, collect_device: str = 'cpu', prefix: Optional[str] = None, sort_categories: bool = False, - use_mp_eval: bool = False) -> None: + use_mp_eval: bool = False, + use_faster_coco_eval: bool = False) -> None: super().__init__(collect_device=collect_device, prefix=prefix) # coco evaluation metrics self.metrics = metric if isinstance(metric, list) else [metric] @@ -96,6 +100,8 @@ def __init__(self, self.classwise = classwise # whether to use multi processing evaluation, default False self.use_mp_eval = use_mp_eval + # whether to use Faster Coco Eval, default False + self.use_faster_coco_eval = use_faster_coco_eval # proposal_nums used to compute recall or precision. 
self.proposal_nums = list(proposal_nums) @@ -127,7 +133,10 @@ def __init__(self, if ann_file is not None: with get_local_path( ann_file, backend_args=self.backend_args) as local_path: - self._coco_api = COCO(local_path) + if self.use_faster_coco_eval: + self._coco_api = FasterCOCO(local_path) + else: + self._coco_api = COCO(local_path) if sort_categories: # 'categories' list in objects365_train.json and # objects365_val.json is inconsistent, need sort @@ -410,7 +419,10 @@ def compute_metrics(self, results: list) -> Dict[str, float]: logger.info('Converting ground truth to coco format...') coco_json_path = self.gt_to_coco_json( gt_dicts=gts, outfile_prefix=outfile_prefix) - self._coco_api = COCO(coco_json_path) + if self.use_faster_coco_eval: + self._coco_api = FasterCOCO(coco_json_path) + else: + self._coco_api = COCO(coco_json_path) # handle lazy init if self.cat_ids is None: @@ -468,6 +480,13 @@ def compute_metrics(self, results: list) -> Dict[str, float]: if self.use_mp_eval: coco_eval = COCOevalMP(self._coco_api, coco_dt, iou_type) + elif self.use_faster_coco_eval: + coco_eval = COCOeval_faster( + self._coco_api, + coco_dt, + iou_type, + print_function=logger.info, + ) else: coco_eval = COCOeval(self._coco_api, coco_dt, iou_type) diff --git a/requirements/runtime.txt b/requirements/runtime.txt index 8f74a6d3e61..28addfc097a 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -1,3 +1,4 @@ +faster-coco-eval matplotlib numpy pycocotools diff --git a/tests/test_evaluation/test_metrics/test_coco_metric.py b/tests/test_evaluation/test_metrics/test_coco_metric.py index 547b8f21e0f..03b0ccac969 100644 --- a/tests/test_evaluation/test_metrics/test_coco_metric.py +++ b/tests/test_evaluation/test_metrics/test_coco_metric.py @@ -6,6 +6,7 @@ import pycocotools.mask as mask_util import torch from mmengine.fileio import dump +from parameterized import parameterized from mmdet.evaluation import CocoMetric @@ -111,7 +112,8 @@ def test_init(self): with self.assertRaisesRegex(KeyError, 'metric should be one of'): CocoMetric(ann_file=fake_json_file, metric='unknown') - def test_evaluate(self): + @parameterized.expand([False, True]) + def test_evaluate(self, use_faster_coco_eval): # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) @@ -121,7 +123,9 @@ def test_evaluate(self): coco_metric = CocoMetric( ann_file=fake_json_file, classwise=False, - outfile_prefix=f'{self.tmp_dir.name}/test') + outfile_prefix=f'{self.tmp_dir.name}/test', + use_faster_coco_eval=use_faster_coco_eval, + ) coco_metric.dataset_meta = dict(classes=['car', 'bicycle']) coco_metric.process( {}, @@ -144,7 +148,9 @@ def test_evaluate(self): ann_file=fake_json_file, metric=['bbox', 'segm'], classwise=False, - outfile_prefix=f'{self.tmp_dir.name}/test') + outfile_prefix=f'{self.tmp_dir.name}/test', + use_faster_coco_eval=use_faster_coco_eval, + ) coco_metric.dataset_meta = dict(classes=['car', 'bicycle']) coco_metric.process( {}, @@ -174,7 +180,10 @@ def test_evaluate(self): with self.assertRaisesRegex(KeyError, 'metric item "invalid" is not supported'): coco_metric = CocoMetric( - ann_file=fake_json_file, metric_items=['invalid']) + ann_file=fake_json_file, + metric_items=['invalid'], + use_faster_coco_eval=use_faster_coco_eval, + ) coco_metric.dataset_meta = dict(classes=['car', 'bicycle']) coco_metric.process({}, [ dict( @@ -184,7 +193,10 @@ def test_evaluate(self): # test custom metric_items coco_metric = CocoMetric( - 
ann_file=fake_json_file, metric_items=['mAP_m']) + ann_file=fake_json_file, + metric_items=['mAP_m'], + use_faster_coco_eval=use_faster_coco_eval, + ) coco_metric.dataset_meta = dict(classes=['car', 'bicycle']) coco_metric.process( {}, @@ -195,7 +207,8 @@ def test_evaluate(self): } self.assertDictEqual(eval_results, target) - def test_classwise_evaluate(self): + @parameterized.expand([False, True]) + def test_classwise_evaluate(self, use_faster_coco_eval): # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) @@ -203,9 +216,12 @@ def test_classwise_evaluate(self): # test single coco dataset evaluation coco_metric = CocoMetric( - ann_file=fake_json_file, metric='bbox', classwise=True) - # coco_metric1 = CocoMetric( - # ann_file=fake_json_file, metric='bbox', classwise=True) + ann_file=fake_json_file, + metric='bbox', + classwise=True, + use_faster_coco_eval=use_faster_coco_eval, + ) + coco_metric.dataset_meta = dict(classes=['car', 'bicycle']) coco_metric.process( {}, @@ -223,18 +239,24 @@ def test_classwise_evaluate(self): } self.assertDictEqual(eval_results, target) - def test_manually_set_iou_thrs(self): + @parameterized.expand([False, True]) + def test_manually_set_iou_thrs(self, use_faster_coco_eval): # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) # test single coco dataset evaluation coco_metric = CocoMetric( - ann_file=fake_json_file, metric='bbox', iou_thrs=[0.3, 0.6]) + ann_file=fake_json_file, + metric='bbox', + iou_thrs=[0.3, 0.6], + use_faster_coco_eval=use_faster_coco_eval, + ) coco_metric.dataset_meta = dict(classes=['car', 'bicycle']) self.assertEqual(coco_metric.iou_thrs, [0.3, 0.6]) - def test_fast_eval_recall(self): + @parameterized.expand([False, True]) + def test_fast_eval_recall(self, use_faster_coco_eval): # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) @@ -242,7 +264,10 @@ def test_fast_eval_recall(self): # test default proposal nums coco_metric = CocoMetric( - ann_file=fake_json_file, metric='proposal_fast') + ann_file=fake_json_file, + metric='proposal_fast', + use_faster_coco_eval=use_faster_coco_eval, + ) coco_metric.dataset_meta = dict(classes=['car', 'bicycle']) coco_metric.process( {}, @@ -264,13 +289,18 @@ def test_fast_eval_recall(self): target = {'coco/AR@2': 0.5, 'coco/AR@4': 1.0} self.assertDictEqual(eval_results, target) - def test_evaluate_proposal(self): + @parameterized.expand([False, True]) + def test_evaluate_proposal(self, use_faster_coco_eval): # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) dummy_pred = self._create_dummy_results() - coco_metric = CocoMetric(ann_file=fake_json_file, metric='proposal') + coco_metric = CocoMetric( + ann_file=fake_json_file, + metric='proposal', + use_faster_coco_eval=use_faster_coco_eval, + ) coco_metric.dataset_meta = dict(classes=['car', 'bicycle']) coco_metric.process( {}, @@ -287,11 +317,16 @@ def test_evaluate_proposal(self): } self.assertDictEqual(eval_results, target) - def test_empty_results(self): + @parameterized.expand([False, True]) + def test_empty_results(self, use_faster_coco_eval): # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) - coco_metric = CocoMetric(ann_file=fake_json_file, metric='bbox') + coco_metric = 
CocoMetric( + ann_file=fake_json_file, + metric='bbox', + use_faster_coco_eval=use_faster_coco_eval, + ) coco_metric.dataset_meta = dict(classes=['car', 'bicycle']) bboxes = np.zeros((0, 4)) labels = np.array([]) @@ -308,7 +343,8 @@ def test_empty_results(self): # coco api Index error will be caught coco_metric.evaluate(size=1) - def test_evaluate_without_json(self): + @parameterized.expand([False, True]) + def test_evaluate_without_json(self, use_faster_coco_eval): dummy_pred = self._create_dummy_results() dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8) @@ -340,7 +376,8 @@ def test_evaluate_without_json(self): ann_file=None, metric=['bbox', 'segm'], classwise=False, - outfile_prefix=f'{self.tmp_dir.name}/test') + outfile_prefix=f'{self.tmp_dir.name}/test', + use_faster_coco_eval=use_faster_coco_eval) coco_metric.dataset_meta = dict(classes=['car', 'bicycle']) coco_metric.process({}, [ dict( @@ -373,7 +410,8 @@ def test_evaluate_without_json(self): self.assertTrue( osp.isfile(osp.join(self.tmp_dir.name, 'test.gt.json'))) - def test_format_only(self): + @parameterized.expand([False, True]) + def test_format_only(self, use_faster_coco_eval): # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) @@ -384,7 +422,8 @@ def test_format_only(self): ann_file=fake_json_file, classwise=False, format_only=True, - outfile_prefix=None) + outfile_prefix=None, + use_faster_coco_eval=use_faster_coco_eval) coco_metric = CocoMetric( ann_file=fake_json_file, From 58647d910dbc90c529e36085346809f96df75d41 Mon Sep 17 00:00:00 2001 From: MiXaiLL76 Date: Fri, 2 Aug 2024 10:42:00 +0300 Subject: [PATCH 2/3] make faster_coco_eval optional --- mmdet/evaluation/metrics/coco_metric.py | 11 +++++-- requirements/optional.txt | 1 + requirements/runtime.txt | 1 - .../test_metrics/test_coco_metric.py | 30 +++++++++++++++++++ 4 files changed, 40 insertions(+), 3 deletions(-) diff --git a/mmdet/evaluation/metrics/coco_metric.py b/mmdet/evaluation/metrics/coco_metric.py index e97ac68e985..5910727606b 100644 --- a/mmdet/evaluation/metrics/coco_metric.py +++ b/mmdet/evaluation/metrics/coco_metric.py @@ -8,8 +8,6 @@ import numpy as np import torch -from faster_coco_eval import COCO as FasterCOCO -from faster_coco_eval import COCOeval_faster from mmengine.evaluator import BaseMetric from mmengine.fileio import dump, get_local_path, load from mmengine.logging import MMLogger @@ -20,6 +18,13 @@ from mmdet.structures.mask import encode_mask_results from ..functional import eval_recalls +try: + from faster_coco_eval import COCO as FasterCOCO + from faster_coco_eval import COCOeval_faster +except ImportError: + FasterCOCO = None + COCOeval_faster = None + @METRICS.register_module() class CocoMetric(BaseMetric): @@ -102,6 +107,8 @@ def __init__(self, self.use_mp_eval = use_mp_eval # whether to use Faster Coco Eval, default False self.use_faster_coco_eval = use_faster_coco_eval + if FasterCOCO is None: + raise RuntimeError('faster-coco-eval is not installed') # proposal_nums used to compute recall or precision. 
self.proposal_nums = list(proposal_nums) diff --git a/requirements/optional.txt b/requirements/optional.txt index 31bdde50bea..3e65e25ef14 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -1,5 +1,6 @@ cityscapesscripts emoji fairscale +faster-coco-eval imagecorruptions scikit-learn diff --git a/requirements/runtime.txt b/requirements/runtime.txt index 28addfc097a..8f74a6d3e61 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -1,4 +1,3 @@ -faster-coco-eval matplotlib numpy pycocotools diff --git a/tests/test_evaluation/test_metrics/test_coco_metric.py b/tests/test_evaluation/test_metrics/test_coco_metric.py index 03b0ccac969..9dd58d0f91d 100644 --- a/tests/test_evaluation/test_metrics/test_coco_metric.py +++ b/tests/test_evaluation/test_metrics/test_coco_metric.py @@ -1,5 +1,6 @@ import os.path as osp import tempfile +import unittest from unittest import TestCase import numpy as np @@ -10,6 +11,11 @@ from mmdet.evaluation import CocoMetric +try: + from faster_coco_eval import COCO as FasterCOCO +except ImportError: + FasterCOCO = None + class TestCocoMetric(TestCase): @@ -114,6 +120,9 @@ def test_init(self): @parameterized.expand([False, True]) def test_evaluate(self, use_faster_coco_eval): + if use_faster_coco_eval and (FasterCOCO is None): + return unittest.skip('faster-coco-eval is not installed') + # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) @@ -209,6 +218,9 @@ def test_evaluate(self, use_faster_coco_eval): @parameterized.expand([False, True]) def test_classwise_evaluate(self, use_faster_coco_eval): + if use_faster_coco_eval and (FasterCOCO is None): + return unittest.skip('faster-coco-eval is not installed') + # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) @@ -241,6 +253,9 @@ def test_classwise_evaluate(self, use_faster_coco_eval): @parameterized.expand([False, True]) def test_manually_set_iou_thrs(self, use_faster_coco_eval): + if use_faster_coco_eval and (FasterCOCO is None): + return unittest.skip('faster-coco-eval is not installed') + # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) @@ -257,6 +272,9 @@ def test_manually_set_iou_thrs(self, use_faster_coco_eval): @parameterized.expand([False, True]) def test_fast_eval_recall(self, use_faster_coco_eval): + if use_faster_coco_eval and (FasterCOCO is None): + return unittest.skip('faster-coco-eval is not installed') + # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) @@ -291,6 +309,9 @@ def test_fast_eval_recall(self, use_faster_coco_eval): @parameterized.expand([False, True]) def test_evaluate_proposal(self, use_faster_coco_eval): + if use_faster_coco_eval and (FasterCOCO is None): + return unittest.skip('faster-coco-eval is not installed') + # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) @@ -319,6 +340,9 @@ def test_evaluate_proposal(self, use_faster_coco_eval): @parameterized.expand([False, True]) def test_empty_results(self, use_faster_coco_eval): + if use_faster_coco_eval and (FasterCOCO is None): + return unittest.skip('faster-coco-eval is not installed') + # create dummy data fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json') self._create_dummy_coco_json(fake_json_file) @@ 
-345,6 +369,9 @@ def test_empty_results(self, use_faster_coco_eval):
 
     @parameterized.expand([False, True])
     def test_evaluate_without_json(self, use_faster_coco_eval):
+        if use_faster_coco_eval and (FasterCOCO is None):
+            return unittest.skip('faster-coco-eval is not installed')
+
         dummy_pred = self._create_dummy_results()
 
         dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8)
@@ -412,6 +439,9 @@ def test_evaluate_without_json(self, use_faster_coco_eval):
 
     @parameterized.expand([False, True])
     def test_format_only(self, use_faster_coco_eval):
+        if use_faster_coco_eval and (FasterCOCO is None):
+            return unittest.skip('faster-coco-eval is not installed')
+
         # create dummy data
         fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
         self._create_dummy_coco_json(fake_json_file)

From c02afc237a21790ce4a38b09f9447c291e5fefce Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=9C=D0=B8=D1=85=D0=B0=D0=B8=D0=BB?=
Date: Tue, 24 Sep 2024 13:38:37 +0300
Subject: [PATCH 3/3] Update mmdet/evaluation/metrics/coco_metric.py

Only check for faster-coco-eval when the flag is actually enabled, so that
users who stay on pycocotools are not required to install the optional
dependency.

Co-authored-by: BigDong
---
 mmdet/evaluation/metrics/coco_metric.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mmdet/evaluation/metrics/coco_metric.py b/mmdet/evaluation/metrics/coco_metric.py
index 5910727606b..5733fb4f373 100644
--- a/mmdet/evaluation/metrics/coco_metric.py
+++ b/mmdet/evaluation/metrics/coco_metric.py
@@ -107,7 +107,7 @@ def __init__(self,
         self.use_mp_eval = use_mp_eval
         # whether to use Faster Coco Eval, default False
         self.use_faster_coco_eval = use_faster_coco_eval
-        if FasterCOCO is None:
-            raise RuntimeError('faster-coco-eval is not installed')
+        if self.use_faster_coco_eval:
+            assert FasterCOCO is not None, 'faster-coco-eval is not installed'
         # proposal_nums used to compute recall or precision.
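
Note for reviewers (not part of the patches above): a minimal sketch of how the new flag would be used once the series is applied. The `val_evaluator` key and the annotation paths below are assumptions based on the standard MMDetection 3.x config layout and the dummy data in the unit tests; only the `use_faster_coco_eval` argument itself comes from this series, and it requires `pip install faster-coco-eval` (listed in requirements/optional.txt by PATCH 2/3).

# Hypothetical config snippet, assuming the usual MMDetection 3.x
# `val_evaluator` entry and a standard COCO annotation path.
val_evaluator = dict(
    type='CocoMetric',
    ann_file='data/coco/annotations/instances_val2017.json',  # assumed path
    metric='bbox',
    classwise=False,
    # Added by this series; defaults to False. With the fix in PATCH 3/3,
    # enabling it without faster-coco-eval installed fails with an
    # AssertionError at CocoMetric.__init__.
    use_faster_coco_eval=True)

# Direct use, mirroring the unit tests in this series:
from mmdet.evaluation import CocoMetric
metric = CocoMetric(
    ann_file='fake_data.json',  # assumed dummy annotation file
    metric='bbox',
    use_faster_coco_eval=True)
metric.dataset_meta = dict(classes=['car', 'bicycle'])

Because the faster_coco_eval import is wrapped in try/except, pycocotools users are unaffected: the optional dependency is only checked when the flag is set.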