From d01467a4fbe5ac8322342f9e5e89f69b0900b558 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Thu, 24 Feb 2022 23:06:11 +0530
Subject: [PATCH 01/28] updated precision_recall_curve.py

---
 .../contrib/metrics/precision_recall_curve.py | 27 ++++++++++---------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 347cf7c3490..380cb03ff55 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -5,17 +5,6 @@
 from ignite.metrics import EpochMetric
 
 
-def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
-    try:
-        from sklearn.metrics import precision_recall_curve
-    except ImportError:
-        raise RuntimeError("This contrib module requires sklearn to be installed.")
-
-    y_true = y_targets.numpy()
-    y_pred = y_preds.numpy()
-    return precision_recall_curve(y_true, y_pred)
-
-
 class PrecisionRecallCurve(EpochMetric):
     """Compute precision-recall pairs for different probability thresholds for binary classification task
     by accumulating predictions and the ground-truth during an epoch and applying
@@ -70,6 +59,20 @@ def sigmoid_output_transform(output):
     """
 
     def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
+        try:
+            from sklearn.metrics import cohen_kappa_score  # noqa: F401
+        except ImportError:
+            raise RuntimeError("This contrib module requires sklearn to be installed.")
+        self.precision_recall_curve_compute_fn = self.precision_recall_curve_compute()
         super(PrecisionRecallCurve, self).__init__(
-            precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
+            self.precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
         )
+
+    def precision_recall_curve_compute(self) -> Callable[[torch.Tensor, torch.Tensor],float]:
+        from sklearn.metrics import precision_recall_curve
+        def wrapper(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
+
+            y_true = y_targets.numpy()
+            y_pred = y_preds.numpy()
+            return precision_recall_curve(y_true, y_pred)
+        return wrapper

From c5e27575daea1b4c73bbdc3cc04210bb6c96004d Mon Sep 17 00:00:00 2001
From: sayantan1410
Date: Thu, 24 Feb 2022 17:37:23 +0000
Subject: [PATCH 02/28] autopep8 fix

---
 ignite/contrib/metrics/precision_recall_curve.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 380cb03ff55..5d03f14a639 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -68,11 +68,13 @@ def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: b
             self.precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
         )
 
-    def precision_recall_curve_compute(self) -> Callable[[torch.Tensor, torch.Tensor],float]:
+    def precision_recall_curve_compute(self) -> Callable[[torch.Tensor, torch.Tensor], float]:
         from sklearn.metrics import precision_recall_curve
+
         def wrapper(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
 
             y_true = y_targets.numpy()
             y_pred = y_preds.numpy()
             return precision_recall_curve(y_true, y_pred)
+
         return wrapper
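Patches 01-02 replace the module-level compute function with a factory method on the class: the sklearn availability check moves into __init__ (note that it probes cohen_kappa_score, a leftover copied from the Cohen's kappa metric that patch 04 corrects), and the actual computation is returned as a closure. A minimal, self-contained sketch of that closure pattern, with illustrative tensors (the helper name make_compute_fn is not part of the patch):

    import torch

    def make_compute_fn():
        # Deferred import: scikit-learn is only needed once the metric is built.
        from sklearn.metrics import precision_recall_curve

        def wrapper(y_preds: torch.Tensor, y_targets: torch.Tensor):
            # sklearn works on numpy arrays, so convert the torch tensors first
            return precision_recall_curve(y_targets.numpy(), y_preds.numpy())

        return wrapper

    compute_fn = make_compute_fn()
    precision, recall, thresholds = compute_fn(
        torch.tensor([0.1, 0.4, 0.35, 0.8]), torch.tensor([0, 0, 1, 1])
    )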
From 0ce730f560be24e9512722835493febebd896370 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Thu, 24 Feb 2022 23:25:45 +0530
Subject: [PATCH 03/28] removed unused imports

---
 ignite/contrib/metrics/precision_recall_curve.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 5d03f14a639..376195c353d 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -1,4 +1,4 @@
-from typing import Any, Callable, Tuple
+from typing import Callable
 
 import torch
 

From dc884ec1132028051cbdf03f9623a9b5c3c1eb59 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Fri, 25 Feb 2022 01:41:40 +0530
Subject: [PATCH 04/28] made some small changes

---
 ignite/contrib/metrics/precision_recall_curve.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 376195c353d..2e7dd9bb648 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -60,15 +60,16 @@ def sigmoid_output_transform(output):
 
     def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
         try:
-            from sklearn.metrics import cohen_kappa_score  # noqa: F401
+            from sklearn.metrics import precision_recall_curve
         except ImportError:
             raise RuntimeError("This contrib module requires sklearn to be installed.")
-        self.precision_recall_curve_compute_fn = self.precision_recall_curve_compute()
+
         super(PrecisionRecallCurve, self).__init__(
-            self.precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
+            self.precision_recall_curve_compute(), output_transform=output_transform, check_compute_fn=check_compute_fn
         )
 
     def precision_recall_curve_compute(self) -> Callable[[torch.Tensor, torch.Tensor], float]:
+        """Returns a function computing the precision_recall_curve from scikit-learn."""
         from sklearn.metrics import precision_recall_curve
 
         def wrapper(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:

From d0f52e4264c30f64becab532c02e590447b1a27a Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Fri, 25 Feb 2022 01:47:16 +0530
Subject: [PATCH 05/28] solved unused import issue

---
 ignite/contrib/metrics/precision_recall_curve.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 2e7dd9bb648..8aa1e4f599b 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -60,7 +60,7 @@ def sigmoid_output_transform(output):
 
     def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
         try:
-            from sklearn.metrics import precision_recall_curve
+            from sklearn.metrics import precision_recall_curve # noqa: F401
         except ImportError:
             raise RuntimeError("This contrib module requires sklearn to be installed.")
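Patches 03-05 clean up the fallout of the refactor: the now-unused typing imports go away, the probe import is corrected to precision_recall_curve, and # noqa: F401 silences flake8's imported-but-unused warning, since the import exists only to fail fast with a clear error. The guard on its own looks like this (a sketch; the wrapper function name is hypothetical):

    def check_sklearn_available() -> None:
        # Probe import only: the name is intentionally unused (hence noqa: F401).
        try:
            from sklearn.metrics import precision_recall_curve  # noqa: F401
        except ImportError:
            raise RuntimeError("This contrib module requires sklearn to be installed.")

    check_sklearn_available()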
From 0c608834ca12384ccc337b698e7244249e1adafe Mon Sep 17 00:00:00 2001
From: sayantan1410
Date: Thu, 24 Feb 2022 20:18:28 +0000
Subject: [PATCH 06/28] autopep8 fix

---
 ignite/contrib/metrics/precision_recall_curve.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 8aa1e4f599b..a6b57327683 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -60,7 +60,7 @@ def sigmoid_output_transform(output):
 
     def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
         try:
-            from sklearn.metrics import precision_recall_curve # noqa: F401
+            from sklearn.metrics import precision_recall_curve  # noqa: F401
         except ImportError:
             raise RuntimeError("This contrib module requires sklearn to be installed.")

From 603426921ffd06343d315b6dc1e5b915ce263bd7 Mon Sep 17 00:00:00 2001
From: Sayantan Sadhu
Date: Sun, 27 Feb 2022 10:09:29 +0530
Subject: [PATCH 07/28] reverted some changes and changed epoch_metric.py

---
 .../contrib/metrics/precision_recall_curve.py | 46 ++++++-------------
 ignite/metrics/epoch_metric.py                | 10 +++-
 2 files changed, 22 insertions(+), 34 deletions(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index a6b57327683..46730b63b99 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -1,16 +1,26 @@
-from typing import Callable
+from typing import Any, Callable, Tuple
 
 import torch
 
 from ignite.metrics import EpochMetric
 
 
+def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
+    try:
+        from sklearn.metrics import precision_recall_curve
+    except ImportError:
+        raise RuntimeError("This contrib module requires sklearn to be installed.")
+
+    y_true = y_targets.numpy()
+    y_pred = y_preds.numpy()
+    return precision_recall_curve(y_true, y_pred)
+
+
 class PrecisionRecallCurve(EpochMetric):
     """Compute precision-recall pairs for different probability thresholds for binary classification task
     by accumulating predictions and the ground-truth during an epoch and applying
     `sklearn.metrics.precision_recall_curve
     <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .
-
     Args:
         output_transform: a callable that is used to transform the
             :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
@@ -20,62 +30,34 @@ class PrecisionRecallCurve(EpochMetric):
             <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are no
             issues. User will be warned in case there are any issues computing the function.
-
     Note:
        PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability
        estimates or confidence values. To apply an activation to y_pred, use output_transform as shown below:
-
     .. code-block:: python
-
         def sigmoid_output_transform(output):
             y_pred, y = output
             y_pred = torch.sigmoid(y_pred)
             return y_pred, y
 
         avg_precision = PrecisionRecallCurve(sigmoid_output_transform)
-
     Examples:
-
         .. include:: defaults.rst
             :start-after: :orphan:
-
         .. testcode::
-
             y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
             y_true = torch.tensor([0, 0, 1, 1])
             prec_recall_curve = PrecisionRecallCurve()
             prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
             state = default_evaluator.run([[y_pred, y_true]])
-
             print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
             print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
             print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
-
         .. testoutput::
-
             Precision [1.0, 1.0, 1.0]
             Recall [1.0, 0.5, 0.0]
             Thresholds [0.7109, 0.9997]
-
     """
 
     def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
-        try:
-            from sklearn.metrics import precision_recall_curve  # noqa: F401
-        except ImportError:
-            raise RuntimeError("This contrib module requires sklearn to be installed.")
-
         super(PrecisionRecallCurve, self).__init__(
-            self.precision_recall_curve_compute(), output_transform=output_transform, check_compute_fn=check_compute_fn
-        )
-
-    def precision_recall_curve_compute(self) -> Callable[[torch.Tensor, torch.Tensor], float]:
-        """Returns a function computing the precision_recall_curve from scikit-learn."""
-        from sklearn.metrics import precision_recall_curve
-
-        def wrapper(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
-
-            y_true = y_targets.numpy()
-            y_pred = y_preds.numpy()
-            return precision_recall_curve(y_true, y_pred)
-
-        return wrapper
+            precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
+        )
\ No newline at end of file
diff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py
index a715230b8b0..adf5d572614 100644
--- a/ignite/metrics/epoch_metric.py
+++ b/ignite/metrics/epoch_metric.py
@@ -157,8 +157,14 @@ def compute(self) -> float:
             result = self.compute_fn(_prediction_tensor, _target_tensor)
 
         if ws > 1:
-            # broadcast result to all processes
-            result = cast(float, idist.broadcast(result, src=0))
+            if isinstance(result,tuple):
+                l = len(result)
+                for i in range(l):
+                    result[i] = idist.broadcast(result[i], src=0)
+
+            else:
+                # broadcast result to all processes
+                result = cast(float, idist.broadcast(result, src=0))
 
         return result

From 52de16199f07a05f1bc7123996944d11a5bbbcc4 Mon Sep 17 00:00:00 2001
From: sayantan1410
Date: Sun, 27 Feb 2022 04:50:04 +0000
Subject: [PATCH 08/28] autopep8 fix

---
 ignite/contrib/metrics/precision_recall_curve.py | 2 +-
 ignite/metrics/epoch_metric.py                   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 46730b63b99..6c6b78a3a0e 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -60,4 +60,4 @@ def sigmoid_output_transform(output):
     def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
         super(PrecisionRecallCurve, self).__init__(
             precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
-        )
\ No newline at end of file
+        )
diff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py
index adf5d572614..e2e7dfca299 100644
--- a/ignite/metrics/epoch_metric.py
+++ b/ignite/metrics/epoch_metric.py
@@ -157,7 +157,7 @@ def compute(self) -> float:
             result = self.compute_fn(_prediction_tensor, _target_tensor)
 
         if ws > 1:
-            if isinstance(result,tuple):
+            if isinstance(result, tuple):
                 l = len(result)
                 for i in range(l):
                     result[i] = idist.broadcast(result[i], src=0)
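Patch 07 reverts to the module-level compute function, whose sklearn call returns a (precision, recall, thresholds) tuple, and makes EpochMetric.compute broadcast tuple results element by element. As written, though, result[i] = ... is item assignment on a tuple and would raise a TypeError; the base-class change is reverted again in patch 10. A sketch of an element-wise broadcast that builds a new tuple instead (the helper name is illustrative):

    from typing import Any, cast

    import ignite.distributed as idist

    def broadcast_result(result: Any) -> Any:
        # Tuples are immutable, so build a new one rather than assigning in place.
        if isinstance(result, tuple):
            return tuple(idist.broadcast(r, src=0) for r in result)
        # single-value results keep the original float path
        return cast(float, idist.broadcast(result, src=0))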
From 36544bc0693c5a8693343e7b5065a2fb356299f5 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Thu, 3 Mar 2022 19:49:20 +0530
Subject: [PATCH 09/28] rewrote compute function for precision_recall_curve.py

---
 .../contrib/metrics/precision_recall_curve.py | 45 +++++++++++++------
 .../metrics/test_precision_recall_curve.py    | 33 ++++++++++++++
 2 files changed, 65 insertions(+), 13 deletions(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 347cf7c3490..3982edf592e 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -1,8 +1,10 @@
-from typing import Any, Callable, Tuple
+from typing import Any, Callable, Tuple,cast
 
 import torch
 
+import ignite.distributed as idist
 from ignite.metrics import EpochMetric
+from ignite.exceptions import NotComputableError
 
 
 def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
@@ -21,7 +23,6 @@ class PrecisionRecallCurve(EpochMetric):
     by accumulating predictions and the ground-truth during an epoch and applying
     `sklearn.metrics.precision_recall_curve
     <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .
-
     Args:
         output_transform: a callable that is used to transform the
             :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
@@ -31,45 +32,63 @@ class PrecisionRecallCurve(EpochMetric):
             <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are no
             issues. User will be warned in case there are any issues computing the function.
-
     Note:
        PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability
        estimates or confidence values. To apply an activation to y_pred, use output_transform as shown below:
-
     .. code-block:: python
-
         def sigmoid_output_transform(output):
             y_pred, y = output
             y_pred = torch.sigmoid(y_pred)
             return y_pred, y
 
         avg_precision = PrecisionRecallCurve(sigmoid_output_transform)
-
     Examples:
-
         .. include:: defaults.rst
             :start-after: :orphan:
-
         .. testcode::
-
             y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
             y_true = torch.tensor([0, 0, 1, 1])
             prec_recall_curve = PrecisionRecallCurve()
             prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
             state = default_evaluator.run([[y_pred, y_true]])
-
             print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
             print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
             print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
-
         .. testoutput::
-
             Precision [1.0, 1.0, 1.0]
             Recall [1.0, 0.5, 0.0]
             Thresholds [0.7109, 0.9997]
-
     """
 
     def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
         super(PrecisionRecallCurve, self).__init__(
             precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
         )
+    def compute(self) -> float:
+        if len(self._predictions) < 1 or len(self._targets) < 1:
+            raise NotComputableError("EpochMetric must have at least one example before it can be computed.")
+
+        _prediction_tensor = torch.cat(self._predictions, dim=0)
+        _target_tensor = torch.cat(self._targets, dim=0)
+
+        ws = idist.get_world_size()
+        if ws > 1 and not self._is_reduced:
+            # All gather across all processes
+            _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
+            _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
+            self._is_reduced = True
+
+        precision = torch.zeros(1,len(self._predictions))
+        recall = torch.zeros(1,len(self._predictions))
+        thresholds = torch.zeros(1,len(self._predictions)-1)
+        if idist.get_rank() == 0:
+            # Run compute_fn on zero rank only
+            precision,recall,thresholds = self.compute_fn(_prediction_tensor, _target_tensor)
+
+
+        if ws > 1:
+            # broadcast result to all processes
+            precision = cast(float, idist.broadcast(precision, src=0))
+            recall = cast(float, idist.broadcast(recall, src=0))
+            thresholds = cast(float, idist.broadcast(thresholds, src=0))
+
+        return precision,recall,thresholds
diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index ca3ec25cc2d..82d45cf9e16 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -6,6 +6,7 @@
 import torch
 from sklearn.metrics import precision_recall_curve
 
+import ignite.distributed as idist
 from ignite.contrib.metrics.precision_recall_curve import PrecisionRecallCurve
 from ignite.engine import Engine
 from ignite.metrics.epoch_metric import EpochMetricWarning
@@ -124,3 +125,35 @@ def test_check_compute_fn():
 
     em = PrecisionRecallCurve(check_compute_fn=False)
     em.update(output)
+
+def _test_distrib_binary_input(device):
+
+    rank = idist.get_rank()
+    torch.manual_seed(12)
+
+    def _test(y_pred, y, batch_size, metric_device):
+
+        metric_device = torch.device(metric_device)
+        prc = PrecisionRecallCurve(device=metric_device)
+
+        torch.manual_seed(10 + rank)
+
+        prc.reset()
+        if batch_size > 1:
+            n_iters = y.shape[0] // batch_size + 1
+            for i in range(n_iters):
+                idx = i * batch_size
+                prc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
+        else:
+            prc.update((y_pred, y))
+
+        # gather y_pred, y
+        y_pred = idist.all_gather(y_pred)
+        y = idist.all_gather(y)
+
+        np_y = y.cpu().numpy()
+        np_y_pred = y_pred.cpu().numpy()
+
+        res = prc.compute()
+        assert isinstance(res, float)
+        assert PrecisionRecallCurve(np_y, np_y_pred) == pytest.approx(res)
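Patch 09 moves the distributed handling into a compute() override on the metric itself: per-batch predictions and targets are concatenated and all-gathered, compute_fn runs on rank 0 only, and the three resulting arrays are broadcast to the other ranks. A condensed, standalone sketch of that gather / rank-0 compute / broadcast pattern (function and variable names are illustrative; in a single-process run the idist calls are no-ops):

    import torch
    import ignite.distributed as idist
    from sklearn.metrics import precision_recall_curve

    def gather_compute_broadcast(predictions, targets):
        # Concatenate per-batch results, then gather the full epoch across processes.
        preds = idist.all_gather(torch.cat(predictions, dim=0))
        tgts = idist.all_gather(torch.cat(targets, dim=0))
        if idist.get_rank() == 0:
            # sklearn runs on rank 0 only; torch.tensor() copies the numpy output
            p, r, t = precision_recall_curve(tgts.numpy(), preds.numpy())
            result = (torch.tensor(p), torch.tensor(r), torch.tensor(t))
        else:
            result = (None, None, None)  # filled in by the broadcast below
        return tuple(idist.broadcast(x, src=0, safe_mode=True) for x in result)

    precision, recall, thresholds = gather_compute_broadcast(
        [torch.tensor([0.1, 0.4, 0.35, 0.8])], [torch.tensor([0, 0, 1, 1])]
    )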
From 2d45c21d97226b2fbcae2c484f185f7cbac37275 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Thu, 3 Mar 2022 19:56:01 +0530
Subject: [PATCH 10/28] reverted epoch_metric.py

---
 ignite/metrics/epoch_metric.py | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py
index e2e7dfca299..a715230b8b0 100644
--- a/ignite/metrics/epoch_metric.py
+++ b/ignite/metrics/epoch_metric.py
@@ -157,14 +157,8 @@ def compute(self) -> float:
             result = self.compute_fn(_prediction_tensor, _target_tensor)
 
         if ws > 1:
-            if isinstance(result, tuple):
-                l = len(result)
-                for i in range(l):
-                    result[i] = idist.broadcast(result[i], src=0)
-
-            else:
-                # broadcast result to all processes
-                result = cast(float, idist.broadcast(result, src=0))
+            # broadcast result to all processes
+            result = cast(float, idist.broadcast(result, src=0))
 
         return result
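With the base class restored, the design settles on keeping EpochMetric generic and overriding compute() in metrics whose compute_fn returns several values. A toy, runnable illustration of the pattern (the metric-like function here is made up for the example):

    import torch
    from ignite.metrics import EpochMetric

    def minmax_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor):
        # Tuple-valued result: the base class would try to broadcast it whole,
        # so a real multi-value metric overrides compute() as patch 09 does.
        return y_preds.min(), y_preds.max()

    metric = EpochMetric(minmax_compute_fn, check_compute_fn=False)
    metric.update((torch.tensor([0.2, 0.7, 0.5]), torch.tensor([0, 1, 1])))
    print(metric.compute())  # (tensor(0.2000), tensor(0.7000)) in a single-process run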
From 57439fd955e2fabea9bbed255dbc0ccd0edd07b5 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Thu, 3 Mar 2022 19:59:32 +0530
Subject: [PATCH 11/28] reverted unnecessary changes to docstring

---
 ignite/contrib/metrics/precision_recall_curve.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 3982edf592e..a6be3a745c9 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -23,6 +23,7 @@ class PrecisionRecallCurve(EpochMetric):
     by accumulating predictions and the ground-truth during an epoch and applying
     `sklearn.metrics.precision_recall_curve
     <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .
+
     Args:
         output_transform: a callable that is used to transform the
             :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
@@ -32,31 +33,41 @@ class PrecisionRecallCurve(EpochMetric):
             <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are no
             issues. User will be warned in case there are any issues computing the function.
+
     Note:
        PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability
        estimates or confidence values. To apply an activation to y_pred, use output_transform as shown below:
+
     .. code-block:: python
+
         def sigmoid_output_transform(output):
             y_pred, y = output
             y_pred = torch.sigmoid(y_pred)
             return y_pred, y
 
         avg_precision = PrecisionRecallCurve(sigmoid_output_transform)
+
     Examples:
+
         .. include:: defaults.rst
             :start-after: :orphan:
+
         .. testcode::
            y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
            y_true = torch.tensor([0, 0, 1, 1])
            prec_recall_curve = PrecisionRecallCurve()
            prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
            state = default_evaluator.run([[y_pred, y_true]])
+
            print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
            print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
            print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
+
         .. testoutput::
+
            Precision [1.0, 1.0, 1.0]
            Recall [1.0, 0.5, 0.0]
            Thresholds [0.7109, 0.9997]
+
     """
 
     def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:

From 96de71a3623fe4af4934702d433937db43bb8cb0 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Thu, 3 Mar 2022 20:01:58 +0530
Subject: [PATCH 12/28] reverted a line break that was added by mistake

---
 ignite/contrib/metrics/precision_recall_curve.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index a6be3a745c9..ae8139bf983 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -52,6 +52,7 @@ def sigmoid_output_transform(output):
             :start-after: :orphan:
 
         .. testcode::
+
            y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
            y_true = torch.tensor([0, 0, 1, 1])
            prec_recall_curve = PrecisionRecallCurve()

From 19b568dff98862455d9900c29d01a1da8df5475f Mon Sep 17 00:00:00 2001
From: sayantan1410
Date: Thu, 3 Mar 2022 14:33:17 +0000
Subject: [PATCH 13/28] autopep8 fix

---
 ignite/contrib/metrics/precision_recall_curve.py  | 16 ++++++++--------
 .../metrics/test_precision_recall_curve.py        |  1 +
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index ae8139bf983..572848563fe 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -1,10 +1,10 @@
-from typing import Any, Callable, Tuple,cast
+from typing import Any, Callable, cast, Tuple
 
 import torch
 
 import ignite.distributed as idist
-from ignite.metrics import EpochMetric
-from ignite.exceptions import NotComputableError
+from ignite.exceptions import NotComputableError
+from ignite.metrics import EpochMetric
 
 
 def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
@@ -75,6 +75,7 @@ def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: b
             precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
         )
+
     def compute(self) -> float:
         if len(self._predictions) < 1 or len(self._targets) < 1:
             raise NotComputableError("EpochMetric must have at least one example before it can be computed.")
@@ -89,13 +90,12 @@ def compute(self) -> float:
             _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
             self._is_reduced = True
 
-        precision = torch.zeros(1,len(self._predictions))
-        recall = torch.zeros(1,len(self._predictions))
-        thresholds = torch.zeros(1,len(self._predictions)-1)
+        precision = torch.zeros(1, len(self._predictions))
+        recall = torch.zeros(1, len(self._predictions))
+        thresholds = torch.zeros(1, len(self._predictions) - 1)
         if idist.get_rank() == 0:
             # Run compute_fn on zero rank only
-            precision,recall,thresholds = self.compute_fn(_prediction_tensor, _target_tensor)
-
+            precision, recall, thresholds = self.compute_fn(_prediction_tensor, _target_tensor)
 
         if ws > 1:
             # broadcast result to all processes
diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index 82d45cf9e16..4ffb5d50dec 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -126,6 +126,7 @@ def test_check_compute_fn():
     em = PrecisionRecallCurve(check_compute_fn=False)
     em.update(output)
 
+
 def _test_distrib_binary_input(device):
 
     rank = idist.get_rank()
From 74cb48ba05fa73622831b9e24bc66d6087912c43 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Thu, 3 Mar 2022 20:28:23 +0530
Subject: [PATCH 14/28] corrected function annotation

---
 ignite/contrib/metrics/precision_recall_curve.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 572848563fe..431af365ed4 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -76,7 +76,7 @@ def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: b
             precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
         )
 
-    def compute(self) -> float:
+    def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
         if len(self._predictions) < 1 or len(self._targets) < 1:
             raise NotComputableError("EpochMetric must have at least one example before it can be computed.")
 
@@ -90,9 +90,9 @@ def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
             _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
             self._is_reduced = True
 
-        precision = torch.zeros(1, len(self._predictions))
-        recall = torch.zeros(1, len(self._predictions))
-        thresholds = torch.zeros(1, len(self._predictions) - 1)
+        precision = torch.zeros(len(self._predictions))
+        recall = torch.zeros(len(self._predictions))
+        thresholds = torch.zeros(len(self._predictions) - 1)
         if idist.get_rank() == 0:
             # Run compute_fn on zero rank only
             precision, recall, thresholds = self.compute_fn(_prediction_tensor, _target_tensor)

From 89a69e928c6d30802c02c0f5ad0e6e11d726a3bd Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Thu, 3 Mar 2022 20:55:08 +0530
Subject: [PATCH 15/28] fixed mypy issues

---
 ignite/contrib/metrics/precision_recall_curve.py | 6 +++---
 ignite/metrics/epoch_metric.py                   | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 431af365ed4..cf1e960225f 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -99,8 +99,8 @@ def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
 
         if ws > 1:
             # broadcast result to all processes
-            precision = cast(float, idist.broadcast(precision, src=0))
-            recall = cast(float, idist.broadcast(recall, src=0))
-            thresholds = cast(float, idist.broadcast(thresholds, src=0))
+            precision = cast(torch.Tensor, idist.broadcast(precision, src=0))
+            recall = cast(torch.Tensor, idist.broadcast(recall, src=0))
+            thresholds = cast(torch.Tensor, idist.broadcast(thresholds, src=0))
 
         return precision, recall, thresholds
diff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py
index a715230b8b0..4822953839d 100644
--- a/ignite/metrics/epoch_metric.py
+++ b/ignite/metrics/epoch_metric.py
@@ -1,5 +1,5 @@
 import warnings
-from typing import Callable, cast, List, Tuple, Union
+from typing import Any, Callable, cast, List, Tuple, Union
 
 import torch
 
@@ -136,7 +136,7 @@ def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
             except Exception as e:
                 warnings.warn(f"Probably, there can be a problem with `compute_fn`:\n {e}.", EpochMetricWarning)
 
-    def compute(self) -> float:
+    def compute(self) -> Any:
         if len(self._predictions) < 1 or len(self._targets) < 1:
             raise NotComputableError("EpochMetric must have at least one example before it can be computed.")
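The mypy fixes are worth a gloss: EpochMetric.compute is loosened to return Any so that subclasses may legitimately return tuples, and the cast targets become torch.Tensor. typing.cast only informs the type checker and performs no runtime conversion, as this tiny illustration shows:

    from typing import Any, cast

    def untyped() -> Any:
        return [1, 2, 3]

    xs = cast(list, untyped())  # purely a typing assertion, no conversion happens
    assert xs == [1, 2, 3]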
From 777ec91e66aa1a2993015bbcfd8a7523010aefcf Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Fri, 4 Mar 2022 18:28:32 +0530
Subject: [PATCH 16/28] Added tests for GPU and TPU

---
 .../metrics/test_precision_recall_curve.py | 57 ++++++++++++++++++-
 1 file changed, 56 insertions(+), 1 deletion(-)

diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -1,5 +1,7 @@
+from typing import Tuple
 from unittest.mock import patch
 
+import os
 import numpy as np
 import pytest
 import sklearn
@@ -156,5 +158,58 @@ def _test(y_pred, y, batch_size, metric_device):
         np_y_pred = y_pred.cpu().numpy()
 
         res = prc.compute()
-        assert isinstance(res, float)
+        assert isinstance(res, Tuple)
         assert PrecisionRecallCurve(np_y, np_y_pred) == pytest.approx(res)
+
+@pytest.mark.distributed
+@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
+@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
+def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
+
+    device = idist.device()
+    _test_distrib_binary_input(device)
+@pytest.mark.distributed
+@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
+def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
+
+    device = idist.device()
+    _test_distrib_binary_input(device)
+
+
+@pytest.mark.distributed
+@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
+@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
+def test_distrib_hvd(gloo_hvd_executor):
+
+    device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
+    nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
+
+    gloo_hvd_executor(_test_distrib_binary_input, (device,), np=nproc, do_init=True)
+
+@pytest.mark.multinode_distributed
+@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
+@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
+def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
+
+    device = idist.device()
+    _test_distrib_binary_input(device)
+
+@pytest.mark.multinode_distributed
+@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
+@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
+def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
+
+    device = idist.device()
+    _test_distrib_binary_input(device)
+
+@pytest.mark.tpu
+@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
+@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
+def test_distrib_single_device_xla():
+    device = idist.device()
+    _test_distrib_binary_input(device)
+
+
+def _test_distrib_xla_nprocs(index):
+    device = idist.device()
+    _test_distrib_binary_input(device)
\ No newline at end of file

From 8ac2ecffcf3fb5e8ae175fe112d5ef40efeea1b9 Mon Sep 17 00:00:00 2001
From: sayantan1410
Date: Fri, 4 Mar 2022 13:00:03 +0000
Subject: [PATCH 17/28] autopep8 fix

---
 .../contrib/metrics/test_precision_recall_curve.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index c4f9e3e06ea..8bdf15be458 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -1,7 +1,7 @@
+import os
 from typing import Tuple
 from unittest.mock import patch
 
-import os
 import numpy as np
 import pytest
 import sklearn
@@ -161,6 +161,7 @@ def _test(y_pred, y, batch_size, metric_device):
     assert isinstance(res, Tuple)
     assert PrecisionRecallCurve(np_y, np_y_pred) == pytest.approx(res)
 
+
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
 def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
 
     device = idist.device()
     _test_distrib_binary_input(device)
+
+
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
 
@@ -186,6 +189,7 @@ def test_distrib_hvd(gloo_hvd_executor):
 
     gloo_hvd_executor(_test_distrib_binary_input, (device,), np=nproc, do_init=True)
 
+
 @pytest.mark.multinode_distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
 def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
 
     device = idist.device()
     _test_distrib_binary_input(device)
 
+
 @pytest.mark.multinode_distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
 def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
 
     device = idist.device()
     _test_distrib_binary_input(device)
 
+
 @pytest.mark.tpu
 @pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
 @pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
 def test_distrib_single_device_xla():
     device = idist.device()
     _test_distrib_binary_input(device)
 
 
 def _test_distrib_xla_nprocs(index):
     device = idist.device()
-    _test_distrib_binary_input(device)
\ No newline at end of file
+    _test_distrib_binary_input(device)
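Two issues remain in the helper added by patch 16: compute() now returns a tuple, yet the check still compares against PrecisionRecallCurve(np_y, np_y_pred), i.e. against the ignite metric class rather than sklearn's function. Patches 18 and 25 replace it with element-wise comparisons against sklearn, in the spirit of this standalone sketch (data here is illustrative):

    import numpy as np
    import pytest
    from sklearn.metrics import precision_recall_curve

    np_y = np.array([0, 0, 1, 1])
    np_y_pred = np.array([0.1, 0.4, 0.35, 0.8])
    res = precision_recall_curve(np_y, np_y_pred)  # stand-in for prc.compute()

    sk_precision, sk_recall, sk_thresholds = precision_recall_curve(np_y, np_y_pred)
    assert sk_precision == pytest.approx(res[0])
    assert sk_recall == pytest.approx(res[1])
    assert sk_thresholds == pytest.approx(res[2])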
From a81cc31a9868bf42445134aad1b7eee8001b2b61 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Sat, 5 Mar 2022 21:19:57 +0530
Subject: [PATCH 18/28] fixed a few tests in precision_recall_curve

---
 .../metrics/test_precision_recall_curve.py | 81 +++++++++++++++++--
 1 file changed, 73 insertions(+), 8 deletions(-)

diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index 8bdf15be458..7da0d7da5fd 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -129,7 +129,7 @@ def test_check_compute_fn():
     em.update(output)
 
 
-def _test_distrib_binary_input(device):
+def _test_distrib_compute(device):
 
     rank = idist.get_rank()
     torch.manual_seed(12)
@@ -160,6 +160,56 @@ def _test(y_pred, y, batch_size, metric_device):
         res = prc.compute()
         assert isinstance(res, Tuple)
         assert PrecisionRecallCurve(np_y, np_y_pred) == pytest.approx(res)
+    for _ in range(3):
+        _test("cpu")
+        if device.type != "xla":
+            _test(idist.device())
+
+def _test_distrib_integration(device):
+
+    rank = idist.get_rank()
+    torch.manual_seed(12)
+
+    def _test(n_epochs, metric_device):
+        metric_device = torch.device(metric_device)
+        n_iters = 80
+        size = 151
+        y_true = torch.rand(size=(size,)).to(device)
+        y_preds = torch.rand(size=(size,)).to(device)
+
+        def update(engine, i):
+            return (
+                y_preds[i * size : (i + 1) * size],
+                y_true[i * size : (i + 1) * size],
+            )
+
+        engine = Engine(update)
+
+        prc = PrecisionRecallCurve(device=metric_device)
+        prc.attach(engine, "mare")
+
+        data = list(range(n_iters))
+        engine.run(data=data, max_epochs=n_epochs)
+
+        assert "mare" in engine.state.metrics
+
+        res = engine.state.metrics["mare"]
+
+        np_y_true = y_true.cpu().numpy().ravel()
+        np_y_preds = y_preds.cpu().numpy().ravel()
+
+        e = np.abs(np_y_true - np_y_preds) / np.abs(np_y_true - np_y_true.mean())
+        np_res = np.median(e)
+
+        assert pytest.approx(res) == np_res
+
+    metric_devices = ["cpu"]
+    if device.type != "xla":
+        metric_devices.append(idist.device())
+    for metric_device in metric_devices:
+        for _ in range(2):
+            _test(n_epochs=1, metric_device=metric_device)
+            _test(n_epochs=2, metric_device=metric_device)
 
 
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
 def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
 
     device = idist.device()
-    _test_distrib_binary_input(device)
+    _test_distrib_compute(device)
+    _test_distrib_integration(device)
 
 
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
 
     device = idist.device()
-    _test_distrib_binary_input(device)
+    _test_distrib_compute(device)
+    _test_distrib_integration(device)
 
 
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
 @pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
 def test_distrib_hvd(gloo_hvd_executor):
 
     device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
     nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
 
-    gloo_hvd_executor(_test_distrib_binary_input, (device,), np=nproc, do_init=True)
+    gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
+    gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
 
 
 @pytest.mark.multinode_distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
 def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
 
     device = idist.device()
-    _test_distrib_binary_input(device)
+    _test_distrib_compute(device)
+    _test_distrib_integration(device)
 
 
 @pytest.mark.multinode_distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
 def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
 
     device = idist.device()
-    _test_distrib_binary_input(device)
+    _test_distrib_compute(device)
+    _test_distrib_integration(device)
 
 
 @pytest.mark.tpu
 @pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
 @pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
 def test_distrib_single_device_xla():
     device = idist.device()
-    _test_distrib_binary_input(device)
+    _test_distrib_compute(device)
+    _test_distrib_integration(device)
 
 
 def _test_distrib_xla_nprocs(index):
     device = idist.device()
-    _test_distrib_binary_input(device)
+    _test_distrib_compute(device)
+    _test_distrib_integration(device)
+
+
+@pytest.mark.tpu
+@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
+@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
+def test_distrib_xla_nprocs(xmp_executor):
+    n = int(os.environ["NUM_TPU_WORKERS"])
+    xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)

From 7fbc0c96e72f99e9410f5a565cadb03f58a79d5f Mon Sep 17 00:00:00 2001
From: sayantan1410
Date: Sat, 5 Mar 2022 15:51:06 +0000
Subject: [PATCH 19/28] autopep8 fix

---
 tests/ignite/contrib/metrics/test_precision_recall_curve.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index 7da0d7da5fd..5d28ed9962d 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -165,6 +165,7 @@ def _test(y_pred, y, batch_size, metric_device):
         if device.type != "xla":
             _test(idist.device())
 
+
 def _test_distrib_integration(device):

From 9d118c48043300730f3649ec02aa5540ea4d0779 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Sun, 6 Mar 2022 10:01:46 +0530
Subject: [PATCH 20/28] fixed a few errors for the tests

---
 .../contrib/metrics/test_precision_recall_curve.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index 5d28ed9962d..15a9f46ce8c 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -187,22 +187,22 @@ def update(engine, i):
         engine = Engine(update)
 
         prc = PrecisionRecallCurve(device=metric_device)
-        prc.attach(engine, "mare")
+        prc.attach(engine, "prc")
 
         data = list(range(n_iters))
         engine.run(data=data, max_epochs=n_epochs)
 
-        assert "mare" in engine.state.metrics
+        assert "prc" in engine.state.metrics
 
-        res = engine.state.metrics["mare"]
+        precision, recall, thresholds = engine.state.metrics["prc"]
 
         np_y_true = y_true.cpu().numpy().ravel()
         np_y_preds = y_preds.cpu().numpy().ravel()
 
-        e = np.abs(np_y_true - np_y_preds) / np.abs(np_y_true - np_y_true.mean())
-        np_res = np.median(e)
-
-        assert pytest.approx(res) == np_res
+        sk_precision,sk_recall,sk_thresholds = precision_recall_curve(np_y_true, np_y_preds)
+        assert pytest.approx(precision) == sk_precision
+        assert pytest.approx(recall) == sk_recall
+        assert pytest.approx(thresholds) == sk_thresholds
 
     metric_devices = ["cpu"]
     if device.type != "xla":

From 6855a924502fdb9f092172dccb692b660b3a6021 Mon Sep 17 00:00:00 2001
From: sayantan1410
Date: Sun, 6 Mar 2022 04:32:58 +0000
Subject: [PATCH 21/28] autopep8 fix

---
 tests/ignite/contrib/metrics/test_precision_recall_curve.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index 15a9f46ce8c..80afbf973eb 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -199,7 +199,7 @@ def update(engine, i):
         np_y_true = y_true.cpu().numpy().ravel()
         np_y_preds = y_preds.cpu().numpy().ravel()
 
-        sk_precision,sk_recall,sk_thresholds = precision_recall_curve(np_y_true, np_y_preds)
+        sk_precision, sk_recall, sk_thresholds = precision_recall_curve(np_y_true, np_y_preds)
         assert pytest.approx(precision) == sk_precision
         assert pytest.approx(recall) == sk_recall
         assert pytest.approx(thresholds) == sk_thresholds

From 810d0f37b50f8574d4bba8b593ad538bb46a9f83 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Sun, 6 Mar 2022 19:34:47 +0530
Subject: [PATCH 22/28] added tests for array shape

---
 .../metrics/test_precision_recall_curve.py | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
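Patch 20's fixes show the integration test began life as a copy of another metric's test: the metric was attached under the name "mare" and checked against a median-absolute-relative-error computation, and both are replaced with a direct sklearn comparison. A minimal engine-attachment sketch of what the test exercises (tensors are illustrative; assumes pytorch-ignite and scikit-learn are installed):

    import torch
    from ignite.engine import Engine
    from ignite.contrib.metrics.precision_recall_curve import PrecisionRecallCurve

    y_pred = torch.tensor([0.1, 0.4, 0.35, 0.8])
    y_true = torch.tensor([0, 0, 1, 1])

    # Each "batch" of this toy engine yields the same (y_pred, y_true) pair.
    engine = Engine(lambda engine, batch: (y_pred, y_true))
    PrecisionRecallCurve().attach(engine, "prc")
    state = engine.run([0], max_epochs=1)
    precision, recall, thresholds = state.metrics["prc"]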
index 80afbf973eb..1f8a05ca3de 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -160,10 +160,10 @@ def _test(y_pred, y, batch_size, metric_device):
         res = prc.compute()
         assert isinstance(res, Tuple)
         assert PrecisionRecallCurve(np_y, np_y_pred) == pytest.approx(res)
-        for _ in range(3):
-            _test("cpu")
-            if device.type != "xla":
-                _test(idist.device())
+    for _ in range(3):
+        _test("cpu")
+        if device.type != "xla":
+            _test(idist.device())
 
 
 def _test_distrib_integration(device):
@@ -195,11 +195,18 @@ def update(engine, i):
         assert "prc" in engine.state.metrics
 
         precision, recall, thresholds = engine.state.metrics["prc"]
+        precision = precision.numpy()
+        recall = recall.numpy()
+        thresholds = thresholds.numpy()
 
         np_y_true = y_true.cpu().numpy().ravel()
         np_y_preds = y_preds.cpu().numpy().ravel()
 
         sk_precision, sk_recall, sk_thresholds = precision_recall_curve(np_y_true, np_y_preds)
+
+        assert precision.shape == sk_precision.shape
+        assert recall.shape == sk_recall.shape
+        assert thresholds.shape == sk_thresholds.shape
         assert pytest.approx(precision) == sk_precision
         assert pytest.approx(recall) == sk_recall
         assert pytest.approx(thresholds) == sk_thresholds

From 203b9a13ffed0f3bcab7ea9483403b587e2336ec Mon Sep 17 00:00:00 2001
From: sayantan1410
Date: Sun, 6 Mar 2022 14:06:14 +0000
Subject: [PATCH 23/28] autopep8 fix

---
 tests/ignite/contrib/metrics/test_precision_recall_curve.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index 1f8a05ca3de..abdf19eb294 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -160,6 +160,7 @@ def _test(y_pred, y, batch_size, metric_device):
         res = prc.compute()
         assert isinstance(res, Tuple)
         assert PrecisionRecallCurve(np_y, np_y_pred) == pytest.approx(res)
+
     for _ in range(3):
         _test("cpu")
         if device.type != "xla":

From ca9f0a4264c1e8d68f47a0ffa30f9cdaaf60f254 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Mon, 7 Mar 2022 17:37:38 +0530
Subject: [PATCH 24/28] made some small changes

---
 tests/ignite/contrib/metrics/test_precision_recall_curve.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index abdf19eb294..71901df5a68 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -158,6 +158,7 @@ def _test(y_pred, y, batch_size, metric_device):
         np_y_pred = y_pred.cpu().numpy()
 
         res = prc.compute()
+
         assert isinstance(res, Tuple)
         assert PrecisionRecallCurve(np_y, np_y_pred) == pytest.approx(res)
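Patch 22's .numpy() conversions put both sides of each assertion in the same domain before comparing; mixing a torch tensor with a numpy array under pytest.approx is an easy source of spurious failures. A sketch of the idea (values are illustrative):

    import numpy as np
    import pytest
    import torch

    t = torch.tensor([1.0, 2.0])
    a = np.array([1.0, 2.0])
    assert t.numpy() == pytest.approx(a)  # compare array to array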
From 74aa143794c0ab9bac7382de6a85d9f66cf13309 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Mon, 7 Mar 2022 19:21:23 +0530
Subject: [PATCH 25/28] Fixed all the errors in the tests

---
 .../contrib/metrics/precision_recall_curve.py | 14 ++++++--
 .../metrics/test_precision_recall_curve.py    | 34 +++++++++++++------
 2 files changed, 34 insertions(+), 14 deletions(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index cf1e960225f..d8bb3ff1e9f 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -1,4 +1,4 @@
-from typing import Any, Callable, cast, Tuple
+from typing import Any, Callable, cast, Tuple, Union
 
 import torch
 
@@ -71,9 +71,17 @@ def sigmoid_output_transform(output):
 
     """
 
-    def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
+    def __init__(
+        self,
+        output_transform: Callable = lambda x: x,
+        check_compute_fn: bool = False,
+        device: Union[str, torch.device] = torch.device("cpu"),
+    ) -> None:
         super(PrecisionRecallCurve, self).__init__(
-            precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
+            precision_recall_curve_compute_fn,
+            output_transform=output_transform,
+            check_compute_fn=check_compute_fn,
+            device=device,
         )
diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index 71901df5a68..1890f9adca3 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -160,12 +160,27 @@ def _test(y_pred, y, batch_size, metric_device):
         res = prc.compute()
 
         assert isinstance(res, Tuple)
-        assert PrecisionRecallCurve(np_y, np_y_pred) == pytest.approx(res)
-
-    for _ in range(3):
-        _test("cpu")
-        if device.type != "xla":
-            _test(idist.device())
+        assert precision_recall_curve(np_y, np_y_pred)[0] == pytest.approx(res[0])
+        assert precision_recall_curve(np_y, np_y_pred)[1] == pytest.approx(res[1])
+        assert precision_recall_curve(np_y, np_y_pred)[2] == pytest.approx(res[2])
+
+    def get_test_cases():
+        test_cases = [
+            # Binary input data of shape (N,) or (N, 1)
+            (torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)), 1),
+            (torch.randint(0, 2, size=(10, 1)), torch.randint(0, 2, size=(10, 1)), 1),
+            # updated batches
+            (torch.randint(0, 2, size=(50,)), torch.randint(0, 2, size=(50,)), 16),
+            (torch.randint(0, 2, size=(50, 1)), torch.randint(0, 2, size=(50, 1)), 16),
+        ]
+        return test_cases
+
+    for _ in range(5):
+        test_cases = get_test_cases()
+        for y_pred, y, batch_size in test_cases:
+            _test(y_pred, y, batch_size, "cpu")
+            if device.type != "xla":
+                _test(y_pred, y, batch_size, idist.device())
 
 
 def _test_distrib_integration(device):
@@ -177,8 +192,8 @@ def _test(n_epochs, metric_device):
         metric_device = torch.device(metric_device)
         n_iters = 80
         size = 151
-        y_true = torch.rand(size=(size,)).to(device)
-        y_preds = torch.rand(size=(size,)).to(device)
+        y_true = torch.randint(0, 2, (size,)).to(device)
+        y_preds = torch.randint(0, 2, (size,)).to(device)
 
         def update(engine, i):
             return (
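Patch 25 threads a device argument through to EpochMetric, which the distributed tests rely on (PrecisionRecallCurve(device=metric_device)), and switches the test inputs to binary labels, which is what precision_recall_curve expects for y. Basic usage after this change, as a sketch (assumes the patched module is importable and scikit-learn is installed):

    import torch
    from ignite.contrib.metrics.precision_recall_curve import PrecisionRecallCurve

    metric = PrecisionRecallCurve(device=torch.device("cpu"))
    metric.update((torch.tensor([0.1, 0.9, 0.8]), torch.tensor([0, 1, 1])))
    precision, recall, thresholds = metric.compute()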
From 83b599c999cedad040c8e996be386600b5de90af Mon Sep 17 00:00:00 2001
From: Desroziers
Date: Mon, 7 Mar 2022 17:17:55 +0100
Subject: [PATCH 26/28] fix distributed computation

---
 ignite/contrib/metrics/precision_recall_curve.py | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index d8bb3ff1e9f..5b910fac2e7 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -98,17 +98,21 @@ def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
             _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
             self._is_reduced = True
 
-        precision = torch.zeros(len(self._predictions))
-        recall = torch.zeros(len(self._predictions))
-        thresholds = torch.zeros(len(self._predictions) - 1)
         if idist.get_rank() == 0:
             # Run compute_fn on zero rank only
             precision, recall, thresholds = self.compute_fn(_prediction_tensor, _target_tensor)
+            precision = torch.Tensor(precision)
+            recall = torch.Tensor(recall)
+            # thresholds can have negative strides, not compatible with torch tensors
+            # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2
+            thresholds = torch.Tensor(thresholds.copy())
+        else:
+            precision, recall, thresholds = None, None, None
 
         if ws > 1:
             # broadcast result to all processes
-            precision = cast(torch.Tensor, idist.broadcast(precision, src=0))
-            recall = cast(torch.Tensor, idist.broadcast(recall, src=0))
-            thresholds = cast(torch.Tensor, idist.broadcast(thresholds, src=0))
+            precision = idist.broadcast(precision, src=0, safe_mode=True)
+            recall = idist.broadcast(recall, src=0, safe_mode=True)
+            thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
 
         return precision, recall, thresholds

From 743c752685b3cbfe387badfe17f10a86f7f8baad Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Tue, 8 Mar 2022 11:36:06 +0530
Subject: [PATCH 27/28] converted tensors to numpy arrays

---
 .../ignite/contrib/metrics/test_precision_recall_curve.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index 1890f9adca3..e643ae01474 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -41,6 +41,9 @@ def test_precision_recall_curve():
     precision_recall_curve_metric.update((y_pred, y))
 
     precision, recall, thresholds = precision_recall_curve_metric.compute()
+    precision = precision.numpy()
+    recall = recall.numpy()
+    thresholds = thresholds.numpy()
 
     assert np.array_equal(precision, sk_precision)
     assert np.array_equal(recall, sk_recall)
@@ -73,7 +76,9 @@ def update_fn(engine, batch):
 
     data = list(range(size // batch_size))
     precision, recall, thresholds = engine.run(data, max_epochs=1).metrics["precision_recall_curve"]
-
+    precision = precision.numpy()
+    recall = recall.numpy()
+    thresholds = thresholds.numpy()
     assert np.array_equal(precision, sk_precision)
     assert np.array_equal(recall, sk_recall)
     # assert thresholds almost equal, due to numpy->torch->numpy conversion
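The negative-stride comment in patch 26 refers to a real torch limitation: numpy arrays that are reversed views carry negative strides, and torch's zero-copy conversion refuses them, which is why thresholds is copied before wrapping. A quick demonstration:

    import numpy as np
    import torch

    arr = np.arange(5.0)[::-1]        # reversed view: negative strides
    try:
        torch.from_numpy(arr)         # zero-copy conversion rejects it
    except ValueError as exc:
        print(exc)
    t = torch.from_numpy(arr.copy())  # a contiguous copy converts cleanly

The same patch switches the broadcasts to safe_mode=True, which lets the non-source ranks pass None and still receive the rank-0 tensors.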
From e0046677709d49f1937ee68b6f46b9d614f5f922 Mon Sep 17 00:00:00 2001
From: Sayantan
Date: Tue, 8 Mar 2022 12:04:56 +0530
Subject: [PATCH 28/28] checking for approx equal

---
 .../metrics/test_precision_recall_curve.py | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
index e643ae01474..be5c29a0bb4 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/contrib/metrics/test_precision_recall_curve.py
@@ -45,8 +45,8 @@ def test_precision_recall_curve():
     recall = recall.numpy()
     thresholds = thresholds.numpy()
 
-    assert np.array_equal(precision, sk_precision)
-    assert np.array_equal(recall, sk_recall)
+    assert pytest.approx(precision) == sk_precision
+    assert pytest.approx(recall) == sk_recall
     # assert thresholds almost equal, due to numpy->torch->numpy conversion
     np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
 
@@ -79,8 +79,8 @@ def update_fn(engine, batch):
     precision = precision.numpy()
     recall = recall.numpy()
     thresholds = thresholds.numpy()
 
-    assert np.array_equal(precision, sk_precision)
-    assert np.array_equal(recall, sk_recall)
+    assert pytest.approx(precision) == sk_precision
+    assert pytest.approx(recall) == sk_recall
     # assert thresholds almost equal, due to numpy->torch->numpy conversion
     np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
 
@@ -111,9 +111,12 @@ def update_fn(engine, batch):
 
     data = list(range(size // batch_size))
     precision, recall, thresholds = engine.run(data, max_epochs=1).metrics["precision_recall_curve"]
+    precision = precision.numpy()
+    recall = recall.numpy()
+    thresholds = thresholds.numpy()
 
-    assert np.array_equal(precision, sk_precision)
-    assert np.array_equal(recall, sk_recall)
+    assert pytest.approx(precision) == sk_precision
+    assert pytest.approx(recall) == sk_recall
     # assert thresholds almost equal, due to numpy->torch->numpy conversion
     np.testing.assert_array_almost_equal(thresholds, sk_thresholds)