diff --git a/src/torchmetrics/functional/classification/confusion_matrix.py b/src/torchmetrics/functional/classification/confusion_matrix.py
index b668c152e5b..1b93450ab69 100644
--- a/src/torchmetrics/functional/classification/confusion_matrix.py
+++ b/src/torchmetrics/functional/classification/confusion_matrix.py
@@ -94,7 +94,7 @@ def _binary_confusion_matrix_tensor_validation(
     _check_same_shape(preds, target)
 
     # Check that target only contains {0,1} values or value in ignore_index
-    unique_values = torch.unique(target)
+    unique_values = torch.unique(target, dim=None)
     if ignore_index is None:
         check = torch.any((unique_values != 0) & (unique_values != 1))
     else:
@@ -107,7 +107,7 @@
 
     # If preds is label tensor, also check that it only contains {0,1} values
     if not preds.is_floating_point():
-        unique_values = torch.unique(preds)
+        unique_values = torch.unique(preds, dim=None)
         if torch.any((unique_values != 0) & (unique_values != 1)):
             raise RuntimeError(
                 f"Detected the following values in `preds`: {unique_values} but expected only"
@@ -287,7 +287,7 @@ def _multiclass_confusion_matrix_tensor_validation(
 
     check_value = num_classes if ignore_index is None else num_classes + 1
     for t, name in ((target, "target"),) + ((preds, "preds"),) if not preds.is_floating_point() else ():  # noqa: RUF005
-        num_unique_values = len(torch.unique(t))
+        num_unique_values = len(torch.unique(t, dim=None))
         if num_unique_values > check_value:
             raise RuntimeError(
                 f"Detected more unique values in `{name}` than expected. Expected only {check_value} but found"
@@ -454,7 +454,7 @@ def _multilabel_confusion_matrix_tensor_validation(
         )
 
     # Check that target only contains [0,1] values or value in ignore_index
-    unique_values = torch.unique(target)
+    unique_values = torch.unique(target, dim=None)
     if ignore_index is None:
         check = torch.any((unique_values != 0) & (unique_values != 1))
     else:
@@ -467,7 +467,7 @@
 
     # If preds is label tensor, also check that it only contains [0,1] values
     if not preds.is_floating_point():
-        unique_values = torch.unique(preds)
+        unique_values = torch.unique(preds, dim=None)
         if torch.any((unique_values != 0) & (unique_values != 1)):
             raise RuntimeError(
                 f"Detected the following values in `preds`: {unique_values} but expected only"
diff --git a/src/torchmetrics/functional/classification/precision_recall_curve.py b/src/torchmetrics/functional/classification/precision_recall_curve.py
index da12db561a1..c4607fd9489 100644
--- a/src/torchmetrics/functional/classification/precision_recall_curve.py
+++ b/src/torchmetrics/functional/classification/precision_recall_curve.py
@@ -148,7 +148,7 @@ def _binary_precision_recall_curve_tensor_validation(
         )
 
     # Check that target only contains {0,1} values or value in ignore_index
-    unique_values = torch.unique(target)
+    unique_values = torch.unique(target, dim=None)
     if ignore_index is None:
         check = torch.any((unique_values != 0) & (unique_values != 1))
     else:
@@ -417,7 +417,7 @@ def _multiclass_precision_recall_curve_tensor_validation(
             f" but got {preds.shape} and {target.shape}"
         )
 
-    num_unique_values = len(torch.unique(target))
+    num_unique_values = len(torch.unique(target, dim=None))
    check = num_unique_values > num_classes if ignore_index is None else num_unique_values > num_classes + 1
     if check:
         raise RuntimeError(
diff --git a/src/torchmetrics/functional/classification/stat_scores.py b/src/torchmetrics/functional/classification/stat_scores.py
index 47c38ff72e1..565c212f9bd 100644
--- a/src/torchmetrics/functional/classification/stat_scores.py
+++ b/src/torchmetrics/functional/classification/stat_scores.py
@@ -67,7 +67,7 @@ def _binary_stat_scores_tensor_validation(
     _check_same_shape(preds, target)
 
     # Check that target only contains [0,1] values or value in ignore_index
-    unique_values = torch.unique(target)
+    unique_values = torch.unique(target, dim=None)
     if ignore_index is None:
         check = torch.any((unique_values != 0) & (unique_values != 1))
     else:
@@ -80,7 +80,7 @@
 
     # If preds is label tensor, also check that it only contains [0,1] values
     if not preds.is_floating_point():
-        unique_values = torch.unique(preds)
+        unique_values = torch.unique(preds, dim=None)
         if torch.any((unique_values != 0) & (unique_values != 1)):
             raise RuntimeError(
                 f"Detected the following values in `preds`: {unique_values} but expected only"
@@ -314,11 +314,11 @@ def _multiclass_stat_scores_tensor_validation(
 
     check_value = num_classes if ignore_index is None else num_classes + 1
     for t, name in ((target, "target"),) + ((preds, "preds"),) if not preds.is_floating_point() else ():  # noqa: RUF005
-        num_unique_values = len(torch.unique(t))
+        num_unique_values = len(torch.unique(t, dim=None))
         if num_unique_values > check_value:
             raise RuntimeError(
                 f"Detected more unique values in `{name}` than expected. Expected only {check_value} but found"
-                f" {num_unique_values} in `target`."
+                f" {num_unique_values} in `{name}`. Found values: {torch.unique(t, dim=None)}."
             )
 
 
@@ -624,7 +624,7 @@ def _multilabel_stat_scores_tensor_validation(
         )
 
     # Check that target only contains [0,1] values or value in ignore_index
-    unique_values = torch.unique(target)
+    unique_values = torch.unique(target, dim=None)
     if ignore_index is None:
         check = torch.any((unique_values != 0) & (unique_values != 1))
     else:
@@ -637,7 +637,7 @@
 
     # If preds is label tensor, also check that it only contains [0,1] values
     if not preds.is_floating_point():
-        unique_values = torch.unique(preds)
+        unique_values = torch.unique(preds, dim=None)
         if torch.any((unique_values != 0) & (unique_values != 1)):
             raise RuntimeError(
                 f"Detected the following values in `preds`: {unique_values} but expected only"
diff --git a/tests/unittests/classification/test_stat_scores.py b/tests/unittests/classification/test_stat_scores.py
index a0036261963..53fa78d0368 100644
--- a/tests/unittests/classification/test_stat_scores.py
+++ b/tests/unittests/classification/test_stat_scores.py
@@ -578,8 +578,6 @@ def test_multilabel_stat_scores_dtype_gpu(self, inputs, dtype):
         )
 
 
-# fixme: Expected only 5 but found 7 in `target`
-@pytest.mark.flaky(reruns=5, only_rerun="RuntimeError")
 def test_support_for_int():
     """See issue: https://github.com/Lightning-AI/torchmetrics/issues/1970."""
     seed_all(42)
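Below is a minimal, self-contained sketch (not part of the patch) of the validation pattern this diff standardizes: calling `torch.unique` with an explicit `dim=None` to flatten the tensor before collecting distinct labels, then rejecting anything outside `{0, 1}` or the optional `ignore_index`. The helper name `check_binary_labels` is hypothetical and exists only for illustration.

from typing import Optional

import torch


def check_binary_labels(target: torch.Tensor, ignore_index: Optional[int] = None) -> None:
    """Raise if ``target`` contains values other than {0, 1} (or ``ignore_index``)."""
    # dim=None flattens the tensor before computing uniques, matching the calls in the diff
    unique_values = torch.unique(target, dim=None)
    invalid = (unique_values != 0) & (unique_values != 1)
    if ignore_index is not None:
        # values equal to ignore_index are allowed and therefore not flagged
        invalid &= unique_values != ignore_index
    if torch.any(invalid):
        raise RuntimeError(
            f"Detected the following values in `target`: {unique_values} but expected only"
            " binary values" + (f" and {ignore_index}." if ignore_index is not None else ".")
        )


check_binary_labels(torch.tensor([0, 1, 1, 0]))                    # passes silently
check_binary_labels(torch.tensor([0, 1, -1, 1]), ignore_index=-1)  # passes silently
# check_binary_labels(torch.tensor([0, 2]))                        # would raise RuntimeError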