diff --git a/ignite/contrib/metrics/regression/canberra_metric.py b/ignite/contrib/metrics/regression/canberra_metric.py
index 5bf6fbac15a..cf8bc5a96c8 100644
--- a/ignite/contrib/metrics/regression/canberra_metric.py
+++ b/ignite/contrib/metrics/regression/canberra_metric.py
@@ -37,18 +37,24 @@ class CanberraMetric(_BaseRegression):
     .. _`Botchkarev 2018`: https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf
 
-    .. testcode::
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in the format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
 
-        metric = CanberraMetric()
-        metric.attach(default_evaluator, 'canberra')
-        y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
-        y_true = y_pred * 1.5
-        state = default_evaluator.run([[y_pred, y_true]])
-        print(state.metrics['canberra'])
+        .. testcode::
 
-    .. testoutput::
+            metric = CanberraMetric()
+            metric.attach(default_evaluator, 'canberra')
+            y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
+            y_true = y_pred * 1.5
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['canberra'])
+
+        .. testoutput::
+
+            0.8000...
 
-        0.8000...
 
     .. versionchanged:: 0.4.3
 
         - Fixed implementation: ``abs`` in denominator.
diff --git a/ignite/contrib/metrics/regression/fractional_absolute_error.py b/ignite/contrib/metrics/regression/fractional_absolute_error.py
index d33d9369c56..1c05233071f 100644
--- a/ignite/contrib/metrics/regression/fractional_absolute_error.py
+++ b/ignite/contrib/metrics/regression/fractional_absolute_error.py
@@ -34,18 +34,24 @@ class FractionalAbsoluteError(_BaseRegression):
             metric's device to be the same as your ``update`` arguments ensures the
             ``update`` method is non-blocking. By default, CPU.
 
-    .. testcode::
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in the format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
 
-        metric = FractionalAbsoluteError()
-        metric.attach(default_evaluator, 'fractional_abs_error')
-        y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
-        y_true = y_pred * 0.8
-        state = default_evaluator.run([[y_pred, y_true]])
-        print(state.metrics['fractional_abs_error'])
+        .. testcode::
 
-    .. testoutput::
+            metric = FractionalAbsoluteError()
+            metric.attach(default_evaluator, 'fractional_abs_error')
+            y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
+            y_true = y_pred * 0.8
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['fractional_abs_error'])
+
+        .. testoutput::
+
+            0.2222...
-        0.2222...
 
     .. versionchanged:: 0.4.5
         - Works with DDP.
     """
diff --git a/ignite/contrib/metrics/regression/fractional_bias.py b/ignite/contrib/metrics/regression/fractional_bias.py
index 77cd93ab315..eca183420ca 100644
--- a/ignite/contrib/metrics/regression/fractional_bias.py
+++ b/ignite/contrib/metrics/regression/fractional_bias.py
@@ -34,18 +34,23 @@ class FractionalBias(_BaseRegression):
             metric's device to be the same as your ``update`` arguments ensures the
            ``update`` method is non-blocking. By default, CPU.
 
-    .. testcode::
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in the format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
 
-        metric = FractionalBias()
-        metric.attach(default_evaluator, 'fractional_bias')
-        y_pred = torch.Tensor([[3.8], [9.9], [5.4], [2.1]])
-        y_true = y_pred * 1.5
-        state = default_evaluator.run([[y_pred, y_true]])
-        print(state.metrics['fractional_bias'])
+        .. testcode::
 
-    .. testoutput::
+            metric = FractionalBias()
+            metric.attach(default_evaluator, 'fractional_bias')
+            y_pred = torch.Tensor([[3.8], [9.9], [5.4], [2.1]])
+            y_true = y_pred * 1.5
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['fractional_bias'])
 
-        0.4000...
+        .. testoutput::
+
+            0.4000...
 
     .. versionchanged:: 0.4.5
         - Works with DDP.
diff --git a/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py b/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py
index 053fed4ef92..a5cb8eb7199 100644
--- a/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py
+++ b/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py
@@ -34,18 +34,24 @@ class GeometricMeanAbsoluteError(_BaseRegression):
             metric's device to be the same as your ``update`` arguments ensures the
             ``update`` method is non-blocking. By default, CPU.
 
-    .. testcode::
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in the format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
 
-        metric = GeometricMeanAbsoluteError()
-        metric.attach(default_evaluator, 'gmae')
-        y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
-        y_true = y_pred * 1.5
-        state = default_evaluator.run([[y_pred, y_true]])
-        print(state.metrics['gmae'])
+        .. testcode::
 
-    .. testoutput::
+            metric = GeometricMeanAbsoluteError()
+            metric.attach(default_evaluator, 'gmae')
+            y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
+            y_true = y_pred * 1.5
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['gmae'])
+
+        .. testoutput::
+
+            2.2723...
-        2.2723...
 
     .. versionchanged:: 0.4.5
         - Works with DDP.
     """
diff --git a/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py b/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py
index 6d71e028ce1..2764e0dc358 100644
--- a/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py
+++ b/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py
@@ -47,6 +47,24 @@ class GeometricMeanRelativeAbsoluteError(_BaseRegression):
         device: specifies which device updates are accumulated on. Setting the
             metric's device to be the same as your ``update`` arguments ensures the
             ``update`` method is non-blocking. By default, CPU.
+
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in the format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+        .. testcode::
+
+            metric = GeometricMeanRelativeAbsoluteError()
+            metric.attach(default_evaluator, 'gmare')
+            y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+            y_pred = y_true * 0.75
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['gmare'])
+
+        .. testoutput::
+
+            0.0...
""" @reinit__is_reduced diff --git a/ignite/contrib/metrics/regression/manhattan_distance.py b/ignite/contrib/metrics/regression/manhattan_distance.py index e6ddb12647a..bfe87e061cf 100644 --- a/ignite/contrib/metrics/regression/manhattan_distance.py +++ b/ignite/contrib/metrics/regression/manhattan_distance.py @@ -33,6 +33,24 @@ class ManhattanDistance(_BaseRegression): metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. + Examples: + To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. + The output of the engine's ``process_function`` needs to be in format of + ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. + + .. testcode:: + + metric = ManhattanDistance() + metric.attach(default_evaluator, 'manhattan') + y_true = torch.Tensor([0, 1, 2, 3, 4, 5]) + y_pred = y_true * 0.75 + state = default_evaluator.run([[y_pred, y_true]]) + print(state.metrics['manhattan']) + + .. testoutput:: + + 3.75... + .. versionchanged:: 0.4.3 - Fixed sklearn compatibility. diff --git a/ignite/contrib/metrics/regression/maximum_absolute_error.py b/ignite/contrib/metrics/regression/maximum_absolute_error.py index 5dee6c71ff3..94fa578200d 100644 --- a/ignite/contrib/metrics/regression/maximum_absolute_error.py +++ b/ignite/contrib/metrics/regression/maximum_absolute_error.py @@ -34,6 +34,24 @@ class MaximumAbsoluteError(_BaseRegression): metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. + Examples: + To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. + The output of the engine's ``process_function`` needs to be in format of + ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. + + .. testcode:: + + metric = MaximumAbsoluteError() + metric.attach(default_evaluator, 'mae') + y_true = torch.Tensor([0, 1, 2, 3, 4, 5]) + y_pred = y_true * 0.75 + state = default_evaluator.run([[y_pred, y_true]]) + print(state.metrics['mae']) + + .. testoutput:: + + 1.25... + .. versionchanged:: 0.4.5 - Works with DDP. """ diff --git a/ignite/contrib/metrics/regression/mean_absolute_relative_error.py b/ignite/contrib/metrics/regression/mean_absolute_relative_error.py index 4cfea9c6ba4..2a8f0acccef 100644 --- a/ignite/contrib/metrics/regression/mean_absolute_relative_error.py +++ b/ignite/contrib/metrics/regression/mean_absolute_relative_error.py @@ -34,6 +34,24 @@ class MeanAbsoluteRelativeError(_BaseRegression): metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. + Examples: + To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. + The output of the engine's ``process_function`` needs to be in format of + ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. + + .. testcode:: + + metric = MeanAbsoluteRelativeError() + metric.attach(default_evaluator, 'mare') + y_true = torch.Tensor([1, 2, 3, 4, 5]) + y_pred = y_true * 0.75 + state = default_evaluator.run([[y_pred, y_true]]) + print(state.metrics['mare']) + + .. testoutput:: + + 0.25... + .. versionchanged:: 0.4.5 - Works with DDP. 
""" diff --git a/ignite/contrib/metrics/regression/mean_error.py b/ignite/contrib/metrics/regression/mean_error.py index 15d15a8c7e9..aafff66f45e 100644 --- a/ignite/contrib/metrics/regression/mean_error.py +++ b/ignite/contrib/metrics/regression/mean_error.py @@ -33,6 +33,24 @@ class MeanError(_BaseRegression): device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. + + Examples: + To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. + The output of the engine's ``process_function`` needs to be in format of + ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. + + .. testcode:: + + metric = MeanError() + metric.attach(default_evaluator, 'me') + y_true = torch.Tensor([0, 1, 2, 3, 4, 5]) + y_pred = y_true * 0.75 + state = default_evaluator.run([[y_pred, y_true]]) + print(state.metrics['me']) + + .. testoutput:: + + 0.625... """ @reinit__is_reduced diff --git a/ignite/contrib/metrics/regression/mean_normalized_bias.py b/ignite/contrib/metrics/regression/mean_normalized_bias.py index 5a5c02f0147..6fe06bb212e 100644 --- a/ignite/contrib/metrics/regression/mean_normalized_bias.py +++ b/ignite/contrib/metrics/regression/mean_normalized_bias.py @@ -34,6 +34,24 @@ class MeanNormalizedBias(_BaseRegression): metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. + Examples: + To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. + The output of the engine's ``process_function`` needs to be in format of + ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. + + .. testcode:: + + metric = MeanNormalizedBias() + metric.attach(default_evaluator, 'mnb') + y_true = torch.Tensor([1, 2, 3, 4, 5]) + y_pred = y_true * 0.75 + state = default_evaluator.run([[y_pred, y_true]]) + print(state.metrics['mnb']) + + .. testoutput:: + + 0.25... + .. versionchanged:: 0.4.5 - Works with DDP. """ diff --git a/ignite/contrib/metrics/regression/median_absolute_error.py b/ignite/contrib/metrics/regression/median_absolute_error.py index b2fee828414..8ada851a181 100644 --- a/ignite/contrib/metrics/regression/median_absolute_error.py +++ b/ignite/contrib/metrics/regression/median_absolute_error.py @@ -38,6 +38,25 @@ class MedianAbsoluteError(EpochMetric): you want to compute the metric with respect to one of the outputs. By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. device: optional device specification for internal storage. + + + Examples: + To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. + The output of the engine's ``process_function`` needs to be in format of + ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. + + .. testcode:: + + metric = MedianAbsoluteError() + metric.attach(default_evaluator, 'mae') + y_true = torch.Tensor([0, 1, 2, 3, 4, 5]) + y_pred = y_true * 0.75 + state = default_evaluator.run([[y_pred, y_true]]) + print(state.metrics['mae']) + + .. testoutput:: + + 0.5... 
""" def __init__( diff --git a/ignite/contrib/metrics/regression/median_absolute_percentage_error.py b/ignite/contrib/metrics/regression/median_absolute_percentage_error.py index d07999a994c..16f4f3a0bd9 100644 --- a/ignite/contrib/metrics/regression/median_absolute_percentage_error.py +++ b/ignite/contrib/metrics/regression/median_absolute_percentage_error.py @@ -28,7 +28,6 @@ class MedianAbsolutePercentageError(EpochMetric): Current implementation stores all input data (output and target) in as tensors before computing a metric. This can potentially lead to a memory error if the input data is larger than available RAM. - __ https://arxiv.org/abs/1809.03006 Args: @@ -38,6 +37,24 @@ class MedianAbsolutePercentageError(EpochMetric): you want to compute the metric with respect to one of the outputs. By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. device: optional device specification for internal storage. + + Examples: + To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. + The output of the engine's ``process_function`` needs to be in format of + ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. + + .. testcode:: + + metric = MedianAbsolutePercentageError() + metric.attach(default_evaluator, 'mape') + y_true = torch.Tensor([1, 2, 3, 4, 5]) + y_pred = y_true * 0.75 + state = default_evaluator.run([[y_pred, y_true]]) + print(state.metrics['mape']) + + .. testoutput:: + + 25.0... """ def __init__( diff --git a/ignite/contrib/metrics/regression/median_relative_absolute_error.py b/ignite/contrib/metrics/regression/median_relative_absolute_error.py index 8c8e305be10..67cef517ec0 100644 --- a/ignite/contrib/metrics/regression/median_relative_absolute_error.py +++ b/ignite/contrib/metrics/regression/median_relative_absolute_error.py @@ -28,7 +28,6 @@ class MedianRelativeAbsoluteError(EpochMetric): Current implementation stores all input data (output and target) in as tensors before computing a metric. This can potentially lead to a memory error if the input data is larger than available RAM. - __ https://arxiv.org/abs/1809.03006 Args: @@ -38,6 +37,24 @@ class MedianRelativeAbsoluteError(EpochMetric): you want to compute the metric with respect to one of the outputs. By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. device: optional device specification for internal storage. + + Examples: + To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. + The output of the engine's ``process_function`` needs to be in format of + ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. + + .. testcode:: + + metric = MedianRelativeAbsoluteError() + metric.attach(default_evaluator, 'mrae') + y_true = torch.Tensor([0, 1, 2, 3, 4, 5]) + y_pred = y_true * 0.75 + state = default_evaluator.run([[y_pred, y_true]]) + print(state.metrics['mrae']) + + .. testoutput:: + + 0.5... """ def __init__( diff --git a/ignite/contrib/metrics/regression/r2_score.py b/ignite/contrib/metrics/regression/r2_score.py index 4c906d32280..ae5aeda5417 100644 --- a/ignite/contrib/metrics/regression/r2_score.py +++ b/ignite/contrib/metrics/regression/r2_score.py @@ -32,6 +32,24 @@ class R2Score(_BaseRegression): metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. + Examples: + To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. 
+        The output of the engine's ``process_function`` needs to be in the format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+        .. testcode::
+
+            metric = R2Score()
+            metric.attach(default_evaluator, 'r2')
+            y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+            y_pred = y_true * 0.75
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['r2'])
+
+        .. testoutput::
+
+            0.8035...
+
     .. versionchanged:: 0.4.3
         Works with DDP.
     """
diff --git a/ignite/contrib/metrics/regression/wave_hedges_distance.py b/ignite/contrib/metrics/regression/wave_hedges_distance.py
index 7c029e74100..0361b28d21f 100644
--- a/ignite/contrib/metrics/regression/wave_hedges_distance.py
+++ b/ignite/contrib/metrics/regression/wave_hedges_distance.py
@@ -33,6 +33,24 @@ class WaveHedgesDistance(_BaseRegression):
             metric's device to be the same as your ``update`` arguments ensures the
             ``update`` method is non-blocking. By default, CPU.
 
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in the format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+        .. testcode::
+
+            metric = WaveHedgesDistance()
+            metric.attach(default_evaluator, 'whd')
+            y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+            y_pred = y_true * 0.75
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['whd'])
+
+        .. testoutput::
+
+            1.25...
+
     .. versionchanged:: 0.4.5
         - Works with DDP.
     """
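Note: every ``testcode`` block in this patch assumes a ``default_evaluator`` engine that the documentation's doctest setup provides; the patch itself does not define it. For running the snippets standalone, a minimal sketch of such a fixture (an assumption for illustration, not part of the patch) is:

.. code-block:: python

    import torch

    from ignite.contrib.metrics.regression import CanberraMetric
    from ignite.engine import Engine

    def eval_step(engine, batch):
        # Each batch is already a (y_pred, y) pair, so pass it through
        # unchanged; the attached metric consumes it as its update input.
        return batch

    default_evaluator = Engine(eval_step)

    metric = CanberraMetric()
    metric.attach(default_evaluator, "canberra")
    y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
    y_true = y_pred * 1.5
    state = default_evaluator.run([[y_pred, y_true]])
    print(state.metrics["canberra"])  # 0.8000...

The same pass-through engine works for every metric in this patch, since each example feeds a single ``(y_pred, y)`` batch to ``run``.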
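As a spot-check of the expected values, the Canberra output follows directly from the metric's definition, ``sum(|y - y_pred| / (|y| + |y_pred|))`` (note the ``abs`` in the denominator, per the 0.4.3 fix). A standalone verification sketch, not part of the patch:

.. code-block:: python

    import torch

    # With y = 1.5 * y_pred, each term is 0.5*|y_pred| / (2.5*|y_pred|) = 0.2,
    # so the four data points sum to 0.8, matching the ``0.8000...`` testoutput.
    y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
    y = y_pred * 1.5
    print(torch.sum(torch.abs(y - y_pred) / (torch.abs(y) + torch.abs(y_pred))))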