Better error message on wrong device (#1056)
* Apply suggestions from code review

Co-authored-by: Jirka Borovec <Borda@users.noreply.github.com>
Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
3 people committed Jun 7, 2022
1 parent 1cc90c6 commit 9e26cae
Showing 3 changed files with 30 additions and 1 deletion.
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -11,8 +11,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Added

- Added specific `RuntimeError` when metric object is on wrong device ([#1056](https://github.com/PyTorchLightning/metrics/pull/1056))


- Added an option to specify own n-gram weights for `BLEUScore` and `SacreBLEUScore` instead of using uniform weights only ([#1075](https://github.com/PyTorchLightning/metrics/pull/1075))


-


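To make the first changelog entry concrete, here is a minimal sketch of the failure mode that now raises the more descriptive `RuntimeError`. It assumes a CUDA-capable machine, mirroring the new test below; `PearsonCorrCoef` is just one example metric:

import torch
from torchmetrics import PearsonCorrCoef

# Metric states live on the CPU by default, so CUDA inputs mismatch.
metric = PearsonCorrCoef()
preds = torch.rand(10, device="cuda")
target = torch.rand(10, device="cuda")

try:
    metric(preds, target)
except RuntimeError as err:
    print(err)  # now suggests `PearsonCorrCoef(...).to(device)`

# The remedy the message suggests: move the metric to the input device.
metric = PearsonCorrCoef().to("cuda")
score = metric(preds, target)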
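The second entry ([#1075]) can be sketched the same way. The `weights` argument name and the requirement that its length match `n_gram` follow the linked PR, so treat this as an illustrative assumption rather than a frozen API:

from torchmetrics import BLEUScore

preds = ["the cat is on the mat"]
target = [["there is a cat on the mat", "a cat is on the mat"]]

# Default behaviour: uniform weights of 1/n_gram per n-gram order.
uniform = BLEUScore(n_gram=2)

# New option: custom weights, here biased towards unigram precision.
weighted = BLEUScore(n_gram=2, weights=[0.75, 0.25])

print(uniform(preds, target), weighted(preds, target))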
12 changes: 12 additions & 0 deletions tests/bases/test_metric.py
@@ -26,6 +26,7 @@
from tests.helpers import seed_all
from tests.helpers.testers import DummyListMetric, DummyMetric, DummyMetricMultiOutput, DummyMetricSum
from tests.helpers.utilities import no_warning_call
from torchmetrics import PearsonCorrCoef
from torchmetrics.utilities.imports import _TORCH_LOWER_1_6

seed_all(42)
@@ -426,6 +427,17 @@ class UnsetProperty(metric_class):
        UnsetProperty()


@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires gpu")
def test_specific_error_on_wrong_device():
    metric = PearsonCorrCoef()
    preds = torch.tensor(range(10), device="cuda", dtype=torch.float)
    target = torch.tensor(range(10), device="cuda", dtype=torch.float)
    with pytest.raises(
        RuntimeError, match="This could be due to the metric class not being on the same device as input"
    ):
        _ = metric(preds, target)


@pytest.mark.parametrize("metric_class", [DummyListMetric, DummyMetric, DummyMetricMultiOutput, DummyMetricSum])
def test_no_warning_on_custom_forward(metric_class):
"""If metric is using custom forward, full_state_update is irrelevant."""
15 changes: 14 additions & 1 deletion torchmetrics/metric.py
@@ -378,7 +378,20 @@ def wrapped_func(*args: Any, **kwargs: Any) -> None:
            self._computed = None
            self._update_count += 1
            with torch.set_grad_enabled(self._enable_grad):
                update(*args, **kwargs)
                try:
                    update(*args, **kwargs)
                except RuntimeError as err:
                    if "Expected all tensors to be on" in str(err):
                        raise RuntimeError(
                            "Encountered different devices in metric calculation"
                            " (see stacktrace for details)."
                            " This could be due to the metric class not being on the same device as input."
                            f" Instead of `metric={self.__class__.__name__}(...)` try to do"
                            f" `metric={self.__class__.__name__}(...).to(device)` where"
                            " device corresponds to the device of the input."
                        ) from err
                    raise err

            if self.compute_on_cpu:
                self._move_list_states_to_cpu()

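The heart of the `torchmetrics/metric.py` change is translating a low-level device-mismatch `RuntimeError` into an actionable one while preserving the original traceback via exception chaining (`raise ... from err`). The following self-contained sketch uses hypothetical `update`/`guarded_update` helpers (not torchmetrics APIs) to isolate that pattern:

import torch


def update(preds: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # Stand-in for a metric update; adding tensors on different devices
    # raises PyTorch's "Expected all tensors to be on the same device" error.
    return preds + target


def guarded_update(preds: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    try:
        return update(preds, target)
    except RuntimeError as err:
        # Translate only the known device-mismatch failure; `from err`
        # chains the original exception so its stacktrace stays visible.
        if "Expected all tensors to be on" in str(err):
            raise RuntimeError(
                "Encountered different devices in metric calculation."
                " Move the metric to the device of the input."
            ) from err
        raise err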
