
Commit

add tests for bad metric warning
Signed-off-by: Weichen Xu <weichen.xu@databricks.com>
WeichenXu123 committed Oct 28, 2021
1 parent e43d8c4 commit ab2c9ed
Showing 2 changed files with 21 additions and 2 deletions.
2 changes: 1 addition & 1 deletion mlflow/statsmodels.py
@@ -377,7 +377,7 @@ def _get_autolog_metrics(fitted_model):

     if len(failed_evaluating_metrics) > 0:
         _logger.warning(
-            f"Failed to autolog metrics: {', '.join(sorted(failed_evaluating_metrics))}"
+            f"Failed to autolog metrics: {', '.join(sorted(failed_evaluating_metrics))}."
         )
     return result_metrics

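Note on this one-character change: the warning above is emitted at the end of _get_autolog_metrics, which tries each metric in the statsmodels autolog allowlist on the fitted results object and collects the names of any that raise. The following is only a rough, self-contained sketch of that pattern under stated assumptions (the allowlist contents and the numeric check are illustrative, not the exact MLflow implementation):

import logging

_logger = logging.getLogger(__name__)

# Assumed, illustrative allowlist; the real list lives in mlflow/statsmodels.py.
_autolog_metric_allowlist = ["fvalue", "f_pvalue", "aic", "bic"]


def _get_autolog_metrics(fitted_model):
    # Collect allowlisted metrics from the fitted results object, remembering
    # which ones raise so a single warning can list them all at once.
    result_metrics = {}
    failed_evaluating_metrics = set()
    for metric_name in _autolog_metric_allowlist:
        try:
            metric_value = getattr(fitted_model, metric_name, None)
            if isinstance(metric_value, (int, float)):
                result_metrics[metric_name] = metric_value
        except Exception:
            failed_evaluating_metrics.add(metric_name)

    if len(failed_evaluating_metrics) > 0:
        _logger.warning(
            f"Failed to autolog metrics: {', '.join(sorted(failed_evaluating_metrics))}."
        )
    return result_metrics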
21 changes: 20 additions & 1 deletion tests/statsmodels/test_statsmodels_autolog.py
@@ -5,7 +5,6 @@
 from statsmodels.tsa.base.tsa_model import TimeSeriesModel
 import mlflow
 import mlflow.statsmodels
-from mlflow.utils.file_utils import TempDir
 from tests.statsmodels.model_fixtures import (
     arma_model,
     ols_model,
@@ -120,6 +119,26 @@ def test_statsmodels_autolog_logs_basic_metrics():
     metrics = run.data.metrics
     assert set(metrics.keys()) == set(mlflow.statsmodels._autolog_metric_allowlist)
+
+    @property
+    def metric_raise_error(self):
+        raise RuntimeError()
+
+    class MockSummary:
+        def as_text(self):
+            return "mock summary."
+
+    with mock.patch(
+        "statsmodels.regression.linear_model.OLSResults.f_pvalue", metric_raise_error
+    ), mock.patch(
+        "statsmodels.regression.linear_model.OLSResults.fvalue", metric_raise_error
+    ), mock.patch(
+        "statsmodels.regression.linear_model.OLSResults.summary", lambda _: MockSummary()
+    ), mock.patch(
+        "mlflow.statsmodels._logger.warning"
+    ) as mock_warning:
+        ols_model()
+        mock_warning.assert_called_once_with("Failed to autolog metrics: f_pvalue, fvalue.")
 
 
 def test_statsmodels_autolog_works_after_exception():
     mlflow.statsmodels.autolog()
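Usage note on the mocking pattern in the added test: patching a class attribute with a property object makes every attribute access on an instance go through the raising getter, which is what drives OLSResults.f_pvalue and OLSResults.fvalue into the failed-metrics path. A minimal, self-contained illustration of that unittest.mock pattern follows; the Results class and the fvalue name here are made up for demonstration and are not part of the commit:

from unittest import mock


class Results:
    @property
    def fvalue(self):
        return 1.23


@property
def metric_raise_error(self):
    raise RuntimeError()


# Replacing the class-level property means instance attribute access raises,
# mirroring how the test forces the "Failed to autolog metrics: ..." warning.
with mock.patch.object(Results, "fvalue", metric_raise_error):
    try:
        Results().fvalue
    except RuntimeError:
        print("attribute access raised as expected")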
