forked from mlflow/mlflow
-
Notifications
You must be signed in to change notification settings - Fork 1
/
conftest.py
110 lines (88 loc) · 3.45 KB
/
conftest.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
import os
import inspect
import shutil
from unittest import mock
import pytest
import mlflow
from mlflow.utils.file_utils import path_to_local_sqlite_uri
from tests.autologging.fixtures import enable_test_mode
@pytest.fixture
def reset_mock():
    """Fixture yielding a ``set_mock(obj, attr, new_value)`` helper.

    Every attribute replaced through the helper is recorded along with its
    pre-patch value and restored on teardown.

    Restoration runs in reverse (LIFO) order of patching: if the same
    attribute is patched twice, the value recorded first — the true
    original — is applied last, so the object ends up fully restored.
    (Restoring in insertion order would leave the first mock in place.)
    """
    cache = []

    # `new_value` deliberately not named `mock`, which would shadow the
    # `unittest.mock` module imported at file level.
    def set_mock(obj, attr, new_value):
        # Record the current value *before* overwriting so it can be restored.
        cache.append((obj, attr, getattr(obj, attr)))
        setattr(obj, attr, new_value)

    yield set_mock

    for obj, attr, value in reversed(cache):
        setattr(obj, attr, value)
    cache.clear()
@pytest.fixture(autouse=True)
def tracking_uri_mock(tmpdir, request):
    """Point MLflow at a per-test local SQLite tracking store.

    Tests marked ``notrackingurimock`` keep whatever tracking URI is already
    configured. On teardown the tracking URI is reset and the
    ``MLFLOW_TRACKING_URI`` environment variable is removed (when it was set
    by this fixture).
    """
    use_mock_uri = "notrackingurimock" not in request.keywords
    try:
        if use_mock_uri:
            uri = path_to_local_sqlite_uri(os.path.join(tmpdir.strpath, "mlruns"))
            mlflow.set_tracking_uri(uri)
            os.environ["MLFLOW_TRACKING_URI"] = uri
        yield tmpdir
    finally:
        mlflow.set_tracking_uri(None)
        if use_mock_uri:
            del os.environ["MLFLOW_TRACKING_URI"]
@pytest.fixture(autouse=True, scope="session")
def enable_test_mode_by_default_for_autologging_integrations():
    """
    Run the entire test session in autologging test mode so that errors raised
    by autologging patch code surface as failures rather than being swallowed.
    For details on autologging test mode, see the docstring for
    :py:func:`mlflow.utils.autologging_utils._is_testing()`.
    """
    # Delegate setup/teardown to the shared generator fixture helper.
    mode_toggle = enable_test_mode()
    yield from mode_toggle
@pytest.fixture(autouse=True)
def clean_up_leaked_runs():
    """
    Guard against test cases leaking active MLflow runs.

    Certain test cases validate safety API behavior when runs are leaked.
    A run left active after one test can cause hard-to-debug cascading
    failures in later tests, so on teardown this fixture fails the test if a
    run is still active and — regardless of the assertion — force-ends every
    remaining run.
    """
    try:
        yield
        leaked = mlflow.active_run()
        assert (
            not leaked
        ), "test case unexpectedly leaked a run. Run info: {}. Run data: {}".format(
            leaked.info, leaked.data
        )
    finally:
        # Always drain the run stack so subsequent tests start clean.
        while mlflow.active_run():
            mlflow.end_run()
def _called_in_save_model():
for frame in inspect.stack()[::-1]:
if frame.function == "save_model":
return True
return False
@pytest.fixture(autouse=True)
def prevent_infer_pip_requirements_fallback(request):
    """
    Prevents `mlflow.models.infer_pip_requirements` from falling back in `mlflow.*.save_model`
    unless explicitly disabled via `pytest.mark.allow_infer_pip_requirements_fallback`.

    Implemented by patching the module logger's ``exception`` method: when the
    specific fallback message is logged while inside a ``save_model`` call,
    the log is escalated to a raised exception so the test fails loudly.
    """
    from mlflow.utils.environment import _INFER_PIP_REQUIREMENTS_FALLBACK_MESSAGE

    def new_exception(msg, *_, **__):
        if msg == _INFER_PIP_REQUIREMENTS_FALLBACK_MESSAGE and _called_in_save_model():
            # Fixed message: the original implicit string concatenation was
            # missing a space ("...fall back in`mlflow.*.save_model`...").
            raise Exception(
                "`mlflow.models.infer_pip_requirements` should not fall back in "
                "`mlflow.*.save_model` during test"
            )

    if "allow_infer_pip_requirements_fallback" not in request.keywords:
        with mock.patch("mlflow.utils.environment._logger.exception", new=new_exception):
            yield
    else:
        yield
@pytest.fixture(autouse=True, scope="module")
def clean_up_mlruns_direcotry(request):
    """
    Delete any `mlruns` directory under the repository root on each test
    module's teardown.
    """
    # NOTE(review): the fixture name contains a typo ("direcotry"); kept
    # as-is because the name is the fixture's public identifier.
    yield
    leftover = os.path.join(request.config.rootpath, "mlruns")
    if os.path.exists(leftover):
        shutil.rmtree(leftover)