Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Autologging functionality for scikit-learn integration with LightGBM (Part 1) #5130

Merged
merged 7 commits into from Dec 24, 2021
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
62 changes: 53 additions & 9 deletions mlflow/lightgbm.py
Expand Up @@ -33,6 +33,7 @@
from mlflow.models.signature import ModelSignature
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils import _get_fully_qualified_class_name
from mlflow.utils.environment import (
_mlflow_conda_env,
_validate_env_arguments,
Expand Down Expand Up @@ -73,7 +74,7 @@ def get_default_pip_requirements():
Calls to :func:`save_model()` and :func:`log_model()` produce a pip environment
that, at minimum, contains these requirements.
"""
return [_get_pinned_requirement("lightgbm")]
return [_get_pinned_requirement("lightgbm"), _get_pinned_requirement("cloudpickle")]
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we add cloudpickle conditionally because users who don't use scikit-learn estimators don't need cloudpickle?

Copy link
Contributor Author

@jwyyy jwyyy Dec 21, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @harupy, thank you for your suggestion! Does that mean we also need to provide an option to turn on / off autologging for scikit-learn estimators? I assumed mlflow.lightgbm.autolog() enables autologging for all models.

Copy link
Contributor Author

@jwyyy jwyyy Dec 22, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @harupy, I found a simple way to add cloudpickle conditionally (and automatically) based on which model type is being saved (please see L169-171). Please let me know your feedback and comments. Thanks a lot!

(This comment is also addressed in the latest commit.)



def get_default_conda_env():
Expand Down Expand Up @@ -132,7 +133,10 @@ def save_model(
path = os.path.abspath(path)
if os.path.exists(path):
raise MlflowException("Path '{}' already exists".format(path))
model_data_subpath = "model.lgb"
if isinstance(lgb_model, lgb.Booster):
model_data_subpath = "model.lgb"
else:
model_data_subpath = "model.pkl"
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
if isinstance(lgb_model, lgb.Booster):
model_data_subpath = "model.lgb"
else:
model_data_subpath = "model.pkl"
model_data_subpath = "model.lgb" if isinstance(lgb_model, lgb.Booster) else "model.pkl"

nit

model_data_path = os.path.join(path, model_data_subpath)
os.makedirs(path)
if mlflow_model is None:
Expand All @@ -143,15 +147,21 @@ def save_model(
_save_example(mlflow_model, input_example, path)

# Save a LightGBM model
lgb_model.save_model(model_data_path)
_save_model(lgb_model, model_data_path)

lgb_model_class = _get_fully_qualified_class_name(lgb_model)
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.lightgbm",
data=model_data_subpath,
env=_CONDA_ENV_FILE_NAME,
)
mlflow_model.add_flavor(FLAVOR_NAME, lgb_version=lgb.__version__, data=model_data_subpath)
mlflow_model.add_flavor(
FLAVOR_NAME,
lgb_version=lgb.__version__,
data=model_data_subpath,
model_class=lgb_model_class,
)
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))

if conda_env is None:
Expand Down Expand Up @@ -186,6 +196,20 @@ def save_model(
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))


def _save_model(lgb_model, model_path):
    """Serialize ``lgb_model`` to ``model_path``.

    A native ``lgb.Booster`` is written with LightGBM's built-in
    ``save_model()``; any other estimator (the scikit-learn API wrappers)
    is serialized with cloudpickle instead.
    """
    import lightgbm as lgb

    if not isinstance(lgb_model, lgb.Booster):
        # scikit-learn style model: fall back to cloudpickle serialization.
        import cloudpickle

        with open(model_path, "wb") as handle:
            cloudpickle.dump(lgb_model, handle)
        return
    lgb_model.save_model(model_path)


@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def log_model(
lgb_model,
Expand Down Expand Up @@ -251,9 +275,31 @@ def log_model(


def _load_model(path):
    """
    Load an MLflow LightGBM model from local disk.

    :param path: Local filesystem path to
                 the MLflow Model with the ``lightgbm`` flavor (MLflow < 1.23.0) or
                 the top-level MLflow Model directory (MLflow >= 1.23.0).
    :return: A ``lightgbm.Booster`` or a LightGBM scikit-learn model instance.
    """
    # Accept both the older direct model-file path and the newer model directory.
    model_dir = os.path.dirname(path) if os.path.isfile(path) else path
    flavor_conf = _get_flavor_configuration(model_path=model_dir, flavor_name=FLAVOR_NAME)

    # Models saved before ``model_class`` was recorded are always native Boosters.
    model_class = flavor_conf.get("model_class", "lightgbm.basic.Booster")
    lgb_model_path = os.path.join(model_dir, flavor_conf.get("data"))

    if model_class == "lightgbm.basic.Booster":
        import lightgbm as lgb

        model = lgb.Booster(model_file=lgb_model_path)
    else:
        # LightGBM scikit-learn models are deserialized using cloudpickle.
        import cloudpickle

        with open(lgb_model_path, "rb") as f:
            model = cloudpickle.load(f)

    # NOTE: the stale ``return lgb.Booster(model_file=path)`` from the pre-diff
    # version is removed — it made ``return model`` unreachable and referenced
    # ``lgb`` which is unbound on the cloudpickle branch.
    return model


def _load_pyfunc(path):
Expand Down Expand Up @@ -286,9 +332,7 @@ def load_model(model_uri, dst_path=None):
:return: A LightGBM model (an instance of `lightgbm.Booster`_).
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
lgb_model_file_path = os.path.join(local_model_path, flavor_conf.get("data", "model.lgb"))
return _load_model(path=lgb_model_file_path)
return _load_model(path=local_model_path)


class _LGBModelWrapper:
Expand Down
78 changes: 77 additions & 1 deletion tests/lightgbm/test_lightgbm_model_export.py
Expand Up @@ -50,6 +50,18 @@ def lgb_model():
return ModelWithData(model=model, inference_dataframe=X)


@pytest.fixture(scope="session")
def lgb_sklearn_model():
    """Session-scoped LGBMClassifier fitted on the first two iris features."""
    dataset = datasets.load_iris()
    # We only take the first two features.
    features = pd.DataFrame(dataset.data[:, :2], columns=dataset.feature_names[:2])
    classifier = lgb.LGBMClassifier(n_estimators=10)
    classifier.fit(features, dataset.target)
    return ModelWithData(model=classifier, inference_dataframe=features)


@pytest.fixture
def model_path(tmpdir):
return os.path.join(str(tmpdir), "model")
Expand All @@ -68,7 +80,7 @@ def test_model_save_load(lgb_model, model_path):

mlflow.lightgbm.save_model(lgb_model=model, path=model_path)
reloaded_model = mlflow.lightgbm.load_model(model_uri=model_path)
reloaded_pyfunc = pyfunc.load_pyfunc(model_uri=model_path)
reloaded_pyfunc = pyfunc.load_model(model_uri=model_path)

np.testing.assert_array_almost_equal(
model.predict(lgb_model.inference_dataframe),
Expand All @@ -81,6 +93,24 @@ def test_model_save_load(lgb_model, model_path):
)


@pytest.mark.large
def test_sklearn_model_save_load(lgb_sklearn_model, model_path):
    """Round-trip a LightGBM scikit-learn model through save/load, checking that
    both the native flavor loader and the pyfunc loader reproduce predictions."""
    original = lgb_sklearn_model.model
    mlflow.lightgbm.save_model(lgb_model=original, path=model_path)

    loaded_native = mlflow.lightgbm.load_model(model_uri=model_path)
    loaded_pyfunc = pyfunc.load_model(model_uri=model_path)

    df = lgb_sklearn_model.inference_dataframe
    np.testing.assert_array_almost_equal(
        original.predict(df),
        loaded_native.predict(df),
    )
    np.testing.assert_array_almost_equal(
        loaded_native.predict(df),
        loaded_pyfunc.predict(df),
    )


def test_signature_and_examples_are_saved_correctly(lgb_model):
model = lgb_model.model
X = lgb_model.inference_dataframe
Expand Down Expand Up @@ -398,3 +428,49 @@ def test_pyfunc_serve_and_score_sklearn(model):
)
scores = pd.read_json(resp.content, orient="records").values.squeeze()
np.testing.assert_array_equal(scores, model.predict(X.head(3)))


@pytest.mark.large
def test_load_pyfunc_succeeds_for_older_models_with_pyfunc_data_field(lgb_model, model_path):
    """
    This test verifies that LightGBM models saved in older versions of MLflow are loaded
    successfully by ``mlflow.pyfunc.load_model``. These older models specify a pyfunc ``data``
    field referring directly to a LightGBM model file. Newer models also have the
    ``model_class`` in LightGBM flavor.
    """
    mlflow.lightgbm.save_model(lgb_model=lgb_model.model, path=model_path)

    conf_path = os.path.join(model_path, "MLmodel")
    conf = Model.load(conf_path)
    pyfunc_conf = conf.flavors.get(pyfunc.FLAVOR_NAME)
    lgb_conf = conf.flavors.get(mlflow.lightgbm.FLAVOR_NAME)

    # A freshly saved model carries the new-style flavor fields.
    assert lgb_conf is not None
    assert "model_class" in lgb_conf
    assert "data" in lgb_conf
    assert pyfunc_conf is not None
    assert "model_class" not in pyfunc_conf
    assert pyfunc.DATA in pyfunc_conf

    # Rewrite the configuration to mimic an old-style MLmodel file.
    conf.flavors["lightgbm"] = {"lgb_version": lgb.__version__, "data": "model.lgb"}
    conf.save(conf_path)
    conf = Model.load(conf_path)
    lgb_conf = conf.flavors.get(mlflow.lightgbm.FLAVOR_NAME)
    assert "data" in lgb_conf
    assert lgb_conf["data"] == "model.lgb"

    # Both loaders must still resolve the old-style configuration to a Booster.
    reloaded_pyfunc = pyfunc.load_model(model_uri=model_path)
    assert isinstance(reloaded_pyfunc._model_impl.lgb_model, lgb.Booster)
    reloaded_lgb = mlflow.lightgbm.load_model(model_uri=model_path)
    assert isinstance(reloaded_lgb, lgb.Booster)

    # Predictions through either loader must match the original model.
    df = lgb_model.inference_dataframe
    np.testing.assert_array_almost_equal(
        lgb_model.model.predict(df),
        reloaded_pyfunc.predict(df),
    )
    np.testing.assert_array_almost_equal(
        reloaded_lgb.predict(df),
        reloaded_pyfunc.predict(df),
    )