From 81b59e54b774ccc0f6bfe2cb2dc35cb092299afc Mon Sep 17 00:00:00 2001
From: Weichen Xu
Date: Tue, 11 Jan 2022 16:27:03 +0800
Subject: [PATCH 01/11] init

Signed-off-by: Weichen Xu
---
 .../evaluate_on_binary_classifier.py          | 32 +++++++++++++++++
 .../evaluate_on_multiclass_classifier.py      | 33 ++++++++++++++++++
 examples/evaluators/evaluate_on_regressor.py  | 34 +++++++++++++++++++
 3 files changed, 99 insertions(+)
 create mode 100644 examples/evaluators/evaluate_on_binary_classifier.py
 create mode 100644 examples/evaluators/evaluate_on_multiclass_classifier.py
 create mode 100644 examples/evaluators/evaluate_on_regressor.py

diff --git a/examples/evaluators/evaluate_on_binary_classifier.py b/examples/evaluators/evaluate_on_binary_classifier.py
new file mode 100644
index 0000000000000..bf20955875f44
--- /dev/null
+++ b/examples/evaluators/evaluate_on_binary_classifier.py
@@ -0,0 +1,32 @@
+import xgboost
+import shap
+from mlflow.models.evaluation import evaluate, EvaluationDataset
+import mlflow
+from sklearn.model_selection import train_test_split
+
+# train XGBoost model
+X, y = shap.datasets.adult()
+
+num_examples = len(X)
+
+X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
+
+model = xgboost.XGBClassifier().fit(X_train, y_train)
+
+eval_data = X_test
+eval_data['label'] = y_test
+
+eval_dataset = EvaluationDataset(data=eval_data, labels='label', name='adult')
+
+with mlflow.start_run() as run:
+    mlflow.sklearn.log_model(model, 'model')
+    model_uri = mlflow.get_artifact_uri('model')
+    result = evaluate(
+        model=model_uri,
+        model_type='classifier',
+        dataset=eval_dataset,
+        evaluators=['default'],
+    )
+
+print(f'metrics:\n{result.metrics}')
+print(f'artifacts:\n{result.artifacts}')
diff --git a/examples/evaluators/evaluate_on_multiclass_classifier.py b/examples/evaluators/evaluate_on_multiclass_classifier.py
new file mode 100644
index 0000000000000..553aae11027c1
--- /dev/null
+++ b/examples/evaluators/evaluate_on_multiclass_classifier.py
@@ -0,0 +1,33 @@
+from mlflow.models.evaluation import evaluate, EvaluationDataset
+import mlflow
+from sklearn.linear_model import LogisticRegression
+from sklearn.datasets import make_classification
+from sklearn.model_selection import train_test_split
+
+mlflow.sklearn.autolog()
+
+X, y = make_classification(n_samples=10000, n_classes=10, n_informative=5, random_state=1)
+
+X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
+
+eval_dataset = EvaluationDataset(
+    data=X_test, labels=y_test, name='multiclass-classification-dataset',
+)
+
+with mlflow.start_run() as run:
+    model = LogisticRegression(solver='liblinear').fit(X_train, y_train)
+    model_uri = mlflow.get_artifact_uri('model')
+    result = evaluate(
+        model=model_uri,
+        model_type='classifier',
+        dataset=eval_dataset,
+        evaluators='default',
+        evaluator_config={
+            'log_model_explainability': True,
+            'explainability_nsamples': 1000
+        }
+    )
+
+print(f'run_id={run.info.run_id}')
+print(f'metrics:\n{result.metrics}')
+print(f'artifacts:\n{result.artifacts}')
diff --git a/examples/evaluators/evaluate_on_regressor.py b/examples/evaluators/evaluate_on_regressor.py
new file mode 100644
index 0000000000000..1fd19e4228b5c
--- /dev/null
+++ b/examples/evaluators/evaluate_on_regressor.py
@@ -0,0 +1,34 @@
+from mlflow.models.evaluation import evaluate, EvaluationDataset
+import mlflow
+from sklearn.datasets import load_boston
+from sklearn.linear_model import LinearRegression
+from sklearn.model_selection import train_test_split
+
+mlflow.sklearn.autolog()
+
+boston_data = load_boston()
+
+X_train, X_test, y_train, y_test = train_test_split(
+    boston_data.data, boston_data.target, test_size=0.33, random_state=42
+)
+
+dataset = EvaluationDataset(
+    data=X_test, labels=y_test, name='boston', feature_names=boston_data.feature_names
+)
+
+with mlflow.start_run() as run:
+    model = LinearRegression().fit(X_train, y_train)
+    model_uri = mlflow.get_artifact_uri('model')
+
+    result = evaluate(
+        model=model_uri,
+        model_type='regressor',
+        dataset=dataset,
+        evaluators='default',
+        evaluator_config={
+            'explainability_nsamples': 1000
+        }
+    )
+
+print(f'metrics:\n{result.metrics}')
+print(f'artifacts:\n{result.artifacts}')

From ebf1cba9f74119e4afc68309c042ee0967b1cf05 Mon Sep 17 00:00:00 2001
From: Weichen Xu
Date: Tue, 11 Jan 2022 16:31:52 +0800
Subject: [PATCH 02/11] permalink

Signed-off-by: Weichen Xu
---
 mlflow/models/evaluation/lift_curve.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/mlflow/models/evaluation/lift_curve.py b/mlflow/models/evaluation/lift_curve.py
index cbcba712a9329..d2df4ba453369 100644
--- a/mlflow/models/evaluation/lift_curve.py
+++ b/mlflow/models/evaluation/lift_curve.py
@@ -6,6 +6,7 @@ def _cumulative_gain_curve(y_true, y_score, pos_label=None):
     """
     This method is copied from scikit-plot package.
+    See https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/helpers.py#L157
 
     This function generates the points necessary to plot the Cumulative Gain
@@ -77,6 +78,7 @@ def plot_lift_curve(
 ):
     """
     This method is copied from scikit-plot package.
+    See https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/metrics.py#L1133
 
     Generates the Lift Curve from labels and scores/probabilities

From ec81ae4f359d95891f654cdcd2aa304a11302e89 Mon Sep 17 00:00:00 2001
From: Weichen Xu
Date: Fri, 14 Jan 2022 16:56:10 +0800
Subject: [PATCH 03/11] update

Signed-off-by: Weichen Xu
---
 .../evaluators/evaluate_on_binary_classifier.py | 11 +++++------
 .../evaluate_on_multiclass_classifier.py        | 13 +++++--------
 examples/evaluators/evaluate_on_regressor.py    | 14 ++++++--------
 3 files changed, 16 insertions(+), 22 deletions(-)

diff --git a/examples/evaluators/evaluate_on_binary_classifier.py b/examples/evaluators/evaluate_on_binary_classifier.py
index bf20955875f44..fb887cb8234ae 100644
--- a/examples/evaluators/evaluate_on_binary_classifier.py
+++ b/examples/evaluators/evaluate_on_binary_classifier.py
@@ -1,6 +1,5 @@
 import xgboost
 import shap
-from mlflow.models.evaluation import evaluate, EvaluationDataset
 import mlflow
 from sklearn.model_selection import train_test_split
@@ -16,15 +15,15 @@
 eval_data = X_test
 eval_data['label'] = y_test
 
-eval_dataset = EvaluationDataset(data=eval_data, labels='label', name='adult')
-
 with mlflow.start_run() as run:
     mlflow.sklearn.log_model(model, 'model')
     model_uri = mlflow.get_artifact_uri('model')
-    result = evaluate(
-        model=model_uri,
+    result = mlflow.evaluate(
+        model_uri,
+        eval_data,
+        targets='label',
         model_type='classifier',
-        dataset=eval_dataset,
+        dataset_name='adult',
         evaluators=['default'],
     )
diff --git a/examples/evaluators/evaluate_on_multiclass_classifier.py b/examples/evaluators/evaluate_on_multiclass_classifier.py
index 553aae11027c1..9b3bc01c7e337 100644
--- a/examples/evaluators/evaluate_on_multiclass_classifier.py
+++ b/examples/evaluators/evaluate_on_multiclass_classifier.py
@@ -1,4 +1,3 @@
-from mlflow.models.evaluation import evaluate, EvaluationDataset
 import mlflow
 from sklearn.linear_model import LogisticRegression
 from sklearn.datasets import make_classification
@@ -10,17 +9,15 @@
 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
 
-eval_dataset = EvaluationDataset(
-    data=X_test, labels=y_test, name='multiclass-classification-dataset',
-)
-
 with mlflow.start_run() as run:
     model = LogisticRegression(solver='liblinear').fit(X_train, y_train)
     model_uri = mlflow.get_artifact_uri('model')
-    result = evaluate(
-        model=model_uri,
+    result = mlflow.evaluate(
+        model_uri,
+        X_test,
+        targets=y_test,
         model_type='classifier',
-        dataset=eval_dataset,
+        dataset_name='multiclass-classification-dataset',
         evaluators='default',
         evaluator_config={
             'log_model_explainability': True,
diff --git a/examples/evaluators/evaluate_on_regressor.py b/examples/evaluators/evaluate_on_regressor.py
index 1fd19e4228b5c..63c860261ecca 100644
--- a/examples/evaluators/evaluate_on_regressor.py
+++ b/examples/evaluators/evaluate_on_regressor.py
@@ -1,4 +1,3 @@
-from mlflow.models.evaluation import evaluate, EvaluationDataset
 import mlflow
 from sklearn.datasets import load_boston
 from sklearn.linear_model import LinearRegression
@@ -12,19 +11,18 @@
     boston_data.data, boston_data.target, test_size=0.33, random_state=42
 )
 
-dataset = EvaluationDataset(
-    data=X_test, labels=y_test, name='boston', feature_names=boston_data.feature_names
-)
-
 with mlflow.start_run() as run:
     model = LinearRegression().fit(X_train, y_train)
     model_uri = mlflow.get_artifact_uri('model')
 
-    result = evaluate(
-        model=model_uri,
+    result = mlflow.evaluate(
+        model_uri,
+        X_test,
+        targets=y_test,
         model_type='regressor',
-        dataset=dataset,
+        dataset_name='boston',
         evaluators='default',
+        feature_names=boston_data.feature_names,
         evaluator_config={
             'explainability_nsamples': 1000
         }

From 07e9c3de4181aba5e84ab7748f54eecd0b47b461 Mon Sep 17 00:00:00 2001
From: Weichen Xu
Date: Fri, 14 Jan 2022 17:46:44 +0800
Subject: [PATCH 04/11] add readme

Signed-off-by: Weichen Xu
---
 examples/evaluation/README.md                 | 24 +++++++++++++++
 .../evaluate_on_binary_classifier.py          |  0
 .../evaluate_on_multiclass_classifier.py      |  0
 .../evaluate_on_regressor.py                  |  0
 4 files changed, 24 insertions(+)
 create mode 100644 examples/evaluation/README.md
 rename examples/{evaluators => evaluation}/evaluate_on_binary_classifier.py (100%)
 rename examples/{evaluators => evaluation}/evaluate_on_multiclass_classifier.py (100%)
 rename examples/{evaluators => evaluation}/evaluate_on_regressor.py (100%)

diff --git a/examples/evaluation/README.md b/examples/evaluation/README.md
new file mode 100644
index 0000000000000..d94ff5657957d
--- /dev/null
+++ b/examples/evaluation/README.md
@@ -0,0 +1,24 @@
+### MLflow evaluation Example
+
+The three simple examples illustrates how you can use the `mlflow.evaluate` API to evaluate a PyFunc model on the
+specified dataset using builtin default evaluator, and log resulting metrics & artifacts to MLflow Tracking.
+
+- Example `evaluate_on_binary_classifier.py` evaluate a xgboost `XGBClassifier` model on dataset loaded by
+  `shap.datasets.adult`.
+- Example `evaluate_on_multiclass_classifier.py` evaluate a scikit-learn `LogisticRegression` model on dataset
+  generated by `sklearn.datasets.make_classification`.
+- Example `evaluate_on_regressor.py` evaluate a scikit-learn `LinearRegression` model on dataset loaded by
+  `sklearn.datasets.load_boston`
+
+#### How to run this code
+
+Run from the current git directory with Python.
+
+**Note**: These examples assumes that you have all the dependencies for `scikit-learn`, `xgboost`, and `shap` library
+installed in your development environment.
+
+ `python evaluate_on_binary_classifier.py`
+
+ `python evaluate_on_multiclass_classifier.py`
+
+ `python evaluate_on_regressor.py`
diff --git a/examples/evaluators/evaluate_on_binary_classifier.py b/examples/evaluation/evaluate_on_binary_classifier.py
similarity index 100%
rename from examples/evaluators/evaluate_on_binary_classifier.py
rename to examples/evaluation/evaluate_on_binary_classifier.py
diff --git a/examples/evaluators/evaluate_on_multiclass_classifier.py b/examples/evaluation/evaluate_on_multiclass_classifier.py
similarity index 100%
rename from examples/evaluators/evaluate_on_multiclass_classifier.py
rename to examples/evaluation/evaluate_on_multiclass_classifier.py
diff --git a/examples/evaluators/evaluate_on_regressor.py b/examples/evaluation/evaluate_on_regressor.py
similarity index 100%
rename from examples/evaluators/evaluate_on_regressor.py
rename to examples/evaluation/evaluate_on_regressor.py

From 4b5b08b324fee3e7a067f6e09f5ddfdee6449f60 Mon Sep 17 00:00:00 2001
From: Weichen Xu
Date: Fri, 14 Jan 2022 17:48:17 +0800
Subject: [PATCH 05/11] fix lint

Signed-off-by: Weichen Xu
---
 .../evaluate_on_binary_classifier.py          | 18 ++++++++--------
 .../evaluate_on_multiclass_classifier.py      | 21 ++++++++-----------
 examples/evaluation/evaluate_on_regressor.py  | 16 +++++++-------
 3 files changed, 25 insertions(+), 30 deletions(-)

diff --git a/examples/evaluation/evaluate_on_binary_classifier.py b/examples/evaluation/evaluate_on_binary_classifier.py
index fb887cb8234ae..7bac4a0beca2f 100644
--- a/examples/evaluation/evaluate_on_binary_classifier.py
+++ b/examples/evaluation/evaluate_on_binary_classifier.py
@@ -13,19 +13,19 @@
 model = xgboost.XGBClassifier().fit(X_train, y_train)
 
 eval_data = X_test
-eval_data['label'] = y_test
+eval_data["label"] = y_test
 
 with mlflow.start_run() as run:
-    mlflow.sklearn.log_model(model, 'model')
-    model_uri = mlflow.get_artifact_uri('model')
+    mlflow.sklearn.log_model(model, "model")
+    model_uri = mlflow.get_artifact_uri("model")
     result = mlflow.evaluate(
         model_uri,
         eval_data,
-        targets='label',
-        model_type='classifier',
-        dataset_name='adult',
-        evaluators=['default'],
+        targets="label",
+        model_type="classifier",
+        dataset_name="adult",
+        evaluators=["default"],
     )
 
-print(f'metrics:\n{result.metrics}')
-print(f'artifacts:\n{result.artifacts}')
+print(f"metrics:\n{result.metrics}")
+print(f"artifacts:\n{result.artifacts}")
diff --git a/examples/evaluation/evaluate_on_multiclass_classifier.py b/examples/evaluation/evaluate_on_multiclass_classifier.py
index 9b3bc01c7e337..4eab5cf86d0ee 100644
--- a/examples/evaluation/evaluate_on_multiclass_classifier.py
+++ b/examples/evaluation/evaluate_on_multiclass_classifier.py
@@ -10,21 +10,18 @@
 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
 
 with mlflow.start_run() as run:
-    model = LogisticRegression(solver='liblinear').fit(X_train, y_train)
-    model_uri = mlflow.get_artifact_uri('model')
+    model = LogisticRegression(solver="liblinear").fit(X_train, y_train)
+    model_uri = mlflow.get_artifact_uri("model")
     result = mlflow.evaluate(
         model_uri,
         X_test,
         targets=y_test,
-        model_type='classifier',
-        dataset_name='multiclass-classification-dataset',
-        evaluators='default',
-        evaluator_config={
-            'log_model_explainability': True,
-            'explainability_nsamples': 1000
-        }
+        model_type="classifier",
dataset_name="multiclass-classification-dataset", + evaluators="default", + evaluator_config={"log_model_explainability": True, "explainability_nsamples": 1000}, ) -print(f'run_id={run.info.run_id}') -print(f'metrics:\n{result.metrics}') -print(f'artifacts:\n{result.artifacts}') +print(f"run_id={run.info.run_id}") +print(f"metrics:\n{result.metrics}") +print(f"artifacts:\n{result.artifacts}") diff --git a/examples/evaluation/evaluate_on_regressor.py b/examples/evaluation/evaluate_on_regressor.py index 63c860261ecca..5250d8cdd4d3d 100644 --- a/examples/evaluation/evaluate_on_regressor.py +++ b/examples/evaluation/evaluate_on_regressor.py @@ -13,20 +13,18 @@ with mlflow.start_run() as run: model = LinearRegression().fit(X_train, y_train) - model_uri = mlflow.get_artifact_uri('model') + model_uri = mlflow.get_artifact_uri("model") result = mlflow.evaluate( model_uri, X_test, targets=y_test, - model_type='regressor', - dataset_name='boston', - evaluators='default', + model_type="regressor", + dataset_name="boston", + evaluators="default", feature_names=boston_data.feature_names, - evaluator_config={ - 'explainability_nsamples': 1000 - } + evaluator_config={"explainability_nsamples": 1000}, ) -print(f'metrics:\n{result.metrics}') -print(f'artifacts:\n{result.artifacts}') +print(f"metrics:\n{result.metrics}") +print(f"artifacts:\n{result.artifacts}") From fcb2e75a3555d0d77a750d5b6f161a178875d093 Mon Sep 17 00:00:00 2001 From: Weichen Xu Date: Fri, 14 Jan 2022 19:35:09 +0800 Subject: [PATCH 06/11] update Signed-off-by: Weichen Xu --- examples/evaluation/README.md | 27 +++++++++++--------- examples/evaluation/evaluate_on_regressor.py | 10 ++++---- 2 files changed, 20 insertions(+), 17 deletions(-) diff --git a/examples/evaluation/README.md b/examples/evaluation/README.md index d94ff5657957d..a99e3b2840f76 100644 --- a/examples/evaluation/README.md +++ b/examples/evaluation/README.md @@ -1,24 +1,27 @@ -### MLflow evaluation Example +### MLflow evaluation Examples The three simple examples illustrates how you can use the `mlflow.evaluate` API to evaluate a PyFunc model on the specified dataset using builtin default evaluator, and log resulting metrics & artifacts to MLflow Tracking. -- Example `evaluate_on_binary_classifier.py` evaluate a xgboost `XGBClassifier` model on dataset loaded by +- Example `evaluate_on_binary_classifier.py` evaluates an xgboost `XGBClassifier` model on dataset loaded by `shap.datasets.adult`. -- Example `evaluate_on_multiclass_classifier.py` evaluate a scikit-learn `LogisticRegression` model on dataset +- Example `evaluate_on_multiclass_classifier.py` evaluates a scikit-learn `LogisticRegression` model on dataset generated by `sklearn.datasets.make_classification`. -- Example `evaluate_on_regressor.py` evaluate a scikit-learn `LinearRegression` model on dataset loaded by +- Example `evaluate_on_regressor.py` evaluate as scikit-learn `LinearRegression` model on dataset loaded by `sklearn.datasets.load_boston` +#### Prerequisites + +``` +pip install scikit-learn xgboost shap matplotlib +``` + #### How to run this code Run from the current git directory with Python. -**Note**: These examples assumes that you have all the dependencies for `scikit-learn`, `xgboost`, and `shap` library -installed in your development environment. 
-
- `python evaluate_on_binary_classifier.py`
-
- `python evaluate_on_multiclass_classifier.py`
-
- `python evaluate_on_regressor.py`
+
+```
+python evaluate_on_binary_classifier.py
+python evaluate_on_multiclass_classifier.py
+python evaluate_on_regressor.py
+```
diff --git a/examples/evaluation/evaluate_on_regressor.py b/examples/evaluation/evaluate_on_regressor.py
index 5250d8cdd4d3d..538d8cf3ea8b1 100644
--- a/examples/evaluation/evaluate_on_regressor.py
+++ b/examples/evaluation/evaluate_on_regressor.py
@@ -1,14 +1,14 @@
 import mlflow
-from sklearn.datasets import load_boston
+from sklearn.datasets import fetch_california_housing
 from sklearn.linear_model import LinearRegression
 from sklearn.model_selection import train_test_split
 
 mlflow.sklearn.autolog()
 
-boston_data = load_boston()
+california_housing_data = fetch_california_housing()
 
 X_train, X_test, y_train, y_test = train_test_split(
-    boston_data.data, boston_data.target, test_size=0.33, random_state=42
+    california_housing_data.data, california_housing_data.target, test_size=0.33, random_state=42
 )
 
 with mlflow.start_run() as run:
@@ -20,9 +20,9 @@
         X_test,
         targets=y_test,
         model_type="regressor",
-        dataset_name="boston",
+        dataset_name="california_housing",
         evaluators="default",
-        feature_names=boston_data.feature_names,
+        feature_names=california_housing_data.feature_names,
         evaluator_config={"explainability_nsamples": 1000},
     )

From e464eeda87f2462c460ffd76eae0f9ff33b62bb8 Mon Sep 17 00:00:00 2001
From: Weichen Xu
Date: Fri, 14 Jan 2022 19:36:21 +0800
Subject: [PATCH 07/11] update

Signed-off-by: Weichen Xu
---
 examples/evaluation/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/evaluation/README.md b/examples/evaluation/README.md
index a99e3b2840f76..887af5e980098 100644
--- a/examples/evaluation/README.md
+++ b/examples/evaluation/README.md
@@ -8,7 +8,7 @@ specified dataset using builtin default evaluator, and log resulting metrics & a
 - Example `evaluate_on_multiclass_classifier.py` evaluates a scikit-learn `LogisticRegression` model on dataset
   generated by `sklearn.datasets.make_classification`.
 - Example `evaluate_on_regressor.py` evaluate as scikit-learn `LinearRegression` model on dataset loaded by
-  `sklearn.datasets.load_boston`
+  `sklearn.datasets.fetch_california_housing`

From dc11f534ce7faad4f4b59133b1eec91202ad5e31 Mon Sep 17 00:00:00 2001
From: Weichen Xu
Date: Fri, 14 Jan 2022 20:01:18 +0800
Subject: [PATCH 08/11] update

Signed-off-by: Weichen Xu
---
 examples/evaluation/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/evaluation/README.md b/examples/evaluation/README.md
index 887af5e980098..9a94563a7e6b5 100644
--- a/examples/evaluation/README.md
+++ b/examples/evaluation/README.md
@@ -1,6 +1,6 @@
 ### MLflow evaluation Examples
 
-The three simple examples illustrates how you can use the `mlflow.evaluate` API to evaluate a PyFunc model on the
+The examples in this directory illustrate how you can use the `mlflow.evaluate` API to evaluate a PyFunc model on the
 specified dataset using builtin default evaluator, and log resulting metrics & artifacts to MLflow Tracking.
 - Example `evaluate_on_binary_classifier.py` evaluates an xgboost `XGBClassifier` model on dataset loaded by

From d1f49bd9bd55c8a2ac414d13d7db0e4c71363845 Mon Sep 17 00:00:00 2001
From: Weichen Xu
Date: Fri, 14 Jan 2022 20:03:06 +0800
Subject: [PATCH 09/11] update

Signed-off-by: Weichen Xu
---
 examples/evaluation/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/evaluation/README.md b/examples/evaluation/README.md
index 9a94563a7e6b5..9d54935ad9f97 100644
--- a/examples/evaluation/README.md
+++ b/examples/evaluation/README.md
@@ -16,9 +16,9 @@ pip install scikit-learn xgboost shap matplotlib
 ```
 
-#### How to run this code
+#### How to run the examples
 
-Run from the current git directory with Python.
+Run in this directory with Python.
 
 ```
 python evaluate_on_binary_classifier.py
 python evaluate_on_multiclass_classifier.py
 python evaluate_on_regressor.py
 ```

From 3613b4357baccba03e8b219549e042ee864c2d63 Mon Sep 17 00:00:00 2001
From: Weichen Xu
Date: Fri, 14 Jan 2022 20:30:19 +0800
Subject: [PATCH 10/11] update

Signed-off-by: Weichen Xu
---
 examples/evaluation/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/evaluation/README.md b/examples/evaluation/README.md
index 9d54935ad9f97..08ea3072815d3 100644
--- a/examples/evaluation/README.md
+++ b/examples/evaluation/README.md
@@ -13,7 +13,7 @@ specified dataset using builtin default evaluator, and log resulting metrics & a
 #### Prerequisites
 
 ```
-pip install scikit-learn xgboost shap matplotlib
+pip install scikit-learn xgboost shap>=0.40 matplotlib
 ```

From 2fcf860c9450aafa2fc9604c5f184d9edb09cd88 Mon Sep 17 00:00:00 2001
From: Weichen Xu
Date: Fri, 14 Jan 2022 21:49:08 +0800
Subject: [PATCH 11/11] add artifact __repr__

Signed-off-by: Weichen Xu
---
 mlflow/models/evaluation/base.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py
index 4cdb3ba91be59..98768f0cc6459 100644
--- a/mlflow/models/evaluation/base.py
+++ b/mlflow/models/evaluation/base.py
@@ -76,6 +76,9 @@ def uri(self) -> str:
         """
         return self._uri
 
+    def __repr__(self):
+        return f"{self.__class__.__name__}(uri='{self.uri}')"
+
 
 class EvaluationResult:
     """
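The `__repr__` added in PATCH 11/11 is what makes the `result.artifacts` dictionary printed by the example scripts readable: each artifact renders as its class name plus the tracking URI it was logged under. Below is a minimal sketch of that behavior, using only the code visible in the hunk above; the `DummyEvaluationArtifact` subclass and the example URI are hypothetical, for illustration only.

```
class EvaluationArtifact:
    # Stripped-down stand-in for the class in mlflow/models/evaluation/base.py.
    def __init__(self, uri):
        self._uri = uri

    @property
    def uri(self) -> str:
        # The artifact location in MLflow Tracking.
        return self._uri

    # The method added by PATCH 11/11:
    def __repr__(self):
        return f"{self.__class__.__name__}(uri='{self.uri}')"


class DummyEvaluationArtifact(EvaluationArtifact):
    # Hypothetical subclass, for illustration only.
    pass


print(DummyEvaluationArtifact("runs:/1234/artifacts/lift_curve_plot.png"))
# prints: DummyEvaluationArtifact(uri='runs:/1234/artifacts/lift_curve_plot.png')
```

Because the format string reads `self.__class__.__name__`, every concrete artifact subclass inherits a readable representation, which is why the change lands once on the base class rather than on each subclass.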