[Requirements] Bump fastapi and uvicorn #2629

Merged (12 commits) on Jan 1, 2023
2 changes: 1 addition & 1 deletion dockerfiles/mlrun-api/requirements.txt
@@ -1,4 +1,4 @@
-uvicorn~=0.17.0
+uvicorn~=0.20.0
dask-kubernetes~=0.11.0
apscheduler~=3.6
sqlite3-to-mysql~=1.4
1 change: 0 additions & 1 deletion mlrun/api/main.py
@@ -361,7 +361,6 @@ def main():
"mlrun.api.main:app",
host="0.0.0.0",
port=config.httpdb.port,
debug=config.httpdb.debug,
access_log=False,
timeout_keep_alive=config.httpdb.http_connection_timeout_keep_alive,
)
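Note: the `~=0.20.0` bump crosses the release in which uvicorn dropped its deprecated `debug` option, which is presumably why the argument is removed above. A minimal sketch of the updated invocation (the port and keep-alive values are hypothetical stand-ins for the config lookups):

```python
import uvicorn

uvicorn.run(
    "mlrun.api.main:app",
    host="0.0.0.0",
    port=8080,              # stand-in for config.httpdb.port
    access_log=False,
    timeout_keep_alive=75,  # stand-in for config.httpdb.http_connection_timeout_keep_alive
)
```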
19 changes: 4 additions & 15 deletions mlrun/api/utils/background_tasks.py
@@ -18,7 +18,6 @@
import typing
import uuid

-import anyio
import fastapi
import fastapi.concurrency
import sqlalchemy.orm
@@ -151,21 +150,11 @@ def get_background_task(
    @mlrun.api.utils.helpers.ensure_running_on_chief
    async def background_task_wrapper(self, name: str, function, *args, **kwargs):
        try:
+            if asyncio.iscoroutinefunction(function):
+                await function(*args, **kwargs)
+            else:
+                await fastapi.concurrency.run_in_threadpool(function, *args, **kwargs)

-            # In the current fastapi version, there is a bug in the starlette package it uses for the background tasks.
-            # The bug causes the task to be cancelled if the client's http connection is closed before the task is done.
-            # The bug is fixed in the latest version of starlette & fastapi. We will upgrade in 1.3.0, but until then we
-            # will use this workaround to prevent the task from being cancelled all together.
-            # See https://github.com/encode/starlette/issues/1438
-            # and https://github.com/tiangolo/fastapi/issues/5606
-            # TODO: remove this workaround when upgrading to fastapi 0.87.0
-            with anyio.CancelScope(shield=True):
-                if asyncio.iscoroutinefunction(function):
-                    await function(*args, **kwargs)
-                else:
-                    await fastapi.concurrency.run_in_threadpool(
-                        function, *args, **kwargs
-                    )
        except Exception:
            logger.warning(
                f"Failed during background task execution: {function.__name__}, exc: {traceback.format_exc()}"
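For context, the removed workaround shielded in-flight work from cancellation when the client dropped its HTTP connection, which was needed before starlette fixed background-task cancellation (encode/starlette#1438, tiangolo/fastapi#5606; fixed as of fastapi 0.87). A minimal sketch of that shield pattern, kept here for reference (the wrapper name is illustrative, not part of the PR):

```python
import asyncio

import anyio
import fastapi.concurrency


async def run_shielded(function, *args, **kwargs):
    # shield=True keeps an outer cancellation (e.g. the request scope being
    # torn down) from cancelling the wrapped work
    with anyio.CancelScope(shield=True):
        if asyncio.iscoroutinefunction(function):
            return await function(*args, **kwargs)
        # sync callables are offloaded so they don't block the event loop
        return await fastapi.concurrency.run_in_threadpool(function, *args, **kwargs)
```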
9 changes: 1 addition & 8 deletions requirements.txt
@@ -6,13 +6,6 @@ GitPython~=3.0
aiohttp~=3.8
# 8.1.0+ breaks dask/distributed versions older than 2022.04.0, see here - https://github.com/dask/distributed/pull/6018
click~=8.0.0
-# fastapi 0.78.0 requires starlette 0.19.1 which requires typing_extensions>=3.10.0
-# kfp 1.8.13 requires typing-extensions>=3.7.4,<5
-# for some reason when installing mlrun on a venv that already had typing-extensions==3.7.4.3 it didn't upgrade it to
-# >=3.10.0 although it was installing starlette 0.19.1
-# therefore adding it explicitly
-typing-extensions>=3.10.0,<5
# in some environments for some reason the protobuf installed was 3.20.0 instead of 3.20.1,
# when installing google-cloud-storage which required >=3.20.1, <5 it was upgrading the protobuf version to the latest
# version, and because kfp 1.8.13 requires protobuf>=3.13, <4 this resulted in an incompatibility between kfp and protobuf
# this can be removed once kfp supports protobuf > 4
@@ -54,7 +47,7 @@ kubernetes~=12.0
# TODO: move to API requirements (shouldn't really be here, the sql run db using the API sqldb is preventing us from
# separating the SDK and API code) (referring to humanfriendly and fastapi)
humanfriendly~=8.2
-fastapi~=0.78.0
+fastapi~=0.88.0
fsspec~=2021.8.1
v3iofs~=0.1.15
# 3.4 and above failed building in some images - see https://github.com/pyca/cryptography/issues/5771
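As a reminder of what these pins mean: `~=` is the compatible-release operator, so `fastapi~=0.88.0` allows any 0.88.x but not 0.89.0. A quick sanity check, assuming the `packaging` library (which pip itself vendors) is available:

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=0.88.0")  # equivalent to >=0.88.0, <0.89.0
assert "0.88.3" in spec
assert "0.89.0" not in spec
```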
26 changes: 13 additions & 13 deletions tests/api/api/test_pipelines.py
@@ -71,9 +71,9 @@ def test_list_pipelines_formats(
    kfp_client_mock: kfp.Client,
) -> None:
    for format_ in [
-        mlrun.api.schemas.PipelinesFormat.full,
-        mlrun.api.schemas.PipelinesFormat.metadata_only,
-        mlrun.api.schemas.PipelinesFormat.name_only,
+        mlrun.api.schemas.PipelinesFormat.full.value,
+        mlrun.api.schemas.PipelinesFormat.metadata_only.value,
+        mlrun.api.schemas.PipelinesFormat.name_only.value,
    ]:
        runs = _generate_list_runs_mocks()
        expected_runs = [run.to_dict() for run in runs]
@@ -97,10 +97,10 @@ def test_get_pipeline_formats(
    kfp_client_mock: kfp.Client,
) -> None:
    for format_ in [
-        mlrun.api.schemas.PipelinesFormat.full,
-        mlrun.api.schemas.PipelinesFormat.metadata_only,
-        mlrun.api.schemas.PipelinesFormat.summary,
-        mlrun.api.schemas.PipelinesFormat.name_only,
+        mlrun.api.schemas.PipelinesFormat.full.value,
+        mlrun.api.schemas.PipelinesFormat.metadata_only.value,
+        mlrun.api.schemas.PipelinesFormat.summary.value,
+        mlrun.api.schemas.PipelinesFormat.name_only.value,
    ]:
        api_run_detail = _generate_get_run_mock()
        _mock_get_run(kfp_client_mock, api_run_detail)
@@ -119,7 +119,7 @@ def test_get_pipeline_no_project_opa_validation(
    client: fastapi.testclient.TestClient,
    kfp_client_mock: kfp.Client,
) -> None:
-    format_ = (mlrun.api.schemas.PipelinesFormat.summary,)
+    format_ = (mlrun.api.schemas.PipelinesFormat.summary.value,)
    project = "project-name"
    mlrun.api.crud.Pipelines().resolve_project_from_pipeline = unittest.mock.Mock(
        return_value=project
@@ -150,10 +150,10 @@ def test_get_pipeline_specific_project(
    kfp_client_mock: kfp.Client,
) -> None:
    for format_ in [
-        mlrun.api.schemas.PipelinesFormat.full,
-        mlrun.api.schemas.PipelinesFormat.metadata_only,
-        mlrun.api.schemas.PipelinesFormat.summary,
-        mlrun.api.schemas.PipelinesFormat.name_only,
+        mlrun.api.schemas.PipelinesFormat.full.value,
+        mlrun.api.schemas.PipelinesFormat.metadata_only.value,
+        mlrun.api.schemas.PipelinesFormat.summary.value,
+        mlrun.api.schemas.PipelinesFormat.name_only.value,
    ]:
        project = "project-name"
        api_run_detail = _generate_get_run_mock()
@@ -188,7 +188,7 @@ def test_list_pipelines_specific_project(
    )
    response = client.get(
        f"projects/{project}/pipelines",
-        params={"format": mlrun.api.schemas.PipelinesFormat.name_only},
+        params={"format": mlrun.api.schemas.PipelinesFormat.name_only.value},
    )
    expected_response = mlrun.api.schemas.PipelinesOutput(
        runs=expected_runs, total_size=len(expected_runs), next_page_token=None
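The `.value` changes across these tests likely track the TestClient swap in starlette 0.21+ (pulled in by fastapi 0.87+), where the requests-based client was replaced with an httpx-based one that is stricter about query-parameter types. Passing the enum's string value sidesteps the fact that `str()` on a str-based Enum can yield the member repr rather than the raw value, with behavior that varies across Python versions. A minimal sketch with a hypothetical enum:

```python
import enum


class PipelinesFormat(str, enum.Enum):
    name_only = "name_only"


# On some Python versions str() yields "PipelinesFormat.name_only", not "name_only"
print(str(PipelinesFormat.name_only))
# .value is unambiguous everywhere
print(PipelinesFormat.name_only.value)  # -> "name_only"
```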
14 changes: 7 additions & 7 deletions tests/api/api/test_projects.py
@@ -15,6 +15,7 @@
import copy
import datetime
import http
+import json.decoder
import os
import typing
import unittest.mock
@@ -25,7 +26,6 @@
import fastapi.testclient
import mergedeep
import pytest
-import simplejson.errors
import sqlalchemy.orm
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
@@ -123,7 +123,7 @@ def test_redirection_from_worker_to_chief_delete_project(
    assert response.status_code == expected_status
    try:
        assert response.json() == expected_response
-    except simplejson.errors.JSONDecodeError:
+    except json.decoder.JSONDecodeError:
        # NO_CONTENT response doesn't return json serializable response
        assert response.text == expected_response

@@ -687,7 +687,7 @@ def test_list_projects_leader_format(
    # list in leader format
    response = client.get(
        "projects",
-        params={"format": mlrun.api.schemas.ProjectsFormat.leader},
+        params={"format": mlrun.api.schemas.ProjectsFormat.leader.value},
        headers={
            mlrun.api.schemas.HeaderNames.projects_role: mlrun.mlconf.httpdb.projects.leader
        },
@@ -736,7 +736,7 @@ def test_projects_crud(
    project_patch = {
        "spec": {
            "description": "lemon",
-            "desired_state": mlrun.api.schemas.ProjectState.archived,
+            "desired_state": mlrun.api.schemas.ProjectState.archived.value,
        }
    }
    response = client.patch(f"projects/{name1}", json=project_patch)
@@ -782,7 +782,7 @@ def test_projects_crud(

    # list - full
    response = client.get(
-        "projects", params={"format": mlrun.api.schemas.ProjectsFormat.full}
+        "projects", params={"format": mlrun.api.schemas.ProjectsFormat.full.value}
    )
    projects_output = mlrun.api.schemas.ProjectsOutput(**response.json())
    expected = [project_1, project_2]
@@ -832,7 +832,7 @@ def test_projects_crud(

    # list - names only - filter by state
    _list_project_names_and_assert(
-        client, [name1], params={"state": mlrun.api.schemas.ProjectState.archived}
+        client, [name1], params={"state": mlrun.api.schemas.ProjectState.archived.value}
    )

    # add function to project 1
@@ -1225,7 +1225,7 @@ def _list_project_names_and_assert(
    client: TestClient, expected_names: typing.List[str], params: typing.Dict = None
):
    params = params or {}
-    params["format"] = mlrun.api.schemas.ProjectsFormat.name_only
+    params["format"] = mlrun.api.schemas.ProjectsFormat.name_only.value
    # list - names only - filter by state
    response = client.get(
        "projects",
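The exception swap above reflects the same client change: httpx decodes JSON with the stdlib `json` module, so a body-less response raises `json.decoder.JSONDecodeError`, whereas requests could surface `simplejson.errors.JSONDecodeError` when simplejson is installed. A minimal sketch of the handling pattern used in the test:

```python
import json.decoder


def body_or_text(response):
    try:
        return response.json()
    except json.decoder.JSONDecodeError:
        # e.g. a 204 NO_CONTENT response has no JSON body to decode
        return response.text
```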
30 changes: 15 additions & 15 deletions tests/api/api/test_runs.py
@@ -259,9 +259,9 @@ def test_list_runs_partition_by(db: Session, client: TestClient) -> None:
        client,
        {
            "project": projects[0],
-            "partition-by": mlrun.api.schemas.RunPartitionByField.name,
-            "partition-sort-by": mlrun.api.schemas.SortField.created,
-            "partition-order": mlrun.api.schemas.OrderType.asc,
+            "partition-by": mlrun.api.schemas.RunPartitionByField.name.value,
+            "partition-sort-by": mlrun.api.schemas.SortField.created.value,
+            "partition-order": mlrun.api.schemas.OrderType.asc.value,
        },
        3,
    )
@@ -274,9 +274,9 @@ def test_list_runs_partition_by(db: Session, client: TestClient) -> None:
        client,
        {
            "project": projects[0],
-            "partition-by": mlrun.api.schemas.RunPartitionByField.name,
-            "partition-sort-by": mlrun.api.schemas.SortField.updated,
-            "partition-order": mlrun.api.schemas.OrderType.desc,
+            "partition-by": mlrun.api.schemas.RunPartitionByField.name.value,
+            "partition-sort-by": mlrun.api.schemas.SortField.updated.value,
+            "partition-order": mlrun.api.schemas.OrderType.desc.value,
        },
        3,
    )
@@ -289,9 +289,9 @@ def test_list_runs_partition_by(db: Session, client: TestClient) -> None:
        client,
        {
            "project": projects[0],
-            "partition-by": mlrun.api.schemas.RunPartitionByField.name,
-            "partition-sort-by": mlrun.api.schemas.SortField.updated,
-            "partition-order": mlrun.api.schemas.OrderType.desc,
+            "partition-by": mlrun.api.schemas.RunPartitionByField.name.value,
+            "partition-sort-by": mlrun.api.schemas.SortField.updated.value,
+            "partition-order": mlrun.api.schemas.OrderType.desc.value,
            "rows-per-partition": 5,
        },
        15,
@@ -302,9 +302,9 @@ def test_list_runs_partition_by(db: Session, client: TestClient) -> None:
        client,
        {
            "project": projects[0],
-            "partition-by": mlrun.api.schemas.RunPartitionByField.name,
-            "partition-sort-by": mlrun.api.schemas.SortField.updated,
-            "partition-order": mlrun.api.schemas.OrderType.desc,
+            "partition-by": mlrun.api.schemas.RunPartitionByField.name.value,
+            "partition-sort-by": mlrun.api.schemas.SortField.updated.value,
+            "partition-order": mlrun.api.schemas.OrderType.desc.value,
            "rows-per-partition": 5,
            "max-partitions": 2,
        },
@@ -320,9 +320,9 @@ def test_list_runs_partition_by(db: Session, client: TestClient) -> None:
        {
            "project": projects[0],
            "iter": False,
-            "partition-by": mlrun.api.schemas.RunPartitionByField.name,
-            "partition-sort-by": mlrun.api.schemas.SortField.updated,
-            "partition-order": mlrun.api.schemas.OrderType.desc,
+            "partition-by": mlrun.api.schemas.RunPartitionByField.name.value,
+            "partition-sort-by": mlrun.api.schemas.SortField.updated.value,
+            "partition-order": mlrun.api.schemas.OrderType.desc.value,
            "rows-per-partition": 2,
            "max-partitions": 1,
        },
16 changes: 12 additions & 4 deletions tests/api/api/test_runtime_resources.py
@@ -83,7 +83,9 @@ def test_list_runtimes_resources_group_by_job(
    )
    response = client.get(
        "projects/*/runtime-resources",
-        params={"group-by": mlrun.api.schemas.ListRuntimeResourcesGroupByField.job},
+        params={
+            "group-by": mlrun.api.schemas.ListRuntimeResourcesGroupByField.job.value
+        },
    )
    body = response.json()
    expected_body = {
@@ -201,13 +203,17 @@ def test_list_runtime_resources_no_resources(
    assert body == []
    response = client.get(
        "projects/*/runtime-resources",
-        params={"group-by": mlrun.api.schemas.ListRuntimeResourcesGroupByField.job},
+        params={
+            "group-by": mlrun.api.schemas.ListRuntimeResourcesGroupByField.job.value
+        },
    )
    body = response.json()
    assert body == {}
    response = client.get(
        "projects/*/runtime-resources",
-        params={"group-by": mlrun.api.schemas.ListRuntimeResourcesGroupByField.project},
+        params={
+            "group-by": mlrun.api.schemas.ListRuntimeResourcesGroupByField.project.value
+        },
    )
    body = response.json()
    assert body == {}
@@ -751,7 +757,9 @@ def _mock_opa_filter_and_assert_list_response(
    )
    response = client.get(
        "projects/*/runtime-resources",
-        params={"group-by": mlrun.api.schemas.ListRuntimeResourcesGroupByField.project},
+        params={
+            "group-by": mlrun.api.schemas.ListRuntimeResourcesGroupByField.project.value
+        },
    )
    body = response.json()
    expected_body = (
5 changes: 4 additions & 1 deletion tests/api/api/test_tags.py
@@ -458,7 +458,10 @@ def _delete_artifact_tag(
        ],
        project: str = None,
    ):
-        return client.delete(
+        # using client.request instead of client.delete because the latter doesn't support body
+        # https://www.python-httpx.org/compatibility/#request-body-on-http-methods
+        return client.request(
+            "DELETE",
            API_TAGS_PATH.format(project=project or self.project, tag=tag),
            json=self._generate_tag_identifiers_json(identifiers=identifiers),
        )
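The linked httpx compatibility note explains the change above: `client.delete()` accepts no request body, so the generic `request()` method carries the JSON payload instead. A minimal standalone sketch of the pattern (the helper name is hypothetical):

```python
import httpx


def delete_with_body(client: httpx.Client, url: str, payload: dict) -> httpx.Response:
    # httpx.Client.delete() takes no json/data kwargs, so go through request()
    return client.request("DELETE", url, json=payload)
```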
10 changes: 1 addition & 9 deletions tests/api/conftest.py
@@ -68,15 +68,7 @@ def set_base_url_for_test_client(
    client: typing.Union[httpx.AsyncClient, TestClient],
    prefix: str = BASE_VERSIONED_API_PREFIX,
):
-    if isinstance(client, httpx.AsyncClient):
-        client.base_url = client.base_url.join(prefix)
-    elif isinstance(client, TestClient):
-        client.base_url = client.base_url + prefix
-
-        # https://stackoverflow.com/questions/10893374/python-confusions-with-urljoin/10893427#10893427
-        client.base_url = client.base_url.rstrip("/") + "/"
-    else:
-        raise NotImplementedError(f"Unknown test client type: {type(client)}")
+    client.base_url = client.base_url.join(prefix)


@pytest.fixture()
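The two client branches could collapse because starlette's TestClient is itself an `httpx.Client` from starlette 0.21 onward, so `base_url` is an `httpx.URL` on both paths and supports `join()`. A minimal sketch:

```python
import httpx

base = httpx.URL("http://testserver")
print(base.join("/api/v1/"))  # http://testserver/api/v1/
```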