hotfix import torch #15849

Merged: 13 commits, Nov 28, 2022
src/lightning_app/components/serve/python_server.py (11 changes: 9 additions & 2 deletions)
@@ -4,7 +4,6 @@
 from pathlib import Path
 from typing import Any, Dict, Optional
 
-import torch
 import uvicorn
 from fastapi import FastAPI
 from pydantic import BaseModel
@@ -13,10 +12,15 @@
 from lightning_app.core.queues import MultiProcessQueue
 from lightning_app.core.work import LightningWork
 from lightning_app.utilities.app_helpers import Logger
+from lightning_app.utilities.imports import _is_torch_available, requires
 from lightning_app.utilities.proxies import _proxy_setattr, unwrap, WorkRunExecutor, WorkStateObserver
 
 logger = Logger(__name__)
 
+# Skip doctests if requirements aren't available
+if not _is_torch_available():
+    __doctest_skip__ = ["PythonServer", "PythonServer.*"]
+
 
 class _PyTorchSpawnRunExecutor(WorkRunExecutor):

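The `_is_torch_available()` guard is what lets this module import cleanly on installations without torch: only the `PythonServer` doctests are skipped, rather than the whole file failing at import time. The helper itself lives in `lightning_app.utilities.imports` and is not part of this diff; a minimal sketch of what such a check can look like, using `importlib.util.find_spec` so torch is detected without actually being imported:

    import importlib.util

    def _is_torch_available() -> bool:
        # Detect torch without importing it (importing here would defeat
        # the point of the hotfix).
        return importlib.util.find_spec("torch") is not None

    # The module stays importable either way; only the doctests that
    # exercise PythonServer are skipped when torch is missing.
    if not _is_torch_available():
        __doctest_skip__ = ["PythonServer", "PythonServer.*"]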
@@ -86,6 +90,7 @@ def _get_sample_data() -> Dict[Any, Any]:
 
 
 class PythonServer(LightningWork, abc.ABC):
+    @requires("torch")
     def __init__(  # type: ignore
         self,
         host: str = "127.0.0.1",
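With `@requires("torch")` on `__init__`, the missing-dependency error moves from import time to instantiation time: `import lightning_app` succeeds without torch, and an error is raised only when someone actually constructs a `PythonServer`. The decorator's implementation is not shown in this diff; a hedged sketch of how such a call-time check can be written:

    import functools
    import importlib.util
    from typing import Any, Callable

    def requires(module: str) -> Callable:
        # Decorator factory: raise at call time, not import time, if
        # `module` is not installed.
        def decorator(fn: Callable) -> Callable:
            @functools.wraps(fn)
            def wrapper(*args: Any, **kwargs: Any) -> Any:
                if importlib.util.find_spec(module) is None:
                    raise ModuleNotFoundError(
                        f"`{fn.__qualname__}` requires `{module}`, which is not installed."
                    )
                return fn(*args, **kwargs)
            return wrapper
        return decorator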
@@ -199,11 +204,13 @@ def _get_sample_dict_from_datatype(datatype: Any) -> dict:
         return out
 
     def _attach_predict_fn(self, fastapi_app: FastAPI) -> None:
+        from torch import inference_mode
+
         input_type: type = self.configure_input_type()
         output_type: type = self.configure_output_type()
 
         def predict_fn(request: input_type):  # type: ignore
-            with torch.inference_mode():
+            with inference_mode():
                 return self.predict(request)
 
         fastapi_app.post("/predict", response_model=output_type)(predict_fn)
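Moving `from torch import inference_mode` inside `_attach_predict_fn` is the same deferral in one more place: torch is imported only when a server actually wires up its `/predict` endpoint, and each request then runs under `inference_mode()` (autograd disabled, like `torch.no_grad()` but with additional inference-only optimizations). A hypothetical subclass, inferred from the methods this diff calls (`configure_input_type`, `configure_output_type`, `predict`), showing what ends up behind that endpoint:

    from pydantic import BaseModel

    from lightning_app.components.serve.python_server import PythonServer

    class TextInput(BaseModel):
        text: str

    class TextOutput(BaseModel):
        prediction: str

    class EchoServer(PythonServer):
        # Illustrative only: the base class builds the FastAPI app,
        # registers /predict, and wraps predict() in inference_mode().
        def configure_input_type(self) -> type:
            return TextInput

        def configure_output_type(self) -> type:
            return TextOutput

        def predict(self, request: TextInput) -> TextOutput:
            return TextOutput(prediction=request.text.upper())

Because of the `@requires("torch")` decorator above, instantiating `EchoServer` still needs torch installed; merely importing the module does not.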