Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We'll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Torch inference mode for prediction #15719

Merged
merged 2 commits into from Nov 19, 2022
Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
8 changes: 5 additions & 3 deletions src/lightning_app/components/serve/python_server.py
Expand Up @@ -3,6 +3,7 @@
from pathlib import Path
from typing import Any, Dict, Optional

import torch
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
Expand Down Expand Up @@ -105,7 +106,7 @@ def predict(self, request):
self._input_type = input_type
self._output_type = output_type

def setup(self) -> None:
def setup(self, *args, **kwargs) -> None:
hhsecond marked this conversation as resolved.
Show resolved Hide resolved
"""This method is called before the server starts. Override this if you need to download the model or
initialize the weights, setting up pipelines etc.

Expand Down Expand Up @@ -154,7 +155,8 @@ def _attach_predict_fn(self, fastapi_app: FastAPI) -> None:
output_type: type = self.configure_output_type()

def predict_fn(request: input_type): # type: ignore
return self.predict(request)
with torch.inference_mode():
return self.predict(request)

fastapi_app.post("/predict", response_model=output_type)(predict_fn)

Expand Down Expand Up @@ -207,7 +209,7 @@ def run(self, *args: Any, **kwargs: Any) -> Any:

Normally, you don't need to override this method.
"""
self.setup()
self.setup(*args, **kwargs)

fastapi_app = FastAPI()
self._attach_predict_fn(fastapi_app)
Expand Down