# Source: bentoml/BentoML — bentoml/__init__.py (GitHub blob-view scrape artifacts removed)
# pylint: skip-file
"""
BentoML
=======

BentoML is the unified ML Model Serving framework. Data Scientists and ML
Engineers use BentoML to:

* Accelerate and standardize the process of taking ML models to production
  across teams
* Build reliable, scalable, and high performance model serving systems
* Provide a flexible MLOps platform that grows with your Data Science needs

To learn more, visit BentoML documentation at: http://docs.bentoml.org
To get involved with the development, find us on GitHub: https://github.com/bentoml
And join us in the BentoML slack community: https://l.linklyhq.com/l/ktOh
"""
from typing import TYPE_CHECKING
from ._internal.configuration import load_config
from ._internal.configuration import save_config
from ._internal.configuration import BENTOML_VERSION as __version__
# Inject dependencies and configurations at import time so every submodule
# imported below sees the fully loaded configuration.
load_config()
# Bento management APIs (build artifacts stored in the local bento store
# and remote Yatai).
from .bentos import get
from .bentos import list  # pylint: disable=W0622  # intentionally shadows builtin `list`
from .bentos import pull
from .bentos import push
from .bentos import delete
from .bentos import export_bento
from .bentos import import_bento
# BentoML built-in types re-exported at the package top level.
from ._internal.tag import Tag
from ._internal.bento import Bento
from ._internal.models import Model
from ._internal.runner import Runner
from ._internal.runner import Runnable
from ._internal.context import InferenceApiContext as Context
from ._internal.service import Service
from ._internal.utils.http import Cookie
from ._internal.yatai_client import YataiClient
from ._internal.monitoring.api import monitor
from ._internal.service.loader import load
# Framework specific modules, model management and IO APIs are lazily loaded upon import.
# Framework-specific modules, model management and IO APIs are loaded lazily so
# that `import bentoml` stays fast: none of the heavy ML framework dependencies
# are imported unless the corresponding attribute is actually used.
if TYPE_CHECKING:
    # Static imports let type checkers and IDEs resolve the lazily loaded
    # submodules; this branch never executes at runtime.
    from . import h2o
    from . import flax
    from . import onnx
    from . import gluon
    from . import keras
    from . import spacy
    from . import fastai
    from . import mlflow
    from . import paddle
    from . import easyocr
    from . import pycaret
    from . import pytorch
    from . import sklearn
    from . import xgboost
    from . import catboost
    from . import lightgbm
    from . import onnxmlir
    from . import detectron
    from . import tensorflow
    from . import statsmodels
    from . import torchscript
    from . import transformers
    from . import tensorflow_v1
    from . import picklable_model
    from . import pytorch_lightning

    # isort: off
    from . import io
    from . import models
    from . import metrics  # Prometheus metrics client
    from . import container  # Container API
    from . import client  # Client API

    # isort: on
else:
    # At runtime each name is bound to a LazyLoader proxy that imports the real
    # submodule on first attribute access.
    from ._internal.utils import LazyLoader as _LazyLoader

    catboost = _LazyLoader("bentoml.catboost", globals(), "bentoml.catboost")
    detectron = _LazyLoader("bentoml.detectron", globals(), "bentoml.detectron")
    easyocr = _LazyLoader("bentoml.easyocr", globals(), "bentoml.easyocr")
    flax = _LazyLoader("bentoml.flax", globals(), "bentoml.flax")
    fastai = _LazyLoader("bentoml.fastai", globals(), "bentoml.fastai")
    gluon = _LazyLoader("bentoml.gluon", globals(), "bentoml.gluon")
    h2o = _LazyLoader("bentoml.h2o", globals(), "bentoml.h2o")
    lightgbm = _LazyLoader("bentoml.lightgbm", globals(), "bentoml.lightgbm")
    mlflow = _LazyLoader("bentoml.mlflow", globals(), "bentoml.mlflow")
    onnx = _LazyLoader("bentoml.onnx", globals(), "bentoml.onnx")
    onnxmlir = _LazyLoader("bentoml.onnxmlir", globals(), "bentoml.onnxmlir")
    keras = _LazyLoader("bentoml.keras", globals(), "bentoml.keras")
    paddle = _LazyLoader("bentoml.paddle", globals(), "bentoml.paddle")
    pycaret = _LazyLoader("bentoml.pycaret", globals(), "bentoml.pycaret")
    pytorch = _LazyLoader("bentoml.pytorch", globals(), "bentoml.pytorch")
    pytorch_lightning = _LazyLoader(
        "bentoml.pytorch_lightning", globals(), "bentoml.pytorch_lightning"
    )
    sklearn = _LazyLoader("bentoml.sklearn", globals(), "bentoml.sklearn")
    picklable_model = _LazyLoader(
        "bentoml.picklable_model", globals(), "bentoml.picklable_model"
    )
    spacy = _LazyLoader("bentoml.spacy", globals(), "bentoml.spacy")
    statsmodels = _LazyLoader("bentoml.statsmodels", globals(), "bentoml.statsmodels")
    tensorflow = _LazyLoader("bentoml.tensorflow", globals(), "bentoml.tensorflow")
    tensorflow_v1 = _LazyLoader(
        "bentoml.tensorflow_v1", globals(), "bentoml.tensorflow_v1"
    )
    torchscript = _LazyLoader("bentoml.torchscript", globals(), "bentoml.torchscript")
    transformers = _LazyLoader(
        "bentoml.transformers", globals(), "bentoml.transformers"
    )
    xgboost = _LazyLoader("bentoml.xgboost", globals(), "bentoml.xgboost")
    io = _LazyLoader("bentoml.io", globals(), "bentoml.io")
    models = _LazyLoader("bentoml.models", globals(), "bentoml.models")
    metrics = _LazyLoader("bentoml.metrics", globals(), "bentoml.metrics")
    container = _LazyLoader("bentoml.container", globals(), "bentoml.container")
    client = _LazyLoader("bentoml.client", globals(), "bentoml.client")

    # Keep the module namespace clean; the proxies above hold their own refs.
    del _LazyLoader
# Public API of the top-level `bentoml` package; must stay in sync with the
# imports and the LazyLoader-bound framework modules above.
__all__ = [
    "__version__",
    "Context",
    "Cookie",
    "Service",
    "models",
    "metrics",
    "container",
    "client",
    "io",
    "Tag",
    "Model",
    "Runner",
    "Runnable",
    "YataiClient",  # Yatai REST API Client
    # bento APIs
    "list",
    "get",
    "delete",
    "import_bento",
    "export_bento",
    "load",
    "push",
    "pull",
    "Bento",
    # Framework specific modules (all lazily loaded, see above)
    "catboost",
    "detectron",
    "easyocr",
    "flax",
    "fastai",
    "gluon",
    "h2o",
    "lightgbm",
    "mlflow",
    "onnx",
    "onnxmlir",
    "paddle",
    "picklable_model",
    "pycaret",
    "pytorch",
    "pytorch_lightning",
    "keras",
    "sklearn",
    "spacy",
    "statsmodels",
    "tensorflow",
    "tensorflow_v1",
    "torchscript",
    "transformers",
    "xgboost",
    "monitor",
    "load_config",
    "save_config",
]