This article collects typical usage examples of the uvicorn.run method in Python. If you have been wondering what uvicorn.run does, or how to call it in practice, the curated code samples below may help. You can also read further into the uvicorn module in which the method lives.
The sections that follow show 15 code examples of uvicorn.run, sorted by popularity by default.
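Before the project-specific examples, here is a minimal, self-contained sketch of the most basic call: uvicorn.run() blocks the calling process and serves a single ASGI application until it is stopped. The tiny app callable, host and port below are placeholders chosen for illustration and are not taken from any of the examples that follow.

import uvicorn

async def app(scope, receive, send):
    # answer every HTTP request with a plain-text "ok"
    assert scope["type"] == "http"
    await send({"type": "http.response.start", "status": 200,
                "headers": [(b"content-type", b"text/plain")]})
    await send({"type": "http.response.body", "body": b"ok"})

if __name__ == "__main__":
    # uvicorn.run blocks here until the server is shut down (e.g. Ctrl+C)
    uvicorn.run(app, host="127.0.0.1", port=8000, log_level="info")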
Example 1: register_subcommand
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def register_subcommand(parser: ArgumentParser):
    """
    Register this command to argparse so it's available for the transformer-cli
    :param parser: Root parser to register command-specific arguments
    :return:
    """
    serve_parser = parser.add_parser(
        "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
    )
    serve_parser.add_argument(
        "--task", type=str, choices=SUPPORTED_TASKS.keys(), help="The task to run the pipeline on"
    )
    serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
    serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
    serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
    serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
    serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
    serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
    serve_parser.add_argument(
        "--device",
        type=int,
        default=-1,
        help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
    )
    serve_parser.set_defaults(func=serve_command_factory)
Example 2: run
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
async def run(self):
    # Note(simon): we have to use lower level uvicorn Config and Server
    # class because we want to run the server as a coroutine. The only
    # alternative is to call uvicorn.run which is blocking.
    config = uvicorn.Config(
        self.app,
        host=self.host,
        port=self.port,
        lifespan="off",
        access_log=False)
    server = uvicorn.Server(config=config)

    # TODO(edoakes): we need to override install_signal_handlers here
    # because the existing implementation fails if it isn't running in
    # the main thread and uvicorn doesn't expose a way to configure it.
    server.install_signal_handlers = lambda: None
    await server.serve()
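The pattern above also works outside a class. The sketch below shows the same idea in a self-contained form, assuming only a trivial ASGI callable named app; all names are illustrative rather than taken from the project this example comes from.

import asyncio
import uvicorn

async def app(scope, receive, send):
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"hello"})

async def main():
    # build the server as a coroutine instead of the blocking uvicorn.run call,
    # so it can share the event loop with other asyncio tasks
    config = uvicorn.Config(app, host="127.0.0.1", port=8000,
                            lifespan="off", access_log=False)
    server = uvicorn.Server(config=config)
    await server.serve()

if __name__ == "__main__":
    asyncio.run(main())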
Example 3: __enter__
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def __enter__(self):
    if pytest is not None and sys.version_info >= (3, 8):
        pytest.skip("LiveServer fails on 3.8")
    self._ready = Event()
    self._process = Process(
        target=uvicorn.run,
        args=(self.app,),
        kwargs={"callback_notify": self.callback_notify, **self.kwargs},
    )
    self._process.start()
    if not self._ready.wait(self.ready_timeout):  # pragma: no cover
        raise TimeoutError(
            f"Live server not ready after {self.ready_timeout} seconds"
        )
    return self
Example 4: run_server
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def run_server(model_path, host, port):
    model = LudwigModel.load(model_path)
    app = server(model)
    uvicorn.run(app, host=host, port=port)
Example 5: cmdl
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def cmdl(ctx, host, port, reload, workers):
    import uvicorn
    uvicorn.run(
        "a2ml.server.server:app",
        host=host, port=port, log_level="info", reload=reload, workers=workers
    )
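This example passes the application as an import string ("a2ml.server.server:app") instead of an app object. uvicorn needs the import-string form whenever reload or multiple workers are requested, because the reloader and worker processes have to re-import the application themselves. A minimal sketch of that form follows; "myproject.main:app" is a placeholder import string, not part of the example above.

import uvicorn

if __name__ == "__main__":
    # the object form uvicorn.run(app, ...) is fine for a single process without reload;
    # reload=True or workers > 1 require the import-string form used here
    uvicorn.run("myproject.main:app", host="127.0.0.1", port=8000, reload=True)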
Example 6: run
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def run(self):
    run(self._app, host=self.host, port=self.port, workers=self.workers)
Example 7: __init__
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
async def __init__(self, host, port, instance_name=None):
    # declared async so that fetch_config_from_master() can be awaited; this
    # relies on the surrounding framework (a Ray Serve actor in the original
    # source) allowing coroutine constructors, which plain Python classes do not
    serve.init(name=instance_name)
    self.app = HTTPProxy()
    await self.app.fetch_config_from_master()
    self.host = host
    self.port = port

    # Start running the HTTP server on the event loop.
    asyncio.get_event_loop().create_task(self.run())
Example 8: server
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def server(server_host, server_port):
    def start_server():
        uvicorn.run(app, host=server_host, port=server_port, log_level='debug')

    server_process = Process(target=start_server)
    server_process.start()
    sleep(0.5)
    yield 1
    sleep(1.2)
    server_process.terminate()
Example 9: server_two
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def server_two(server_host, server_port_two):
    def start_server():
        uvicorn.run(app_two, host=server_host, port=server_port_two, log_level='debug')

    server_process = Process(target=start_server)
    server_process.start()
    sleep(0.5)
    yield 1
    sleep(1.2)
    server_process.terminate()
Example 10: analyze
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
async def analyze(request):
    # must be a coroutine: request.form() and the uploaded file's read() are awaited
    data = await request.form()
    img_bytes = await (data['file'].read())
    img = plt.imread(BytesIO(img_bytes))
    img = np.array(img)
    img = resize(img, (64, 64, 3))
    img = img[None, ...]
    inp = graph.get_tensor_by_name('input_1_1:0')
    out = graph.get_tensor_by_name('fc2_1/Softmax:0')
    with tf.Session(graph=graph) as sess:
        pred = sess.run([out], feed_dict={inp: img})
    return JSONResponse({'result': str(classes[np.argmax(pred[0][0])])})
Example 11: start_ms_bf_server
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def start_ms_bf_server(model_config: Path,
                       app_id: Optional[str],
                       app_secret: Optional[str],
                       port: Optional[int] = None,
                       https: Optional[bool] = None,
                       ssl_key: Optional[str] = None,
                       ssl_cert: Optional[str] = None) -> None:
    server_params = get_server_params(model_config)

    host = server_params['host']
    port = port or server_params['port']

    ssl_config = get_ssl_params(server_params, https, ssl_key=ssl_key, ssl_cert=ssl_cert)

    input_q = Queue()
    bot = MSBot(model_config, input_q, app_id, app_secret)
    bot.start()

    endpoint = '/v3/conversations'

    redirect_root_to_docs(app, 'answer', endpoint, 'post')

    @app.post(endpoint)
    async def answer(activity: dict) -> dict:
        bot.input_queue.put(activity)
        return {}

    uvicorn.run(app, host=host, port=port, log_config=log_config, ssl_version=ssl_config.version,
                ssl_keyfile=ssl_config.keyfile, ssl_certfile=ssl_config.certfile)
    bot.join()
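The uvicorn.run call above enables HTTPS by passing ssl_keyfile, ssl_certfile and ssl_version. A standalone sketch of the same keywords follows; the ASGI callable and the certificate paths (./key.pem, ./cert.pem) are placeholders, and real values would come from your own deployment.

import ssl
import uvicorn

async def app(scope, receive, send):
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"secure hello"})

if __name__ == "__main__":
    # uvicorn forwards these arguments to the standard ssl module when it
    # builds the server's SSL context
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8443,
        ssl_keyfile="./key.pem",    # placeholder path to the private key
        ssl_certfile="./cert.pem",  # placeholder path to the certificate
        ssl_version=ssl.PROTOCOL_TLS_SERVER,
    )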
Example 12: start_alice_server
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def start_alice_server(model_config: Union[str, Path],
                       host: Optional[str] = None,
                       port: Optional[int] = None,
                       endpoint: Optional[str] = None,
                       https: Optional[bool] = None,
                       ssl_key: Optional[str] = None,
                       ssl_cert: Optional[str] = None) -> None:
    server_params = get_server_params(model_config)

    host = host or server_params['host']
    port = port or server_params['port']
    endpoint = endpoint or server_params['model_endpoint']

    ssl_config = get_ssl_params(server_params, https, ssl_key=ssl_key, ssl_cert=ssl_cert)

    input_q = Queue()
    output_q = Queue()

    bot = AliceBot(model_config, input_q, output_q)
    bot.start()

    redirect_root_to_docs(app, 'answer', endpoint, 'post')

    @app.post(endpoint, summary='A model endpoint', response_description='A model response')
    async def answer(data: dict = data_body) -> dict:
        loop = asyncio.get_event_loop()
        bot.input_queue.put(data)
        response: dict = await loop.run_in_executor(None, bot.output_queue.get)
        return response

    uvicorn.run(app, host=host, port=port, log_config=log_config, ssl_version=ssl_config.version,
                ssl_keyfile=ssl_config.keyfile, ssl_certfile=ssl_config.certfile)
    bot.join()
Example 13: main
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def main(argv: List[str]) -> Optional[Union[int, str]]:
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--cache", action="store_true",
                        help="use local repo cache")
    parser.add_argument("-p", "--port", type=int, default=8160,
                        help="port number")
    args = parser.parse_args()
    appconfig.CACHE_LOCAL = args.cache
    uvicorn.run(app, host="127.0.0.1", port=args.port)
    return None
Example 14: start_model_server
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def start_model_server(model_config: Path,
                       https: Optional[bool] = None,
                       ssl_key: Optional[str] = None,
                       ssl_cert: Optional[str] = None,
                       port: Optional[int] = None) -> None:
    server_params = get_server_params(model_config)

    host = server_params['host']
    port = port or server_params['port']
    model_endpoint = server_params['model_endpoint']
    model_args_names = server_params['model_args_names']

    ssl_config = get_ssl_params(server_params, https, ssl_key=ssl_key, ssl_cert=ssl_cert)

    model = build_model(model_config)

    def batch_decorator(cls: ModelMetaclass) -> ModelMetaclass:
        cls.__annotations__ = {arg_name: list for arg_name in model_args_names}
        cls.__fields__ = {arg_name: ModelField(name=arg_name, type_=list, class_validators=None,
                                               model_config=BaseConfig, required=False, field_info=Field(None))
                          for arg_name in model_args_names}
        return cls

    @batch_decorator
    class Batch(BaseModel):
        pass

    redirect_root_to_docs(app, 'answer', model_endpoint, 'post')

    model_endpoint_post_example = {arg_name: ['string'] for arg_name in model_args_names}

    @app.post(model_endpoint, summary='A model endpoint')
    async def answer(item: Batch = Body(..., example=model_endpoint_post_example)) -> List:
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, interact, model, item.dict())

    @app.post('/probe', include_in_schema=False)
    async def probe(item: Batch) -> List[str]:
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, test_interact, model, item.dict())

    @app.get('/api', summary='Model argument names')
    async def api() -> List[str]:
        return model_args_names

    uvicorn.run(app, host=host, port=port, log_config=log_config, ssl_version=ssl_config.version,
                ssl_keyfile=ssl_config.keyfile, ssl_certfile=ssl_config.certfile, timeout_keep_alive=20)
Example 15: start_alexa_server
# Required import: import uvicorn [as alias]
# Or: from uvicorn import run [as alias]
def start_alexa_server(model_config: Union[str, Path, dict],
                       port: Optional[int] = None,
                       https: Optional[bool] = None,
                       ssl_key: Optional[str] = None,
                       ssl_cert: Optional[str] = None) -> None:
    """Initiates FastAPI web service with Alexa skill.

    Allows raise Alexa web service with DeepPavlov config in backend.

    Args:
        model_config: DeepPavlov config path.
        port: FastAPI web service port.
        https: Flag for running Alexa skill service in https mode.
        ssl_key: SSL key file path.
        ssl_cert: SSL certificate file path.
    """
    server_params = get_server_params(model_config)

    host = server_params['host']
    port = port or server_params['port']

    ssl_config = get_ssl_params(server_params, https, ssl_key=ssl_key, ssl_cert=ssl_cert)

    input_q = Queue()
    output_q = Queue()

    bot = AlexaBot(model_config, input_q, output_q)
    bot.start()

    endpoint = '/interact'

    redirect_root_to_docs(app, 'interact', endpoint, 'post')

    @app.post(endpoint, summary='Amazon Alexa custom service endpoint', response_description='A model response')
    async def interact(data: dict = data_body,
                       signature: str = signature_header,
                       signature_chain_url: str = cert_chain_url_header) -> JSONResponse:
        # It is necessary for correct data validation to serialize data to a JSON formatted string with separators.
        request_dict = {
            'request_body': json.dumps(data, separators=(',', ':')).encode('utf-8'),
            'signature_chain_url': signature_chain_url,
            'signature': signature,
            'alexa_request': data
        }

        bot.input_queue.put(request_dict)
        loop = asyncio.get_event_loop()
        response: dict = await loop.run_in_executor(None, bot.output_queue.get)
        response_code = 400 if 'error' in response.keys() else 200
        return JSONResponse(response, status_code=response_code)

    uvicorn.run(app, host=host, port=port, log_config=log_config, ssl_version=ssl_config.version,
                ssl_keyfile=ssl_config.keyfile, ssl_certfile=ssl_config.certfile)
    bot.join()