This article collects typical usage examples of the starlette.responses.JSONResponse class in Python. If you are wondering how responses.JSONResponse is used in practice, or are looking for concrete examples, the curated code samples here may help. You can also explore further usage examples from the containing module, starlette.responses.
Below are 15 code examples of responses.JSONResponse, sorted by popularity by default.
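Before the collected examples, here is a minimal sketch of JSONResponse in a Starlette application; the route path and payload are illustrative only and not taken from any example below.

from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route


async def homepage(request):
    # JSONResponse serializes the dict to JSON and sets Content-Type: application/json.
    return JSONResponse({"hello": "world"}, status_code=200)


app = Starlette(routes=[Route("/", homepage)])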
Example 1: register_route
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
def register_route(self, app):
    self.app = app
    from starlette.responses import JSONResponse, HTMLResponse

    self.app.add_route(
        self.config.spec_url,
        lambda request: JSONResponse(self.spectree.spec),
    )

    for ui in PAGES:
        self.app.add_route(
            f'/{self.config.PATH}/{ui}',
            lambda request, ui=ui: HTMLResponse(
                PAGES[ui].format(self.config.spec_url)
            ),
        )
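A hedged usage sketch of how this plugin method is typically reached (assuming spectree's Starlette backend): register() wires the OpenAPI spec route and the documentation-UI routes onto an existing app, ultimately invoking register_route(app) as shown above.

from starlette.applications import Starlette
from spectree import SpecTree

app = Starlette()
api = SpecTree("starlette")
api.register(app)  # spec served via JSONResponse, UI pages via HTMLResponse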
Example 2: graphql_http_server
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
async def graphql_http_server(self, request: Request) -> Response:
    try:
        data = await self.extract_data_from_request(request)
    except HttpError as error:
        return PlainTextResponse(error.message or error.status, status_code=400)

    context_value = await self.get_context_for_request(request)
    extensions = await self.get_extensions_for_request(request, context_value)
    middleware = await self.get_middleware_for_request(request, context_value)

    success, response = await graphql(
        self.schema,
        data,
        context_value=context_value,
        root_value=self.root_value,
        validation_rules=self.validation_rules,
        debug=self.debug,
        introspection=self.introspection,
        logger=self.logger,
        error_formatter=self.error_formatter,
        extensions=extensions,
        middleware=middleware,
    )

    status_code = 200 if success else 400
    return JSONResponse(response, status_code=status_code)
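This handler appears to come from Ariadne's ASGI integration, so an application normally does not call it directly. A minimal sketch of the assumed surrounding setup, mounting the GraphQL app that dispatches to this method:

from ariadne import QueryType, make_executable_schema
from ariadne.asgi import GraphQL

query = QueryType()

@query.field("hello")
def resolve_hello(*_):
    return "Hello!"

schema = make_executable_schema("type Query { hello: String! }", query)
app = GraphQL(schema, debug=True)  # query results are returned as JSONResponse, as in the method above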
Example 3: simulator
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
async def simulator(request: requests.Request):
    token = request.headers.get("Authorization")
    if token:
        token = token[6:]  # Drop 'token '

    data = SimulatorSchema(await request.json())
    if data["pull_request"]:
        loop = asyncio.get_running_loop()
        title, summary = await loop.run_in_executor(
            None,
            functools.partial(
                _sync_simulator,
                data["mergify.yml"]["pull_request_rules"],
                *data["pull_request"],
                token=token,
            ),
        )
    else:
        title, summary = ("The configuration is valid", None)

    return responses.JSONResponse(
        status_code=200, content={"title": title, "summary": summary}
    )
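A hypothetical request body for the simulator endpoint above. The exact shape of "pull_request" is defined by SimulatorSchema, which is not shown on this page; sending None simply exercises the "configuration is valid" branch.

payload = {
    "mergify.yml": {"pull_request_rules": []},  # the rules to validate/simulate
    "pull_request": None,  # shape defined by SimulatorSchema when simulating against a real PR
}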
Example 4: post
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
async def post(self, request: Request) -> Response:
    content_type = request.headers.get("Content-Type", "")

    if "application/json" in content_type:
        try:
            data = await request.json()
        except json.JSONDecodeError:
            return JSONResponse({"error": "Invalid JSON."}, 400)
    elif "application/graphql" in content_type:
        body = await request.body()
        data = {"query": body.decode()}
    elif "query" in request.query_params:
        data = request.query_params
    else:
        return PlainTextResponse("Unsupported Media Type", 415)

    return await self._get_response(request, data=data)
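For illustration, the three request shapes this handler accepts, sketched from the client side with httpx; the endpoint URL is an assumption.

import httpx

URL = "http://localhost:8000/graphql"  # hypothetical endpoint

httpx.post(URL, json={"query": "{ hello }"})                  # application/json body
httpx.post(URL, content="{ hello }",
           headers={"Content-Type": "application/graphql"})   # raw application/graphql body
httpx.post(URL, params={"query": "{ hello }"})                # query string parameter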
Example 5: _get_response
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
async def _get_response(self, request: Request, data: QueryParams) -> Response:
    try:
        query = data["query"]
    except KeyError:
        return PlainTextResponse("No GraphQL query found in the request", 400)

    config = get_graphql_config(request)
    background = BackgroundTasks()
    context = {"req": request, "background": background, **config.context}

    engine: Engine = config.engine
    result: dict = await engine.execute(
        query,
        context=context,
        variables=data.get("variables"),
        operation_name=data.get("operationName"),
    )

    content = {"data": result["data"]}
    has_errors = "errors" in result
    if has_errors:
        content["errors"] = format_errors(result["errors"])
    status = 400 if has_errors else 200

    return JSONResponse(content=content, status_code=status, background=background)
Example 6: analyze
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
async def analyze(request):
    data = await request.body()
    instances = json.loads(data.decode('utf-8'))['instances']

    # Convert from base64-encoded image bytes to PIL images, then to tensors.
    img_bytes = [b64decode(inst['image_bytes']['b64']) for inst in instances]
    tensors = [pil2tensor(Image.open(BytesIO(byts)), dtype=np.float32).div_(255) for byts in img_bytes]
    tfm_tensors = [learner.data.valid_dl.tfms[0]((tensor, torch.zeros(0)))[0] for tensor in tensors]

    # Batch predict, passing dummy labels as the second element of the batch.
    dummy_labels = torch.zeros(len(tfm_tensors))
    tensor_stack = torch.stack(tfm_tensors)
    if torch.cuda.is_available():
        tensor_stack = tensor_stack.cuda()
    pred_tensor = learner.pred_batch(batch=(tensor_stack, dummy_labels))

    # Take the index of the maximum value along the prediction axis as the class.
    classes = np.argmax(np.array(pred_tensor), axis=1)
    return JSONResponse(dict(predictions=classes.tolist()))
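The request body this endpoint expects, sketched as a Python dict; the image file name is a placeholder and the base64 string stands in for real image bytes.

import base64
import json

with open("example.jpg", "rb") as fh:  # hypothetical image file
    payload = {
        "instances": [
            {"image_bytes": {"b64": base64.b64encode(fh.read()).decode()}}
        ]
    }
body = json.dumps(payload)  # POST this as the request body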
Example 7: index
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
async def index(request: Request) -> JSONResponse:
    with incoming_trace("fetch-counter", request, tracer) as span:
        with outgoing_trace("next-count", request, tracer, span) as out:
            outgoing_span, outgoing_headers = out
            counter = await requests.get(
                'http://service2:8000/', headers=outgoing_headers, timeout=1)
            data = counter.json()
            count = data['last']
    app.counter_gauge.set({"path": request.url.path}, count)
    return JSONResponse({
        'svc': 'service1',
        'version': '2',
        'timestamp': time.time(),
        'count': count
    })
Example 8: index
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
async def index(request: Request) -> JSONResponse:
    with incoming_trace("fetch-counter", request, tracer) as span:
        with outgoing_trace("next-count", request, tracer, span) as out:
            outgoing_span, outgoing_headers = out
            counter_breaker = CircuitBreaker(
                fail_max=3, timeout_duration=timedelta(seconds=30))
            counter = await counter_breaker.call_async(
                requests.get, 'http://service2:8000/', timeout=1,
                headers=outgoing_headers)
            data = counter.json()
            count = data['last']
    app.counter_gauge.set({"path": request.url.path}, count)
    return JSONResponse({
        'svc': 'service1',
        'version': '3',
        'timestamp': time.time(),
        'count': count
    })
Example 9: predict
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
async def predict(request):
    """
    async api

    descriptions about this function
    """
    print(request.path_params)
    print(request.context)
    return JSONResponse({'label': 5, 'score': 0.5})
Example 10: get
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
def get(self, request):
    """
    health check
    """
    return JSONResponse({'msg': 'pong'})
Example 11: get
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
def get(self, request):
    """summary
    description"""
    return JSONResponse({'msg': 'pong'})
Example 12: user_score
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
def user_score(request):
    score = [randint(0, request.context.json.limit) for _ in range(5)]
    score.sort(reverse=request.context.query.order)
    assert request.context.cookies.pub == 'abcdefg'
    assert request.cookies['pub'] == 'abcdefg'
    return JSONResponse({
        'name': request.context.json.name,
        'score': score
    })
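request.context.json, request.context.query, and request.context.cookies in the handler above are populated by spectree's validate decorator from pydantic models. A hedged sketch of the assumed setup; the model names are illustrative, not taken from this page.

from pydantic import BaseModel
from spectree import SpecTree

api = SpecTree("starlette")

class Query(BaseModel):
    order: bool

class JSON(BaseModel):
    name: str
    limit: int

class Cookies(BaseModel):
    pub: str

# The handler would then be decorated along the lines of:
# @api.validate(query=Query, json=JSON, cookies=Cookies)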
Example 13: server
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
def server(model):
    app = FastAPI()

    input_features = {
        f['name'] for f in model.model_definition['input_features']
    }

    @app.get('/')
    def check_health():
        return JSONResponse({"message": "Ludwig server is up"})

    @app.post('/predict')
    async def predict(request: Request):
        form = await request.form()
        files, entry = convert_input(form)

        try:
            if (entry.keys() & input_features) != input_features:
                return JSONResponse(ALL_FEATURES_PRESENT_ERROR,
                                    status_code=400)
            try:
                resp = model.predict(data_dict=[entry]).to_dict('records')[0]
                return JSONResponse(resp)
            except Exception as e:
                logger.error("Error: {}".format(str(e)))
                return JSONResponse(COULD_NOT_RUN_INFERENCE_ERROR,
                                    status_code=500)
        finally:
            for f in files:
                os.remove(f.name)

    return app
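A usage sketch under assumptions: the model path is a hypothetical directory containing a trained Ludwig model, and Starlette's TestClient drives the FastAPI app returned by server() without starting an HTTP server.

from ludwig.api import LudwigModel
from starlette.testclient import TestClient

model = LudwigModel.load("results/model")  # hypothetical path to a trained model
client = TestClient(server(model))
print(client.get("/").json())              # {"message": "Ludwig server is up"}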
Example 14: http_exception_handler
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
def http_exception_handler(request: Request, exc: HTTPException) -> JSONResponse:
    headers = getattr(exc, "headers", None)
    if headers:
        return JSONResponse(
            {"detail": exc.detail}, status_code=exc.status_code, headers=headers
        )
    else:
        return JSONResponse({"detail": exc.detail}, status_code=exc.status_code)
Example 15: request_validation_exception_handler
# Required import: from starlette import responses [as alias]
# Or: from starlette.responses import JSONResponse [as alias]
def request_validation_exception_handler(
    request: Request, exc: RequestValidationError
) -> JSONResponse:
    return JSONResponse(
        status_code=HTTP_422_UNPROCESSABLE_ENTITY,
        content={"detail": jsonable_encoder(exc.errors())},
    )
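A minimal sketch (assumed setup, not part of the examples above) of how handlers like the two preceding ones are registered on a FastAPI application:

from fastapi import FastAPI
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException

app = FastAPI()
# Route HTTP errors and request-validation errors to the JSON handlers defined above.
app.add_exception_handler(HTTPException, http_exception_handler)
app.add_exception_handler(RequestValidationError, request_validation_exception_handler)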