This article collects typical usage examples of the Python method tensorflow_serving.apis.predict_pb2.PredictRequest. If you are unsure what predict_pb2.PredictRequest does or how to use it, the curated code examples below should help; you can also explore the other members of the containing module, tensorflow_serving.apis.predict_pb2.
The following shows 15 code examples of predict_pb2.PredictRequest, sorted by popularity by default.
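Before the numbered examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: open a gRPC channel, build a PredictRequest, set the model spec, attach named input tensors, and call the Predict RPC. The host, port, model name, and tensor key below are illustrative placeholders, not taken from any particular example.

import grpc
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc

def minimal_predict(host='localhost', port=8500, model_name='my_model'):
    # Open an insecure channel to the TensorFlow model server.
    channel = grpc.insecure_channel('%s:%d' % (host, port))
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    # Build the request: model spec plus one named input tensor.
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = 'serving_default'
    # 'input' must match an input name in the model's serving signature.
    request.inputs['input'].CopyFrom(tf.make_tensor_proto([1.0, 2.0, 3.0]))
    return stub.Predict(request, 10.0)  # 10-second timeout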
Example 1: make_grpc_request_fn
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def make_grpc_request_fn(servable_name, server, timeout_secs):
    """Wraps function to make gRPC requests with runtime args."""
    stub = _create_stub(server)

    def _make_grpc_request(examples):
        """Builds and sends request to TensorFlow model server."""
        request = predict_pb2.PredictRequest()
        request.model_spec.name = servable_name
        request.inputs["input"].CopyFrom(
            tf.contrib.util.make_tensor_proto(
                [ex.SerializeToString() for ex in examples], shape=[len(examples)]))
        response = stub.Predict(request, timeout_secs)
        outputs = tf.make_ndarray(response.outputs["outputs"])
        scores = tf.make_ndarray(response.outputs["scores"])
        assert len(outputs) == len(scores)
        return [{
            "outputs": outputs[i],
            "scores": scores[i]
        } for i in range(len(outputs))]

    return _make_grpc_request
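Both this example and the next rely on a _create_stub helper that is not shown. A minimal sketch of what it plausibly does, assuming the modern (non-beta) gRPC API and that server is a "host:port" string:

def _create_stub(server):
    # Hypothetical reconstruction of the helper used above.
    channel = grpc.insecure_channel(server)
    return prediction_service_pb2_grpc.PredictionServiceStub(channel)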
Example 2: make_grpc_request_fn
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def make_grpc_request_fn(servable_name, server, timeout_secs):
    """Wraps function to make gRPC requests with runtime args."""
    stub = _create_stub(server)

    def _make_grpc_request(examples):
        """Builds and sends request to TensorFlow model server."""
        request = predict_pb2.PredictRequest()
        request.model_spec.name = servable_name
        request.inputs["input"].CopyFrom(
            tf.make_tensor_proto(
                [ex.SerializeToString() for ex in examples], shape=[len(examples)]))
        response = stub.Predict(request, timeout_secs)
        outputs = tf.make_ndarray(response.outputs["outputs"])
        scores = tf.make_ndarray(response.outputs["scores"])
        assert len(outputs) == len(scores)
        return [{  # pylint: disable=g-complex-comprehension
            "outputs": output,
            "scores": score
        } for output, score in zip(outputs, scores)]

    return _make_grpc_request
Example 3: send_request
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def send_request(stub, model_name, batch_tokens, timeout=5.0):
    """Sends a translation request.

    Args:
        stub: The prediction service stub.
        model_name: The model to request.
        batch_tokens: A batch of tokenized sentences (a list of token lists).
        timeout: Timeout after this many seconds.

    Returns:
        A future.
    """
    batch_tokens, lengths, max_length = pad_batch(batch_tokens)
    batch_size = len(lengths)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.inputs["tokens"].CopyFrom(tf.make_tensor_proto(
        batch_tokens, dtype=tf.string, shape=(batch_size, max_length)))
    request.inputs["length"].CopyFrom(tf.make_tensor_proto(
        lengths, dtype=tf.int32, shape=(batch_size,)))
    return stub.Predict.future(request, timeout)
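The pad_batch helper used above is not shown. A plausible sketch, assuming batch_tokens is a list of token lists and that sentences are padded with empty strings to the longest length in the batch:

def pad_batch(batch_tokens):
    # Pad every sentence to the length of the longest one.
    lengths = [len(tokens) for tokens in batch_tokens]
    max_length = max(lengths)
    padded = [tokens + [''] * (max_length - len(tokens)) for tokens in batch_tokens]
    return padded, lengths, max_length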
Example 4: get_image_quality_predictions
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def get_image_quality_predictions(image_path, model_name):
    # Load and preprocess image
    image = utils.load_image(image_path, target_size=(224, 224))
    image = keras.applications.mobilenet.preprocess_input(image)

    # Run through model
    target = f'{TFS_HOST}:{TFS_PORT}'
    channel = grpc.insecure_channel(target)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = 'image_quality'
    request.inputs['input_image'].CopyFrom(
        tf.contrib.util.make_tensor_proto(np.expand_dims(image, 0))
    )

    response = stub.Predict(request, 10.0)
    result = round(calc_mean_score(response.outputs['quality_prediction'].float_val), 2)
    print(json.dumps({'mean_score_prediction': np.round(result, 3)}, indent=2))
Example 5: prepare_stub_and_request
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def prepare_stub_and_request(address, model_name, model_version=None, creds=None, opts=None,
                             request_type=INFERENCE_REQUEST):
    if opts is not None:
        opts = (('grpc.ssl_target_name_override', opts),)
    if creds is not None:
        channel = grpc.secure_channel(address, creds, options=opts)
    else:
        channel = grpc.insecure_channel(address, options=opts)
    request = None
    stub = None
    if request_type == MODEL_STATUS_REQUEST:
        request = get_model_status_pb2.GetModelStatusRequest()
        stub = model_service_pb2_grpc.ModelServiceStub(channel)
    elif request_type == INFERENCE_REQUEST:
        stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
        request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    if model_version is not None:
        request.model_spec.version.value = model_version
    return stub, request
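A typical call site for this helper might look like the following sketch; the address, model name, version, and input tensor name are illustrative assumptions, not part of the quoted example:

import numpy as np
import tensorflow as tf

# request_type defaults to INFERENCE_REQUEST, so this returns a PredictRequest.
stub, request = prepare_stub_and_request('localhost:9000', 'resnet', model_version=1)
request.inputs['data'].CopyFrom(
    tf.make_tensor_proto(np.zeros((1, 3, 224, 224), dtype=np.float32)))
response = stub.Predict(request, 10.0)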
Example 6: testBuildRequests_EstimatorModel_Predict
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def testBuildRequests_EstimatorModel_Predict(self):
    builder = request_builder._TFServingRpcRequestBuilder(
        model_name='foo',
        signatures=self._GetEstimatorModelSignature(
            signature_names=['predict']))
    builder.ReadExamplesArtifact(self._examples, num_examples=1)
    result = builder.BuildRequests()
    self.assertEqual(len(result), 1)
    self.assertIsInstance(result[0], predict_pb2.PredictRequest)
    self.assertEqual(result[0].model_spec.name, 'foo')
    self.assertEqual(result[0].model_spec.signature_name, 'predict')
    self.assertEqual(len(result[0].inputs), 1)
    input_key = list(result[0].inputs.keys())[0]
    self.assertEqual(result[0].inputs[input_key].dtype,
                     tf.dtypes.string.as_datatype_enum)
Example 7: main
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def main():
    # Generate inference data
    keys = numpy.asarray([1, 2, 3, 4])
    keys_tensor_proto = tf.contrib.util.make_tensor_proto(keys, dtype=tf.int32)
    features = numpy.asarray(
        [[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 1, 1, 1, 1, 1, 1, 1, 1],
         [9, 8, 7, 6, 5, 4, 3, 2, 1], [9, 9, 9, 9, 9, 9, 9, 9, 9]])
    features_tensor_proto = tf.contrib.util.make_tensor_proto(
        features, dtype=tf.float32)

    # Create gRPC client
    channel = implementations.insecure_channel(FLAGS.host, FLAGS.port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = FLAGS.model_name
    if FLAGS.model_version > 0:
        request.model_spec.version.value = FLAGS.model_version
    if FLAGS.signature_name != "":
        request.model_spec.signature_name = FLAGS.signature_name
    request.inputs["keys"].CopyFrom(keys_tensor_proto)
    request.inputs["features"].CopyFrom(features_tensor_proto)

    # Send request
    result = stub.Predict(request, FLAGS.request_timeout)
    print(result)
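Example 7 above, like Examples 8, 12, and 15 below, uses the long-deprecated gRPC beta API (implementations.insecure_channel and beta_create_PredictionService_stub), which was removed from later grpcio releases. On recent grpcio and tensorflow-serving-api versions, the equivalent setup is:

channel = grpc.insecure_channel('%s:%d' % (FLAGS.host, FLAGS.port))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)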
Example 8: _do_local_inference
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def _do_local_inference(host, port, serialized_examples, model_name):
    """Performs inference on a model hosted by the host:port server."""
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    # request.model_spec.name = 'chicago_taxi'
    request.model_spec.name = model_name
    request.model_spec.signature_name = 'predict'

    tfproto = tf.contrib.util.make_tensor_proto([serialized_examples],
                                                shape=[len(serialized_examples)],
                                                dtype=tf.string)
    # The name of the input tensor is 'examples' based on
    # https://github.com/tensorflow/tensorflow/blob/r1.9/tensorflow/python/estimator/export/export.py#L290
    request.inputs['examples'].CopyFrom(tfproto)
    print(stub.Predict(request, _LOCAL_INFERENCE_TIMEOUT_SECONDS))
Example 9: grpc_predict_raw
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def grpc_predict_raw(data):
    port = 8500
    channel = grpc.insecure_channel('{host}:{port}'.format(host=host, port=port))
    # channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'textcnn_model'
    request.model_spec.signature_name = "serving_default"

    tensor_protos = {
        # requests are sent one example at a time
        'sentence': tf.make_tensor_proto(data['sentence'], dtype=tf.int64, shape=[1, 55])
    }
    for k in tensor_protos:
        request.inputs[k].CopyFrom(tensor_protos[k])

    response = stub.Predict(request, 5.0)
    print(response)
Example 10: gRPCPredict
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def gRPCPredict(request: model.Request):
    start = datetime.datetime.now()
    stub = prediction_service_pb2_grpc.PredictionServiceStub(
        grpc.insecure_channel(f"{SERVING_HOST}:{SERVING_GRPC_PORT}")
    )
    predictRequest = predict_pb2.PredictRequest()
    predictRequest.model_spec.name = model_name
    predictRequest.inputs['x'].CopyFrom(
        make_tensor_proto(
            request.instances,
            shape=[len(request.instances), 1]
        )
    )
    predictResult = stub.Predict(predictRequest, PREDICT_TIMEOUT)
    return {
        'predictions': list(predictResult.outputs['y'].float_val),
        'meta': {
            'model_name': model_name,
            'duration': util.millis_interval(start, datetime.datetime.now()),
            'timestamp': datetime.datetime.now().timestamp(),
            'jetson_model': jetson_model
        }
    }
Example 11: prepare_grpc_request
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def prepare_grpc_request(model_name, signature_name, input_dict):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = signature_name
    for key in input_dict:
        request.inputs[key].CopyFrom(
            make_tensor_proto([input_dict[key]]))
    return request
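A hedged usage sketch for this helper; the model name, signature name, input key, and server address are assumptions for illustration:

input_dict = {'input_1': [0.1, 0.2, 0.3]}  # hypothetical input
request = prepare_grpc_request('my_model', 'serving_default', input_dict)
channel = grpc.insecure_channel('localhost:8500')
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
response = stub.Predict(request, 5.0)  # 5-second timeout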
Example 12: do_inference
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def do_inference(num_tests, concurrency=1):
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    coord = _Coordinator(num_tests, concurrency)

    for _ in range(num_tests):
        # dummy audio
        duration, sr, n_fft, win_length, hop_length, n_mels, max_db, min_db = 4, 16000, 512, 512, 128, 80, 35, -55
        filename = librosa.util.example_audio_file()
        wav = read_wav(filename, sr=sr, duration=duration)
        mel = wav2melspec_db(wav, sr, n_fft, win_length, hop_length, n_mels)
        mel = normalize_db(mel, max_db=max_db, min_db=min_db)
        mel = mel.astype(np.float32)
        mel = np.expand_dims(mel, axis=0)  # single batch
        n_timesteps = sr // hop_length * duration + 1  # integer division: tensor shapes must be ints

        # build request
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'voice_vector'
        request.model_spec.signature_name = 'predict'
        request.inputs['x'].CopyFrom(
            tf.contrib.util.make_tensor_proto(mel, shape=[1, n_timesteps, n_mels]))

        coord.throttle()

        # send the request asynchronously (recommended)
        result_future = stub.Predict.future(request, 10.0)  # 10-second timeout
        result_future.add_done_callback(_create_rpc_callback(coord))
        # synchronous alternative (not recommended):
        # result = stub.Predict(request, 5.0)

    coord.wait_all_done()
Example 13: generate_grpc_request
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def generate_grpc_request():
    """Generate gRPC inference request with payload."""
    request = predict_pb2.PredictRequest()
    request.model_spec.name = FLAGS.model_name
    request.model_spec.signature_name = 'serving_default'
    image = get_image_payload()
    request.inputs[FLAGS.input_name].CopyFrom(
        tf.contrib.util.make_tensor_proto(
            [image] * FLAGS.batch_size, shape=[FLAGS.batch_size]))
    return request
Example 14: get_fake_request
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def get_fake_request(model_name, data_shape, input_blob, version=None):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    if version is not None:
        request.model_spec.version.value = version
    data = np.ones(shape=data_shape)
    request.inputs[input_blob].CopyFrom(
        make_tensor_proto(data, shape=data.shape))
    return request
Example 15: main
# Required import: from tensorflow_serving.apis import predict_pb2 [as alias]
# Or: from tensorflow_serving.apis.predict_pb2 import PredictRequest [as alias]
def main(_):
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    # Send request
    image = tf.gfile.FastGFile(FLAGS.image, 'rb').read()
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'tensorflow-serving'
    request.model_spec.signature_name = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    request.inputs['image'].CopyFrom(tf.contrib.util.make_tensor_proto(image))
    result = stub.Predict(request, 10.0)  # 10 secs timeout
    print(result)