本文整理汇总了Python中tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub方法的典型用法代码示例。如果您正苦于以下问题:Python prediction_service_pb2_grpc.PredictionServiceStub方法的具体用法?Python prediction_service_pb2_grpc.PredictionServiceStub怎么用?Python prediction_service_pb2_grpc.PredictionServiceStub使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow_serving.apis.prediction_service_pb2_grpc的用法示例。
在下文中一共展示了prediction_service_pb2_grpc.PredictionServiceStub方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from tensorflow_serving.apis import prediction_service_pb2_grpc [as 别名]
# 或者: from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub [as 别名]
def main():
    """Interactive translation client.

    Reads one source sentence per prompt from stdin, sends it to a
    TensorFlow Serving model over gRPC, and prints the translation.
    Loops until interrupted.
    """
    arg_parser = argparse.ArgumentParser(description="Translation client example")
    arg_parser.add_argument("--model_name", required=True,
                            help="model name")
    arg_parser.add_argument("--sentencepiece_model", required=True,
                            help="path to the sentence model")
    arg_parser.add_argument("--host", default="localhost",
                            help="model server host")
    arg_parser.add_argument("--port", type=int, default=9000,
                            help="model server port")
    arg_parser.add_argument("--timeout", type=float, default=10.0,
                            help="request timeout")
    args = arg_parser.parse_args()

    # One channel/stub/tokenizer for the whole session.
    grpc_channel = grpc.insecure_channel("%s:%d" % (args.host, args.port))
    service_stub = prediction_service_pb2_grpc.PredictionServiceStub(grpc_channel)
    tokenizer = pyonmttok.Tokenizer("none", sp_model_path=args.sentencepiece_model)

    while True:
        source_text = input("Source: ")
        translations = translate(
            service_stub, args.model_name, [source_text], tokenizer,
            timeout=args.timeout)
        print("Target: %s" % translations[0])
        print("")
示例2: get_image_quality_predictions
# 需要导入模块: from tensorflow_serving.apis import prediction_service_pb2_grpc [as 别名]
# 或者: from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub [as 别名]
def get_image_quality_predictions(image_path, model_name):
    """Score a single image with a TF Serving quality model and print the result.

    Loads the image, applies MobileNet preprocessing, sends it to the
    'image_quality' signature of *model_name*, and prints the rounded mean
    score as JSON.
    """
    # Load and preprocess the image for MobileNet.
    img = utils.load_image(image_path, target_size=(224, 224))
    img = keras.applications.mobilenet.preprocess_input(img)

    # Build the gRPC stub and the prediction request.
    stub = prediction_service_pb2_grpc.PredictionServiceStub(
        grpc.insecure_channel(f'{TFS_HOST}:{TFS_PORT}'))
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = 'image_quality'
    batch = np.expand_dims(img, 0)  # model expects a batch dimension
    request.inputs['input_image'].CopyFrom(
        tf.contrib.util.make_tensor_proto(batch)
    )

    # 10-second deadline on the RPC.
    response = stub.Predict(request, 10.0)
    mean_score = calc_mean_score(response.outputs['quality_prediction'].float_val)
    result = round(mean_score, 2)
    print(json.dumps({'mean_score_prediction': np.round(result, 3)}, indent=2))
示例3: main
# 需要导入模块: from tensorflow_serving.apis import prediction_service_pb2_grpc [as 别名]
# 或者: from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub [as 别名]
def main(argv):
    """Run a load test against a TPU-backed ModelServer over gRPC or REST."""
    del argv  # unused

    endpoint = FLAGS.tpu
    # If --tpu is not already an http:// or grpc:// address, resolve the TPU
    # name to its master address and swap in the serving port.
    if 'http://' not in FLAGS.tpu and 'grpc://' not in FLAGS.tpu:
        endpoint = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu).master()
        serving_port = '8470' if FLAGS.grpc else '8473'
        endpoint = '{}:{}'.format(endpoint[:-len(':1234')], serving_port)
    # Drop the scheme prefix (e.g. 'grpc://') — assumed to be 7 chars.
    endpoint = endpoint[len('abcd://'):]
    tf.logging.info('ModelServer at: {}'.format(endpoint))

    if FLAGS.grpc:
        stub = prediction_service_pb2_grpc.PredictionServiceStub(
            grpc.insecure_channel(endpoint))
        run_grpc_load_test(FLAGS.num_requests, FLAGS.qps,
                           generate_grpc_request(), stub)
    else:
        run_rest_load_test(FLAGS.num_requests, FLAGS.qps, endpoint,
                           generate_rest_payload())
示例4: prepare_stub_and_request
# 需要导入模块: from tensorflow_serving.apis import prediction_service_pb2_grpc [as 别名]
# 或者: from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub [as 别名]
def prepare_stub_and_request(address, model_name, model_version=None, creds=None, opts=None,
                             request_type=INFERENCE_REQUEST):
    """Create a gRPC stub and a matching request for TF Serving.

    Opens a secure channel when *creds* is given, otherwise an insecure one.
    For MODEL_STATUS_REQUEST returns a ModelService stub with a
    GetModelStatusRequest; for INFERENCE_REQUEST returns a PredictionService
    stub with a PredictRequest carrying the model name (and version, if any).

    Returns:
        (stub, request) tuple; both may be None for an unknown request_type.
    """
    # A bare string in opts is interpreted as an SSL target-name override.
    if opts is not None:
        opts = (('grpc.ssl_target_name_override', opts),)

    channel = (grpc.secure_channel(address, creds, options=opts)
               if creds is not None
               else grpc.insecure_channel(address, options=opts))

    stub, request = None, None
    if request_type == MODEL_STATUS_REQUEST:
        stub = model_service_pb2_grpc.ModelServiceStub(channel)
        request = get_model_status_pb2.GetModelStatusRequest()
    elif request_type == INFERENCE_REQUEST:
        stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
        request = predict_pb2.PredictRequest()
        request.model_spec.name = model_name
        if model_version is not None:
            request.model_spec.version.value = model_version
    return stub, request
示例5: grpc_predict_raw
# 需要导入模块: from tensorflow_serving.apis import prediction_service_pb2_grpc [as 别名]
# 或者: from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub [as 别名]
def grpc_predict_raw(data, server_host=None, port=8500, model_name='textcnn_model',
                     signature_name='serving_default', timeout=5.0):
    """Send one sentence to a TF Serving text-CNN model over gRPC.

    Args:
        data: dict with key 'sentence' mapping to an int sequence of length 55
            (padded token ids — TODO confirm against the exporting code).
        server_host: serving host; defaults to the module-level ``host`` for
            backward compatibility with the original hard-wired behavior.
        port: serving gRPC port (default 8500, TF Serving's standard port).
        model_name: model_spec.name to query.
        signature_name: serving signature to invoke.
        timeout: RPC deadline in seconds.

    Returns:
        The PredictResponse from the server (also printed, as before).
    """
    if server_host is None:
        # Fall back to the module-level `host` the original implementation
        # implicitly depended on.
        server_host = host
    channel = grpc.insecure_channel('{host}:{port}'.format(host=server_host, port=port))
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = signature_name

    # One-sentence-per-request batching: shape [1, 55].
    tensor_protos = {
        'sentence': tf.make_tensor_proto(data['sentence'], dtype=tf.int64, shape=[1, 55])
    }
    for key, proto in tensor_protos.items():
        request.inputs[key].CopyFrom(proto)

    response = stub.Predict(request, timeout)
    print(response)
    return response
示例6: gRPCPredict
# 需要导入模块: from tensorflow_serving.apis import prediction_service_pb2_grpc [as 别名]
# 或者: from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub [as 别名]
def gRPCPredict(request: model.Request):
    """Run a batch of instances through TF Serving and return predictions
    plus timing metadata.

    Sends request.instances as an [N, 1] tensor named 'x' and reads the
    float values of output 'y'.
    """
    start = datetime.datetime.now()

    channel = grpc.insecure_channel(f"{SERVING_HOST}:{SERVING_GRPC_PORT}")
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    grpc_request = predict_pb2.PredictRequest()
    grpc_request.model_spec.name = model_name
    # Column vector: one instance per row.
    input_proto = make_tensor_proto(
        request.instances,
        shape=[len(request.instances), 1]
    )
    grpc_request.inputs['x'].CopyFrom(input_proto)

    result = stub.Predict(grpc_request, PREDICT_TIMEOUT)

    now = datetime.datetime.now()
    return {
        'predictions': list(result.outputs['y'].float_val),
        'meta': {
            'model_name': model_name,
            'duration': util.millis_interval(start, now),
            'timestamp': now.timestamp(),
            'jetson_model': jetson_model
        }
    }
示例7: _create_stub
# 需要导入模块: from tensorflow_serving.apis import prediction_service_pb2_grpc [as 别名]
# 或者: from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub [as 别名]
def _create_stub(server):
    """Return a PredictionService stub over an insecure channel to *server*."""
    return prediction_service_pb2_grpc.PredictionServiceStub(
        grpc.insecure_channel(server))
示例8: __init__
# 需要导入模块: from tensorflow_serving.apis import prediction_service_pb2_grpc [as 别名]
# 或者: from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub [as 别名]
def __init__(self, endpoint: Text, model_name: Text):
    """Open a channel to *endpoint* and build model/prediction service stubs.

    The grpc channel object closes (unsubscribes) itself automatically on
    deletion, so no explicit cleanup in __del__ is needed.
    """
    channel = grpc.insecure_channel(endpoint)
    self._channel = channel
    self._model_name = model_name
    self._model_service = model_service_pb2_grpc.ModelServiceStub(channel)
    self._prediction_service = (
        prediction_service_pb2_grpc.PredictionServiceStub(channel))
示例9: make_request
# 需要导入模块: from tensorflow_serving.apis import prediction_service_pb2_grpc [as 别名]
# 或者: from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub [as 别名]
def make_request(image_path, server):
    """Send one image to the CRNN serving model.

    :param image_path: path to the image file on disk
    :param server: 'host:port' address of the TF Serving instance
    :return: the PredictResponse, or None if the RPC failed
    """
    stub = prediction_service_pb2_grpc.PredictionServiceStub(
        grpc.insecure_channel(server))

    # Read, resize to the model's input size, and scale pixels to [-1, 1].
    img = cv2.imread(image_path, cv2.IMREAD_COLOR)
    img = cv2.resize(img, (CFG.ARCH.INPUT_SIZE[0], CFG.ARCH.INPUT_SIZE[1]),
                     interpolation=cv2.INTER_LINEAR)
    img = np.array(img, np.float32) / 127.5 - 1.0
    batch = np.array([img], dtype=np.float32)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'crnn'
    request.model_spec.signature_name = sm.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    request.inputs['input_tensor'].CopyFrom(make_tensor_proto(
        batch, shape=[1, CFG.ARCH.INPUT_SIZE[1], CFG.ARCH.INPUT_SIZE[0], 3]))

    # Best-effort: log any failure and signal it with None.
    try:
        return stub.Predict(request, 10.0)
    except Exception as err:
        print(err)
        return None
示例10: __init__
# 需要导入模块: from tensorflow_serving.apis import prediction_service_pb2_grpc [as 别名]
# 或者: from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub [as 别名]
def __init__(self,
             host,
             port,
             model_name,
             preprocessor,
             postprocessor,
             bpe_codes):
    """Build a translation client: gRPC stub plus pre/post-processing hooks.

    Loads the BPE merge table from *bpe_codes* eagerly at construction time.
    """
    grpc_channel = grpc.insecure_channel("%s:%d" % (host, port))
    self.stub = prediction_service_pb2_grpc.PredictionServiceStub(grpc_channel)
    self.model_name = model_name
    self.preprocessor = preprocessor
    self.postprocessor = postprocessor
    with open(bpe_codes) as codes_file:
        self.bpe = apply_bpe.BPE(codes_file)
示例11: __init__
# 需要导入模块: from tensorflow_serving.apis import prediction_service_pb2_grpc [as 别名]
# 或者: from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub [as 别名]
def __init__(self):
    """Initialize load-test state: counters, image list, stub, and RNG seed."""
    self.thread_lock = threading.Lock()  # guards the shared counters below
    self.num_completed_requests = 0
    self.num_failed_requests = 0
    self.latencies = []

    # Sorted listing makes the workload reproducible across runs.
    self.file_list = get_files_in_directory_sorted(FLAGS.image_directory)
    self.num_images = len(self.file_list)

    self.stub = prediction_service_pb2_grpc.PredictionServiceStub(
        grpc.insecure_channel(FLAGS.server))

    # Fix random seed so that sequence of images sent to server is
    # deterministic.
    random.seed(RANDOM_SEED)
开发者ID:GoogleCloudPlatform,项目名称:PerfKitBenchmarker,代码行数:16,代码来源:tensorflow_serving_client_workload.py