This article collects typical usage examples of the Python grpc.insecure_channel method. If you are wondering how grpc.insecure_channel is used, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the grpc module to which the method belongs.
The section below presents 15 code examples of grpc.insecure_channel, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
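Before the project-specific examples, here is a minimal sketch of the common pattern: open a plaintext channel with grpc.insecure_channel, wrap it in a generated stub, and issue an RPC. The GreeterStub and HelloRequest names are assumptions borrowed from the standard gRPC hello-world tutorial, not from the examples below.

import grpc
import helloworld_pb2
import helloworld_pb2_grpc

# The channel is a context manager and is closed automatically on exit.
with grpc.insecure_channel('localhost:50051') as channel:
    stub = helloworld_pb2_grpc.GreeterStub(channel)
    # Unary call with a 5-second deadline.
    reply = stub.SayHello(helloworld_pb2.HelloRequest(name='world'), timeout=5)
    print(reply.message)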
Example 1: run
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def run(host, port, api_key, auth_token, timeout, use_tls):
    """Makes a basic ListShelves call against a gRPC Bookstore server."""
    if use_tls:
        with open('../roots.pem', 'rb') as f:
            creds = grpc.ssl_channel_credentials(f.read())
        channel = grpc.secure_channel('{}:{}'.format(host, port), creds)
    else:
        channel = grpc.insecure_channel('{}:{}'.format(host, port))

    stub = bookstore_pb2_grpc.BookstoreStub(channel)

    metadata = []
    if api_key:
        metadata.append(('x-api-key', api_key))
    if auth_token:
        metadata.append(('authorization', 'Bearer ' + auth_token))

    shelves = stub.ListShelves(empty_pb2.Empty(), timeout, metadata=metadata)
    print('ListShelves: {}'.format(shelves))
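A hypothetical command-line entry point for the function above; the flag names and defaults are illustrative assumptions and not part of the original sample:

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Bookstore gRPC client')
    parser.add_argument('--host', default='localhost')
    parser.add_argument('--port', type=int, default=8000)
    parser.add_argument('--api_key', default=None)
    parser.add_argument('--auth_token', default=None)
    parser.add_argument('--timeout', type=float, default=10.0)
    parser.add_argument('--use_tls', action='store_true')
    args = parser.parse_args()
    run(args.host, args.port, args.api_key, args.auth_token, args.timeout, args.use_tls)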
Example 2: get_image_quality_predictions
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def get_image_quality_predictions(image_path, model_name):
    # Load and preprocess image
    image = utils.load_image(image_path, target_size=(224, 224))
    image = keras.applications.mobilenet.preprocess_input(image)

    # Run through model
    target = f'{TFS_HOST}:{TFS_PORT}'
    channel = grpc.insecure_channel(target)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = 'image_quality'
    request.inputs['input_image'].CopyFrom(
        tf.contrib.util.make_tensor_proto(np.expand_dims(image, 0))
    )

    response = stub.Predict(request, 10.0)
    result = round(calc_mean_score(response.outputs['quality_prediction'].float_val), 2)
    print(json.dumps({'mean_score_prediction': np.round(result, 3)}, indent=2))
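Note that tf.contrib was removed in TensorFlow 2.x; if this snippet is adapted to TF 2, the same tensor proto can be built with tf.make_tensor_proto (a hedged substitution, everything else unchanged):

request.inputs['input_image'].CopyFrom(
    tf.make_tensor_proto(np.expand_dims(image, 0))
)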
Example 3: worker
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def worker(port, queue, num_concurrent_streams, num_requests_per_stream,
           num_rounds, message_size, load_type):
    with grpc.insecure_channel("localhost:{}".format(port)) as channel:
        stub = GreeterStub(channel)
        if load_type == "unary":
            load_fn = do_load_unary
        else:
            raise ValueError(f"Unknown load type: {load_type}")

        for _ in range(num_rounds):
            start = time.time()
            task_results = Queue()
            for _ in range(num_concurrent_streams):
                load_fn(task_results, stub, num_requests_per_stream, message_size)
            results = []
            for _ in range(num_concurrent_streams):
                results.append(task_results.get())
            end = time.time()
            rps = num_concurrent_streams * num_requests_per_stream / (end - start)
            queue.put(rps)
            queue.put(results)

        queue.close()
        queue.join_thread()
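The helper do_load_unary referenced above is not part of this excerpt. A minimal sketch of what such a helper might look like, assuming a helloworld-style Greeter service with a unary SayHello method and a generated HelloRequest message (both assumptions for illustration):

def do_load_unary(task_results, stub, num_requests, message_size):
    # Fire num_requests blocking unary calls and report the elapsed wall time.
    payload = 'x' * message_size
    start = time.time()
    for _ in range(num_requests):
        stub.SayHello(HelloRequest(name=payload))
    task_results.put(time.time() - start)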
Example 4: grpc_channel
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def grpc_channel(port_fixture_name, channel_arg_name="channel"):
    def decorator(func):
        if hasattr(func, "__parallelized__") and func.__parallelized__:
            raise TypeError("Cannot pass gRPC channel to already parallelized test, grpc_client_parallelize should "
                            "be the last decorator in chain")

        @forge.compose(
            forge.copy(func),
            forge.modify(channel_arg_name, name=port_fixture_name, interface_name="port_fixture_value"),
        )
        def new_func(*, port_fixture_value, **kwargs):
            import grpc
            with grpc.insecure_channel('127.0.0.1:{}'.format(port_fixture_value)) as channel:
                func(**kwargs, channel=channel)

        return new_func
    return decorator
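A hypothetical pytest-style test using this decorator, where greeter_port is assumed to be a fixture that yields the port of a running server and GreeterStub/HelloRequest are assumed generated bindings:

@grpc_channel("greeter_port")
def test_say_hello(channel):
    # The decorator resolves the greeter_port fixture, dials 127.0.0.1:<port>,
    # and passes the open channel in as the `channel` argument.
    stub = GreeterStub(channel)
    reply = stub.SayHello(HelloRequest(name="world"))
    assert reply.message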
Example 5: purerpc_channel
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def purerpc_channel(port_fixture_name, channel_arg_name="channel"):
    def decorator(corofunc):
        if not inspect.iscoroutinefunction(corofunc):
            raise TypeError("Expected coroutine function")

        @forge.compose(
            forge.copy(corofunc),
            forge.modify(channel_arg_name, name=port_fixture_name, interface_name="port_fixture_value"),
        )
        async def new_corofunc(*, port_fixture_value, **kwargs):
            import purerpc
            async with purerpc.insecure_channel("127.0.0.1", port_fixture_value) as channel:
                await corofunc(**kwargs, channel=channel)

        return new_corofunc
    return decorator
Example 6: main
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def main():
    parser = argparse.ArgumentParser(description="Translation client example")
    parser.add_argument("--model_name", required=True,
                        help="model name")
    parser.add_argument("--sentencepiece_model", required=True,
                        help="path to the sentence model")
    parser.add_argument("--host", default="localhost",
                        help="model server host")
    parser.add_argument("--port", type=int, default=9000,
                        help="model server port")
    parser.add_argument("--timeout", type=float, default=10.0,
                        help="request timeout")
    args = parser.parse_args()

    channel = grpc.insecure_channel("%s:%d" % (args.host, args.port))
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    tokenizer = pyonmttok.Tokenizer("none", sp_model_path=args.sentencepiece_model)

    while True:
        text = input("Source: ")
        output = translate(stub, args.model_name, [text], tokenizer, timeout=args.timeout)
        print("Target: %s" % output[0])
        print("")
Example 7: get_channel
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def get_channel(self):
    '''
    Return a Tiller channel
    '''
    tiller_ip = self._get_tiller_ip()
    tiller_port = self._get_tiller_port()
    try:
        LOG.debug(
            'Tiller getting gRPC insecure channel at %s:%s '
            'with options: [grpc.max_send_message_length=%s, '
            'grpc.max_receive_message_length=%s]', tiller_ip, tiller_port,
            MAX_MESSAGE_LENGTH, MAX_MESSAGE_LENGTH)
        return grpc.insecure_channel(
            '%s:%s' % (tiller_ip, tiller_port),
            options=[
                ('grpc.max_send_message_length', MAX_MESSAGE_LENGTH),
                ('grpc.max_receive_message_length', MAX_MESSAGE_LENGTH)
            ])
    except Exception:
        LOG.exception('Failed to initialize grpc channel to tiller.')
        raise ex.ChannelException()
Example 8: get_channel
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def get_channel(self):
    '''
    Return a Tiller channel
    '''
    tiller_ip = self._get_tiller_ip()
    tiller_port = self._get_tiller_port()
    try:
        LOG.debug('Tiller getting gRPC insecure channel at %s:%s '
                  'with options: [grpc.max_send_message_length=%s, '
                  'grpc.max_receive_message_length=%s]',
                  tiller_ip, tiller_port,
                  MAX_MESSAGE_LENGTH, MAX_MESSAGE_LENGTH)
        return grpc.insecure_channel(
            '%s:%s' % (tiller_ip, tiller_port),
            options=[
                ('grpc.max_send_message_length', MAX_MESSAGE_LENGTH),
                ('grpc.max_receive_message_length', MAX_MESSAGE_LENGTH)
            ]
        )
    except Exception:
        raise ex.ChannelException()
Example 9: main
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def main(argv):
    del argv

    tpu_address = FLAGS.tpu
    if not any(pref in FLAGS.tpu for pref in ['http://', 'grpc://']):
        tpu_address = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu).master()
        tpu_address = '{}:{}'.format(tpu_address[:-len(':1234')],
                                     '8470' if FLAGS.grpc else '8473')
    tpu_address = tpu_address[len('abcd://'):]
    tf.logging.info('ModelServer at: {}'.format(tpu_address))

    if FLAGS.grpc:
        grpc_channel = grpc.insecure_channel(tpu_address)
        stub = prediction_service_pb2_grpc.PredictionServiceStub(grpc_channel)
        run_grpc_load_test(FLAGS.num_requests, FLAGS.qps, generate_grpc_request(),
                           stub)
    else:
        payload = generate_rest_payload()
        run_rest_load_test(FLAGS.num_requests, FLAGS.qps, tpu_address, payload)
Example 10: prepare_stub_and_request
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def prepare_stub_and_request(address, model_name, model_version=None, creds=None, opts=None,
                             request_type=INFERENCE_REQUEST):
    if opts is not None:
        opts = (('grpc.ssl_target_name_override', opts),)
    if creds is not None:
        channel = grpc.secure_channel(address, creds, options=opts)
    else:
        channel = grpc.insecure_channel(address, options=opts)

    request = None
    stub = None
    if request_type == MODEL_STATUS_REQUEST:
        request = get_model_status_pb2.GetModelStatusRequest()
        stub = model_service_pb2_grpc.ModelServiceStub(channel)
    elif request_type == INFERENCE_REQUEST:
        stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
        request = predict_pb2.PredictRequest()
        request.model_spec.name = model_name
        if model_version is not None:
            request.model_spec.version.value = model_version
    return stub, request
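A hedged usage sketch of the helper above, assuming a TensorFlow Serving endpoint at localhost:9000, a model named 'resnet' with an input tensor called 'input', and a NumPy array already prepared as batch (all names are illustrative assumptions):

stub, request = prepare_stub_and_request('localhost:9000', 'resnet', model_version=1)
# Attach the input tensor and send a unary Predict call with a 10-second deadline.
request.inputs['input'].CopyFrom(tf.make_tensor_proto(batch, shape=list(batch.shape)))
result = stub.Predict(request, 10.0)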
Example 11: run
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def run():
    channel = grpc.insecure_channel('localhost:50051')
    try:
        grpc.channel_ready_future(channel).result(timeout=10)
    except grpc.FutureTimeoutError:
        sys.exit('Error connecting to server')
    else:
        stub = users_service.UsersStub(channel)
        metadata = [('ip', '127.0.0.1')]
        response = stub.CreateUser(
            users_messages.CreateUserRequest(username='tom'),
            metadata=metadata,
        )
        if response:
            print("User created:", response.user.username)

        request = users_messages.GetUsersRequest(
            user=[users_messages.User(username="alexa", user_id=1),
                  users_messages.User(username="christie", user_id=1)]
        )
        response = stub.GetUsers(request, timeout=0.00001)
        for resp in response:
            print(resp)
Example 12: run
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def run():
    channel = grpc.insecure_channel('localhost:50051')
    try:
        grpc.channel_ready_future(channel).result(timeout=10)
    except grpc.FutureTimeoutError:
        sys.exit('Error connecting to server')
    else:
        stub = users_service.UsersStub(channel)
        metadata = [('ip', '127.0.0.1')]
        response = stub.CreateUser(
            users_messages.CreateUserRequest(username='tom'),
            metadata=metadata,
        )
        if response:
            print("User created:", response.user.username)

        request = users_messages.GetUsersRequest(
            user=[users_messages.User(username="alexa", user_id=1),
                  users_messages.User(username="christie", user_id=1)]
        )
        response = stub.GetUsers(request)
        for resp in response:
            print(resp)
Example 13: __init__
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def __init__(self, env_name=ENVIRONMENT, strength=MUTATION_STRENGTH, host=HOST):
    """Creates a Worker instance.

    Args:
        env_name (string): The valid gym environment name.
        host (string): The hostname of the master server.
        strength (float): The genetic mutation strength.
    """
    self.client = NeuroStub(grpc.insecure_channel(host))
    self.env = gym.make(env_name)
    self.policy = Policy(self.env.action_space.n)
    self.strength = strength

    print("Host:", host)
    print("Environment:", env_name)
    print("Mutation Strength:", strength)
Example 14: refresh_tags
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def refresh_tags(self):
    channel = grpc.insecure_channel('unix://' + SOCKET_PATH)
    stub = PodResourcesListerStub(channel)
    response = stub.List(ListPodResourcesRequest())

    new_tags = {}
    for pod_res in response.pod_resources:
        for container in pod_res.containers:
            for device in container.devices:
                if device.resource_name != "nvidia.com/gpu":
                    continue
                pod_name = pod_res.name
                kube_namespace = pod_res.namespace
                kube_container_name = container.name
                for device_id in device.device_ids:
                    # These are the tag names that datadog seems to use
                    new_tags[device_id] = [
                        "pod_name:" + pod_name,
                        "kube_namespace:" + kube_namespace,
                        "kube_container_name:" + kube_container_name,
                    ]

    with self.lock:
        self.known_tags = new_tags
Example 15: _setup
# Required import: import grpc [as alias]
# Or: from grpc import insecure_channel [as alias]
def _setup(self, host, port, uri, pre_ping=False):
    """
    Create a grpc channel and a stub
    :raises: NotConnectError
    """
    self._uri = set_uri(host, port, uri)
    self._channel = grpc.insecure_channel(
        self._uri,
        options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
                 (cygrpc.ChannelArgKey.max_receive_message_length, -1),
                 ('grpc.enable_retries', 1),
                 ('grpc.keepalive_time_ms', 55000)]
        # (b'grpc.enable_http_proxy', 0)]
    )
    self._stub = milvus_pb2_grpc.MilvusServiceStub(self._channel)
    self.status = Status()