This article collects typical usage examples of the Python method tensorflow.make_ndarray. If you are wondering how exactly to use tensorflow.make_ndarray, how to call it, or what real code using it looks like, the curated examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.
Below are 15 code examples of tensorflow.make_ndarray, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
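Before the examples, a minimal sketch of what tf.make_ndarray does: it converts a TensorProto (for example one produced by tf.make_tensor_proto, or one returned in a TensorFlow Serving response) back into a NumPy array. The snippet below is an illustrative round trip, assuming only that TensorFlow and NumPy are installed; it is not taken from any of the projects quoted here.

import numpy as np
import tensorflow as tf

# Build a TensorProto from a NumPy array, then convert it back.
arr = np.arange(6, dtype=np.float32).reshape(2, 3)
proto = tf.make_tensor_proto(arr)      # numpy -> TensorProto
restored = tf.make_ndarray(proto)      # TensorProto -> numpy

assert isinstance(restored, np.ndarray)
assert np.array_equal(arr, restored)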
Example 1: make_grpc_request_fn
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def make_grpc_request_fn(servable_name, server, timeout_secs):
  """Wraps function to make grpc requests with runtime args."""
  stub = _create_stub(server)

  def _make_grpc_request(examples):
    """Builds and sends request to TensorFlow model server."""
    request = predict_pb2.PredictRequest()
    request.model_spec.name = servable_name
    request.inputs["input"].CopyFrom(
        tf.contrib.util.make_tensor_proto(
            [ex.SerializeToString() for ex in examples], shape=[len(examples)]))
    response = stub.Predict(request, timeout_secs)
    # Convert the returned TensorProtos back into NumPy arrays.
    outputs = tf.make_ndarray(response.outputs["outputs"])
    scores = tf.make_ndarray(response.outputs["scores"])
    assert len(outputs) == len(scores)
    return [{
        "outputs": outputs[i],
        "scores": scores[i]
    } for i in range(len(outputs))]

  return _make_grpc_request
Example 2: make_grpc_request_fn
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def make_grpc_request_fn(servable_name, server, timeout_secs):
  """Wraps function to make grpc requests with runtime args."""
  stub = _create_stub(server)

  def _make_grpc_request(examples):
    """Builds and sends request to TensorFlow model server."""
    request = predict_pb2.PredictRequest()
    request.model_spec.name = servable_name
    request.inputs["input"].CopyFrom(
        tf.make_tensor_proto(
            [ex.SerializeToString() for ex in examples], shape=[len(examples)]))
    response = stub.Predict(request, timeout_secs)
    outputs = tf.make_ndarray(response.outputs["outputs"])
    scores = tf.make_ndarray(response.outputs["scores"])
    assert len(outputs) == len(scores)
    return [{  # pylint: disable=g-complex-comprehension
        "outputs": output,
        "scores": score
    } for output, score in zip(outputs, scores)]

  return _make_grpc_request
Example 3: extract_prediction
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def extract_prediction(result):
  """Parses a translation result.

  Args:
    result: A `PredictResponse` proto.

  Returns:
    A generator over the hypotheses.
  """
  batch_lengths = tf.make_ndarray(result.outputs["length"])
  batch_predictions = tf.make_ndarray(result.outputs["tokens"])
  for hypotheses, lengths in zip(batch_predictions, batch_lengths):
    # Only consider the first hypothesis (the best one).
    best_hypothesis = hypotheses[0].tolist()
    best_length = lengths[0]
    # Drop the trailing end-of-sentence token if present.
    if best_hypothesis[best_length - 1] == b"</s>":
      best_length -= 1
    yield best_hypothesis[:best_length]
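A possible usage sketch for the function above, assuming a gRPC stub and a populated PredictRequest built roughly as in Examples 1 and 2; the names stub and request are placeholders, not part of the original snippet.

# Hypothetical usage; `stub` and `request` are assumed to exist already.
response = stub.Predict(request, 10.0)
for tokens in extract_prediction(response):
    # Hypotheses are byte strings; decode them for display.
    print(b" ".join(tokens).decode("utf-8"))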
Example 4: test_predict_successful
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def test_predict_successful(mocker, get_grpc_service_for_predict,
                            get_fake_model):
    results_mock = mocker.patch(
        'ie_serving.server.request.Request.wait_for_result')
    expected_response = np.ones(shape=(2, 2))
    results_mock.return_value = ({'output': expected_response}, 0)

    request = get_fake_request(model_name='test',
                               data_shape=(1, 1, 1), input_blob='input')

    grpc_server = get_grpc_service_for_predict
    rpc = grpc_server.invoke_unary_unary(
        PREDICT_SERVICE.methods_by_name['Predict'],
        (),
        request, None)
    rpc.initial_metadata()
    response, trailing_metadata, code, details = rpc.termination()
    # Decode the TensorProto in the response back into a NumPy array.
    encoded_response = make_ndarray(response.outputs['output'])

    assert get_fake_model.default_version == response.model_spec.version.value
    assert grpc.StatusCode.OK == code
    assert expected_response.shape == encoded_response.shape
Example 5: test_predict_successful_version
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def test_predict_successful_version(mocker, get_grpc_service_for_predict):
    results_mock = mocker.patch(
        'ie_serving.server.request.Request.wait_for_result')
    expected_response = np.ones(shape=(2, 2))
    results_mock.return_value = ({'output': expected_response}, None)

    requested_version = 1
    request = get_fake_request(model_name='test', data_shape=(1, 1, 1),
                               input_blob='input', version=requested_version)

    grpc_server = get_grpc_service_for_predict
    rpc = grpc_server.invoke_unary_unary(
        PREDICT_SERVICE.methods_by_name['Predict'],
        (),
        request, None)
    rpc.initial_metadata()
    response, trailing_metadata, code, details = rpc.termination()
    encoded_response = make_ndarray(response.outputs['output'])

    assert requested_version == response.model_spec.version.value
    assert grpc.StatusCode.OK == code
    assert expected_response.shape == encoded_response.shape
Example 6: test_prepare_output_as_list
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def test_prepare_output_as_list(serialization_function, outputs_names, shapes,
                                types):
    outputs = {}
    x = 0
    for key, value in outputs_names.items():
        outputs[value] = np.ones(shape=shapes[x], dtype=types[x])
        x += 1

    output = SERIALIZATION_FUNCTIONS[serialization_function](
        inference_output=outputs, model_available_outputs=outputs_names)

    x = 0
    for key, value in outputs_names.items():
        # Each serialized output should round-trip to the original shape and dtype.
        temp_output = make_ndarray(output.outputs[key])
        assert temp_output.shape == shapes[x]
        assert temp_output.dtype == types[x]
        x += 1
Example 7: cal_tensor_value
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def cal_tensor_value(tensor):  # type: (tensorflow.Tensor) -> Union[np.ndarray, None]
    if _count_input_nodes(tensor) < 0:
        return None

    node = tensor.op
    if node.type in ["Const", "ConstV2"]:
        # Constant tensors can be read directly from the node's value attribute.
        make_ndarray = tensorflow.make_ndarray
        np_arr = make_ndarray(node.get_attr("value"))
        return np_arr
    else:
        try:
            # Otherwise evaluate the tensor in a session (TF1 or TF2 compat).
            cls_sess = tensorflow.Session if hasattr(tensorflow, 'Session') \
                else tensorflow.compat.v1.Session
            with cls_sess(graph=node.graph) as sess:
                np_arr = sess.run(tensor)
                return np_arr
        except (ValueError, tensorflow.errors.InvalidArgumentError,
                tensorflow.errors.OpError):
            return None
Example 8: histograms_impl
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def histograms_impl(self, tag, run, downsample_to=50):
    """Result of the form `(body, mime_type)`, or `ValueError`.

    At most `downsample_to` events will be returned. If this value is
    `None`, then no downsampling will be performed.
    """
    try:
        tensor_events = self._multiplexer.Tensors(run, tag)
    except KeyError:
        raise ValueError('No histogram tag %r for run %r' % (tag, run))
    events = [[ev.wall_time, ev.step, tf.make_ndarray(ev.tensor_proto).tolist()]
              for ev in tensor_events]
    if downsample_to is not None and len(events) > downsample_to:
        # Deterministic downsampling: sample with a fixed seed, keep time order.
        indices = sorted(random.Random(0).sample(list(range(len(events))),
                                                 downsample_to))
        events = [events[i] for i in indices]
    return (events, 'application/json')
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 19, Source file: histograms_plugin.py
Example 9: deserialize_tensor_value
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def deserialize_tensor_value(value_proto):
  """Deserializes a tensor value from `executor_pb2.Value`.

  Args:
    value_proto: An instance of `executor_pb2.Value`.

  Returns:
    A tuple `(value, type_spec)`, where `value` is a Numpy array that
    represents the deserialized value, and `type_spec` is an instance of
    `tff.TensorType` that represents its type.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the value is malformed.
  """
  py_typecheck.check_type(value_proto, executor_pb2.Value)
  which_value = value_proto.WhichOneof('value')
  if which_value != 'tensor':
    raise ValueError('Not a tensor value: {}'.format(which_value))
  # TODO(b/134543154): Find some way of creating the `TensorProto` using a
  # proper public interface rather than creating a dummy value that we will
  # overwrite right away.
  tensor_proto = tf.make_tensor_proto(values=0)
  if not value_proto.tensor.Unpack(tensor_proto):
    raise ValueError('Unable to unpack the received tensor value.')
  # Convert the unpacked TensorProto into a NumPy array and recover its type.
  tensor_value = tf.make_ndarray(tensor_proto)
  value_type = computation_types.TensorType(
      dtype=tf.dtypes.as_dtype(tensor_proto.dtype),
      shape=tf.TensorShape(tensor_proto.tensor_shape))
  return tensor_value, value_type
Example 10: infer
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def infer(img, input_tensor, grpc_stub, model_spec_name,
          model_spec_version, output_tensors):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_spec_name
    if model_spec_version is not None:
        request.model_spec.version.value = model_spec_version
    print("input shape ", img.shape)
    request.inputs[input_tensor].CopyFrom(
        make_tensor_proto(img, shape=list(img.shape)))
    result = grpc_stub.Predict(request, 10.0)
    # Convert each requested output from TensorProto to a NumPy array.
    data = {}
    for output_tensor in output_tensors:
        data[output_tensor] = make_ndarray(result.outputs[output_tensor])
    return data
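For context, a hedged sketch of how such a gRPC stub could be created with the TensorFlow Serving Python API; the server address and the commented-out call are placeholders, not part of the original example.

import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc

# Assumed setup: open an insecure channel to a locally running model server.
channel = grpc.insecure_channel("localhost:9000")
grpc_stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
# outputs = infer(img, "input", grpc_stub, "my_model", None, ["output"])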
Example 11: extract_values
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def extract_values(reader, tag):
    events = reader.Tensors('run', tag)
    steps = [event.step for event in events]
    times = [event.wall_time for event in events]
    values = [tf.make_ndarray(event.tensor_proto) for event in events]
    return steps, times, values
Example 12: extract_scalar
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def extract_scalar(multiplexer, run_name, tag):
    tensor_events = multiplexer.Tensors(run_name, tag)
    data = {'wall_time': [], 'step': [], 'value': []}
    for event in tensor_events:
        data['wall_time'].append(event.wall_time)
        data['step'].append(event.step)
        # Scalars are stored as rank-0 tensors; .item() extracts the Python value.
        data['value'].append(tf.make_ndarray(event.tensor_proto).item())
    return pd.DataFrame(data)
Example 13: tf2summary2dict
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def tf2summary2dict(path, tags=[]):
    serialized_examples = tf.data.TFRecordDataset(path)
    data = {}
    for serialized_example in serialized_examples:
        event = event_pb2.Event.FromString(serialized_example.numpy())
        for value in event.summary.value:
            # An empty tags list means "collect everything".
            if value.tag in tags or tags == []:
                t = float(tf.make_ndarray(value.tensor))
                try:
                    data[value.tag].append([t, event.step])
                except KeyError:
                    data[value.tag] = [[t, event.step]]
    return data
Example 14: convert_tensor_to_gif_summary
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def convert_tensor_to_gif_summary(summ):
    if isinstance(summ, bytes):
        summary_proto = tf.Summary()
        summary_proto.ParseFromString(summ)
        summ = summary_proto

    summary = tf.Summary()
    for value in summ.value:
        tag = value.tag
        try:
            images_arr = tf.make_ndarray(value.tensor)
        except TypeError:
            # Not a tensor value; pass the original image summary through unchanged.
            summary.value.add(tag=tag, image=value.image)
            continue

        if len(images_arr.shape) == 5:
            # Concatenate batched sequences side by side along the width axis.
            images_arr = np.concatenate(list(images_arr), axis=-2)
        if len(images_arr.shape) != 4:
            raise ValueError('Tensors must be 4-D or 5-D for gif summary.')
        channels = images_arr.shape[-1]
        if channels < 1 or channels > 4:
            raise ValueError('Tensors must have 1, 2, 3, or 4 color channels for gif summary.')

        # Encode the (time, height, width, channels) array as an animated gif.
        encoded_image_string = ffmpeg_gif.encode_gif(images_arr, fps=4)

        image = tf.Summary.Image()
        image.height = images_arr.shape[-3]
        image.width = images_arr.shape[-2]
        image.colorspace = channels  # 1: grayscale, 2: grayscale + alpha, 3: RGB, 4: RGBA
        image.encoded_image_string = encoded_image_string
        summary.value.add(tag=tag, image=image)
    return summary
Example 15: _process_string_tensor_event
# Required module: import tensorflow [as alias]
# Or: from tensorflow import make_ndarray [as alias]
def _process_string_tensor_event(self, event):
    """Convert a TensorEvent into a JSON-compatible response."""
    string_arr = tf.make_ndarray(event.tensor_proto)
    text = string_arr.astype(np.dtype(str)).tostring()
    return {
        'wall_time': event.wall_time,
        'step': event.step,
        'text': text,
    }