This article collects typical usage examples of the Python function tensorflow.python.framework.tensor_util.make_tensor_proto. If you have been wondering what make_tensor_proto does, how to call it, or what real-world uses look like, the curated code samples below should help.
The following shows 15 code examples of the make_tensor_proto function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
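Before the examples, here is a minimal sketch of the basic round trip between NumPy data and a TensorProto. This is an illustrative snippet rather than one of the examples below; the variable names are arbitrary and it assumes a TensorFlow 1.x environment where tensor_util is importable.

import numpy as np
from tensorflow.python.framework import tensor_util

# Pack a NumPy array into a TensorProto; dtype and shape are inferred.
features = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
proto = tensor_util.make_tensor_proto(features)

# A scalar can also be broadcast to an explicit shape.
zeros = tensor_util.make_tensor_proto(0, shape=[3])

# MakeNdarray converts a TensorProto back into a NumPy array.
restored = tensor_util.MakeNdarray(proto)
assert restored.dtype == np.float32 and restored.shape == (2, 2)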
Example 1: main
# Imports needed by this snippet (predict_pb2 is assumed to be the gRPC
# module generated from the serving service's protobuf definitions).
import grpc
import numpy as np
from PIL import Image
from tensorflow.python.framework import tensor_util
import predict_pb2


def main():
    # Connect with the gRPC server
    server_address = "127.0.0.1:50051"
    request_timeout = 5.0
    channel = grpc.insecure_channel(server_address)
    stub = predict_pb2.PredictionServiceStub(channel)
    # Make request data
    request = predict_pb2.PredictRequest()
    image = Image.open('../mnist_jpgs/4/pic_test1010.png')
    array = np.array(image) / (255 * 1.0)
    samples_features = array.reshape([-1, 784])
    # samples_features = np.array(
    #     [[10, 10, 10, 8, 6, 1, 8, 9, 1], [10, 10, 10, 8, 6, 1, 8, 9, 1]])
    samples_keys = np.array([1])
    # Convert numpy to TensorProto
    request.inputs["features"].CopyFrom(tensor_util.make_tensor_proto(
        samples_features))
    request.inputs["key"].CopyFrom(tensor_util.make_tensor_proto(samples_keys))
    # Invoke gRPC request
    response = stub.Predict(request, request_timeout)
    # Convert TensorProto to numpy
    result = {}
    for k, v in response.outputs.items():
        result[k] = tensor_util.MakeNdarray(v)
    print(result)
Example 2: testQuantizedTypes
def testQuantizedTypes(self):
    # Test with array.
    data = [(21,), (22,), (23,)]

    t = tensor_util.make_tensor_proto(data, dtype=tf.qint32)
    self.assertProtoEquals("""
        dtype: DT_QINT32
        tensor_shape { dim { size: 3 } }
        tensor_content: "\025\000\000\000\026\000\000\000\027\000\000\000"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(tf.qint32.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)

    t = tensor_util.make_tensor_proto(data, dtype=tf.quint8)
    self.assertProtoEquals("""
        dtype: DT_QUINT8
        tensor_shape { dim { size: 3 } }
        tensor_content: "\025\026\027"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(tf.quint8.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)

    t = tensor_util.make_tensor_proto(data, dtype=tf.qint8)
    self.assertProtoEquals("""
        dtype: DT_QINT8
        tensor_shape { dim { size: 3 } }
        tensor_content: "\025\026\027"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(tf.qint8.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)
Example 3: testTensorShapeVerification
def testTensorShapeVerification(self):
    array = np.array([[1], [2]])
    correct_shape = (2, 1)
    incorrect_shape = (1, 2)
    tensor_util.make_tensor_proto(array, shape=correct_shape, verify_shape=True)
    with self.assertRaises(TypeError):
        tensor_util.make_tensor_proto(
            array, shape=incorrect_shape, verify_shape=True)
Example 4: testTransformGraph
def testTransformGraph(self):
    input_graph_def = graph_pb2.GraphDef()

    const_op1 = input_graph_def.node.add()
    const_op1.op = "Const"
    const_op1.name = "const_op1"
    const_op1.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
        type=dtypes.float32.as_datatype_enum))
    const_op1.attr["value"].CopyFrom(
        attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
            [1, 2], dtypes.float32, [1, 2])))

    const_op2 = input_graph_def.node.add()
    const_op2.op = "Const"
    const_op2.name = "const_op2"
    const_op2.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
        type=dtypes.float32.as_datatype_enum))
    const_op2.attr["value"].CopyFrom(
        attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
            [3, 4], dtypes.float32, [1, 2])))

    # Create an add that has two constants as inputs.
    add_op = input_graph_def.node.add()
    add_op.op = "Add"
    add_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
        type=dtypes.float32.as_datatype_enum))
    add_op.name = "add_op"
    add_op.input.extend(["const_op1", "const_op2"])

    # Create a relu that reads from the add.
    relu_op = input_graph_def.node.add()
    relu_op.op = "Relu"
    relu_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
        type=dtypes.float32.as_datatype_enum))
    relu_op.name = "relu_op"
    relu_op.input.extend(["add_op"])

    # We're specifying that add_op is the final output, and so the relu isn't
    # needed.
    input_names = []
    output_names = ["add_op"]
    transforms = ["strip_unused_nodes"]
    transformed_graph_def = TransformGraph(input_graph_def, input_names,
                                           output_names, transforms)

    # We expect that the relu is no longer present after running the transform.
    for node in transformed_graph_def.node:
        self.assertNotEqual("Relu", node.op)
Example 5: convert_variables_to_constants
def convert_variables_to_constants(sess, input_graph_def, output_node_names):
    variable_names = []
    variable_dict_names = []
    for node in input_graph_def.node:
        if node.op == "Assign":
            variable_name = node.input[0]
            variable_dict_names.append(variable_name)
            variable_names.append(variable_name + ":0")
    returned_variables = sess.run(variable_names)
    found_variables = dict(zip(variable_dict_names, returned_variables))
    print("Frozen %d variables." % len(returned_variables))

    inference_graph = extract_sub_graph(input_graph_def, output_node_names)

    output_graph_def = graph_pb2.GraphDef()
    how_many_converted = 0
    for input_node in inference_graph.node:
        output_node = graph_pb2.NodeDef()
        if input_node.name in found_variables:
            output_node.op = "Const"
            output_node.name = input_node.name
            dtype = input_node.attr["dtype"]
            data = found_variables[input_node.name]
            output_node.attr["dtype"].CopyFrom(dtype)
            output_node.attr["value"].CopyFrom(attr_value_pb2.AttrValue(
                tensor=tensor_util.make_tensor_proto(data,
                                                     dtype=dtype.type,
                                                     shape=data.shape)))
            how_many_converted += 1
        else:
            output_node.CopyFrom(input_node)
        output_graph_def.node.extend([output_node])
    print("Converted %d variables to const ops." % how_many_converted)
    return output_graph_def
Example 6: testLowRankSupported
def testLowRankSupported(self):
    t = tensor_util.make_tensor_proto(np.array(7))
    self.assertProtoEquals("""
        dtype: DT_INT64
        tensor_shape {}
        int64_val: 7
        """, t)
Example 7: testNoOutputs
def testNoOutputs(self):
    with session_lib.Session() as sess:
        # Build a function with a single Const node, whose output is ignored.
        fdef = function_pb2.FunctionDef()
        fdef.signature.name = "KernelWithNoOutputs"
        node = node_def_pb2.NodeDef()
        node.op = "Const"
        node.name = "ignored"
        node.attr["dtype"].type = dtypes.int32.as_datatype_enum
        tensor = tensor_util.make_tensor_proto([0], dtype=dtypes.int32, shape=[])
        node.attr["value"].tensor.CopyFrom(tensor)
        fdef.node_def.extend([node])

        # Check that calling the result as a compiled kernel doesn't crash.
        @function.Defun(compiled=True)
        def KernelWithNoOutputs():
            return constant_op.constant(100)

        # Hack to override the definition. By accessing .definition, we force
        # the _DefinedFunction to be initialized internally. Then, we replace
        # its internal FunctionDef proto. We do this hack here because one
        # typically can't construct a KernelWithNoOutputs function via the
        # Defun decorator directly.
        _ = KernelWithNoOutputs.definition
        foo = KernelWithNoOutputs
        foo._definition = fdef

        call = KernelWithNoOutputs()
        sess.run(call, {})
Example 8: testStringWithImplicitRepeat
def testStringWithImplicitRepeat(self):
    t = tensor_util.make_tensor_proto(["f", "g"], shape=[3, 4])
    a = tensor_util.MakeNdarray(t)
    self.assertAllEqual(
        np.array([[b"f", b"g", b"g", b"g"], [b"g", b"g", b"g", b"g"],
                  [b"g", b"g", b"g", b"g"]],
                 dtype=np.object), a)
Example 9: testFloatSizesLessValues
def testFloatSizesLessValues(self):
    t = tensor_util.make_tensor_proto(10.0, shape=[1, 3])
    self.assertProtoEquals("""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 1 } dim { size: 3 } }
        float_val: 10.0
        """, t)
Example 10: testComplexWithImplicitRepeat
def testComplexWithImplicitRepeat(self):
    t = tensor_util.make_tensor_proto((1 + 1j), shape=[3, 4],
                                      dtype=tf.complex64)
    a = tensor_util.MakeNdarray(t)
    self.assertAllClose(np.array([[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
                                  [(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
                                  [(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)]],
                                 dtype=np.complex64), a)
Example 11: testFloatTypesWithImplicitRepeat
def testFloatTypesWithImplicitRepeat(self):
    for dtype, nptype in [
            (tf.float32, np.float32), (tf.float64, np.float64)]:
        t = tensor_util.make_tensor_proto([10.0], shape=[3, 4], dtype=dtype)
        a = tensor_util.MakeNdarray(t)
        self.assertAllClose(np.array([[10.0, 10.0, 10.0, 10.0],
                                      [10.0, 10.0, 10.0, 10.0],
                                      [10.0, 10.0, 10.0, 10.0]], dtype=nptype), a)
Example 12: set_attr_tensor
def set_attr_tensor(node, key, value, dtype, shape=None):
    try:
        node.attr[key].CopyFrom(tf.AttrValue(
            tensor=tensor_util.make_tensor_proto(value,
                                                 dtype=dtype,
                                                 shape=shape)))
    except KeyError:
        pass
Example 13: testShapeEquals
def testShapeEquals(self):
    t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
    self.assertTrue(tensor_util.ShapeEquals(t, [2, 2]))
    self.assertTrue(tensor_util.ShapeEquals(t, (2, 2)))
    self.assertTrue(
        tensor_util.ShapeEquals(t, tensor_util.MakeTensorShapeProto([2, 2])))
    self.assertFalse(tensor_util.ShapeEquals(t, [5, 3]))
    self.assertFalse(tensor_util.ShapeEquals(t, [1, 4]))
    self.assertFalse(tensor_util.ShapeEquals(t, [4]))
Example 14: testLongNpArray
def testLongNpArray(self):
    t = tensor_util.make_tensor_proto(np.array([10, 20, 30]))
    self.assertProtoEquals("""
        dtype: DT_INT64
        tensor_shape { dim { size: 3 } }
        tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.int64, a.dtype)
Example 15: testIntNDefaultType
def testIntNDefaultType(self):
    t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
    self.assertProtoEquals("""
        dtype: DT_INT32
        tensor_shape { dim { size: 2 } dim { size: 2 } }
        tensor_content: "\\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.int32, a.dtype)
    self.assertAllClose(np.array([[10, 20], [30, 40]], dtype=np.int32), a)