本文整理汇总了Python中tensorflow.python.ops.nn.conv2d函数的典型用法代码示例。如果您正苦于以下问题:Python conv2d函数的具体用法?Python conv2d怎么用?Python conv2d使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了conv2d函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: GetParams
def GetParams(self):
  """Test for Constant broadcasting in TF-TRT.

  Builds a three-layer conv/relu chain whose filters are constants created
  in three different ways (scalar broadcast, 1-D reshaped, full 4-D), then
  packages the graph for the TF-TRT integration-test harness.

  Returns:
    A trt_test.TfTrtIntegrationTestParams describing the graph, its input,
    and the expected engine count and output shape.
  """
  dtype = dtypes.float32
  input_name = 'input'
  input_dims = [5, 12, 12, 2]
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    # Scalar constant broadcast to the full 4-D filter shape.
    filt1 = constant_op.constant(
        0.3, shape=(3, 3, 2, 1), dtype=dtype, name='filt1')
    y1 = nn.conv2d(x, filt1, strides=[1, 1, 1, 1], padding='SAME', name='y1')
    z1 = nn.relu(y1, name='z1')
    # 1-D random values (9 elements) reshaped into the 3x3x1x1 filter.
    filt2 = constant_op.constant(
        np.random.randn(9), shape=(3, 3, 1, 1), dtype=dtype, name='filt2')
    y2 = nn.conv2d(z1, filt2, strides=[1, 1, 1, 1], padding='SAME', name='y2')
    z2 = nn.relu(y2, name='z')
    # Already 4-D values with an explicit, matching shape argument.
    filt3 = constant_op.constant(
        np.random.randn(3, 3, 1, 1),
        shape=(3, 3, 1, 1),
        dtype=dtype,
        name='filt3')
    y3 = nn.conv2d(z2, filt3, strides=[1, 1, 1, 1], padding='SAME', name='y3')
    nn.relu(y3, name='output')
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      num_expected_engines=1,
      expected_output_dims=(5, 12, 12, 1),
      allclose_atol=1.e-02,
      allclose_rtol=1.e-02)
示例2: GetParams
def GetParams(self):
  """Build a two-layer 1x1-convolution graph for TF-TRT conversion.

  NOTE(review): the original docstring said "BatchMatMul", but the graph
  below contains only conv2d ops — presumably the 1x1 convolutions get
  converted to matrix multiplies internally, or the docstring was copied
  from a sibling test; verify against the enclosing test class.

  Returns:
    A trt_test.TfTrtIntegrationTestParams describing the graph, its input,
    expected engines, and expected output shape.
  """
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [2, 15, 15, 3]
  g = ops.Graph()
  with g.as_default():
    # Batch dimension is left dynamic (None) in the placeholder shape.
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
    with g.device("/GPU:0"):
      e1 = constant_op.constant(
          np.random.randn(1, 1, 3, 5), name="kernel_1", dtype=dtype)
      e2 = constant_op.constant(
          np.random.randn(1, 1, 5, 10), name="kernel_2", dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=e1,
          strides=[1, 1, 1, 1],
          padding="VALID",
          name="conv")
      out = nn.conv2d(
          input=conv,
          filter=e2,
          strides=[1, 1, 1, 1],
          padding="VALID",
          name="conv_2")
      array_ops.squeeze(out, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      expected_engines=["my_trt_op_0"],
      expected_output_dims=(2, 15, 15, 10),
      allclose_atol=1.e-02,
      allclose_rtol=1.e-02)
示例3: GetMultiEngineGraphDef
def GetMultiEngineGraphDef(dtype=dtypes.float32):
  """Create a graph containing multiple segment.

  A single conv feeds two arithmetic branches (p and q); a sin op sits in
  the middle of the dataflow — presumably the non-convertible op that
  splits the graph into separate segments (mirrors trt_incompatible_op in
  the sibling examples; confirm against the converter's supported-op list).

  Args:
    dtype: element type used for the placeholder and all constants.

  Returns:
    The serialized GraphDef (reads module-level INPUT_DIMS, INPUT_NAME,
    OUTPUT_NAME).
  """
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      c1 = constant_op.constant(
          np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
      p = conv * c1
      c2 = constant_op.constant(
          np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
      q = conv / c2
      edge = math_ops.sin(q)
      edge /= edge  # identity-valued (x/x) edge keeping sin in the dataflow
      r = edge + edge
      p -= edge
      q *= edge
      s = p + q
      s -= r
      array_ops.squeeze(s, name=OUTPUT_NAME)
  return g.as_graph_def()
示例4: _annotated_graph
def _annotated_graph(self):
  """Build a 3-layer conv net whose ops carry '_recompute_hint' attributes.

  Each layer tags the scaled conv output and the relu output with hint=1
  and the bias-added activation with hint=0, using the private
  Operation._set_attr API.

  Returns:
    A (graph, init_op, train_op) tuple.
  """
  graph = ops.Graph()
  with graph.as_default():
    # Fixed seed so variable initialization is deterministic.
    random_seed.set_random_seed(2)
    current_activation = variable_scope.get_variable(
        name='start', shape=[1, 2, 2, 5])
    conv_filter = variable_scope.get_variable(
        name='filter', shape=[5, 5, 5, 5])
    for layer_number in range(3):
      with variable_scope.variable_scope('layer_{}'.format(layer_number)):
        after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                               'SAME')
        current_activation = 2. * after_conv
        current_activation.op._set_attr(
            '_recompute_hint',
            # The value of the attribute does not matter; just that the key
            # exists in the op's attributes.
            attr_value_pb2.AttrValue(i=1))
        current_activation += 5.
        current_activation.op._set_attr(
            '_recompute_hint', attr_value_pb2.AttrValue(i=0))
        current_activation = nn.relu(current_activation)
        current_activation.op._set_attr(
            '_recompute_hint', attr_value_pb2.AttrValue(i=1))
    loss = math_ops.reduce_mean(current_activation)
    optimizer = train.AdamOptimizer(0.001)
    train_op = optimizer.minimize(loss)
    init_op = variables.global_variables_initializer()
  return graph, init_op, train_op
示例5: GetParams
def GetParams(self):
  """Single vgg layer test in TF-TRT conversion.

  Graph: fused batch norm -> conv -> bias add -> relu -> identity ->
  max pool, then squeeze to the output name.

  Returns:
    A trt_test.TfTrtIntegrationTestParams for the harness.
  """
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [5, 8, 8, 2]
  output_name = "output"
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    # Inference-mode batch norm with fixed mean/variance (is_training=False).
    x, _, _ = nn_impl.fused_batch_norm(
        x, [1.0, 1.0], [0.0, 0.0],
        mean=[0.5, 0.5],
        variance=[1.0, 1.0],
        is_training=False)
    e = constant_op.constant(
        np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
    conv = nn.conv2d(
        input=x, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv")
    b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
    t = nn.bias_add(conv, b, name="biasAdd")
    relu = nn.relu(t, "relu")  # second positional argument is the op name
    idty = array_ops.identity(relu, "ID")
    v = nn_ops.max_pool(
        idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
    array_ops.squeeze(v, name=output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      output_names=[output_name],
      expected_output_dims=[(5, 2, 2, 6)])
示例6: GetParams
def GetParams(self):
  """Conv + broadcast bias add + relu, run over several batch sizes.

  The placeholder leaves the batch dimension dynamic; input_dims lists one
  shape per test run, varying only the batch dimension.

  Returns:
    A trt_test.TfTrtIntegrationTestParams for the harness.
  """
  dtype = dtypes.float32
  input_name = "input"
  # One entry per run; only the batch dimension differs.
  input_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10, 2]],
                [[2, 10, 10, 2]]]
  # SAME padding with stride 1 preserves spatial dims; channels drop to 1.
  expected_output_dims = [[[1, 10, 10, 1]], [[2, 10, 10, 1]],
                          [[4, 10, 10, 1]], [[2, 10, 10, 1]]]
  output_name = "output"
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(
        dtype=dtype, shape=[None, 10, 10, 2], name=input_name)
    conv_filter = constant_op.constant(
        np.random.randn(3, 3, 2, 1), dtype=dtypes.float32)
    x = nn.conv2d(
        input=x,
        filter=conv_filter,
        strides=[1, 1, 1, 1],
        padding="SAME",
        name="conv")
    # Full-spatial bias (1, 10, 10, 1), broadcast over the batch dimension.
    bias = constant_op.constant(
        np.random.randn(1, 10, 10, 1), dtype=dtypes.float32)
    x = math_ops.add(x, bias)
    x = nn.relu(x)
    x = array_ops.identity(x, name="output")
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=input_dims,
      output_names=[output_name],
      expected_output_dims=expected_output_dims)
示例7: get_simple_graph_def
def get_simple_graph_def(self):
  """Build a conv -> bias -> relu -> max-pool graph and return its GraphDef."""
  graph = ops.Graph()
  with graph.as_default():
    inp = aops.placeholder(
        dtype=dtypes.float32, shape=(None, 24, 24, 2), name="input")
    weights = cop.constant(
        [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
        name="weights",
        dtype=dtypes.float32)
    conv_out = nn.conv2d(
        input=inp,
        filter=weights,
        strides=[1, 2, 2, 1],
        padding="SAME",
        name="conv")
    bias_vals = cop.constant(
        [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtypes.float32)
    biased = nn.bias_add(conv_out, bias_vals, name="biasAdd")
    activated = nn.relu(biased, "relu")
    passthrough = aops.identity(activated, "ID")
    pooled = nn_ops.max_pool(
        passthrough, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
    aops.squeeze(pooled, name="output")
  return graph.as_graph_def()
示例8: GetParams
def GetParams(self):
  """Neighboring node wiring tests in TF-TRT conversion.

  The conv output feeds both a convertible mul and a TRT-incompatible op,
  so the converter must wire the engine output to multiple consumers.

  Returns:
    A trt_test.TfTrtIntegrationTestParams for the harness.
  """
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [2, 3, 7, 5]  # NCHW layout (see data_format below)
  output_name = "output"
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    e = constant_op.constant(
        np.random.normal(.3, 0.05, [3, 2, 3, 4]), name="weights", dtype=dtype)
    conv = nn.conv2d(
        input=x,
        filter=e,
        data_format="NCHW",
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="conv")
    # Bias shaped [1, C, 1, 1] to broadcast in NCHW layout.
    b = constant_op.constant(
        np.random.normal(1.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    t = math_ops.mul(conv, b, name="mul")
    # Second consumer of the conv output, deliberately not convertible.
    e = self.trt_incompatible_op(conv, name="incompatible")
    t = math_ops.sub(t, e, name="sub")
    array_ops.squeeze(t, name=output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      output_names=[output_name],
      expected_output_dims=[(2, 4, 5, 4)])
示例9: GetParams
def GetParams(self):
  """Neighboring node wiring tests in TF-TRT conversion.

  Variant that uses gen_math_ops.tan as the second consumer of the conv
  output — presumably the non-convertible op that forces two engines
  (num_expected_engines=2 below); confirm against the supported-op list.

  Returns:
    A trt_test.TfTrtIntegrationTestParams for the harness.
  """
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [2, 3, 7, 5]  # NCHW layout (see data_format below)
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    e = constant_op.constant(
        np.random.normal(.3, 0.05, [3, 2, 3, 4]), name="weights", dtype=dtype)
    conv = nn.conv2d(
        input=x,
        filter=e,
        data_format="NCHW",
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="conv")
    # Bias shaped [1, C, 1, 1] to broadcast in NCHW layout.
    b = constant_op.constant(
        np.random.normal(1.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    t = conv * b
    e = gen_math_ops.tan(conv)
    t = t - e
    array_ops.squeeze(t, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      num_expected_engines=2,
      expected_output_dims=(2, 4, 5, 4),
      allclose_atol=1.e-03,
      allclose_rtol=1.e-03)
示例10: GetSingleEngineGraphDef
def GetSingleEngineGraphDef(dtype=dtypes.float32):
  """Build a graph forming a single convertible segment; return its GraphDef."""
  graph = ops.Graph()
  with graph.as_default():
    input_tensor = array_ops.placeholder(
        dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
    with graph.device("/GPU:0"):
      weights = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv_out = nn.conv2d(
          input=input_tensor,
          filter=weights,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      bias_vals = constant_op.constant(
          [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtype)
      biased = nn.bias_add(conv_out, bias_vals, name="bias_add")
      activated = nn.relu(biased, "relu")
      passthrough = array_ops.identity(activated, "identity")
      pooled = nn_ops.max_pool(
          passthrough, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
      array_ops.squeeze(pooled, name=OUTPUT_NAME)
  return graph.as_graph_def()
示例11: GetParams
def GetParams(self):
  """Two-conv graph driven with fully dynamic input shapes.

  Returns:
    A trt_test.TfTrtIntegrationTestParams with one input shape per run.
  """
  # TODO(laigd): we should test the following cases:
  # - batch size is not changed, other dims are changing
  # - batch size is decreasing, other dims are identical
  # - batch size is decreasing, other dims are changing
  # - batch size is increasing, other dims are identical
  # - batch size is increasing, other dims are changing
  input_dims = [[[1, 5, 5, 1]], [[10, 5, 5, 1]], [[3, 5, 5, 1]],
                [[1, 5, 5, 1]], [[1, 3, 1, 1]], [[2, 9, 9, 1]],
                [[1, 224, 224, 1]], [[1, 128, 224, 1]]]
  # SAME padding with stride 1 preserves every dimension, and the second
  # conv restores 1 channel, so output shapes equal input shapes.
  expected_output_dims = input_dims
  g = ops.Graph()
  with g.as_default():
    # Everything but the channel dimension is dynamic.
    x = array_ops.placeholder(
        shape=(None, None, None, 1), dtype=dtypes.float32, name="input")
    conv_filter1 = constant_op.constant(
        np.ones([3, 3, 1, 8]), name="weights1", dtype=dtypes.float32)
    bias1 = constant_op.constant(np.random.randn(8), dtype=dtypes.float32)
    x = nn.conv2d(
        input=x,
        filter=conv_filter1,
        strides=[1, 1, 1, 1],
        padding="SAME",
        name="conv")
    x = nn.bias_add(x, bias1)
    x = nn.relu(x)
    conv_filter2 = constant_op.constant(
        np.ones([3, 3, 8, 1]), name="weights2", dtype=dtypes.float32)
    bias2 = constant_op.constant(np.random.randn(1), dtype=dtypes.float32)
    # NOTE(review): same name "conv" as above; TF uniquifies the op name.
    x = nn.conv2d(
        input=x,
        filter=conv_filter2,
        strides=[1, 1, 1, 1],
        padding="SAME",
        name="conv")
    x = nn.bias_add(x, bias2)
    x = array_ops.identity(x, name="output")
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=["input"],
      input_dims=input_dims,
      output_names=["output"],
      expected_output_dims=expected_output_dims)
示例12: GetParams
def GetParams(self):
  """Create a graph containing multiple segment.

  A conv feeds two arithmetic branches; a TRT-incompatible op in the
  middle splits the graph into the two engines listed below.

  Returns:
    A trt_test.TfTrtIntegrationTestParams for the harness.
  """
  # TODO(aaroey): test graph with different dtypes.
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [100, 24, 24, 2]
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      c1 = constant_op.constant(
          np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype, name="c1")
      p = math_ops.mul(conv, c1, name="mul")
      c2 = constant_op.constant(
          np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype, name="c2")
      q = math_ops.div(conv, c2, name="div")
      # Deliberately non-convertible op: this is the segment boundary.
      edge = self.trt_incompatible_op(q, name="incompatible")
      edge = math_ops.div(edge, edge, name="div1")  # identity-valued (x/x)
      r = math_ops.add(edge, edge, name="add")
      p = math_ops.sub(p, edge, name="sub")
      q = math_ops.mul(q, edge, name="mul1")
      s = math_ops.add(p, q, name="add1")
      s = math_ops.sub(s, r, name="sub1")
      array_ops.squeeze(s, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      # TODO(aaroey): LayoutOptimizer adds additional nodes to the graph which
      # breaks the connection check, fix it.
      # - my_trt_op_0 should have ["mul", "sub", "div1", "mul1", "add1",
      #   "add", "sub1"];
      # - my_trt_op_1 should have ["weights","conv", "div"]
      expected_engines=["my_trt_op_0", "my_trt_op_1"],
      expected_output_dims=(100, 12, 12, 6),
      allclose_atol=1.e-03,
      allclose_rtol=1.e-03)
示例13: GetParams
def GetParams(self):
  """Test for multi connection neighboring nodes wiring tests in TF-TRT.

  The conv output fans out into several add/sub branches; tan and sigmoid
  ops sit between convertible segments, so two engines are expected.

  Returns:
    A trt_test.TfTrtIntegrationTestParams for the harness.
  """
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [2, 3, 7, 5]  # NCHW layout (see data_format below)
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    e = constant_op.constant(
        np.random.normal(.05, .005, [3, 2, 3, 4]),
        name="weights",
        dtype=dtype)
    conv = nn.conv2d(
        input=x,
        filter=e,
        data_format="NCHW",
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="conv")
    # `b` is intentionally rebound with the same op name "bias" three
    # times; TF uniquifies the node names in the graph.
    b = constant_op.constant(
        np.random.normal(2.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    t = conv + b
    b = constant_op.constant(
        np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    q = conv - b
    edge = math_ops.sigmoid(q)
    b = constant_op.constant(
        np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    d = b + conv
    edge3 = math_ops.sigmoid(d)
    edge1 = gen_math_ops.tan(conv)
    t = t - edge1
    q = q + edge
    t = t + q
    t = t + d
    t = t - edge3
    array_ops.squeeze(t, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      expected_engines=["my_trt_op_0", "my_trt_op_1"],
      expected_output_dims=(2, 4, 5, 4),
      allclose_atol=1.e-03,
      allclose_rtol=1.e-03)
示例14: GetParams
def GetParams(self):
  """Test for multi connection neighboring nodes wiring tests in TF-TRT.

  Same fan-out structure as the sibling example, but the segment-breaking
  ops are produced by self.trt_incompatible_op. NOTE(review): input_dims
  and expected_output_dims are double-nested here ([[shape]]), unlike some
  siblings — presumably a newer harness API; verify against trt_test.

  Returns:
    A trt_test.TfTrtIntegrationTestParams for the harness.
  """
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [2, 3, 7, 5]  # NCHW layout (see data_format below)
  output_name = "output"
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    e = constant_op.constant(
        np.random.normal(.05, .005, [3, 2, 3, 4]),
        name="weights",
        dtype=dtype)
    conv = nn.conv2d(
        input=x,
        filter=e,
        data_format="NCHW",
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="conv")
    # `b` is rebound with the same op name "bias" three times; TF
    # uniquifies the node names in the graph.
    b = constant_op.constant(
        np.random.normal(2.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    t = conv + b
    b = constant_op.constant(
        np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    q = conv - b
    edge = self.trt_incompatible_op(q)
    b = constant_op.constant(
        np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    d = b + conv
    edge3 = self.trt_incompatible_op(d)
    edge1 = self.trt_incompatible_op(conv)
    t = t - edge1
    q = q + edge
    t = t + q
    t = t + d
    t = t - edge3
    array_ops.squeeze(t, name=output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[[input_dims]],
      output_names=[output_name],
      expected_output_dims=[[[2, 4, 5, 4]]])
示例15: conv2d
def conv2d(tensor_in,
           n_filters,
           filter_shape,
           strides=None,
           padding='SAME',
           bias=True,
           activation=None,
           batch_norm=False):
  """Creates 2D convolutional subgraph with bank of filters.

  Uses tf.nn.conv2d under the hood.

  Creates a filter bank:
    [filter_shape[0], filter_shape[1], tensor_in[3], n_filters]
  and applies it to the input tensor.

  Args:
    tensor_in: input Tensor, 4D shape:
      [batch, in_height, in_width, in_depth].
    n_filters: number of filters in the bank.
    filter_shape: Shape of filters, a list of ints, 1-D of length 2.
    strides: A list of ints, 1-D of length 4. The stride of the sliding
      window for each dimension of input. Defaults to [1, 1, 1, 1].
    padding: A string: 'SAME' or 'VALID'. The type of padding algorithm to
      use. See the [comment here]
      (https://www.tensorflow.org/api_docs/python/nn.html#convolution)
    bias: Boolean, if to add bias.
    activation: Activation Op, optional. If provided applied on the output.
    batch_norm: Whether to apply batch normalization.

  Returns:
    A Tensor with resulting convolution.
  """
  with vs.variable_scope('convolution'):
    # Resolve the default inside the body to avoid a mutable default arg.
    if strides is None:
      strides = [1, 1, 1, 1]
    input_shape = tensor_in.get_shape()
    # Full 4-D filter shape: [height, width, in_depth, n_filters].
    filter_shape = list(filter_shape) + [input_shape[3], n_filters]
    filters = vs.get_variable('filters', filter_shape, dtypes.float32)
    output = nn.conv2d(tensor_in, filters, strides, padding)
    if bias:
      # Bias shaped [1, 1, 1, n_filters] so it broadcasts over batch and
      # spatial dimensions.
      bias_var = vs.get_variable('bias', [1, 1, 1, n_filters], dtypes.float32)
      output += bias_var
    if batch_norm:
      output = batch_normalize(output, convnet=True)
    if activation:
      output = activation(output)
    return output