This page collects typical usage examples of the Python function tensorflow.python.ops.nn_ops.conv2d. If you have been wondering what exactly conv2d does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.
15 code examples of conv2d are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
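As a warm-up, here is a minimal, self-contained sketch of a plain nn_ops.conv2d call (TensorFlow 1.x graph mode is assumed throughout, as in the examples below; the shapes are illustrative):

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import nn_ops

# NHWC input: batch=1, height=4, width=4, channels=1.
x = tf.constant(np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1))
# HWIO filter: a 2x2 kernel mapping 1 input channel to 1 output channel.
f = tf.constant(np.ones([2, 2, 1, 1], dtype=np.float32))
# Stride 1 in every dimension; "VALID" means no padding, so the output is
# 1x3x3x1 and each entry is the sum over one 2x2 input window.
y = nn_ops.conv2d(x, f, strides=[1, 1, 1, 1], padding="VALID")

with tf.Session() as sess:
  print(sess.run(y))  # [[10, 14, 18], [26, 30, 34], [42, 46, 50]]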
Example 1: testFuseResizeAndConv
def testFuseResizeAndConv(self):
  with self.cached_session() as sess:
    inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
    input_op = constant_op.constant(
        np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
    resize_op = image_ops.resize_bilinear(
        input_op, [12, 4], align_corners=False)
    weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
    weights_op = constant_op.constant(
        np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
    nn_ops.conv2d(
        resize_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
    original_graph_def = sess.graph_def
    original_result = sess.run(["output:0"])
  optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
      original_graph_def, ["output"])

  with self.cached_session() as sess:
    _ = importer.import_graph_def(
        optimized_graph_def, input_map={}, name="optimized")
    optimized_result = sess.run(["optimized/output:0"])

  self.assertAllClose(original_result, optimized_result)

  for node in optimized_graph_def.node:
    self.assertNotEqual("Conv2D", node.op)
    self.assertNotEqual("ResizeBilinear", node.op)
Example 2: testFusePadAndConv
def testFusePadAndConv(self):
  with self.cached_session() as sess:
    inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
    input_op = constant_op.constant(
        np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
    pad_op = array_ops.pad(input_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                           mode="REFLECT")
    weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
    weights_op = constant_op.constant(
        np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
    nn_ops.conv2d(
        pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
    original_graph_def = sess.graph_def
    original_result = sess.run(["output:0"])
  optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
      original_graph_def, ["output"])

  with self.cached_session() as sess:
    _ = importer.import_graph_def(
        optimized_graph_def, input_map={}, name="optimized")
    optimized_result = sess.run(["optimized/output:0"])

  self.assertAllClose(original_result, optimized_result)

  for node in optimized_graph_def.node:
    self.assertNotEqual("Conv2D", node.op)
    self.assertNotEqual("MirrorPad", node.op)
Example 3: _test_convolution
def _test_convolution(tensor_in_sizes, filter_in_sizes,
                      dilations, strides, padding, data_format):
    """One iteration of convolution with given shapes and attributes."""
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
        total_size_1 *= s
    for s in filter_in_sizes:
        total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes,
                                         dtype='float32')
        strides = [1] + strides + [1]
        dilations = [1] + dilations + [1]

        nn_ops.conv2d(in_data,
                      in_filter,
                      strides=strides,
                      dilations=dilations,
                      padding=padding,
                      data_format=data_format)

        compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
                            'Placeholder:0', 'Conv2D:0')
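A driver along the following lines would exercise this helper; the concrete shape, stride, and dilation choices here are illustrative, not canonical:

# Hypothetical driver for _test_convolution; shapes/strides are illustrative.
def test_forward_convolution():
    # tensor_in_sizes is NHWC, filter_in_sizes is [kh, kw, in_c, out_c];
    # the two list arguments are dilations and strides for the spatial dims.
    _test_convolution([4, 8, 8, 16], [1, 1, 16, 32], [1, 1], [1, 1], 'SAME', 'NHWC')
    _test_convolution([4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], 'VALID', 'NHWC')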
Example 4: build_graph
def build_graph(device, input_shape, filter_shape, strides, padding, num_iters):
  """Builds a graph containing a sequence of conv2d operations.

  Args:
    device: String, the device to run on.
    input_shape: Shape of the input tensor.
    filter_shape: Shape of the filter tensor.
    strides: A list of ints. 1-D of length 4. The stride of sliding
      window for each dimension of input.
    padding: A string from: "SAME", "VALID". The type of padding
      algorithm to use.
    num_iters: number of iterations to run conv2d.

  Returns:
    An array of tensors to run()
  """
  with ops.device("/%s:0" % device):
    inp = variables.Variable(random_ops.truncated_normal(input_shape))
    filt = variables.Variable(random_ops.truncated_normal(filter_shape))

    outputs = []
    conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format="NHWC")
    outputs.append(conv2d_op)
    for _ in range(1, num_iters):
      with ops.control_dependencies([conv2d_op]):
        conv2d_op = nn_ops.conv2d(
            inp, filt, strides, padding, data_format="NHWC")
        outputs.append(conv2d_op)
    return control_flow_ops.group(*outputs)
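A sketch of how such a graph might be timed; the session plumbing below (including the session_lib import name) is an assumption for illustration, not part of the source:

# Hypothetical benchmark driver for build_graph; the session import and
# timing loop are assumptions, not part of the example above.
import time
from tensorflow.python.client import session as session_lib

def run_benchmark(device, input_shape, filter_shape, strides, padding, num_iters):
  with session_lib.Session() as sess:
    bench_op = build_graph(device, input_shape, filter_shape,
                           strides, padding, num_iters)
    sess.run(variables.global_variables_initializer())
    sess.run(bench_op)  # Warm-up run so startup costs are excluded.
    start = time.time()
    sess.run(bench_op)
    print("%d conv2d ops took %.4f s" % (num_iters, time.time() - start))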
Example 5: testAtrousSequence
def testAtrousSequence(self):
  """Tests optimization of sequence of atrous convolutions.

  Verifies that a sequence of `atrous_conv2d` operations with identical `rate`
  parameters, 'SAME' `padding`, and `filters` with odd heights/widths:

      net = atrous_conv2d(net, filters1, rate, padding="SAME")
      net = atrous_conv2d(net, filters2, rate, padding="SAME")
      ...
      net = atrous_conv2d(net, filtersK, rate, padding="SAME")

  is equivalent to:

      pad = ...  # padding so that the input dims are multiples of rate
      net = space_to_batch(net, paddings=pad, block_size=rate)
      net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
      net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
      ...
      net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
      net = batch_to_space(net, crops=pad, block_size=rate)
  """
  padding = "SAME"  # The padding needs to be "SAME"
  np.random.seed(1)  # Make it reproducible.
  with self.session(use_gpu=True):
    # Input: [batch, height, width, input_depth]
    for height in range(15, 17):
      for width in range(15, 17):
        x_shape = [3, height, width, 2]
        x = np.random.random_sample(x_shape).astype(np.float32)

        for kernel in [1, 3, 5]:  # The kernel size needs to be odd.
          # Filter: [kernel_height, kernel_width, input_depth, output_depth]
          f_shape = [kernel, kernel, 2, 2]
          f = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)

          for rate in range(2, 4):
            # y1: three atrous_conv2d in a row.
            y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
            y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
            y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)

            # y2: space_to_batch, three conv2d in a row, batch_to_space.
            pad_bottom = 0 if height % rate == 0 else rate - height % rate
            pad_right = 0 if width % rate == 0 else rate - width % rate
            pad = [[0, pad_bottom], [0, pad_right]]
            y2 = array_ops.space_to_batch(x, paddings=pad, block_size=rate)
            y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
            y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
            y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
            y2 = array_ops.batch_to_space(y2, crops=pad, block_size=rate)
            self.assertAllClose(
                y1.eval(), self.evaluate(y2), rtol=1e-2, atol=1e-2)
Example 6: _BuildSmallModel
def _BuildSmallModel(self):
  image = array_ops.zeros([2, 6, 6, 3])
  kernel = variable_scope.get_variable(
      'DW', [3, 3, 3, 6],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
  kernel = variable_scope.get_variable(
      'DW2', [2, 2, 6, 12],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  x = nn_ops.conv2d(x, kernel, [1, 2, 2, 1], padding='SAME')
  return x
Example 7: _tf_enc_attention_decoder
def _tf_enc_attention_decoder(self, attention_states, last_enc_state, cell,
                              num_heads=1,
                              dtype=dtypes.float32, scope=None):
  """RNN decoder with attention for the sequence-to-sequence model.

  Args:
    return_encodings: If true, return encoder hidden states. Otherwise, return
      single step decoding tensors
  """
  if num_heads < 1:
    raise ValueError("With less than 1 heads, use a non-attention decoder.")
  if not attention_states.get_shape()[1:2].is_fully_defined():
    raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
                     % attention_states.get_shape())

  with variable_scope.variable_scope(scope or "attention_decoder"):
    attn_length = attention_states.get_shape()[1].value
    attn_size = attention_states.get_shape()[2].value

    # To calculate W1 * h_t we use a 1-by-1 convolution, so reshape first.
    hidden = array_ops.reshape(
        attention_states, [-1, attn_length, 1, attn_size])
    hidden_features = []
    v = []
    attention_vec_size = attn_size  # Size of query vectors for attention.
    for a in xrange(num_heads):
      k = variable_scope.get_variable("AttnW_%d" % a,
                                      [1, 1, attn_size, attention_vec_size])
      # Hidden states multiplied with W1.
      hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
      v.append(variable_scope.get_variable("AttnV_%d" % a,
                                           [attention_vec_size]))

    return [last_enc_state] + [hidden] + hidden_features + v
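The 1-by-1 convolution above is just a position-wise matrix multiplication of each hidden vector with AttnW. A standalone sketch of that equivalence (shapes are hypothetical):

# Standalone sketch: a 1x1 conv2d over [batch, length, 1, size] equals a
# matmul of every position's vector with the [size, size] weight matrix.
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import nn_ops

batch, length, size = 2, 5, 4
hidden = tf.constant(np.random.rand(batch, length, 1, size).astype(np.float32))
w = tf.constant(np.random.rand(1, 1, size, size).astype(np.float32))

via_conv = nn_ops.conv2d(hidden, w, [1, 1, 1, 1], "SAME")
via_matmul = tf.reshape(
    tf.matmul(tf.reshape(hidden, [-1, size]), tf.reshape(w, [size, size])),
    [batch, length, 1, size])

with tf.Session() as sess:
  a, b = sess.run([via_conv, via_matmul])
  print(np.allclose(a, b, atol=1e-5))  # True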
Example 8: testSmallNetwork
def testSmallNetwork(self):
  image = array_ops.placeholder(dtypes.float32, shape=[1, 28, 28, 1])
  label = array_ops.placeholder(dtypes.float32, shape=[1, 10])
  w = variables.Variable(
      random_ops.truncated_normal([5, 5, 1, 32], stddev=0.1))
  b = variables.Variable(random_ops.truncated_normal([32], stddev=0.1))
  conv = nn_ops.conv2d(image, w, strides=[1, 1, 1, 1], padding="SAME")
  h_conv = nn_ops.relu(conv + b)
  h_conv_flat = array_ops.reshape(h_conv, [1, -1])

  w_fc = variables.Variable(
      random_ops.truncated_normal([25088, 10], stddev=0.1))
  b_fc = variables.Variable(random_ops.truncated_normal([10], stddev=0.1))
  y_conv = nn_ops.softmax(math_ops.matmul(h_conv_flat, w_fc) + b_fc)

  cross_entropy = math_ops.reduce_mean(-math_ops.reduce_sum(
      label * math_ops.log(y_conv), reduction_indices=[1]))
  _ = adam.AdamOptimizer(1e-4).minimize(cross_entropy)

  mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
  report = cost_analyzer.GenerateCostReport(mg)
  self.assertTrue(b"MatMul" in report)
  self.assertTrue(b"ApplyAdam" in report)
  self.assertTrue(b"Conv2D" in report)
  self.assertTrue(b"Conv2DBackpropInput" in report)
  self.assertTrue(b"Conv2DBackpropFilter" in report)
  self.assertTrue(b"Softmax" in report)

  # Also print the report to make it easier to debug.
  print("{}".format(report))
Example 9: testExtractPointwiseConv2dPatches
def testExtractPointwiseConv2dPatches(self):
  with ops.Graph().as_default(), self.test_session() as sess:
    batch_size = 10
    image_height = image_width = 8
    in_channels = out_channels = 3
    kernel_height = kernel_width = 1
    strides = [1, 1, 1, 1]
    padding = 'VALID'

    images = random_ops.random_uniform(
        [batch_size, image_height, image_width, in_channels], seed=0)
    kernel_shape = [kernel_height, kernel_width, in_channels, out_channels]
    kernel = random_ops.random_uniform(kernel_shape, seed=1)

    # Ensure shape matches expectation.
    patches = utils.extract_pointwise_conv2d_patches(images, kernel_shape)
    self.assertEqual(patches.shape.as_list(), [
        batch_size, image_height, image_width, kernel_height, kernel_width,
        in_channels
    ])

    # Ensure extract...patches() + matmul() and conv2d() implementation
    # give the same answer.
    outputs = nn_ops.conv2d(images, kernel, strides, padding)

    patches_flat = array_ops.reshape(
        patches, [-1, kernel_height * kernel_width * in_channels])
    kernel_flat = array_ops.reshape(kernel, [-1, out_channels])
    outputs_flat = math_ops.matmul(patches_flat, kernel_flat)

    outputs_, outputs_flat_ = sess.run([outputs, outputs_flat])
    self.assertAllClose(outputs_.flatten(), outputs_flat_.flatten())
Example 10: testGradientDilatedConv
def testGradientDilatedConv(self):
  if test.is_gpu_available(cuda_only=True):
    with self.test_session(use_gpu=True):
      for padding in ["SAME", "VALID"]:
        for stride in [1, 2]:
          np.random.seed(1)
          in_shape = [5, 8, 6, 4]
          in_val = constant_op.constant(
              2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
          filter_shape = [3, 3, 4, 6]
          # Make a convolution op with the current settings,
          # just to easily get the shape of the output.
          conv_out = nn_ops.conv2d(
              in_val,
              array_ops.zeros(filter_shape),
              dilations=[1, 2, 2, 1],
              strides=[1, stride, stride, 1],
              padding=padding)
          out_backprop_shape = conv_out.get_shape().as_list()
          out_backprop_val = constant_op.constant(
              2 * np.random.random_sample(out_backprop_shape) - 1,
              dtype=dtypes.float32)
          output = nn_ops.conv2d_backprop_filter(
              in_val,
              filter_shape,
              out_backprop_val,
              dilations=[1, 2, 2, 1],
              strides=[1, stride, stride, 1],
              padding=padding)
          err = gradient_checker.compute_gradient_error(
              [in_val, out_backprop_val], [in_shape, out_backprop_shape],
              output, filter_shape)
          print("conv2d_backprop_filter gradient err = %g " % err)
          err_tolerance = 2e-3
          self.assertLess(err, err_tolerance)
Example 11: _VerifyValues
def _VerifyValues(self,
                  input_sizes=None,
                  filter_sizes=None,
                  strides=None,
                  dilations=None,
                  padding=None,
                  data_format_src="NHWC",
                  data_format_dst="NHWC",
                  expected=None):
  """Tests that tf.nn.conv2d produces the expected value.

  Args:
    input_sizes: Input tensor dimensions in
      [batch, input_rows, input_cols, input_depth].
    filter_sizes: Filter tensor dimensions in
      [kernel_rows, kernel_cols, input_depth, output_depth].
    strides: Strides.
    dilations: RHS dilations.
    padding: Padding type.
    data_format_src: Data format input is in.
    data_format_dst: Data format verification will run and input is converted
      to.
    expected: Expected output.
  """
  total_size_1 = np.prod(input_sizes)
  total_size_2 = np.prod(filter_sizes)
  x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
  x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(filter_sizes)
  strides = [1] + strides + [1]
  if dilations is None:
    dilations = [1, 1]
  dilations = [1] + dilations + [1]

  # Convert between data formats.
  expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src,
                                                  data_format_dst)
  x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src,
                                            data_format_dst)
  input_sizes = test_utils.PermuteDimsBetweenDataFormats(
      input_sizes, data_format_src, data_format_dst)
  strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
                                                     data_format_dst)
  dilations = test_utils.PermuteDimsBetweenDataFormats(
      dilations, data_format_src, data_format_dst)

  with self.test_session() as sess:
    t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
    t2 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
    with self.test_scope():
      out = nn_ops.conv2d(
          t1,
          t2,
          strides=strides,
          padding=padding,
          data_format=data_format_dst,
          dilations=dilations)
    value = sess.run(out, {t1: x1, t2: x2})

  self.assertAllClose(expected, value, 1e-3)
Example 12: _VerifyValues
def _VerifyValues(self, input_sizes, filter_sizes, stride, padding, expected):
  """Tests that tf.nn.conv2d produces the expected value.

  Args:
    input_sizes: Input tensor dimensions in
      [batch, input_rows, input_cols, input_depth].
    filter_sizes: Filter tensor dimensions in
      [kernel_rows, kernel_cols, input_depth, output_depth].
    stride: Stride.
    padding: Padding type.
    expected: Expected output.
  """
  total_size_1 = np.prod(input_sizes)
  total_size_2 = np.prod(filter_sizes)
  x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
  x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(filter_sizes)
  strides = [1, stride, stride, 1]

  with self.test_session() as sess:
    with self.test_scope():
      t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
      t2 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
      out = nn_ops.conv2d(
          t1, t2, strides=strides, padding=padding, data_format="NHWC")
    value = sess.run(out, {t1: x1, t2: x2})

  self.assertArrayNear(expected, np.ravel(value), 1e-3)
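A call along these lines drives the helper. Since the helper fills the input and filter with 1, 2, 3, ..., the expected values for a 1x1 filter can be checked by hand: each output pixel is the pixel's 3-vector times the 3x3 filter matrix, so the first pixel [1, 2, 3] yields [30, 36, 42].

def testConv2D1x1Filter(self):
  # Expected output for an arange-filled input of shape [1, 2, 3, 3] and
  # filter of shape [1, 1, 3, 3], stride 1, VALID padding; each triple is
  # one pixel's vector multiplied by the filter matrix.
  expected_output = [
      30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0,
      138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
  ]
  self._VerifyValues(
      input_sizes=[1, 2, 3, 3],
      filter_sizes=[1, 1, 3, 3],
      stride=1,
      padding="VALID",
      expected=expected_output)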
Example 13: SimulateFusedConv2dBiasActivationInt8
def SimulateFusedConv2dBiasActivationInt8(conv_input_scale, conv_input, kernel,
                                          padding, strides, side_input_scale,
                                          side_input, biases):
  """Simulates the int8 fused 2-D convolution op using separate float ops.

  The arguments and return values have the same format, meanings and
  restrictions as the actual op.

  Args:
    conv_input_scale: A scalar 'float'.
    conv_input: A `Tensor` of type `qint8` in NCHW_VECT_C layout.
    kernel: A `Tensor` of type `qint8` in OIHW_VECT_I layout.
    padding: A `string` from: `"SAME", "VALID"`.
    strides: A list of `ints`.
    side_input_scale: A scalar 'float'.
    side_input: A `Tensor` of type `qint8` in NCHW_VECT_C layout.
    biases: A `Tensor` of type `float32` in NCHW layout.

  Returns:
    A `Tensor` of type `qint8` in NCHW_VECT_C layout.
  """
  conv_result = nn_ops.conv2d(
      NchwVectCToNchw(gen_array_ops.dequantize(conv_input, -128, 127)),
      OihwVectIToHwio(gen_array_ops.dequantize(kernel, -128, 127)),
      strides=strides,
      padding=padding,
      data_format="NCHW") * conv_input_scale

  conv_and_side_inputs = conv_result + side_input_scale * NchwVectCToNchw(
      gen_array_ops.dequantize(side_input, -128, 127))

  logit = nn_ops.bias_add(conv_and_side_inputs, biases, data_format="NCHW")

  result, _, _ = gen_array_ops.quantize_v2(
      NchwToNchwVectC(nn_ops.relu(logit)), -128, 127, dtypes.qint8)
  return result
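The layout helpers used above (NchwVectCToNchw, OihwVectIToHwio, NchwToNchwVectC) are not shown in this snippet. As a sketch of what the first one does, assuming the NCHW_VECT_C convention stores channels as [N, C/4, H, W, 4]:

# Sketch of one layout helper, assuming the NCHW_VECT_C convention
# [N, C/4, H, W, 4]; the others follow the same transpose-and-reshape pattern.
def NchwVectCToNchw(in_tensor):
  # [N, C/4, H, W, 4] -> [N, C/4, 4, H, W], then merge the two channel axes.
  t = array_ops.transpose(in_tensor, [0, 1, 4, 2, 3])
  n = in_tensor.shape.dims[0].value
  c = in_tensor.shape.dims[1].value * in_tensor.shape.dims[4].value
  h = in_tensor.shape.dims[2].value
  w = in_tensor.shape.dims[3].value
  return array_ops.reshape(t, [n, c, h, w])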
Example 14: _strict_conv1d
def _strict_conv1d(x, h):
  """Return x * h for rank 1 tensors x and h."""
  with ops.op_scope([x, h], 'strict_conv1d'):
    x = array_ops.reshape(x, (1, -1, 1, 1))
    h = array_ops.reshape(h, (-1, 1, 1, 1))
    result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')
    return array_ops.reshape(result, [-1])
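Because conv2d computes a correlation (it does not flip the kernel), this helper matches np.convolve only after reversing the kernel; a quick hypothetical sanity check with an odd-length kernel:

# Hypothetical check: for an odd-length kernel, _strict_conv1d(x, h) equals
# np.convolve(x, h[::-1], 'same'), since conv2d does not flip the kernel.
import numpy as np
import tensorflow as tf

x_np = np.array([1., 2., 3., 4., 5.], dtype=np.float32)
h_np = np.array([1., 0., -1.], dtype=np.float32)

with tf.Session() as sess:
  tf_out = sess.run(_strict_conv1d(tf.constant(x_np), tf.constant(h_np)))
np_out = np.convolve(x_np, h_np[::-1], mode='same')
print(np.allclose(tf_out, np_out))  # True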
Example 15: ReferenceDepthwiseConv2D
def ReferenceDepthwiseConv2D(input_tensor, filter_tensor, strides, padding,
                             data_format=None):
  # Reference implementation of depthwise convolution that uses regular
  # convolution.
  convs = []
  in_channels = filter_tensor.shape[2]
  # Use a custom implementation of depthwise conv2d using slicing.
  for channel in xrange(in_channels):
    # Slice the input along channel.
    if data_format == "NCHW":
      input_slice = input_tensor[:, channel:channel+1, :, :]
    else:
      input_slice = input_tensor[:, :, :, channel:channel+1]

    # Slice the filters. Filters are H, W, InC, DepthMultiplier.
    filter_slice = filter_tensor[:, :, channel:channel+1, :]

    # Do conv.
    convs.append(nn_ops.conv2d(input_slice, filter_slice,
                               strides, padding,
                               data_format=data_format,
                               name="depthwise_slice_%d" % channel))

  # Concat along the channel dimension.
  if data_format == "NCHW":
    return array_ops.concat(convs, 1)
  else:
    return array_ops.concat(convs, 3)
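A sketch of validating this reference against TensorFlow's built-in depthwise op (tf.nn.depthwise_conv2d_native in TF 1.x); shapes and values here are illustrative:

# Hypothetical check of the reference against the built-in depthwise op.
import numpy as np
import tensorflow as tf

inputs = tf.constant(np.random.rand(1, 8, 8, 3).astype(np.float32))
# Filters are H, W, InC, DepthMultiplier, so the output has 3 * 2 channels.
filters = tf.constant(np.random.rand(3, 3, 3, 2).astype(np.float32))
strides = [1, 1, 1, 1]

ref = ReferenceDepthwiseConv2D(inputs, filters, strides, "SAME")
builtin = tf.nn.depthwise_conv2d_native(inputs, filters, strides, "SAME")

with tf.Session() as sess:
  a, b = sess.run([ref, builtin])
  print(np.allclose(a, b, atol=1e-5))  # True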