This article collects typical usage examples of the Python method tensorflow.contrib.layers.python.layers.utils.two_element_tuple. If you are unsure what utils.two_element_tuple does or how to call it, the curated examples below may help; you can also explore the containing module, tensorflow.contrib.layers.python.layers.utils, for related utilities.
The following presents 4 code examples of utils.two_element_tuple, sorted by popularity by default.
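Before the examples, it helps to know what two_element_tuple returns. The sketch below is not the library source; it simply records the behavior the examples rely on (an int, a 2-element list/tuple, or a length-2 TensorShape goes in, a pair of ints comes out):

from tensorflow.contrib.layers.python.layers import utils

assert utils.two_element_tuple(3) == (3, 3)        # int -> the value repeated
assert utils.two_element_tuple([3, 5]) == (3, 5)   # 2-element list -> tuple of ints
assert utils.two_element_tuple((7, 7)) == (7, 7)   # 2-element tuple passes through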
Example 1: preact_conv2d
# Required import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import two_element_tuple [as alias]
# This snippet additionally relies on (TF 1.x module paths):
#   from tensorflow.python.framework import ops
#   from tensorflow.python.ops import nn, variable_scope
#   from tensorflow.contrib.framework.python.ops import variables
#   from tensorflow.contrib.layers.python.layers import initializers
def preact_conv2d(
inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a 2D convolution preceded by batch normalization and activation.
"""
with variable_scope.variable_scope(scope, 'Conv', values=[inputs], reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
dtype = inputs.dtype.base_dtype
if normalizer_fn:
normalizer_params = normalizer_params or {}
inputs = normalizer_fn(inputs, activation_fn=activation_fn, **normalizer_params)
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_shape = [kernel_h, kernel_w, num_filters_in, num_outputs]
weights_collections = utils.get_variable_collections(variables_collections, 'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
outputs = nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1], padding=padding)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
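A hypothetical usage sketch of the preact_conv2d defined above, assuming TF 1.x graph mode; the input shape and scope name are illustrative, not from the original source. Note that when normalizer_fn is None, activation_fn is never applied either, since this function only invokes the activation inside the normalizer call:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
net = preact_conv2d(images, num_outputs=64, kernel_size=3, stride=2,
                    scope='conv1')
print(net.get_shape())  # expected: (?, 112, 112, 64) under 'SAME' padding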
Example 2: avg_pool2d
# Required import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import two_element_tuple [as alias]
# This snippet additionally relies on (TF 1.x module paths):
#   from tensorflow.python.framework import ops
#   from tensorflow.python.ops import nn
#   DATA_FORMAT_NHWC, DATA_FORMAT_NCHW = 'NHWC', 'NCHW'  # module-level constants
def avg_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D average pooling op.
  Pooling is performed per image over the spatial dimensions, not over the
  batch or channel dimensions.
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'AvgPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
if data_format == DATA_FORMAT_NHWC:
ksize = [1, kernel_h, kernel_w, 1]
strides = [1, stride_h, stride_w, 1]
else:
ksize = [1, 1, kernel_h, kernel_w]
strides = [1, 1, stride_h, stride_w]
outputs = nn.avg_pool(inputs,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
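An illustrative call of the avg_pool2d above, assuming the default NHWC layout (the tensor shape is made up for the example); a 2x2 kernel with the default stride of 2 halves the spatial dimensions under 'VALID' padding:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28, 28, 16])
pooled = avg_pool2d(x, kernel_size=2)  # stride defaults to 2
print(pooled.get_shape())              # expected: (?, 14, 14, 16)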
Example 3: max_pool2d
# Required import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import two_element_tuple [as alias]
# This snippet additionally relies on (TF 1.x module paths):
#   from tensorflow.python.framework import ops
#   from tensorflow.python.ops import nn
#   DATA_FORMAT_NHWC, DATA_FORMAT_NCHW = 'NHWC', 'NCHW'  # module-level constants
def max_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D Max Pooling op.
  Pooling is performed per image over the spatial dimensions, not over the
  batch or channel dimensions.
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: if `kernel_size` is not an int or a 2-element list.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'MaxPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
if data_format == DATA_FORMAT_NHWC:
ksize = [1, kernel_h, kernel_w, 1]
strides = [1, stride_h, stride_w, 1]
else:
ksize = [1, 1, kernel_h, kernel_w]
strides = [1, 1, stride_h, stride_w]
outputs = nn.max_pool(inputs,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
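For contrast with the NHWC call shown after Example 2, here is a hedged sketch of the channels-first path, where ksize and strides become [1, 1, kernel_h, kernel_w]. The shape is illustrative; note that NCHW pooling in TF 1.x generally requires a GPU kernel at runtime, though graph construction and shape inference work on any device:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 16, 28, 28])  # channels-first layout
pooled = max_pool2d(x, kernel_size=[2, 2], stride=2, data_format='NCHW')
print(pooled.get_shape())                           # expected: (?, 16, 14, 14)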
Example 4: conv2d
# Required import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import two_element_tuple [as alias]
# This snippet additionally relies on (TF 1.x module paths):
#   import tensorflow as tf
#   import tensorflow.contrib.slim as slim
def conv2d(inputs, num_outputs, kernel_size, *args, **kwargs):
"""A wrapper/substitute for conv2d that counts the flops.
This counts the number of floating-point operations (flops) for a conv2d
layer, including one with a "mask." The optional keyword argument
  `output_mask` specifies which positions in the output response map actually
  need to be computed; the rest are discarded and not counted in the
result.
Since this is a wrapper around slim.conv2d, see that function for details on
the inputs/outputs.
Args:
inputs: The input response map to the convolution.
num_outputs: The number of output channels for the convolution.
kernel_size: Spatial size of the convolution kernel.
    *args: Additional positional arguments forwarded to slim.conv2d.
**kwargs: Additional keyword args forwarded to slim.conv2d.
Returns:
outputs: The result of the convolution from slim.conv2d.
flops: The operation count as a scalar integer tensor.
"""
output_mask = kwargs.pop('output_mask', None)
outputs = slim.conv2d(inputs, num_outputs, kernel_size, *args, **kwargs)
if inputs.get_shape().is_fully_defined():
inputs_shape = inputs.get_shape().as_list()
outputs_shape = outputs.get_shape().as_list()
else:
inputs_shape = tf.to_int64(tf.shape(inputs))
outputs_shape = tf.to_int64(tf.shape(outputs))
batch_size = outputs_shape[0]
num_filters_in = inputs_shape[3]
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
if output_mask is None:
num_spatial_positions = tf.fill(
# tf.fill does not support int64 dims :-|
dims=tf.to_int32(tf.stack([batch_size])),
value=outputs_shape[1] * outputs_shape[2])
else:
num_spatial_positions = tf.reduce_sum(output_mask, [1, 2])
num_spatial_positions = tf.to_int64(num_spatial_positions)
num_output_positions = num_spatial_positions * num_outputs
flops = 2 * num_output_positions * (kernel_h * kernel_w * num_filters_in)
# The numbers are slightly different than TensorFlow graph_metrics since we
# ignore biases. We do not try to mimic graph_metrics because it is
# inconsistent in the treatment of biases (batch_norm makes biases "free").
return outputs, flops
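A worked check of the flop formula above, flops = 2 * output_positions * (kernel_h * kernel_w * filters_in), using illustrative numbers: a 224x224x3 input, a 3x3 kernel, 64 output channels, stride 1, and 'SAME' padding, so the output map stays 224x224 per image:

out_h = out_w = 224                        # 'SAME' padding, stride 1 keeps spatial size
num_outputs, k_h, k_w, c_in = 64, 3, 3, 3
flops = 2 * (out_h * out_w * num_outputs) * (k_h * k_w * c_in)
print(flops)                               # 173408256, ~173.4 MFLOPs per image, biases ignored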