This page collects typical usage examples of the Python method tensorflow.python.ops.gen_nn_ops._avg_pool. If you are unsure what gen_nn_ops._avg_pool does or how to call it, the curated examples below may help; you can also explore other members of the tensorflow.python.ops.gen_nn_ops module.
Five code examples of gen_nn_ops._avg_pool are shown below, sorted by popularity by default.
Example 1: avg_pool
# Required import: from tensorflow.python.ops import gen_nn_ops [as alias]
# Or: from tensorflow.python.ops.gen_nn_ops import _avg_pool [as alias]
# This example additionally uses: from tensorflow.python.framework import ops
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
      `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: A list of ints that has length >= 4.
      The size of the window for each dimension of the input tensor.
    strides: A list of ints that has length >= 4.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the `tf.nn.convolution` documentation for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `value`. The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool", [value]) as name:
    value = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops._avg_pool(value,
                                ksize=ksize,
                                strides=strides,
                                padding=padding,
                                data_format=data_format,
                                name=name)
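A minimal usage sketch (not part of the original example; it assumes TF 1.x graph mode and an illustrative random NHWC input) pooling a 4x4 image with a 2x2 window and stride 2:

import numpy as np
import tensorflow as tf

images = tf.constant(np.random.rand(1, 4, 4, 3), dtype=tf.float32)   # [batch, h, w, c]
pooled = avg_pool(images, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
with tf.Session() as sess:
    print(sess.run(pooled).shape)   # (1, 2, 2, 3): each output entry is a 2x2 mean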
Example 2: _AvgPoolGradGrad
# Required import: from tensorflow.python.ops import gen_nn_ops [as alias]
# Or: from tensorflow.python.ops.gen_nn_ops import _avg_pool [as alias]
# This example additionally uses: from tensorflow.python.ops import array_ops
def _AvgPoolGradGrad(op, grad):
  """Gradient of the AvgPoolGrad op. Average pooling is linear, so
  backpropagating through AvgPoolGrad amounts to average-pooling the
  incoming gradient with the same window, stride, and padding attrs."""
  return (array_ops.stop_gradient(op.inputs[0]),
          gen_nn_ops._avg_pool(grad,
                               op.get_attr("ksize"),
                               op.get_attr("strides"),
                               op.get_attr("padding"),
                               data_format=op.get_attr("data_format")))
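This function is only reached during double backprop. A minimal sketch (my own illustrative shapes and gradient-penalty-style loss, assuming TF 1.x graph mode) of a setup in which TensorFlow must differentiate through AvgPoolGrad and hence calls a function like the one above:

import tensorflow as tf

x = tf.random_normal([1, 4, 4, 1])
y = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")
loss = tf.reduce_sum(tf.square(y))
grad_x = tf.gradients(loss, x)[0]           # first-order gradient, built with AvgPoolGrad
penalty = tf.reduce_sum(tf.square(grad_x))  # e.g. a gradient-penalty term
grad_grad_x = tf.gradients(penalty, x)[0]   # second-order: differentiates through AvgPoolGrad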
Example 3: avg_pool
# Required import: from tensorflow.python.ops import gen_nn_ops [as alias]
# Or: from tensorflow.python.ops.gen_nn_ops import _avg_pool [as alias]
# This example additionally uses: from tensorflow.python.framework import ops
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
      `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: A list of ints that has length >= 4.
      The size of the window for each dimension of the input tensor.
    strides: A list of ints that has length >= 4.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `value`. The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool", [value]) as name:
    value = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops._avg_pool(value,
                                ksize=ksize,
                                strides=strides,
                                padding=padding,
                                data_format=data_format,
                                name=name)
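The same wrapper also accepts channels-first input. A short sketch (my assumption: NCHW pooling generally requires a GPU kernel in TF 1.x) showing that ksize and strides follow the chosen data_format, so the spatial window moves to the last two positions:

import tensorflow as tf

x = tf.random_normal([1, 3, 8, 8])                   # [batch, channels, height, width]
y = avg_pool(x, ksize=[1, 1, 2, 2], strides=[1, 1, 2, 2],
             padding="SAME", data_format="NCHW")     # -> shape [1, 3, 4, 4]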
Example 4: avg_pool
# Required import: from tensorflow.python.ops import gen_nn_ops [as alias]
# Or: from tensorflow.python.ops.gen_nn_ops import _avg_pool [as alias]
# This example additionally uses: from tensorflow.python.framework import ops
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
      `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: A 1-D int Tensor of 4 elements.
      The size of the window for each dimension of the input tensor.
    strides: A 1-D int Tensor of 4 elements.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the `tf.nn.convolution` documentation for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `value`. The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool", [value]) as name:
    value = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops._avg_pool(value,
                                ksize=ksize,
                                strides=strides,
                                padding=padding,
                                data_format=data_format,
                                name=name)
Author: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 32, Source file: nn_ops.py
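These wrappers mirror the public tf.nn.avg_pool API of TF 1.x, which dispatches to the same generated op. A short comparison sketch (note that the private gen_nn_ops._avg_pool symbol is version-dependent and may not exist in newer releases, so the public API is the stable entry point):

import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

x = tf.random_normal([1, 4, 4, 3])
a = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")        # public, stable API
b = gen_nn_ops._avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")  # generated op wrapper used above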
Example 5: _rmac
# Required import: from tensorflow.python.ops import gen_nn_ops [as alias]
# Or: from tensorflow.python.ops.gen_nn_ops import _avg_pool [as alias]
# Note: this example also uses `import tensorflow as tf` and a project-specific
# `Notify` console-logging helper, both assumed to be imported elsewhere.
def _rmac(feat_map, rmac_step, reduce_method, deploy):
    """Extract regional vectors (R-MAC style) from the raw feature map.

    The overlap ratio of neighboring regions is roughly 0.5.

    Args:
        feat_map: input raw feature map, shape [batch, height, width, channels].
        rmac_step: list of region-grid sizes; e.g., a step of 3 gives
            3 x 3 = 9 regional vectors.
        reduce_method: 'AVG', 'L2', or 'MAX' pooling over each region.
        deploy: if True, treat the feature map shape as statically known.
    Returns:
        all_rvec: regional vectors, shape [batch, channels, num_regions].
    """
    all_rvec = None
    fully_defined = False
    if feat_map.get_shape().is_fully_defined() or deploy:
        batch_size, feat_h, feat_w, feat_dim = feat_map.get_shape().as_list()
        fully_defined = True
    else:
        batch_size, feat_h, feat_w, feat_dim = tf.unstack(tf.shape(feat_map))
    for step in rmac_step:
        if step > 1:
            # The window covers 2/(step+1) of the map and the stride is about
            # half the window, which gives the ~0.5 overlap. Floor division
            # keeps the kernel and stride integral.
            k_h = (feat_h // (step + 1)) * 2
            s_h = (feat_h - k_h) // (step - 1)
            k_w = (feat_w // (step + 1)) * 2
            s_w = (feat_w - k_w) // (step - 1)
        else:
            # step == 1: reduce over the whole feature map.
            k_h = feat_h
            s_h = 1
            k_w = feat_w
            s_w = 1
        if fully_defined and (k_h < 1 or k_w < 1):
            # Skip this step if the kernel size is smaller than 1.
            continue
        if reduce_method == 'AVG':
            rvec = gen_nn_ops._avg_pool(feat_map, [1, k_h, k_w, 1], [1, s_h, s_w, 1], 'VALID')
        elif reduce_method == 'L2':
            rvec = tf.sqrt(gen_nn_ops._avg_pool(tf.square(feat_map),
                                                [1, k_h, k_w, 1], [1, s_h, s_w, 1], 'VALID'))
        elif reduce_method == 'MAX':
            #rvec = gen_nn_ops._max_pool_v2(feat_map, [1, k_h, k_w, 1], [1, s_h, s_w, 1], 'VALID')
            rvec = tf.nn.max_pool(feat_map, [1, k_h, k_w, 1], [1, s_h, s_w, 1], 'VALID')
        else:
            print(Notify.FAIL, 'Unknown reduce method:', reduce_method, Notify.ENDC)
            raise ValueError('Unknown reduce method: %s' % reduce_method)
        # Flatten the spatial grid of regions and move channels to axis 1.
        rvec = tf.reshape(rvec, [tf.shape(feat_map)[0], -1, feat_dim])
        rvec = tf.transpose(rvec, [0, 2, 1])
        if all_rvec is None:
            all_rvec = rvec
        else:
            all_rvec = tf.concat([all_rvec, rvec], axis=2)
    return all_rvec
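To make the region geometry concrete, here is a small sketch (the 14x14 feature map size and rmac_step=[1, 2, 3] are my illustrative assumptions) that prints the window and stride chosen per step; before flooring, the stride is exactly half the window, which is where the ~0.5 overlap of neighboring regions comes from:

feat_h = 14                     # assumed spatial size of the feature map
for step in [1, 2, 3]:          # assumed rmac_step
    if step > 1:
        k = (feat_h // (step + 1)) * 2   # window covers ~2/(step+1) of the map
        s = (feat_h - k) // (step - 1)   # stride ~ half the window
    else:
        k, s = feat_h, 1                 # step 1 pools the whole map into one vector
    regions = (feat_h - k) // s + 1      # VALID-pooling output size per axis
    print("step=%d window=%d stride=%d regions_per_axis=%d" % (step, k, s, regions))
# Prints: step=1 -> 1 region, step=2 -> 2x2 regions, step=3 -> 3x3 regions.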