本文整理汇总了Python中tensorflow.python.ops.gen_nn_ops._max_pool_grad方法的典型用法代码示例。如果您正苦于以下问题:Python gen_nn_ops._max_pool_grad方法的具体用法?Python gen_nn_ops._max_pool_grad怎么用?Python gen_nn_ops._max_pool_grad使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow.python.ops.gen_nn_ops的用法示例。
在下文中一共展示了gen_nn_ops._max_pool_grad方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _MaxPoolGrad
# 需要导入模块: from tensorflow.python.ops import gen_nn_ops [as 别名]
# 或者: from tensorflow.python.ops.gen_nn_ops import _max_pool_grad [as 别名]
def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
                 window_cols, row_stride, col_stride, padding):
  """Compute the gradient of max pooling via the MaxPoolGrad kernel.

  Args:
    orig_input: A float Tensor; the original input to the pooling op.
    orig_output: A float Tensor; the original pooled output.
    grad: A float Tensor; the 4D (batch x rows x cols x depth) output
      backprop.
    window_rows: integer. Kernel size along the rows dimension.
    window_cols: integer. Kernel size along the cols dimension.
    row_stride: integer. Stride along the rows dimension.
    col_stride: integer. Stride along the cols dimension.
    padding: PoolingOpDef.Padding. Padding type.

  Returns:
    A Tensor holding the gradient w.r.t. the pooling input.
  """
  # Pack the scalar window/stride parameters into NHWC-style 4-vectors,
  # which is the layout the raw op expects.
  ksize = [1, window_rows, window_cols, 1]
  strides = [1, row_stride, col_stride, 1]
  return gen_nn_ops._max_pool_grad(orig_input, orig_output, grad,
                                   ksize, strides, padding)
示例2: _MaxPoolGradGrad
# 需要导入模块: from tensorflow.python.ops import gen_nn_ops [as 别名]
# 或者: from tensorflow.python.ops.gen_nn_ops import _max_pool_grad [as 别名]
def _MaxPoolGradGrad(op, grad):
  """Gradient for MaxPoolGrad.

  The incoming gradient is routed through a fresh MaxPoolGrad using the
  op's own attributes; the op's second and third inputs receive
  zero-filled gradients of matching shape and dtype.
  """
  dx = gen_nn_ops._max_pool_grad(
      op.inputs[0],
      op.outputs[0],
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      padding=op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
  zeros_for_input1 = array_ops.zeros(
      shape=array_ops.shape(op.inputs[1]), dtype=dx.dtype)
  zeros_for_input2 = array_ops.zeros(
      shape=array_ops.shape(op.inputs[2]), dtype=dx.dtype)
  return (dx, zeros_for_input1, zeros_for_input2)
示例3: _MaxPoolGrad
# 需要导入模块: from tensorflow.python.ops import gen_nn_ops [as 别名]
# 或者: from tensorflow.python.ops.gen_nn_ops import _max_pool_grad [as 别名]
def _MaxPoolGrad(op, grad):
  """Registered gradient for MaxPool: delegate to the MaxPoolGrad kernel,
  forwarding the pooling attributes recorded on the op."""
  ksize = op.get_attr("ksize")
  strides = op.get_attr("strides")
  return gen_nn_ops._max_pool_grad(
      op.inputs[0],
      op.outputs[0],
      grad,
      ksize,
      strides,
      padding=op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
示例4: _MaxPoolGradGradGrad
# 需要导入模块: from tensorflow.python.ops import gen_nn_ops [as 别名]
# 或者: from tensorflow.python.ops.gen_nn_ops import _max_pool_grad [as 别名]
def _MaxPoolGradGradGrad(op, grad):
  """Gradient for MaxPoolGradGrad.

  Returns zeros for the op's first two inputs and a MaxPoolGrad of the
  incoming gradient for the third.
  """
  def _zeros_like(t):
    # Zero tensor matching t's dynamic shape and dtype.
    return array_ops.zeros(shape=array_ops.shape(t), dtype=t.dtype)

  third_grad = gen_nn_ops._max_pool_grad(
      op.inputs[0],
      op.inputs[1],
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      padding=op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
  return (_zeros_like(op.inputs[0]), _zeros_like(op.inputs[1]), third_grad)
示例5: _CompareMaxPoolingBk
# 需要导入模块: from tensorflow.python.ops import gen_nn_ops [as 别名]
# 或者: from tensorflow.python.ops.gen_nn_ops import _max_pool_grad [as 别名]
def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
                         padding):
  """Compare the GPU MaxPoolGradWithArgmax path against CPU MaxPoolGrad.

  Runs both kernels on the same random input/backprop pair and asserts
  the results agree (with a looser tolerance for fp16, whose CPU path
  accumulates in half precision).

  Args:
    input_shape: shape of the pooling input tensor.
    output_shape: shape of the pooled output (and of the injected backprop).
    ksize: pooling kernel size, e.g. [1, rows, cols, 1].
    strides: pooling strides, e.g. [1, row_stride, col_stride, 1].
    padding: "SAME" or "VALID".
  """
  for dtype in np.float32, np.float16:
    # Generate numbers in a narrow range, so that there are many duplicates
    # in the input.
    # Fix: np.random.random_integers is deprecated and removed in
    # NumPy >= 1.25. randint's upper bound is exclusive, so use 4 to keep
    # the original inclusive range [0, 3].
    tensor_input = np.random.randint(0, 4, input_shape).astype(dtype)
    tensor_output = np.random.rand(*output_shape).astype(dtype)
    with self.test_session(use_gpu=True):
      t = tf.constant(tensor_input, shape=input_shape)
      _, argmax_op = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
      argmax = argmax_op.eval()
      grad_in = tf.constant(tensor_output, shape=output_shape)
      out_op = gen_nn_ops._max_pool_grad_with_argmax(t, grad_in, argmax,
                                                     ksize, strides, padding)
      gpu_val = out_op.eval()
      self.assertShapeEqual(gpu_val, out_op)
    with self.test_session(use_gpu=False):
      t = tf.constant(tensor_input, shape=input_shape)
      out_op = tf.nn.max_pool(t, ksize, strides, padding)
      orig_out = out_op.eval()
      grad_in = tf.constant(tensor_output, shape=output_shape)
      out_op = gen_nn_ops._max_pool_grad(t, orig_out, grad_in, ksize, strides,
                                         padding)
      cpu_val = out_op.eval()
      self.assertShapeEqual(cpu_val, out_op)
    if dtype == np.float16:
      # The CPU version accumulates its gradient on fp16, so it's less
      # accurate than the GPU version that does the accumulation on fp32
      self.assertAllClose(cpu_val, gpu_val, rtol=0.01, atol=0.01)
    else:
      self.assertAllClose(cpu_val, gpu_val)
示例6: testDirectNotUseOverlapping
# 需要导入模块: from tensorflow.python.ops import gen_nn_ops [as 别名]
# 或者: from tensorflow.python.ops.gen_nn_ops import _max_pool_grad [as 别名]
def testDirectNotUseOverlapping(self):
  """Fractional max pool grad with overlapping=False must match plain
  MaxPoolGrad when the pooling regions form a regular, non-overlapping
  grid (stride == window)."""
  for batches in [1, 3]:
    for win_rows in [2, 5]:
      for win_cols in [2, 4]:
        rows = win_rows * 5
        cols = win_cols * 7
        for channels in [1, 2]:
          shape = (batches, rows, cols, channels)
          with self.test_session() as _:
            inputs = tf.constant(self._GenerateUniqueRandomInputTensor(
                shape))
            window = [1, win_rows, win_cols, 1]
            stride = [1, win_rows, win_cols, 1]
            padding = "VALID"
            pooled = tf.nn.max_pool(inputs, window, stride, padding)
            pooled_data = pooled.eval()
            backprop = self._PRNG.randint(100, size=pooled_data.shape)
            expected_op = gen_nn_ops._max_pool_grad(inputs,
                                                    pooled,
                                                    backprop,
                                                    window,
                                                    stride,
                                                    padding)
            expected = expected_op.eval()
            # Region boundaries of the equivalent fractional pooling.
            row_seq = list(range(0, rows + 1, win_rows))
            col_seq = list(range(0, cols + 1, win_cols))
            fmp_op = gen_nn_ops._fractional_max_pool_grad(
                inputs,
                pooled,
                backprop,
                row_seq,
                col_seq,
                overlapping=False)
            fmp_grad = fmp_op.eval()
            self.assertShapeEqual(expected, fmp_op)
            self.assertAllClose(expected, fmp_grad)
示例7: testDirectUseOverlapping
# 需要导入模块: from tensorflow.python.ops import gen_nn_ops [as 别名]
# 或者: from tensorflow.python.ops.gen_nn_ops import _max_pool_grad [as 别名]
def testDirectUseOverlapping(self):
  """Fractional max pool grad with overlapping=True must match plain
  MaxPoolGrad when the pooling regions overlap by exactly one row/col
  (stride == window - 1)."""
  for batches in [1, 3]:
    for win_rows in [2, 5]:
      for win_cols in [2, 4]:
        rows = (win_rows - 1) * 5 + 1
        cols = (win_cols - 1) * 7 + 1
        for channels in [1, 2]:
          shape = (batches, rows, cols, channels)
          with self.test_session() as _:
            inputs = tf.constant(self._GenerateUniqueRandomInputTensor(
                shape))
            window = [1, win_rows, win_cols, 1]
            stride = [1, win_rows - 1, win_cols - 1, 1]
            padding = "VALID"
            pooled = tf.nn.max_pool(inputs, window, stride, padding)
            pooled_data = pooled.eval()
            backprop = self._PRNG.randint(100, size=pooled_data.shape)
            expected_op = gen_nn_ops._max_pool_grad(inputs,
                                                    pooled,
                                                    backprop,
                                                    window,
                                                    stride,
                                                    padding)
            expected = expected_op.eval()
            # Region boundaries of the equivalent fractional pooling; the
            # last boundary is widened by one so the final (overlapping)
            # region covers the window, not just the stride.
            row_seq = list(range(0, rows, win_rows - 1))
            col_seq = list(range(0, cols, win_cols - 1))
            row_seq[-1] += 1
            col_seq[-1] += 1
            fmp_op = gen_nn_ops._fractional_max_pool_grad(
                inputs,
                pooled,
                backprop,
                row_seq,
                col_seq,
                overlapping=True)
            fmp_grad = fmp_op.eval()
            self.assertShapeEqual(expected, fmp_op)
            self.assertAllClose(expected, fmp_grad)
示例8: MaxPoolGrad_FwGrad
# 需要导入模块: from tensorflow.python.ops import gen_nn_ops [as 别名]
# 或者: from tensorflow.python.ops.gen_nn_ops import _max_pool_grad [as 别名]
def MaxPoolGrad_FwGrad(op,
                       dx,
                       dy,
                       dz,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       data_format="NHWC",
                       padding="SAME",
                       _op_table=None,
                       _grad_table=None):
  """Forward gradient operator for the backward gradient of max pooling.

  Applies MaxPoolGrad to dz using op.inputs[0]/op.inputs[1] as the
  original input/output pair; dx and dy do not contribute here.

  NOTE(review): the list defaults for ksize/strides are mutable but never
  mutated in this function, so they are benign; left as-is to preserve the
  visible interface.

  Args:
    op: MaxPoolGrad operator.
    dx: Forward gradient to the input of MaxPool.
    dy: Forward gradient to the output of MaxPool.
    dz: Forward gradient to the backward gradient of the output of MaxPool.
    ksize: Pooling kernel size, [1, rows, cols, 1]. Presumably must match
      the attributes of `op` — confirm against callers.
    strides: Pooling strides, [1, row_stride, col_stride, 1].
    data_format: "NHWC" or "NCHW" layout string passed to the kernel.
    padding: Padding type string ("SAME" or "VALID").
    _op_table: Unused in this body; presumably a registry hook — verify.
    _grad_table: Unused in this body; presumably a registry hook — verify.

  Returns:
    A Tensor (MaxPoolGrad of dz), or None when dz is None.
  """
  if dz is None:
    return None
  return gen_nn_ops._max_pool_grad(
      op.inputs[0],
      op.inputs[1],
      dz,
      ksize,
      strides,
      padding,
      data_format=data_format)