This page collects typical usage examples of the Python function tensorflow.python.ops.nn.fused_batch_norm. If you have been wondering what exactly fused_batch_norm does, how to call it, or what real code using it looks like, the curated examples here should help.
Below are 10 code examples of fused_batch_norm, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
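Before the examples, here is a minimal sketch of the call itself. It is not taken from any of the examples below; the shapes and values are made up, and it assumes the same TensorFlow 1.x internal imports that the examples use.

# Minimal usage sketch (illustrative only): a training-mode call in a
# TF 1.x graph, fed through placeholders.
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn

x = array_ops.placeholder(np.float32, shape=[2, 4, 4, 3], name="x")    # NHWC input
scale = array_ops.placeholder(np.float32, shape=[3], name="scale")     # gamma, one per channel
offset = array_ops.placeholder(np.float32, shape=[3], name="offset")   # beta, one per channel

# In training mode the batch statistics are computed internally, and the
# op returns (normalized_output, batch_mean, batch_variance).
y, batch_mean, batch_var = nn.fused_batch_norm(
    x, scale, offset, epsilon=0.001, data_format="NHWC", is_training=True)

with session.Session() as sess:
  y_val = sess.run(y, {x: np.random.rand(2, 4, 4, 3).astype(np.float32),
                       scale: np.ones([3], np.float32),
                       offset: np.zeros([3], np.float32)})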
Example 1: testSplitWithNonConstAxis
def testSplitWithNonConstAxis(self):
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    dim = array_ops.placeholder(dtype='int32')
    split = array_ops.split(conv, 2, axis=dim)
    scale = constant_op.constant(0.1, shape=[32])
    offset = constant_op.constant(0.3, shape=[32])
    bn0 = nn.fused_batch_norm(split[0], scale, offset)
    bn1 = nn.fused_batch_norm(split[1], scale, offset)
    add = bn0[0] + bn1[0]
    output = array_ops.identity(add)

    # Reference run with a default session config.
    with session.Session() as sess:
      output_val_ref = sess.run(output, feed_dict={dim: 3})

    # Run again with the custom config and capture run metadata.
    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})

    # Count the transpose nodes recorded in the cost graph.
    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)

    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('add_2-0-0', nodes)
    self._assert_map_nhwc_to_nchw('split-0', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
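Example 1 compares a plain session against one built with `_get_config()`, a helper that is not shown on this page. A plausible shape for such a helper, assuming its job is to enable Grappler's layout optimizer and build a cost model (this is a hypothetical reconstruction, not the actual definition):

# Sketch of a session config enabling the layout optimizer, in the
# spirit of the _get_config() helper used above (hypothetical).
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2

def _layout_optimizer_config():
  rewrite_options = rewriter_config_pb2.RewriterConfig(
      layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
  graph_options = config_pb2.GraphOptions(
      rewrite_options=rewrite_options, build_cost_model=1)
  return config_pb2.ConfigProto(graph_options=graph_options)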
Example 2: _fused_batch_norm_training
def _fused_batch_norm_training():
  return nn.fused_batch_norm(
      inputs,
      gamma,
      beta,
      epsilon=self.epsilon,
      data_format=self._data_format)
Example 3: loop_fn
def loop_fn(i):
  with g:
    x1 = array_ops.gather(x, i)
    outputs = nn.fused_batch_norm(
        x1,
        scale,
        offset,
        mean=mean,
        variance=variance,
        epsilon=0.01,
        data_format=data_format,
        is_training=is_training)
    outputs = list(outputs)
    # We only test the first value of outputs when is_training is False.
    # It looks like CPU and GPU have different outputs for batch_mean
    # and batch_variance for this case.
    if not is_training:
      outputs[1] = constant_op.constant(0.)
      outputs[2] = constant_op.constant(0.)
    loss = nn.l2_loss(outputs[0])
  if is_training:
    gradients = g.gradient(loss, [x1, scale, offset])
  else:
    gradients = [constant_op.constant(0.)] * 3
  return outputs + gradients
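Example 3 uses a tape object `g` created outside the snippet. A plausible reading, assuming `g` is a persistent gradient tape that loop_fn re-enters on each iteration (the setup below is a guess at the elided context, not part of the original example):

# Hypothetical setup for Example 3: `g` as a persistent GradientTape.
# All tensors and shapes here are illustrative.
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op

x = constant_op.constant(np.random.rand(4, 2, 3, 3, 2).astype(np.float32))
scale = constant_op.constant(np.ones([2], np.float32))
offset = constant_op.constant(np.zeros([2], np.float32))

g = backprop.GradientTape(persistent=True)
with g:
  # Watch the inputs so g.gradient(...) inside loop_fn can
  # differentiate with respect to them.
  g.watch([x, scale, offset])
# Re-entering `with g:` inside loop_fn resumes recording onto the same
# tape, which is what makes the per-iteration g.gradient call work.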
Example 4: testInference
def testInference(self):
  channel = 3
  x_shape = [2, 2, 6, channel]
  scale_shape = [channel]
  x_val = np.random.random_sample(x_shape).astype(np.float32)
  scale_val = np.random.random_sample(scale_shape).astype(np.float32)
  offset_val = np.random.random_sample(scale_shape).astype(np.float32)
  data_format = "NHWC"
  with self.test_session() as sess, self.test_scope():
    # To avoid constant folding
    t_val = array_ops.placeholder(np.float32, shape=x_shape, name="x")
    scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
    offset = array_ops.placeholder(np.float32, shape=scale_shape, name="offset")
    epsilon = 0.001
    y_ref, mean_ref, var_ref = self._reference_training(
        x_val, scale_val, offset_val, epsilon, data_format)
    y, mean, variance = nn.fused_batch_norm(
        t_val,
        scale,
        offset,
        mean=mean_ref,
        variance=var_ref,
        epsilon=epsilon,
        data_format=data_format,
        is_training=False)
    y_val, _, _ = sess.run(
        [y, mean, variance],
        {t_val: x_val, scale: scale_val, offset: offset_val})
    self.assertAllClose(y_val, y_ref, atol=1e-3)
Example 5: testBasic
def testBasic(self):
  x_shape = [2, 2, 6, 2]
  scale_shape = [2]
  x_val = np.random.random_sample(x_shape).astype(np.float32)
  scale_val = np.random.random_sample(scale_shape).astype(np.float32)
  offset_val = np.random.random_sample(scale_shape).astype(np.float32)
  mean_val = np.random.random_sample(scale_shape).astype(np.float32)
  var_val = np.random.random_sample(scale_shape).astype(np.float32)
  data_format = "NHWC"
  with self.test_session() as sess, self.test_scope():
    # To avoid constant folding
    t_val = array_ops.placeholder(np.float32, shape=x_shape, name="x")
    scale = array_ops.placeholder(np.float32, shape=[2], name="scale")
    offset = array_ops.placeholder(np.float32, shape=[2], name="offset")
    epsilon = 0.001
    y, mean, var = nn.fused_batch_norm(
        t_val,
        scale,
        offset,
        mean=None,
        variance=None,
        epsilon=epsilon,
        data_format=data_format,
        is_training=True)
    y_val, mean_val, var_val = sess.run(
        [y, mean, var],
        {t_val: x_val, scale: scale_val, offset: offset_val})
    y_ref, mean_ref, var_ref = self._reference_training(
        x_val, scale_val, offset_val, epsilon, data_format)
    self.assertAllClose(mean_val, mean_ref, atol=1e-3)
    self.assertAllClose(y_val, y_ref, atol=1e-3)
    self.assertAllClose(var_val, var_ref, atol=1e-3)
Example 6: _testLearning
def _testLearning(self, use_gradient_checker, data_format):
  channel = 3
  x_shape = [2, 2, 6, channel]
  scale_shape = [channel]
  x_val = np.random.random_sample(x_shape).astype(np.float32)
  scale_val = np.random.random_sample(scale_shape).astype(np.float32)
  offset_val = np.random.random_sample(scale_shape).astype(np.float32)
  mean_val = np.random.random_sample(scale_shape).astype(np.float32)
  var_val = np.random.random_sample(scale_shape).astype(np.float32)
  epsilon = 0.001
  data_format_src = "NHWC"
  # When in training mode, fused_batchnorm applies an implicit Bessel's
  # correction. So we have to use the corrected variance here, as well.
  y_ref, mean_ref, _, var_ref_corr = self._reference_training(
      x_val, scale_val, offset_val, epsilon, data_format_src)

  with self.cached_session() as sess, self.test_scope():
    # To avoid constant folding
    x_val_converted = test_utils.ConvertBetweenDataFormats(
        x_val, data_format_src, data_format)
    y_ref_converted = test_utils.ConvertBetweenDataFormats(
        y_ref, data_format_src, data_format)

    t_val = array_ops.placeholder(
        np.float32, shape=x_val_converted.shape, name="x")
    scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
    offset = array_ops.placeholder(
        np.float32, shape=scale_shape, name="offset")
    y, mean, var = nn.fused_batch_norm(
        t_val,
        scale,
        offset,
        mean=None,
        variance=None,
        epsilon=epsilon,
        data_format=data_format,
        is_training=True)
    # Check gradient.
    if use_gradient_checker:
      err = gradient_checker.compute_gradient_error(
          t_val,
          x_val_converted.shape,
          y,
          x_val_converted.shape,
          extra_feed_dict={
              t_val: x_val_converted,
              scale: scale_val,
              offset: offset_val
          })
      self.assertLess(err, 1e-3)

    y_val, mean_val, var_val = sess.run([y, mean, var], {
        t_val: x_val_converted,
        scale: scale_val,
        offset: offset_val
    })
    self.assertAllClose(mean_val, mean_ref, atol=1e-3)
    self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
    self.assertAllClose(var_val, var_ref_corr, atol=1e-3)
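The Bessel's correction comment in Example 6 is worth unpacking: in training mode the normalization itself uses the plain batch variance (divide by N), while the variance the op returns is scaled by N/(N-1). A small NumPy illustration of that relationship (not from the test suite; shapes merely mirror the test):

# NumPy illustration of the Bessel's correction noted in Example 6.
import numpy as np

x = np.random.random_sample([2, 2, 6, 3]).astype(np.float32)  # NHWC
n = x.shape[0] * x.shape[1] * x.shape[2]   # elements reduced per channel

pop_var = x.var(axis=(0, 1, 2))            # divide by N: used to normalize
sample_var = pop_var * n / (n - 1)         # Bessel-corrected: what the op returns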
Example 7: _model_with_second_port
def _model_with_second_port():
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([2, 5, 5, 4], seed=0)
  scale = constant_op.constant(0.1, shape=[4])
  offset = constant_op.constant(0.3, shape=[4])
  y, mean, _ = nn.fused_batch_norm(x, scale, offset)
  mul = math_ops.add(y, mean)
  output = array_ops.identity(mul)
  return output
Example 8: _fused_batch_norm_inference
def _fused_batch_norm_inference():
  return nn.fused_batch_norm(
      inputs,
      gamma,
      beta,
      mean=self.moving_mean,
      variance=self.moving_variance,
      epsilon=self.epsilon,
      is_training=False,
      data_format=self._data_format)
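Examples 2 and 8 read like the two branches of a batch-norm layer's call path. A sketch of how such a layer might select between them, modeled loosely on the tf.layers implementation (the `training` tensor and this dispatch are assumptions, not part of either example, and the branch functions come from Examples 2 and 8):

# Hypothetical dispatch between Examples 2 and 8, assuming `training` is
# a scalar boolean tensor provided by the enclosing layer. cond traces
# both branches and runs one of them at execution time.
from tensorflow.python.ops import control_flow_ops

output, mean, variance = control_flow_ops.cond(
    training,
    _fused_batch_norm_training,    # Example 2: batch statistics
    _fused_batch_norm_inference)   # Example 8: moving statistics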
Example 9: _testLearning
def _testLearning(self, use_gradient_checker):
  channel = 3
  x_shape = [2, 2, 6, channel]
  scale_shape = [channel]
  x_val = np.random.random_sample(x_shape).astype(np.float32)
  scale_val = np.random.random_sample(scale_shape).astype(np.float32)
  offset_val = np.random.random_sample(scale_shape).astype(np.float32)
  mean_val = np.random.random_sample(scale_shape).astype(np.float32)
  var_val = np.random.random_sample(scale_shape).astype(np.float32)
  data_format = "NHWC"
  with self.test_session() as sess, self.test_scope():
    # To avoid constant folding
    t_val = array_ops.placeholder(np.float32, shape=x_shape, name="x")
    scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
    offset = array_ops.placeholder(
        np.float32, shape=scale_shape, name="offset")
    epsilon = 0.001
    y, mean, var = nn.fused_batch_norm(
        t_val,
        scale,
        offset,
        mean=None,
        variance=None,
        epsilon=epsilon,
        data_format=data_format,
        is_training=True)
    # Check gradient.
    if use_gradient_checker:
      err = gradient_checker.compute_gradient_error(
          t_val,
          x_shape,
          y,
          x_shape,
          extra_feed_dict={
              t_val: x_val,
              scale: scale_val,
              offset: offset_val
          })
      self.assertLess(err, 1e-3)
    y_val, mean_val, var_val = sess.run(
        [y, mean, var],
        {t_val: x_val, scale: scale_val, offset: offset_val})
    y_ref, mean_ref, var_ref = self._reference_training(
        x_val, scale_val, offset_val, epsilon, data_format)
    self.assertAllClose(mean_val, mean_ref, atol=1e-3)
    self.assertAllClose(y_val, y_ref, atol=1e-3)
    self.assertAllClose(var_val, var_ref, atol=1e-3)
Example 10: _testLearning
def _testLearning(self, use_gradient_checker, data_format):
  channel = 3
  x_shape = [2, 2, 6, channel]
  scale_shape = [channel]
  x_val = np.random.random_sample(x_shape).astype(np.float32)
  scale_val = np.random.random_sample(scale_shape).astype(np.float32)
  offset_val = np.random.random_sample(scale_shape).astype(np.float32)
  mean_val = np.random.random_sample(scale_shape).astype(np.float32)
  var_val = np.random.random_sample(scale_shape).astype(np.float32)
  epsilon = 0.001
  data_format_src = "NHWC"
  y_ref, mean_ref, var_ref = self._reference_training(
      x_val, scale_val, offset_val, epsilon, data_format_src)

  # TODO(b/110530713): Support data format HWCN on GPU
  if self.device == "XLA_GPU" and data_format == "HWCN":
    self.skipTest("GPU does not support data format HWCN.")

  with self.test_session() as sess, self.test_scope():
    # To avoid constant folding
    x_val_converted = test_utils.ConvertBetweenDataFormats(
        x_val, data_format_src, data_format)
    y_ref_converted = test_utils.ConvertBetweenDataFormats(
        y_ref, data_format_src, data_format)

    t_val = array_ops.placeholder(
        np.float32, shape=x_val_converted.shape, name="x")
    scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
    offset = array_ops.placeholder(
        np.float32, shape=scale_shape, name="offset")
    y, mean, var = nn.fused_batch_norm(
        t_val,
        scale,
        offset,
        mean=None,
        variance=None,
        epsilon=epsilon,
        data_format=data_format,
        is_training=True)
    # Check gradient.
    if use_gradient_checker:
      err = gradient_checker.compute_gradient_error(
          t_val,
          x_val_converted.shape,
          y,
          x_val_converted.shape,
          extra_feed_dict={
              t_val: x_val_converted,
              scale: scale_val,
              offset: offset_val
          })
      self.assertLess(err, 1e-3)
    y_val, mean_val, var_val = sess.run([y, mean, var], {
        t_val: x_val_converted,
        scale: scale_val,
        offset: offset_val
    })
    self.assertAllClose(mean_val, mean_ref, atol=1e-3)
    self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
    self.assertAllClose(var_val, var_ref, atol=1e-3)