Python layers.batch_norm Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.layers.batch_norm. If you are wondering what exactly layers.batch_norm does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples from tensorflow.contrib.layers, the module this method belongs to.


The following 15 code examples of layers.batch_norm are shown, sorted by popularity by default.
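Before the examples, a minimal sketch of the most common call pattern may help (an illustration assuming TensorFlow 1.x; the tensor names and shapes below are made up, not taken from any project in this article). The detail that most often trips users up is that batch_norm registers its moving-average updates in tf.GraphKeys.UPDATE_OPS, and those ops must run alongside the train op unless updates_collections=None is passed:

import tensorflow as tf
from tensorflow.contrib import layers

# Illustrative placeholders; the names and shapes are assumptions.
images = tf.placeholder(tf.float32, [None, 28, 28, 3])
is_training = tf.placeholder(tf.bool)

# Attach batch_norm to a layer via normalizer_fn/normalizer_params.
net = layers.conv2d(images, num_outputs=16, kernel_size=3,
                    normalizer_fn=layers.batch_norm,
                    normalizer_params={'is_training': is_training})
loss = tf.reduce_mean(net)

# The moving-average updates live in UPDATE_OPS; tie them to the train op.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)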

Example 1: var_dropout

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def var_dropout(x, n, net_size, n_particles, is_training):
    normalizer_params = {'is_training': is_training,
                         'updates_collections': None}
    bn = zs.BayesianNet()
    h = x
    for i, (n_in, n_out) in enumerate(zip(net_size[:-1], net_size[1:])):
        # Multiplicative Gaussian noise per input unit: the core of variational dropout.
        eps_mean = tf.ones([n, n_in])
        eps = bn.normal(
            'layer' + str(i) + '/eps', eps_mean, std=1.,
            n_samples=n_particles, group_ndims=1)
        h = layers.fully_connected(
            h * eps, n_out, normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        if i < len(net_size) - 2:
            h = tf.nn.relu(h)
    y = bn.categorical('y', h)
    bn.deterministic('y_logit', h)
    return bn
Developer: thu-ml, Project: zhusuan, Lines: 20, Source: variational_dropout.py

Example 2: resBlock

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def resBlock(x, num_outputs, kernel_size=4, stride=1, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm,
    scope=None):
  assert num_outputs % 2 == 0  # num_outputs must be divisible by the channel factor (2 here)
  with tf.variable_scope(scope, 'resBlock'):
    shortcut = x
    if stride != 1 or x.get_shape()[3] != num_outputs:
      shortcut = tcl.conv2d(shortcut, num_outputs, kernel_size=1, stride=stride,
        activation_fn=None, normalizer_fn=None, scope='shortcut')
    x = tcl.conv2d(x, num_outputs // 2, kernel_size=1, stride=1, padding='SAME')
    x = tcl.conv2d(x, num_outputs // 2, kernel_size=kernel_size, stride=stride, padding='SAME')
    x = tcl.conv2d(x, num_outputs, kernel_size=1, stride=1, activation_fn=None, padding='SAME', normalizer_fn=None)

    x += shortcut
    x = normalizer_fn(x)
    x = activation_fn(x)
  return x 
Developer: joseph-zhong, Project: LipReading, Lines: 18, Source: prnet.py
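A design note on resBlock: this is the standard bottleneck layout. The first 1x1 convolution halves the channel count (hence the divisibility assertion), the kernel_size convolution does the spatial work at the reduced width, and the final 1x1 convolution, with no activation or normalizer of its own, restores num_outputs. Normalization and activation are applied only after the shortcut addition, making this a post-activation residual block.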

Example 3: testAddN

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def testAddN(self):
    inputs = tf.zeros([2, 4, 4, 3])
    identity1 = tf.identity(inputs)
    identity2 = tf.identity(inputs)
    identity3 = tf.identity(inputs)
    identity4 = tf.identity(inputs)
    add_n = tf.add_n([identity1, identity2, identity3, identity4])
    batch_norm = layers.batch_norm(add_n)

    manager = orm.OpRegularizerManager(
        [batch_norm.op], op_handler_dict=self._default_op_handler_dict)

    op_slices = manager.get_op_slices(identity1.op)
    self.assertLen(op_slices, 1)
    op_group = manager.get_op_group(op_slices[0]).op_slices

    # Verify all ops are in the same group.
    for test_op in (identity1.op, identity2.op, identity3.op, identity4.op,
                    add_n.op, batch_norm.op):
      test_op_slices = manager.get_op_slices(test_op)
      self.assertLen(test_op_slices, 1)
      self.assertIn(test_op_slices[0], op_group) 
Developer: google-research, Project: morph-net, Lines: 24, Source: op_regularizer_manager_test.py

Example 4: testAddN_Duplicates

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def testAddN_Duplicates(self):
    inputs = tf.zeros([2, 4, 4, 3])
    identity = tf.identity(inputs)
    add_n = tf.add_n([identity, identity, identity, identity])
    batch_norm = layers.batch_norm(add_n)

    manager = orm.OpRegularizerManager(
        [batch_norm.op], op_handler_dict=self._default_op_handler_dict)

    op_slices = manager.get_op_slices(identity.op)
    self.assertLen(op_slices, 1)
    op_group = manager.get_op_group(op_slices[0]).op_slices

    # Verify all ops are in the same group.
    for test_op in (identity.op, add_n.op, batch_norm.op):
      test_op_slices = manager.get_op_slices(test_op)
      self.assertLen(test_op_slices, 1)
      self.assertIn(test_op_slices[0], op_group) 
Developer: google-research, Project: morph-net, Lines: 20, Source: op_regularizer_manager_test.py

Example 5: testProcessOps_DuplicatesRemoved

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def testProcessOps_DuplicatesRemoved(self):
    inputs = tf.zeros([2, 4, 4, 3])
    batch_norm = layers.batch_norm(inputs)
    identity1 = tf.identity(batch_norm)
    identity2 = tf.identity(batch_norm)

    manager = orm.OpRegularizerManager(
        [identity1.op, identity2.op],
        op_handler_dict=self._default_op_handler_dict)
    manager.process_ops([identity1.op, identity2.op, batch_norm.op])
    # Try to process the same ops again.
    manager.process_ops([identity1.op, identity2.op, batch_norm.op])

    self.assertLen(manager._op_deque, 3)
    self.assertEqual(batch_norm.op, manager._op_deque.pop())
    self.assertEqual(identity2.op, manager._op_deque.pop())
    self.assertEqual(identity1.op, manager._op_deque.pop()) 
Developer: google-research, Project: morph-net, Lines: 19, Source: op_regularizer_manager_test.py

Example 6: testProcessOpsLast

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def testProcessOpsLast(self):
    inputs = tf.zeros([2, 4, 4, 3])
    batch_norm = layers.batch_norm(inputs)
    identity1 = tf.identity(batch_norm)
    identity2 = tf.identity(batch_norm)

    manager = orm.OpRegularizerManager(
        [identity1.op, identity2.op],
        op_handler_dict=self._default_op_handler_dict)
    manager.process_ops([identity1.op])
    manager.process_ops_last([identity2.op, batch_norm.op])

    self.assertLen(manager._op_deque, 3)
    self.assertEqual(identity1.op, manager._op_deque.pop())
    self.assertEqual(identity2.op, manager._op_deque.pop())
    self.assertEqual(batch_norm.op, manager._op_deque.pop()) 
Developer: google-research, Project: morph-net, Lines: 18, Source: op_regularizer_manager_test.py

Example 7: testProcessOpsLast_DuplicatesRemoved

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def testProcessOpsLast_DuplicatesRemoved(self):
    inputs = tf.zeros([2, 4, 4, 3])
    batch_norm = layers.batch_norm(inputs)
    identity1 = tf.identity(batch_norm)
    identity2 = tf.identity(batch_norm)

    manager = orm.OpRegularizerManager(
        [identity1.op, identity2.op],
        op_handler_dict=self._default_op_handler_dict)
    manager.process_ops([identity1.op])
    manager.process_ops_last([identity2.op, batch_norm.op])
    # Try to process the same ops again.
    manager.process_ops_last([identity2.op, batch_norm.op])

    self.assertLen(manager._op_deque, 3)
    self.assertEqual(identity1.op, manager._op_deque.pop())
    self.assertEqual(identity2.op, manager._op_deque.pop())
    self.assertEqual(batch_norm.op, manager._op_deque.pop()) 
Developer: google-research, Project: morph-net, Lines: 20, Source: op_regularizer_manager_test.py

Example 8: testCorrectSourceOpsWithSkipConnection

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def testCorrectSourceOpsWithSkipConnection(self):
    inputs = tf.zeros([2, 4, 4, 3])
    x0 = layers.conv2d(
        inputs, num_outputs=8, kernel_size=3, activation_fn=None, scope='conv0')
    x1 = tf.nn.relu(layers.batch_norm(x0, scale=True, scope='bn0'))
    x1 = layers.conv2d(
        x1, num_outputs=8, kernel_size=3, activation_fn=None, scope='conv1')
    x2 = tf.add_n([x0, x1], name='add')
    final_op = tf.nn.relu(layers.batch_norm(x2, scale=True, scope='bn1'))

    op_handler_dict = self._default_op_handler_dict
    op_reg_manager = orm.OpRegularizerManager([final_op.op], op_handler_dict)

    # All ops are in the same group
    group = list(op_reg_manager._op_group_dict.values())[0]
    source_op_names = [s.op.name for s in group.source_op_slices]
    self.assertSetEqual(set(['bn0/FusedBatchNormV3', 'bn1/FusedBatchNormV3']),
                        set(source_op_names)) 
Developer: google-research, Project: morph-net, Lines: 20, Source: op_regularizer_manager_test.py

Example 9: resnet_arg_scope

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def resnet_arg_scope(
        weight_decay=0.0001,
        batch_norm_decay=0.997,
        batch_norm_epsilon=1e-5,
        batch_norm_scale=True,
):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
    }
    l2_regularizer = layers.l2_regularizer(weight_decay)

    arg_scope_layers = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d, layers.fully_connected],
        weights_initializer=layers.variance_scaling_initializer(),
        weights_regularizer=l2_regularizer,
        activation_fn=tf.nn.relu)
    arg_scope_conv = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d],
        normalizer_fn=layers.batch_norm,
        normalizer_params=batch_norm_params)
    with arg_scope_layers, arg_scope_conv as arg_sc:
        return arg_sc 
Developer: rwightman, Project: tensorflow-litterbox, Lines: 26, Source: build_resnet.py
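A short usage sketch for the scope returned above (an illustration, not code from the original project; it assumes arg_scope is tf.contrib.framework.arg_scope as in the snippet, and the input placeholder is invented):

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with arg_scope(resnet_arg_scope(weight_decay=0.0001)):
    # Every conv2d here inherits the variance-scaling initializer, the L2
    # regularizer, ReLU activation, and batch_norm as its normalizer_fn.
    net = layers.conv2d(images, num_outputs=64, kernel_size=7, stride=2, scope='conv1')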

Example 10: _batch_norm_default

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def _batch_norm_default(self, bottom, scope, eps=1e-3, center=True, scale=True):
    if hasattr(self, 'bn_decay'):
        decay = self.bn_decay
    else:
        decay = 0.9
    if hasattr(self, 'need_gamma'):
        need_gamma = self.need_gamma
    else:
        need_gamma = scale
    if hasattr(self, 'need_beta'):
        need_beta = self.need_beta
    else:
        need_beta = center
    return batch_norm(inputs=bottom, decay=decay, center=need_beta, scale=need_gamma,
                      activation_fn=None, is_training=self.training, scope=scope, epsilon=eps)
Developer: DingXiaoH, Project: Centripetal-SGD, Lines: 18, Source: tfm_builder_densenet.py

Example 11: get_arg_scope

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def get_arg_scope(is_training):
    weight_decay_l2 = 0.1
    batch_norm_decay = 0.999
    batch_norm_epsilon = 0.0001

    with slim.arg_scope([slim.conv2d, slim.fully_connected, layers.separable_convolution2d],
                        weights_regularizer=slim.l2_regularizer(weight_decay_l2),
                        biases_regularizer=slim.l2_regularizer(weight_decay_l2),
                        weights_initializer=layers.variance_scaling_initializer()):
        batch_norm_params = {
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon
        }
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            with slim.arg_scope([slim.batch_norm],
                                **batch_norm_params):
                with slim.arg_scope([slim.conv2d, layers.separable_convolution2d, layers.fully_connected],
                                    activation_fn=tf.nn.elu,
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=batch_norm_params) as scope:
                    return scope
Developer: marian-margeta, Project: gait-recognition, Lines: 25, Source: gait_nn.py

Example 12: __call__

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def __call__(self, i):
    with tf.variable_scope(self.name):
        if self.reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False
            self.reuse = True
        g = tcl.fully_connected(i, self.size * self.size * 1024, activation_fn=tf.nn.relu,
                                normalizer_fn=tcl.batch_norm)
        g = tf.reshape(g, (-1, self.size, self.size, 1024))  # size
        g = tcl.conv2d_transpose(g, 512, 3, stride=2,  # size*2
                                 activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm,
                                 padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
        g = tcl.conv2d_transpose(g, 256, 3, stride=2,  # size*4
                                 activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm,
                                 padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
        g = tcl.conv2d_transpose(g, 128, 3, stride=2,  # size*8
                                 activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm,
                                 padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
        g = tcl.conv2d_transpose(g, self.channel, 3, stride=2,  # size*16
                                 activation_fn=tf.nn.sigmoid, padding='SAME',
                                 weights_initializer=tf.random_normal_initializer(0, 0.02))
        return g
Developer: yanzhicong, Project: VAE-GAN, Lines: 24, Source: generator_conv.py

Example 13: deconv2d_bn_lrn_drop

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def deconv2d_bn_lrn_drop(scope_or_name, inputs, kernel_shape, out_shape, subS=2, activation=tf.nn.relu,
                         use_bn=False,
                         use_mvn=False,
                         is_training=True,
                         use_lrn=False,
                         keep_prob=1.0,
                         dropout_maps=False,
                         initOpt=0):
    with tf.variable_scope(scope_or_name):
        if initOpt == 0:
            stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + kernel_shape[3]))
        elif initOpt == 1:
            stddev = 5e-2
        elif initOpt == 2:
            stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
        kernel = tf.get_variable("weights", kernel_shape,
                                 initializer=tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", kernel_shape[2],
                               initializer=tf.constant_initializer(value=0.1))
        conv = tf.nn.conv2d_transpose(inputs, kernel, out_shape, strides=[1, subS, subS, 1], padding='SAME', name='conv')
        outputs = tf.nn.bias_add(conv, bias, name='preActivation')
        if use_bn:
            # outputs = tf.layers.batch_normalization(outputs, axis=3, training=is_training, name="batchNorm")
            outputs = batch_norm(outputs, is_training=is_training, scale=True, fused=True, scope="batchNorm")
        if use_mvn:
            outputs = feat_norm(outputs, kernel_shape[3])
        if activation:
            outputs = activation(outputs, name='activation')
        if use_lrn:
            outputs = tf.nn.local_response_normalization(outputs, name='localResponseNorm')
        if dropout_maps:
            conv_shape = tf.shape(outputs)
            n_shape = tf.stack([conv_shape[0], 1, 1, conv_shape[3]])
            outputs = tf.nn.dropout(outputs, keep_prob, noise_shape=n_shape)
        else:
            outputs = tf.nn.dropout(outputs, keep_prob)
        return outputs 
Developer: TobiasGruening, Project: ARU-Net, Lines: 39, Source: layers.py

Example 14: Batch_Normalization

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True):
        return tf.cond(training,
                       lambda: batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda: batch_norm(inputs=x, is_training=training, reuse=True))
Developer: taki0112, Project: ResNeXt-Tensorflow, Lines: 13, Source: ResNeXt.py
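A note on this pattern: tf.cond traces both branches when the graph is built, so batch_norm is called twice. The first call (reuse=None) creates the beta/gamma and moving-average variables for the training path, and the second (reuse=True) shares those same variables for the inference path. updates_collections=None makes the moving averages update in place instead of through tf.GraphKeys.UPDATE_OPS, at some cost in speed but with no need to run the update ops separately.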

Example 15: convbn

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import batch_norm [as alias]
def convbn(*args, **kwargs):
    scope = kwargs.pop('scope', None)
    with tf.variable_scope(scope):
        return batch_norm(conv2d(*args, **kwargs)) 
Developer: taehoonlee, Project: tensornets, Lines: 6, Source: layers.py


Note: The tensorflow.contrib.layers.batch_norm examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and copyright for the source code remains with the original authors. Consult each project's license before using or redistributing the code; do not repost without permission.