

Python slim.layer_norm Method Code Examples

This article collects typical usage examples of the tensorflow.contrib.slim.layer_norm method in Python. If you are wondering how slim.layer_norm is used in practice, or what calling it looks like in real code, the curated examples below may help. You can also explore further usage examples from the containing module, tensorflow.contrib.slim.


Twelve code examples of the slim.layer_norm method are shown below, sorted by popularity by default.
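Before the individual examples, a minimal usage sketch may help (TensorFlow 1.x, assuming tensorflow.contrib.slim is importable); the tensor shape and scope name below are illustrative only:

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Placeholder standing in for a batch of convolutional feature maps.
features = tf.placeholder(tf.float32, [None, 32, 32, 64])

# layer_norm normalizes each example over its feature dimensions and can
# apply an activation afterwards via activation_fn.
normalized = slim.layer_norm(features, activation_fn=tf.nn.relu, scope='ln')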

Example 1: argscope

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def argscope(is_training=None, normalizer_fn=slim.layer_norm):
  """Default TF argscope used for convnet-based grasping models.

  Args:
    is_training: Whether this argscope is for training or inference.
    normalizer_fn: Which conv/fc normalizer to use.
  Returns:
    Dictionary of argument overrides.
  """
  with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
        weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
        activation_fn=tf.nn.relu,
        normalizer_fn=normalizer_fn):
      with slim.arg_scope(
          [slim.conv2d, slim.max_pool2d], stride=2, padding='VALID') as scope:
        return scope 
Developer: google-research, Project: tensor2robot, Lines of code: 20, Source: tf_modules.py
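The dictionary returned by argscope can be re-applied with slim.arg_scope when building a model. A hedged usage sketch (the `images` tensor is assumed here and is not part of the original example):

with slim.arg_scope(argscope(is_training=True)):
    net = slim.conv2d(images, 64, [3, 3], scope='conv1')
    net = slim.fully_connected(slim.flatten(net), 128, scope='fc1')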

Example 2: __call__

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def __call__(self, inputs, state, scope=None):
        if self._apply_to == 'input':
            with tf.variable_scope(scope or self._name):
                inputs = slim.layer_norm(inputs)
            return self._cell(inputs, state)
        elif self._apply_to == 'output':
            output, res_state = self._cell(inputs, state)
            with tf.variable_scope(scope or self._name):
                output = slim.layer_norm(output)
                return output, res_state
        elif self._apply_to == 'state':
            output, res_state = self._cell(inputs, state)
            with tf.variable_scope(scope or self._name):
                res_state = slim.layer_norm(res_state)
                return output, res_state
        else:
            raise ValueError('Unknown apply_to: "{}"'.format(self._apply_to)) 
Developer: sjoerdvansteenkiste, Project: Neural-EM, Lines of code: 19, Source: network.py
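This __call__ is a method of the repository's LayerNormWrapper RNN-cell wrapper. A hedged construction sketch, mirroring how Example 12 below instantiates the wrapper (the cell size and scope name are illustrative):

# Wrap a basic RNN cell so that layer normalization is applied to its output.
base_cell = tf.contrib.rnn.BasicRNNCell(128)
ln_cell = LayerNormWrapper(base_cell, apply_to='output', name='LayerNormR0')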

Example 3: __call__

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def __call__(self, inputs, state, scope=None):
        if self._apply_to == 'input':
            with tf.variable_scope(scope or self._name):
                inputs = slim.layer_norm(inputs)
            return self._cell(inputs, state)
        elif self._apply_to == 'output':
            output, res_state = self._cell(inputs, state)
            with tf.variable_scope(scope or self._name):
                output = slim.layer_norm(output)
                return output, res_state
        elif self._apply_to == 'state':
            output, res_state = self._cell(inputs, state)
            with tf.variable_scope(scope or self._name):
                res_state = slim.layer_norm(res_state)
                return output, res_state
        else:
            raise ValueError('Unknown apply_to: "{}"'.format(self._apply_to))
            

# R-NEM CELL 
Developer: sjoerdvansteenkiste, Project: Relational-NEM, Lines of code: 22, Source: network.py

Example 4: _build_layer

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def _build_layer(inputs, layer):
        # apply transformation
        if layer['name'] == 'fc':
            out = slim.fully_connected(inputs, layer['size'], activation_fn=None)
        else:
            raise KeyError('Unknown layer "{}"'.format(layer['name']))

        # apply layer normalisation
        if layer.get('ln', False):
            out = slim.layer_norm(out)

        # apply activation function
        if layer.get('act', False):
            out = ACTIVATION_FUNCTIONS[layer['act']](out)

        return out


# NETWORK BUILDER 
Developer: sjoerdvansteenkiste, Project: Relational-NEM, Lines of code: 21, Source: network.py
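A hedged usage sketch of _build_layer with an illustrative layer spec; ACTIVATION_FUNCTIONS is the module-level name-to-function mapping assumed by the original code (e.g. mapping 'relu' to tf.nn.relu):

# Fully connected layer of width 128, followed by layer normalization and ReLU.
layer_spec = {'name': 'fc', 'size': 128, 'ln': True, 'act': 'relu'}
hidden = _build_layer(inputs, layer_spec)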

Example 5: create_network_factory

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def create_network_factory(is_training, num_classes, add_logits,
                           weight_decay=1e-8, reuse=None):

    def factory_fn(image):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                     slim.batch_norm, slim.layer_norm],
                                    reuse=reuse):
                    features, logits = create_network(
                        image, num_classes=num_classes, add_logits=add_logits,
                        reuse=reuse, create_summaries=is_training,
                        weight_decay=weight_decay)
                    return features, logits

    return factory_fn 
Developer: nwojke, Project: cosine_metric_learning, Lines of code: 18, Source: network_definition.py
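A hedged usage sketch: the returned factory is called on an image batch to obtain features and, optionally, logits; `images` and the argument values are illustrative, not taken from the original project:

factory_fn = create_network_factory(
    is_training=True, num_classes=10, add_logits=True)
features, logits = factory_fn(images)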

Example 6: _network_factory

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def _network_factory(weight_decay=1e-8):

    def factory_fn(image, reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=False):
                with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                     slim.batch_norm, slim.layer_norm],
                                    reuse=reuse):
                    features, logits = _create_network(
                        image, reuse=reuse, weight_decay=weight_decay)
                    return features, logits

    return factory_fn 
Developer: nwojke, Project: deep_sort, Lines of code: 15, Source: freeze_model.py

Example 7: _call

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def _call(self, inputs, output_size, is_training):
        inputs = self._subcall(inputs, output_size, is_training)
        if self._spec.get('ln', False):
            inputs = slim.layer_norm(inputs)

        act = self._spec.get('act', False)
        if act:
            activation = ACTIVATION_FUNCTIONS[act]
            return activation(inputs)

        return inputs 
Developer: e2crawfo, Project: auto_yolo, Lines of code: 13, Source: nem.py

Example 8: resnet_arg_scope

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def resnet_arg_scope(weight_decay=0.0001,
                     activation_fn=tf.nn.relu,
                     use_layer_norm=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    activation_fn: The activation function which is used in ResNet.
    use_layer_norm: Whether or not to use layer normalization.

  Returns:
    An `arg_scope` to use for the resnet models.
  """

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=activation_fn,
      normalizer_fn=slim.layer_norm if use_layer_norm else None,
      normalizer_params=None):
    # The following implies padding='SAME' for pool1, which makes feature
    # alignment easier for dense prediction tasks. This is also used in
    # https://github.com/facebook/fb.resnet.torch. However the accompanying
    # code of 'Deep Residual Learning for Image Recognition' uses
    # padding='VALID' for pool1. You can switch to that choice by setting
    # slim.arg_scope([slim.max_pool2d], padding='VALID').
    with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
      return arg_sc 
Developer: jerryli27, Project: TwinGAN, Lines of code: 36, Source: resnet_v2_layernorm.py
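A hedged usage sketch showing the arg scope being applied; `images` is an assumed input tensor and the layer parameters are illustrative:

with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
    net = slim.conv2d(images, 64, [7, 7], stride=2, scope='conv1')
    net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')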

Example 9: get_norm_layer

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def get_norm_layer(norm, training, updates_collections=None):
    if norm == 'none':
        return lambda x: x
    elif norm == 'batch_norm':
        return functools.partial(slim.batch_norm, scale=True, is_training=training, updates_collections=updates_collections)
    elif norm == 'instance_norm':
        return slim.instance_norm
    elif norm == 'layer_norm':
        return slim.layer_norm 
Developer: LynnHo, Project: AttGAN-Tensorflow, Lines of code: 11, Source: utils.py
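A hedged usage sketch: the selected normalizer is typically passed to slim layers as normalizer_fn; `images` is an assumed input tensor:

normalizer_fn = get_norm_layer('layer_norm', training=True)
net = slim.conv2d(images, 64, [3, 3], normalizer_fn=normalizer_fn, scope='conv1')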

Example 10: bottleneck

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None):
  """Bottleneck residual unit variant with BN before convolutions.

  This is the full preactivation residual unit variant proposed in [2]. See
  Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
  variant which has an extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
  with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    preact = slim.layer_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
    if depth == depth_in:
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
                             normalizer_fn=None, activation_fn=None,
                             scope='shortcut')

    residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,
                           scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                        rate=rate, scope='conv2')
    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                           normalizer_fn=None, activation_fn=None,
                           scope='conv3')

    output = shortcut + residual

    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.name,
                                            output) 
Developer: jerryli27, Project: TwinGAN, Lines of code: 49, Source: resnet_v2_layernorm.py
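A hedged usage sketch of a single preactivation bottleneck unit that downsamples by a factor of 2; `features` is an illustrative [batch, height, width, channels] tensor:

net = bottleneck(features, depth=256, depth_bottleneck=64, stride=2, scope='unit_1')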

Example 11: mpi_net

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def mpi_net(inputs, num_outputs, ngf=64, vscope='net', reuse_weights=False):
  """Network definition for multiplane image (MPI) inference.

  Args:
    inputs: stack of input images [batch, height, width, input_channels]
    num_outputs: number of output channels
    ngf: number of features for the first conv layer
    vscope: variable scope
    reuse_weights: whether to reuse weights (for weight sharing)
  Returns:
    pred: network output at the same spatial resolution as the inputs.
  """
  with tf.variable_scope(vscope, reuse=reuse_weights):
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose], normalizer_fn=slim.layer_norm):
      cnv1_1 = slim.conv2d(inputs, ngf, [3, 3], scope='conv1_1', stride=1)
      cnv1_2 = slim.conv2d(cnv1_1, ngf * 2, [3, 3], scope='conv1_2', stride=2)

      cnv2_1 = slim.conv2d(cnv1_2, ngf * 2, [3, 3], scope='conv2_1', stride=1)
      cnv2_2 = slim.conv2d(cnv2_1, ngf * 4, [3, 3], scope='conv2_2', stride=2)

      cnv3_1 = slim.conv2d(cnv2_2, ngf * 4, [3, 3], scope='conv3_1', stride=1)
      cnv3_2 = slim.conv2d(cnv3_1, ngf * 4, [3, 3], scope='conv3_2', stride=1)
      cnv3_3 = slim.conv2d(cnv3_2, ngf * 8, [3, 3], scope='conv3_3', stride=2)

      cnv4_1 = slim.conv2d(
          cnv3_3, ngf * 8, [3, 3], scope='conv4_1', stride=1, rate=2)
      cnv4_2 = slim.conv2d(
          cnv4_1, ngf * 8, [3, 3], scope='conv4_2', stride=1, rate=2)
      cnv4_3 = slim.conv2d(
          cnv4_2, ngf * 8, [3, 3], scope='conv4_3', stride=1, rate=2)

      # Adding skips
      skip = tf.concat([cnv4_3, cnv3_3], axis=3)
      cnv6_1 = slim.conv2d_transpose(
          skip, ngf * 4, [4, 4], scope='conv6_1', stride=2)
      cnv6_2 = slim.conv2d(cnv6_1, ngf * 4, [3, 3], scope='conv6_2', stride=1)
      cnv6_3 = slim.conv2d(cnv6_2, ngf * 4, [3, 3], scope='conv6_3', stride=1)

      skip = tf.concat([cnv6_3, cnv2_2], axis=3)
      cnv7_1 = slim.conv2d_transpose(
          skip, ngf * 2, [4, 4], scope='conv7_1', stride=2)
      cnv7_2 = slim.conv2d(cnv7_1, ngf * 2, [3, 3], scope='conv7_2', stride=1)

      skip = tf.concat([cnv7_2, cnv1_2], axis=3)
      cnv8_1 = slim.conv2d_transpose(
          skip, ngf, [4, 4], scope='conv8_1', stride=2)
      cnv8_2 = slim.conv2d(cnv8_1, ngf, [3, 3], scope='conv8_2', stride=1)

      feat = cnv8_2

      pred = slim.conv2d(
          feat,
          num_outputs, [1, 1],
          stride=1,
          activation_fn=tf.nn.tanh,
          normalizer_fn=None,
          scope='color_pred')
      return pred 
Developer: google, Project: stereo-magnification, Lines of code: 61, Source: nets.py
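A hedged usage sketch: the network maps an input image stack to num_outputs channels at the same spatial resolution; the shapes and channel counts below are illustrative, not taken from the original project:

inputs = tf.placeholder(tf.float32, [1, 256, 256, 6])  # e.g. two stacked RGB images
pred = mpi_net(inputs, num_outputs=64, ngf=64, vscope='mpi_net')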

Example 12: build_network

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import layer_norm [as alias]
def build_network(K, input, recurrent, output):
    with tf.name_scope('inner_RNN'):
        # build recurrent
        for i, layer in enumerate(recurrent):
            if layer['name'] == 'rnn':
                cell = tf.contrib.rnn.BasicRNNCell(layer['size'], activation=ACTIVATION_FUNCTIONS['linear'])
                cell = LayerNormWrapper(cell, apply_to='output', name='LayerNormR{}'.format(i)) if layer.get('ln') else cell
                cell = ActivationFunctionWrapper(cell, activation=layer['act'], apply_to='state')
                cell = ActivationFunctionWrapper(cell, activation=layer['act'], apply_to='output')

            elif layer['name'] == 'lstm':
                cell = tf.contrib.rnn.LayerNormBasicLSTMCell(layer['size'], layer_norm=layer.get('ln', False))

                if layer.get('act'):
                    print("WARNING: activation function arg for LSTM Cell is ignored. Default (tanh) is used in stead.")

            elif layer['name'] == 'r_nem':
                cell = R_NEM(encoder=layer['encoder'],
                             core=layer['core'],
                             context=layer['context'],
                             attention=layer['attention'],
                             actions=layer.get('actions', None),
                             size=layer['size'],
                             K=K)

                cell = LayerNormWrapper(cell, apply_to='output', name='LayerNormR{}'.format(i)) if layer.get('ln') else cell
                cell = ActivationFunctionWrapper(cell, activation=layer['act'], apply_to='state')
                cell = ActivationFunctionWrapper(cell, activation=layer['act'], apply_to='output')
            else:
                raise ValueError('Unknown recurrent name "{}"'.format(layer['name']))

        # build input
        for i, layer in reversed(list(enumerate(input))):
            if layer['name'] == 'reshape':
                cell = ReshapeWrapper(cell, layer['shape'], apply_to='input')
            else:
                cell = ActivationFunctionWrapper(cell, layer['act'], apply_to='input')
                cell = LayerNormWrapper(cell, apply_to='input', name='LayerNormI{}'.format(i)) if layer.get('ln') else cell
                cell = InputWrapper(cell, layer, name="InputWrapper{}".format(i))

        # build output
        for i, layer in enumerate(output):
            if layer['name'] == 'reshape':
                cell = ReshapeWrapper(cell, layer['shape'])
            else:
                n_out = layer.get('n_out', 1)
                cell = OutputWrapper(cell, layer, n_out=n_out, name="OutputWrapper{}".format(i))
                cell = LayerNormWrapper(cell, apply_to='output', name='LayerNormO{}'.format(i)) if layer.get('ln') else cell
                cell = ActivationFunctionWrapper(cell, layer['act'], apply_to='output')

        return cell 
Developer: sjoerdvansteenkiste, Project: Relational-NEM, Lines of code: 53, Source: network.py
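A hedged usage sketch with illustrative layer specs; the exact keys expected by InputWrapper and OutputWrapper are assumptions inferred from the fields read above, so treat this as the shape of the call rather than a verified configuration:

recurrent_spec = [{'name': 'rnn', 'size': 250, 'act': 'sigmoid', 'ln': True}]
input_spec = [{'name': 'fc', 'size': 512, 'act': 'elu', 'ln': True}]
output_spec = [{'name': 'fc', 'size': 784, 'act': 'sigmoid', 'ln': False, 'n_out': 1}]
cell = build_network(K=5, input=input_spec, recurrent=recurrent_spec, output=output_spec)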


Note: The tensorflow.contrib.slim.layer_norm method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.