

Python sonnet.BatchNorm Method Code Examples

This article collects typical usage examples of the Python method sonnet.BatchNorm, gathered from open-source projects. If you are unsure what sonnet.BatchNorm does, how to call it, or simply want to see it used in context, the curated examples below should help. You can also explore further usage examples from the sonnet package.


The sections below present 15 code examples of the sonnet.BatchNorm method, sorted by popularity by default.
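Before the individual examples, here is a minimal sketch of the common calling pattern. It assumes TensorFlow 1.x with Sonnet v1; the placeholder shapes are illustrative and not taken from any example below.

import sonnet as snt
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 28, 28, 3])
is_training = tf.placeholder(tf.bool, [])

bn = snt.BatchNorm()
# With is_training=True the batch statistics are used and the moving
# averages are updated; with is_training=False and test_local_stats=False
# the stored moving averages are used instead.
outputs = bn(inputs, is_training=is_training, test_local_stats=False)

Depending on the module's update_ops_collection setting, the moving-average update ops may be placed in a graph collection that has to be run alongside the train op.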

Example 1: _build

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def _build(self, inputs, is_training):
    """Connects the module to inputs.

    Args:
      inputs: Inputs to the Unit3D component.
      is_training: whether to use training mode for snt.BatchNorm (boolean).

    Returns:
      Outputs from the module.
    """
    net = snt.Conv3D(output_channels=self._output_channels,
                     kernel_shape=self._kernel_shape,
                     stride=self._stride,
                     padding=snt.SAME,
                     use_bias=self._use_bias)(inputs)
    if self._use_batch_norm:
      bn = snt.BatchNorm()
      net = bn(net, is_training=is_training, test_local_stats=False)
    if self._activation_fn is not None:
      net = self._activation_fn(net)
    return net 
Author: LossNAN, Project: I3D-Tensorflow, Lines: 23, Source: i3d.py

Example 2: _build

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def _build(self, inputs, is_training):
    """Connects the module to inputs.

    Args:
      inputs: Inputs to the Unit3D component.
      is_training: whether to use training mode for snt.BatchNorm (boolean).

    Returns:
      Outputs from the module.
    """
    net = snt.Conv3D(output_channels=self._output_channels,
                     kernel_shape=self._kernel_shape,
                     stride=self._stride,
                     padding=snt.SAME,
                     use_bias=self._use_bias)(inputs)
    if self._use_batch_norm:
      bn = snt.BatchNorm()
      #################### Warning: batch norm is hard-coded to is_training=False ####################
      # net = bn(net, is_training=is_training, test_local_stats=False)
      net = bn(net, is_training=False, test_local_stats=False)
    if self._activation_fn is not None:
      net = self._activation_fn(net)
    return net 
Author: oulutan, Project: ACAM_Demo, Lines: 25, Source: i3d.py
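A note on Example 2: freezing batch norm this way makes the layer always normalize with its stored moving averages, so a pretrained I3D model's statistics do not drift during fine-tuning or demo inference; only the commented-out line would restore genuine train-time behavior.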

Example 3: _build

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def _build(self, inputs, is_training):
    """Connects the module to inputs.

    Args:
      inputs: Inputs to the Unit3Dtf component.
      is_training: whether to use training mode for snt.BatchNorm (boolean).

    Returns:
      Outputs from the module.
    """
    net = snt.Conv3D(
        output_channels=self._output_channels,
        kernel_shape=self._kernel_shape,
        stride=self._stride,
        padding=snt.SAME,
        use_bias=self._use_bias)(inputs)
    if self._use_batch_norm:
        bn = snt.BatchNorm()
        net = bn(net, is_training=is_training, test_local_stats=False)
    if self._activation_fn is not None:
        net = self._activation_fn(net)

    return net
Author: hassony2, Project: kinetics_i3d_pytorch, Lines: 25, Source: i3dtf.py

Example 4: _build

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def _build(self, x):
    # x is [units, bs, 1]
    net = tf.transpose(x, [1, 0, 2])  # now [bs, units, 1]
    channels = x.shape.as_list()[2]
    mod = snt.Conv1D(output_channels=channels, kernel_shape=[3])
    net = mod(net)
    net = snt.BatchNorm(axis=[0, 1])(net, is_training=False)
    net = tf.nn.relu(net)
    mod = snt.Conv1D(output_channels=channels, kernel_shape=[3])
    net = mod(net)
    net = snt.BatchNorm(axis=[0, 1])(net, is_training=False)
    net = tf.nn.relu(net)
    to_concat = tf.transpose(net, [1, 0, 2])
    if self.add:
      return x + to_concat
    else:
      return tf.concat([x, to_concat], 2) 
Author: itsamitgoel, Project: Gun-Detector, Lines: 19, Source: more_local_weight_update.py

Example 5: custom_build

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def custom_build(inputs, is_training, keep_prob):
  """A custom build method to wrap into a Sonnet Module."""
  x_inputs = tf.reshape(inputs, [-1, 28, 28, 1])
  outputs = snt.Conv2D(output_channels=32, kernel_shape=4, stride=2)(x_inputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')
  outputs = snt.Conv2D(output_channels=64, kernel_shape=4, stride=2)(outputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')
  outputs = snt.Conv2D(output_channels=1024, kernel_shape=1, stride=1)(outputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = snt.BatchFlatten()(outputs)
  outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)
  outputs = snt.Linear(output_size=10)(outputs)
#  _activation_summary(outputs)
  return outputs 
Author: normanheckscher, Project: mnist-multi-gpu, Lines: 23, Source: mnist_multi_gpu_sonnet.py
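Example 5 defines a bare build function rather than a Sonnet module. A hedged sketch of how such a function is typically wrapped and connected, assuming TensorFlow 1.x and Sonnet v1 (where snt.Module turns a build callable into a module); the placeholder shapes are illustrative:

import functools
import sonnet as snt
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 784])
is_training = tf.placeholder(tf.bool, [])
keep_prob = tf.placeholder(tf.float32, [])

# functools.partial binds the extra arguments so the wrapped callable
# only receives the inputs tensor when the module is connected.
model = snt.Module(functools.partial(
    custom_build, is_training=is_training, keep_prob=keep_prob))
logits = model(images)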

Example 6: __init__

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def __init__(self, axis=None, offset=True, scale=False,
               decay_rate=0.999, eps=1e-3, initializers=None,
               partitioners=None, regularizers=None,
               update_ops_collection=None, name='batch_norm'):
    """Constructs a BatchNorm module. See original code for more details."""
    super(BatchNorm, self).__init__(
        axis=axis, offset=offset, scale=scale, decay_rate=decay_rate, eps=eps,
        initializers=initializers, partitioners=partitioners,
        regularizers=regularizers, fused=False,
        update_ops_collection=update_ops_collection, name=name) 
Author: deepmind, Project: interval-bound-propagation, Lines: 12, Source: layers.py

Example 7: _build_statistics

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def _build_statistics(self, input_batch, axis, use_batch_stats, stat_dtype):
    """Builds the statistics part of the graph when using moving variance."""
    self._mean, self._variance = super(BatchNorm, self)._build_statistics(
        input_batch, axis, use_batch_stats, stat_dtype)
    return self._mean, self._variance 
Author: deepmind, Project: interval-bound-propagation, Lines: 7, Source: layers.py

Example 8: _build

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def _build(self, input_batch, is_training=True, test_local_stats=False,
             reuse=False):
    """Connects the BatchNorm module into the graph.

    Args:
      input_batch: A Tensor of arbitrary dimension. By default, the final
        dimension is not reduced over when computing the minibatch statistics.
      is_training: A boolean to indicate if the module should be connected in
        training mode, meaning the moving averages are updated. Can be a Tensor.
      test_local_stats: A boolean to indicate if the statistics should be from
        the local batch. When is_training is True, test_local_stats is not used.
      reuse: If True, the statistics computed by previous call to _build
        are used and is_training is ignored. Otherwise, behaves like a normal
        batch normalization layer.

    Returns:
      A tensor with the same shape as `input_batch`.

    Raises:
      ValueError: If `axis` is not valid for the
        input shape or has negative entries.
    """
    if reuse:
      self._ensure_is_connected()
      return tf.nn.batch_normalization(
          input_batch, self._mean, self._variance, self._beta, self._gamma,
          self._eps, name='batch_norm')
    else:
      return super(BatchNorm, self)._build(input_batch, is_training,
                                           test_local_stats=test_local_stats) 
Author: deepmind, Project: interval-bound-propagation, Lines: 32, Source: layers.py
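A note on Example 8: the reuse=True path re-applies, via tf.nn.batch_normalization, the mean and variance captured the last time the module was connected, so a second copy of the network (for instance, the verified copy in interval bound propagation) normalizes with exactly the same statistics as the original forward pass.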

Example 9: _inputs_for_observed_module

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def _inputs_for_observed_module(self, subgraph):
    """Extracts input tensors from a connected Sonnet module.

    This default implementation supports common layer types, but should be
    overridden if custom layer types are to be supported.

    Args:
      subgraph: `snt.ConnectedSubGraph` specifying the Sonnet module being
        connected, and its inputs and outputs.

    Returns:
      List of input tensors, or None if not a supported Sonnet module.
    """
    m = subgraph.module
    # Only support a few operations for now.
    if not (isinstance(m, snt.BatchReshape) or
            isinstance(m, snt.Linear) or
            isinstance(m, snt.Conv1D) or
            isinstance(m, snt.Conv2D) or
            isinstance(m, snt.BatchNorm) or
            isinstance(m, layers.ImageNorm)):
      return None

    if isinstance(m, snt.BatchNorm):
      return subgraph.inputs['input_batch'],
    else:
      return subgraph.inputs['inputs'], 
Author: deepmind, Project: interval-bound-propagation, Lines: 29, Source: model.py

Example 10: _wrapper_for_observed_module

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def _wrapper_for_observed_module(self, subgraph):
    """Creates a wrapper for a connected Sonnet module.

    This default implementation supports common layer types, but should be
    overridden if custom layer types are to be supported.

    Args:
      subgraph: `snt.ConnectedSubGraph` specifying the Sonnet module being
        connected, and its inputs and outputs.

    Returns:
      `ibp.VerifiableWrapper` for the Sonnet module.
    """
    m = subgraph.module
    if isinstance(m, snt.BatchReshape):
      shape = subgraph.outputs.get_shape()[1:].as_list()
      return verifiable_wrapper.BatchReshapeWrapper(m, shape)
    elif isinstance(m, snt.Linear):
      return verifiable_wrapper.LinearFCWrapper(m)
    elif isinstance(m, snt.Conv1D):
      return verifiable_wrapper.LinearConv1dWrapper(m)
    elif isinstance(m, snt.Conv2D):
      return verifiable_wrapper.LinearConv2dWrapper(m)
    elif isinstance(m, layers.ImageNorm):
      return verifiable_wrapper.ImageNormWrapper(m)
    else:
      assert isinstance(m, snt.BatchNorm)
      return verifiable_wrapper.BatchNormWrapper(m) 
Author: deepmind, Project: interval-bound-propagation, Lines: 30, Source: model.py

Example 11: combine_with_batchnorm

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def combine_with_batchnorm(w, b, batchnorm_module):
  """Combines a linear layer and a batch norm into a single linear layer.

  Calculates the weights and biases of the linear layer formed by
  applying the specified linear layer followed by the batch norm.

  Note that, in the case of a convolution, the returned bias will have
  spatial dimensions.

  Args:
    w: 2D tensor of shape (input_size, output_size) or 4D tensor of shape
      (kernel_height, kernel_width, input_channels, output_channels) containing
      weights for the linear layer.
    b: 1D tensor of shape (output_size) or (output_channels) containing biases
      for the linear layer, or `None` if no bias.
    batchnorm_module: `snt.BatchNorm` module.

  Returns:
    w: 2D tensor of shape (input_size, output_size) or 4D tensor of shape
      (kernel_height, kernel_width, input_channels, output_channels) containing
      weights for the combined layer.
    b: 1D tensor of shape (output_size) or 3D tensor of shape
      (output_height, output_width, output_channels) containing
      biases for the combined layer.
  """
  if b is None:
    b = tf.zeros(dtype=w.dtype, shape=())

  w_bn, b_bn = decode_batchnorm(batchnorm_module)
  return w * w_bn, b * w_bn + b_bn 
Author: deepmind, Project: interval-bound-propagation, Lines: 32, Source: layer_utils.py
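Examples 11 and 15 together fold a frozen batch norm into the preceding linear layer: batch norm at fixed statistics is the affine map y = w_bn * x + b_bn with w_bn = gamma / sqrt(var + eps) and b_bn = beta - w_bn * mean, so composing it with a linear layer just rescales the weights and shifts the bias. A small self-contained NumPy check of that algebra, with made-up shapes (this mirrors, but is not, the library code above):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(8, 4))                  # batch of inputs
w = rng.normal(size=(4, 3))                  # linear weights
b = rng.normal(size=(3,))                    # linear biases
mean = rng.normal(size=3)
var = rng.uniform(0.5, 2.0, size=3)
gamma, beta, eps = rng.normal(size=3), rng.normal(size=3), 1e-3

# Batch norm at fixed statistics, written as an affine map.
w_bn = gamma / np.sqrt(var + eps)
b_bn = beta - w_bn * mean

# Folded layer: the multipliers are absorbed into the linear weights.
w_folded = w * w_bn                          # broadcasts over outputs
b_folded = b * w_bn + b_bn

y_two_step = (x @ w + b) * w_bn + b_bn
y_folded = x @ w_folded + b_folded
assert np.allclose(y_two_step, y_folded)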

Example 12: __init__

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def __init__(self, module):
    if not isinstance(module, snt.BatchNorm):
      raise ValueError('Cannot wrap {} with a BatchNormWrapper.'.format(
          module))
    super(BatchNormWrapper, self).__init__(module) 
Author: deepmind, Project: interval-bound-propagation, Lines: 7, Source: verifiable_wrapper.py

Example 13: _propagate_through

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def _propagate_through(self, module, input_bounds):
    if isinstance(module, layers.BatchNorm):
      # This IBP-specific batch-norm implementation exposes stats recorded
      # the most recent time the BatchNorm module was connected.
      # These will be either the batch stats (e.g. if training) or the moving
      # averages, depending on how the module was called.
      mean = module.mean
      variance = module.variance
      epsilon = module.epsilon
      scale = module.scale
      bias = module.bias

    else:
      # This plain Sonnet batch-norm implementation only exposes the
      # moving averages.
      logging.warn('Sonnet BatchNorm module encountered: %s. '
                   'IBP will always use its moving averages, not the local '
                   'batch stats, even in training mode.', str(module))
      mean = module.moving_mean
      variance = module.moving_variance
      epsilon = module._eps  # pylint: disable=protected-access
      try:
        bias = module.beta
      except snt.Error:
        bias = None
      try:
        scale = module.gamma
      except snt.Error:
        scale = None

    return input_bounds.apply_batch_norm(self, mean, variance,
                                         scale, bias, epsilon) 
Author: deepmind, Project: interval-bound-propagation, Lines: 34, Source: verifiable_wrapper.py
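Example 13 hands the extracted statistics to input_bounds.apply_batch_norm. Because batch norm at fixed statistics is an elementwise affine map, propagating an interval [lb, ub] through it reduces to scaling the interval's center and radius; a minimal NumPy sketch of that step (illustrative only, not the library's apply_batch_norm):

import numpy as np

def interval_through_batch_norm(lb, ub, mean, variance, scale, bias, eps):
  # Per-channel multiplier and offset of the frozen batch norm.
  w = scale / np.sqrt(variance + eps)
  b = bias - w * mean
  center = (lb + ub) / 2.0
  radius = (ub - lb) / 2.0
  out_center = w * center + b
  out_radius = np.abs(w) * radius    # abs() handles negative scales
  return out_center - out_radius, out_center + out_radius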

Example 14: compute_top_delta

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def compute_top_delta(self, z):
    """Parameterization of topD.

    Converts the top-level activations into an error signal.

    Args:
      z: tf.Tensor, a batch of final-layer post-activations.

    Returns:
      delta: tf.Tensor, the error signal.
    """
    s_idx = 0
    with tf.variable_scope('compute_top_delta'), tf.device(self.remote_device):
      # typically this takes [BS, length, input_channels],
      # We are applying this such that we convolve over the batch dimension.
      act = tf.expand_dims(tf.transpose(z, [1, 0]), 2)  # [channels, BS, 1]

      mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[5])
      act = mod(act)

      act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
      act = tf.nn.relu(act)

      bs = act.shape.as_list()[0]
      act = tf.transpose(act, [2, 1, 0])
      act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act)
      act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
      act = tf.nn.relu(act)
      act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act)
      act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
      act = tf.nn.relu(act)
      act = tf.transpose(act, [2, 1, 0])

      prev_act = act
      for i in range(self.top_delta_layers):
        mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[3])
        act = mod(act)

        act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
        act = tf.nn.relu(act)

        prev_act = act

      mod = snt.Conv1D(output_channels=self.delta_dim, kernel_shape=[3])
      act = mod(act)

      # [bs, feature_channels, delta_channels]
      act = tf.transpose(act, [1, 0, 2])
      return act 
Author: itsamitgoel, Project: Gun-Detector, Lines: 50, Source: more_local_weight_update.py

Example 15: decode_batchnorm

# Required import: import sonnet [as alias]
# Alternative: from sonnet import BatchNorm [as alias]
def decode_batchnorm(batchnorm_module):
  """Calculates the neuron-wise multipliers and biases of the batch norm layer.

  Note that, in the case of a convolution, the returned bias will have
  spatial dimensions.

  Args:
    batchnorm_module: `snt.BatchNorm` module.

  Returns:
    w: 1D tensor of shape (output_size) or 3D tensor of shape
      (output_height, output_width, output_channels) containing
      neuron-wise multipliers for the batch norm layer.
    b: 1D tensor of shape (output_size) or 3D tensor of shape
      (output_height, output_width, output_channels) containing
      neuron-wise biases for the batch norm layer.
  """
  if isinstance(batchnorm_module, layers.BatchNorm):
    mean = batchnorm_module.mean
    variance = batchnorm_module.variance
    variance_epsilon = batchnorm_module.epsilon
    scale = batchnorm_module.scale
    offset = batchnorm_module.bias

  else:
    assert isinstance(batchnorm_module, snt.BatchNorm)
    mean = batchnorm_module.moving_mean
    variance = batchnorm_module.moving_variance
    variance_epsilon = batchnorm_module._eps  # pylint: disable=protected-access
    try:
      scale = batchnorm_module.gamma
    except snt.Error:
      scale = None
    try:
      offset = batchnorm_module.beta
    except snt.Error:
      offset = None

  w = tf.rsqrt(variance + variance_epsilon)
  if scale is not None:
    w *= scale

  b = -w * mean
  if offset is not None:
    b += offset

  # Batchnorm vars have a redundant leading dim.
  w = tf.squeeze(w, axis=0)
  b = tf.squeeze(b, axis=0)
  return w, b 
Author: deepmind, Project: interval-bound-propagation, Lines: 52, Source: layer_utils.py


Note: the sonnet.BatchNorm examples in this article were collected from open-source code hosted on GitHub and similar platforms. The snippets were selected from projects contributed by their original authors, and copyright remains with those authors; consult each project's License before reusing or redistributing the code.