

Python tensorflow.variance_scaling_initializer Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.variance_scaling_initializer method in Python. If you have been wondering what tensorflow.variance_scaling_initializer does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples from the tensorflow module it belongs to.


The sections below present 15 code examples of the tensorflow.variance_scaling_initializer method, sorted by popularity by default.
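For orientation before the examples, here is a minimal, self-contained sketch of calling tf.variance_scaling_initializer directly. It assumes TensorFlow 1.x, where the function lives at the top level of the tf namespace; the shape and scale values are illustrative only.

import tensorflow as tf

# He-style initialization: target Var(w) = scale / fan_in = 2.0 / fan_in.
# In TF 1.x, distribution="normal" draws from a *truncated* normal.
initializer = tf.variance_scaling_initializer(
    scale=2.0, mode="fan_in", distribution="normal")
weights = tf.get_variable("weights", shape=[256, 128],
                          initializer=initializer)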

Example 1: get_variable_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
def get_variable_initializer(hparams):
  """Get variable initializer from hparams."""
  if not hparams.initializer:
    return None

  if not tf.contrib.eager.in_eager_mode():
    tf.logging.info("Using variable initializer: %s", hparams.initializer)
  if hparams.initializer == "orthogonal":
    return tf.orthogonal_initializer(gain=hparams.initializer_gain)
  elif hparams.initializer == "uniform":
    max_val = 0.1 * hparams.initializer_gain
    return tf.random_uniform_initializer(-max_val, max_val)
  elif hparams.initializer == "normal_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="normal")
  elif hparams.initializer == "uniform_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="uniform")
  elif hparams.initializer == "xavier":
    return tf.contrib.layers.xavier_initializer()
  else:
    raise ValueError("Unrecognized initializer: %s" % hparams.initializer) 
Developer: akzaidi, Project: fine-lm, Lines: 24, Source file: optimize.py
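A minimal usage sketch for the function above. The HParams fields shown are assumptions inferred from how the function reads them, not the project's full hyperparameter set.

import tensorflow as tf

hparams = tf.contrib.training.HParams(
    initializer="uniform_unit_scaling",  # any branch name handled above
    initializer_gain=1.0)
init = get_variable_initializer(hparams)
# Apply the initializer to every variable created under this scope.
with tf.variable_scope("model", initializer=init):
    w = tf.get_variable("w", shape=[512, 512])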

Example 2: resnet_backbone

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
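# Also requires tensorpack layer helpers (argscope, Conv2D, MaxPooling, GlobalAvgPooling, FullyConnected, BNReLU)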
def resnet_backbone(image, num_blocks, group_func, block_func):
    """
    Sec 5.1: We adopt the initialization of [15] for all convolutional layers.
    TensorFlow does not have the true "MSRA init". We use variance_scaling as an approximation.
    """
    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')):
        l = Conv2D('conv0', image, 64, 7, strides=2, activation=BNReLU)
        l = MaxPooling('pool0', l, pool_size=3, strides=2, padding='SAME')
        l = group_func('group0', l, block_func, 64, num_blocks[0], 1)
        l = group_func('group1', l, block_func, 128, num_blocks[1], 2)
        l = group_func('group2', l, block_func, 256, num_blocks[2], 2)
        l = group_func('group3', l, block_func, 512, num_blocks[3], 2)
        l = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linear', l, 1000,
                                kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    """
    Sec 5.1:
    The 1000-way fully-connected layer is initialized by
    drawing weights from a zero-mean Gaussian with standard
    deviation of 0.01.
    """
    return logits 
Developer: tensorpack, Project: benchmarks, Lines: 25, Source file: resnet_model.py
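As a quick sanity check of what scale=2.0, mode='fan_out' means for conv0 above (a 7x7 kernel with 64 output channels): for a conv kernel, fan_out is kernel_height * kernel_width * out_channels, and the initializer targets Var(w) = scale / fan_out. The arithmetic below is a sketch of that target; the actual initializer draws from a truncated normal.

import numpy as np

# conv0 above: 7x7 kernel, 64 output channels.
fan_out = 7 * 7 * 64              # 3136
stddev = np.sqrt(2.0 / fan_out)   # ~0.0253, the He/MSRA target
print(stddev)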

Example 3: get_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
def get_initializer(params):
    if params.initializer == "uniform":
        max_val = params.initializer_gain
        return tf.random_uniform_initializer(-max_val, max_val)
    elif params.initializer == "normal":
        return tf.random_normal_initializer(0.0, params.initializer_gain)
    elif params.initializer == "normal_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="normal")
    elif params.initializer == "uniform_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="uniform")
    else:
        raise ValueError("Unrecognized initializer: %s" % params.initializer) 
Developer: THUNLP-MT, Project: THUMT, Lines: 18, Source file: trainer.py

Example 4: conv_kernel_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
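# Also requires: import numpy as np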
def conv_kernel_initializer(shape, dtype=None, partition_info=None):
  """Initialization for convolutional kernels.
  The main difference with tf.variance_scaling_initializer is that
  tf.variance_scaling_initializer uses a truncated normal with an uncorrected
  standard deviation, whereas here we use a normal distribution. Similarly,
  tf.initializers.variance_scaling uses a truncated normal with
  a corrected standard deviation.
  Args:
    shape: shape of variable
    dtype: dtype of variable
    partition_info: unused
  Returns:
    an initialization for the variable
  """
  del partition_info
  kernel_height, kernel_width, _, out_filters = shape
  fan_out = int(kernel_height * kernel_width * out_filters)
  return tf.random_normal(
      shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype) 
Developer: Thinklab-SJTU, Project: R3Det_Tensorflow, Lines: 21, Source file: efficientnet_model.py
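To make the docstring's distinction concrete: for the same kernel shape, the custom initializer above and tf.variance_scaling_initializer target the same variance, but the former samples an untruncated normal while the latter truncates at two standard deviations. A sketch, with an illustrative shape:

import tensorflow as tf

shape = (3, 3, 32, 64)                       # kH, kW, in_filters, out_filters
custom = conv_kernel_initializer(
    shape, dtype=tf.float32)                 # untruncated normal, stddev = sqrt(2/(3*3*64))
builtin = tf.variance_scaling_initializer(
    scale=2.0, mode="fan_out")(shape)        # truncated normal, same target variance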

Example 5: dense_kernel_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
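# Also requires: import numpy as np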
def dense_kernel_initializer(shape, dtype=None, partition_info=None):
  """Initialization for dense kernels.
  This initialization is equal to
    tf.variance_scaling_initializer(scale=1.0/3.0, mode='fan_out',
                                    distribution='uniform').
  It is written out explicitly here for clarity.
  Args:
    shape: shape of variable
    dtype: dtype of variable
    partition_info: unused
  Returns:
    an initialization for the variable
  """
  del partition_info
  init_range = 1.0 / np.sqrt(shape[1])
  return tf.random_uniform(shape, -init_range, init_range, dtype=dtype) 
Developer: Thinklab-SJTU, Project: R3Det_Tensorflow, Lines: 18, Source file: efficientnet_model.py
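The claimed equivalence is easy to verify: for distribution='uniform', variance scaling draws from [-limit, limit] with limit = sqrt(3 * scale / fan), so scale=1.0/3.0 with mode='fan_out' gives limit = sqrt(3 * (1/3) / shape[1]) = 1 / sqrt(shape[1]), which is exactly init_range above. A one-line check:

import numpy as np

fan_out = 128                                      # shape[1] of a dense kernel
limit = np.sqrt(3.0 * (1.0 / 3.0) / fan_out)       # variance-scaling uniform limit
assert np.isclose(limit, 1.0 / np.sqrt(fan_out))   # matches init_range above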

Example 6: conv_kernel_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
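# Also requires: import numpy as np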
def conv_kernel_initializer(shape, dtype=None, partition_info=None):
    """Initialization for convolutional kernels.
    The main difference with tf.variance_scaling_initializer is that
    tf.variance_scaling_initializer uses a truncated normal with an uncorrected
    standard deviation, whereas here we use a normal distribution. Similarly,
    tf.contrib.layers.variance_scaling_initializer uses a truncated normal with
    a corrected standard deviation.
    Args:
      shape: shape of variable
      dtype: dtype of variable
      partition_info: unused
    Returns:
      an initialization for the variable
    """
    del partition_info
    kernel_height, kernel_width, _, out_filters = shape
    fan_out = int(kernel_height * kernel_width * out_filters)
    return tf.random_normal(
        shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype) 
Developer: Thinklab-SJTU, Project: R3Det_Tensorflow, Lines: 21, Source file: demo.py

Example 7: dense_kernel_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
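# Also requires: import numpy as np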
def dense_kernel_initializer(shape, dtype=None, partition_info=None):
    """Initialization for dense kernels.
    This initialization is equal to
      tf.variance_scaling_initializer(scale=1.0/3.0, mode='fan_out',
                                      distribution='uniform').
    It is written out explicitly here for clarity.
    Args:
      shape: shape of variable
      dtype: dtype of variable
      partition_info: unused
    Returns:
      an initialization for the variable
    """
    del partition_info
    init_range = 1.0 / np.sqrt(shape[1])
    return tf.random_uniform(shape, -init_range, init_range, dtype=dtype) 
Developer: Thinklab-SJTU, Project: R3Det_Tensorflow, Lines: 18, Source file: demo.py

Example 8: get_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
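# Also requires the project's dtype helper module (provides dtype.floatx(); project-specific)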
def get_initializer(initializer, initializer_gain):
    tfdtype = tf.as_dtype(dtype.floatx())

    if initializer == "uniform":
        max_val = initializer_gain
        return tf.random_uniform_initializer(-max_val, max_val, dtype=tfdtype)
    elif initializer == "normal":
        return tf.random_normal_initializer(0.0, initializer_gain, dtype=tfdtype)
    elif initializer == "normal_unit_scaling":
        return tf.variance_scaling_initializer(initializer_gain,
                                               mode="fan_avg",
                                               distribution="normal",
                                               dtype=tfdtype)
    elif initializer == "uniform_unit_scaling":
        return tf.variance_scaling_initializer(initializer_gain,
                                               mode="fan_avg",
                                               distribution="uniform",
                                               dtype=tfdtype)
    else:
        tf.logging.warn("Unrecognized initializer: %s" % initializer)
        tf.logging.warn("Return to default initializer: glorot_uniform_initializer")
        return tf.glorot_uniform_initializer(dtype=tfdtype) 
Developer: bzhangGo, Project: zero, Lines: 24, Source file: initializer.py
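Unlike Examples 3 and 10, this variant degrades gracefully: an unrecognized name logs two warnings and falls back to glorot_uniform_initializer instead of raising. A usage sketch, assuming it runs inside the project so that the dtype helper is importable:

import tensorflow as tf

init = get_initializer("does_not_exist", 1.0)  # warns, then falls back
# init behaves like tf.glorot_uniform_initializer here.
w = tf.get_variable("w", shape=[64, 64], initializer=init)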

Example 9: conv_kernel_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
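# Also requires: import numpy as np, plus the Keras backend for K.floatx() (e.g. from keras import backend as K)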
def conv_kernel_initializer(shape, dtype=K.floatx()):
    """Initialization for convolutional kernels.
    The main difference with tf.variance_scaling_initializer is that
    tf.variance_scaling_initializer uses a truncated normal with an uncorrected
    standard deviation, whereas here we use a normal distribution. Similarly,
    tf.contrib.layers.variance_scaling_initializer uses a truncated normal with
    a corrected standard deviation.
    Args:
        shape: shape of variable
        dtype: dtype of variable
    Returns:
        an initialization for the variable
    """
    kernel_height, kernel_width, _, out_filters = shape
    fan_out = int(kernel_height * kernel_width * out_filters)
    return tf.random_normal(
        shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype) 
Developer: zhoudaxia233, Project: EfficientUnet, Lines: 19, Source file: utils.py

Example 10: get_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
def get_initializer(params):
    if params.initializer == "uniform":
        max_val = 0.1 * params.initializer_gain
        return tf.random_uniform_initializer(-max_val, max_val)
    elif params.initializer == "normal":
        return tf.random_normal_initializer(0.0, params.initializer_gain)
    elif params.initializer == "orthogonal":
        return tf.orthogonal_initializer(params.initializer_gain)
    elif params.initializer == "normal_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="normal")
    elif params.initializer == "uniform_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="uniform")
    else:
        raise ValueError("Unrecognized initializer: %s" % params.initializer) 
Developer: Imagist-Shuo, Project: UNMT-SPR, Lines: 20, Source file: train.py

Example 11: conv2d_fixed_padding

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
  """Strided 2-D convolution with explicit padding."""
  # The padding is consistent and is based only on `kernel_size`, not on the
  # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
  if strides > 1:
    inputs = fixed_padding(inputs, kernel_size, data_format)

  return tf.layers.conv2d(
      inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
      padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
      kernel_initializer=tf.variance_scaling_initializer(),
      data_format=data_format)


################################################################################
# ResNet block definitions.
################################################################################ 
Developer: rockyzhengwu, Project: nsfw, Lines: 19, Source file: resnet_model.py
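Two things are implicit in the excerpt: tf.variance_scaling_initializer() is called with its defaults (scale=1.0, mode='fan_in', truncated normal), and fixed_padding is defined elsewhere in the file. A sketch of what fixed_padding typically looks like in the TensorFlow official ResNet code, under that assumption (it pads by kernel_size - 1 in total, split as evenly as possible, independent of the input size):

import tensorflow as tf

def fixed_padding(inputs, kernel_size, data_format):
  """Pads the input along height and width, independently of input size."""
  pad_total = kernel_size - 1
  pad_beg = pad_total // 2
  pad_end = pad_total - pad_beg
  if data_format == 'channels_first':
    return tf.pad(inputs, [[0, 0], [0, 0],
                           [pad_beg, pad_end], [pad_beg, pad_end]])
  return tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
                         [pad_beg, pad_end], [0, 0]])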

Example 12: conv_kernel_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
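# Also requires: import numpy as np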
def conv_kernel_initializer(shape, dtype=None, partition_info=None):
  """Initialization for convolutional kernels.

  The main difference with tf.variance_scaling_initializer is that
  tf.variance_scaling_initializer uses a truncated normal with an uncorrected
  standard deviation, whereas here we use a normal distribution. Similarly,
  tf.contrib.layers.variance_scaling_initializer uses a truncated normal with
  a corrected standard deviation.

  Args:
    shape: shape of variable
    dtype: dtype of variable
    partition_info: unused

  Returns:
    an initialization for the variable
  """
  del partition_info
  kernel_height, kernel_width, _, out_filters = shape
  fan_out = int(kernel_height * kernel_width * out_filters)
  return tf.random_normal(
      shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype) 
Developer: artyompal, Project: tpu_models, Lines: 24, Source file: efficientnet_model.py

Example 13: dense_kernel_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
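# Also requires: import numpy as np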
def dense_kernel_initializer(shape, dtype=None, partition_info=None):
  """Initialization for dense kernels.

  This initialization is equal to
    tf.variance_scaling_initializer(scale=1.0/3.0, mode='fan_out',
                                    distribution='uniform').
  It is written out explicitly here for clarity.

  Args:
    shape: shape of variable
    dtype: dtype of variable
    partition_info: unused

  Returns:
    an initialization for the variable
  """
  del partition_info
  init_range = 1.0 / np.sqrt(shape[1])
  return tf.random_uniform(shape, -init_range, init_range, dtype=dtype) 
Developer: artyompal, Project: tpu_models, Lines: 21, Source file: efficientnet_model.py

Example 14: conv2d_fixed_padding

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
def conv2d_fixed_padding(self, inputs, filters, kernel_size, strides):
    """Strided 2-D convolution with explicit padding.

    The padding is consistent and is based only on `kernel_size`, not on the
    dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).

    Args:
      inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
      filters: `int` number of filters in the convolution.
      kernel_size: `int` size of the kernel to be used in the convolution.
      strides: `int` strides of the convolution.

    Returns:
      A `Tensor` of shape `[batch, filters, height_out, width_out]`.
    """
    if strides > 1:
      inputs = self.fixed_padding(inputs, kernel_size)

    return tf.layers.conv2d(
        inputs=inputs, filters=filters, kernel_size=kernel_size,
        strides=strides, padding=('SAME' if strides == 1 else 'VALID'),
        use_bias=False, kernel_initializer=tf.variance_scaling_initializer(),
        data_format=self._data_format) 
Developer: artyompal, Project: tpu_models, Lines: 25, Source file: resnet.py

Example 15: concat_attention

# Required import: import tensorflow [as alias]
# Or: from tensorflow import variance_scaling_initializer [as alias]
def concat_attention(a, b, a_lengths, b_lengths, max_seq_len, hidden_units=150,
                     scope='concat-attention', reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        a = tf.expand_dims(a, 2)
        b = tf.expand_dims(b, 1)
        c = tf.concat([a, b], axis=3)
        W = tf.get_variable(
            name='matmul_weights',
            initializer=tf.contrib.layers.variance_scaling_initializer(),
            shape=[shape(c, -1), hidden_units]
        )
        cW = tf.einsum('ijkl,lm->ijkm', c, W)
        v = tf.get_variable(
            name='dot_weights',
            initializer=tf.ones_initializer(),
            shape=[hidden_units]
        )
        logits = tf.einsum('ijkl,l->ijk', tf.nn.tanh(cW), v)
        logits = logits - tf.expand_dims(tf.reduce_max(logits, axis=2), 2)
        attn = tf.exp(logits)
        attn = mask_attention_weights(attn, a_lengths, b_lengths, max_seq_len)
        return attn / tf.expand_dims(tf.reduce_sum(attn, axis=2) + 1e-10, 2) 
Developer: zhufz, Project: nlp_research, Lines: 24, Source file: attention.py
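The excerpt depends on two helpers that are not shown: shape (reads one static dimension of a tensor) and mask_attention_weights (zeroes attention weights beyond each sequence's true length). Minimal sketches under those assumptions; the real implementations in nlp_research may differ:

import tensorflow as tf

def shape(tensor, dim):
    # Assumed helper: static dimension lookup, inferred from the call site shape(c, -1).
    return tensor.get_shape().as_list()[dim]

def mask_attention_weights(weights, a_lengths, b_lengths, max_seq_len):
    # Assumed behavior: keep weight (i, j) only where both positions fall
    # within their sequences' true lengths.
    mask_a = tf.sequence_mask(a_lengths, max_seq_len)   # [batch, max_seq_len]
    mask_b = tf.sequence_mask(b_lengths, max_seq_len)   # [batch, max_seq_len]
    mask = tf.logical_and(tf.expand_dims(mask_a, 2),    # [batch, len_a, 1]
                          tf.expand_dims(mask_b, 1))    # [batch, 1, len_b]
    return weights * tf.cast(mask, weights.dtype)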


Note: The tensorflow.variance_scaling_initializer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not repost without permission.