

Python v1.truncated_normal Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.truncated_normal method in Python. If you are wondering what v1.truncated_normal does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples from the tensorflow.compat.v1 module.


Eight code examples of the v1.truncated_normal method are shown below, sorted by popularity by default.

Example 1: get_synthetic_inputs

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import truncated_normal [as an alias]
def get_synthetic_inputs(self, input_name, nclass):
    # Synthetic input should be within [0, 255].
    image_shape, label_shape = self.get_input_shapes('train')
    inputs = tf.truncated_normal(
        image_shape,
        dtype=self.data_type,
        mean=127,
        stddev=60,
        name=self.model_name + '_synthetic_inputs')
    inputs = variables_module.VariableV1(
        inputs, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES],
        name=input_name)
    labels = tf.random_uniform(
        label_shape,
        minval=0,
        maxval=nclass - 1,
        dtype=tf.int32,
        name=self.model_name + '_synthetic_labels')
    return (inputs, labels) 
Developer: tensorflow, Project: benchmarks, Lines of code: 21, Source file: model.py
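For reference, here is a minimal, self-contained sketch of the same pattern outside the benchmarks class: truncated-normal "pixels" centered at 127 and uniform integer labels. The shapes and values below are illustrative assumptions, not values from the benchmarks repo; note that tf.random_uniform's maxval is exclusive for integer dtypes.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # graph mode, as in the original example

batch, height, width, channels, nclass = 4, 8, 8, 3, 10
images = tf.truncated_normal([batch, height, width, channels],
                             mean=127.0, stddev=60.0, dtype=tf.float32,
                             name='synthetic_inputs')
labels = tf.random_uniform([batch], minval=0, maxval=nclass, dtype=tf.int32,
                           name='synthetic_labels')

with tf.Session() as sess:
  img_vals, lbl_vals = sess.run([images, labels])
  print(img_vals.shape, lbl_vals)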

Example 2: weight_noise

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import truncated_normal [as an alias]
def weight_noise(noise_rate, learning_rate, var_list):
  """Apply weight noise to vars in var_list."""
  if not noise_rate:
    return [tf.no_op()]

  tf.logging.info("Applying weight noise scaled by learning rate, "
                  "noise_rate: %0.5f", noise_rate)

  noise_ops = []

  for v in var_list:
    with tf.device(v.device):  # pylint: disable=protected-access
      scale = noise_rate * learning_rate * 0.001
      if common_layers.should_generate_summaries():
        tf.summary.scalar("weight_noise_scale", scale)
      noise = tf.truncated_normal(v.shape) * scale
      noise_op = v.assign_add(noise)
      noise_ops.append(noise_op)

  return noise_ops 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 22, Source file: optimize.py
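As a rough standalone illustration (an assumption, not tensor2tensor code), the core of each noise op is simply an assign_add of truncated-normal noise scaled by the learning rate. In practice the returned ops would typically be grouped with the training op and run once per step.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

noise_rate, learning_rate = 1.0, 0.1
v = tf.get_variable("w_noise_demo", shape=[2, 3],
                    initializer=tf.zeros_initializer())
scale = noise_rate * learning_rate * 0.001
noise_op = v.assign_add(tf.truncated_normal(v.shape) * scale)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(noise_op))  # weights nudged by small truncated-normal noise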

Example 3: tanh_discrete_bottleneck

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import truncated_normal [as an alias]
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise,
                             discretize_warmup_steps, mode):
  """Simple discretization through tanh, flip bottleneck_noise many bits."""
  x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck")
  d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0
  if mode == tf.estimator.ModeKeys.TRAIN:
    x += tf.truncated_normal(
        common_layers.shape_list(x), mean=0.0, stddev=0.2)
  x = tf.tanh(x)
  d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
  if mode == tf.estimator.ModeKeys.TRAIN:
    noise = tf.random_uniform(common_layers.shape_list(x))
    noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
    d *= noise
  d = common_layers.mix(d, x, discretize_warmup_steps,
                        mode == tf.estimator.ModeKeys.TRAIN)
  return d, d0 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 19, Source file: discretization.py
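The key trick above is the straight-through estimator: d equals the hard ±1 sign in the forward pass, while gradients flow through tanh(x). A tiny standalone check (an illustrative assumption, not tensor2tensor code):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.constant([[-1.5, 0.2, 0.7]])
xt = tf.tanh(x)
hard = 2.0 * tf.to_float(tf.less(0.0, xt)) - 1.0
d = xt + tf.stop_gradient(hard - xt)         # value: hard +-1 sign
grad = tf.gradients(tf.reduce_sum(d), x)[0]  # gradient: 1 - tanh(x)**2

with tf.Session() as sess:
  print(sess.run([d, grad]))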

Example 4: make_low_rank_factorization_initializer

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import truncated_normal [as an alias]
def make_low_rank_factorization_initializer(shape, rank):
  fan_in = int(shape[0])
  # This is the variance we'd like to see if a matrix of 'shape' was
  # initialized directly.
  variance = 1.0 / fan_in
  # Each element of a*b (the low rank matrices) is the sum of 'rank'
  # terms, each of which is a product of an element from 'a' and
  # 'b'.
  stddev = np.sqrt(np.sqrt(variance / rank))
  return tf.initializers.truncated_normal(stddev=stddev) 
Developer: deepmind, Project: lamb, Lines of code: 12, Source file: utils.py
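The double square root follows because each entry of the product a @ b of the two low-rank factors is a sum of `rank` independent products, each with variance stddev**4 = variance / rank. A quick numerical sanity check (a sketch with assumed sizes, using plain normal draws rather than the truncated initializer):

import numpy as np

fan_in, fan_out, rank = 256, 128, 16
variance = 1.0 / fan_in
stddev = np.sqrt(np.sqrt(variance / rank))
a = np.random.normal(0.0, stddev, size=(fan_in, rank))
b = np.random.normal(0.0, stddev, size=(rank, fan_out))
print(np.var(a @ b), variance)  # both should be on the order of 1 / fan_in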

Example 5: isemhash_bottleneck

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import truncated_normal [as an alias]
def isemhash_bottleneck(x,
                        bottleneck_bits,
                        bottleneck_noise,
                        discretize_warmup_steps,
                        mode,
                        isemhash_noise_dev=0.5,
                        isemhash_mix_prob=0.5):
  """Improved semantic hashing bottleneck."""
  with tf.variable_scope("isemhash_bottleneck"):
    x = tf.layers.dense(x, bottleneck_bits, name="dense")
    y = common_layers.saturating_sigmoid(x)
    if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.truncated_normal(
          common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)
      y = common_layers.saturating_sigmoid(x + noise)
    d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
    d = 2.0 * d - 1.0  # Move from [0, 1] to [-1, 1].
    if mode == tf.estimator.ModeKeys.TRAIN:  # Flip some bits.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
      d *= noise
      d = common_layers.mix(
          d,
          2.0 * y - 1.0,
          discretize_warmup_steps,
          mode == tf.estimator.ModeKeys.TRAIN,
          max_prob=isemhash_mix_prob)
    return d, 0.0 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 30, Source file: discretization.py
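The bit-flipping step shared with the tanh bottleneck above multiplies the ±1 code by a random ±1 mask, so each bit is flipped with probability bottleneck_noise. A standalone illustration (values assumed, not tensor2tensor code):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

bottleneck_noise = 0.2
bits = tf.constant([[1.0, -1.0, 1.0, 1.0, -1.0, 1.0]])
u = tf.random_uniform(tf.shape(bits))
mask = 2.0 * tf.to_float(tf.less(bottleneck_noise, u)) - 1.0  # -1 w.p. 0.2

with tf.Session() as sess:
  print(sess.run(bits * mask))  # roughly 20% of the bits change sign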

Example 6: weight_variable

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import truncated_normal [as an alias]
def weight_variable(shape):
  initial = tf.truncated_normal(shape, stddev=0.1)
  return tf.Variable(initial) 
Developer: SullyChen, Project: Autopilot-TensorFlow, Lines of code: 5, Source file: model.py
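A typical way such a helper is used when building a network; the shapes below are illustrative assumptions, not necessarily those of the Autopilot-TensorFlow model (the definition is repeated so the sketch is self-contained):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

def weight_variable(shape):
  initial = tf.truncated_normal(shape, stddev=0.1)
  return tf.Variable(initial)

W_conv1 = weight_variable([5, 5, 3, 24])  # 5x5 conv kernel, 3 -> 24 channels
W_fc1 = weight_variable([1152, 100])      # fully connected weight matrix
print(W_conv1.shape, W_fc1.shape)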

Example 7: variance_scaling_initializer

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import truncated_normal [as an alias]
def variance_scaling_initializer(scale=2.0, mode='fan_in',
                                 distribution='truncated_normal',
                                 mean=0.0, seed=None, dtype=tf.float32):
  """Like tf.variance_scaling_initializer but supports non-zero means."""
  if not dtype.is_floating:
    raise TypeError('Cannot create initializer for non-floating point type.')
  if mode not in ['fan_in', 'fan_out', 'fan_avg']:
    raise TypeError('Unknown mode %s [fan_in, fan_out, fan_avg]' % mode)

  # pylint: disable=unused-argument
  def _initializer(shape, dtype=dtype, partition_info=None):
    """Initializer function."""
    if not dtype.is_floating:
      raise TypeError('Cannot create initializer for non-floating point type.')
    # Estimating fan_in and fan_out is not possible to do perfectly, but we try.
    # This is the right thing for matrix multiply and convolutions.
    if shape:
      fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
      fan_out = float(shape[-1])
    else:
      fan_in = 1.0
      fan_out = 1.0
    for dim in shape[:-2]:
      fan_in *= float(dim)
      fan_out *= float(dim)
    if mode == 'fan_in':
      # Count only number of input connections.
      n = fan_in
    elif mode == 'fan_out':
      # Count only number of output connections.
      n = fan_out
    elif mode == 'fan_avg':
      # Average number of inputs and output connections.
      n = (fan_in + fan_out) / 2.0
    if distribution == 'truncated_normal':
      # To get stddev = math.sqrt(scale / n) need to adjust for truncated.
      trunc_stddev = math.sqrt(1.3 * scale / n)
      return tf.truncated_normal(shape, mean, trunc_stddev, dtype, seed=seed)
    elif distribution == 'uniform':
      # To get stddev = math.sqrt(scale / n) need to adjust for uniform.
      limit = math.sqrt(3.0 * scale / n)
      return tf.random_uniform(shape, mean-limit, mean+limit, dtype, seed=seed)
    else:
      raise ValueError('Unexpected distribution %s.' % distribution)
  # pylint: enable=unused-argument

  return _initializer 
Developer: deepmind, Project: lamb, Lines of code: 49, Source file: utils.py
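A small usage sketch (assumed, not from the lamb repo), relying on the variance_scaling_initializer defined above: with scale=2.0 and mode='fan_in' it behaves like a He-style initializer, so the sampled standard deviation should come out near sqrt(2 / fan_in) = 0.125 for fan_in = 128 (the 1.3 factor compensates for the truncation at two standard deviations).

import math
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

init = variance_scaling_initializer(scale=2.0, mode='fan_in', mean=0.0)
w = tf.get_variable("w_he_demo", shape=[128, 64], initializer=init)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(w).std(), math.sqrt(2.0 / 128))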

Example 8: body

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import truncated_normal [as an alias]
def body(self, features):
    """TextCNN main model_fn.

    Args:
      features: Map of features to the model. Should contain the following:
          "inputs": Text inputs.
              [batch_size, input_length, 1, hidden_dim].
          "targets": Target encoder outputs.
              [batch_size, 1, 1, hidden_dim]
    Returns:
      Final encoder representation. [batch_size, 1, 1, hidden_dim]
    """
    hparams = self._hparams
    inputs = features["inputs"]

    xshape = common_layers.shape_list(inputs)

    vocab_size = xshape[3]
    inputs = tf.reshape(inputs, [xshape[0], xshape[1], xshape[3], xshape[2]])

    pooled_outputs = []
    for _, filter_size in enumerate(hparams.filter_sizes):
      with tf.name_scope("conv-maxpool-%s" % filter_size):
        filter_shape = [filter_size, vocab_size, 1, hparams.num_filters]
        filter_var = tf.Variable(
            tf.truncated_normal(filter_shape, stddev=0.1), name="W")
        filter_bias = tf.Variable(
            tf.constant(0.1, shape=[hparams.num_filters]), name="b")
        conv = tf.nn.conv2d(
            inputs,
            filter_var,
            strides=[1, 1, 1, 1],
            padding="VALID",
            name="conv")
        conv_outputs = tf.nn.relu(
            tf.nn.bias_add(conv, filter_bias), name="relu")
        pooled = tf.math.reduce_max(
            conv_outputs, axis=1, keepdims=True, name="max")
        pooled_outputs.append(pooled)

    num_filters_total = hparams.num_filters * len(hparams.filter_sizes)
    h_pool = tf.concat(pooled_outputs, 3)
    h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])

    # Add dropout
    output = tf.nn.dropout(h_pool_flat, 1 - hparams.output_dropout)
    output = tf.reshape(output, [-1, 1, 1, num_filters_total])

    return output 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 51, Source file: text_cnn.py
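The loop above implements the classic conv-and-max-over-time pattern for text. A standalone sketch of one branch (shapes assumed, not tensor2tensor code): a [batch, length, embed, 1] tensor is convolved with a truncated-normal filter spanning the full embedding width, then max-pooled over the length dimension.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

batch, length, embed, num_filters, filter_size = 2, 7, 16, 4, 3
text = tf.random_uniform([batch, length, embed, 1])
filt = tf.Variable(tf.truncated_normal([filter_size, embed, 1, num_filters],
                                       stddev=0.1))
conv = tf.nn.conv2d(text, filt, strides=[1, 1, 1, 1], padding="VALID")
pooled = tf.math.reduce_max(tf.nn.relu(conv), axis=1, keepdims=True)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(pooled).shape)  # (2, 1, 1, 4)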


Note: The tensorflow.compat.v1.truncated_normal examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not republish without permission.