

Python tensorflow.truncated_normal Method Code Examples

This article collects typical usage examples of the tensorflow.truncated_normal method in Python. If you are struggling with questions such as: What exactly does tensorflow.truncated_normal do? How do I call it? What does real-world code that uses it look like? then the curated examples below may help. You can also explore further usage examples from the tensorflow module that contains this method.


The following presents 15 code examples of the tensorflow.truncated_normal method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
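
Before the numbered examples, here is a minimal sketch of the method itself (assuming TensorFlow 1.x, where tf.truncated_normal lives at the top level; in TensorFlow 2.x it was moved to tf.random.truncated_normal):

import tensorflow as tf

# Draw a 3x4 tensor from N(0.0, 0.1^2), re-sampling any value that falls
# more than two standard deviations from the mean.
init_values = tf.truncated_normal([3, 4], mean=0.0, stddev=0.1)

with tf.Session() as sess:
    print(sess.run(init_values))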

Example 1: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def __init__(self, dims=(0,), scale=2.0, **kwargs):
    """Creates an initializer.

    Args:
      dims: Index (or indices) of the dimensions used to compute the standard deviation:
        sqrt(scale / product(shape[dims]))
      scale: A constant scaling for the initialization used as
        sqrt(scale / product(shape[dims])).
      **kwargs: Extra keyword arguments to pass to tf.truncated_normal.
    """
    if isinstance(dims, int):  # Python 2's 'long' no longer exists; int covers Python 3
      self._dims = [dims]
    else:
      self._dims = dims
    self._kwargs = kwargs
    self._scale = scale 
Developer: ringringyi, Project: DOTA_models, Lines: 18, Source: block_util.py
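
The snippet above shows only the constructor, so here is a hypothetical standalone sketch of the scaling rule its docstring describes, stddev = sqrt(scale / product(shape[dims])); the actual method in block_util.py may differ:

import numpy as np
import tensorflow as tf

def scaled_truncated_normal(shape, dims=(0,), scale=2.0, **kwargs):
    # stddev = sqrt(scale / product(shape[dims])), as in the docstring above.
    stddev = np.sqrt(scale / np.prod([shape[d] for d in dims]))
    return tf.truncated_normal(shape, stddev=stddev, **kwargs)

# e.g. a fan-in-scaled [784, 256] weight matrix:
w = tf.Variable(scaled_truncated_normal([784, 256]))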

Example 2: weight_noise

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def weight_noise(noise_rate, learning_rate, var_list):
  """Apply weight noise to vars in var_list."""
  if not noise_rate:
    return [tf.no_op()]

  tf.logging.info("Applying weight noise scaled by learning rate, "
                  "noise_rate: %0.5f", noise_rate)

  noise_ops = []

  for v in var_list:
    with tf.device(v._ref().device):  # pylint: disable=protected-access
      scale = noise_rate * learning_rate * 0.001
      tf.summary.scalar("weight_noise_scale", scale)
      noise = tf.truncated_normal(v.shape) * scale
      noise_op = v.assign_add(noise)
      noise_ops.append(noise_op)

  return noise_ops 
Developer: akzaidi, Project: fine-lm, Lines: 21, Source: optimize.py
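
A minimal usage sketch (assuming TensorFlow 1.x and the weight_noise function above; the learning rate is passed as a tensor so the summary op receives a tensor value):

import tensorflow as tf

v = tf.Variable(tf.zeros([2, 2]), name="w")
noise_ops = weight_noise(noise_rate=1.0,
                         learning_rate=tf.constant(0.1),
                         var_list=[v])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(noise_ops)   # adds scaled truncated-normal noise to v in place
    print(sess.run(v))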

Example 3: isemhash_bottleneck

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def isemhash_bottleneck(x, bottleneck_bits, bottleneck_noise,
                        discretize_warmup_steps, mode,
                        isemhash_noise_dev=0.5, isemhash_mix_prob=0.5):
  """Improved semantic hashing bottleneck."""
  with tf.variable_scope("isemhash_bottleneck"):
    x = tf.layers.dense(x, bottleneck_bits, name="dense")
    y = common_layers.saturating_sigmoid(x)
    if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.truncated_normal(
          common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)
      y = common_layers.saturating_sigmoid(x + noise)
    d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
    d = 2.0 * d - 1.0  # Move from [0, 1] to [-1, 1].
    if mode == tf.estimator.ModeKeys.TRAIN:  # Flip some bits.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
      d *= noise
      d = common_layers.mix(d, 2.0 * y - 1.0, discretize_warmup_steps,
                            mode == tf.estimator.ModeKeys.TRAIN,
                            max_prob=isemhash_mix_prob)
    return d, 0.0 
Developer: akzaidi, Project: fine-lm, Lines: 23, Source: discretization.py
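
The core trick above is the straight-through estimator: the line d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y) emits hard 0/1 bits in the forward pass while letting gradients flow through y. A minimal standalone sketch of that pattern (illustrative values, not from the original project):

import tensorflow as tf

x = tf.constant([[-2.0, 0.5, 3.0]])
y = tf.sigmoid(x)
# Forward value: hard threshold at 0.5 (y - stop_gradient(y) is numerically 0).
# Backward: the gradient of d w.r.t. x equals the gradient of y, i.e. sigmoid'(x).
d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
grads = tf.gradients(tf.reduce_sum(d), x)

with tf.Session() as sess:
    print(sess.run([d, grads]))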

Example 4: dense_layer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def dense_layer(x, in_dim, out_dim, layer_name, act):
    """Creates a single densely connected layer of a NN"""
    with tf.name_scope(layer_name):
        # layer weights corresponding to the input / output dimensions
        weights = tf.Variable(
            tf.truncated_normal(
                [in_dim, out_dim], 
                stddev=1.0 / tf.sqrt(float(out_dim))
            ), name="weights"
        )

        # layer biases corresponding to output dimension
        biases = tf.Variable(tf.zeros([out_dim]), name="biases")

        # layer activations applied to Wx+b
        layer = act(tf.matmul(x, weights) + biases, name="activations")

    return layer 
Developer: aakhundov, Project: tf-example-models, Lines: 20, Source: tf_cnn.py
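
A minimal usage sketch for dense_layer (assuming TensorFlow 1.x; the layer sizes here are illustrative, not taken from the original tf_cnn.py):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
hidden = dense_layer(x, 784, 256, "hidden1", tf.nn.relu)
logits = dense_layer(hidden, 256, 10, "output", tf.identity)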

Example 5: conv_pool_layer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def conv_pool_layer(x, in_channels, out_channels, layer_name):
    """Creates a single convpool layer of a NN"""
    with tf.name_scope(layer_name):
        # layer weights corresponding to the input / output channels
        weights = tf.Variable(tf.truncated_normal([5, 5, in_channels, out_channels], stddev=0.1))

        # layer biases corresponding to output channels
        biases = tf.Variable(tf.constant(0.1, shape=[out_channels]))

        # convolution layer: convolving inputs with the weights and applying ReLU
        conv = tf.nn.relu(tf.nn.conv2d(x, weights, strides=[1, 1, 1, 1], padding='SAME') + biases)

        # max-pooling layer: pooling convolutions (after applying ReLU) by 2x2 windows
        pool = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        return pool


# PREPARING DATA

# downloading (on first run) and extracting MNIST data 
Developer: aakhundov, Project: tf-example-models, Lines: 23, Source: tf_cnn.py
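
A minimal usage sketch for conv_pool_layer (assuming TensorFlow 1.x and MNIST-sized inputs; the exact sizes in the original tf_cnn.py may differ):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
pool1 = conv_pool_layer(images, 1, 32, "conv_pool_1")   # -> [None, 14, 14, 32]
pool2 = conv_pool_layer(pool1, 32, 64, "conv_pool_2")   # -> [None, 7, 7, 64]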

Example 6: dense_layer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def dense_layer(x, in_dim, out_dim, layer_name, act):
    """Creates a single densely connected layer of a NN"""
    with tf.name_scope(layer_name):
        # layer weights corresponding to the input / output dimensions
        weights = tf.Variable(
            tf.truncated_normal(
                [in_dim, out_dim], 
                stddev=1.0 / tf.sqrt(float(out_dim))
            ), name="weights"
        )

        # layer biases corresponding to output dimension
        biases = tf.Variable(tf.zeros([out_dim]), name="biases")

        # layer activations applied to Wx+b
        layer = act(tf.matmul(x, weights) + biases, name="activations")

    return layer


# PREPARING DATA

# downloading (on first run) and extracting MNIST data 
Developer: aakhundov, Project: tf-example-models, Lines: 25, Source: tf_mlp.py

Example 7: conv_den

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def conv_den(x, dendrites, out_channels, variances=[1., 1., 2.], width=11, data_format="NHWC"):

    if data_format not in ["NHWC", "NCHW"]:
        raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")

    if data_format == "NHWC":
        raise NotImplementedError("data_format \"NHWC\" is not yet implemented!")

    shape = x.shape.as_list()
    depth = shape[1]

    positions_height = tf.Variable(initial_value=tf.truncated_normal([dendrites], stddev=(width - 3) / 4.), name="dendrite_height", dtype=tf.float32)
    positions_width = tf.Variable(initial_value=tf.truncated_normal([dendrites], stddev=(width - 3) / 4.), name="dendrite_width", dtype=tf.float32)
    positions_depth = tf.Variable(initial_value=tf.abs(tf.truncated_normal([dendrites], stddev=(depth - 2) / 2.)) + 0.5, name="dendrite_depth", dtype=tf.float32)

    positions = [positions_height, positions_width, positions_depth]

    weights = weight_variable([out_channels, dendrites], name="weights")
    bias = bias_variable([out_channels, 1, 1], name="biases")

    # Pass the function's own parameters through instead of re-hardcoding the defaults.
    output = dendrite_layer(x, positions, weights, variances=variances, width=width, data_format=data_format)

    return tf.nn.relu(output + bias, name="relu") 
Developer: FelixGruen, Project: tensorflow-u-net, Lines: 25, Source: layers.py

Example 8: weight_variable

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def weight_variable(shape, stddev=0.1, name=None):
    """
    Creates a weight variable initialized with a truncated normal distribution.

    Parameters
    ----------
    shape : list or tuple of ints
        The shape of the weight variable.
    stddev : float
        The standard deviation of the truncated normal distribution.
    name : string
        The name of the variable in TensorFlow.

    Returns
    -------
    weights: TF variable
        The weight variable.   
    """
    return tf.Variable(initial_value=tf.truncated_normal(shape, stddev=stddev),
                       name=name,
                       dtype=tf.float32) 
Developer: FelixGruen, Project: tensorflow-u-net, Lines: 23, Source: layers.py
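
A short usage sketch (assuming TensorFlow 1.x; the kernel shape is illustrative):

# A 5x5 convolution kernel mapping 1 input channel to 32 output channels.
kernel = weight_variable([5, 5, 1, 32], stddev=0.1, name="conv1_weights")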

Example 9: build

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def build(self):
    # Create target inputs
    self.label_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="label_placeholder")
    self.weight_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="weight_placeholder")

    feat = self.model.return_outputs()
    feat_size = feat.get_shape()[-1].value
    outputs = []
    for task in range(self.n_tasks):
      outputs.append(
          tf.squeeze(
              model_ops.fully_connected_layer(
                  tensor=feat,
                  size=1,
                  weight_init=tf.truncated_normal(
                      shape=[feat_size, 1], stddev=0.01),
                  bias_init=tf.constant(value=0., shape=[1]))))
    return outputs 
Developer: deepchem, Project: deepchem, Lines: 22, Source: multitask_regressor.py

Example 10: sample_encoded_context

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def sample_encoded_context(self, embeddings):
        '''Helper function for init_opt'''
        c_mean_logsigma = self.model.generate_condition(embeddings)
        mean = c_mean_logsigma[0]
        if cfg.TRAIN.COND_AUGMENTATION:
            # epsilon = tf.random_normal(tf.shape(mean))
            epsilon = tf.truncated_normal(tf.shape(mean))
            stddev = tf.exp(c_mean_logsigma[1])
            c = mean + stddev * epsilon

            kl_loss = KL_loss(c_mean_logsigma[0], c_mean_logsigma[1])
        else:
            c = mean
            kl_loss = 0

        return c, cfg.TRAIN.COEFF.KL * kl_loss 
Developer: hanzhanggit, Project: StackGAN, Lines: 18, Source: trainer.py
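
The conditioning augmentation above is the reparameterization trick: rather than sampling c directly, it samples epsilon from a (truncated) standard normal and then shifts and scales it, so gradients can flow into the mean and log-sigma. A minimal standalone sketch (illustrative shapes, not from the original trainer.py):

import tensorflow as tf

mean = tf.zeros([4, 128])
log_sigma = tf.zeros([4, 128])
epsilon = tf.truncated_normal(tf.shape(mean))   # ~N(0, 1), truncated at two stddev
c = mean + tf.exp(log_sigma) * epsilon          # differentiable w.r.t. mean and log_sigma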

Example 11: sample_encoded_context

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def sample_encoded_context(self, embeddings):
        '''Helper function for init_opt'''
        # Build conditioning augmentation structure for text embedding
        # under different variable_scope: 'g_net' and 'hr_g_net'
        c_mean_logsigma = self.model.generate_condition(embeddings)
        mean = c_mean_logsigma[0]
        if cfg.TRAIN.COND_AUGMENTATION:
            # epsilon = tf.random_normal(tf.shape(mean))
            epsilon = tf.truncated_normal(tf.shape(mean))
            stddev = tf.exp(c_mean_logsigma[1])
            c = mean + stddev * epsilon

            kl_loss = KL_loss(c_mean_logsigma[0], c_mean_logsigma[1])
        else:
            c = mean
            kl_loss = 0
        # TODO: play with the coefficient for KL
        return c, cfg.TRAIN.COEFF.KL * kl_loss 
Developer: hanzhanggit, Project: StackGAN, Lines: 20, Source: trainer.py

Example 12: setupRNN

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def setupRNN(self):
        """ Create RNN layers and return output of these layers """
        # Collapse layer to remove dimension 100 x 1 x 512 --> 100 x 512 on axis=2
        rnnIn3d = tf.squeeze(self.cnnOut4d, axis=[2])

        # 2 layers of LSTM cell used to build RNN
        numHidden = 512
        cells = [tf.contrib.rnn.LSTMCell(
            num_units=numHidden, state_is_tuple=True, name='basic_lstm_cell') for _ in range(2)]
        stacked = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
        # Bi-directional RNN
        # BxTxF -> BxTx2H
        ((forward, backward), _) = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=stacked, cell_bw=stacked, inputs=rnnIn3d, dtype=rnnIn3d.dtype)

        # BxTxH + BxTxH -> BxTx2H -> BxTx1X2H
        concat = tf.expand_dims(tf.concat([forward, backward], 2), 2)

        # Project output to chars (including blank): BxTx1x2H -> BxTx1xC -> BxTxC
        kernel = tf.Variable(tf.truncated_normal(
            [1, 1, numHidden * 2, len(self.charList) + 1], stddev=0.1))
        self.rnnOut3d = tf.squeeze(tf.nn.atrous_conv2d(value=concat, filters=kernel, rate=1, padding='SAME'), axis=[2]) 
Developer: sushant097, Project: Handwritten-Line-Text-Recognition-using-Deep-Learning-with-Tensorflow, Lines: 24, Source: Model.py
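
In the final step above, tf.nn.atrous_conv2d with rate=1 and a 1x1 kernel is simply an ordinary 1x1 convolution that projects the 2H bidirectional features onto the character classes. A minimal standalone sketch of the same projection (illustrative sizes; assumes TensorFlow 1.x):

import tensorflow as tf

num_hidden, num_chars = 512, 80   # num_chars stands in for len(charList) + 1 (CTC blank)
features = tf.placeholder(tf.float32, [None, 100, 1, 2 * num_hidden])   # BxTx1x2H
kernel = tf.Variable(tf.truncated_normal([1, 1, 2 * num_hidden, num_chars], stddev=0.1))
logits = tf.squeeze(
    tf.nn.conv2d(features, kernel, strides=[1, 1, 1, 1], padding='SAME'),
    axis=[2])                     # BxTxC, equivalent to atrous_conv2d with rate=1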

Example 13: weight_noise

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def weight_noise(noise_rate, learning_rate, var_list):
  """Apply weight noise to vars in var_list."""
  if not noise_rate:
    return [tf.no_op()]

  tf.logging.info("Applying weight noise scaled by learning rate, "
                  "noise_rate: %0.5f", noise_rate)

  noise_ops = []

  for v in var_list:
    with tf.device(v.device):
      scale = noise_rate * learning_rate * 0.001
      if common_layers.should_generate_summaries():
        tf.summary.scalar("weight_noise_scale", scale)
      noise = tf.truncated_normal(v.shape) * scale
      noise_op = v.assign_add(noise)
      noise_ops.append(noise_op)

  return noise_ops 
Developer: yyht, Project: BERT, Lines: 22, Source: optimize.py

Example 14: tanh_discrete_bottleneck

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise,
                             discretize_warmup_steps, mode):
  """Simple discretization through tanh, flip bottleneck_noise many bits."""
  x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck")
  d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0
  if mode == tf.estimator.ModeKeys.TRAIN:
    x += tf.truncated_normal(
        common_layers.shape_list(x), mean=0.0, stddev=0.2)
  x = tf.tanh(x)
  d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
  if mode == tf.estimator.ModeKeys.TRAIN:
    noise = tf.random_uniform(common_layers.shape_list(x))
    noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
    d *= noise
  d = common_layers.mix(d, x, discretize_warmup_steps,
                        mode == tf.estimator.ModeKeys.TRAIN)
  return d, d0 
Developer: yyht, Project: BERT, Lines: 19, Source: discretization.py

Example 15: _weight_variable

# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal [as alias]
def _weight_variable(shape, name=None):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.01) + 0.01
    return tf.Variable(initial, name=name)
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 6, Source: tf_model.py


Note: The tensorflow.truncated_normal examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before distributing or using the code. Do not reproduce this article without permission.