

Python tensorflow.random_normal_initializer Method Code Examples

This article collects typical usage examples of the tensorflow.random_normal_initializer method in Python. If you are unsure what tensorflow.random_normal_initializer does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of this method within the tensorflow package.


The following presents 15 code examples of the tensorflow.random_normal_initializer method, sorted by popularity by default.
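Before the examples, here is a minimal sketch of the basic API. This is a hedged illustration assuming TensorFlow 1.x, where the signature is tf.random_normal_initializer(mean=0.0, stddev=1.0, seed=None, dtype=tf.float32); the variable name and shape below are made up for illustration:

import tensorflow as tf

# Initializer that draws values from the normal distribution N(0.0, 0.02^2).
init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
# Illustrative variable: a 100x50 weight matrix initialized with `init`.
w = tf.get_variable('weights', shape=[100, 50], initializer=init)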

Example 1: define_network

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
def define_network(constructor, config, action_size):
  """Constructor for the recurrent cell for the algorithm.

  Args:
    constructor: Callable returning the network as RNNCell.
    config: Object providing configurations via attributes.
    action_size: Integer indicating the number of action dimensions.

  Returns:
    Created recurrent cell object.
  """
  mean_weights_initializer = (
      tf.contrib.layers.variance_scaling_initializer(
          factor=config.init_mean_factor))
  logstd_initializer = tf.random_normal_initializer(
      config.init_logstd, 1e-10)
  network = constructor(
      config.policy_layers, config.value_layers, action_size,
      mean_weights_initializer=mean_weights_initializer,
      logstd_initializer=logstd_initializer)
  return network 
Developer ID: utra-robosoccer, Project: soccer-matlab, Lines of code: 23, Source file: utility.py

Example 2: instance_norm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
def instance_norm(input):
    """
    Instance normalization
    """
    with tf.variable_scope('instance_norm'):
        num_out = input.get_shape()[-1]
        scale = tf.get_variable(
            'scale', [num_out],
            initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02))
        offset = tf.get_variable(
            'offset', [num_out],
            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02))
        mean, var = tf.nn.moments(input, axes=[1, 2], keep_dims=True)
        epsilon = 1e-6
        inv = tf.rsqrt(var + epsilon)
        return scale * (input - mean) * inv + offset 
Developer ID: clvrai, Project: SSGAN-Tensorflow, Lines of code: 18, Source file: ops.py
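A note on the design choice above: scale is drawn around mean 1.0 and offset around 0.0, so the learned affine transform starts close to the identity and the layer initially performs plain normalization.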

Example 3: add_depth_embedding

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
# Also requires: from tensor2tensor.layers import common_layers
def add_depth_embedding(x):
  """Add n-dimensional embedding as the depth embedding (timing signal).

  Adds embeddings to represent the position of the step in the recurrent
  tower.

  Args:
    x: a tensor with shape [max_step, batch, length, depth]

  Returns:
    a Tensor the same shape as x.
  """
  x_shape = common_layers.shape_list(x)
  depth = x_shape[-1]
  num_steps = x_shape[0]
  shape = [num_steps, 1, 1, depth]
  depth_embedding = (
      tf.get_variable(
          "depth_embedding",
          shape,
          initializer=tf.random_normal_initializer(0, depth**-0.5)) * (depth**
                                                                       0.5))

  x += depth_embedding
  return x 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 27, Source file: universal_transformer_util.py
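Examples 3 and 4 share the same initialization trick: the embedding variable is created with stddev depth**-0.5 (or channels**-0.5) and then multiplied back by depth**0.5, so the stored variable stays small while the effective embedding entries have roughly unit scale.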

Example 4: get_layer_timing_signal_learned_1d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
def get_layer_timing_signal_learned_1d(channels, layer, num_layers):
  """get n-dimensional embedding as the layer (vertical) timing signal.

  Adds embeddings to represent the position of the layer in the tower.

  Args:
    channels: dimension of the timing signal
    layer: layer num
    num_layers: total number of layers

  Returns:
    a Tensor of timing signals [1, 1, channels].
  """
  shape = [num_layers, 1, 1, channels]
  layer_embedding = (
      tf.get_variable(
          "layer_embedding",
          shape,
          initializer=tf.random_normal_initializer(0, channels**-0.5)) *
      (channels**0.5))
  return layer_embedding[layer, :, :, :] 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 23, Source file: common_attention.py

Example 5: conv2d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
# Also requires: import numpy as np
def conv2d(self, input_, n_filters, k_size, padding='same'):
        if not self.cfg.weight_scale:
            return tf.layers.conv2d(input_, n_filters, k_size, padding=padding)

        n_feats_in = input_.get_shape().as_list()[-1]
        fan_in = k_size * k_size * n_feats_in
        c = tf.constant(np.sqrt(2. / fan_in), dtype=tf.float32)
        kernel_init = tf.random_normal_initializer(stddev=1.)
        w_shape = [k_size, k_size, n_feats_in, n_filters]
        w = tf.get_variable('kernel', shape=w_shape, initializer=kernel_init)
        w = c * w
        strides = [1, 1, 1, 1]
        net = tf.nn.conv2d(input_, w, strides, padding=padding.upper())
        b = tf.get_variable('bias', [n_filters],
                            initializer=tf.constant_initializer(0.))
        net = tf.nn.bias_add(net, b)
        return net 
Developer ID: preritj, Project: progressive_growing_of_GANs, Lines of code: 19, Source file: net.py

Example 6: pix2pix_arg_scope

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
# Also requires: from tensorflow.contrib import layers
def pix2pix_arg_scope():
  """Returns a default argument scope for isola_net.

  Returns:
    An arg scope.
  """
  # These parameters come from the online port, which don't necessarily match
  # those in the paper.
  # TODO(nsilberman): confirm these values with Philip.
  instance_norm_params = {
      'center': True,
      'scale': True,
      'epsilon': 0.00001,
  }

  with tf.contrib.framework.arg_scope(
      [layers.conv2d, layers.conv2d_transpose],
      normalizer_fn=layers.instance_norm,
      normalizer_params=instance_norm_params,
      weights_initializer=tf.random_normal_initializer(0, 0.02)) as sc:
    return sc 
Developer ID: leimao, Project: DeepLab_v3, Lines of code: 23, Source file: pix2pix.py

Example 7: resnet_backbone

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
# Also requires the tensorpack layer helpers: Conv2D, MaxPooling,
# GlobalAvgPooling, FullyConnected, BNReLU, argscope
def resnet_backbone(image, num_blocks, group_func, block_func):
    """
    Sec 5.1: We adopt the initialization of [15] for all convolutional layers.
    TensorFlow does not have the true "MSRA init". We use variance_scaling as an approximation.
    """
    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')):
        l = Conv2D('conv0', image, 64, 7, strides=2, activation=BNReLU)
        l = MaxPooling('pool0', l, pool_size=3, strides=2, padding='SAME')
        l = group_func('group0', l, block_func, 64, num_blocks[0], 1)
        l = group_func('group1', l, block_func, 128, num_blocks[1], 2)
        l = group_func('group2', l, block_func, 256, num_blocks[2], 2)
        l = group_func('group3', l, block_func, 512, num_blocks[3], 2)
        l = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linear', l, 1000,
                                kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    """
    Sec 5.1:
    The 1000-way fully-connected layer is initialized by
    drawing weights from a zero-mean Gaussian with standard
    deviation of 0.01.
    """
    return logits 
Developer ID: tensorpack, Project: benchmarks, Lines of code: 25, Source file: resnet_model.py

Example 8: _conv

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
# Also requires: import numpy as np
def _conv(name, x, filter_size, in_filters, out_filters, strides):
    """Convolution."""
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        n = filter_size * filter_size * out_filters
        kernel = tf.get_variable(
            'DW', [filter_size, filter_size, in_filters, out_filters],
            tf.float32, initializer=tf.random_normal_initializer(
                stddev=np.sqrt(2.0 / n)))
        return tf.nn.conv2d(x, kernel, strides, padding='SAME') 
Developer ID: StephanZheng, Project: neural-fingerprinting, Lines of code: 11, Source file: madry_cifar10_model.py
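This is a hand-rolled He (MSRA) initialization: with fan-out n = filter_size * filter_size * out_filters, the kernel is drawn with stddev sqrt(2/n). Example 15 below uses the same formula.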

Example 9: two_hidden_layers_2

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
def two_hidden_layers_2(x):
    assert x.shape.as_list() == [200, 100]
    w1 = tf.get_variable('h1_weights', [100, 50], initializer=tf.random_normal_initializer())
    b1 = tf.get_variable('h1_biases', [50], initializer=tf.constant_initializer(0.0))
    h1 = tf.matmul(x, w1) + b1
    assert h1.shape.as_list() == [200, 50]
    w2 = tf.get_variable('h2_weights', [50, 10], initializer=tf.random_normal_initializer())
    b2 = tf.get_variable('h2_biases', [10], initializer=tf.constant_initializer(0.0))
    logits = tf.matmul(h1, w2) + b2
    return logits 
Developer ID: wdxtub, Project: deep-learning-note, Lines of code: 12, Source file: 13_variable_sharing.py
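Since this snippet comes from a variable-sharing tutorial, here is a hedged sketch of how such a function is typically called twice with shared weights. The scope name and the inputs x1, x2 (each of shape [200, 100] to satisfy the asserts) are illustrative assumptions, not from the original file:

with tf.variable_scope('two_layers', reuse=tf.AUTO_REUSE):
    logits1 = two_hidden_layers_2(x1)
with tf.variable_scope('two_layers', reuse=tf.AUTO_REUSE):
    # Reuses h1_weights, h1_biases, h2_weights, h2_biases from the first call.
    logits2 = two_hidden_layers_2(x2)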

Example 10: conv_relu

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
def conv_relu(inputs, filters, k_size, stride, padding, scope_name):
    '''
    A method that does convolution + relu on inputs
    '''
    with tf.compat.v1.variable_scope(scope_name, reuse=tf.compat.v1.AUTO_REUSE) as scope:
        in_channels = inputs.shape[-1]
        kernel = tf.compat.v1.get_variable('kernel',
                                 [k_size, k_size, in_channels, filters],
                                 initializer=tf.truncated_normal_initializer())
        biases = tf.compat.v1.get_variable('biases',
                                 [filters],
                                 initializer=tf.random_normal_initializer())
        conv = tf.nn.conv2d(inputs, kernel, strides=[1, stride, stride, 1], padding=padding)
    return tf.nn.relu(conv + biases, name=scope.name) 
Developer ID: wdxtub, Project: deep-learning-note, Lines of code: 16, Source file: 17_conv_mnist.py

Example 11: fully_connected

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
def fully_connected(x, output_dim, scope):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as scope:
        w = tf.get_variable('weights', [x.shape[1], output_dim], initializer=tf.random_normal_initializer())
        b = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        return tf.matmul(x, w) + b 
Developer ID: wdxtub, Project: deep-learning-note, Lines of code: 7, Source file: 14_fully_connected.py

Example 12: inference

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
def inference(input_data):
    with tf.variable_scope('hidden1'):
        # first hidden layer: 16 units
        weights = tf.get_variable("weight", [1, 16], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable("bias", [1, 16], tf.float32,
                                 initializer=tf.random_normal_initializer(0.0, 1))
        hidden1 = tf.sigmoid(tf.multiply(input_data, weights) + biases)

    with tf.variable_scope('hidden2'):
        # second hidden layer: 16 units
        weights = tf.get_variable("weight", [16, 16], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable("bias", [16], tf.float32,
                                 initializer=tf.random_normal_initializer(0.0, 1))
        hidden2 = tf.sigmoid(tf.matmul(hidden1, weights) + biases)

    with tf.variable_scope('hidden3'):
        # third hidden layer: 16 units
        weights = tf.get_variable("weight", [16, 16], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable("bias", [16], tf.float32,
                                 initializer=tf.random_normal_initializer(0.0, 1))
        hidden3 = tf.sigmoid(tf.matmul(hidden2, weights) + biases)

    with tf.variable_scope('output_layer'):
        # output layer
        weights = tf.get_variable("weight", [16, 1], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable("bias", [1], tf.float32,
                                 initializer=tf.random_normal_initializer(0.0, 1))
        output = tf.matmul(hidden3, weights) + biases
    return output


# training
Developer ID: wdxtub, Project: deep-learning-note, Lines of code: 38, Source file: 4_simulate_sin.py

Example 13: deconv2d_bn_lrn_drop

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
# Also requires: import numpy as np (batch_norm and feat_norm come from the
# surrounding project code)
def deconv2d_bn_lrn_drop(scope_or_name, inputs, kernel_shape, out_shape, subS=2, activation=tf.nn.relu,
                       use_bn=False,
                       use_mvn=False,
                       is_training=True,
                       use_lrn=False,
                       keep_prob=1.0,
                       dropout_maps=False,
                       initOpt=0):
    with tf.variable_scope(scope_or_name):
        if initOpt == 0:
            stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + kernel_shape[3]))
        if initOpt == 1:
            stddev = 5e-2
        if initOpt == 2:
            stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])),5e-2)
        kernel = tf.get_variable("weights", kernel_shape,
                                 initializer=tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", kernel_shape[2],
                               initializer=tf.constant_initializer(value=0.1))
        conv=tf.nn.conv2d_transpose(inputs, kernel, out_shape, strides=[1, subS, subS, 1], padding='SAME', name='conv')
        outputs = tf.nn.bias_add(conv, bias, name='preActivation')
        if use_bn:
            # outputs = tf.layers.batch_normalization(outputs, axis=3, training=is_training, name="batchNorm")
            outputs = batch_norm(outputs, is_training=is_training, scale=True, fused=True, scope="batchNorm")
        if use_mvn:
            outputs = feat_norm(outputs, kernel_shape[3])
        if activation:
            outputs = activation(outputs, name='activation')
        if use_lrn:
            outputs = tf.nn.local_response_normalization(outputs, name='localResponseNorm')
        if dropout_maps:
            conv_shape = tf.shape(outputs)
            n_shape = tf.stack([conv_shape[0], 1, 1, conv_shape[3]])
            outputs = tf.nn.dropout(outputs, keep_prob, noise_shape=n_shape)
        else:
            outputs = tf.nn.dropout(outputs, keep_prob)
        return outputs 
Developer ID: TobiasGruening, Project: ARU-Net, Lines of code: 39, Source file: layers.py

Example 14: fc_network

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
# Also requires: import numpy as np; slim refers to tf.contrib.slim
def fc_network(x, neurons, wt_decay, name, num_pred=None, offset=0,
               batch_norm_param=None, dropout_ratio=0.0, is_training=None): 
  if dropout_ratio > 0:
    assert(is_training is not None), \
      'is_training needs to be defined when training with dropout.'
  
  repr = []
  for i, neuron in enumerate(neurons):
    init_var = np.sqrt(2.0/neuron)
    if batch_norm_param is not None:
      x = slim.fully_connected(x, neuron, activation_fn=None,
                               weights_initializer=tf.random_normal_initializer(stddev=init_var),
                               weights_regularizer=slim.l2_regularizer(wt_decay),
                               normalizer_fn=slim.batch_norm,
                               normalizer_params=batch_norm_param,
                               biases_initializer=tf.zeros_initializer(),
                               scope='{:s}_{:d}'.format(name, offset+i))
    else:
      x = slim.fully_connected(x, neuron, activation_fn=tf.nn.relu,
                               weights_initializer=tf.random_normal_initializer(stddev=init_var),
                               weights_regularizer=slim.l2_regularizer(wt_decay),
                               biases_initializer=tf.zeros_initializer(),
                               scope='{:s}_{:d}'.format(name, offset+i))
    if dropout_ratio > 0:
       x = slim.dropout(x, keep_prob=1-dropout_ratio, is_training=is_training,
                        scope='{:s}_{:d}'.format('dropout_'+name, offset+i))
    repr.append(x)
  
  if num_pred is not None:
    init_var = np.sqrt(2.0/num_pred)
    x = slim.fully_connected(x, num_pred,
                             weights_regularizer=slim.l2_regularizer(wt_decay),
                             weights_initializer=tf.random_normal_initializer(stddev=init_var),
                             biases_initializer=tf.zeros_initializer(),
                             activation_fn=None,
                             scope='{:s}_pred'.format(name))
  return x, repr 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 39, Source file: tf_utils.py

Example 15: _conv

# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal_initializer [as alias]
# Also requires: import numpy as np
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
    """Convolution."""
    with tf.variable_scope(name):
      n = filter_size * filter_size * out_filters
      kernel = tf.get_variable(
          'DW', [filter_size, filter_size, in_filters, out_filters],
          tf.float32, initializer=tf.random_normal_initializer(
              stddev=np.sqrt(2.0/n)))
      return tf.nn.conv2d(x, kernel, strides, padding='SAME') 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 11, Source file: resnet_model.py


Note: The tensorflow.random_normal_initializer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow the corresponding project's license. Please do not reproduce without permission.