当前位置: 首页>>代码示例>>Python>>正文


Python tensorflow.uniform_unit_scaling_initializer方法代码示例

本文整理汇总了Python中tensorflow.uniform_unit_scaling_initializer方法的典型用法代码示例。如果您正苦于以下问题:Python tensorflow.uniform_unit_scaling_initializer方法的具体用法?Python tensorflow.uniform_unit_scaling_initializer怎么用?Python tensorflow.uniform_unit_scaling_initializer使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在tensorflow的用法示例。


在下文中一共展示了tensorflow.uniform_unit_scaling_initializer方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: init_vq_bottleneck

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def init_vq_bottleneck(bottleneck_size, hidden_size):
  """Create the lookup tables used by the VQ bottleneck.

  Args:
    bottleneck_size: int, number of discrete codes in the codebook.
    hidden_size: int, dimensionality of each code vector.

  Returns:
    Tuple of (means, ema_means, ema_count) variables: the codebook, its
    exponential-moving-average copy, and the per-code EMA usage counts.
  """
  codebook = tf.get_variable(
      name="means",
      shape=[bottleneck_size, hidden_size],
      initializer=tf.uniform_unit_scaling_initializer())
  usage_counts = tf.get_variable(
      name="ema_count",
      shape=[bottleneck_size],
      initializer=tf.constant_initializer(0),
      trainable=False)
  # Keep the EMA copy of the codebook on the same device as the codebook.
  with tf.colocate_with(codebook):
    ema_codebook = tf.get_variable(
        name="ema_means",
        initializer=codebook.initialized_value(),
        trainable=False)

  return codebook, ema_codebook, usage_counts
开发者ID:akzaidi,项目名称:fine-lm,代码行数:20,代码来源:transformer_nat.py

示例2: get_vq_bottleneck

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def get_vq_bottleneck(bottleneck_size, hidden_size):
  """Create (or reuse) the lookup tables for the VQ bottleneck.

  All variables live under a reusable "vq" scope so repeated calls return
  the same underlying tables.

  Args:
    bottleneck_size: int, number of discrete codes in the codebook.
    hidden_size: int, dimensionality of each code vector.

  Returns:
    Tuple of (means, ema_means, ema_count) variables.
  """
  with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
    codebook = tf.get_variable(
        name="means",
        shape=[bottleneck_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())

    usage_counts = tf.get_variable(
        name="ema_count",
        shape=[bottleneck_size],
        initializer=tf.constant_initializer(0),
        trainable=False)

    # The EMA shadow of the codebook is colocated with the codebook itself.
    with tf.colocate_with(codebook):
      ema_codebook = tf.get_variable(
          name="ema_means",
          initializer=codebook.initialized_value(),
          trainable=False)

  return codebook, ema_codebook, usage_counts
开发者ID:akzaidi,项目名称:fine-lm,代码行数:23,代码来源:discretization.py

示例3: fully_connected

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def fully_connected(x, out_dim):
  """Apply a dense (fully connected) layer to ``x``.

  Parameters:
    x (Tensor): 2-D input tensor of shape [batch, in_dim].
    out_dim (int): number of output units.

  Return:
    Tensor of shape [batch, out_dim] equal to x @ W + b.
  """
  in_dim = x.get_shape()[1]
  weights = tf.get_variable(
      'DW', [in_dim, out_dim],
      initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
  biases = tf.get_variable('biases', [out_dim],
                           initializer=tf.constant_initializer())
  return tf.nn.xw_plus_b(x, weights, biases)
开发者ID:IBM,项目名称:AIX360,代码行数:18,代码来源:train_probes.py

示例4: get_vq_codebook

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def get_vq_codebook(codebook_size, hidden_size):
  """Create (or reuse) the VQ codebook and its EMA bookkeeping variables.

  Args:
    codebook_size: int, number of discrete codes.
    hidden_size: int, dimensionality of each code vector.

  Returns:
    Tuple of (means, ema_means, ema_count) variables under the shared
    "vq" scope.
  """
  with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
    codebook = tf.get_variable(
        name="means",
        shape=[codebook_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())

    usage_counts = tf.get_variable(
        name="ema_count",
        shape=[codebook_size],
        initializer=tf.constant_initializer(0),
        trainable=False)

    # Colocate the EMA shadow variable with the codebook it tracks.
    with tf.colocate_with(codebook):
      ema_codebook = tf.get_variable(
          name="ema_means",
          initializer=codebook.initialized_value(),
          trainable=False)

  return codebook, ema_codebook, usage_counts
开发者ID:yyht,项目名称:BERT,代码行数:23,代码来源:discretization.py

示例5: add_embeddings

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def add_embeddings(self):
        """Build the embedding layer.

        Uses the pretrained matrix from the config when one is provided
        (kept frozen), otherwise learns an embedding table from scratch.
        Each looked-up sequence is dropout-regularized.

        Returns:
            Tuple (q_embed, aplus_embed, aminus_embed): embeddings for the
            question, the positive answer and the negative answer.
        """
        with tf.variable_scope('embedding'):
            if self.config.embeddings is not None:
                # Pretrained vectors: hold them fixed during training.
                embeddings = tf.Variable(self.config.embeddings, name="embeddings", trainable=False)
            else:
                embeddings = tf.get_variable('embeddings', shape=[self.config.vocab_size, self.config.embedding_size], initializer=tf.uniform_unit_scaling_initializer())

            def lookup_with_dropout(token_ids):
                # Embed the id sequence, then apply dropout in one step.
                embedded = tf.nn.embedding_lookup(embeddings, token_ids)
                return tf.nn.dropout(embedded, keep_prob=self.keep_prob)

            q_embed = lookup_with_dropout(self.q)
            aplus_embed = lookup_with_dropout(self.aplus)
            aminus_embed = lookup_with_dropout(self.aminus)
            return q_embed, aplus_embed, aminus_embed
开发者ID:l11x0m7,项目名称:Question_Answering_Models,代码行数:18,代码来源:models.py

示例6: _HL_layer

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def _HL_layer(self, bottom, n_weight, name):
        """Hidden fully-connected layer with tanh activation.

        Args:
            bottom: 3-D tensor of shape [batch, max_len, n_prev_weight].
            n_weight: int, output width of the layer.
            name: str, prefix for the variable names.

        Returns:
            3-D tensor [batch, max_len, n_weight] = tanh(bottom @ W + b).
        """
        assert len(bottom.get_shape()) == 3
        n_prev_weight = bottom.get_shape()[-1]
        max_len = bottom.get_shape()[1]
        # NOTE(review): the original code also built an unused
        # truncated_normal_initializer here; W actually uses uniform unit
        # scaling, so the dead local has been removed.
        W = tf.get_variable(name + 'W', dtype=tf.float32, shape=[n_prev_weight, n_weight],
                            initializer=tf.uniform_unit_scaling_initializer())
        b = tf.get_variable(name + 'b', dtype=tf.float32,
                            initializer=tf.constant(0.1, shape=[n_weight], dtype=tf.float32))
        # Collapse the time dimension to apply a single matmul, then restore it.
        bottom_2 = tf.reshape(bottom, [-1, n_prev_weight])
        hl = tf.nn.bias_add(tf.matmul(bottom_2, W), b)
        hl_tanh = tf.nn.tanh(hl)
        HL = tf.reshape(hl_tanh, [-1, max_len, n_weight])
        return HL
开发者ID:WenRichard,项目名称:CNN-in-Answer-selection,代码行数:19,代码来源:model.py

示例7: _build

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def _build(self, embeddings):
        """Assemble the siamese question/answer graph.

        Args:
            embeddings: optional pretrained embedding matrix; when given it
                is frozen, otherwise an embedding table is learned.

        Returns:
            Tuple (logits, res): raw scores and their sigmoid activations.
        """
        if embeddings is None:
            self.Embedding = tf.get_variable('Embedding', shape=[self.vocab_size, self.embedding_size],
                                         initializer=tf.uniform_unit_scaling_initializer())
        else:
            # Pretrained vectors stay fixed during training.
            self.Embedding = tf.Variable(tf.to_float(embeddings), trainable=False, name='Embedding')
        self.q_embed = tf.nn.dropout(tf.nn.embedding_lookup(self.Embedding, self._ques), keep_prob=self.dropout_keep_prob)
        self.a_embed = tf.nn.dropout(tf.nn.embedding_lookup(self.Embedding, self._ans), keep_prob=self.dropout_keep_prob)

        with tf.variable_scope('siamese') as scope:
            # Hidden + convolution layers; weights are shared between the
            # question branch and the answer branch via scope reuse.
            hl_q = self._HL_layer(self.q_embed, self.hidden_size, 'HL_layer')
            conv1_q = self._cnn_layer(hl_q)
            scope.reuse_variables()
            hl_a = self._HL_layer(self.a_embed, self.hidden_size, 'HL_layer')
            conv1_a = self._cnn_layer(hl_a)
        with tf.variable_scope('fc'):
            joint = tf.concat([conv1_q, conv1_a], axis=-1)
            logits = self.fc_layer(joint, 1, 'fc_layer')
            res = tf.nn.sigmoid(logits)
        return logits, res
开发者ID:WenRichard,项目名称:Customer-Chatbot,代码行数:23,代码来源:model.py

示例8: build_graph

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def build_graph(self):
        """Build the full tagging graph and stash its endpoints on ``self``.

        Constructs the input/embedding sub-graph and the tagging sub-graph
        inside a single variable scope whose default initializer is uniform
        unit scaling, then exposes the placeholders and ops as attributes
        for the training/evaluation code to use.
        """
        parameters = self.parameters
        # Every variable created below inherits the uniform-unit-scaling
        # initializer from this scope unless it overrides it.
        with tf.variable_scope(name_or_scope=self.scope, initializer=tf.uniform_unit_scaling_initializer()):
            seq_ids_pl, seq_other_ids_pls, inputs = self.build_input_graph(vocab_size=parameters['vocab_size'],
                                                                           emb_size=parameters['emb_size'],
                                                                           word_window_size=parameters['word_window_size'],
                                                                           word_vocab_size=parameters['word_vocab_size'],
                                                                           word_emb_size=parameters['word_emb_size'])
            stag_ids_pl, seq_lengths_pl, is_train_pl, cost_op, train_cost_op, scores_op, summary_op = \
                self.build_tagging_graph(inputs=inputs,
                                         num_tags=parameters['num_tags'],
                                         use_crf=parameters['use_crf'],
                                         lamd=parameters['lamd'],
                                         dropout_emb=parameters['dropout_emb'],
                                         dropout_hidden=parameters['dropout_hidden'],
                                         hidden_layers=parameters['hidden_layers'],
                                         channels=parameters['channels'],
                                         kernel_size=parameters['kernel_size'],
                                         use_bn=parameters['use_bn'],
                                         use_wn=parameters['use_wn'],
                                         active_type=parameters['active_type'])
        # Placeholders fed at run time.
        self.seq_ids_pl = seq_ids_pl
        self.seq_other_ids_pls = seq_other_ids_pls
        self.stag_ids_pl = stag_ids_pl
        self.seq_lengths_pl = seq_lengths_pl
        self.is_train_pl = is_train_pl
        # Ops consumed by the training/evaluation loop.
        self.cost_op = cost_op
        self.train_cost_op = train_cost_op
        self.scores_op = scores_op
        self.summary_op = summary_op
开发者ID:chqiwang,项目名称:convseg,代码行数:32,代码来源:tagger.py

示例9: _fully_connected

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def _fully_connected(self, x, out_dim):
    """Final dense layer: flatten per-example features, then x @ W + b."""
    flat = tf.reshape(x, [self.hps.batch_size, -1])
    weights = tf.get_variable(
        'DW', [flat.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    biases = tf.get_variable('biases', [out_dim],
                             initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(flat, weights, biases)
开发者ID:ringringyi,项目名称:DOTA_models,代码行数:11,代码来源:resnet_model.py

示例10: score

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def score(feature_vec):
    """Project a feature vector to a scalar score with a learned weight W.

    Args:
        feature_vec: 2-D tensor of shape [batch, n_features].

    Returns:
        Tensor of shape [batch, 1] equal to feature_vec @ W.
    """
    # Dead commented-out alternative (a hand-rolled init_weight call) removed.
    W = tf.get_variable("W", shape=[feature_vec.get_shape()[1], 1],
                        initializer=tf.uniform_unit_scaling_initializer())
    return tf.matmul(feature_vec, W)
开发者ID:yiling-chen,项目名称:view-finding-network,代码行数:5,代码来源:network.py

示例11: _fully_connected

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def _fully_connected(self, x, out_dim):
    """Dense output layer that flattens every non-batch dimension of x."""
    # Static product of all dimensions except the batch dimension; the
    # static shape must be fully known for this to work.
    flat_dim = 1
    for dim in x.get_shape()[1:]:
        flat_dim *= int(dim)
    flat = tf.reshape(x, [tf.shape(x)[0], -1])
    weights = tf.get_variable(
        'DW', [flat_dim, out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    biases = tf.get_variable('biases', [out_dim],
                             initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(flat, weights, biases)
开发者ID:sunblaze-ucb,项目名称:blackbox-attacks,代码行数:15,代码来源:resnet_model_reusable.py

示例12: _fully_connected

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def _fully_connected(self, x, out_dim):
    """Dense output layer; collapses all non-batch dims before the matmul."""
    # Static product of the non-batch dimensions of x.
    flat_dim = 1
    for dim in x.shape[1:]:
        flat_dim *= int(dim)
    flat = tf.reshape(x, [tf.shape(x)[0], -1])
    weights = tf.get_variable(
        'DW', [flat_dim, out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    biases = tf.get_variable('biases', [out_dim],
                             initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(flat, weights, biases)
开发者ID:sunblaze-ucb,项目名称:blackbox-attacks,代码行数:15,代码来源:madry_thin_model.py

示例13: sharded_variable

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def sharded_variable(name, shape, num_shards, dtype=tf.float32, transposed=False):
    """Create `num_shards` variables that jointly hold a [shape[0], shape[1]] matrix.

    Shards split the first axis; the combined size may exceed the requested
    shape, which is acceptable for embedding tables.
    """
    # Ceiling division so every row is covered by some shard.
    shard_size = int((shape[0] + num_shards - 1) / num_shards)
    # `full_shape` tells the (legacy) initializer the logical full matrix
    # shape; swap the axes when the storage is transposed.
    full = [shape[1], shape[0]] if transposed else shape
    initializer = tf.uniform_unit_scaling_initializer(dtype=dtype, full_shape=full)
    return [tf.get_variable("%s_%d" % (name, i), [shard_size, shape[1]],
                            initializer=initializer, dtype=dtype)
            for i in range(num_shards)]


# XXX(rafal): Code below copied from rnn_cell.py 
开发者ID:rafaljozefowicz,项目名称:lm,代码行数:15,代码来源:model_utils.py

示例14: linear

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def linear(input_, output_dim, scope=None, stddev=.7):
    """Affine layer: input_ @ w + b with normally-initialized weights.

    Args:
        input_: 2-D tensor of shape [batch, in_dim].
        output_dim: int, width of the output.
        scope: optional variable-scope name (defaults to 'linear').
        stddev: float, stddev of the random-normal weight initializer.

    Returns:
        Tensor of shape [batch, output_dim].
    """
    # NOTE(review): an unused uniform_unit_scaling_initializer (and the
    # commented-out line that once used it) has been removed; the weights
    # are initialized from a normal distribution.
    norm = tf.random_normal_initializer(stddev=stddev)
    const = tf.constant_initializer(0.0)
    with tf.variable_scope(scope or 'linear'):
        w = tf.get_variable('w', [input_.get_shape()[1], output_dim], initializer=norm)
        b = tf.get_variable('b', [output_dim], initializer=const)
        return tf.matmul(input_, w) + b
开发者ID:mkocaoglu,项目名称:CausalGAN,代码行数:11,代码来源:models.py

示例15: _fully_connected

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 别名]
def _fully_connected(self, x, out_dim):
        """Final dense layer: flatten x, then compute x @ W + b."""
        flat = tf.contrib.layers.flatten(x)
        weights = tf.get_variable(
            'DW', [flat.get_shape()[1], out_dim],
            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        biases = tf.get_variable('biases', [out_dim],
                                 initializer=tf.constant_initializer())
        return tf.nn.xw_plus_b(flat, weights, biases)
开发者ID:ermongroup,项目名称:generative_adversary,代码行数:11,代码来源:resnet_model.py


注:本文中的tensorflow.uniform_unit_scaling_initializer方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。