當前位置: 首頁>>代碼示例>>Python>>正文


Python tensorflow.uniform_unit_scaling_initializer方法代碼示例

本文整理匯總了Python中tensorflow.uniform_unit_scaling_initializer方法的典型用法代碼示例。如果您正苦於以下問題:Python tensorflow.uniform_unit_scaling_initializer方法的具體用法?Python tensorflow.uniform_unit_scaling_initializer怎麽用?Python tensorflow.uniform_unit_scaling_initializer使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在tensorflow的用法示例。


在下文中一共展示了tensorflow.uniform_unit_scaling_initializer方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: init_vq_bottleneck

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def init_vq_bottleneck(bottleneck_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  # Trainable codebook of bottleneck_size vectors, each hidden_size wide.
  codebook_shape = [bottleneck_size, hidden_size]
  means = tf.get_variable(
      initializer=tf.uniform_unit_scaling_initializer(),
      shape=codebook_shape,
      name="means")
  # Per-entry EMA cluster counts; updated manually, not by the optimizer.
  ema_count = tf.get_variable(
      trainable=False,
      initializer=tf.constant_initializer(0),
      shape=[bottleneck_size],
      name="ema_count")
  # EMA shadow copy of the codebook, placed on the same device as `means`.
  with tf.colocate_with(means):
    ema_means = tf.get_variable(
        trainable=False,
        initializer=means.initialized_value(),
        name="ema_means")
  return means, ema_means, ema_count
開發者ID:akzaidi,項目名稱:fine-lm,代碼行數:20,代碼來源:transformer_nat.py

示例2: get_vq_bottleneck

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def get_vq_bottleneck(bottleneck_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  # AUTO_REUSE lets repeated calls share the same "vq" variables.
  with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
    # Trainable codebook vectors.
    table_shape = [bottleneck_size, hidden_size]
    means = tf.get_variable(
        initializer=tf.uniform_unit_scaling_initializer(),
        shape=table_shape,
        name="means")

    # Per-entry EMA counts (not trained by gradient descent).
    ema_count = tf.get_variable(
        trainable=False,
        initializer=tf.constant_initializer(0),
        shape=[bottleneck_size],
        name="ema_count")

    # EMA copy of the codebook, colocated with it.
    with tf.colocate_with(means):
      ema_means = tf.get_variable(
          trainable=False,
          initializer=means.initialized_value(),
          name="ema_means")

  return means, ema_means, ema_count
開發者ID:akzaidi,項目名稱:fine-lm,代碼行數:23,代碼來源:discretization.py

示例3: fully_connected

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def fully_connected(x, out_dim):
  """FullyConnected layer: x @ W + b.

  Parameters:
  x (Tensor): 2-D input to the fully connected layer, shape [batch, in_dim].
  out_dim (int): Output dimension of the fully connected layer.

  Return:
  The Tensor corresponding to the fully connected layer output.
  """
  in_dim = x.get_shape()[1]
  w = tf.get_variable(
      'DW', [in_dim, out_dim],
      initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
  b = tf.get_variable('biases', [out_dim],
                      initializer=tf.constant_initializer())
  return tf.nn.xw_plus_b(x, w, b)
開發者ID:IBM,項目名稱:AIX360,代碼行數:18,代碼來源:train_probes.py

示例4: get_vq_codebook

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def get_vq_codebook(codebook_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  # AUTO_REUSE makes repeated calls return the same "vq" variables.
  with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
    # Trainable codebook: codebook_size entries of width hidden_size.
    entry_shape = [codebook_size, hidden_size]
    means = tf.get_variable(
        initializer=tf.uniform_unit_scaling_initializer(),
        shape=entry_shape,
        name="means")

    # EMA usage counts per codebook entry; updated outside the optimizer.
    ema_count = tf.get_variable(
        trainable=False,
        initializer=tf.constant_initializer(0),
        shape=[codebook_size],
        name="ema_count")

    # EMA shadow of the codebook, kept on the same device as `means`.
    with tf.colocate_with(means):
      ema_means = tf.get_variable(
          trainable=False,
          initializer=means.initialized_value(),
          name="ema_means")

  return means, ema_means, ema_count
開發者ID:yyht,項目名稱:BERT,代碼行數:23,代碼來源:discretization.py

示例5: add_embeddings

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def add_embeddings(self):
        """Embedding layer: look up q / a+ / a- token ids and apply dropout.

        Uses the pretrained matrix from config when available (frozen),
        otherwise creates a trainable embedding table.
        """
        with tf.variable_scope('embedding'):
            if self.config.embeddings is None:
                embeddings = tf.get_variable(
                    'embeddings',
                    shape=[self.config.vocab_size, self.config.embedding_size],
                    initializer=tf.uniform_unit_scaling_initializer())
            else:
                embeddings = tf.Variable(self.config.embeddings, name="embeddings", trainable=False)
            # Shared table for question, positive answer, negative answer.
            lookups = [tf.nn.embedding_lookup(embeddings, ids)
                       for ids in (self.q, self.aplus, self.aminus)]
            q_embed, aplus_embed, aminus_embed = [
                tf.nn.dropout(embedded, keep_prob=self.keep_prob)
                for embedded in lookups]
            return q_embed, aplus_embed, aminus_embed
開發者ID:l11x0m7,項目名稱:Question_Answering_Models,代碼行數:18,代碼來源:models.py

示例6: _HL_layer

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def _HL_layer(self, bottom, n_weight, name):
        """Fully-connected (hidden) layer applied position-wise to a 3-D input.

        Args:
            bottom: Tensor of shape [batch, max_len, n_prev_weight].
            n_weight: int, output width of the layer.
            name: str, prefix for the created variable names.

        Returns:
            Tensor of shape [batch, max_len, n_weight] after tanh activation.
        """
        assert len(bottom.get_shape()) == 3
        n_prev_weight = bottom.get_shape()[-1]
        max_len = bottom.get_shape()[1]
        # NOTE(review): the original also built an unused
        # truncated_normal_initializer(stddev=0.01) local here; W actually
        # uses uniform_unit_scaling, so the dead initializer was removed.
        W = tf.get_variable(name + 'W', dtype=tf.float32, shape=[n_prev_weight, n_weight],
                            initializer=tf.uniform_unit_scaling_initializer())
        b = tf.get_variable(name + 'b', dtype=tf.float32,
                            initializer=tf.constant(0.1, shape=[n_weight], dtype=tf.float32))
        # Collapse batch and time so a single matmul applies the layer
        # at every position, then restore the 3-D shape.
        bottom_2 = tf.reshape(bottom, [-1, n_prev_weight])
        hl = tf.nn.bias_add(tf.matmul(bottom_2, W), b)
        hl_tanh = tf.nn.tanh(hl)
        HL = tf.reshape(hl_tanh, [-1, max_len, n_weight])
        return HL
開發者ID:WenRichard,項目名稱:CNN-in-Answer-selection,代碼行數:19,代碼來源:model.py

示例7: _build

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def _build(self, embeddings):
        """Build the siamese graph: embeddings -> shared HL/CNN towers -> FC.

        Args:
            embeddings: optional pretrained embedding matrix. When given it
                is wrapped as a frozen (non-trainable) variable; otherwise a
                trainable table of [vocab_size, embedding_size] is created.

        Returns:
            (logits, res): raw logits and their sigmoid probabilities.
        """
        if embeddings is not None:
            self.Embedding = tf.Variable(tf.to_float(embeddings), trainable=False, name='Embedding')
        else:
            self.Embedding = tf.get_variable('Embedding', shape=[self.vocab_size, self.embedding_size],
                                         initializer=tf.uniform_unit_scaling_initializer())
        self.q_embed = tf.nn.dropout(tf.nn.embedding_lookup(self.Embedding, self._ques), keep_prob=self.dropout_keep_prob)
        self.a_embed = tf.nn.dropout(tf.nn.embedding_lookup(self.Embedding, self._ans), keep_prob=self.dropout_keep_prob)

        with tf.variable_scope('siamese') as scope:
            # Hidden + convolution layers. The question tower creates the
            # variables; scope.reuse_variables() makes the answer tower
            # share them, so the call order here matters.
            hl_q = self._HL_layer(self.q_embed, self.hidden_size, 'HL_layer')
            conv1_q = self._cnn_layer(hl_q)
            scope.reuse_variables()
            hl_a = self._HL_layer(self.a_embed, self.hidden_size, 'HL_layer')
            conv1_a = self._cnn_layer(hl_a)
        with tf.variable_scope('fc') as scope:
            # Concatenate both towers and project to a single score.
            con = tf.concat([conv1_q, conv1_a], axis=-1)
            logits = self.fc_layer(con, 1, 'fc_layer')
            res = tf.nn.sigmoid(logits)
        return logits, res
開發者ID:WenRichard,項目名稱:Customer-Chatbot,代碼行數:23,代碼來源:model.py

示例8: build_graph

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def build_graph(self):
        """Assemble the full tagging graph and cache its ops on `self`.

        Builds the input/embedding sub-graph and the tagging sub-graph
        inside one variable scope whose default initializer is
        uniform_unit_scaling, then stores the resulting placeholders and
        ops as attributes for the training / inference loops.
        """
        parameters = self.parameters
        # Every variable created below inherits uniform-unit-scaling init
        # from the enclosing scope unless it overrides it.
        with tf.variable_scope(name_or_scope=self.scope, initializer=tf.uniform_unit_scaling_initializer()):
            seq_ids_pl, seq_other_ids_pls, inputs = self.build_input_graph(vocab_size=parameters['vocab_size'],
                                                                           emb_size=parameters['emb_size'],
                                                                           word_window_size=parameters['word_window_size'],
                                                                           word_vocab_size=parameters['word_vocab_size'],
                                                                           word_emb_size=parameters['word_emb_size'])
            stag_ids_pl, seq_lengths_pl, is_train_pl, cost_op, train_cost_op, scores_op, summary_op = \
                self.build_tagging_graph(inputs=inputs,
                                         num_tags=parameters['num_tags'],
                                         use_crf=parameters['use_crf'],
                                         lamd=parameters['lamd'],
                                         dropout_emb=parameters['dropout_emb'],
                                         dropout_hidden=parameters['dropout_hidden'],
                                         hidden_layers=parameters['hidden_layers'],
                                         channels=parameters['channels'],
                                         kernel_size=parameters['kernel_size'],
                                         use_bn=parameters['use_bn'],
                                         use_wn=parameters['use_wn'],
                                         active_type=parameters['active_type'])
        # Expose the placeholders and ops built above.
        self.seq_ids_pl = seq_ids_pl
        self.seq_other_ids_pls = seq_other_ids_pls
        self.stag_ids_pl = stag_ids_pl
        self.seq_lengths_pl = seq_lengths_pl
        self.is_train_pl = is_train_pl
        self.cost_op = cost_op
        self.train_cost_op = train_cost_op
        self.scores_op = scores_op
        self.summary_op = summary_op
開發者ID:chqiwang,項目名稱:convseg,代碼行數:32,代碼來源:tagger.py

示例9: _fully_connected

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    # Flatten all feature dimensions; batch size is fixed by hps.
    flat = tf.reshape(x, [self.hps.batch_size, -1])
    in_dim = flat.get_shape()[1]
    w = tf.get_variable(
        'DW', [in_dim, out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(flat, w, b)
開發者ID:ringringyi,項目名稱:DOTA_models,代碼行數:11,代碼來源:resnet_model.py

示例10: score

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def score(feature_vec):
    """Linear scoring head: project the feature vector to one scalar."""
    in_dim = feature_vec.get_shape()[1]
    W = tf.get_variable("W", shape=[in_dim, 1],
                        initializer=tf.uniform_unit_scaling_initializer())
    return tf.matmul(feature_vec, W)
開發者ID:yiling-chen,項目名稱:view-finding-network,代碼行數:5,代碼來源:network.py

示例11: _fully_connected

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    # Flattened feature size: product of every non-batch dimension.
    flat_size = 1
    for dim in x.get_shape()[1:]:
      flat_size *= int(dim)
    # Keep the batch dimension dynamic; flatten the rest.
    flat = tf.reshape(x, [tf.shape(x)[0], -1])
    w = tf.get_variable(
        'DW', [flat_size, out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(flat, w, b)
開發者ID:sunblaze-ucb,項目名稱:blackbox-attacks,代碼行數:15,代碼來源:resnet_model_reusable.py

示例12: _fully_connected

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    # Flattened feature size: product of every non-batch dimension.
    flat_size = 1
    for dim in x.shape[1:]:
      flat_size *= int(dim)
    # Keep the batch dimension dynamic; flatten the rest.
    flat = tf.reshape(x, [tf.shape(x)[0], -1])
    w = tf.get_variable(
        'DW', [flat_size, out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(flat, w, b)
開發者ID:sunblaze-ucb,項目名稱:blackbox-attacks,代碼行數:15,代碼來源:madry_thin_model.py

示例13: sharded_variable

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def sharded_variable(name, shape, num_shards, dtype=tf.float32, transposed=False):
    """Split a [rows, cols] variable into `num_shards` row-wise shards.

    The combined size of the shards may be larger than requested (the last
    shard is not trimmed), which is acceptable for embeddings.
    """
    # Ceiling division: rows held by each shard.
    shard_size = (shape[0] + num_shards - 1) // num_shards
    # NOTE(review): `full_shape` was removed from this initializer in later
    # TF releases — confirm the pinned TensorFlow version still accepts it.
    full = [shape[1], shape[0]] if transposed else shape
    initializer = tf.uniform_unit_scaling_initializer(dtype=dtype, full_shape=full)
    return [
        tf.get_variable(name + "_%d" % i, [shard_size, shape[1]],
                        initializer=initializer, dtype=dtype)
        for i in range(num_shards)
    ]


# XXX(rafal): Code below copied from rnn_cell.py 
開發者ID:rafaljozefowicz,項目名稱:lm,代碼行數:15,代碼來源:model_utils.py

示例14: linear

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def linear(input_, output_dim, scope=None, stddev=.7):
    """Affine layer: input_ @ w + b with normally-initialized weights.

    Args:
        input_: 2-D tensor of shape [batch, in_dim].
        output_dim: int, number of output units.
        scope: optional variable-scope name (defaults to 'linear').
        stddev: float, stddev for the random-normal weight initializer.

    Returns:
        Tensor of shape [batch, output_dim].
    """
    # The original also built an unused uniform_unit_scaling_initializer
    # (plus a commented-out line using it); both removed as dead code.
    norm = tf.random_normal_initializer(stddev=stddev)
    const = tf.constant_initializer(0.0)
    with tf.variable_scope(scope or 'linear'):
        w = tf.get_variable('w', [input_.get_shape()[1], output_dim], initializer=norm)
        b = tf.get_variable('b', [output_dim], initializer=const)
        return tf.matmul(input_, w) + b
開發者ID:mkocaoglu,項目名稱:CausalGAN,代碼行數:11,代碼來源:models.py

示例15: _fully_connected

# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import uniform_unit_scaling_initializer [as 別名]
def _fully_connected(self, x, out_dim):
        """FullyConnected layer for final output."""
        # Flatten all non-batch dimensions before the matmul.
        flat = tf.contrib.layers.flatten(x)
        in_dim = flat.get_shape()[1]
        weights = tf.get_variable(
            'DW', [in_dim, out_dim],
            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        biases = tf.get_variable('biases', [out_dim],
                                 initializer=tf.constant_initializer())
        return tf.nn.xw_plus_b(flat, weights, biases)
開發者ID:ermongroup,項目名稱:generative_adversary,代碼行數:11,代碼來源:resnet_model.py


注:本文中的tensorflow.uniform_unit_scaling_initializer方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。