

Python tensorflow.nce_loss Method Code Examples

This article collects typical usage examples of the Python method tensorflow.nce_loss. If you are unsure what tensorflow.nce_loss does or how to call it, the curated examples below should help; you can also explore further usage examples of this method within TensorFlow.


The following presents 5 code examples of the tensorflow.nce_loss method, ordered by popularity.
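Before the project examples, here is a minimal, self-contained sketch of the typical tf.nn.nce_loss call pattern (TF 1.x graph mode; the sizes below are illustrative placeholders, not taken from any project on this page):

import tensorflow as tf

vocab_size, embed_dim, num_sampled = 10000, 128, 64

inputs = tf.placeholder(tf.int32, shape=[None])     # word ids
labels = tf.placeholder(tf.int64, shape=[None, 1])  # targets, shape [batch_size, num_true]

embeddings = tf.Variable(tf.random_uniform([vocab_size, embed_dim], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, inputs)  # [batch_size, embed_dim]

# nce_loss expects weights of shape [num_classes, dim] and biases of shape [num_classes].
nce_w = tf.Variable(tf.truncated_normal([vocab_size, embed_dim], stddev=0.05))
nce_b = tf.Variable(tf.zeros([vocab_size]))

loss = tf.reduce_mean(tf.nn.nce_loss(
    weights=nce_w, biases=nce_b,
    labels=labels, inputs=embed,
    num_sampled=num_sampled, num_classes=vocab_size))

Each evaluation of loss draws a fresh set of num_sampled negative classes, so the full softmax over vocab_size classes never has to be computed during training.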

Example 1: loss_nce

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nce_loss [as alias]
def loss_nce(self, l2_lambda=0.0001):  # tried values: 0.0001 and 0.001
    """Calculate the training loss using noise-contrastive estimation (NCE)."""
    # Compute the average NCE loss for the batch. tf.nn.nce_loss automatically
    # draws a new sample of the negative labels each time we evaluate the loss.
    if not self.is_training:
        raise ValueError("loss_nce is only defined in training mode")
    labels = tf.expand_dims(self.input_y, 1)  # [batch_size] ---> [batch_size, 1]
    loss = tf.reduce_mean(
        tf.nn.nce_loss(
            weights=tf.transpose(self.W_projection),  # [hidden_size*2, num_classes] ---> [num_classes, hidden_size*2]; nce_loss expects weights of shape [num_classes, dim]
            biases=self.b_projection,                 # [num_classes]
            labels=labels,                            # int64 tensor of shape [batch_size, num_true]: the target classes
            inputs=self.output_rnn_last,              # [batch_size, hidden_size*2]: the forward activations of the input network
            num_sampled=self.num_sampled,             # scalar, e.g. 100
            num_classes=self.num_classes,             # scalar, e.g. 1999
            partition_strategy="div"))
    # L2-regularize all non-bias trainable variables.
    l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()
                          if 'bias' not in v.name]) * l2_lambda
    return loss + l2_losses
Author: brightmart, Project: text_classification, Source: p8_TextRNN_model_multi_layers.py
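A note on the transpose in this example: W_projection is stored as [hidden_size*2, num_classes] so that inference can compute logits with a plain matmul, while tf.nn.nce_loss expects its weights as [num_classes, dim]. A hedged standalone sketch of the two uses (the sizes and placeholder are illustrative, inferred from the shape comments above):

import tensorflow as tf

hidden2, num_classes = 256, 1999  # illustrative sizes
W_projection = tf.Variable(tf.truncated_normal([hidden2, num_classes], stddev=0.1))
b_projection = tf.Variable(tf.zeros([num_classes]))
features = tf.placeholder(tf.float32, [None, hidden2])  # e.g. the last RNN output

# Inference: plain logits use the matrix as stored, [dim, num_classes].
logits = tf.nn.bias_add(tf.matmul(features, W_projection), b_projection)

# Training: tf.nn.nce_loss wants [num_classes, dim], hence the transpose.
nce_weights = tf.transpose(W_projection)  # [num_classes, hidden2]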

Example 2: loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nce_loss [as alias]
def loss(self, l2_lambda=0.01):
    """Calculate the loss: NCE during training, full sigmoid cross entropy at eval."""
    if self.is_training:  # training
        # Compute the average NCE loss for the batch. tf.nn.nce_loss automatically
        # draws a new sample of the negative labels each time we evaluate the loss.
        labels = tf.reshape(self.labels, [-1])  # [batch_size, 1] ---> [batch_size]
        labels = tf.expand_dims(labels, 1)      # [batch_size] ---> [batch_size, 1]
        loss = tf.reduce_mean(
            tf.nn.nce_loss(
                weights=tf.transpose(self.W),    # [embed_size, label_size] ---> [label_size, embed_size]; nce_loss expects weights of shape [num_classes, dim]
                biases=self.b,                   # [label_size]
                labels=labels,                   # int64 tensor of shape [batch_size, num_true]: the target classes
                inputs=self.sentence_embeddings, # [batch_size, embed_size]: the forward activations of the input network
                num_sampled=self.num_sampled,    # scalar, e.g. 100
                num_classes=self.label_size,     # scalar, e.g. 1999
                partition_strategy="div"))
    else:  # eval/inference
        labels_one_hot = tf.one_hot(self.labels, self.label_size)  # [batch_size] ---> [batch_size, label_size]
        # sigmoid_cross_entropy_with_logits treats each class as independent and not
        # mutually exclusive, e.g. a picture can contain both an elephant and a dog.
        loss = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=labels_one_hot, logits=self.logits)      # [batch_size, label_size]
        loss = tf.reduce_mean(tf.reduce_sum(loss, axis=1))  # reduce to a scalar
    l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()
                          if 'bias' not in v.name]) * l2_lambda
    return loss + l2_losses
Author: brightmart, Project: text_classification, Source: p5_fastTextB_model.py
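For reference, the eval branch above computes the full-vocabulary sigmoid loss that NCE approximates during training. A hedged standalone sketch of that path, mirroring the example's own eval code (sizes illustrative):

import tensorflow as tf

label_size = 1999  # illustrative
labels = tf.placeholder(tf.int32, [None])                # one class id per example
logits = tf.placeholder(tf.float32, [None, label_size])  # full scores over all labels

labels_one_hot = tf.one_hot(labels, label_size)          # [batch_size, label_size]
per_class = tf.nn.sigmoid_cross_entropy_with_logits(
    labels=labels_one_hot, logits=logits)                # [batch_size, label_size]
eval_loss = tf.reduce_mean(tf.reduce_sum(per_class, axis=1))  # scalar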

Example 3: loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nce_loss [as alias]
def loss(self, l2_lambda=0.0001):
    """Calculate the multilabel loss using sigmoid cross entropy.

    An NCE variant was tried first and is kept below, commented out. Note for
    tf.nn.nce_loss with a variable number of target classes per example: pad the
    label lists out to a constant num_true, either by repeating labels or by
    padding with an otherwise unused class.
    """
    # if self.is_training:  # training
    #     loss = tf.reduce_mean(  # inputs' shape should be [batch_size, dim]
    #         tf.nn.nce_loss(weights=tf.transpose(self.W),   # [embed_size, label_size] ---> [label_size, embed_size]
    #                        biases=self.b,                   # [label_size]
    #                        labels=self.labels,              # [batch_size, max_label_per_example]: the target classes
    #                        inputs=self.sentence_embeddings, # [batch_size, embed_size]
    #                        num_sampled=self.num_sampled,    # scalar, e.g. 100
    #                        num_true=self.max_label_per_example,
    #                        num_classes=self.label_size,     # scalar, e.g. 1999
    #                        partition_strategy="div"))
    # else:  # eval/inference
    labels_multi_hot = self.labels_l1999  # [batch_size, label_size]
    # sigmoid_cross_entropy_with_logits treats each class as independent and not
    # mutually exclusive, which suits multilabel classification.
    loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=labels_multi_hot, logits=self.logits)  # [batch_size, label_size]
    loss = tf.reduce_mean(tf.reduce_sum(loss, axis=1))

    # Note from the author: adding this regularization made training fail to converge.
    self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()
                               if 'bias' not in v.name]) * l2_lambda
    return loss + self.l2_losses
Author: brightmart, Project: text_classification, Source: p6_fastTextB_model_multilabel.py
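The docstring above notes that tf.nn.nce_loss needs a constant number of true classes (num_true) per example. A hedged sketch of the padding idea it mentions, repeating each example's labels out to a fixed length (plain-Python preprocessing; the function name and pad scheme are assumptions, not from the project):

def pad_labels(label_lists, num_true):
    """Pad each variable-length label list to exactly num_true entries by repetition."""
    padded = []
    for labels in label_lists:
        assert labels, "each example needs at least one label (or pad with an unused class)"
        labels = list(labels)[:num_true]           # truncate if too long
        while len(labels) < num_true:              # repeat to pad out
            labels = labels + labels[:num_true - len(labels)]
        padded.append(labels)
    return padded

# pad_labels([[3], [7, 42, 9]], num_true=3) -> [[3, 3, 3], [7, 42, 9]]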

Example 4: word2vec_model

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nce_loss [as alias]
import numpy as np
import tensorflow as tf

def word2vec_model(params):
  # Input data.
  inputs = tf.placeholder(dtype=tf.int32, shape=[None], name='input')
  labels = tf.placeholder(dtype=tf.int32, shape=[None, 1], name='label')

  # Look up embeddings for inputs.
  embeddings = tf.Variable(
    tf.random_uniform([params.vocab_size, params.embedding_size], -1.0, 1.0)
  )
  embed = tf.nn.embedding_lookup(embeddings, inputs)

  # Construct the variables for the NCE loss
  nce_weights = tf.Variable(
    tf.truncated_normal(shape=[params.vocab_size, params.embedding_size],
                        stddev=1.0 / np.sqrt(params.embedding_size))
  )
  nce_biases = tf.Variable(tf.zeros([params.vocab_size]))

  # Compute the average NCE loss for the batch.
  # tf.nn.nce_loss automatically draws a new sample of the negative labels each
  # time we evaluate the loss.
  loss = tf.reduce_mean(
    tf.nn.nce_loss(nce_weights, nce_biases,
                   labels=labels,
                   inputs=embed,
                   num_sampled=params.negative_samples,
                   num_classes=params.vocab_size),
    name='loss'
  )
  optimizer = tf.train.AdamOptimizer(params.learning_rate)
  optimizer.minimize(loss, name='minimize') 
Author: maxim5, Project: hyper-engine, Source: 4_1_word2vec_embedding.py
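A hedged sketch of how the graph built by word2vec_model might be driven; the op names come from the name= arguments above, while the session loop and the next_batch iterator are assumptions for illustration:

import tensorflow as tf

# Assumes word2vec_model(params) has already been called to build the graph.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    g = tf.get_default_graph()
    inputs = g.get_tensor_by_name('input:0')
    labels = g.get_tensor_by_name('label:0')
    loss = g.get_tensor_by_name('loss:0')
    minimize = g.get_operation_by_name('minimize')

    for step in range(1000):
        batch_inputs, batch_labels = next_batch()  # hypothetical data iterator
        _, loss_val = sess.run([minimize, loss],
                               feed_dict={inputs: batch_inputs, labels: batch_labels})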

Example 5: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nce_loss [as alias]
def __init__(
        self,
        inputs = None,
        train_labels = None,
        vocabulary_size = 80000,
        embedding_size = 200,
        num_sampled = 64,
        nce_loss_args = {},
        E_init = tf.random_uniform_initializer(minval=-1.0, maxval=1.0),
        E_init_args = {},
        nce_W_init = tf.truncated_normal_initializer(stddev=0.03),
        nce_W_init_args = {},
        nce_b_init = tf.constant_initializer(value=0.0),
        nce_b_init_args = {},
        name ='word2vec_layer',
    ):
        Layer.__init__(self, name=name)
        self.inputs = inputs
        print("  tensorlayer:Instantiate Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))
        # Look up embeddings for inputs.
        # Note: a row of 'embeddings' is the vector representation of a word.
        # For speed, it is better to slice the embedding matrix than to convert
        # a word id to a one-hot vector and multiply it by the embedding matrix.
        # embed is the output of the hidden (embedding) layer: one row vector
        # of 'embedding_size' values per input id.
        with tf.variable_scope(name) as vs:
            embeddings = tf.get_variable(name='embeddings',
                                    shape=(vocabulary_size, embedding_size),
                                    initializer=E_init,
                                    **E_init_args)
            embed = tf.nn.embedding_lookup(embeddings, self.inputs)
            # Construct the variables for the NCE loss (i.e. negative sampling)
            nce_weights = tf.get_variable(name='nce_weights',
                                    shape=(vocabulary_size, embedding_size),
                                    initializer=nce_W_init,
                                    **nce_W_init_args)
            nce_biases = tf.get_variable(name='nce_biases',
                                    shape=(vocabulary_size,),  # tuple, not a bare int
                                    initializer=nce_b_init,
                                    **nce_b_init_args)

        # Compute the average NCE loss for the batch.
        # tf.nn.nce_loss automatically draws a new sample of the negative labels
        # each time we evaluate the loss.
        self.nce_cost = tf.reduce_mean(
            tf.nn.nce_loss(weights=nce_weights, biases=nce_biases,
                           inputs=embed, labels=train_labels,
                           num_sampled=num_sampled, num_classes=vocabulary_size,
                           **nce_loss_args))

        self.outputs = embed
        self.normalized_embeddings = tf.nn.l2_normalize(embeddings, 1)

        self.all_layers = [self.outputs]
        self.all_params = [embeddings, nce_weights, nce_biases]
        self.all_drop = {} 
Author: GuangmingZhu, Project: Conv3D_BICLSTM, Source: tensorlayer-layers.py
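A hedged usage sketch for this layer (the class name Word2vecEmbeddingInputlayer comes from the print statement above; the optimizer choice and placeholder shapes are assumptions, not from the project):

import tensorflow as tf

train_inputs = tf.placeholder(tf.int32, shape=[None])     # center-word ids
train_labels = tf.placeholder(tf.int64, shape=[None, 1])  # context-word ids

emb_layer = Word2vecEmbeddingInputlayer(
    inputs=train_inputs,
    train_labels=train_labels,
    vocabulary_size=80000,
    embedding_size=200,
    num_sampled=64,
    name='word2vec_layer')

# Minimize the layer's NCE cost to learn the embeddings.
train_op = tf.train.GradientDescentOptimizer(1.0).minimize(emb_layer.nce_cost)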


Note: the tensorflow.nce_loss examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from projects contributed by the open-source community; copyright remains with the original authors. Refer to each project's license before distributing or reusing the code, and do not reproduce this article without permission.