

Python tensorflow.reduce_logsumexp Method Code Examples

This article collects typical usage examples of the tensorflow.reduce_logsumexp method in Python. If you are wondering what tensorflow.reduce_logsumexp does, how to call it, or how it is used in practice, the curated code examples below may help. You can also browse further usage examples from the tensorflow module.


The following presents 15 code examples of the tensorflow.reduce_logsumexp method, sorted by popularity by default.
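Before diving into the examples, here is a minimal sketch (assuming a TF1.x environment, which all of the snippets below use) of what tf.reduce_logsumexp computes and why it is preferred over the naive formulation:

import tensorflow as tf  # TF1.x-style API, matching the examples below

x = tf.constant([[0., 0., 0.], [1000., 1000., 1000.]])

# Stable: internally subtracts the per-row max before exponentiating.
stable = tf.reduce_logsumexp(x, axis=-1)           # ~[1.0986, 1001.0986]
# Naive: exp(1000.) overflows to inf in float32.
naive = tf.log(tf.reduce_sum(tf.exp(x), axis=-1))  # [1.0986, inf]

with tf.Session() as sess:
    print(sess.run([stable, naive]))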

Example 1: alpha

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def alpha(cls, parameters: Dict[str, Tensor]) -> Tensor:
        mu = parameters["mu"]
        tau = parameters["tau"]
        nu = parameters["nu"]
        beta = parameters["beta"]

        sigma = 1./tf.sqrt(tau)
        lam = 1./beta

        muStd = tf.constant(0., dtype=mu.dtype)
        sigmaStd = tf.constant(1., dtype=mu.dtype)
        stdNorm = tf.contrib.distributions.Normal(loc=muStd, scale=sigmaStd)

        c0 = lam*(mu-nu) + stdNorm.log_cdf((nu-(mu+sigma**2*lam))/sigma)
        c1 = -lam*(mu-nu) + stdNorm.log_cdf(-(nu-(mu-sigma**2*lam))/sigma)
        c = tf.reduce_logsumexp([c0, c1], axis=0)
        f = (mu-nu)*lam

        norm = tf.distributions.Normal(loc=mu+sigma**2*lam, scale=sigma)

        alpha = tf.exp(f + norm.log_cdf(nu) - c)
        return alpha
Author: bethgelab, Project: decompose, Lines: 24, Source: jumpNormalAlgorithms.py

Example 2: M_step

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def M_step(log_R, log_activation, vote, lambda_val=0.01):
    R_shape = tf.shape(log_R)
    log_R = log_R + log_activation

    R_sum_i = cl.reduce_sum(tf.exp(log_R), axis=-3, keepdims=True)
    log_normalized_R = log_R - tf.reduce_logsumexp(log_R, axis=-3, keepdims=True)

    pose = cl.reduce_sum(vote * tf.exp(log_normalized_R), axis=-3, keepdims=True)
    log_var = tf.reduce_logsumexp(log_normalized_R + cl.log(tf.square(vote - pose)), axis=-3, keepdims=True)

    beta_v = tf.get_variable('beta_v',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=15., stddev=3.))
    cost = R_sum_i * (beta_v + 0.5 * log_var)

    beta_a = tf.get_variable('beta_a',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=100.0, stddev=10))
    cost_sum_h = cl.reduce_sum(cost, axis=-1, keepdims=True)
    logit = lambda_val * (beta_a - cost_sum_h)
    log_activation = tf.log_sigmoid(logit)

    return pose, log_var, log_activation
Author: naturomics, Project: CapsLayer, Lines: 25, Source: routing.py

Example 3: log_sum_exp

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def log_sum_exp(x, axis=None, keepdims=False):
    """
    Deprecated: Use tf.reduce_logsumexp().

    Numerically stable log-sum-exp of `x` across the given `axis`.

    :param x: A Tensor.
    :param axis: An int or list or tuple. The dimensions to reduce.
        If `None` (the default), reduces all dimensions.
    :param keepdims: Bool. If true, retains reduced dimensions with length 1.
        Defaults to False.

    :return: A Tensor after the computation of log sum exp along given axes of
        x.
    """
    x = tf.convert_to_tensor(x)
    x_max = tf.reduce_max(x, axis=axis, keepdims=True)
    ret = tf.log(tf.reduce_sum(tf.exp(x - x_max), axis=axis,
                               keepdims=True)) + x_max
    if not keepdims:
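        # Summing over the kept length-1 axes is equivalent to squeezing them out.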
        ret = tf.reduce_sum(ret, axis=axis)
    return ret 
Author: thu-ml, Project: zhusuan, Lines: 24, Source: utils.py
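A quick sanity check (a sketch, assuming a TF1.x session with the log_sum_exp helper above in scope) that the deprecated helper agrees with tf.reduce_logsumexp:

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.randn(4, 5).astype(np.float32))
with tf.Session() as sess:
    ours, tfs = sess.run([log_sum_exp(x, axis=1),
                          tf.reduce_logsumexp(x, axis=1)])
np.testing.assert_allclose(ours, tfs, rtol=1e-5)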

Example 4: _log_prob

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def _log_prob(self, given):
        logits, temperature = self.path_param(self.logits), \
                              self.path_param(self.temperature)
        log_given = tf.log(given)
        log_temperature = tf.log(temperature)
        n = tf.cast(self.n_categories, self.dtype)

        if self._check_numerics:
            log_given = tf.check_numerics(log_given, "log(given)")
            log_temperature = tf.check_numerics(
                log_temperature, "log(temperature)")

        temp = logits - temperature * log_given

        return tf.lgamma(n) + (n - 1) * log_temperature + \
            tf.reduce_sum(temp - log_given, axis=-1) - \
            n * tf.reduce_logsumexp(temp, axis=-1) 
Author: thu-ml, Project: zhusuan, Lines: 19, Source: multivariate.py

Example 5: multilabel_categorical_crossentropy

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def multilabel_categorical_crossentropy(y_true, y_pred):
	"""
	y_true = [0,1],
	1 stands for target class,
	0 stands for none-target class
	"""
	y_pred = (1 - 2 * y_true) * y_pred
	y_pred_neg = y_pred - y_true * 1e12
	y_pred_pos = y_pred - (1 - y_true) * 1e12

	zeros = tf.zeros_like(y_pred[..., :1])
	y_pred_neg = tf.concat([y_pred_neg, zeros], axis=-1)
	y_pred_pos = tf.concat([y_pred_pos, zeros], axis=-1)
	neg_loss = tf.reduce_logsumexp(y_pred_neg, axis=-1)
	pos_loss = tf.reduce_logsumexp(y_pred_pos, axis=-1)
	return neg_loss + pos_loss 
Author: yyht, Project: BERT, Lines: 18, Source: loss_utils.py
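A usage sketch for this loss (the shapes and values are illustrative assumptions, not from the source); note that y_pred is expected to be raw logits, with no sigmoid or softmax applied:

import tensorflow as tf

# 2 samples, 4 classes; sample 0 targets classes {0, 2}, sample 1 targets {1}.
y_true = tf.constant([[1., 0., 1., 0.], [0., 1., 0., 0.]])
y_pred = tf.constant([[2.3, -1.0, 0.7, 0.2], [0.1, 1.5, -0.3, 0.9]])

loss = multilabel_categorical_crossentropy(y_true, y_pred)  # shape [2]
with tf.Session() as sess:
    print(sess.run(loss))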

Example 6: logsumexp

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer or list of integers in [-rank(x), rank(x)),
            the axes to compute the logsumexp over. If `None` (default),
            computes the logsumexp over all dimensions.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Author: Relph1119, Project: GraphicDesignPatternByPython, Lines: 23, Source: tensorflow_backend.py

Example 7: dense_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def dense_loss(self, y_true, y_pred):
        """y_true需要是one hot形式
        """
        # 導出mask並轉換數據類型
        mask = K.all(K.greater(y_pred, -1e6), axis=2, keepdims=True)
        mask = K.cast(mask, K.floatx())
        # 計算目標分數
        y_true, y_pred = y_true * mask, y_pred * mask
        target_score = self.target_score(y_true, y_pred)
        # 遞歸計算log Z
        init_states = [y_pred[:, 0]]
        y_pred = K.concatenate([y_pred, mask], axis=2)
        input_length = K.int_shape(y_pred[:, 1:])[1]
        log_norm, _, _ = K.rnn(
            self.log_norm_step,
            y_pred[:, 1:],
            init_states,
            input_length=input_length
        )  # 最後一步的log Z向量
        log_norm = tf.reduce_logsumexp(log_norm, 1)  # logsumexp得標量
        # 計算損失 -log p
        return log_norm - target_score 
Author: bojone, Project: bert4keras, Lines: 24, Source: layers.py

Example 8: testCrfLogLikelihood

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def testCrfLogLikelihood(self):
    inputs = np.array(
        [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
    transition_params = np.array(
        [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
    sequence_lengths = np.array(3, dtype=np.int32)
    num_words = inputs.shape[0]
    num_tags = inputs.shape[1]
    with self.test_session() as sess:
      all_sequence_log_likelihoods = []

      # Make sure all probabilities sum to 1.
      for tag_indices in itertools.product(
          range(num_tags), repeat=sequence_lengths):
        tag_indices = list(tag_indices)
        tag_indices.extend([0] * (num_words - sequence_lengths))
        sequence_log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
            inputs=tf.expand_dims(inputs, 0),
            tag_indices=tf.expand_dims(tag_indices, 0),
            sequence_lengths=tf.expand_dims(sequence_lengths, 0),
            transition_params=tf.constant(transition_params))
        all_sequence_log_likelihoods.append(sequence_log_likelihood)
      total_log_likelihood = tf.reduce_logsumexp(all_sequence_log_likelihoods)
      tf_total_log_likelihood = sess.run(total_log_likelihood)
      self.assertAllClose(tf_total_log_likelihood, 0.0) 
Author: tobegit3hub, Project: deep_image_model, Lines: 27, Source: crf_test.py

Example 9: _call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def _call(self, inp, output_size, is_training):
        H, W, B, _ = tuple(int(i) for i in inp.shape[1:])

        # inp = tf.log(tf.nn.softmax(tf.clip_by_value(inp, -10., 10.), axis=4))
        inp = inp - tf.reduce_logsumexp(inp, axis=4, keepdims=True)

        running_sum = inp[:, 0, 0, 0, :]

        for h in range(H):
            for w in range(W):
                for b in range(B):
                    if h == 0 and w == 0 and b == 0:
                        pass
                    else:
                        right = inp[:, h, w, b, :]
                        running_sum = addition_compact_logspace(running_sum, right)

        assert running_sum.shape[1] == output_size
        return running_sum 
Author: e2crawfo, Project: auto_yolo, Lines: 21, Source: networks.py

Example 10: logsumexp

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to reduce over.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 21, Source: tensorflow_backend.py

Example 11: multiclass_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def multiclass_loss(self,
                        p_emb: tf.Tensor,
                        s_emb: tf.Tensor,
                        o_emb: tf.Tensor,
                        all_emb: tf.Tensor) -> tf.Tensor:
        # [B]
        x_ijk = self.score(p_emb, s_emb, o_emb)
        # [B, N]
        x_ij = self.score_sp(p_emb, s_emb, all_emb)
        x_jk = self.score_po(p_emb, all_emb, o_emb)
        # [B]
        lse_x_ij = tf.reduce_logsumexp(x_ij, 1)
        lse_x_jk = tf.reduce_logsumexp(x_jk, 1)
        # [B]
        losses = - x_ijk + lse_x_ij - x_ijk + lse_x_jk
        # Scalar
        loss = tf.reduce_mean(losses)
        return loss 
Author: uclnlp, Project: gntp, Lines: 21, Source: lfm.py

Example 12: evidence

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def evidence(sess, data, elbo, batch_size=100, S=100, total_batch=None):
    '''
    For correct use, the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)
        
    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)         
        elbo_accu = np.empty([batch_size,0])
        for i in range(S):
            elbo_i = sess.run(elbo,{x:test_xs})
            elbo_accu = np.append(elbo_accu,elbo_i,axis=1)
        
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu,axis = 1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi    
    

#%% 
Author: mingzhang-yin, Project: ARM-gradient, Lines: 26, Source: b_mnist_linear2_slim.py
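For reference, the estimator above is the importance-weighted bound log p(x) ≈ logsumexp_s(ELBO_s) - log(S), averaged over the batch. A session-free NumPy sketch of the same computation:

import numpy as np

def evidence_estimate(elbo_samples):
    '''elbo_samples: [batch_size, S] array, one ELBO value per z sample.

    Returns the batch mean of logsumexp(ELBO_s) - log(S).
    '''
    S = elbo_samples.shape[1]
    m = elbo_samples.max(axis=1, keepdims=True)  # subtract the max for stability
    lse = np.log(np.exp(elbo_samples - m).sum(axis=1)) + m[:, 0]
    return float(np.mean(lse - np.log(S)))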

Example 13: evidence

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def evidence(sess, data, elbo, batch_size=100, S=100, total_batch=None):
    '''
    For correct use, the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)
        
    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)         
        elbo_accu = np.empty([batch_size,0])
        for i in range(S):
            elbo_i = sess.run(elbo,{x:test_xs})
            elbo_accu = np.append(elbo_accu,elbo_i,axis=1)
        
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu,axis = 1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi 

#%% 
Author: mingzhang-yin, Project: ARM-gradient, Lines: 25, Source: b_mnist_nonlinear_slim.py

Example 14: evidence

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def evidence(sess, data, elbo, batch_size=100, S=100, total_batch=None):
    '''
    For correct use, the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)
        
    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)         
        elbo_accu = np.empty([batch_size,0])
        for i in range(S):
            elbo_i = sess.run(elbo,{x:test_xs})
            elbo_accu = np.append(elbo_accu,elbo_i,axis=1)
        
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu,axis = 1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi   

#%% 
Author: mingzhang-yin, Project: ARM-gradient, Lines: 25, Source: b_omni_nonlinear.py

Example 15: evidence

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def evidence(sess, data, elbo, batch_size=100, S=100, total_batch=None):
    '''
    For correct use, the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)
        
    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)         
        elbo_accu = np.empty([batch_size,0])
        for i in range(S):
            elbo_i = sess.run(elbo,{x:test_xs})
            elbo_accu = np.append(elbo_accu,elbo_i,axis=1)
        
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu,axis = 1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi 
    

#%% 
Author: mingzhang-yin, Project: ARM-gradient, Lines: 26, Source: b_omni_linear.py


Note: The tensorflow.reduce_logsumexp method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, refer to the corresponding project's license; do not reproduce without permission.