

Python tensorflow.reduce_logsumexp Method: Code Examples

This article collects typical usage examples of the tensorflow.reduce_logsumexp method in Python. If you have been asking yourself what exactly tensorflow.reduce_logsumexp does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples from the tensorflow module it belongs to.


The following presents 15 code examples of the tensorflow.reduce_logsumexp method, sorted by popularity by default.
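For orientation before the project examples, here is a minimal sketch of what tf.reduce_logsumexp computes. It is not from any of the projects below and assumes TensorFlow 1.x graph mode, matching the style of the examples:

import tensorflow as tf

x = tf.constant([[0., 1., 2.],
                 [1000., 1000., 1000.]])

# Numerically stable log(sum(exp(x))) along the last axis. A naive
# tf.log(tf.reduce_sum(tf.exp(x), axis=-1)) would overflow to inf on the
# second row; reduce_logsumexp subtracts the row maximum before exponentiating.
lse = tf.reduce_logsumexp(x, axis=-1)

with tf.Session() as sess:
    print(sess.run(lse))  # approx. [2.4076, 1001.0986]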

Example 1: alpha

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
@classmethod
def alpha(cls, parameters: Dict[str, Tensor]) -> Tensor:
    mu = parameters["mu"]
    tau = parameters["tau"]
    nu = parameters["nu"]
    beta = parameters["beta"]

    sigma = 1./tf.sqrt(tau)
    lam = 1./beta

    muStd = tf.constant(0., dtype=mu.dtype)
    sigmaStd = tf.constant(1., dtype=mu.dtype)
    stdNorm = tf.contrib.distributions.Normal(loc=muStd, scale=sigmaStd)

    c0 = lam*(mu-nu) + stdNorm.log_cdf((nu-(mu+sigma**2*lam))/sigma)
    c1 = -lam*(mu-nu) + stdNorm.log_cdf(-(nu-(mu-sigma**2*lam))/sigma)
    # Combine the two branches in log space to avoid underflow.
    c = tf.reduce_logsumexp([c0, c1], axis=0)
    f = (mu-nu)*lam

    norm = tf.distributions.Normal(loc=mu+sigma**2*lam, scale=sigma)

    alpha = tf.exp(f + norm.log_cdf(nu) - c)
    return alpha
Developer: bethgelab, Project: decompose, Lines: 24, Source: jumpNormalAlgorithms.py

Example 2: M_step

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def M_step(log_R, log_activation, vote, lambda_val=0.01):
    R_shape = tf.shape(log_R)
    log_R = log_R + log_activation

    R_sum_i = cl.reduce_sum(tf.exp(log_R), axis=-3, keepdims=True)
    log_normalized_R = log_R - tf.reduce_logsumexp(log_R, axis=-3, keepdims=True)

    pose = cl.reduce_sum(vote * tf.exp(log_normalized_R), axis=-3, keepdims=True)
    log_var = tf.reduce_logsumexp(log_normalized_R + cl.log(tf.square(vote - pose)), axis=-3, keepdims=True)

    beta_v = tf.get_variable('beta_v',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=15., stddev=3.))
    cost = R_sum_i * (beta_v + 0.5 * log_var)

    beta_a = tf.get_variable('beta_a',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=100.0, stddev=10))
    cost_sum_h = cl.reduce_sum(cost, axis=-1, keepdims=True)
    logit = lambda_val * (beta_a - cost_sum_h)
    log_activation = tf.log_sigmoid(logit)

    return(pose, log_var, log_activation) 
Developer: naturomics, Project: CapsLayer, Lines: 25, Source: routing.py
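The key reduce_logsumexp use above is the normalization of the routing assignments: subtracting tf.reduce_logsumexp along an axis is the log-domain equivalent of dividing by the sum, i.e. a numerically stable log-softmax over that axis. A toy check (not part of CapsLayer), assuming TensorFlow 1.x:

log_R = tf.constant([[-1.0, -2.0, -0.5]])
log_normalized_R = log_R - tf.reduce_logsumexp(log_R, axis=-1, keepdims=True)

with tf.Session() as sess:
    # The exponentiated, normalized values sum to 1 along the axis.
    print(sess.run(tf.reduce_sum(tf.exp(log_normalized_R), axis=-1)))  # approx. [1.0]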

Example 3: log_sum_exp

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def log_sum_exp(x, axis=None, keepdims=False):
    """
    Deprecated: Use tf.reduce_logsumexp().

    Numerically stable log-sum-exp of `x` across the given `axis`.

    :param x: A Tensor.
    :param axis: An int or list or tuple. The dimensions to reduce.
        If `None` (the default), reduces all dimensions.
    :param keepdims: Bool. If true, retains reduced dimensions with length 1.
        Default to be False.

    :return: A Tensor after the computation of log sum exp along given axes of
        x.
    """
    x = tf.convert_to_tensor(x)
    x_max = tf.reduce_max(x, axis=axis, keepdims=True)
    ret = tf.log(tf.reduce_sum(tf.exp(x - x_max), axis=axis,
                               keepdims=True)) + x_max
    if not keepdims:
        ret = tf.reduce_sum(ret, axis=axis)
    return ret 
Developer: thu-ml, Project: zhusuan, Lines: 24, Source: utils.py
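As a quick sanity check (not part of zhusuan), the hand-rolled log_sum_exp above should agree with the built-in. A minimal sketch, assuming TensorFlow 1.x and the function defined above in scope:

import numpy as np

x_np = np.random.randn(4, 5).astype(np.float32)
manual = log_sum_exp(tf.constant(x_np), axis=1)
builtin = tf.reduce_logsumexp(tf.constant(x_np), axis=1)

with tf.Session() as sess:
    m, b = sess.run([manual, builtin])
    np.testing.assert_allclose(m, b, rtol=1e-5)  # should match closely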

Example 4: _log_prob

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def _log_prob(self, given):
    logits, temperature = self.path_param(self.logits), \
                          self.path_param(self.temperature)
    log_given = tf.log(given)
    log_temperature = tf.log(temperature)
    n = tf.cast(self.n_categories, self.dtype)

    if self._check_numerics:
        log_given = tf.check_numerics(log_given, "log(given)")
        log_temperature = tf.check_numerics(
            log_temperature, "log(temperature)")

    temp = logits - temperature * log_given

    return tf.lgamma(n) + (n - 1) * log_temperature + \
        tf.reduce_sum(temp - log_given, axis=-1) - \
        n * tf.reduce_logsumexp(temp, axis=-1)
Developer: thu-ml, Project: zhusuan, Lines: 19, Source: multivariate.py

Example 5: multilabel_categorical_crossentropy

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def multilabel_categorical_crossentropy(y_true, y_pred):
    """Multi-label categorical cross-entropy.

    y_true takes values in {0, 1}: 1 marks a target class and
    0 marks a non-target class.
    """
    y_pred = (1 - 2 * y_true) * y_pred
    y_pred_neg = y_pred - y_true * 1e12
    y_pred_pos = y_pred - (1 - y_true) * 1e12

    zeros = tf.zeros_like(y_pred[..., :1])
    y_pred_neg = tf.concat([y_pred_neg, zeros], axis=-1)
    y_pred_pos = tf.concat([y_pred_pos, zeros], axis=-1)
    neg_loss = tf.reduce_logsumexp(y_pred_neg, axis=-1)
    pos_loss = tf.reduce_logsumexp(y_pred_pos, axis=-1)
    return neg_loss + pos_loss
Developer: yyht, Project: BERT, Lines: 18, Source: loss_utils.py
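A toy usage sketch (not from the yyht/BERT repo), assuming TensorFlow 1.x; y_pred holds raw scores with no sigmoid or softmax applied, and classes 0 and 2 are the targets:

y_true = tf.constant([[1., 0., 1.]])
y_pred = tf.constant([[3.0, -1.0, 0.5]])

loss = multilabel_categorical_crossentropy(y_true, y_pred)

with tf.Session() as sess:
    # Loss shrinks as target scores rise above 0 and non-target scores fall below 0.
    print(sess.run(loss))  # approx. [0.818]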

Example 6: logsumexp

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer or list of integers in [-rank(x), rank(x)),
            the axes to compute the logsumexp. If `None` (default), computes
            the logsumexp over all dimensions.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 23, Source: tensorflow_backend.py

Example 7: dense_loss

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def dense_loss(self, y_true, y_pred):
    """y_true must be in one-hot form."""
    # Derive the mask and cast it to the float dtype.
    mask = K.all(K.greater(y_pred, -1e6), axis=2, keepdims=True)
    mask = K.cast(mask, K.floatx())
    # Compute the score of the target (gold) path.
    y_true, y_pred = y_true * mask, y_pred * mask
    target_score = self.target_score(y_true, y_pred)
    # Compute log Z recursively over the sequence.
    init_states = [y_pred[:, 0]]
    y_pred = K.concatenate([y_pred, mask], axis=2)
    input_length = K.int_shape(y_pred[:, 1:])[1]
    log_norm, _, _ = K.rnn(
        self.log_norm_step,
        y_pred[:, 1:],
        init_states,
        input_length=input_length
    )  # log Z vector at the final step
    log_norm = tf.reduce_logsumexp(log_norm, 1)  # logsumexp to a scalar per sample
    # Loss: -log p = log Z - target score.
    return log_norm - target_score
Developer: bojone, Project: bert4keras, Lines: 24, Source: layers.py

Example 8: testCrfLogLikelihood

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def testCrfLogLikelihood(self):
    inputs = np.array(
        [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
    transition_params = np.array(
        [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
    sequence_lengths = np.array(3, dtype=np.int32)
    num_words = inputs.shape[0]
    num_tags = inputs.shape[1]
    with self.test_session() as sess:
      all_sequence_log_likelihoods = []

      # Make sure all probabilities sum to 1.
      for tag_indices in itertools.product(
          range(num_tags), repeat=sequence_lengths):
        tag_indices = list(tag_indices)
        tag_indices.extend([0] * (num_words - sequence_lengths))
        sequence_log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
            inputs=tf.expand_dims(inputs, 0),
            tag_indices=tf.expand_dims(tag_indices, 0),
            sequence_lengths=tf.expand_dims(sequence_lengths, 0),
            transition_params=tf.constant(transition_params))
        all_sequence_log_likelihoods.append(sequence_log_likelihood)
      total_log_likelihood = tf.reduce_logsumexp(all_sequence_log_likelihoods)
      tf_total_log_likelihood = sess.run(total_log_likelihood)
      self.assertAllClose(tf_total_log_likelihood, 0.0) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 27, Source: crf_test.py

Example 9: _call

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def _call(self, inp, output_size, is_training):
    H, W, B, _ = tuple(int(i) for i in inp.shape[1:])

    # inp = tf.log(tf.nn.softmax(tf.clip_by_value(inp, -10., 10.), axis=4))
    inp = inp - tf.reduce_logsumexp(inp, axis=4, keepdims=True)

    running_sum = inp[:, 0, 0, 0, :]

    for h in range(H):
        for w in range(W):
            for b in range(B):
                if h == 0 and w == 0 and b == 0:
                    pass
                else:
                    right = inp[:, h, w, b, :]
                    running_sum = addition_compact_logspace(running_sum, right)

    assert running_sum.shape[1] == output_size
    return running_sum
Developer: e2crawfo, Project: auto_yolo, Lines: 21, Source: networks.py

Example 10: logsumexp

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to reduce over.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 21, Source: tensorflow_backend.py

Example 11: multiclass_loss

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def multiclass_loss(self,
                    p_emb: tf.Tensor,
                    s_emb: tf.Tensor,
                    o_emb: tf.Tensor,
                    all_emb: tf.Tensor) -> tf.Tensor:
    # [B]
    x_ijk = self.score(p_emb, s_emb, o_emb)
    # [B, N]
    x_ij = self.score_sp(p_emb, s_emb, all_emb)
    x_jk = self.score_po(p_emb, all_emb, o_emb)
    # [B]
    lse_x_ij = tf.reduce_logsumexp(x_ij, 1)
    lse_x_jk = tf.reduce_logsumexp(x_jk, 1)
    # [B]
    losses = - x_ijk + lse_x_ij - x_ijk + lse_x_jk
    # Scalar
    loss = tf.reduce_mean(losses)
    return loss
Developer: uclnlp, Project: gntp, Lines: 21, Source: lfm.py
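Each term of `losses` above is a full-softmax negative log-likelihood: with scores x_ij over all N candidate entities and k the index of the true one, -x_ijk + reduce_logsumexp(x_ij) equals -log softmax(x_ij)[k]. A minimal check (not part of gntp), assuming TensorFlow 1.x:

x_ij = tf.constant([[2.0, 0.5, -1.0]])  # toy scores over three candidates
k = 0                                    # index of the true entity
nll = -x_ij[0, k] + tf.reduce_logsumexp(x_ij, 1)
ref = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=tf.constant([k]), logits=x_ij)

with tf.Session() as sess:
    print(sess.run([nll, ref]))  # both approx. [0.2414]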

Example 12: evidence

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def evidence(sess, data, elbo, batch_size=100, S=100, total_batch=None):
    '''
    For correct use: the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    # from scipy.special import logsumexp
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)

    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)
        # Collect S independent ELBO samples per example: shape [batch_size, S].
        elbo_accu = np.empty([batch_size, 0])
        for i in range(S):
            elbo_i = sess.run(elbo, {x: test_xs})  # `x` is the module-level input placeholder
            elbo_accu = np.append(elbo_accu, elbo_i, axis=1)

        # log(1/S * sum_s exp(ELBO_s)) per example, averaged over the batch.
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu, axis=1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi
Developer: mingzhang-yin, Project: ARM-gradient, Lines: 26, Source: b_mnist_linear2_slim.py
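The inner loops above implement the importance-weighted evidence estimate log p(x) ≈ logsumexp_s(ELBO_s) - log S, averaged over the batch. A compact NumPy equivalent of the per-batch reduction (a sketch, assuming elbo_accu of shape [batch_size, S], using the scipy logsumexp hinted at in the commented import):

import numpy as np
from scipy.special import logsumexp

def evidence_from_elbos(elbo_accu):
    # log(1/S * sum_s exp(ELBO_s)) per example, averaged over the batch.
    S = elbo_accu.shape[1]
    return np.mean(logsumexp(elbo_accu, axis=1) - np.log(S))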

Example 13: evidence

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def evidence(sess, data, elbo, batch_size=100, S=100, total_batch=None):
    '''
    For correct use: the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    # from scipy.special import logsumexp
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)

    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)
        # Collect S independent ELBO samples per example: shape [batch_size, S].
        elbo_accu = np.empty([batch_size, 0])
        for i in range(S):
            elbo_i = sess.run(elbo, {x: test_xs})  # `x` is the module-level input placeholder
            elbo_accu = np.append(elbo_accu, elbo_i, axis=1)

        # log(1/S * sum_s exp(ELBO_s)) per example, averaged over the batch.
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu, axis=1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi
Developer: mingzhang-yin, Project: ARM-gradient, Lines: 25, Source: b_mnist_nonlinear_slim.py

Example 14: evidence

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def evidence(sess, data, elbo, batch_size=100, S=100, total_batch=None):
    '''
    For correct use: the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    # from scipy.special import logsumexp
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)

    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)
        # Collect S independent ELBO samples per example: shape [batch_size, S].
        elbo_accu = np.empty([batch_size, 0])
        for i in range(S):
            elbo_i = sess.run(elbo, {x: test_xs})  # `x` is the module-level input placeholder
            elbo_accu = np.append(elbo_accu, elbo_i, axis=1)

        # log(1/S * sum_s exp(ELBO_s)) per example, averaged over the batch.
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu, axis=1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi
Developer: mingzhang-yin, Project: ARM-gradient, Lines: 25, Source: b_omni_nonlinear.py

Example 15: evidence

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import reduce_logsumexp [as alias]
def evidence(sess, data, elbo, batch_size=100, S=100, total_batch=None):
    '''
    For correct use: the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    # from scipy.special import logsumexp
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)

    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)
        # Collect S independent ELBO samples per example: shape [batch_size, S].
        elbo_accu = np.empty([batch_size, 0])
        for i in range(S):
            elbo_i = sess.run(elbo, {x: test_xs})  # `x` is the module-level input placeholder
            elbo_accu = np.append(elbo_accu, elbo_i, axis=1)

        # log(1/S * sum_s exp(ELBO_s)) per example, averaged over the batch.
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu, axis=1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi
Developer: mingzhang-yin, Project: ARM-gradient, Lines: 26, Source: b_omni_linear.py


Note: the tensorflow.reduce_logsumexp examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before using or redistributing the code; do not repost without permission.