

Python special.logsumexp Method Code Examples

This article collects typical usage examples of the scipy.special.logsumexp method in Python. If you are wondering how to use special.logsumexp, what it does, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples from its containing module, scipy.special.


Below are 15 code examples of special.logsumexp, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
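
Before the examples, here is a minimal standalone sketch (my own, not drawn from the projects below) of the numerical-stability problem logsumexp solves: evaluating log(sum(exp(a))) naively overflows for large inputs, while logsumexp computes the same quantity safely.

import numpy as np
from scipy.special import logsumexp

a = np.array([1000.0, 1000.0])
print(np.log(np.sum(np.exp(a))))  # inf: np.exp(1000.0) overflows to inf
print(logsumexp(a))               # 1000.6931..., i.e. 1000 + log(2)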

Example 1: predict_log_proba

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        jll = self._joint_log_likelihood(X)

        # normalize by P(x) = P(f_1, ..., f_n)
        log_prob_x = logsumexp(jll, axis=1)  # shape (n_samples,)

        return jll - np.atleast_2d(log_prob_x).T 
Developer: J535D165, Project: recordlinkage, Lines of code: 23, Source file: nb_sklearn.py

Example 2: test_logsumexp

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def test_logsumexp():
    # check consistency with scipy's version
    rng = np.random.RandomState(0)
    for _ in range(100):
        a = rng.uniform(0, 1000, size=1000)
        assert_almost_equal(logsumexp(a), _logsumexp(a))

    # Test whether logsumexp() function correctly handles large inputs.
    # (from scipy tests)

    b = np.array([1000, 1000])
    desired = 1000.0 + np.log(2.0)
    assert_almost_equal(_logsumexp(b), desired)

    n = 1000
    b = np.full(n, 10000, dtype='float64')
    desired = 10000.0 + np.log(n)
    assert_almost_equal(_logsumexp(b), desired) 
Developer: ogrisel, Project: pygbm, Lines of code: 20, Source file: test_loss.py

Example 3: __init__

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def __init__(self, n_hidden=2, max_iter=200, eps=1e-5, seed=None, verbose=False, count='binarize',
                 tree=True, **kwargs):
        self.n_hidden = n_hidden  # Number of hidden factors to use (Y_1,...Y_m) in paper
        self.max_iter = max_iter  # Maximum number of updates to run, regardless of convergence
        self.eps = eps  # Change to signal convergence
        self.tree = tree
        np.random.seed(seed)  # Set seed for deterministic results
        self.verbose = verbose
        self.t = 20  # Initial softness of the soft-max function for alpha (see NIPS paper [1])
        self.count = count  # Which strategy, if necessary, for binarizing count data
        if verbose > 0:
            np.set_printoptions(precision=3, suppress=True, linewidth=200)
            print('corex, rep size:', n_hidden)
        if verbose:
            np.seterr(all='warn')
            # Change 'warn' to 'raise' if you want to see exactly where errors occur.
            # Underflow errors in logsumexp are treated as innocuous here (probabilities near 0).
        else:
            np.seterr(all='ignore') 
Developer: gregversteeg, Project: corex_topic, Lines of code: 21, Source file: corextopic.py

Example 4: ESS

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def ESS(works_prev, works_incremental):
        """
        compute the effective sample size (ESS) as given in Eq 3.15 in https://arxiv.org/abs/1303.3123.

        Parameters
        ----------
        works_prev: np.array
            np.array of floats representing the accumulated works at t-1 (unnormalized)
        works_incremental: np.array
            np.array of floats representing the incremental works at t (unnormalized)

        Returns
        -------
        ESS: float
            effective sample size
        """
        prev_weights_normalized = np.exp(-works_prev - logsumexp(-works_prev))
        #_logger.debug(f"\t\tnormalized weights: {prev_weights_normalized}")
        incremental_weights_unnormalized = np.exp(-works_incremental)
        #_logger.debug(f"\t\tincremental weights (unnormalized): {incremental_weights_unnormalized}")
        ESS = np.dot(prev_weights_normalized, incremental_weights_unnormalized)**2 / np.dot(np.power(prev_weights_normalized, 2), np.power(incremental_weights_unnormalized, 2))
        #_logger.debug(f"\t\tESS: {ESS}")
        return ESS 
Developer: choderalab, Project: perses, Lines of code: 25, Source file: relative_setup.py
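
A hypothetical usage sketch for the ESS helper above, assuming it is callable as a plain function; the work arrays are made-up toy data, not taken from the perses project.

import numpy as np
from scipy.special import logsumexp

works_prev = np.array([0.0, 0.5, 1.0, 1.5])         # accumulated works at t-1
works_incremental = np.array([0.1, 0.2, 0.1, 0.3])  # incremental works at t
print(ESS(works_prev, works_incremental))  # between 1 and len(works_prev); equals 4 only for perfectly uniform weights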

Example 5: CESS

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def CESS(works_prev, works_incremental):
        """
        compute the conditional effective sample size (CESS) as given in Eq 3.16 in https://arxiv.org/abs/1303.3123.

        Parameters
        ----------
        works_prev: np.array
            np.array of floats representing the accumulated works at t-1 (unnormalized)
        works_incremental: np.array
            np.array of floats representing the incremental works at t (unnormalized)

        Returns
        -------
        CESS: float
            conditional effective sample size
        """
        prev_weights_normalization = np.exp(logsumexp(-works_prev))
        prev_weights_normalized = np.exp(-works_prev) / prev_weights_normalization
        #_logger.debug(f"\t\tnormalized weights: {prev_weights_normalized}")
        incremental_weights_unnormalized = np.exp(-works_incremental)
        #_logger.debug(f"\t\tincremental weights (unnormalized): {incremental_weights_unnormalized}")
        N = len(prev_weights_normalized)
        CESS = N * np.dot(prev_weights_normalized, incremental_weights_unnormalized)**2 / np.dot(prev_weights_normalized, np.power(incremental_weights_unnormalized, 2))
        #_logger.debug(f"\t\tCESS: {CESS}")
        return CESS 
Developer: choderalab, Project: perses, Lines of code: 27, Source file: relative_setup.py

Example 6: multinomial_resample

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def multinomial_resample(total_works, num_resamples):
    """
    From a numpy array of total works, resample the particle indices num_resamples times with
    replacement from a multinomial distribution with weights w_i proportional to e^{-total_works_i}.

    Parameters
    ----------
    total_works : np.array of floats
        generalized accumulated works at time t for all particles
    num_resamples : int
        number of resamples to conduct; passing len(total_works) keeps the number of particles unchanged

    Returns
    -------
    resampled_works : np.array of floats
        resampled works, all set to np.average(total_works) (uniform across particles)
    resampled_indices : np.array of ints
        resampled indices
    """
    normalized_weights = np.exp(-total_works - logsumexp(-total_works))
    resampled_indices = np.random.choice(len(normalized_weights), num_resamples, p=normalized_weights, replace = True)
    resampled_works = np.array([np.average(total_works)] * num_resamples)

    return resampled_works, resampled_indices 
Developer: choderalab, Project: perses, Lines of code: 25, Source file: utils.py
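
A hypothetical usage sketch for multinomial_resample above; the work values are toy numbers, not taken from the perses project.

import numpy as np
from scipy.special import logsumexp

total_works = np.array([1.0, 2.0, 3.0, 4.0])
works, indices = multinomial_resample(total_works, num_resamples=4)
# indices is biased toward low-work particles (index 0 here); works is
# uniform, np.array([2.5, 2.5, 2.5, 2.5]), the mean of total_works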

Example 7: ESS

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def ESS(works_prev, works_incremental):
    """
    compute the effective sample size (ESS) as given in Eq 3.15 in https://arxiv.org/abs/1303.3123.
    Parameters
    ----------
    works_prev: np.array
        np.array of floats representing the accumulated works at t-1 (unnormalized)
    works_incremental: np.array
        np.array of floats representing the incremental works at t (unnormalized)

    Returns
    -------
    normalized_ESS: float
        effective sample size divided by the number of particles (between 0 and 1)
    """
    prev_weights_normalized = np.exp(-works_prev - logsumexp(-works_prev))
    incremental_weights_unnormalized = np.exp(-works_incremental)
    ESS = np.dot(prev_weights_normalized, incremental_weights_unnormalized)**2 / np.dot(np.power(prev_weights_normalized, 2), np.power(incremental_weights_unnormalized, 2))
    normalized_ESS = ESS / len(prev_weights_normalized)
    assert 0.0 - DISTRIBUTED_ERROR_TOLERANCE <= normalized_ESS <= 1.0 + DISTRIBUTED_ERROR_TOLERANCE, f"the normalized ESS ({normalized_ESS}) is not between 0 and 1"
    return normalized_ESS 
Developer: choderalab, Project: perses, Lines of code: 23, Source file: utils.py

Example 8: test_logsumexp_b

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def test_logsumexp_b():
    a = np.arange(200)
    b = np.arange(200, 0, -1)
    desired = np.log(np.sum(b*np.exp(a)))
    assert_almost_equal(logsumexp(a, b=b), desired)

    a = [1000, 1000]
    b = [1.2, 1.2]
    desired = 1000 + np.log(2 * 1.2)
    assert_almost_equal(logsumexp(a, b=b), desired)

    x = np.array([1e-40] * 100000)
    b = np.linspace(1, 1000, 100000)
    logx = np.log(x)

    X = np.vstack((x, x))
    logX = np.vstack((logx, logx))
    B = np.vstack((b, b))
    assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum())
    assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)),
                                (B * X).sum(axis=0))
    assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)),
                                (B * X).sum(axis=1)) 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines of code: 25, Source file: test_logsumexp.py

Example 9: log_likelihood_network

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def log_likelihood_network(
        digraph, traffic_in, traffic_out, params, weight=None):
    """
    Compute the log-likelihood of model parameters.

    If ``weight`` is not ``None``, the log-likelihood is correct only up to a
    constant (independent of the parameters).
    """
    loglik = 0
    for i in range(len(traffic_in)):
        loglik += traffic_in[i] * params[i]
        if digraph.out_degree(i) > 0:
            neighbors = list(digraph.successors(i))
            if weight is None:
                loglik -= traffic_out[i] * logsumexp(params.take(neighbors))
            else:
                weights = [digraph[i][j][weight] for j in neighbors]
                loglik -= traffic_out[i] * logsumexp(
                        params.take(neighbors), b=weights)
    return loglik 
Developer: lucasmaystre, Project: choix, Lines of code: 22, Source file: utils.py
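
A hypothetical usage sketch for log_likelihood_network above, assuming the digraph is a networkx.DiGraph with integer nodes 0..n-1; the edge list and traffic counts are made up for illustration.

import networkx as nx
import numpy as np

digraph = nx.DiGraph([(0, 1), (0, 2), (1, 2), (2, 0)])
traffic_in = np.array([3, 5, 8])   # observed incoming transitions per node
traffic_out = np.array([6, 4, 6])  # observed outgoing transitions per node
params = np.zeros(3)               # log-strengths, all equal as a starting point
print(log_likelihood_network(digraph, traffic_in, traffic_out, params))  # -6*log(2) for this data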

Example 10: _log_density

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def _log_density(self, stimulus):
        smap = self.parent_model.log_density(stimulus)

        target_shape = (stimulus.shape[0],
                        stimulus.shape[1])

        if smap.shape != target_shape:
            if self.verbose:
                print("Resizing saliency map", smap.shape, target_shape)
            x_factor = target_shape[1] / smap.shape[1]
            y_factor = target_shape[0] / smap.shape[0]

            smap = zoom(smap, [y_factor, x_factor], order=1, mode='nearest')

            smap -= logsumexp(smap)

            assert smap.shape == target_shape

        return smap 
Developer: matthias-k, Project: pysaliency, Lines of code: 21, Source file: models.py
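
The smap -= logsumexp(smap) line above (also used in the next two examples) is a common renormalization idiom: subtracting the logsumexp of a log map makes its exponential sum to exactly 1. A minimal standalone sketch:

import numpy as np
from scipy.special import logsumexp

log_map = np.random.randn(4, 5)            # arbitrary unnormalized log map
log_density = log_map - logsumexp(log_map)
print(np.exp(log_density).sum())           # 1.0 up to floating-point error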

Example 11: conditional_log_density

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def conditional_log_density(self, stimulus, x_hist, y_hist, t_hist, attributes=None, out=None):
        smap = self.parent_model.conditional_log_density(stimulus, x_hist, y_hist, t_hist, attributes=attributes, out=out)

        target_shape = (stimulus.shape[0],
                        stimulus.shape[1])

        if smap.shape != target_shape:
            if self.verbose:
                print("Resizing saliency map", smap.shape, target_shape)
            x_factor = target_shape[1] / smap.shape[1]
            y_factor = target_shape[0] / smap.shape[0]

            smap = zoom(smap, [y_factor, x_factor], order=1, mode='nearest')

            smap -= logsumexp(smap)

            assert smap.shape == target_shape

        return smap 
Developer: matthias-k, Project: pysaliency, Lines of code: 21, Source file: models.py

Example 12: _log_density

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def _log_density(self, stimulus):
        shape = stimulus.shape[0], stimulus.shape[1]

        stimulus_id = get_image_hash(stimulus)
        stimulus_index = self.stimuli.stimulus_ids.index(stimulus_id)

        #fixations = self.fixations[self.fixations.n == stimulus_index]
        inds = self.fixations.n != stimulus_index

        ZZ = np.zeros(shape)

        _fixations = np.array([self.ys[inds]*shape[0], self.xs[inds]*shape[1]]).T
        fill_fixation_map(ZZ, _fixations)
        ZZ = gaussian_filter(ZZ, [self.bandwidth*shape[0], self.bandwidth*shape[1]])
        ZZ *= (1-self.eps)
        ZZ += self.eps * 1.0/(shape[0]*shape[1])
        ZZ = np.log(ZZ)

        ZZ -= logsumexp(ZZ)
        #ZZ -= np.log(np.exp(ZZ).sum())

        return ZZ 
Developer: matthias-k, Project: pysaliency, Lines of code: 24, Source file: baseline_utils.py

Example 13: _global_jump

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def _global_jump(self, replicas_log_P_k):
        """
        Global jump scheme.
        This method is described after Eq. 3 in [2]
        """
        n_replica, n_states = self.n_replicas, self.n_states
        for replica_index, current_state_index in enumerate(self._replica_thermodynamic_states):
            neighborhood = self._neighborhood(current_state_index)

            # Compute unnormalized log probabilities for all thermodynamic states.
            log_P_k = np.zeros([n_states], np.float64)
            u_k = self._energy_thermodynamic_states[replica_index, :]
            for state_index in neighborhood:
                log_P_k[state_index] = -u_k[state_index] + self.log_weights[state_index]
            log_P_k -= logsumexp(log_P_k)

            # Update sampler Context to current thermodynamic state.
            P_k = np.exp(log_P_k[neighborhood])
            new_state_index = np.random.choice(neighborhood, p=P_k)
            self._replica_thermodynamic_states[replica_index] = new_state_index

            # Accumulate statistics.
            replicas_log_P_k[replica_index,:] = log_P_k[:]
            self._n_proposed_matrix[current_state_index, neighborhood] += 1
            self._n_accepted_matrix[current_state_index, new_state_index] += 1 
Developer: choderalab, Project: openmmtools, Lines of code: 27, Source file: sams.py

Example 14: test_logsumexp_b

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def test_logsumexp_b(ary_dtype, axis, b, keepdims):
    """Test ArviZ implementation of logsumexp.

    Test also compares against Scipy implementation.
    Case where b=None, they are equal. (N=len(ary))
    Second case where b=x, and x is 1/(number of elements), they are almost equal.

    Test tests against b parameter.
    """
    ary = np.random.randn(100, 101).astype(ary_dtype)  # pylint: disable=no-member
    assert _logsumexp(ary=ary, axis=axis, b=b, keepdims=keepdims, copy=True) is not None
    ary = ary.copy()
    assert _logsumexp(ary=ary, axis=axis, b=b, keepdims=keepdims, copy=False) is not None
    out = np.empty(5)
    assert _logsumexp(ary=np.random.randn(10, 5), axis=0, out=out) is not None

    # Scipy implementation
    scipy_results = logsumexp(ary, b=b, axis=axis, keepdims=keepdims)
    arviz_results = _logsumexp(ary, b=b, axis=axis, keepdims=keepdims)

    assert_array_almost_equal(scipy_results, arviz_results) 
Developer: arviz-devs, Project: arviz, Lines of code: 23, Source file: test_stats_utils.py

Example 15: _compute_log_type_probabilities

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logsumexp [as alias]
def _compute_log_type_probabilities(df, optim_paras, options):
    """Compute the log type probabilities."""
    x_betas = _compute_x_beta_for_type_probabilities(
        df, optim_paras=optim_paras, options=options
    )

    log_probabilities = x_betas - special.logsumexp(x_betas, axis=1, keepdims=True)
    log_probabilities = np.clip(log_probabilities, MIN_FLOAT, MAX_FLOAT)

    return log_probabilities 
Developer: OpenSourceEconomics, Project: respy, Lines of code: 12, Source file: likelihood.py
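
A minimal standalone sketch of the row-wise log-softmax idiom used above: subtracting logsumexp with axis=1 and keepdims=True turns each row of scores into log-probabilities.

import numpy as np
from scipy import special

x_betas = np.array([[0.0, 1.0], [2.0, -1.0]])
log_probabilities = x_betas - special.logsumexp(x_betas, axis=1, keepdims=True)
print(np.exp(log_probabilities).sum(axis=1))  # [1. 1.]: each row sums to 1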


Note: the scipy.special.logsumexp examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.