

Python extmath.logsumexp Function Code Examples

This article collects typical usage examples of the Python function sklearn.utils.extmath.logsumexp, gathered from open-source projects. If you are wondering what logsumexp does, or how to call it in practice, the curated examples below should help.


The 15 code examples below demonstrate logsumexp in use, ordered by popularity.
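At its core, logsumexp computes log(sum(exp(a))) using the max-shift identity log sum_i exp(a_i) = m + log sum_i exp(a_i - m), with m = max(a), so the largest term is exponentiated to exactly 1 and the result survives underflow and overflow. Here is a minimal sketch of the idea (note that sklearn.utils.extmath.logsumexp has since been removed from scikit-learn; scipy.special.logsumexp is the maintained equivalent):

import numpy as np

def logsumexp_sketch(a):
    # log(sum(exp(a))) via the max-shift trick; a sketch of what the library call does
    m = np.max(a)
    return m + np.log(np.sum(np.exp(a - m)))  # the largest term becomes exp(0) = 1

a = np.full(3, -1000.0)
print(np.log(np.sum(np.exp(a))))  # -inf (with a runtime warning): exp(-1000) underflows to 0.0
print(logsumexp_sketch(a))        # -998.9014...: -1000 + log(3), the correct answer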

Example 1: test_logsumexp

def test_logsumexp():
    # Try to add some smallish numbers in logspace
    x = np.array([1e-40] * 1000000)
    logx = np.log(x)
    assert_almost_equal(np.exp(logsumexp(logx)), x.sum())

    X = np.vstack([x, x])
    logX = np.vstack([logx, logx])
    assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
    assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
Author: 93sam | Project: scikit-learn | Lines: 10 | Source: test_extmath.py

Example 2: eval

    def eval(self, X):
        """Evaluate the model on data

        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.

        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        logprob: array_like, shape (n_samples,)
            Log probabilities of each data point in X
        responsibilities: array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        X = np.asarray(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('the shape of X is not compatible with self')

        if self.blocksize > 0:
            logprob = np.zeros(X.shape[0], dtype=self.float_type)
            responsibilities = np.zeros((X.shape[0], self.n_components),
                                        dtype=self.float_type)
            if self.verbose:
                print("Running block multiplication")

            # Process X in blocks of self.blocksize rows to bound memory use.
            for block_id in range(0, X.shape[0], self.blocksize):
                blockend = min(X.shape[0], block_id + self.blocksize)
                lpr = (log_product_of_bernoullis_mixture_likelihood(
                           X[block_id:blockend], self.log_odds_,
                           self.log_inv_mean_sums_)
                       + np.log(self.weights_))
                logprob[block_id:blockend] = logsumexp(lpr, axis=1)
                responsibilities[block_id:blockend] = np.exp(
                    lpr - logprob[block_id:blockend][:, np.newaxis])
        else:
            lpr = (log_product_of_bernoullis_mixture_likelihood(
                       X, self.log_odds_, self.log_inv_mean_sums_)
                   + np.log(self.weights_))
            logprob = logsumexp(lpr, axis=1)
            responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities
Author: EdwardBetts | Project: partsNet | Lines: 51 | Source: bernoullimm.py
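The core idiom in both branches is the same: build a matrix lpr of per-component log joint probabilities, reduce each row with logsumexp to get the sample's total log probability, and exponentiate the difference to get normalized posteriors. A minimal standalone sketch of that step (the Bernoulli-mixture likelihood helper above is project-specific, so arbitrary log likelihoods stand in; scipy.special.logsumexp replaces the deprecated sklearn helper):

import numpy as np
from scipy.special import logsumexp

rng = np.random.default_rng(0)
log_likelihoods = np.log(rng.random((5, 3)))  # log p(x_n | component k), any mixture family
log_weights = np.log([0.5, 0.3, 0.2])         # log mixing weights

lpr = log_likelihoods + log_weights           # log of the joint p(x_n, k)
logprob = logsumexp(lpr, axis=1)              # log p(x_n), marginalized over components
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
assert np.allclose(responsibilities.sum(axis=1), 1.0)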

Example 3: _accumulate_sufficient_statistics

    def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params, seq_weight):
        stats['nobs'] += 1 * seq_weight
        if 's' in params:
            stats['start'] += posteriors[0] * seq_weight
        if 't' in params:
            n_observations, n_components = framelogprob.shape
            lneta = np.zeros((n_observations - 1, n_components, n_components))
            lnP = logsumexp(fwdlattice[-1])
            _hmmc._compute_lneta(n_observations, n_components, fwdlattice,
                                 self._log_transmat, bwdlattice, framelogprob,
                                 lnP, lneta)
            stats['trans'] += np.exp(logsumexp(lneta, 0)) * seq_weight
Author: sunsern | Project: uright-python | Lines: 14 | Source: weightedhmm.py

Example 4: _fit

    def _fit(self, obs):
        prev_loglikelihood = None
        for iteration in range(self.n_training_iterations):
            stats = self._initialize_sufficient_statistics()
            curr_loglikelihood = 0
            for seq in obs:
                # Forward-backward pass and accumulate stats
                framelogprob = self._compute_log_likelihood(seq)
                lpr, fwdlattice = self._do_forward_pass(framelogprob)
                bwdlattice = self._do_backward_pass(framelogprob)
                gamma = fwdlattice + bwdlattice
                posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
                assert np.allclose(np.sum(posteriors, axis=1), 1.0)  # posteriors must sum to 1 for each t
                curr_loglikelihood += lpr
                self._accumulate_sufficient_statistics(stats, seq, framelogprob, posteriors, fwdlattice, bwdlattice)

            # Test for convergence
            if prev_loglikelihood is not None:
                delta = curr_loglikelihood - prev_loglikelihood
                print('%f (%f)' % (curr_loglikelihood, delta))
                assert delta >= -0.01  # Likelihood when training with Baum-Welch should grow monotonically
                if delta <= self.training_threshold:
                    break

            self._do_mstep(stats)
            prev_loglikelihood = curr_loglikelihood
Author: caomw | Project: motion-classification | Lines: 26 | Source: impl_hmmlearn.py
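The transpose dance in posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T exists only to make the subtraction broadcast: subtracting a length-n vector from an (n_states, n) array works columnwise. Indexing with [:, np.newaxis], or keepdims=True in scipy.special.logsumexp (the maintained replacement for the sklearn helper), does the same without the double transpose. A quick equivalence check on toy numbers:

import numpy as np
from scipy.special import logsumexp

gamma = np.log(np.random.default_rng(0).random((4, 3)))  # any (n_obs, n_states) log lattice

a = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
b = np.exp(gamma - logsumexp(gamma, axis=1, keepdims=True))
assert np.allclose(a, b)                 # identical per-timestep state posteriors
assert np.allclose(b.sum(axis=1), 1.0)   # each row is a probability distribution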

Example 5: normalize_logspace

def normalize_logspace(a):
    """Normalizes the array `a` in the log domain.

    Each row of `a` is a log discrete distribution. Returns
    the array normalized in the log domain while minimizing the
    possibility of numerical underflow.

    Parameters
    ----------
    a : ndarray
        The array to normalize in the log domain.

    Returns
    -------
    a : ndarray
        The array normalized in the log domain.
    lnorm : ndarray
        Log normalization constant of each row.

    Examples
    --------
    >>> a = np.log(np.array([[1.0, 1.0, 2.0]]))
    >>> y, lnorm = normalize_logspace(a)
    >>> np.allclose(np.exp(y), [[0.25, 0.25, 0.5]])
    True

    .. note::
        Adapted from Matlab:

        | Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
        | Copyright (2010) Kevin Murphy and Matt Dunham
        | License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_

    """
    lnorm = logsumexp(a, 1)
    y = a.T - lnorm
    return y.T, lnorm
Author: baiyunping333 | Project: mlpy | Lines: 34 | Source: _stats.py

Example 6: eval

    def eval(self, obs):
        """Evaluate the model on data

        Compute the log probability of `obs` under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of `obs`.

        Parameters
        ----------
        obs: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        logprob: array_like, shape (n_samples,)
            Log probabilities of each data point in `obs`

        posteriors: array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        obs = np.asarray(obs)
        lpr = lmvnpdf(obs, self._means, self._covars, self._cvtype) + self._log_weights
        logprob = logsumexp(lpr, axis=1)
        posteriors = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, posteriors
Author: jmstewart90 | Project: numerical_computing | Lines: 27 | Source: gmm.py

Example 7: fit

    def fit(self, obs):

        # same implementation as in sklearn, but returns the learning curve
        if self.algorithm not in decoder_algorithms:
            self._algorithm = "viterbi"

        self._init(obs, self.init_params)

        logprob = []
        for i in range(self.n_iter):
            # Expectation step
            stats = self._initialize_sufficient_statistics()
            curr_logprob = 0
            for seq in obs:
                framelogprob = self._compute_log_likelihood(seq)
                lpr, fwdlattice = self._do_forward_pass(framelogprob)
                bwdlattice = self._do_backward_pass(framelogprob)
                gamma = fwdlattice + bwdlattice
                posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
                curr_logprob += lpr
                self._accumulate_sufficient_statistics(
                    stats, seq, framelogprob, posteriors, fwdlattice,
                    bwdlattice, self.params)
            logprob.append(curr_logprob)

            # Check for convergence.
            if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
                break

            # Maximization step
            self._do_mstep(stats, self.params)

        return logprob
Author: bobi5rova | Project: cecog | Lines: 33 | Source: hmm.py

Example 8: _exact_loglikelihood

    def _exact_loglikelihood(self, ob):
        log_transmat = np.zeros((self.n_chains, self.n_states, self.n_states))
        log_startprob = np.zeros((self.n_chains, self.n_states))
        for idx, chain in enumerate(self.chains_):
            log_transmat[idx] = chain._log_transmat
            log_startprob[idx] = chain._log_startprob

        n_state_combinations = self.n_states ** self.n_chains
        state_combinations = [tuple(x) for x in list(itertools.product(np.arange(self.n_states), repeat=self.n_chains))]
        n_observations = ob.shape[0]
        n_features = ob.shape[1]
        fwdlattice = np.zeros((n_observations, n_state_combinations))

        # Calculate means and covariances for all state combinations and calculate emission probabilities
        weight = (1.0 / float(self.n_chains))
        weight_squared = weight * weight
        covars = np.zeros((n_state_combinations, n_features))  # TODO: add support for all covariance types
        means = np.zeros((n_state_combinations, n_features))
        for idx, state_combination in enumerate(state_combinations):
            for chain_idx, state in enumerate(state_combination):
                chain = self.chains_[chain_idx]
                covars[idx] += chain._covars_[state]
                means[idx] += chain._means_[state]
            covars[idx] *= weight_squared
            means[idx] *= weight
        framelogprob = log_multivariate_normal_density(ob, means, covars, covariance_type='diag')  # TODO: add support for all covariance types

        # Run the forward algorithm
        fhmmc._forward(n_observations, self.n_chains, self.n_states, state_combinations, log_startprob, log_transmat,
                       framelogprob, fwdlattice)

        last_column = fwdlattice[-1]
        assert np.size(last_column) == n_state_combinations
        score = logsumexp(last_column)
        return score
Author: caomw | Project: motion-classification | Lines: 35 | Source: impl_hmmlearn.py
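The pattern here — fill a forward lattice in log space, then reduce its last row with logsumexp — yields the sequence log-likelihood, since the final forward variables are log joint probabilities of the observations and each possible end state. A minimal pure-NumPy sketch of that computation for an ordinary HMM (a toy stand-in for the compiled fhmmc._forward used above; scipy.special.logsumexp replaces the deprecated sklearn helper):

import numpy as np
from scipy.special import logsumexp

def forward_loglikelihood(log_startprob, log_transmat, framelogprob):
    # log P(observations) for a discrete-state HMM, computed entirely in log space
    n_obs, n_states = framelogprob.shape
    fwdlattice = np.zeros((n_obs, n_states))
    fwdlattice[0] = log_startprob + framelogprob[0]
    for t in range(1, n_obs):
        for j in range(n_states):
            # marginalize over predecessor states i: alpha[t-1, i] * A[i, j]
            fwdlattice[t, j] = (logsumexp(fwdlattice[t - 1] + log_transmat[:, j])
                                + framelogprob[t, j])
    return logsumexp(fwdlattice[-1])  # marginalize over the final state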

Example 9: eval

    def eval(self, X):
        """Evaluate the model on data

        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.

        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        logprob: array_like, shape (n_samples,)
            Log probabilities of each data point in X
        responsibilities: array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        X = np.asarray(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError("the shape of X is not compatible with self")

        lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
                                               self.covariance_type)
               + np.log(self.weights_))
        logprob = logsumexp(lpr, axis=1)
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities
Author: JasonFil | Project: Python-ML | Lines: 35 | Source: gmm_diag2_new.py

Example 10: score_samples

    def score_samples(self, X):
        """Return the per-sample likelihood of the data under the model.

        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.

        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X.

        responsibilities : array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        X = check_angular(X)

        # Don't use components whose weights fell to 0. Is this correct? Hack
        # for use in webapp.
        ## TODO: REMOVE BEFORE SUBMITTING TO SKLEARN!
        good_comps = (abs(self.weights_) > 1e-8)

        logprobs = (log_vmf_pdf(X, self.means_[good_comps], self.precs_) +
                    np.log(self.weights_[good_comps][np.newaxis]))
        logprob = logsumexp(logprobs, axis=1)

        responsibilities = np.exp(logprobs - logprob[:, np.newaxis])
        return logprob, responsibilities
Author: HapeMask | Project: eve_activity_graph_demo | Lines: 35 | Source: vmfmm.py

Example 11: E_step

    def E_step(self, X):
        N, D = X.shape
        lpr = np.zeros((N, self.gmm.K))
        logdet = np.zeros(self.gmm.K)
        dterms = np.arange(1, D + 1)  # 1, 2, 3, ..., D
        self.invWchol = list()
        for k in range(self.gmm.K):
            dXm = X - self.qMixComp[k].m
            L = scipy.linalg.cholesky(self.qMixComp[k].invW, lower=True)
            self.invWchol.append(L)

            if np.any(np.isnan(L) | np.isinf(L)):
                print('NaN!', self.qMixComp[k])

            # Instead of forming invL = inv(L) and computing Q = invL * dXm.T,
            # solve L * Q = dXm.T for Q with a triangular solve.
            lpr[:, k] = -0.5 * self.qMixComp[k].dF * np.sum(
                scipy.linalg.solve_triangular(L, dXm.T, lower=True) ** 2, axis=0)
            lpr[:, k] -= 0.5 * D / self.qMixComp[k].beta
            # det(W) = 1/det(invW) = 1/det(L)**2, and the determinant of a
            # triangular matrix is the product of its diagonal entries.
            logdet[k] = -2 * np.sum(np.log(np.diag(L))) + D * np.log(2.0)
            logdet[k] += digamma(0.5 * (dterms + 1 + self.qMixComp[k].dF)).sum()
        self.logwtilde = digamma(self.alpha) - digamma(self.alpha.sum())
        self.logLtilde = logdet
        lpr += self.logwtilde
        lpr += logdet
        lprSUM = logsumexp(lpr, axis=1)
        resp = np.exp(lpr - lprSUM[:, np.newaxis])
        resp /= resp.sum(axis=1)[:, np.newaxis]  # row-normalize to guard against round-off
        return resp
Author: anshe80 | Project: MLRaptor | Lines: 32 | Source: VBLearnerGMM_OLD.py
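Two numerical tricks in this E-step are worth isolating. The squared Mahalanobis-style term is computed by a triangular solve against the Cholesky factor rather than by forming a matrix inverse, and the log-determinant falls out of the log of the factor's diagonal. A self-contained sketch with a toy positive-definite matrix (independent of the variational quantities above):

import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 3))                   # 5 points in 3 dimensions
m = np.zeros(3)
A = rng.normal(size=(3, 3))
invW = A @ A.T + 3.0 * np.eye(3)              # a symmetric positive-definite matrix

L = scipy.linalg.cholesky(invW, lower=True)   # invW = L @ L.T
Q = scipy.linalg.solve_triangular(L, (X - m).T, lower=True)
mahal_sq = np.sum(Q ** 2, axis=0)             # (x-m)^T inv(invW) (x-m), no explicit inverse
logdet_W = -2.0 * np.sum(np.log(np.diag(L)))  # det(W) = 1/det(invW) = 1/det(L)**2

# cross-check against the direct (less stable) computation
direct = np.einsum('ni,ij,nj->n', X - m, np.linalg.inv(invW), X - m)
assert np.allclose(mahal_sq, direct)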

Example 12: _do_forward_pass

    def _do_forward_pass(self, framelogprob):
        n_observations = framelogprob.shape[0]
        state_combinations = [tuple(x) for x in itertools.product(
            np.arange(self.n_states), repeat=self.n_chains)]
        fwdlattice = np.zeros((n_observations, self.n_states ** self.n_chains))
        fhmmc._forward(n_observations, self.n_chains, self.n_states,
                       state_combinations, self.log_startprob,
                       self.log_transmat, framelogprob, fwdlattice)
        return logsumexp(fwdlattice[-1]), fwdlattice
Author: caomw | Project: motion-classification | Lines: 7 | Source: impl_hmmlearn.py

Example 13: test_multinomial_loss_ground_truth

def test_multinomial_loss_ground_truth():
    # n_samples, n_features, n_classes = 4, 2, 3
    n_classes = 3
    X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
    y = np.array([0, 1, 2, 0])
    lbin = LabelBinarizer()
    Y_bin = lbin.fit_transform(y)

    weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
    intercept = np.array([1., 0, -.2])
    sample_weights = np.array([0.8, 1, 1, 0.8])

    prediction = np.dot(X, weights) + intercept
    logsumexp_prediction = logsumexp(prediction, axis=1)
    p = prediction - logsumexp_prediction[:, np.newaxis]
    loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum()
    diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin)
    grad_1 = np.dot(X.T, diff)

    weights_intercept = np.vstack((weights, intercept)).T.ravel()
    loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
                                               0.0, sample_weights)
    grad_2 = grad_2.reshape(n_classes, -1)
    grad_2 = grad_2[:, :-1].T

    assert_almost_equal(loss_1, loss_2)
    assert_array_almost_equal(grad_1, grad_2)

    # ground truth
    loss_gt = 11.680360354325961
    grad_gt = np.array([[-0.557487, -1.619151, +2.176638],
                        [-0.903942, +5.258745, -4.354803]])
    assert_almost_equal(loss_1, loss_gt)
    assert_array_almost_equal(grad_1, grad_gt)
Author: 1992huanghai | Project: scikit-learn | Lines: 34 | Source: test_sag.py
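Subtracting the row-wise logsumexp from the raw scores, as the test does to form p, is exactly the numerically stable log-softmax: np.exp(p) then gives class probabilities that sum to one per sample, even when the raw scores are far too large to exponentiate directly. A quick sanity check (a sketch, separate from the test above; scipy.special.logsumexp replaces the deprecated sklearn helper):

import numpy as np
from scipy.special import logsumexp

scores = np.array([[1000.0, 1001.0, 1002.0]])  # np.exp(scores) would overflow to inf
log_probs = scores - logsumexp(scores, axis=1)[:, np.newaxis]
probs = np.exp(log_probs)                      # stable softmax via the log-space shift
assert np.allclose(probs.sum(axis=1), 1.0)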

Example 14: _accumulate_sufficient_statistics

    def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        stats['nobs'] += 1
        if 's' in params:
            stats['start'] += posteriors[0]
        if 't' in params:
            n_observations, n_components = framelogprob.shape
            # when the sample is of length 1, it contains no transitions
            # so there is no reason to update our trans. matrix estimate
            if n_observations > 1:
                lneta = np.zeros((n_observations - 1, n_components, n_components))
                lnP = logsumexp(fwdlattice[-1])
                _hmmc._compute_lneta(n_observations, n_components, fwdlattice,
                                     self._log_transmat, bwdlattice, framelogprob,
                                     lnP, lneta)
                # clip at 700 so np.exp stays finite in float64
                stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700))
Author: chagri | Project: Human-Voice-NonVerbal-Detection | Lines: 17 | Source: hmm.py

Example 15: compute_pvalue

def compute_pvalue(distr, N, x, current_p):
    """Compute the log p-value as a ratio of sums in log space."""
    sum_num = []
    sum_denum = []

    for i in range(N + 1):
        p1 = get_log_value(i, distr)
        p2 = get_log_value(N - i, distr)
        p = p1 + p2

        # if current_p >= p:  (alternative criterion, disabled; current_p is unused)
        if i <= x:
            sum_num.append(p)

        sum_denum.append(p)

    return logsumexp(np.array(sum_num)) - logsumexp(np.array(sum_denum))
Author: jovesus | Project: reg-gen | Lines: 17 | Source: python_script.py
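Returning the difference of two logsumexp reductions computes log(sum_num / sum_denum) without ever leaving log space, which matters because each term p can be far below the smallest positive float. A self-contained sketch of the same ratio-of-sums idiom (get_log_value and distr above are project-specific, so fixed log terms stand in):

import numpy as np
from scipy.special import logsumexp

log_terms = np.array([-1000.0, -1001.0, -1002.0])  # np.exp of any of these underflows to 0.0
log_num = logsumexp(log_terms[:2])                 # log of a partial sum
log_denom = logsumexp(log_terms)                   # log of the full sum
log_ratio = log_num - log_denom                    # log(partial / total), computed stably
print(np.exp(log_ratio))                           # about 0.91, recoverable once it is a ratio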


Note: the sklearn.utils.extmath.logsumexp examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution is subject to each project's license. Please do not republish without permission.