

Python special.comb Method Code Examples

This article collects typical usage examples of the scipy.special.comb method in Python. If you are wondering what special.comb does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help. You can also explore further usage examples for the containing module, scipy.special.


The sections below present 15 code examples of special.comb, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
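Before turning to the examples, here is a minimal sketch of scipy.special.comb itself (assuming only that SciPy and NumPy are installed). It shows the default floating-point mode, the exact=True integer mode, and broadcasting over arrays:

import numpy as np
from scipy.special import comb

# Default mode: a float computed via the gamma function.
print(comb(10, 3))               # 120.0

# exact=True: an arbitrary-precision Python int.
print(comb(10, 3, exact=True))   # 120

# The float mode broadcasts over NumPy arrays.
k = np.arange(4)
print(comb(10, k))               # [  1.  10.  45. 120.]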

Example 1: get_celeba_task_pool

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def get_celeba_task_pool(self, attributes, order=3, print_partition=None):
        """
        Produces partitions: a list of dictionaries (key: 0 or 1, value: list of data indices), which is
        compatible with the other methods of this class.
        """
        num_pools = 0
        partitions = []
        from scipy.special import comb
        for attr_comb in tqdm(combinations(range(attributes.shape[1]), order), desc='get_task_pool', total=comb(attributes.shape[1], order)):
            for booleans in product(range(2), repeat=order-1):
                booleans = (0,) + booleans  # keep only the half of the Cartesian products that start with 0
                positive = np.where(np.all([attributes[:, attr] == i_booleans for (attr, i_booleans) in zip(attr_comb, booleans)], axis=0))[0]
                if len(positive) < self.num_samples_per_class:
                    continue
                negative = np.where(np.all([attributes[:, attr] == 1 - i_booleans for (attr, i_booleans) in zip(attr_comb, booleans)], axis=0))[0]
                if len(negative) < self.num_samples_per_class:
                    continue
                # inner_pool[booleans] = {0: list(negative), 1: list(positive)}
                partitions.append({0: list(negative), 1: list(positive)})
                num_pools += 1
                if num_pools == print_partition:
                    print(attr_comb, booleans)
        print('Generated {} task pools by using {} attributes from {} per pool'.format(num_pools, order, attributes.shape[1]))
        return partitions 
Author: kylehkhsu, Project: cactus-maml, Lines of code: 26, Source file: task_generator.py

Example 2: _exact_p_value

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def _exact_p_value(self):
        r"""
        Computes the exact p-value of the McNemar test.

        Returns
        -------
        p_value : float
            The calculated exact p-value.

        Notes
        -----
        The one-sided exact p-value is defined as the following:

        .. math::

            p_{exact} = \sum^{n}_{i=n_{12}} \binom{n}{i} \left(\frac{1}{2}\right)^{i} \left(1 - \frac{1}{2}\right)^{n - i}

        """
        i = self.table[0, 1]
        n = self.table[1, 0] + self.table[0, 1]
        i_n = np.arange(i + 1, n + 1)

        p_value = 1 - np.sum(comb(n, i_n) * 0.5 ** i_n * (1 - 0.5) ** (n - i_n))

        return p_value * 2 
Author: aschleg, Project: hypothetical, Lines of code: 27, Source file: contingency.py
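The tail sum in Example 2 is the lower tail of a Binomial(n, 1/2) distribution, so it can be cross-checked against scipy.stats.binom. The 2x2 table below is hypothetical, used only to illustrate the equivalence:

import numpy as np
from scipy.special import comb
from scipy.stats import binom

# Hypothetical 2x2 contingency table of paired outcomes.
table = np.array([[59, 6],
                  [16, 80]])
i = table[0, 1]                   # discordant pairs of one kind
n = table[0, 1] + table[1, 0]     # all discordant pairs

# Tail sum exactly as written in Example 2 ...
i_n = np.arange(i + 1, n + 1)
p_manual = 1 - np.sum(comb(n, i_n) * 0.5 ** i_n * (1 - 0.5) ** (n - i_n))

# ... equals the Binomial(n, 1/2) CDF at i.
print(np.isclose(p_manual, binom.cdf(i, n, 0.5)))   # True
print(2 * p_manual)                                 # two-sided exact p-value, as returned above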

Example 3: NumRegressors

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def NumRegressors(npix, pld_order, cross_terms=True):
    '''
    Return the number of regressors for `npix` pixels
    and PLD order `pld_order`.

    :param bool cross_terms: Include pixel cross-terms? Default :py:obj:`True`

    '''

    res = 0
    for k in range(1, pld_order + 1):
        if cross_terms:
            res += comb(npix + k - 1, k)
        else:
            res += npix
    return int(res) 
Author: rodluger, Project: everest, Lines of code: 18, Source file: mathutils.py
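A quick sanity check of the counting (numbers of my own, not from the everest source), assuming NumRegressors and its comb import from Example 3 are in scope: three pixels at second PLD order give comb(3, 1) = 3 linear terms plus comb(3 + 2 - 1, 2) = 6 quadratic terms:

# Hypothetical call; NumRegressors is the function defined in Example 3.
print(NumRegressors(3, 2))                      # 9  -> 3 linear + 6 quadratic regressors
print(NumRegressors(3, 2, cross_terms=False))   # 6  -> 3 pixels counted once per order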

Example 4: _construct_collection

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def _construct_collection(
        orders,
        dist,
        x_lookup,
        w_lookup,
):
    """Create a collection of {abscissa: weight} key-value pairs."""
    order = numpy.min(orders)
    skew = orders-order

    # Indices and coefficients used in the calculations
    indices = numpoly.glexindex(
        order-len(dist)+1, order+1, dimensions=len(dist))
    coeffs = numpy.sum(indices, -1)
    coeffs = (2*((order-coeffs+1) % 2)-1)*comb(len(dist)-1, order-coeffs)

    collection = defaultdict(float)
    for bidx, coeff in zip(indices+skew, coeffs.tolist()):
        abscissas = [value[idx] for idx, value in zip(bidx, x_lookup)]
        weights = [value[idx] for idx, value in zip(bidx, w_lookup)]
        for abscissa, weight in zip(product(*abscissas), product(*weights)):
            collection[abscissa] += numpy.prod(weight)*coeff

    return collection 
Author: jonathf, Project: chaospy, Lines of code: 26, Source file: sparse_grid.py

Example 5: _nCr

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def _nCr(n, r):
    """Number of combinations of r items out of a set of n.  Equals n!/(r!(n-r)!)"""
    #f = _math.factorial; return f(n) / f(r) / f(n-r)
    return _spspecial.comb(n, r) 
Author: pyGSTio, Project: pyGSTi, Lines of code: 6, Source file: fiducialpairreduction.py
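A small equivalence check (a sketch of my own, not part of pyGSTi) confirming that the comb call matches the factorial formula mentioned in the commented-out line:

import math
from scipy import special as _spspecial

n, r = 12, 5
by_comb = _spspecial.comb(n, r)                                                   # 792.0
by_factorial = math.factorial(n) // (math.factorial(r) * math.factorial(n - r))   # 792
print(by_comb == by_factorial)                                                    # True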

Example 6: lp2bp

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def lp2bp(b, a, wo=1.0, bw=1.0):
    """
    Transform a lowpass filter prototype to a bandpass filter.

    Return an analog band-pass filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, in transfer function ('ba') representation.

    """
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1
    N = len(b) - 1
    artype = mintypecode((a, b))
    ma = max([N, D])
    Np = N + ma
    Dp = D + ma
    bprime = numpy.zeros(Np + 1, artype)
    aprime = numpy.zeros(Dp + 1, artype)
    wosq = wo * wo
    for j in range(Np + 1):
        val = 0.0
        for i in range(0, N + 1):
            for k in range(0, i + 1):
                if ma - i + 2 * k == j:
                    val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
        bprime[Np - j] = val
    for j in range(Dp + 1):
        val = 0.0
        for i in range(0, D + 1):
            for k in range(0, i + 1):
                if ma - i + 2 * k == j:
                    val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
        aprime[Dp - j] = val

    return normalize(bprime, aprime) 
Author: ryfeus, Project: lambda-packs, Lines of code: 37, Source file: filter_design.py
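The function above appears to be the copy of scipy.signal.lp2bp vendored in lambda-packs, so the public API can be used to see the transform in action. A minimal sketch with a hypothetical first-order prototype H(s) = 1 / (s + 1):

from scipy.signal import lp2bp

# Map the unity-cutoff lowpass prototype to a bandpass centered at wo = 2 rad/s with bandwidth 0.5.
b_bp, a_bp = lp2bp([1.0], [1.0, 1.0], wo=2.0, bw=0.5)
print(b_bp, a_bp)   # approx. [0.5, 0.0] and [1.0, 0.5, 4.0], i.e. H(s) = 0.5 s / (s**2 + 0.5 s + 4)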

Example 7: lp2bs

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def lp2bs(b, a, wo=1.0, bw=1.0):
    """
    Transform a lowpass filter prototype to a bandstop filter.

    Return an analog band-stop filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, in transfer function ('ba') representation.

    """
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1
    N = len(b) - 1
    artype = mintypecode((a, b))
    M = max([N, D])
    Np = M + M
    Dp = M + M
    bprime = numpy.zeros(Np + 1, artype)
    aprime = numpy.zeros(Dp + 1, artype)
    wosq = wo * wo
    for j in range(Np + 1):
        val = 0.0
        for i in range(0, N + 1):
            for k in range(0, M - i + 1):
                if i + 2 * k == j:
                    val += (comb(M - i, k) * b[N - i] *
                            (wosq) ** (M - i - k) * bw ** i)
        bprime[Np - j] = val
    for j in range(Dp + 1):
        val = 0.0
        for i in range(0, D + 1):
            for k in range(0, M - i + 1):
                if i + 2 * k == j:
                    val += (comb(M - i, k) * a[D - i] *
                            (wosq) ** (M - i - k) * bw ** i)
        aprime[Dp - j] = val

    return normalize(bprime, aprime) 
Author: ryfeus, Project: lambda-packs, Lines of code: 39, Source file: filter_design.py

Example 8: bilinear

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def bilinear(b, a, fs=1.0):
    """Return a digital filter from an analog one using a bilinear transform.

    The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``.
    """
    fs = float(fs)
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1
    N = len(b) - 1
    artype = float
    M = max([N, D])
    Np = M
    Dp = M
    bprime = numpy.zeros(Np + 1, artype)
    aprime = numpy.zeros(Dp + 1, artype)
    for j in range(Np + 1):
        val = 0.0
        for i in range(N + 1):
            for k in range(i + 1):
                for l in range(M - i + 1):
                    if k + l == j:
                        val += (comb(i, k) * comb(M - i, l) * b[N - i] *
                                pow(2 * fs, i) * (-1) ** k)
        bprime[j] = real(val)
    for j in range(Dp + 1):
        val = 0.0
        for i in range(D + 1):
            for k in range(i + 1):
                for l in range(M - i + 1):
                    if k + l == j:
                        val += (comb(i, k) * comb(M - i, l) * a[D - i] *
                                pow(2 * fs, i) * (-1) ** k)
        aprime[j] = real(val)

    return normalize(bprime, aprime) 
Author: ryfeus, Project: lambda-packs, Lines of code: 37, Source file: filter_design.py
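Like Examples 6 and 7, this appears to be the scipy.signal implementation, so the public scipy.signal.bilinear can illustrate it. A minimal sketch, again with the hypothetical analog prototype H(s) = 1 / (s + 1), sampled at fs = 10 Hz:

from scipy.signal import bilinear

bz, az = bilinear([1.0], [1.0, 1.0], fs=10.0)
print(bz, az)   # approx. [0.0476, 0.0476] and [1.0, -0.9048], i.e. H(z) = (z + 1) / (21 z - 19)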

Example 9: from_bernstein_basis

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def from_bernstein_basis(cls, bp, extrapolate=None):
        """
        Construct a piecewise polynomial in the power basis
        from a polynomial in Bernstein basis.

        Parameters
        ----------
        bp : BPoly
            A Bernstein basis polynomial, as created by BPoly
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        dx = np.diff(bp.x)
        k = bp.c.shape[0] - 1  # polynomial order

        rest = (None,)*(bp.c.ndim-2)

        c = np.zeros_like(bp.c)
        for a in range(k+1):
            factor = (-1)**a * comb(k, a) * bp.c[a]
            for s in range(a, k+1):
                val = comb(k-a, s-a) * (-1)**s
                c[k-s] += factor * val / dx[(slice(None),)+rest]**s

        if extrapolate is None:
            extrapolate = bp.extrapolate

        return cls.construct_fast(c, bp.x, extrapolate, bp.axis) 
Author: ryfeus, Project: lambda-packs, Lines of code: 32, Source file: interpolate.py

Example 10: from_power_basis

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def from_power_basis(cls, pp, extrapolate=None):
        """
        Construct a piecewise polynomial in Bernstein basis
        from a power basis polynomial.

        Parameters
        ----------
        pp : PPoly
            A piecewise polynomial in the power basis
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        dx = np.diff(pp.x)
        k = pp.c.shape[0] - 1   # polynomial order

        rest = (None,)*(pp.c.ndim-2)

        c = np.zeros_like(pp.c)
        for a in range(k+1):
            factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
            for j in range(k-a, k+1):
                c[j] += factor * comb(j, k-a)

        if extrapolate is None:
            extrapolate = pp.extrapolate

        return cls.construct_fast(c, pp.x, extrapolate, pp.axis) 
Author: ryfeus, Project: lambda-packs, Lines of code: 31, Source file: interpolate.py
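Examples 9 and 10 are the two directions of the same basis change, exposed publicly as PPoly.from_bernstein_basis and BPoly.from_power_basis in scipy.interpolate. A minimal round-trip sketch on a made-up quadratic piece:

import numpy as np
from scipy.interpolate import PPoly, BPoly

# Hypothetical quadratic 3*x**2 + 2*x + 1 on the single interval [0, 2]
# (PPoly stores coefficients with the highest power first).
pp = PPoly(np.array([[3.0], [2.0], [1.0]]), np.array([0.0, 2.0]))
bp = BPoly.from_power_basis(pp)       # power basis -> Bernstein basis
pp2 = PPoly.from_bernstein_basis(bp)  # Bernstein basis -> power basis

x = np.linspace(0.0, 2.0, 5)
print(np.allclose(pp(x), bp(x)))      # True: both bases represent the same polynomial
print(np.allclose(pp.c, pp2.c))       # True: the round trip recovers the coefficients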

Example 11: _munp

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def _munp(self, n, c):
        def __munp(n, c):
            val = 0.0
            k = np.arange(0, n + 1)
            for ki, cnk in zip(k, sc.comb(n, k)):
                val = val + cnk * (-1) ** ki / (1.0 - c * ki)
            return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
        return _lazywhere(c != 0, (c,),
                          lambda c: __munp(n, c),
                          sc.gamma(n + 1)) 
Author: ryfeus, Project: lambda-packs, Lines of code: 12, Source file: _continuous_distns.py

Example 12: _p_value

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def _p_value(self):
        r"""
        Calculates the p-value of the binomial test.

        Returns
        -------
        pval : float
            The computed p-value.

        """
        successes = np.arange(self.x + 1)

        pval = np.sum(comb(self.n, successes) * self.p ** successes * self.q ** (self.n - successes))

        if self.alternative in ('two-sided', 'greater'):
            other_tail = np.arange(self.x, self.n + 1)

            y = comb(self.n, self.x) * (self.p ** self.x) * self.q ** (self.n - self.x)

            p_othertail = comb(self.n, other_tail) * self.p ** other_tail * self.q ** (self.n - other_tail)
            p_othertail = np.sum(p_othertail[p_othertail <= y])

            if self.alternative == 'two-sided':
                pval = p_othertail * 2
                #pval = 1 - pval
            elif self.alternative == 'greater':
                pval = p_othertail

        return pval 
Author: aschleg, Project: hypothetical, Lines of code: 31, Source file: hypothesis.py
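The lower-tail sum computed first in Example 12 is simply the binomial CDF, which gives an easy cross-check (illustrative numbers of my own, not taken from the project):

import numpy as np
from scipy.special import comb
from scipy.stats import binom

# Hypothetical data: x = 4 successes in n = 20 trials under H0: p = 0.5.
x, n, p = 4, 20, 0.5
successes = np.arange(x + 1)
pval_manual = np.sum(comb(n, successes) * p ** successes * (1 - p) ** (n - successes))
print(np.isclose(pval_manual, binom.cdf(x, n, p)))   # True: lower-tail binomial probability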

Example 13: abund_log_prob

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def abund_log_prob(genotype, abundance, refrabund=None, mean=30.0, sd=8.0,
                   error=0.001):
    """Calculate probability of k-mer abundance conditioned on genotype.

    The `genotype` variable represents the number of assumed allele copies and
    is one of {0, 1, 2} (corresponding to genotypes {0/0, 0/1, and 1/1}). The
    `mean` and `sd` variables describe a normal distribution of observed
    abundances of k-mers with copy number 2. The `error` parameter is the
    sequencing error rate.

    For SNVs, there is a 1-to-1 correspondence of alternate allele k-mers to
    reference allele k-mers. We can therefore check the frequency of the
    reference allele in the reference genome and scale up the error rate if it
    is repetitive. There is no such mapping of alt allele k-mers to refr allele
    k-mers for indels, so we use a lower fixed error rate.
    """
    if genotype == 0:
        if not refrabund:  # INDEL mode
            refrabund = 1
            error *= 0.01
        scaledmean = mean * refrabund
        if abundance > scaledmean:
            abundance = scaledmean
        nCk = choose(scaledmean, abundance, exact=True)
        prob = (
            log(nCk)
            + (abundance * log(error))
            + ((scaledmean - abundance) * log(1.0 - error))
        )
        return prob
    elif genotype == 1:
        return scipy.stats.norm.logpdf(abundance, mean / 2, sd / 2)
    elif genotype == 2:
        return scipy.stats.norm.logpdf(abundance, mean, sd) 
Author: kevlar-dev, Project: kevlar, Lines of code: 36, Source file: simlike.py

Example 14: test_jw_number_indices

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def test_jw_number_indices(self):
        n_qubits = numpy.random.randint(1, 12)
        n_particles = numpy.random.randint(n_qubits + 1)

        number_indices = jw_number_indices(n_particles, n_qubits)
        subspace_dimension = len(number_indices)

        self.assertEqual(subspace_dimension, comb(n_qubits, n_particles))

        for index in number_indices:
            binary_string = bin(index)[2:].zfill(n_qubits)
            n_ones = binary_string.count('1')
            self.assertEqual(n_ones, n_particles) 
Author: quantumlib, Project: OpenFermion, Lines of code: 15, Source file: _sparse_tools_test.py
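The assertion above compares an integer length with comb's float return value, which works as long as the binomial coefficient fits exactly in a double. A short aside (not from the OpenFermion source) on the exact=True alternative that is safer for large dimensions:

from scipy.special import comb

n_qubits, n_particles = 6, 3
print(comb(n_qubits, n_particles))              # 20.0 -> float, exact only while it fits a double
print(comb(n_qubits, n_particles, exact=True))  # 20   -> exact Python int, safe for large counts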

Example 15: hypergeom_lh

# Required import: from scipy import special [as alias]
# Or: from scipy.special import comb [as alias]
def hypergeom_lh(ho, ha, trial, n, g, N):
	"""
	Returns likelihood ratio for independently distributed hypergeometric random variables. 
	
	Parameters
	----------
	ho : float
	   null hypothesis
	ha : float
	   alternative hypothesis
	trial : float
	   number of good elements in recent sample 
	n : float or int
	   sample size
	g : float or int
	   number of good elements in sample 
	N : float or int
	   total population size 
	Returns
	-------
	float
	   likelihood ratio of model
	
	"""
	ho_G, ha_G = ho * (N / n), ha * (N / n)

	null_lh = (comb(ho_G, g) * comb(N - ho_G, n - g)) 
	alt_lh = (comb(ha_G, g) * comb(N - ha_G, n - g))

	return alt_lh / null_lh 
Author: statlab, Project: permute, Lines of code: 32, Source file: sprt.py


Note: the scipy.special.comb examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use are governed by each project's license. Do not republish without permission.