

Python stats.rankdata Method Code Examples

This article collects typical usage examples of the scipy.stats.rankdata method in Python. If you are wondering what stats.rankdata does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the scipy.stats module.


The following section presents 15 code examples of the stats.rankdata method, ordered by popularity by default.
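Before turning to the project-level examples, here is a minimal, self-contained illustration of what scipy.stats.rankdata returns. By default, tied values share their average rank; the method argument switches to 'min', 'max', 'dense', or 'ordinal' ranking.

import numpy as np
from scipy import stats

x = np.array([3.0, 1.0, 4.0, 1.0, 5.0])
print(stats.rankdata(x))                  # [3.  1.5 4.  1.5 5. ]  ties share the average rank
print(stats.rankdata(x, method='min'))    # ranks 3, 1, 4, 1, 5
print(stats.rankdata(x, method='dense'))  # ranks 2, 1, 3, 1, 4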

Example 1: score_candidates

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def score_candidates(reactants, candidate_list, xs):

	pred = model.predict(xs, batch_size = 20)[0]
	rank = ss.rankdata(pred)

	fname = raw_input('Enter file name to save to: ') + '.dat'
	with open(os.path.join(FROOT, fname), 'w') as fid:
		fid.write('FOR REACTANTS {}\n'.format(Chem.MolToSmiles(reactants)))
		fid.write('Candidate product\tCandidate edit\tProbability\tRank\n')
		for (c, candidate) in enumerate(candidate_list):
			candidate_smile = candidate[0]
			candidate_edit = candidate[1]
			fid.write('{}\t{}\t{}\t{}\n'.format(
				candidate_smile, candidate_edit, pred[c], 1 + len(pred) - rank[c]
			))
	print('Wrote to file {}'.format(os.path.join(FROOT, fname))) 
Developer: connorcoley, Project: ochem_predict_nn, Lines: 18, Source: lowe_interactive_predict.py

Example 2: _get_scaler_function

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def _get_scaler_function(scaler_algo):
        scaler = None
        if scaler_algo == 'normcdf':
            scaler = lambda x: norm.cdf(x, x.mean(), x.std())
        elif scaler_algo == 'lognormcdf':
            scaler = lambda x: norm.cdf(np.log(x), np.log(x).mean(), np.log(x).std())
        elif scaler_algo == 'percentile':
            scaler = lambda x: rankdata(x).astype(np.float64) / len(x)
        elif scaler_algo == 'percentiledense':
            scaler = lambda x: rankdata(x, method='dense').astype(np.float64) / len(x)
        elif scaler_algo == 'ecdf':
            from statsmodels.distributions import ECDF
            scaler = lambda x: ECDF(x)
        elif scaler_algo == 'none':
            scaler = lambda x: x
        else:
            raise InvalidScalerException("Invalid scaler algorithm.  Must be either percentile or normcdf.")
        return scaler 
Developer: JasonKessler, Project: scattertext, Lines: 20, Source: ScaledFScore.py

Example 3: runbasic_old

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def runbasic_old(self, useranks=False):
        #check: refactoring screwed up case useranks=True

        #groupxsum = np.bincount(intlab, weights=X[:,0])
        #groupxmean = groupxsum * 1.0 / groupnobs
        x = self.x
        if useranks:
            self.xx = x[:,1].argsort().argsort() + 1  #rankraw
        else:
            self.xx = x[:,0]
        self.groupsum = groupranksum = np.bincount(self.intlab, weights=self.xx)
        #print('groupranksum', groupranksum, groupranksum.shape, self.groupnobs.shape
        # start at 1 for stats.rankdata :
        self.groupmean = grouprankmean = groupranksum * 1.0 / self.groupnobs # + 1
        self.groupmeanfilter = grouprankmean[self.intlab]
        #return grouprankmean[intlab] 
Developer: birforce, Project: vnpy_crypto, Lines: 18, Source: multicomp.py

Example 4: runbasic

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def runbasic(self, useranks=False):
        #check: refactoring screwed up case useranks=True

        #groupxsum = np.bincount(intlab, weights=X[:,0])
        #groupxmean = groupxsum * 1.0 / groupnobs
        x = self.x
        if useranks:
            xuni, xintlab = np.unique(x[:,0], return_inverse=True)
            ranksraw = x[:,0].argsort().argsort() + 1  #rankraw
            self.xx = GroupsStats(np.column_stack([ranksraw, xintlab]),
                                  useranks=False).groupmeanfilter
        else:
            self.xx = x[:,0]
        self.groupsum = groupranksum = np.bincount(self.intlab, weights=self.xx)
        #print('groupranksum', groupranksum, groupranksum.shape, self.groupnobs.shape
        # start at 1 for stats.rankdata :
        self.groupmean = grouprankmean = groupranksum * 1.0 / self.groupnobs # + 1
        self.groupmeanfilter = grouprankmean[self.intlab]
        #return grouprankmean[intlab] 
Developer: birforce, Project: vnpy_crypto, Lines: 21, Source: multicomp.py

Example 5: rankdata

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def rankdata(x):
    '''rankdata, equivalent to scipy.stats.rankdata

    just a different implementation, I have not yet compared speed

    '''
    uni, intlab = np.unique(x[:,0], return_inverse=True)
    groupnobs = np.bincount(intlab)
    groupxsum = np.bincount(intlab, weights=x[:,0])
    groupxmean = groupxsum * 1.0 / groupnobs

    rankraw = x[:,0].argsort().argsort()
    groupranksum = np.bincount(intlab, weights=rankraw)
    # start at 1 for stats.rankdata :
    grouprankmean = groupranksum * 1.0 / groupnobs + 1
    return grouprankmean[intlab]


#new 
Developer: birforce, Project: vnpy_crypto, Lines: 21, Source: multicomp.py
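As a quick sanity check (a small sketch, not part of the original multicomp.py, and assuming the function above is in scope), the reimplementation agrees with scipy's own rankdata on a small 2-D input:

import numpy as np
from scipy import stats

data = np.array([[0.5], [1.2], [1.2], [3.0], [0.5]])  # 2-D input; the values sit in column 0
print(rankdata(data))              # [1.5 3.5 3.5 5.  1.5]
print(stats.rankdata(data[:, 0]))  # the same average ranks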

Example 6: test_trimmed2

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def test_trimmed2(self):
        x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
        y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
        # Use center='trimmed'
        Xsq1, pval1 = stats.fligner(x, y, center='trimmed', proportiontocut=0.125)
        # Trim the data here, and use center='mean'
        Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
        # Result should be the same.
        assert_almost_equal(Xsq1, Xsq2)
        assert_almost_equal(pval1, pval2)

    # The following test looks reasonable at first, but fligner() uses the
    # function stats.rankdata(), and in one of the cases in this test,
    # there are ties, while in the other (because of normal rounding
    # errors) there are not.  This difference leads to differences in the
    # third significant digit of W.
    #
    #def test_equal_mean_median(self):
    #    x = np.linspace(-1,1,21)
    #    y = x**3
    #    W1, pval1 = stats.fligner(x, y, center='mean')
    #    W2, pval2 = stats.fligner(x, y, center='median')
    #    assert_almost_equal(W1, W2)
    #    assert_almost_equal(pval1, pval2) 
Developer: ktraunmueller, Project: Computable, Lines: 26, Source: test_morestats.py
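The tie sensitivity described in the commented-out test can be reproduced directly with stats.rankdata (a standalone illustration, not part of the original test file): exact ties share an average rank, while a rounding-error-sized perturbation produces distinct ranks.

import numpy as np
from scipy import stats

exact = np.array([0.1, 0.2, 0.2, 0.3])
perturbed = exact + np.array([0.0, 0.0, 1e-12, 0.0])  # a tiny offset breaks the tie

print(stats.rankdata(exact))      # [1.  2.5 2.5 4. ]
print(stats.rankdata(perturbed))  # [1. 2. 3. 4.]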

Example 7: fusion

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def fusion(*args):
    from scipy.stats import rankdata
    from sklearn.preprocessing import minmax_scale

    max_rk = [None] * len(args)
    masks = [None] * len(args)
    for j, a in enumerate(args):
        m = masks[j] = a != 0
        a[m] = rankdata(a[m])
        max_rk[j] = a[m].max()

    max_rk = min(max_rk)
    for j, a in enumerate(args):
        m = masks[j]
        a[m] = minmax_scale(a[m], feature_range=(1, max_rk))

    return np.hstack(args)


# fuse the matrices 
Developer: MICA-MNI, Project: BrainSpace, Lines: 22, Source: plot_tutorial2.py
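A minimal, hypothetical call of fusion on two small matrices (the data below is made up for illustration; note that the inputs are modified in place and zeros are treated as missing entries):

import numpy as np

a = np.array([[0.0, 2.0, 5.0], [1.0, 0.0, 3.0]])
b = np.array([[4.0, 0.0, 1.0], [0.0, 2.0, 6.0]])

# Nonzero entries of each matrix are ranked, rescaled to a common range,
# and the matrices are then stacked column-wise.
fused = fusion(a, b)
print(fused.shape)  # (2, 6)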

Example 8: _build_kernel

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def _build_kernel(x, kernel, gamma=None):

    if kernel in {'pearson', 'spearman'}:
        if kernel == 'spearman':
            x = np.apply_along_axis(rankdata, 1, x)
        return np.corrcoef(x)

    if kernel in {'cosine', 'normalized_angle'}:
        x = 1 - squareform(pdist(x, metric='cosine'))
        if kernel == 'normalized_angle':
            x = 1 - np.arccos(x, x)/np.pi
        return x

    if kernel == 'gaussian':
        if gamma is None:
            gamma = 1 / x.shape[1]
        return rbf_kernel(x, gamma=gamma)

    if callable(kernel):
        return kernel(x)

    raise ValueError("Unknown kernel '{0}'.".format(kernel)) 
Developer: MICA-MNI, Project: BrainSpace, Lines: 24, Source: kernels.py

Example 9: score_samples

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def score_samples(y_true, y_score):
    scores = []

    y_true = csr_matrix(y_true)
    y_score = -y_score

    n_samples, n_labels = y_true.shape

    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        relevant = y_true.indices[start:stop]

        if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or unrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
            aux = 1.
        else:
            scores_i = y_score[i]
            rank = rankdata(scores_i, 'max')[relevant]
            L = rankdata(scores_i[relevant], 'max')
            aux = (L / rank).mean()

        scores.append(aux)

    return np.array(scores) 
Developer: ex4sperans, Project: freesound-classification, Lines: 26, Source: relabel_noisy_data.py

Example 10: check_decision_proba_consistency

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def check_decision_proba_consistency(name, estimator_orig):
    # Check whether an estimator having both decision_function and
    # predict_proba methods has outputs with perfect rank correlation.

    centers = [(2, 2), (4, 4)]
    X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
                      centers=centers, cluster_std=1.0, shuffle=True)
    X_test = np.random.randn(20, 2) + 4
    estimator = clone(estimator_orig)

    if (hasattr(estimator, "decision_function") and
            hasattr(estimator, "predict_proba")):

        estimator.fit(X, y)
        a = estimator.predict_proba(X_test)[:, 1]
        b = estimator.decision_function(X_test)
        assert_array_equal(rankdata(a), rankdata(b)) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 19, Source: estimator_checks.py
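As a usage sketch (assuming the surrounding estimator_checks imports are available), the check can be run on any classifier that exposes both APIs; logistic regression passes because predict_proba is a monotonic transform of decision_function, so both produce identical rankdata output:

from sklearn.linear_model import LogisticRegression

# The sigmoid link preserves ordering, so rankdata(predict_proba[:, 1])
# matches rankdata(decision_function) and the assertion holds.
check_decision_proba_consistency("LogisticRegression", LogisticRegression())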

Example 11: test_ranking

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def test_ranking(self):
        x = ma.array([0,1,1,1,2,3,4,5,5,6,])
        assert_almost_equal(mstats.rankdata(x),
                           [1,3,3,3,5,6,7,8.5,8.5,10])
        x[[3,4]] = masked
        assert_almost_equal(mstats.rankdata(x),
                           [1,2.5,2.5,0,0,4,5,6.5,6.5,8])
        assert_almost_equal(mstats.rankdata(x, use_missing=True),
                            [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
        x = ma.array([0,1,5,1,2,4,3,5,1,6,])
        assert_almost_equal(mstats.rankdata(x),
                           [1,3,8.5,3,5,7,6,8.5,3,10])
        x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
        assert_almost_equal(mstats.rankdata(x),
                            [[1,3,3,3,5], [6,7,8.5,8.5,10]])
        assert_almost_equal(mstats.rankdata(x, axis=1),
                           [[1,3,3,3,5], [1,2,3.5,3.5,5]])
        assert_almost_equal(mstats.rankdata(x,axis=0),
                           [[1,1,1,1,1], [2,2,2,2,2,]]) 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 21, Source: test_mstats_basic.py

Example 12: test_scipy_compat

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def test_scipy_compat(self):
        from scipy.stats import rankdata

        def _check(arr):
            mask = ~np.isfinite(arr)
            arr = arr.copy()
            result = libalgos.rank_1d_float64(arr)
            arr[mask] = np.inf
            exp = rankdata(arr)
            exp[mask] = nan
            assert_almost_equal(result, exp)

        _check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
        _check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan])) 
Developer: Frank-qlu, Project: recruit, Lines: 16, Source: test_algos.py

Example 13: _get_scaler_function

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def _get_scaler_function(self, scaler_algo):
        scaler = None
        if scaler_algo == 'percentile':
            scaler = lambda x: rankdata(x).astype(np.float64) / len(x)
        elif scaler_algo == 'normcdf':
            # scaler = lambda x: ECDF(x[cat_word_counts != 0])(x)
            scaler = lambda x: norm.cdf(x, x.mean(), x.std())
        elif scaler_algo == 'none':
            scaler = lambda x: x
        else:
            raise InvalidScalerException("Invalid scaler algorithm.  Must be either percentile or normcdf.")
        return scaler 
Developer: JasonKessler, Project: scattertext, Lines: 14, Source: TermDocMatrix.py

Example 14: _get_percentiles_from_freqs

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def _get_percentiles_from_freqs(self, freqs):
        return rankdata(freqs) / len(freqs) 
Developer: JasonKessler, Project: scattertext, Lines: 4, Source: TermDocMatrix.py

Example 15: scale_neg_1_to_1_with_zero_mean_rank_abs_max

# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import rankdata [as alias]
def scale_neg_1_to_1_with_zero_mean_rank_abs_max(v):
	rankv = v * 2 - 1
	pos_v = rankv[rankv > 0]
	pos_v = rankdata(pos_v, 'dense')
	pos_v = pos_v / pos_v.max()
	neg_v = rankv[rankv < 0]
	neg_v = rankdata(neg_v, 'dense')
	neg_v = neg_v / neg_v.max()
	rankv[rankv > 0] = pos_v
	rankv[rankv < 0] = - (neg_v.max() - neg_v)

	return scale_neg_1_to_1_with_zero_mean_abs_max(rankv) 
Developer: JasonKessler, Project: scattertext, Lines: 14, Source: Scalers.py


Note: The scipy.stats.rankdata examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce this article without permission.