This article collects typical usage examples of the Python method scipy.stats.mstats.gmean. If you are unsure exactly how mstats.gmean is called, what its arguments look like in practice, or simply want to see it used in real code, the hand-picked examples below should help. You can also explore further usage examples for the module that provides the method, scipy.stats.mstats.
The following shows 15 code examples of mstats.gmean, sorted by popularity by default.
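Before the examples, here is a minimal sketch (not taken from any of the projects below) of what mstats.gmean itself does: it computes a geometric mean and honors the mask of a NumPy masked array, so masked entries are simply ignored.

import numpy as np
import numpy.ma as ma
from scipy.stats import mstats

# Plain sequence: the geometric mean of 1..4 is (1*2*3*4) ** (1/4).
print(mstats.gmean([1, 2, 3, 4]))              # ~2.2134

# Masked array: the masked entry (4) is ignored, giving (1*2*3) ** (1/3).
a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
print(mstats.gmean(a))                         # ~1.8171

# axis selects the reduction direction, as with most NumPy-style reducers.
b = np.array([[1.0, 2.0], [3.0, 4.0]])
print(mstats.gmean(b, axis=0))                 # column-wise geometric means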
Example 1: experiment_pred
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def experiment_pred(experiment_dir, images_lst):
    print(f"Start predict: {experiment_dir}")
    transforms = get_transforms(False, CROP_SIZE)

    pred_lst = []
    for fold in config.folds:
        print("Predict fold", fold)
        fold_dir = experiment_dir / f'fold_{fold}'
        model_path = get_best_model_path(fold_dir)
        print("Model path", model_path)
        predictor = Predictor(model_path, transforms,
                              BATCH_SIZE,
                              (config.audio.n_mels, CROP_SIZE),
                              (config.audio.n_mels, CROP_SIZE//TILE_STEP),
                              device=DEVICE)

        pred = pred_test(predictor, images_lst)
        pred_lst.append(pred)

    preds = gmean(pred_lst, axis=0)
    return preds
Example 2: stacking_pred
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def stacking_pred(experiment_dir, stack_probs):
    print(f"Start predict: {experiment_dir}")

    pred_lst = []
    for fold in config.folds:
        print("Predict fold", fold)
        fold_dir = experiment_dir / f'fold_{fold}'
        model_path = get_best_model_path(fold_dir)
        print("Model path", model_path)
        predictor = StackPredictor(model_path, STACK_BATCH_SIZE,
                                   device=DEVICE)

        pred = predictor.predict(stack_probs)
        pred_lst.append(pred)

    preds = gmean(pred_lst, axis=0)
    return preds
Example 3: test_1D
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def test_1D(self):
    a = (1, 2, 3, 4)
    actual = mstats.gmean(a)
    desired = np.power(1*2*3*4, 1./4.)
    assert_almost_equal(actual, desired, decimal=14)
    desired1 = mstats.gmean(a, axis=-1)
    assert_almost_equal(actual, desired1, decimal=14)
    assert_(not isinstance(desired1, ma.MaskedArray))

    a = ma.array((1, 2, 3, 4), mask=(0, 0, 0, 1))
    actual = mstats.gmean(a)
    desired = np.power(1*2*3, 1./3.)
    assert_almost_equal(actual, desired, decimal=14)
    desired1 = mstats.gmean(a, axis=-1)
    assert_almost_equal(actual, desired1, decimal=14)
Example 4: test_1D
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def test_1D(self):
    a = (1, 2, 3, 4)
    actual = mstats.gmean(a)
    desired = np.power(1*2*3*4, 1./4.)
    assert_almost_equal(actual, desired, decimal=14)
    desired1 = mstats.gmean(a, axis=-1)
    assert_almost_equal(actual, desired1, decimal=14)
    assert_(not isinstance(desired1, ma.MaskedArray))

    a = ma.array((1, 2, 3, 4), mask=(0, 0, 0, 1))
    actual = mstats.gmean(a)
    desired = np.power(1*2*3, 1./3.)
    assert_almost_equal(actual, desired, decimal=14)
    desired1 = mstats.gmean(a, axis=-1)
    assert_almost_equal(actual, desired1, decimal=14)
Example 5: calculate_all_nfs
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def calculate_all_nfs(sample_frame, ranked_targets, ref_sample):
    """For a set of n ranked_genes, calculates normalization factors NF_1,
    NF_2, ..., NF_n. NF_i represents the normalization factor generated by
    considering the first i targets in ranked_targets.

    calculate_nf (which returns only NF_n) is probably more useful for
    routine analysis.

    :param DataFrame sample_frame: A sample data frame.
    :param iterable ranked_targets: A list or Series of target names, in order
        of descending stability (ascending M).
    :param string ref_sample: The name of the sample to normalize against.
    :return: a DataFrame with columns 1, 2, ..., n containing normalization
        factors NF_1, ..., NF_n for each sample, indexed by sample name.
    :rtype: DataFrame
    """
    # Returns a DataFrame, where rows represent samples and columns represent
    # a number of reference genes.
    grouped = sample_frame.groupby(['Target', 'Sample'])['Cq'].aggregate(average_cq)
    samples = sample_frame['Sample'].unique()
    nfs = {}
    for i in xrange(1, len(ranked_targets)+1):
        nfs[i] = gmean([pow(2, -grouped.ix[zip(repeat(ref_gene), samples)] + grouped.ix[ref_gene, ref_sample]) for ref_gene in ranked_targets[:i]])
    return pd.DataFrame(nfs, index=samples)
Example 6: gmean_preds_blend
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def gmean_preds_blend(probs_df_lst):
    blend_df = probs_df_lst[0]
    blend_values = np.stack([df.loc[blend_df.index.values].values
                             for df in probs_df_lst], axis=0)
    blend_values = gmean(blend_values, axis=0)

    blend_df.values[:] = blend_values
    return blend_df
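The function above blends class-probability predictions from several models by a geometric rather than arithmetic mean, which pulls the blended score down whenever the models disagree. Below is a standalone sketch of the same idea with two small, made-up probability DataFrames (the index and column names are hypothetical):

import numpy as np
import pandas as pd
from scipy.stats.mstats import gmean

probs_a = pd.DataFrame([[0.9, 0.1], [0.2, 0.8]],
                       index=['clip_0', 'clip_1'], columns=['class_0', 'class_1'])
probs_b = pd.DataFrame([[0.7, 0.3], [0.4, 0.6]],
                       index=['clip_0', 'clip_1'], columns=['class_0', 'class_1'])

# Align on the first frame's index, stack to (n_models, n_samples, n_classes),
# then reduce over the model axis with the geometric mean.
stacked = np.stack([df.loc[probs_a.index.values].values
                    for df in [probs_a, probs_b]], axis=0)
blend = pd.DataFrame(gmean(stacked, axis=0),
                     index=probs_a.index, columns=probs_a.columns)
print(blend)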
Example 7: test_2D
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def test_2D(self):
    a = ma.array(((1, 2, 3, 4), (1, 2, 3, 4), (1, 2, 3, 4)),
                 mask=((0, 0, 0, 0), (1, 0, 0, 1), (0, 1, 1, 0)))
    actual = mstats.gmean(a)
    desired = np.array((1, 2, 3, 4))
    assert_array_almost_equal(actual, desired, decimal=14)
    #
    desired1 = mstats.gmean(a, axis=0)
    assert_array_almost_equal(actual, desired1, decimal=14)
    #
    actual = mstats.gmean(a, -1)
    desired = ma.array((np.power(1*2*3*4, 1./4.),
                        np.power(2*3, 1./2.),
                        np.power(1*4, 1./2.)))
    assert_array_almost_equal(actual, desired, decimal=14)
Example 8: main
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def main():
    sol = dict()
    for method in ['dopri5', 'adams']:
        for tol in [1e-3, 1e-6, 1e-9]:
            print('======= {} | tol={:e} ======='.format(method, tol))
            nfes = []
            times = []
            errs = []
            for c in ['A', 'B', 'C', 'D', 'E']:
                for i in ['1', '2', '3', '4', '5']:
                    diffeq, init, _ = getattr(detest, c + i)()
                    t0, y0 = init()
                    diffeq = NFEDiffEq(diffeq)

                    if not c + i in sol:
                        sol[c + i] = odeint(
                            diffeq, y0, torch.stack([t0, torch.tensor(20.)]), atol=1e-12, rtol=1e-12, method='dopri5'
                        )[1]
                        diffeq.nfe = 0

                    start_time = time.time()
                    est = odeint(diffeq, y0, torch.stack([t0, torch.tensor(20.)]), atol=tol, rtol=tol, method=method)
                    time_spent = time.time() - start_time

                    error = torch.sqrt(torch.mean((sol[c + i] - est[1])**2))

                    errs.append(error.item())
                    nfes.append(diffeq.nfe)
                    times.append(time_spent)

                    print('{}: NFE {} | Time {} | Err {:e}'.format(c + i, diffeq.nfe, time_spent, error.item()))

            print('Total NFE {} | Total Time {} | GeomAvg Error {:e}'.format(np.sum(nfes), np.sum(times), gmean(errs)))
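The summary line reports gmean(errs) rather than a plain average because per-problem errors can span several orders of magnitude, and an arithmetic mean would be dominated by the largest one. A tiny illustration with hypothetical error values:

import numpy as np
from scipy.stats.mstats import gmean

errs = [1e-9, 1e-6, 1e-3]   # hypothetical per-problem RMS errors
print(np.mean(errs))        # ~3.3e-04, dominated by the largest error
print(gmean(errs))          # 1e-06, the "typical" order of magnitude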
Example 9: test_1D_float96
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def test_1D_float96(self):
    a = ma.array((1, 2, 3, 4), mask=(0, 0, 0, 1))
    actual_dt = mstats.gmean(a, dtype=np.float96)
    desired_dt = np.power(1 * 2 * 3, 1. / 3.).astype(np.float96)
    assert_almost_equal(actual_dt, desired_dt, decimal=14)
    assert_(actual_dt.dtype == desired_dt.dtype)
Example 10: test_2D
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def test_2D(self):
    a = ma.array(((1, 2, 3, 4), (1, 2, 3, 4), (1, 2, 3, 4)),
                 mask=((0, 0, 0, 0), (1, 0, 0, 1), (0, 1, 1, 0)))
    actual = mstats.gmean(a)
    desired = np.array((1, 2, 3, 4))
    assert_array_almost_equal(actual, desired, decimal=14)

    desired1 = mstats.gmean(a, axis=0)
    assert_array_almost_equal(actual, desired1, decimal=14)

    actual = mstats.gmean(a, -1)
    desired = ma.array((np.power(1*2*3*4, 1./4.),
                        np.power(2*3, 1./2.),
                        np.power(1*4, 1./2.)))
    assert_array_almost_equal(actual, desired, decimal=14)
Example 11: test_gmean
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def test_gmean(self):
    for n in self.get_n():
        x, y, xm, ym = self.generate_xy_sample(n)
        r = stats.gmean(abs(x))
        rm = stats.mstats.gmean(abs(xm))
        assert_allclose(r, rm, rtol=1e-13)

        r = stats.gmean(abs(y))
        rm = stats.mstats.gmean(abs(ym))
        assert_allclose(r, rm, rtol=1e-13)
Example 12: impute_missing_total_reads
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def impute_missing_total_reads(total_reads, missing_variant_confidence):
    # Change NaNs to masked values via SciPy.
    masked_total_reads = ma.fix_invalid(total_reads)

    # Going forward, suppose you have v variants and s samples in a v*s matrix of
    # read counts. Missing values are masked.

    # Calculate geometric mean of variant read depth in each sample. Result: s*1
    sample_means = gmean(masked_total_reads, axis=0)
    assert np.sum(sample_means <= 0) == np.sum(np.isnan(sample_means)) == 0
    # Divide every variant's read count by its mean sample read depth to get read
    # depth enrichment relative to other variants in sample. Result: v*s
    normalized_to_sample = np.dot(masked_total_reads, np.diag(1./sample_means))
    # For each variant, calculate geometric mean of its read depth enrichment
    # across samples. Result: v*1
    variant_mean_reads = gmean(normalized_to_sample, axis=1)
    assert np.sum(variant_mean_reads <= 0) == np.sum(np.isnan(variant_mean_reads)) == 0

    # Convert 1D arrays to vectors to permit matrix multiplication.
    imputed_counts = np.dot(variant_mean_reads.reshape((-1, 1)), sample_means.reshape((1, -1)))
    nan_coords = np.where(np.isnan(total_reads))
    total_reads[nan_coords] = imputed_counts[nan_coords]
    assert np.sum(total_reads <= 0) == np.sum(np.isnan(total_reads)) == 0

    total_reads[nan_coords] *= missing_variant_confidence
    return np.floor(total_reads).astype(np.int)
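The step that relies on mstats.gmean here is the per-sample depth estimate: because NaNs are first converted to masked values with ma.fix_invalid, the geometric mean of each column simply skips the missing entries. A small sketch with a hypothetical 3-variant by 2-sample read-count matrix:

import numpy as np
import numpy.ma as ma
from scipy.stats.mstats import gmean

total_reads = np.array([[100., 200.],
                        [ 50., np.nan],   # missing count for variant 1, sample 1
                        [ 80., 160.]])

masked_total_reads = ma.fix_invalid(total_reads)   # NaN becomes a masked entry
sample_means = gmean(masked_total_reads, axis=0)   # per-sample depth, masked cells skipped
print(sample_means)                                # approximately [73.7, 178.9]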
Example 13: calculate_nf
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def calculate_nf(sample_frame, ref_targets, ref_sample):
    """Calculates a normalization factor from the geometric mean of the
    expression of all ref_targets, normalized to a reference sample.

    :param DataFrame sample_frame: A sample data frame.
    :param iterable ref_targets: A list or Series of target names.
    :param string ref_sample: The name of the sample to normalize against.
    :return: a Series indexed by sample name containing normalization factors
        for each sample.
    """
    grouped = sample_frame.groupby(['Target', 'Sample'])['Cq'].aggregate(average_cq)
    samples = sample_frame['Sample'].unique()
    nfs = gmean([pow(2, -grouped.ix[zip(repeat(ref_gene), samples)] + grouped.ix[ref_gene, ref_sample]) for ref_gene in ref_targets])
    return pd.Series(nfs, index=samples)
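The expression pow(2, -grouped.ix[...] + grouped.ix[ref_gene, ref_sample]) turns a Cq difference into a relative quantity (one qPCR cycle corresponds to a factor of two), and the normalization factor of a sample is the geometric mean of these relative quantities over the reference genes. A self-contained sketch with made-up Cq values (gene and sample names are hypothetical):

import numpy as np
from scipy.stats.mstats import gmean

# Hypothetical Cq values: one row of samples (A, B, C) per reference gene.
cq = {'GAPDH': np.array([20.0, 21.0, 19.5]),
      'ACTB':  np.array([22.0, 23.5, 21.0])}
ref_idx = 0   # normalize against sample A

# Relative quantity per gene and sample: 2 ** (Cq_ref_sample - Cq_sample).
rel_qty = [2.0 ** (vals[ref_idx] - vals) for vals in cq.values()]

# Normalization factor per sample: geometric mean across the reference genes.
nf = gmean(rel_qty, axis=0)
print(nf)   # sample A is 1.0 by construction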
Example 14: benchmark
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def benchmark():
    n_atoms_range = [1, 3, 9]
    n_channels_range = [1, 25, 50, 100, 200]
    n_times_atom_range = [8, 32, 128]

    n_runs = (len(n_atoms_range) * len(n_channels_range) * len(
        n_times_atom_range) * len(all_func))

    k = 0
    results = []
    for n_atoms in n_atoms_range:
        for n_channels in n_channels_range:
            for n_times_atom in n_times_atom_range:
                for func in all_func:
                    print('%d/%d, %s' % (k, n_runs, func.__name__))
                    k += 1
                    results.append(
                        run_one(n_atoms, n_channels, n_times_atom, func))

    df = pd.DataFrame(results, columns=[
        'n_atoms', 'n_channels', 'n_times_atom', 'func', 'duration'
    ])

    fig, axes = plt.subplots(2, 2, figsize=(10, 8))
    axes = axes.ravel()

    def plot(index, ax):
        pivot = df.pivot_table(columns='func', index=index, values='duration',
                               aggfunc=gmean)
        pivot.plot(ax=ax)
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.set_ylabel('duration')

    plot('n_atoms', axes[0])
    plot('n_times_atom', axes[1])
    plot('n_channels', axes[2])
    # plot('n_times_valid', axes[3])
    plt.tight_layout()
    plt.show()
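Passing gmean as the aggfunc of pivot_table summarizes the durations of each (parameter, function) cell by their geometric mean, which is the usual way to average timing ratios. A small standalone sketch with made-up measurements:

import pandas as pd
from scipy.stats.mstats import gmean

df = pd.DataFrame({
    'func': ['fast', 'fast', 'slow', 'slow'] * 2,
    'n_atoms': [1, 3, 1, 3] * 2,
    'duration': [0.10, 0.40, 0.30, 1.20, 0.20, 0.50, 0.60, 1.00],
})

# Geometric mean of duration for each (n_atoms, func) cell.
pivot = df.pivot_table(columns='func', index='n_atoms',
                       values='duration', aggfunc=gmean)
print(pivot)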
Example 15: benchmark
# Required import: from scipy.stats import mstats [as alias]
# Or: from scipy.stats.mstats import gmean [as alias]
def benchmark():
    n_atoms_range = [1, 2, 4, 8, 16]
    n_channels_range = [10, 20, 40, 80, 160]
    n_times_atom_range = [10, 20, 40, 80, 160]

    n_runs = (len(n_atoms_range) * len(n_channels_range) *
              len(n_times_atom_range) * len(all_func))

    k = 0
    results = []
    for n_atoms in n_atoms_range:
        for n_channels in n_channels_range:
            for n_times_atom in n_times_atom_range:
                for func in all_func:
                    print('%d/%d, %s' % (k, n_runs, func.__name__))
                    k += 1
                    results.append(
                        run_one(n_atoms, n_channels, n_times_atom, func))

    df = pd.DataFrame(results, columns=[
        'n_atoms', 'n_channels', 'n_times_atom', 'func', 'duration'
    ])

    fig, axes = plt.subplots(2, 2, figsize=(10, 8))
    axes = axes.ravel()

    def plot(index, ax):
        pivot = df.pivot_table(columns='func', index=index, values='duration',
                               aggfunc=gmean)
        pivot.plot(ax=ax)
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.set_ylabel('duration')

    plot('n_atoms', axes[0])
    plot('n_times_atom', axes[1])
    plot('n_channels', axes[2])
    plt.tight_layout()
    plt.show()