This article collects typical usage examples of the Python method joblib.Parallel.std. If you have been wondering what Parallel.std does, how to call it, or what real uses of it look like, the curated code examples below should help. You can also read more about joblib.Parallel, the class this method belongs to.
The following shows 2 code examples involving Parallel.std, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
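Before the collected examples, here is a minimal, self-contained sketch of the pattern both snippets rely on: dispatch a function over a set of inputs with joblib.Parallel and delayed, gather the results into a NumPy array, and call .std on that array. Note that in both examples below, .std is actually invoked on the NumPy array of results produced by Parallel, not on the Parallel object itself. The function square and its inputs here are invented purely for illustration.

import numpy as np
from joblib import Parallel, delayed

def square(x):
    # stand-in for a per-item computation
    return x ** 2

# n_jobs=-2 uses all CPU cores except one
results = Parallel(n_jobs=-2)(delayed(square)(x) for x in range(10))
results = np.array(results)
print(results.mean(), results.std())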
Example 1: len
# Required import: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import std [as alias]
        lytf += len(ytf)
        lypf += len(ypf)
        if lytf == 0:
            print('Warning: No steps annotation for', ex.id)
            ch_r, lytf = 0, 1
        if lypf == 0:
            ch_p, lypf = 0, 1
        return [ch_r/lytf, ch_p/lypf]

    del_PR = delayed(PR)
    PRS = Parallel(n_jobs=-2)(del_PR(ex) for ex in range(N))
    PRS = np.array([prs for prs in PRS if prs is not None])
    t_batch = time() - t_start
    score = list(PRS.mean(axis=0))
    score += list(PRS.std(axis=0))
    res['results'] += [score]
    res['patterns'] += [patterns]
    results += [res]
    t_batch = time() - t_start
    print('-'*79)
    print('Batch : {:03}/{:03}'.format(simu, n_batch))
    print('Time batch : {:.2f}s'.format(t_batch))
    print('Train: {}, Test: {}'.format(c_train[0], c_test[0]))
    print('Score: {0:.2f}({2:.2f}), {1:.2f} ({3:.2f})'
          ''.format(*(np.mean(res['results'], axis=0))))
    print('-'*79+'\n\n')
    np.save('save_simu/cross_class_IEEE.npy', results)
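Example 1 is a fragment: the top of the PR function, the enclosing batch loop, and the variable setup are cut off above the snippet. Stripped of those project-specific details, the pattern it demonstrates is: wrap a per-example metric with delayed, dispatch it through Parallel, drop failed examples, then reduce with .mean and .std along the example axis. A hypothetical, self-contained sketch of that pattern (precision_recall, the inputs, and the failure condition are made up) might look like this:

import numpy as np
from time import time
from joblib import Parallel, delayed

def precision_recall(ex):
    # hypothetical per-example metric; None marks an unusable example
    if ex % 7 == 0:
        return None
    return [1.0 / (ex + 1), 1.0 / (ex + 2)]

t_start = time()
scores = Parallel(n_jobs=-2)(delayed(precision_recall)(ex) for ex in range(100))
scores = np.array([s for s in scores if s is not None])
# mean recall, mean precision, then their standard deviations, as in the snippet
score = list(scores.mean(axis=0)) + list(scores.std(axis=0))
print('Time batch : {:.2f}s'.format(time() - t_start))
print('Score: {0:.2f}({2:.2f}), {1:.2f} ({3:.2f})'.format(*score))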
Example 2: bootstrap_rdm
# Required import: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import std [as alias]
#......... part of the code is omitted here .........
    if perm_idx_list is None:
        # with perm_idx_list, it's your own adventure.
        assert bootstrap_subject or bootstrap_condition, 'you must do bootstrap on something, unless you have idx list'
    # let's create reshaped rdms.
    ref_rdms_square = []
    model_rdms_square = []
    for ref_rdm in ref_rdms:
        assert ref_rdm.ndim == 1
        ref_rdms_square.append(squareform(ref_rdm))
    for model_rdm in model_rdms:  # here this model_rdms can be any iterable returning a 1d model rdm every time.
        assert model_rdm.ndim == 1
        model_rdms_square.append(squareform(model_rdm))
    ref_rdms_square = np.asarray(ref_rdms_square)
    model_rdms_square = np.asarray(model_rdms_square)
    assert ref_rdms_square.ndim == 3 and model_rdms_square.ndim == 3
    n_ref_rdm = ref_rdms_square.shape[0]
    n_model_rdm = model_rdms_square.shape[0]
    if legacy:
        assert not one_side, "legacy p-value computation only supports two side computation"
        if similarity_ref is None:  # for legacy, you can specify it to None
            similarity_ref = np.zeros((n_model_rdm,))
    assert similarity_ref.shape == (n_model_rdm,)
    rdm_h, rdm_w = ref_rdms_square.shape[1:]
    assert (rdm_h, rdm_w) == model_rdms_square.shape[1:] and rdm_h == rdm_w
    if perm_idx_list is None:
        rng_state_subject = np.random.RandomState(rng_state_subject_seed)
        rng_state_condition = np.random.RandomState(rng_state_condition_seed)
        if bootstrap_subject:
            perm_idx_list_subject_generator = (rng_state_subject.randint(n_ref_rdm, size=(n_ref_rdm,)) for _ in
                                               range(n))
        else:
            perm_idx_list_subject_generator = (np.arange(n_ref_rdm) for _ in range(n))
        if bootstrap_condition:
            perm_idx_list_condition_generator = (rng_state_condition.randint(rdm_h, size=(rdm_h,)) for _ in range(n))
        else:
            perm_idx_list_condition_generator = (np.arange(rdm_h) for _ in range(n))
        # subject then condition.
        perm_idx_list = izip(perm_idx_list_subject_generator, perm_idx_list_condition_generator)
    if debug:
        perm_idx_list = list(perm_idx_list)
    bootstrap_rdm_helper_partial = partial(bootstrap_rdm_helper,
                                           n_model_rdm=n_model_rdm,
                                           similarity_type=similarity_type,
                                           computation_method=computation_method)
    # then collect all.
    if parallel:
        similarity_all_bootstrap = Parallel(n_jobs=n_jobs, verbose=verbose, max_nbytes=max_nbytes)(
            delayed(bootstrap_rdm_helper_partial)(perm_idx, ref_rdms_square, model_rdms_square) for perm_idx in
            perm_idx_list)
    else:
        similarity_all_bootstrap = [bootstrap_rdm_helper_partial(perm_idx, ref_rdms_square, model_rdms_square) for
                                    perm_idx in perm_idx_list]
    similarity_all_bootstrap = np.asarray(similarity_all_bootstrap).T
    assert similarity_all_bootstrap.shape == (n_model_rdm, n)
    # then let's do the statistical analysis.
    # use ddof to be 1 to be more correct, since we are now doing statistical analysis.
    error_bars = similarity_all_bootstrap.std(axis=1, ddof=1)
    assert error_bars.shape == (n_model_rdm,)
    pairwise_p_matrix = np.empty((n_model_rdm, n_model_rdm))
    p_matrix_it = np.nditer(pairwise_p_matrix, flags=['multi_index'], op_flags=[['writeonly']])
    similarity_ref_diff = similarity_ref[:, np.newaxis] - similarity_ref[np.newaxis, :]
    assert similarity_ref_diff.shape == (n_model_rdm, n_model_rdm)
    while not p_matrix_it.finished:
        i_row, j_col = p_matrix_it.multi_index
        if i_row == j_col:
            p_matrix_it[0] = np.nan
        else:
            # get the differences.
            similarity_diff = similarity_ref_diff[i_row, j_col]
            similarity_diff_bootstrap = similarity_all_bootstrap[i_row] - similarity_all_bootstrap[j_col]
            similarity_diff_bootstrap_normed = similarity_diff_bootstrap - similarity_diff_bootstrap.mean()
            if one_side:
                p_matrix_it[0] = np.mean(similarity_diff_bootstrap_normed > similarity_diff)
            else:
                if legacy:
                    p_matrix_it[0] = 2 * min(np.mean(similarity_diff_bootstrap < 0),
                                             np.mean(similarity_diff_bootstrap > 0))
                else:
                    p_matrix_it[0] = np.mean(abs(similarity_diff_bootstrap_normed) > abs(similarity_diff))
        p_matrix_it.iternext()
    return_result = {'bootstrap_similarity': similarity_all_bootstrap,
                     'bootstrap_std': error_bars,
                     'pairwise_p_matrix': pairwise_p_matrix}
    if debug:
        return_result['perm_idx_list'] = perm_idx_list
    return return_result
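Example 2 applies the same idea to bootstrap resampling: a list (or generator) of resampling indices is prepared up front, each resample is evaluated by a helper bound with functools.partial, the per-resample results are dispatched through Parallel when parallel=True, and the bootstrap standard error is the standard deviation (with ddof=1) across resamples. Below is a condensed, hypothetical sketch of that dispatch-then-std pattern, not the bootstrap_rdm code itself; bootstrap_stat, the data, and the seed are invented for illustration.

import numpy as np
from functools import partial
from joblib import Parallel, delayed

def bootstrap_stat(idx, data):
    # hypothetical helper: one statistic per bootstrap resample
    return data[idx].mean()

rng = np.random.RandomState(0)
data = rng.randn(50)
n_boot = 200
idx_list = [rng.randint(len(data), size=len(data)) for _ in range(n_boot)]

helper = partial(bootstrap_stat, data=data)
stats = np.asarray(Parallel(n_jobs=2)(delayed(helper)(idx) for idx in idx_list))
# ddof=1, as in the example, gives the unbiased bootstrap standard error
print(stats.std(ddof=1))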