This article compiles typical usage examples of the numpy.std method in Python. If you are wondering how to call numpy.std, or what it is used for, the curated code examples below may help. You can also explore further usage examples from the numpy module, where this method lives.
Fifteen code examples of numpy.std are shown below, sorted by popularity by default.
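Before the full examples, here is a minimal, self-contained sketch of numpy.std on a small one-dimensional array (the sample values are made up for illustration):

import numpy as np

data = np.array([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
print(np.mean(data))  # 5.0
print(np.std(data))   # 2.0 (square root of the mean squared deviation)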
Example 1: __init__
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def __init__(self, bam, keepReads=False):
    self.insertSizes = []
    self.readLengths = []
    self.orientations = []
    self._insertSizeKDE = None
    self.singleEnded = False
    self._insertSizeScores = {}  # cache

    try:
        self.insertSizes, self.reads, self.orientations, self.readLengths = sampleInsertSizes(bam, keepReads=keepReads)
        if len(self.insertSizes) > 1:
            logging.info(" insert size mean: {:.2f} std: {:.2f}".format(
                numpy.mean(self.insertSizes), numpy.std(self.insertSizes)))
    except ValueError as e:
        print("*"*100, "here")
        print("ERROR:", e)
Example 2: validate_on_lfw
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
    num_pairs = len(actual_issame)

    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)
    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
Example 3: get_graph_stats
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def get_graph_stats(graph_obj_handle, prop='degrees'):
    # if prop == 'degrees':
    num_cores = multiprocessing.cpu_count()
    inputs = [int(i*len(graph_obj_handle)/num_cores) for i in range(num_cores)] + [len(graph_obj_handle)]
    res = Parallel(n_jobs=num_cores)(delayed(get_values)(graph_obj_handle, inputs[i], inputs[i+1], prop)
                                     for i in range(num_cores))

    stat_dict = {}

    if 'degrees' in prop:
        stat_dict['degrees'] = list(set([d for core_res in res for file_res in core_res for d in file_res['degrees']]))
    if 'edge_labels' in prop:
        stat_dict['edge_labels'] = list(set([d for core_res in res for file_res in core_res for d in file_res['edge_labels']]))
    if 'target_mean' in prop or 'target_std' in prop:
        param = np.array([file_res['params'] for core_res in res for file_res in core_res])
        if 'target_mean' in prop:
            stat_dict['target_mean'] = np.mean(param, axis=0)
        if 'target_std' in prop:
            stat_dict['target_std'] = np.std(param, axis=0)

    return stat_dict
Example 4: upper_bollinger_band
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def upper_bollinger_band(data, period, std_mult=2.0):
    """
    Upper Bollinger Band.

    Formula:
    u_bb = SMA(t) + STD(SMA(t-n:t)) * std_mult
    """
    check_for_period_error(data, period)
    period = int(period)
    simple_ma = sma(data, period)[period-1:]

    upper_bb = []
    for idx in range(len(data) - period + 1):
        std_dev = np.std(data[idx:idx + period])
        upper_bb.append(simple_ma[idx] + std_dev * std_mult)
    upper_bb = fill_for_noncomputable_vals(data, upper_bb)

    return np.array(upper_bb)
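The loop above is simply a rolling window: for each window it takes the simple moving average plus std_mult times the window's standard deviation. As a quick sanity check of that formula, here is a plain-numpy sketch that does not rely on this library's helpers (the price values are made up):

import numpy as np

prices = [10.0, 10.5, 11.0, 10.8, 11.2, 11.5, 11.3]
period, std_mult = 3, 2.0
upper = [np.mean(prices[i:i + period]) + std_mult * np.std(prices[i:i + period])
         for i in range(len(prices) - period + 1)]
print(upper)  # one upper-band value per complete window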
Example 5: lower_bollinger_band
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def lower_bollinger_band(data, period, std=2.0):
    """
    Lower Bollinger Band.

    Formula:
    l_bb = SMA(t) - STD(SMA(t-n:t)) * std_mult
    """
    check_for_period_error(data, period)
    period = int(period)
    simple_ma = sma(data, period)[period-1:]

    lower_bb = []
    for idx in range(len(data) - period + 1):
        std_dev = np.std(data[idx:idx + period])
        lower_bb.append(simple_ma[idx] - std_dev * std)
    lower_bb = fill_for_noncomputable_vals(data, lower_bb)

    return np.array(lower_bb)
Example 6: bandwidth
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def bandwidth(data, period, std=2.0):
    """
    Bandwidth.

    Formula:
    bw = (u_bb - l_bb) / m_bb
    """
    check_for_period_error(data, period)

    period = int(period)
    bandwidth = ((upper_bollinger_band(data, period, std) -
                  lower_bollinger_band(data, period, std)) /
                 middle_bollinger_band(data, period, std))
    return bandwidth
Example 7: standard_deviation
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def standard_deviation(data, period):
    """
    Standard Deviation.

    Formula:
    std = sqrt(avg(abs(x - avg(x))^2))
    """
    check_for_period_error(data, period)

    stds = list(map(
        lambda idx: np.std(data[idx+1-period:idx+1], ddof=1),
        range(period-1, len(data))
        ))
    stds = fill_for_noncomputable_vals(data, stds)
    return stds
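Note that this helper passes ddof=1, so each window uses the sample standard deviation (divide by N-1), whereas numpy.std defaults to the population form (divide by N). A tiny illustration of the difference, with made-up values:

import numpy as np

window = np.array([1.0, 2.0, 3.0, 4.0])
print(np.std(window))          # ~1.118, ddof=0: divides the squared deviations by N
print(np.std(window, ddof=1))  # ~1.291, ddof=1: divides by N-1 (sample estimate)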
Example 8: log
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def log(self):
    end_idxs = np.nonzero(self._dones)[0] + 1

    returns = []
    start_idx = 0
    for end_idx in end_idxs:
        rewards = self._rewards[start_idx:end_idx]
        returns.append(np.sum(rewards))
        start_idx = end_idx

    logger.record_tabular('ReturnAvg', np.mean(returns))
    logger.record_tabular('ReturnStd', np.std(returns))
    logger.record_tabular('ReturnMin', np.min(returns))
    logger.record_tabular('ReturnMax', np.max(returns))

##################
### Tensorflow ###
##################
Example 9: update_critic
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def update_critic(self, ob_no, hidden, q_n):
    """
    given:
        self.num_value_iters
        self.l2_reg
    arguments:
        ob_no: (minibsize, history, meta_obs_dim)
        hidden: (minibsize, self.gru_size)
        q_n: (minibsize)
    requires:
        self.num_value_iters
    """
    target_n = (q_n - np.mean(q_n)) / (np.std(q_n) + 1e-8)
    for k in range(self.num_value_iters):
        critic_loss, _ = self.sess.run(
            [self.critic_loss, self.critic_update_op],
            feed_dict={self.sy_target_n: target_n, self.sy_ob_no: ob_no, self.sy_hidden: hidden})
    return critic_loss
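The first line is the usual normalization trick: the critic targets are rescaled to zero mean and unit standard deviation, with a small 1e-8 term guarding against division by zero when all targets are identical. A standalone sketch of just that step, with made-up numbers:

import numpy as np

q_n = np.array([10.0, 12.0, 8.0, 14.0])
target_n = (q_n - np.mean(q_n)) / (np.std(q_n) + 1e-8)
print(np.mean(target_n), np.std(target_n))  # approximately 0.0 and 1.0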
Example 10: get
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def get(self):
    tpr, fpr, accuracy, threshold = calculate_roc(
        self.thresholds, np.asarray(self.dists),
        np.asarray(self.issame), self.nfolds)
    val, val_std, far = calculate_val(
        self.thresholds, np.asarray(self.dists),
        np.asarray(self.issame), self.far_target, self.nfolds)
    acc, acc_std = np.mean(accuracy), np.std(accuracy)
    threshold = (1 - threshold) if self.dist_type == 'cosine' else threshold
    return tpr, fpr, acc, threshold, val, val_std, far, acc_std

# code below is modified from project <Facenet (David Sandberg)> and
# <Gluon-Face>
Example 11: extractMeanDataStats
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def extractMeanDataStats(size=[200, 200, 100],
                         postfix='_200x200x100orig',
                         main_folder_path='../../Data/MS2017b/',
                         ):
    scan_folders = glob.glob(main_folder_path + 'scans/*')
    img_path = 'pre/FLAIR' + postfix + '.nii.gz'
    segm_path = 'wmh' + postfix + '.nii.gz'

    shape_ = [len(scan_folders), size[0], size[1], size[2]]
    arr = np.zeros(shape_)
    for i, sf in enumerate(scan_folders):
        arr[i, :, :, :] = numpyFromScan(os.path.join(sf, img_path)).squeeze()

    arr /= len(scan_folders)

    means = np.mean(arr)
    stds = np.std(arr, axis=0)

    np.save(main_folder_path + 'extra_data/std' + postfix, stds)
    np.save(main_folder_path + 'extra_data/mean' + postfix, means)
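Here np.std(arr, axis=0) computes one standard deviation per voxel across all scans (element-wise over the first dimension), rather than a single scalar. A minimal illustration on a small stack of made-up 2x2 "images":

import numpy as np

stack = np.array([[[1.0, 2.0], [3.0, 4.0]],
                  [[3.0, 2.0], [5.0, 0.0]]])  # shape (2, 2, 2): two samples
print(np.std(stack, axis=0))  # shape (2, 2): per-element std across the two samples
print(np.std(stack))          # scalar: std over all elements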
Example 12: reverse_generator
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def reverse_generator(generator, X_sample, y_sample, title):
    """Gradient descent to map images back to their latent vectors."""
    latent_vec = np.random.normal(size=(1, 100))

    # Function for figuring out how to bump the input.
    target = K.placeholder()
    loss = K.sum(K.square(generator.outputs[0] - target))
    grad = K.gradients(loss, generator.inputs[0])[0]
    update_fn = K.function(generator.inputs + [target], [grad])

    # Repeatedly apply the update rule.
    xs = []
    for i in range(60):
        print('%d: latent_vec mean=%f, std=%f'
              % (i, np.mean(latent_vec), np.std(latent_vec)))
        xs.append(generator.predict_on_batch([latent_vec, y_sample]))
        for _ in range(10):
            update_vec = update_fn([latent_vec, y_sample, X_sample])[0]
            latent_vec -= update_vec * update_rate

    # Plots the samples.
    xs = np.concatenate(xs, axis=0)
    plot_as_gif(xs, X_sample, title)
Example 13: addVariantResults
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def addVariantResults(self, dataHub):
    variant = str(dataHub.variant)

    for sampleName, sample in dataHub.samples.items():
        counts = collections.Counter()
        reasons = {}
        alnScores = collections.defaultdict(list)
        insertSizes = collections.defaultdict(list)

        # collect stats
        for alnCollection in sample.alnCollections:
            allele = alnCollection.choice
            counts[allele] += 1

            if not allele in reasons:
                reasons[allele] = collections.Counter()
            reasons[allele][alnCollection.why] += 1

            alnScores[allele].append(sum(aln.score for aln in alnCollection.chosenSet().getAlignments()))
            insertSizes[allele].append(len(alnCollection.chosenSet()))

        # record stats
        for allele, count in counts.items():
            self.stats.append([variant, sampleName, allele, "count", count])

        for allele in reasons:
            for reason in reasons[allele]:
                self.stats.append([variant, sampleName, allele, "reason_{}".format(reason), reasons[allele][reason]])

        for allele in alnScores:
            self.stats.append([variant, sampleName, allele, "alnScore_mean", numpy.mean(alnScores[allele])])
            self.stats.append([variant, sampleName, allele, "alnScore_std", numpy.std(alnScores[allele])])

        for allele in insertSizes:
            self.stats.append([variant, sampleName, allele, "insertSize_mean", numpy.mean(insertSizes[allele])])
            self.stats.append([variant, sampleName, allele, "insertSize_std", numpy.std(insertSizes[allele])])
Example 14: bias_variance_decomposition
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def bias_variance_decomposition(self, graphs, targets,
                                cv=5, n_bootstraps=10):
    """bias_variance_decomposition."""
    x = self.transform(graphs)
    score_list = []
    for i in range(n_bootstraps):
        scores = cross_val_score(
            self.model, x, targets, cv=cv)
        score_list.append(scores)
    score_list = np.array(score_list)
    mean_scores = np.mean(score_list, axis=1)
    std_scores = np.std(score_list, axis=1)
    return mean_scores, std_scores
Example 15: output_avg_and_std
# Required import: import numpy [as alias]
# Or: from numpy import std [as alias]
def output_avg_and_std(iterable):
    """output_avg_and_std."""
    print(('score: %.2f +-%.2f' % (np.mean(iterable), np.std(iterable))))
    return iterable