This article collects typical usage examples of Python's numpy.mean method. If you are wondering what numpy.mean does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples for the numpy module itself.
The following presents 15 code examples of the numpy.mean method, sorted by popularity by default.
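Before the repository excerpts, a short standalone sketch (written for this article, not taken from any of the projects below) may help clarify the numpy.mean behaviors those examples rely on: averaging over all elements, reducing along an axis, keeping the reduced dimension, and widening the accumulation dtype.

import numpy as np

data = np.array([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])

print(np.mean(data))                      # mean over every element -> 3.5
print(np.mean(data, axis=0))              # column means -> [2.5 3.5 4.5]
print(np.mean(data, axis=1))              # row means -> [2. 5.]
print(data.mean(axis=1, keepdims=True))   # keep the reduced axis -> shape (2, 1)

# integer input is accumulated in float64 by default; dtype= widens low-precision floats
ints = np.array([1, 2, 3, 4])
print(np.mean(ints))                      # 2.5 (float64)
halves = np.array([0.1, 0.2, 0.3], dtype=np.float16)
print(np.mean(halves, dtype=np.float64))  # accumulate in float64 to reduce rounding error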
Example 1: __init__
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def __init__(self, bam, keepReads=False):
    self.insertSizes = []
    self.readLengths = []
    self.orientations = []
    self._insertSizeKDE = None
    self.singleEnded = False
    self._insertSizeScores = {}  # cache

    try:
        self.insertSizes, self.reads, self.orientations, self.readLengths = sampleInsertSizes(bam, keepReads=keepReads)
        if len(self.insertSizes) > 1:
            # summarize the sampled insert-size distribution
            logging.info(" insert size mean: {:.2f} std: {:.2f}".format(numpy.mean(self.insertSizes), numpy.std(self.insertSizes)))
    except ValueError as e:
        print("*" * 100, "here")
        print("ERROR:", e)
Example 2: extract_sequence_and_score
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def extract_sequence_and_score(graph=None):
    # make a dict with positions as keys and lists of node ids as values
    pos_to_ids = defaultdict(list)
    for u in graph.nodes():
        if 'position' not in graph.node[u]:  # every node must carry a 'position' attribute
            raise Exception('Missing "position" attribute in node:%s %s' % (u, graph.node[u]))
        else:
            pos = graph.node[u]['position']
        # accumulate all node ids
        pos_to_ids[pos] += [u]
    # extract the sequence of labels and importances
    seq = [None] * len(pos_to_ids)
    score = [0] * len(pos_to_ids)
    for pos in sorted(pos_to_ids):
        ids = pos_to_ids[pos]
        labels = [graph.node[u].get('label', 'N/A') for u in ids]
        # check that all labels for the same position are identical
        assert(sum([1 for label in labels if label == labels[0]]) == len(labels)
               ), 'ERROR: non identical labels referring to same position: %s %s' % (pos, labels)
        seq[pos] = labels[0]
        # average all importance scores for the same position
        importances = [graph.node[u].get('importance', 0) for u in ids]
        score[pos] = np.mean(importances)
    return seq, score
Example 3: train
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def train(self):
    while (self.epoch < self.option.max_epoch and not self.early_stopped):
        self.one_epoch_train()
        self.one_epoch_valid()
        self.one_epoch_test()
        self.epoch += 1
        model_path = self.saver.save(self.sess,
                                     self.option.model_path,
                                     global_step=self.epoch)
        print("Model saved at %s" % model_path)

        if self.early_stop():
            self.early_stopped = True
            print("Early stopped at epoch %d" % (self.epoch))

    # average the per-epoch test statistics, then report the best epoch
    all_test_in_top = [np.mean(x[1]) for x in self.test_stats]
    best_test_epoch = np.argmax(all_test_in_top)
    best_test = all_test_in_top[best_test_epoch]

    msg = "Best test in top: %0.4f at epoch %d." % (best_test, best_test_epoch + 1)
    print(msg)
    self.log_file.write(msg + "\n")
    pickle.dump([self.train_stats, self.valid_stats, self.test_stats],
                open(os.path.join(self.option.this_expsdir, "results.pckl"), "wb"))  # pickle needs binary mode under Python 3
Example 4: _raise_on_mode
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def _raise_on_mode(self, mode):
    """
    Checks that the provided query mode is one of the accepted values. If
    not, raises a :obj:`ValueError`.
    """
    valid_modes = [
        'random_sample',
        'random_sample_per_pix',
        'samples',
        'median',
        'mean',
        'best',
        'percentile']

    if mode not in valid_modes:
        raise ValueError(
            '"{}" is not a valid `mode`. Valid modes are:\n'
            '  {}'.format(mode, valid_modes)
        )
Example 5: validate_on_lfw
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
    num_pairs = len(actual_issame)

    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        # load both images of the pair and convert BGR -> RGB
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)
    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
Example 6: __init__
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def __init__(self, node_i, node_j, node_k, node_l, t, E, mu, rho, name=None):
    # corner nodes (self.__nodes and self.local_csys are assumed to be set up by the enclosing class)
    self.__nodes.append(node_i)
    self.__nodes.append(node_j)
    self.__nodes.append(node_k)
    self.__nodes.append(node_l)
    self.__t = t
    center = np.mean([node_i, node_j, node_k, node_l])
    # self.local_csys = CoordinateSystem.cartisian(center, nodes[4], nodes[5])
    self.__alpha = []  # the angle between each edge and local-x, to be added
    self.__alpha.append(self.angle(node_i, node_j, self.local_csys.x))
    self.__alpha.append(self.angle(node_j, node_k, self.local_csys.x))
    self.__alpha.append(self.angle(node_k, node_l, self.local_csys.x))
    self.__alpha.append(self.angle(node_l, node_i, self.local_csys.x))
    self.__K = np.zeros((24, 24))
Example 7: _bbox_forward_train
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
                        img_metas):
    num_imgs = len(img_metas)
    rois = bbox2roi([res.bboxes for res in sampling_results])
    bbox_results = self._bbox_forward(x, rois)

    bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
                                              gt_labels, self.train_cfg)
    # record the `beta_topk`-th smallest target
    # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
    # and bbox_weights, respectively
    pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
    num_pos = len(pos_inds)
    cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
    beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
                    num_pos)
    cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
    self.beta_history.append(cur_target)
    loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
                                    bbox_results['bbox_pred'], rois,
                                    *bbox_targets)

    bbox_results.update(loss_bbox=loss_bbox)
    return bbox_results
Example 8: update_hyperparameters
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def update_hyperparameters(self):
    """Update hyperparameters such as the IoU thresholds of the assigner and
    the beta of the SmoothL1 loss, based on the training statistics.

    Returns:
        tuple[float]: the updated ``iou_thr`` and ``beta``.
    """
    new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
                      np.mean(self.iou_history))
    self.iou_history = []
    self.bbox_assigner.pos_iou_thr = new_iou_thr
    self.bbox_assigner.neg_iou_thr = new_iou_thr
    self.bbox_assigner.min_pos_iou = new_iou_thr
    new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
                   np.median(self.beta_history))
    self.beta_history = []
    self.bbox_head.loss_bbox.beta = new_beta
    return new_iou_thr, new_beta
Example 9: fast_eval_recall
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
    gt_bboxes = []
    for i in range(len(self.img_ids)):
        ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
        ann_info = self.coco.load_anns(ann_ids)
        if len(ann_info) == 0:
            gt_bboxes.append(np.zeros((0, 4)))
            continue
        bboxes = []
        for ann in ann_info:
            if ann.get('ignore', False) or ann['iscrowd']:
                continue
            x1, y1, w, h = ann['bbox']
            bboxes.append([x1, y1, x1 + w, y1 + h])
        bboxes = np.array(bboxes, dtype=np.float32)
        if bboxes.shape[0] == 0:
            bboxes = np.zeros((0, 4))
        gt_bboxes.append(bboxes)

    recalls = eval_recalls(
        gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
    ar = recalls.mean(axis=1)
    return ar
Example 10: cal_train_time
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def cal_train_time(log_dicts, args):
    for i, log_dict in enumerate(log_dicts):
        print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}')
        all_times = []
        for epoch in log_dict.keys():
            if args.include_outliers:
                all_times.append(log_dict[epoch]['time'])
            else:
                # drop the first iteration of each epoch, which is usually an outlier
                all_times.append(log_dict[epoch]['time'][1:])
        all_times = np.array(all_times)
        epoch_ave_time = all_times.mean(-1)
        slowest_epoch = epoch_ave_time.argmax()
        fastest_epoch = epoch_ave_time.argmin()
        std_over_epoch = epoch_ave_time.std()
        print(f'slowest epoch {slowest_epoch + 1}, '
              f'average time is {epoch_ave_time[slowest_epoch]:.4f}')
        print(f'fastest epoch {fastest_epoch + 1}, '
              f'average time is {epoch_ave_time[fastest_epoch]:.4f}')
        print(f'time std over epochs is {std_over_epoch:.4f}')
        print(f'average iter time: {np.mean(all_times):.4f} s/iter')
        print()
Example 11: test_generate_np_targeted_gives_adversarial_example
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def test_generate_np_targeted_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
    x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                    binary_search_steps=3,
                                    initial_const=1,
                                    clip_min=-5, clip_max=5,
                                    batch_size=100, y_target=feed_labs)

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

    # the targeted attack should hit the target label on more than 90% of the inputs
    self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs)
                    > 0.9)
Example 12: test_generate_gives_adversarial_example
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def test_generate_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), orig_labs] = 1
    x = tf.placeholder(tf.float32, x_val.shape)
    y = tf.placeholder(tf.float32, feed_labs.shape)

    x_adv_p = self.attack.generate(x, max_iterations=100,
                                   binary_search_steps=3,
                                   initial_const=1,
                                   clip_min=-5, clip_max=5,
                                   batch_size=100, y=y)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

    # the attack should change the label for more than 90% of the inputs
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example 13: test_attack_strength
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def test_attack_strength(self):
    """
    If clipping is not done at each iteration (not using clip_min and
    clip_max), this attack fails by
    np.mean(orig_labels == new_labels) == .5
    """
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    x_adv = self.attack.generate_np(x_val, eps=1.0, eps_iter=0.05,
                                    clip_min=0.5, clip_max=0.7,
                                    nb_iter=5)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example 14: get_graph_stats
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def get_graph_stats(graph_obj_handle, prop='degrees'):
    # if prop == 'degrees':
    num_cores = multiprocessing.cpu_count()
    inputs = [int(i * len(graph_obj_handle) / num_cores) for i in range(num_cores)] + [len(graph_obj_handle)]
    res = Parallel(n_jobs=num_cores)(delayed(get_values)(graph_obj_handle, inputs[i], inputs[i + 1], prop)
                                     for i in range(num_cores))

    stat_dict = {}

    if 'degrees' in prop:
        stat_dict['degrees'] = list(set([d for core_res in res for file_res in core_res for d in file_res['degrees']]))
    if 'edge_labels' in prop:
        stat_dict['edge_labels'] = list(set([d for core_res in res for file_res in core_res for d in file_res['edge_labels']]))
    if 'target_mean' in prop or 'target_std' in prop:
        param = np.array([file_res['params'] for core_res in res for file_res in core_res])
        if 'target_mean' in prop:
            stat_dict['target_mean'] = np.mean(param, axis=0)
        if 'target_std' in prop:
            stat_dict['target_std'] = np.std(param, axis=0)

    return stat_dict
Example 15: __forward
# Required imports: import numpy [as alias]
# Or: from numpy import mean [as alias]
def __forward(self, x, train_flg):
    if self.running_mean is None:
        N, D = x.shape
        self.running_mean = np.zeros(D)
        self.running_var = np.zeros(D)

    if train_flg:
        # normalize with the statistics of the current mini-batch
        mu = x.mean(axis=0)
        xc = x - mu
        var = np.mean(xc ** 2, axis=0)
        std = np.sqrt(var + 10e-7)
        xn = xc / std

        self.batch_size = x.shape[0]
        self.xc = xc
        self.xn = xn
        self.std = std
        self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * mu
        self.running_var = self.momentum * self.running_var + (1 - self.momentum) * var
    else:
        # at inference time, use the running estimates instead
        xc = x - self.running_mean
        xn = xc / (np.sqrt(self.running_var + 10e-7))

    out = self.gamma * xn + self.beta
    return out