This article collects typical usage examples of the numpy.stack method in Python. If you have been wondering what numpy.stack does, how to call it, and what real-world code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the numpy module.
The following presents 15 code examples of the numpy.stack method, sorted by popularity by default.
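Before diving in, a quick refresher: numpy.stack joins a sequence of arrays with identical shapes along a new axis, whereas numpy.concatenate joins along an existing one. A minimal standalone sketch:

import numpy as np

a = np.arange(6).reshape(2, 3)
b = a + 10
print(np.stack([a, b]).shape)           # (2, 2, 3) -- new leading axis
print(np.stack([a, b], axis=-1).shape)  # (2, 3, 2) -- new trailing axis
print(np.concatenate([a, b]).shape)     # (4, 3)    -- no new axis, for comparison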
Example 1: __init__
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def __init__(self, masks, height, width):
self.height = height
self.width = width
if len(masks) == 0:
self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)
else:
assert isinstance(masks, (list, np.ndarray))
if isinstance(masks, list):
assert isinstance(masks[0], np.ndarray)
assert masks[0].ndim == 2 # (H, W)
else:
assert masks.ndim == 3 # (N, H, W)
self.masks = np.stack(masks).reshape(-1, height, width)
assert self.masks.shape[1] == self.height
assert self.masks.shape[2] == self.width
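Note the empty-list branch above: np.stack raises a ValueError on an empty sequence, so the constructor falls back to np.empty((0, H, W)). A minimal sketch of both paths:

import numpy as np

masks = [np.ones((2, 3), dtype=np.uint8), np.zeros((2, 3), dtype=np.uint8)]
print(np.stack(masks).shape)                      # (2, 2, 3)
print(np.empty((0, 2, 3), dtype=np.uint8).shape)  # (0, 2, 3) -- the empty fallback
# np.stack([]) would raise ValueError: need at least one array to stack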
Example 2: test_monthly_mean_at_each_ind
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def test_monthly_mean_at_each_ind():
times_submonthly = pd.to_datetime(['2000-06-01', '2000-06-15',
'2000-07-04', '2000-07-19'])
times_means = pd.to_datetime(['2000-06-01', '2000-07-01'])
len_other_dim = 2
arr_submonthly = xr.DataArray(
np.random.random((len(times_submonthly), len_other_dim)),
dims=[TIME_STR, 'dim0'], coords={TIME_STR: times_submonthly}
)
arr_means = xr.DataArray(
np.random.random((len(times_means), len_other_dim)),
dims=arr_submonthly.dims, coords={TIME_STR: times_means}
)
actual = monthly_mean_at_each_ind(arr_means, arr_submonthly)
desired_values = np.stack([arr_means.values[0]] * len_other_dim +
[arr_means.values[1]] * len_other_dim,
axis=0)
desired = xr.DataArray(desired_values, dims=arr_submonthly.dims,
coords=arr_submonthly.coords)
assert actual.identical(desired)
Example 3: convert
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def convert(story):
# import pdb; pdb.set_trace()
sentence_arr, graphs, query_arr, answer_arr = story
node_id_w = graphs[2].shape[2]
edge_type_w = graphs[3].shape[3]
all_node_strengths = [np.zeros([1])]
all_node_ids = [np.zeros([1,node_id_w])]
for num_new_nodes, new_node_strengths, new_node_ids, _ in zip(*graphs):
last_strengths = all_node_strengths[-1]
last_ids = all_node_ids[-1]
cur_strengths = np.concatenate([last_strengths, new_node_strengths], 0)
cur_ids = np.concatenate([last_ids, new_node_ids], 0)
all_node_strengths.append(cur_strengths)
all_node_ids.append(cur_ids)
all_edges = graphs[3]
full_n_nodes = all_edges.shape[1]
    all_node_strengths = np.stack([np.pad(x, (0, full_n_nodes - x.shape[0]), 'constant') for x in all_node_strengths[1:]])
all_node_ids = np.stack([np.pad(x, ((0, full_n_nodes-x.shape[0]), (0, 0)), 'constant') for x in all_node_ids[1:]])
all_node_states = np.zeros([len(all_node_strengths), full_n_nodes,0])
return tuple(x[np.newaxis,...] for x in (all_node_strengths, all_node_ids, all_node_states, all_edges))
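The pad-then-stack idiom at the end is the standard way to batch ragged arrays, since np.stack requires identical shapes. A minimal sketch with 1-D arrays of varying length:

import numpy as np

ragged = [np.array([1., 2.]), np.array([3.]), np.array([4., 5., 6.])]
full = max(x.shape[0] for x in ragged)
padded = np.stack([np.pad(x, (0, full - x.shape[0]), 'constant') for x in ragged])
print(padded.shape)  # (3, 3); shorter rows are zero-padded on the right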
Example 4: assemble_batch
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def assemble_batch(story_fns, num_answer_words, format_spec):
stories = []
for sfn in story_fns:
with gzip.open(sfn,'rb') as f:
cvtd_story, _, _, _ = pickle.load(f)
stories.append(cvtd_story)
sents, graphs, queries, answers = zip(*stories)
cvtd_sents = np.array(sents, np.int32)
cvtd_queries = np.array(queries, np.int32)
max_ans_len = max(len(a) for a in answers)
cvtd_answers = np.stack([convert_answer(answer, num_answer_words, format_spec, max_ans_len) for answer in answers])
num_new_nodes, new_node_strengths, new_node_ids, next_edges = zip(*graphs)
num_new_nodes = np.stack(num_new_nodes)
new_node_strengths = np.stack(new_node_strengths)
new_node_ids = np.stack(new_node_ids)
next_edges = np.stack(next_edges)
return cvtd_sents, cvtd_queries, cvtd_answers, num_new_nodes, new_node_strengths, new_node_ids, next_edges
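The zip(*stories) and zip(*graphs) calls above transpose a list of per-sample tuples into per-field tuples, each of which np.stack then turns into one batch array. A minimal sketch of the pattern:

import numpy as np

samples = [(np.ones(2), np.zeros(3)) for _ in range(4)]  # 4 samples, 2 fields each
field_a, field_b = zip(*samples)
print(np.stack(field_a).shape, np.stack(field_b).shape)  # (4, 2) (4, 3)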
Example 5: validate_on_lfw
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def validate_on_lfw(model, lfw_160_path):
# Read the file containing the pairs used for testing
pairs = lfw.read_pairs('validation-LFW-pairs.txt')
# Get the paths for the corresponding images
paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
num_pairs = len(actual_issame)
all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
for k in tqdm.trange(num_pairs):
img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
batch = np.stack([img1, img2], axis=0)
embeddings = model.eval_embeddings(batch)
all_embeddings[k * 2: k * 2 + 2, :] = embeddings
tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)
print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
auc = metrics.auc(fpr, tpr)
print('Area Under Curve (AUC): %1.3f' % auc)
eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
print('Equal Error Rate (EER): %1.3f' % eer)
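Two details worth noting above: np.stack builds a two-image batch for a single forward pass, and the resulting embeddings are written into a preallocated (2N, 512) array by slice assignment. A minimal sketch of that storage pattern, with random vectors standing in for model output:

import numpy as np

num_pairs, dim = 3, 512
all_embeddings = np.zeros((num_pairs * 2, dim), dtype='float32')
for k in range(num_pairs):
    batch = np.stack([np.random.rand(dim), np.random.rand(dim)], axis=0)  # fake embeddings
    all_embeddings[k * 2: k * 2 + 2, :] = batch  # rows 2k and 2k+1
print(all_embeddings.shape)  # (6, 512)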
Example 6: offset_to_pts
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def offset_to_pts(self, center_list, pred_list):
"""Change from point offset to point coordinate."""
pts_list = []
for i_lvl in range(len(self.point_strides)):
pts_lvl = []
for i_img in range(len(center_list)):
pts_center = center_list[i_img][i_lvl][:, :2].repeat(
1, self.num_points)
pts_shift = pred_list[i_lvl][i_img]
yx_pts_shift = pts_shift.permute(1, 2, 0).view(
-1, 2 * self.num_points)
y_pts_shift = yx_pts_shift[..., 0::2]
x_pts_shift = yx_pts_shift[..., 1::2]
xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
pts_lvl.append(pts)
pts_lvl = torch.stack(pts_lvl, 0)
pts_list.append(pts_lvl)
return pts_list
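The slicing trick above deinterleaves (y, x) offset pairs and re-interleaves them as (x, y): even positions hold y, odd positions hold x, and stack(..., -1) followed by a flattening reshape swaps the order. The same trick in plain NumPy, as a standalone sketch:

import numpy as np

yx = np.array([[10., 1., 20., 2.]])  # (y0, x0, y1, x1) for two points
y, x = yx[..., 0::2], yx[..., 1::2]
xy = np.stack([x, y], -1).reshape(*yx.shape[:-1], -1)
print(xy)  # [[ 1. 10.  2. 20.]] -- now ordered (x0, y0, x1, y1)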
Example 7: _get_area_ratio
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
"""Compute area ratio of the gt mask inside the proposal and the gt
mask of the corresponding instance."""
num_pos = pos_proposals.size(0)
if num_pos > 0:
area_ratios = []
proposals_np = pos_proposals.cpu().numpy()
pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
# compute mask areas of gt instances (batch processing for speedup)
gt_instance_mask_area = gt_masks.areas
for i in range(num_pos):
gt_mask = gt_masks[pos_assigned_gt_inds[i]]
# crop the gt mask inside the proposal
bbox = proposals_np[i, :].astype(np.int32)
gt_mask_in_proposal = gt_mask.crop(bbox)
ratio = gt_mask_in_proposal.areas[0] / (
gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
area_ratios.append(ratio)
area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
pos_proposals.device)
else:
area_ratios = pos_proposals.new_zeros((0, ))
return area_ratios
Example 8: _get_bp_indexes_labranchor
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def _get_bp_indexes_labranchor(self, soi):
"""
Get indexes of branch point regions in given sequences.
:param soi: batch of sequences of interest for introns (intron-3..intron+6)
:return: array of predicted bp indexes
"""
encoded = [onehot(str(seq)[self.acc_i - 70:self.acc_i]) for seq in np.nditer(soi)]
labr_in = np.stack(encoded, axis=0)
out = self.labranchor.predict_on_batch(labr_in)
# for each row, pick the base with max branchpoint probability, and get its index
max_indexes = np.apply_along_axis(lambda x: self.acc_i - 70 + np.argmax(x), axis=1, arr=out)
# self.write_bp(max_indexes)
return max_indexes
# TODO boilerplate
# def write_bp(self, max_indexes):
# max_indexes = [str(seq) for seq in np.nditer(max_indexes)]
# with open(''.join([this_dir, "/../customBP/example_files/bp_idx_chr21_labr.txt"]), "a") as bp_idx_file:
# bp_idx_file.write('\n'.join(max_indexes))
# bp_idx_file.write('\n')
# bp_idx_file.close()
Example 9: apply_transform
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def apply_transform(x,
transform_matrix,
fill_mode='nearest',
cval=0.):
    x = np.rollaxis(x, 0, 0)  # no-op when the channel axis is already 0 (channels-first input)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=0,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
    x = np.rollaxis(x, 0, 0 + 1)  # likewise a no-op; channels remain on axis 0
return x
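A hedged usage sketch for the function above, assuming a channels-first image and a translation-only transform matrix. The ndi name it relies on must be scipy.ndimage; note that on SciPy >= 1.10 the deprecated ndi.interpolation namespace has been removed, and the call would be ndi.affine_transform instead:

import numpy as np
from scipy import ndimage as ndi  # provides the `ndi` name used above (older SciPy)

img = np.random.rand(3, 8, 8)    # channels-first: (C, H, W)
shift = np.array([[1., 0., 2.],  # identity rotation/scale,
                  [0., 1., 1.],  # translate 2 px along H, 1 px along W
                  [0., 0., 1.]])
out = apply_transform(img, shift, fill_mode='nearest')
print(out.shape)  # (3, 8, 8)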
Example 10: test_lstm_forget_bias
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def test_lstm_forget_bias():
forget_bias = 2.0
stack = gluon.rnn.SequentialRNNCell()
stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l0_'))
stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l1_'))
dshape = (32, 1, 200)
data = mx.sym.Variable('data')
sym, _ = stack.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.cpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
bias_argument = next(x for x in sym.list_arguments() if x.endswith('i2h_bias'))
expected_bias = np.hstack([np.zeros((100,)),
forget_bias * np.ones(100, ), np.zeros((2 * 100,))])
assert_allclose(mod.get_params()[0][bias_argument].asnumpy(), expected_bias)
Example 11: reset
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def reset(self, indices=None):
"""Reset the environment and convert the resulting observation.
Args:
indices: The batch indices of environments to reset; defaults to all.
Returns:
Batch of observations.
"""
if indices is None:
indices = np.arange(len(self._envs))
if self._blocking:
observs = [self._envs[index].reset() for index in indices]
else:
observs = [self._envs[index].reset(blocking=False) for index in indices]
observs = [observ() for observ in observs]
observ = np.stack(observs)
return observ
Example 12: process_outlier_and_stack
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def process_outlier_and_stack(interim_path, file_name, phase_str, datetime, processed_path):
data_nc = load_pkl(interim_path, file_name)
# Outlier processing
for v in obs_var:
data_nc['input_obs'][v] = process_outlier_and_normalize(data_nc['input_obs'][v], obs_range_dic[v])
for v in ruitu_var:
data_nc['input_ruitu'][v] = process_outlier_and_normalize(data_nc['input_ruitu'][v], ruitu_range_dic[v])
stacked_data = [data_nc['input_obs'][v] for v in obs_var]
stacked_input_obs = np.stack(stacked_data, axis=-1)
stacked_data = [data_nc['input_ruitu'][v] for v in ruitu_var]
stacked_input_ruitu = np.stack(stacked_data, axis=-1)
print(stacked_input_obs.shape) #(sample_ind, timestep, station_id, features)
print(stacked_input_ruitu.shape)
data_dic={'input_obs':stacked_input_obs,
'input_ruitu':stacked_input_ruitu}
#normalize
save_pkl(data_dic, processed_path, '{}_{}_norm.dict'.format(phase_str, datetime))
Author: BruceBinBoxing; Project: Deep_Learning_Weather_Forecasting; Lines: 24; Source: make_TestOnlineData_from_nc.py
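np.stack(..., axis=-1) above turns a list of per-variable arrays into a single array with a trailing feature axis. A minimal sketch, assuming three hypothetical weather variables on a (timestep, station) grid:

import numpy as np

t2m = np.random.rand(37, 10)   # stand-ins for per-variable arrays
rh2m = np.random.rand(37, 10)
w10m = np.random.rand(37, 10)
features = np.stack([t2m, rh2m, w10m], axis=-1)
print(features.shape)  # (37, 10, 3): (timestep, station_id, features)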
Example 13: predict
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def predict(self, batch_inputs, batch_ruitu):
assert batch_ruitu.shape[0] == batch_inputs.shape[0], 'Shape Error'
assert batch_inputs.shape[1] == 28 and batch_inputs.shape[2] == 10 and batch_inputs.shape[3] == 9, 'Error! Obs input shape must be (None, 28,10,9)'
assert batch_ruitu.shape[1] == 37 and batch_ruitu.shape[2] == 10 and batch_ruitu.shape[3] == 29, 'Error! Ruitu input shape must be (None, 37,10, 29)'
#all_pred={}
pred_result_list = []
for i in range(10):
#print('Predict for station: 9000{}'.format(i+1))
result = self.model.predict(x=[batch_inputs[:,:,i,:], batch_ruitu[:,:,i,:]])
result = np.squeeze(result, axis=0)
#all_pred[i] = result
pred_result_list.append(result)
#pass
pred_result = np.stack(pred_result_list, axis=0)
#return all_pred, pred_result
print('Predict shape (10,37,3) means (stationID, timestep, features). Features include: t2m, rh2m and w10m')
self.pred_result = pred_result
return pred_result
Example 14: __next__
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, interp=cv2.INTER_LINEAR)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Normalize RGB
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB
img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32) # uint8 to fp16/fp32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
return self.sources, img, img0, None
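After stacking, the loop converts the batch from OpenCV's BGR/NHWC layout to RGB/NCHW in one step: the [:, :, :, ::-1] slice reverses the channel axis and the transpose reorders dimensions. A minimal sketch of that conversion:

import numpy as np

batch = np.random.randint(0, 255, (2, 4, 4, 3), dtype=np.uint8)  # NHWC, BGR
chw = batch[:, :, :, ::-1].transpose(0, 3, 1, 2)                 # NCHW, RGB
chw = np.ascontiguousarray(chw, dtype=np.float32) / 255.0        # scale to [0.0, 1.0]
print(chw.shape)  # (2, 3, 4, 4)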
Example 15: reset
# Required import: import numpy [as alias]
# Or: from numpy import stack [as alias]
def reset(self, indices=None):
"""Reset the environment and convert the resulting observation.
Args:
indices: The batch indices of environments to reset; defaults to all.
Returns:
Batch of observations.
"""
if indices is None:
indices = np.arange(len(self._envs))
if self._blocking:
observs = [self._envs[index].reset() for index in indices]
else:
observs = [self._envs[index].reset(blocking=False) for index in indices]
observs = [observ() for observ in observs]
observ = np.stack(observs)
# TODO(piotrmilos): Do we really want this?
observ = observ.astype(np.float32)
return observ