This article collects typical usage examples of the ndarray.concat method from the Python package mxnet. If you are wondering what exactly ndarray.concat does or how to use it, the curated code examples below may help. You can also explore further usage examples of the module it belongs to, mxnet.ndarray.
The following presents 15 code examples of the ndarray.concat method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
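Before the examples, here is a minimal, self-contained sketch of nd.concat itself (the array values are hypothetical): the arrays to join are passed as positional arguments, the default concatenation axis is dim=1, and every axis other than dim must have matching sizes.
import mxnet as mx
from mxnet import ndarray as nd

a = mx.nd.array([[1, 2], [3, 4]])        # shape (2, 2)
b = mx.nd.array([[5, 6], [7, 8]])        # shape (2, 2)
print(nd.concat(a, b, dim=0).shape)      # (4, 2): rows stacked
print(nd.concat(a, b, dim=1).shape)      # (2, 4): columns appended
print(nd.concat(a, b).shape)             # (2, 4): default dim is 1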
Example 1: query
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def query(self, images):
    if self.pool_size == 0:
        return images
    return_images = []
    for image in images:
        image = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])
        if self.num_imgs < self.pool_size:
            self.num_imgs = self.num_imgs + 1
            self.images.append(image)
            return_images.append(image)
        else:
            p = random.uniform(0, 1)
            if p > 0.5:
                random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive
                tmp = self.images[random_id].copy()
                self.images[random_id] = image
                return_images.append(tmp)
            else:
                return_images.append(image)
    image_array = return_images[0].copyto(images.context)
    for image in return_images[1:]:
        image_array = nd.concat(image_array, image.copyto(images.context), dim=0)
    return image_array
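A minimal sketch of the batching pattern above, with hypothetical shapes: each (C, H, W) image gets a leading batch axis, and the resulting singletons are concatenated along dim=0 into one (N, C, H, W) batch.
import mxnet as mx
from mxnet import ndarray as nd

images = [mx.nd.random.uniform(shape=(3, 64, 64)) for _ in range(4)]
batch = images[0].expand_dims(axis=0)                          # (1, 3, 64, 64)
for image in images[1:]:
    batch = nd.concat(batch, image.expand_dims(axis=0), dim=0)
print(batch.shape)                                             # (4, 3, 64, 64)
# nd.stack(*images, axis=0) builds the same batch in a single call.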
Example 2: _retina_solve
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def _retina_solve(self):
    out, res, anchors = iter(self.exec_group.execs[0].outputs), [], []
    for fpn in self._fpn_anchors:
        scores = next(out)[:, -fpn.scales_shape:, :, :].transpose((0, 2, 3, 1))
        deltas = next(out).transpose((0, 2, 3, 1))
        res.append(concat(deltas.reshape((-1, 4)),
                          scores.reshape((-1, 1)), dim=1))
        anchors.append(self._get_runtime_anchors(*deltas.shape[1:3],
                                                 fpn.stride,
                                                 fpn.base_anchors))
    return concat(*res, dim=0), concatenate(anchors)
Example 3: _forward_alg
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def _forward_alg(self, feats):
    alphas = [[-10000.] * self.tagset_size]
    alphas[0][self.tag2idx[self.START_TAG]] = 0.
    alphas = nd.array(alphas, ctx=self.ctx)
    for feat in feats:
        alphas_t = []
        for next_tag in range(self.tagset_size):
            emit_score = feat[next_tag].reshape((1, -1))
            trans_score = self.transitions[next_tag].reshape((1, -1))
            next_tag_var = alphas + trans_score + emit_score
            alphas_t.append(log_sum_exp(next_tag_var))
        alphas = nd.concat(*alphas_t, dim=0).reshape((1, -1))
    terminal_var = alphas + self.transitions[self.tag2idx[self.STOP_TAG]]
    alpha = log_sum_exp(terminal_var)
    return alpha
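Because nd.concat takes its inputs as positional arguments, a Python list built inside a loop (such as alphas_t above) is unpacked with * before concatenation. A minimal sketch with hypothetical values:
import mxnet as mx
from mxnet import ndarray as nd

scores = [mx.nd.array([float(i)]) for i in range(5)]   # five shape-(1,) arrays
row = nd.concat(*scores, dim=0).reshape((1, -1))       # joined, then reshaped to (1, 5)
print(row)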
Example 4: tensor_save_bgrimage
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def tensor_save_bgrimage(tensor, filename, cuda=False):
    (b, g, r) = F.split(tensor, num_outputs=3, axis=0)
    tensor = F.concat(r, g, b, dim=0)
    tensor_save_rgbimage(tensor, filename, cuda)
Example 5: subtract_imagenet_mean_batch
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def subtract_imagenet_mean_batch(batch):
    """Subtract ImageNet mean pixel-wise from a BGR image."""
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    r = r - 123.680
    g = g - 116.779
    b = b - 103.939
    batch = F.concat(r, g, b, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch
Example 6: subtract_imagenet_mean_preprocess_batch
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def subtract_imagenet_mean_preprocess_batch(batch):
    """Subtract ImageNet mean pixel-wise from a BGR image."""
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    r = r - 123.680
    g = g - 116.779
    b = b - 103.939
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch
Example 7: add_imagenet_mean_batch
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def add_imagenet_mean_batch(batch):
    batch = F.swapaxes(batch, 0, 1)
    (b, g, r) = F.split(batch, num_outputs=3, axis=0)
    r = r + 123.680
    g = g + 116.779
    b = b + 103.939
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    """
    batch = denormalizer(batch)
    """
    return batch
Example 8: preprocess_batch
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def preprocess_batch(batch):
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch
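Examples 4 through 8 share one pattern: move the channel axis to the front, split into per-channel arrays, and concatenate them back in a new order. A minimal sketch with hypothetical shapes (F in those snippets is presumably mxnet.ndarray imported under an alias):
import mxnet as mx
from mxnet import ndarray as nd

batch = mx.nd.random.uniform(shape=(2, 3, 8, 8))   # NCHW batch, assumed RGB
batch = nd.swapaxes(batch, 0, 1)                   # (3, 2, 8, 8), channel axis first
r, g, b = nd.split(batch, num_outputs=3, axis=0)   # three (1, 2, 8, 8) slices
batch = nd.concat(b, g, r, dim=0)                  # channels rejoined in BGR order
batch = nd.swapaxes(batch, 0, 1)                   # back to (2, 3, 8, 8)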
Example 9: _score_sentence
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def _score_sentence(self, feats, tags):
    # Gives the score of a provided tag sequence
    score = nd.array([0])
    tags = nd.concat(nd.array([self.tag2idx[START_TAG]]), *tags, dim=0)
    for i, feat in enumerate(feats):
        score = score + \
            self.transitions.data()[to_scalar(tags[i + 1]), to_scalar(tags[i])] + feat[to_scalar(tags[i + 1])]
    score = score + self.transitions.data()[self.tag2idx[STOP_TAG],
                                            to_scalar(tags[int(tags.shape[0] - 1)])]
    return score
Example 10: _viterbi_decode
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def _viterbi_decode(self, feats):
    backpointers = []
    # Initialize the viterbi variables in log space
    vvars = nd.full((1, self.tagset_size), -10000.)
    vvars[0, self.tag2idx[START_TAG]] = 0
    for feat in feats:
        bptrs_t = []  # holds the backpointers for this step
        viterbivars_t = []  # holds the viterbi variables for this step
        for next_tag in range(self.tagset_size):
            # next_tag_var[i] holds the viterbi variable for tag i at the
            # previous step, plus the score of transitioning
            # from tag i to next_tag.
            # We don't include the emission scores here because the max
            # does not depend on them (we add them in below)
            next_tag_var = vvars + self.transitions.data()[next_tag]
            best_tag_id = argmax(next_tag_var)
            bptrs_t.append(best_tag_id)
            viterbivars_t.append(next_tag_var[0, best_tag_id])
        # Now add in the emission scores, and assign vvars to the set
        # of viterbi variables we just computed
        vvars = (nd.concat(*viterbivars_t, dim=0) + feat).reshape((1, -1))
        backpointers.append(bptrs_t)
    # Transition to STOP_TAG
    terminal_var = vvars + self.transitions.data()[self.tag2idx[STOP_TAG]]
    best_tag_id = argmax(terminal_var)
    path_score = terminal_var[0, best_tag_id]
    # Follow the back pointers to decode the best path.
    best_path = [best_tag_id]
    for bptrs_t in reversed(backpointers):
        best_tag_id = bptrs_t[best_tag_id]
        best_path.append(best_tag_id)
    # Pop off the start tag (we don't want to return that to the caller)
    start = best_path.pop()
    assert start == self.tag2idx[START_TAG]  # Sanity check
    best_path.reverse()
    return path_score, best_path
Example 11: k_fold_cross_valid
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
                       learning_rate, weight_decay, batch_size):
    """Conducts k-fold cross validation for the model."""
    assert k > 1
    fold_size = X_train.shape[0] // k
    train_loss_sum = 0.0
    test_loss_sum = 0.0
    for test_idx in range(k):
        X_val_test = X_train[test_idx * fold_size: (test_idx + 1) * fold_size, :]
        y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
        val_train_defined = False
        for i in range(k):
            if i != test_idx:
                X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
                y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
                if not val_train_defined:
                    X_val_train = X_cur_fold
                    y_val_train = y_cur_fold
                    val_train_defined = True
                else:
                    X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
                    y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
        net = get_net()
        train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
                           learning_rate, weight_decay, batch_size)
        train_loss_sum += train_loss
        test_loss = get_rmse_log(net, X_val_test, y_val_test)
        print("Test loss: %f" % test_loss)
        test_loss_sum += test_loss
    return train_loss_sum / k, test_loss_sum / k

# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation.
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 37, Source file: kaggle_k_fold_cross_validation.py
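A minimal sketch, with hypothetical toy data, of how the training folds above are assembled: the 2-D feature matrix and the 1-D label vector both grow along dim=0 as each non-test fold is appended.
import mxnet as mx
from mxnet import ndarray as nd

X = mx.nd.arange(12).reshape((6, 2))
y = mx.nd.arange(6)
fold_size, test_idx, k = 2, 1, 3
X_val_train = y_val_train = None
for i in range(k):
    if i == test_idx:
        continue
    X_cur = X[i * fold_size:(i + 1) * fold_size, :]
    y_cur = y[i * fold_size:(i + 1) * fold_size]
    if X_val_train is None:
        X_val_train, y_val_train = X_cur, y_cur
    else:
        X_val_train = nd.concat(X_val_train, X_cur, dim=0)
        y_val_train = nd.concat(y_val_train, y_cur, dim=0)
print(X_val_train.shape, y_val_train.shape)   # (4, 2) (4,)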
Example 12: learn
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
          weight_decay, batch_size):
    """Trains the model and predicts on the test data set."""
    net = get_net()
    _ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
              weight_decay, batch_size)
    # X_test is assumed to be the test feature NDArray defined elsewhere in the source script.
    preds = net(X_test).asnumpy()
    test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 12, Source file: kaggle_k_fold_cross_validation.py
Example 13: forward
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def forward(self, graph, ufeat, ifeat):
    """Forward function.

    Parameters
    ----------
    graph : DGLHeteroGraph
        "Flattened" user-movie graph with only one edge type.
    ufeat : mx.nd.NDArray
        User embeddings. Shape: (|V_u|, D)
    ifeat : mx.nd.NDArray
        Movie embeddings. Shape: (|V_m|, D)

    Returns
    -------
    mx.nd.NDArray
        Predicting scores for each user-movie edge.
    """
    graph = graph.local_var()
    ufeat = self.dropout(ufeat)
    ifeat = self.dropout(ifeat)
    graph.nodes['movie'].data['h'] = ifeat
    basis_out = []
    for i in range(self._num_basis_functions):
        graph.nodes['user'].data['h'] = F.dot(ufeat, self.Ps[i].data())
        graph.apply_edges(fn.u_dot_v('h', 'h', 'sr'))
        basis_out.append(graph.edata['sr'].expand_dims(1))
    out = F.concat(*basis_out, dim=1)
    out = self.rate_out(out)
    return out
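A minimal sketch, with hypothetical sizes, of the pattern above: each basis produces a column of per-edge scores, expand_dims adds an axis of size 1, and nd.concat along dim=1 stacks the columns into a (num_edges, num_basis) matrix.
import mxnet as mx
from mxnet import ndarray as nd

num_edges, num_basis = 5, 3
basis_out = [mx.nd.random.uniform(shape=(num_edges,)).expand_dims(1)
             for _ in range(num_basis)]                # each column is (5, 1)
out = nd.concat(*basis_out, dim=1)
print(out.shape)                                       # (5, 3)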
Example 14: cat
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def cat(seq, dim):
    return nd.concat(*seq, dim=dim)
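Hypothetical usage of the wrapper above, assuming cat and the nd import from the snippet are in scope; the wrapper mirrors a torch.cat-style call (sequence first, then the concatenation dimension).
import mxnet as mx

a = mx.nd.ones((2, 3))
b = mx.nd.zeros((2, 3))
print(cat([a, b], dim=0).shape)   # (4, 3)
print(cat([a, b], dim=1).shape)   # (2, 6)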
Example 15: create_neg
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import concat [as alias]
def create_neg(self, neg_head):
    if neg_head:
        def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
            hidden_dim = heads.shape[1]
            emb_real, emb_img = nd.split(tails, num_outputs=2, axis=-1)
            rel_real, rel_img = nd.split(relations, num_outputs=2, axis=-1)
            real = emb_real * rel_real + emb_img * rel_img
            img = -emb_real * rel_img + emb_img * rel_real
            emb_complex = nd.concat(real, img, dim=-1)
            tmp = emb_complex.reshape(num_chunks, chunk_size, hidden_dim)
            heads = heads.reshape(num_chunks, neg_sample_size, hidden_dim)
            heads = nd.transpose(heads, axes=(0, 2, 1))
            return nd.linalg_gemm2(tmp, heads)
        return fn
    else:
        def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
            hidden_dim = heads.shape[1]
            emb_real, emb_img = nd.split(heads, num_outputs=2, axis=-1)
            rel_real, rel_img = nd.split(relations, num_outputs=2, axis=-1)
            real = emb_real * rel_real - emb_img * rel_img
            img = emb_real * rel_img + emb_img * rel_real
            emb_complex = nd.concat(real, img, dim=-1)
            tmp = emb_complex.reshape(num_chunks, chunk_size, hidden_dim)
            tails = tails.reshape(num_chunks, neg_sample_size, hidden_dim)
            tails = nd.transpose(tails, axes=(0, 2, 1))
            return nd.linalg_gemm2(tmp, tails)
        return fn
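A minimal sketch, with hypothetical dimensions, of the complex-embedding layout used above: the last axis holds the real and imaginary halves side by side, which are separated with nd.split(axis=-1), recombined, and re-joined with nd.concat(dim=-1), exactly as in the two closures.
import mxnet as mx
from mxnet import ndarray as nd

emb = mx.nd.random.uniform(shape=(4, 8))             # hidden_dim = 8
real, img = nd.split(emb, num_outputs=2, axis=-1)    # two (4, 4) halves
rebuilt = nd.concat(real, img, dim=-1)               # back to (4, 8)
print(rebuilt.shape)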