This article collects and summarizes typical usage examples of the Python method mxnet.ndarray.expand_dims. If you are wondering what ndarray.expand_dims does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore other usage examples from the module it belongs to, mxnet.ndarray.
The following 12 code examples of ndarray.expand_dims are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
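Before the examples, a minimal sketch of what the method does: expand_dims inserts a new axis of size 1 at the given position, which is how most of the examples below add a batch dimension.
import mxnet as mx
from mxnet import ndarray as nd

x = nd.ones((3, 4))
y = nd.expand_dims(x, axis=0)   # insert a new axis at position 0
print(x.shape, '->', y.shape)   # (3, 4) -> (1, 3, 4)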
Example 1: face_create_lib
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def face_create_lib(args, npz_embs, npz_emb_len, embedding, item, fd, max_nums):
    # Get the total number of existing identities
    id_sum = npz_embs.shape[0]
    # Create a new directory for the new face being added to the library
    new_lib_dir = os.path.join(args.outdir, '%08d' % id_sum)
    os.mkdir(new_lib_dir)
    # Expand one dimension for consistency
    embedding = np.expand_dims(embedding, axis=0)
    # Vertically stack the feature vectors and the per-identity image counts
    npz_embs = np.vstack((npz_embs, embedding.reshape(1, -1)))
    npz_emb_len = np.vstack((npz_emb_len, np.array([[1]])))
    new_img_path = os.path.join(new_lib_dir, '00000' + args.encoding)
    old_img_path = os.path.join(args.indir, item[1])
    fd.write(old_img_path + '\t' + new_img_path + '\t' + str(max_nums) + '\n\n')
    shutil.copyfile(old_img_path, new_img_path)
    if args.delete:
        os.remove(old_img_path)
    return npz_embs, npz_emb_len
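For context, a small sketch of how the two library arrays grow on each new identity (the 512-dim embedding size is hypothetical):
import numpy as np

npz_embs = np.zeros((2, 512))      # two existing identities
npz_emb_len = np.ones((2, 1))      # image count per identity
embedding = np.ones(512)           # new face embedding, shape (512,)
npz_embs = np.vstack((npz_embs, np.expand_dims(embedding, axis=0)))
npz_emb_len = np.vstack((npz_emb_len, np.array([[1]])))
print(npz_embs.shape, npz_emb_len.shape)  # (3, 512) (3, 1)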
Example 2: tensor_load_rgbimage
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def tensor_load_rgbimage(filename, ctx, size=None, scale=None, keep_asp=False):
    img = Image.open(filename).convert('RGB')
    if size is not None:
        if keep_asp:
            size2 = int(size * 1.0 / img.size[0] * img.size[1])
            img = img.resize((size, size2), Image.ANTIALIAS)
        else:
            img = img.resize((size, size), Image.ANTIALIAS)
    elif scale is not None:
        img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)
    img = np.array(img).transpose(2, 0, 1).astype(float)
    img = F.expand_dims(mx.nd.array(img, ctx=ctx), 0)
    return img
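A possible call, assuming a local image file (the path and size are placeholders):
import mxnet as mx

img = tensor_load_rgbimage('photo.jpg', mx.cpu(), size=256, keep_asp=True)
print(img.shape)  # (1, 3, H, W); the leading 1 comes from expand_dims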
Example 3: unsqueeze
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def unsqueeze(input, dim):
    return nd.expand_dims(input, axis=dim)
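A quick sanity check of the wrapper (assuming it is in scope):
from mxnet import ndarray as nd

t = nd.zeros((2, 5))
print(unsqueeze(t, 1).shape)             # (2, 1, 5)
print(nd.expand_dims(t, axis=1).shape)   # (2, 1, 5), identical by construction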
Example 4: batched_l2_dist
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def batched_l2_dist(a, b):
    a_squared = nd.power(nd.norm(a, axis=-1), 2)
    b_squared = nd.power(nd.norm(b, axis=-1), 2)
    # -2 * a.b^T + b^2 in one gemm call, then + a^2 via broadcasting
    squared_res = nd.add(nd.linalg_gemm(
        a, nd.transpose(b, axes=(0, 2, 1)),
        nd.broadcast_axes(nd.expand_dims(b_squared, axis=-2), axis=1, size=a.shape[1]),
        alpha=-2
    ), nd.expand_dims(a_squared, axis=-1))
    res = nd.sqrt(nd.clip(squared_res, 1e-30, np.finfo(np.float32).max))
    return res
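The gemm formulation computes ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2 without materializing the broadcasted difference tensor. A sketch that checks it against the naive broadcast version (shapes are hypothetical):
import numpy as np
from mxnet import ndarray as nd

a = nd.random.uniform(shape=(2, 3, 4))   # 2 batches of 3 vectors, dim 4
b = nd.random.uniform(shape=(2, 5, 4))   # 2 batches of 5 vectors, dim 4
fast = batched_l2_dist(a, b)             # shape (2, 3, 5)
naive = nd.norm(nd.expand_dims(a, axis=-2) - nd.expand_dims(b, axis=-3), axis=-1)
print(np.allclose(fast.asnumpy(), naive.asnumpy(), atol=1e-5))  # True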
Example 5: batched_l1_dist
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def batched_l1_dist(a, b):
    a = nd.expand_dims(a, axis=-2)  # (B, n, 1, d)
    b = nd.expand_dims(b, axis=-3)  # (B, 1, m, d)
    res = nd.norm(a - b, ord=1, axis=-1)
    return res
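Shape walk-through for the broadcast (same hypothetical shapes as above):
from mxnet import ndarray as nd

a = nd.random.uniform(shape=(2, 3, 4))
b = nd.random.uniform(shape=(2, 5, 4))
# a -> (2, 3, 1, 4), b -> (2, 1, 5, 4); a - b broadcasts to (2, 3, 5, 4)
print(batched_l1_dist(a, b).shape)  # (2, 3, 5)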
Example 6: prepare
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def prepare(self, g, gpu_id, trace=False):
    head_ids, tail_ids = g.all_edges(order='eid')
    projection = self.projection_emb(g.edata['id'], gpu_id, trace)
    projection = projection.reshape(-1, self.entity_dim, self.relation_dim)
    head_emb = g.ndata['emb'][head_ids.as_in_context(g.ndata['emb'].context)].expand_dims(axis=-2)
    tail_emb = g.ndata['emb'][tail_ids.as_in_context(g.ndata['emb'].context)].expand_dims(axis=-2)
    g.edata['head_emb'] = nd.batch_dot(head_emb, projection).squeeze()
    g.edata['tail_emb'] = nd.batch_dot(tail_emb, projection).squeeze()
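Here expand_dims(axis=-2) turns each entity embedding into a 1 x entity_dim row vector so batch_dot can multiply it by a per-edge projection matrix. A standalone sketch with hypothetical dimensions:
from mxnet import ndarray as nd

entity_dim, relation_dim, num_edges = 200, 100, 10
head_emb = nd.ones((num_edges, entity_dim)).expand_dims(axis=-2)  # (10, 1, 200)
projection = nd.ones((num_edges, entity_dim, relation_dim))       # (10, 200, 100)
print(nd.batch_dot(head_emb, projection).squeeze().shape)         # (10, 100)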
Example 7: edge_func
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def edge_func(self, edges):
    head = edges.src['emb']
    tail = edges.dst['emb'].expand_dims(2)
    rel = edges.data['emb']
    rel = rel.reshape(-1, self.relation_dim, self.entity_dim)
    score = head * mx.nd.batch_dot(rel, tail).squeeze()
    # TODO: check if use self.gamma
    return {'score': mx.nd.sum(score, -1)}
    # return {'score': self.gamma - th.norm(score, p=1, dim=-1)}
Example 8: update
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def update(self, gpu_id=-1):
    """ Update embeddings in a sparse manner.

    Sparse embeddings are updated in mini-batches. We maintain gradient states
    for each embedding so they can be updated separately.

    Parameters
    ----------
    gpu_id : int
        Which GPU to accelerate the calculation. If -1 is provided, the CPU is used.
    """
    self.state_step += 1
    for idx, data in self.trace:
        grad = data.grad
        clr = self.args.lr
        #clr = self.args.lr / (1 + (self.state_step - 1) * group['lr_decay'])

        # the update is non-linear so indices must be unique
        grad_indices = idx
        grad_values = grad
        grad_sum = (grad_values * grad_values).mean(1)
        ctx = self.state_sum.context
        if ctx != grad_indices.context:
            grad_indices = grad_indices.as_in_context(ctx)
        if ctx != grad_sum.context:
            grad_sum = grad_sum.as_in_context(ctx)
        self.state_sum[grad_indices] += grad_sum
        std = self.state_sum[grad_indices]  # _sparse_mask
        if gpu_id >= 0:
            std = std.as_in_context(mx.gpu(gpu_id))
        std_values = nd.expand_dims(nd.sqrt(std) + 1e-10, 1)
        tmp = (-clr * grad_values / std_values)
        if tmp.context != ctx:
            tmp = tmp.as_in_context(ctx)
        # TODO(zhengda) the overhead is here.
        self.emb[grad_indices] = mx.nd.take(self.emb, grad_indices) + tmp
    self.trace = []
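The std_values line is the Adagrad-style denominator: the accumulated mean squared gradient per row, square-rooted and broadcast across the embedding dimension via expand_dims. A tiny numeric sketch (the 0.1 learning rate is hypothetical):
from mxnet import ndarray as nd

grad_values = nd.array([[0.2, -0.4], [0.1, 0.3]])           # gradients for two rows
state_sum = (grad_values * grad_values).mean(1)              # per-row accumulator, shape (2,)
std_values = nd.expand_dims(nd.sqrt(state_sum) + 1e-10, 1)   # shape (2, 1)
step = -0.1 * grad_values / std_values                       # broadcasts over columns
print(step.shape)  # (2, 2)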
Example 9: test_quantization
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def test_quantization(model, args, test_data, size, num_class, pred_offset):
    # output folder
    outdir = 'outdir_int8'
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    print(model)
    metric = gluoncv.utils.metrics.SegmentationMetric(num_class)
    tbar = tqdm(test_data)
    metric.reset()
    tic = time.time()
    for i, (batch, dsts) in enumerate(tbar):
        if args.eval:
            targets = mx.gluon.utils.split_and_load(dsts, ctx_list=args.ctx, even_split=False)
            data = mx.gluon.utils.split_and_load(batch, ctx_list=args.ctx, batch_axis=0, even_split=False)
            outputs = None
            for x in data:
                output = model(x)
                outputs = output if outputs is None else nd.concat(outputs, output, axis=0)
            metric.update(targets, outputs)
            pixAcc, mIoU = metric.get()
            tbar.set_description('pixAcc: %.4f, mIoU: %.4f' % (pixAcc, mIoU))
        else:
            for data, impath in zip(batch, dsts):
                data = data.as_in_context(args.ctx[0])
                if len(data.shape) < 4:
                    data = nd.expand_dims(data, axis=0)
                predict = model(data)[0]
                predict = mx.nd.squeeze(mx.nd.argmax(predict, 1)).asnumpy() + pred_offset
                mask = get_color_pallete(predict, args.dataset)
                outname = os.path.splitext(impath)[0] + '.png'
                mask.save(os.path.join(outdir, outname))
    speed = size / (time.time() - tic)
    print('Inference speed with batchsize %d is %.2f img/sec' % (args.batch_size, speed))
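The expand_dims guard in the else-branch promotes a single CHW image to the NCHW layout the network expects. A sketch in isolation (the 480x480 size is hypothetical):
from mxnet import ndarray as nd

data = nd.zeros((3, 480, 480))           # one CHW image
if len(data.shape) < 4:
    data = nd.expand_dims(data, axis=0)  # add the batch axis
print(data.shape)                        # (1, 3, 480, 480)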
Example 10: generate_text
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def generate_text(model, seed, length=512, top_n=10):
    """
    Generates text of the specified length from a trained model,
    starting from the given seed character sequence.
    """
    logger.info("generating %s characters from top %s choices.", length, top_n)
    logger.info('generating with seed: "%s".', seed)
    generated = seed
    encoded = mx.nd.array(encode_text(seed))
    seq_len = encoded.shape[0]
    x = F.expand_dims(encoded[:seq_len-1], 1)
    # input shape: [seq_len, 1]
    state = model.begin_state()
    # get rnn state due to seed sequence
    _, state = model(x, state)
    next_index = encoded[seq_len-1].asscalar()
    for i in range(length):
        x = mx.nd.array([[next_index]])
        # input shape: [1, 1]
        logit, state = model(x, state)
        # output shape: [1, vocab_size]
        probs = F.softmax(logit)
        next_index = sample_from_probs(probs.asnumpy().squeeze(), top_n)
        # append to sequence
        generated += ID2CHAR[next_index]
    logger.info("generated text: \n%s\n", generated)
    return generated
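Here expand_dims gives the encoded seed a batch dimension of 1 so the RNN consumes it as a [seq_len, 1] input. A sketch with a hypothetical 5-token seed:
import mxnet as mx
from mxnet import ndarray as F

encoded = mx.nd.array([3, 1, 4, 1, 5])   # stand-in for encode_text(seed)
x = F.expand_dims(encoded[:4], 1)        # all but the last seed token
print(x.shape)                           # (4, 1)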
Example 11: predict
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def predict(self, img):
    img = nd.array(img)
    # print(img.shape)
    img = nd.transpose(img, axes=(2, 0, 1)).astype('float32')
    img = nd.expand_dims(img, axis=0)
    # print(img.shape)
    db = mx.io.DataBatch(data=(img,))
    self.model.forward(db, is_train=False)
    net_out = self.model.get_outputs()
    embedding = net_out[0].asnumpy()
    embedding = sklearn.preprocessing.normalize(embedding, axis=1)
    return embedding
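Calling pattern for this and the following example (the model instance and 112x112 input size are hypothetical):
import numpy as np

face = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8)  # HWC image
emb = model.predict(face)  # model: an instance of the class defining predict()
print(emb.shape)           # (1, embedding_dim); rows are L2-normalized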
Example 12: predict
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import expand_dims [as alias]
def predict(self, img):
    img = nd.array(img)
    img = nd.transpose(img, axes=(2, 0, 1)).astype('float32')
    img = nd.expand_dims(img, axis=0)
    # print(img.shape)
    db = mx.io.DataBatch(data=(img,))
    self.model.forward(db, is_train=False)
    net_out = self.model.get_outputs()
    embedding = net_out[0].asnumpy()
    embedding = sklearn.preprocessing.normalize(embedding)
    return embedding