This article collects typical usage examples of the Python method mxnet.ndarray.slice_axis. If you are wondering what ndarray.slice_axis does, how to call it, or what real-world usage looks like, the curated code examples here may help. You can also explore further usage examples of the containing module, mxnet.ndarray.
Below, 5 code examples of the ndarray.slice_axis method are shown, sorted by popularity by default.
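Before the collected examples, here is a minimal sketch of what nd.slice_axis itself does (the array values are chosen purely for illustration): it returns a contiguous slice of the input along a single axis, with an exclusive end bound and support for negative begin indices.

import mxnet as mx

x = mx.nd.arange(12).reshape((3, 4))
# Rows 1 and 2 (the end bound is exclusive):
print(mx.nd.slice_axis(x, axis=0, begin=1, end=3))
# The last two columns; a negative begin counts from the end of the axis:
print(mx.nd.slice_axis(x, axis=1, begin=-2, end=4))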
Example 1: argtopk
# Required import: from mxnet import ndarray [aliased as nd]
# Or: from mxnet.ndarray import slice_axis [as an alias]
def argtopk(input, k, dim, descending=True):
    # Sort indices along `dim`; descending order puts the largest values first.
    idx = nd.argsort(input, dim, is_ascend=not descending)
    # Keep the first k sorted indices, i.e. the indices of the top-k values.
    return nd.slice_axis(idx, dim, 0, k)
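A quick check of the helper above (assuming the definition just shown and `from mxnet import ndarray as nd`; the sample values are illustrative only):

from mxnet import ndarray as nd

x = nd.array([[3, 1, 2],
              [0, 5, 4]])
# Row-wise indices of the two largest values; expected [[0, 2], [1, 2]].
print(argtopk(x, k=2, dim=1))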
Example 2: slice_axis
# Required import: from mxnet import ndarray [aliased as nd]
# Or: from mxnet.ndarray import slice_axis [as an alias]
def slice_axis(data, axis, begin, end):
    """Slice `data` along `axis`, resolving negative indices against the
    axis length; an `end` of 0 (or negative) counts from the end."""
    dim = data.shape[axis]
    if begin < 0:
        begin += dim
    if end <= 0:
        end += dim
    return nd.slice_axis(data, axis, begin, end)
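A short sketch of the wrapper's negative-index handling (sample values are assumptions, not from the original source):

from mxnet import ndarray as nd

x = nd.arange(12).reshape((3, 4))
# begin=-2 resolves to column 2; end=0 resolves to the axis length, 4.
print(slice_axis(x, axis=1, begin=-2, end=0))  # the last two columns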
Example 3: dumpR
# Required import: from mxnet import ndarray [aliased as nd]
# Or: from mxnet.ndarray import slice_axis [as an alias]
# This example additionally assumes: import os, pickle, datetime, numpy as np,
# import mxnet as mx, and import sklearn.preprocessing
def dumpR(data_set, mx_model, batch_size, name='', data_extra=None, label_shape=None):
    print('dump verification embedding..')
    data_list = data_set[0]
    issame_list = data_set[1]
    model = mx_model
    embeddings_list = []
    if data_extra is not None:
        _data_extra = nd.array(data_extra)
    time_consumed = 0.0
    if label_shape is None:
        _label = nd.ones((batch_size,))
    else:
        _label = nd.ones(label_shape)
    for i in range(len(data_list)):
        data = data_list[i]
        embeddings = None
        ba = 0
        while ba < data.shape[0]:
            bb = min(ba + batch_size, data.shape[0])
            count = bb - ba
            # Always feed a full batch ending at bb; for the last, possibly
            # partial batch this re-reads earlier rows as padding.
            _data = nd.slice_axis(data, axis=0, begin=bb - batch_size, end=bb)
            time0 = datetime.datetime.now()
            if data_extra is None:
                db = mx.io.DataBatch(data=(_data,), label=(_label,))
            else:
                db = mx.io.DataBatch(data=(_data, _data_extra), label=(_label,))
            model.forward(db, is_train=False)
            net_out = model.get_outputs()
            _embeddings = net_out[0].asnumpy()
            time_now = datetime.datetime.now()
            diff = time_now - time0
            time_consumed += diff.total_seconds()
            if embeddings is None:
                embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
            # Keep only the last `count` rows, discarding the padding rows.
            embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
            ba = bb
        embeddings_list.append(embeddings)
    # Fuse the embeddings of the original and flipped images, then L2-normalize.
    embeddings = embeddings_list[0] + embeddings_list[1]
    embeddings = sklearn.preprocessing.normalize(embeddings)
    actual_issame = np.asarray(issame_list)
    outname = os.path.join('temp.bin')
    with open(outname, 'wb') as f:
        pickle.dump((embeddings, issame_list), f, protocol=pickle.HIGHEST_PROTOCOL)
Example 4: test
# Required import: from mxnet import ndarray [aliased as nd]
# Or: from mxnet.ndarray import slice_axis [as an alias]
# This example additionally assumes: import numpy as np, import mxnet as mx,
# import sklearn.preprocessing, and an `evaluate` verification helper defined
# elsewhere in the original source
def test(lfw_set, mx_model, batch_size):
    print('testing lfw..')
    lfw_data_list = lfw_set[0]
    issame_list = lfw_set[1]
    model = mx_model
    embeddings_list = []
    for i in range(len(lfw_data_list)):
        lfw_data = lfw_data_list[i]
        embeddings = None
        ba = 0
        while ba < lfw_data.shape[0]:
            bb = min(ba + batch_size, lfw_data.shape[0])
            # Take the current mini-batch along the batch axis.
            _data = nd.slice_axis(lfw_data, axis=0, begin=ba, end=bb)
            _label = nd.ones((bb - ba,))
            db = mx.io.DataBatch(data=(_data,), label=(_label,))
            model.forward(db, is_train=False)
            net_out = model.get_outputs()
            _embeddings = net_out[0].asnumpy()
            if embeddings is None:
                embeddings = np.zeros((lfw_data.shape[0], _embeddings.shape[1]))
            embeddings[ba:bb, :] = _embeddings
            ba = bb
        embeddings_list.append(embeddings)
    # Average L2 norm of the raw (unnormalized) embeddings.
    _xnorm = 0.0
    _xnorm_cnt = 0
    for embed in embeddings_list:
        for i in range(embed.shape[0]):
            _em = embed[i]
            _norm = np.linalg.norm(_em)
            _xnorm += _norm
            _xnorm_cnt += 1
    _xnorm /= _xnorm_cnt
    # Accuracy on the original (unflipped) images only.
    embeddings = embeddings_list[0].copy()
    embeddings = sklearn.preprocessing.normalize(embeddings)
    _, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=10)
    acc1, std1 = np.mean(accuracy), np.std(accuracy)
    # Accuracy after fusing the original and flipped embeddings.
    embeddings = embeddings_list[0] + embeddings_list[1]
    embeddings = sklearn.preprocessing.normalize(embeddings)
    print(embeddings.shape)
    _, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=10)
    acc2, std2 = np.mean(accuracy), np.std(accuracy)
    return acc1, std1, acc2, std2, _xnorm, embeddings_list
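Both dumpR above and test expect an already-bound MXNet Module. A minimal setup sketch follows; the checkpoint prefix 'model', epoch 0, the 112x112 input shape, and the batch size are all assumptions for illustration, not from the original source.

import mxnet as mx

sym, arg_params, aux_params = mx.model.load_checkpoint('model', 0)
model = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)
model.bind(data_shapes=[('data', (32, 3, 112, 112))], for_training=False)
model.set_params(arg_params, aux_params)

# lfw_set is expected to be (data_list, issame_list), where data_list holds
# the original and horizontally flipped image batches as NDArrays.
# acc1, std1, acc2, std2, xnorm, _ = test(lfw_set, model, batch_size=32)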
Example 5: split_data
# Required import: from mxnet import ndarray [as an alias]
# Or: from mxnet.ndarray import slice_axis [as an alias]
def split_data(data, num_slice, batch_axis=0, even_split=True, multiplier=1):
    """Splits an NDArray into `num_slice` slices along `batch_axis`.

    Usually used for data parallelism, where each slice is sent to one
    device (i.e. GPU).

    Parameters
    ----------
    data : NDArray
        A batch of data.
    num_slice : int
        Number of desired slices.
    batch_axis : int, default 0
        The axis along which to slice.
    even_split : bool, default True
        Whether to force all slices to have the same number of elements.
        If `True`, an error will be raised when `num_slice` does not evenly
        divide `data.shape[batch_axis]`.
    multiplier : int, default 1
        The batch size has to be a multiple of `multiplier`.

    Returns
    -------
    list of NDArray
        Return value is a list even if `num_slice` is 1.
    """
    size = data.shape[batch_axis]
    if even_split and size % num_slice != 0:
        raise ValueError(
            "data with shape %s cannot be evenly split into %d slices along axis %d. "
            "Use a batch size that's a multiple of %d or set even_split=False to allow "
            "uneven partitioning of data." % (
                str(data.shape), num_slice, batch_axis, num_slice))
    # Round the per-slice size down to a multiple of `multiplier`.
    step = (int(size / multiplier) // num_slice) * multiplier
    # If size < num_slice, make fewer slices.
    if not even_split and size < num_slice:
        step = 1
        num_slice = size
    if batch_axis == 0:
        # Basic indexing on axis 0; the last slice absorbs any remainder.
        slices = [data[i * step:(i + 1) * step] if i < num_slice - 1 else data[i * step:size]
                  for i in range(num_slice)]
    elif even_split:
        slices = ndarray.split(data, num_outputs=num_slice, axis=batch_axis)
    else:
        slices = [ndarray.slice_axis(data, batch_axis, i * step, (i + 1) * step)
                  if i < num_slice - 1 else
                  ndarray.slice_axis(data, batch_axis, i * step, size)
                  for i in range(num_slice)]
    return slices
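A quick sketch of the split behavior (the array shape is chosen for illustration; assumes the function above and `from mxnet import ndarray` are in scope):

from mxnet import ndarray

x = ndarray.arange(10).reshape((10, 1))
even = split_data(x, num_slice=2)                      # two slices of 5 rows
uneven = split_data(x, num_slice=3, even_split=False)  # 3 + 3 + 4 rows
print([s.shape for s in even], [s.shape for s in uneven])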