This article collects typical usage examples of the Python function mxnet.ndarray.ones. If you are wondering what exactly ones does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
Fifteen code examples of the ones function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
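Before the examples, here is a minimal standalone sketch of nd.ones itself, showing the shape, dtype, and ctx parameters that recur throughout this page:

import mxnet as mx
from mxnet import ndarray as nd

a = nd.ones((2, 3))                  # 2x3 array of float32 ones on the CPU
b = nd.ones((2, 3), dtype='int32')   # explicit dtype
c = nd.ones((2, 3), ctx=mx.cpu(0))   # explicit device context
print(a.asnumpy())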
Example 1: check_binary_func
def check_binary_func(x, y):
    f_add = lambda x, y: x + y
    f_add_grad = lambda x, y: [nd.ones(x.shape), nd.ones(y.shape)]
    autograd_assert(x, y, func=f_add, grad_func=f_add_grad)
    f_mul = lambda x, y: x * y
    f_mul_grad = lambda x, y: [y, x]
    autograd_assert(x, y, func=f_mul, grad_func=f_mul_grad)
    f_compose = lambda x, y: x + x * y
    f_compose_grad = lambda x, y: [nd.ones(x.shape) + y, x]
    autograd_assert(x, y, func=f_compose, grad_func=f_compose_grad)
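Every example in this group calls an autograd_assert helper that is not shown on this page. A minimal sketch of what such a helper might look like, written against the stable mx.autograd API (the name reuse is an assumption, this is not the original implementation, and it ignores the argnum/extra-argument handling used in Example 3):

import numpy as np
import mxnet as mx
from mxnet import autograd, nd

def autograd_assert(*args, **kwargs):
    # Hypothetical helper: compare autograd's gradients against the
    # analytical gradients returned by grad_func.
    func, grad_func = kwargs['func'], kwargs['grad_func']
    for arg in args:
        arg.attach_grad()
    with autograd.record():
        out = func(*args)
    out.backward()                  # head gradient defaults to all ones
    for arg, expected in zip(args, grad_func(*args)):
        assert np.allclose(arg.grad.asnumpy(), expected.asnumpy())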
Example 2: test_binary_func
def test_binary_func():
    x = nd.uniform(shape=(4, 5))
    y = nd.uniform(shape=(4, 5))
    f_add = lambda x, y: x + y
    f_add_grad = lambda x, y: [nd.ones(x.shape), nd.ones(y.shape)]
    autograd_assert(x, y, func=f_add, grad_func=f_add_grad)
    f_mul = lambda x, y: x * y
    f_mul_grad = lambda x, y: [y, x]
    autograd_assert(x, y, func=f_mul, grad_func=f_mul_grad)
    f_compose = lambda x, y: x + x * y
    f_compose_grad = lambda x, y: [nd.ones(x.shape) + y, x]
    autograd_assert(x, y, func=f_compose, grad_func=f_compose_grad)
Example 3: test_argnum
def test_argnum():
    def f_with_mode(a, b, mode):
        if mode:
            return a + b
        else:
            return a * b

    a = nd.uniform(shape=(3, 2))
    b = nd.uniform(shape=(3, 2))
    f_add_grad = lambda x, y, mode: [nd.ones(x.shape), nd.ones(y.shape)]
    f_mul_grad = lambda x, y, mode: [y, x]
    autograd_assert(a, b, True,
                    argnum=[0, 1], func=f_with_mode, grad_func=f_add_grad)
    autograd_assert(a, b, False,
                    argnum=[0, 1], func=f_with_mode, grad_func=f_mul_grad)
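Here argnum restricts differentiation to the first two positional arguments, so mode is treated as a constant. With the stable mx.autograd API, the same selectivity falls out of attaching gradient buffers only to the inputs you care about; a minimal sketch:

from mxnet import autograd, nd

a = nd.uniform(shape=(3, 2))
b = nd.uniform(shape=(3, 2))
a.attach_grad()              # differentiate w.r.t. a only
with autograd.record():
    out = a * b              # b has no grad buffer, so it acts as a constant
out.backward()
print(a.grad)                # equals b, the expected d(a*b)/da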
Example 4: test_detach_updated_grad
def test_detach_updated_grad():
    x = nd.ones((2, 2))
    dx = nd.zeros_like(x)
    y = nd.ones_like(x)
    dy = nd.zeros_like(x)
    mark_variables([x, y], [dx, dy])
    assert x._fresh_grad == False
    assert y._fresh_grad == False

    with train_section():
        x2 = x + 2
        y2 = x2 + y
        y2.backward()
    assert (dx.asnumpy() == 1).all()
    assert x._fresh_grad == True
    assert y._fresh_grad == True

    dx[:] = 0
    x._fresh_grad = False
    y._fresh_grad = False
    assert x._fresh_grad == False
    assert y._fresh_grad == False
    with train_section():
        x2 = x + 2
        x2 = x2.detach()
        y2 = x2 + y
        y2.backward()
    assert (dx.asnumpy() == 0).all()
    assert y._fresh_grad == True
    assert x._fresh_grad == False
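train_section and mark_variables are early autograd names; the same detach behavior is reproducible with today's record and attach_grad. A minimal sketch mirroring the second half of the test above:

from mxnet import autograd, nd

x = nd.ones((2, 2))
y = nd.ones((2, 2))
x.attach_grad()
y.attach_grad()
with autograd.record():
    x2 = (x + 2).detach()    # detach cuts the graph between x and x2
    y2 = x2 + y
y2.backward()
print(x.grad.asnumpy())      # all zeros: gradient flow blocked by detach
print(y.grad.asnumpy())      # all ones: unaffected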
Example 5: gan_loss
def gan_loss(input, target_is_real):
    # Build an all-ones (real) or all-zeros (fake) target on the same
    # device as the discriminator output
    if target_is_real:
        target = nd.ones(input.shape, ctx=input.context)
    else:
        target = nd.zeros(input.shape, ctx=input.context)
    # MSE loss for LSGAN, averaged over every axis except the batch axis
    e = ((input - target) ** 2).mean(axis=0, exclude=True)
    return e
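A quick usage sketch; the discriminator output here is random data, purely for illustration:

from mxnet import ndarray as nd

d_out = nd.uniform(shape=(8, 1))                     # pretend discriminator scores
loss_real = gan_loss(d_out, target_is_real=True)     # compares against nd.ones
loss_fake = gan_loss(d_out, target_is_real=False)    # compares against nd.zeros
print(loss_real.shape)                               # (8,): one loss value per sample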
Example 6: bilinear
def bilinear(x, W, y, input_size, seq_len, batch_size, num_outputs=1, bias_x=False, bias_y=False):
    """Do xWy

    Parameters
    ----------
    x : NDArray
        (input_size x seq_len) x batch_size
    W : NDArray
        (num_outputs x ny) x nx
    y : NDArray
        (input_size x seq_len) x batch_size
    input_size : int
        input dimension
    seq_len : int
        sequence length
    batch_size : int
        batch size
    num_outputs : int
        number of outputs
    bias_x : bool
        whether to concat a bias vector to input x
    bias_y : bool
        whether to concat a bias vector to input y

    Returns
    -------
    output : NDArray
        [seq_len_y x seq_len_x if num_outputs == 1 else seq_len_y x num_outputs x seq_len_x] x batch_size
    """
    if bias_x:
        x = nd.concat(x, nd.ones((1, seq_len, batch_size)), dim=0)
    if bias_y:
        y = nd.concat(y, nd.ones((1, seq_len, batch_size)), dim=0)
    nx, ny = input_size + bias_x, input_size + bias_y
    # W: (num_outputs x ny) x nx
    lin = nd.dot(W, x)
    if num_outputs > 1:
        lin = reshape_fortran(lin, (ny, num_outputs * seq_len, batch_size))
    y = y.transpose([2, 1, 0])  # May cause performance issues
    lin = lin.transpose([2, 1, 0])
    blin = nd.batch_dot(lin, y, transpose_b=True)
    blin = blin.transpose([2, 1, 0])
    if num_outputs > 1:
        blin = reshape_fortran(blin, (seq_len, num_outputs, seq_len, batch_size))
    return blin
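reshape_fortran is not defined on this page. A common implementation, an assumption based on how the function is used here rather than code from this source, performs a column-major reshape by reversing axes around MXNet's row-major reshape:

from mxnet import ndarray as nd

def reshape_fortran(x, shape):
    # Column-major (Fortran-order) reshape: reverse the axes, reshape in
    # row-major order with the reversed target shape, then reverse back.
    return x.T.reshape(tuple(reversed(shape))).T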
Example 7: test_training
def test_training():
    x = nd.ones((10, 10))
    with train_section():
        y = nd.Dropout(x, p=0.5)
        assert not (y.asnumpy() == x.asnumpy()).all()
        with test_section():
            y = nd.Dropout(x, p=0.5)
            assert (y.asnumpy() == x.asnumpy()).all()
Example 8: test_training
def test_training():
    x = nd.ones((10, 10))
    with record():
        y = nd.Dropout(x, p=0.5)
        assert not (y.asnumpy() == x.asnumpy()).all()
        with pause():
            y = nd.Dropout(x, p=0.5)
            assert (y.asnumpy() == x.asnumpy()).all()
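Examples 7 and 8 are the same MXNet test before and after the autograd context managers were renamed: train_section/test_section became record/pause. A small sketch of how the training flag behaves under the current API (printed values assume MXNet 1.x defaults):

from mxnet import autograd

print(autograd.is_training())          # False outside any scope
with autograd.record():                # records the graph and enters train mode
    print(autograd.is_training())      # True: Dropout actually drops units
    with autograd.pause():             # stops recording and leaves train mode
        print(autograd.is_training())  # False: Dropout is the identity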
Example 9: forward
def forward(self, x):
    # Accept plain numpy input as well
    if isinstance(x, np.ndarray):
        x = nd.array(x)
    if self._max_len > x.size:
        # Pad on the right with _fill_value up to _max_len
        pad = nd.ones((self._max_len - x.size,)) * self._fill_value
        x = nd.concat(x, pad, dim=0)
    elif self._max_len < x.size:
        # Truncate to _max_len
        x = x[:self._max_len]
    return x
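For instance, with hypothetical attribute values _max_len=5 and _fill_value=0, this forward pads a length-3 sequence to length 5. The same padding logic, inlined as a sketch:

from mxnet import ndarray as nd

x = nd.array([3, 1, 4])
pad = nd.ones((5 - x.size,)) * 0            # nd.ones scaled to the fill value
print(nd.concat(x, pad, dim=0).asnumpy())   # [3. 1. 4. 0. 0.]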
Example 10: dumpR
def dumpR(data_set, mx_model, batch_size, name='', data_extra=None, label_shape=None):
    print('dump verification embedding..')
    data_list = data_set[0]
    issame_list = data_set[1]
    model = mx_model
    embeddings_list = []
    if data_extra is not None:
        _data_extra = nd.array(data_extra)
    time_consumed = 0.0
    if label_shape is None:
        _label = nd.ones((batch_size,))
    else:
        _label = nd.ones(label_shape)
    for i in range(len(data_list)):  # range for Python 3; the original used xrange
        data = data_list[i]
        embeddings = None
        ba = 0
        while ba < data.shape[0]:
            bb = min(ba + batch_size, data.shape[0])
            count = bb - ba
            # Always feed a full batch: slice backwards from bb, so the last
            # (possibly partial) batch overlaps the previous one
            _data = nd.slice_axis(data, axis=0, begin=bb - batch_size, end=bb)
            #print(_data.shape, _label.shape)
            time0 = datetime.datetime.now()
            if data_extra is None:
                db = mx.io.DataBatch(data=(_data,), label=(_label,))
            else:
                db = mx.io.DataBatch(data=(_data, _data_extra), label=(_label,))
            model.forward(db, is_train=False)
            net_out = model.get_outputs()
            _embeddings = net_out[0].asnumpy()
            time_now = datetime.datetime.now()
            diff = time_now - time0
            time_consumed += diff.total_seconds()
            if embeddings is None:
                embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
            # Keep only the rows that belong to this batch
            embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
            ba = bb
        embeddings_list.append(embeddings)
    embeddings = embeddings_list[0] + embeddings_list[1]
    embeddings = sklearn.preprocessing.normalize(embeddings)
    actual_issame = np.asarray(issame_list)
    outname = os.path.join('temp.bin')
    with open(outname, 'wb') as f:
        pickle.dump((embeddings, issame_list), f, protocol=pickle.HIGHEST_PROTOCOL)
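The inner-loop slicing deserves a note: nd.slice_axis(data, axis=0, begin=bb - batch_size, end=bb) always feeds the model a full batch, so the final partial batch overlaps the previous one and only the trailing count rows of its output are kept. A tiny numeric sketch with toy sizes, assumed for illustration:

from mxnet import ndarray as nd

data = nd.arange(10).reshape((10, 1))     # 10 samples, batch_size = 4
ba, batch_size = 8, 4                     # final loop iteration: samples 8..9
bb = min(ba + batch_size, data.shape[0])
count = bb - ba                           # only 2 samples are actually new
batch = nd.slice_axis(data, axis=0, begin=bb - batch_size, end=bb)
print(batch.asnumpy().ravel())                        # [6. 7. 8. 9.]: 6..7 overlap
print(batch[batch_size - count:].asnumpy().ravel())   # [8. 9.]: the rows kept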
Example 11: check_unary_func
def check_unary_func(x):
    f_exp = lambda x: nd.exp(x)
    f_exp_grad = lambda x: [nd.exp(x)]
    autograd_assert(x, func=f_exp, grad_func=f_exp_grad)
    f_half = lambda x: x / 2
    f_half_grad = lambda x: [nd.ones(x.shape) * 0.5]
    autograd_assert(x, func=f_half, grad_func=f_half_grad)
    f_square = lambda x: x ** 2
    f_square_grad = lambda x: [2 * x]
    autograd_assert(x, func=f_square, grad_func=f_square_grad)
Example 12: test_unary_func
def test_unary_func():
    x = nd.uniform(shape=(4, 5))
    f_exp = lambda x: nd.exp(x)
    f_exp_grad = lambda x: [nd.exp(x)]
    autograd_assert(x, func=f_exp, grad_func=f_exp_grad)
    f_half = lambda x: x / 2
    f_half_grad = lambda x: [nd.ones(x.shape) * 0.5]
    autograd_assert(x, func=f_half, grad_func=f_half_grad)
    f_square = lambda x: x ** 2
    f_square_grad = lambda x: [2 * x]
    autograd_assert(x, func=f_square, grad_func=f_square_grad)
Example 13: test_module_input_grads
def test_module_input_grads():
    a = mx.sym.Variable('a', __layout__='NC')
    b = mx.sym.Variable('b', __layout__='NC')
    c = mx.sym.Variable('c', __layout__='NC')
    c = a + 2 * b + 3 * c
    net = mx.mod.Module(c, data_names=['b', 'c', 'a'], label_names=None,
                        context=[mx.cpu(0), mx.cpu(1)])
    net.bind(data_shapes=[['b', (5, 5)], ['c', (5, 5)], ['a', (5, 5)]],
             label_shapes=None, inputs_need_grad=True)
    net.init_params()

    net.forward(data_batch=mx.io.DataBatch(data=[nd.ones((5, 5)),
                                                 nd.ones((5, 5)),
                                                 nd.ones((5, 5))]))
    net.backward(out_grads=[nd.ones((5, 5))])
    input_grads = net.get_input_grads()
    b_grad = input_grads[0].asnumpy()
    c_grad = input_grads[1].asnumpy()
    a_grad = input_grads[2].asnumpy()
    assert np.all(a_grad == 1), a_grad
    assert np.all(b_grad == 2), b_grad
    assert np.all(c_grad == 3), c_grad
Example 14: test_out_grads
def test_out_grads():
    x = nd.ones((3, 5))
    dx = nd.zeros_like(x)
    mark_variables([x], [dx])
    da = None
    db = nd.array([1, 2, 3, 4, 5])
    dc = nd.array([5, 4, 3, 2, 1])
    with train_section():
        a, b, c = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
        backward([a, b, c], [da, db, dc])
    assert (dx.asnumpy() == np.array(
        [[1, 1, 1, 1, 1],
         [1, 2, 3, 4, 5],
         [5, 4, 3, 2, 1]])).all()
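Here da=None asks backward to use the default head gradient (all ones) for the first output, while db and dc supply explicit head gradients, so each row of dx receives its corresponding head gradient. A minimal sketch of the same test with the current mx.autograd API, assuming None entries in head_grads are accepted for defaults as in the original test:

import mxnet as mx
from mxnet import autograd, nd

x = nd.ones((3, 5))
x.attach_grad()
with autograd.record():
    a, b, c = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
autograd.backward([a, b, c],
                  head_grads=[None,                      # default: all ones
                              nd.array([1, 2, 3, 4, 5]),
                              nd.array([5, 4, 3, 2, 1])])
print(x.grad.asnumpy())   # rows: all ones, [1 2 3 4 5], [5 4 3 2 1]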
Example 15: lfw_test
def lfw_test(nbatch):
    print('testing lfw..')
    embeddings_list = []
    for i in range(len(lfw_data_list)):  # range for Python 3; the original used xrange
        lfw_data = lfw_data_list[i]
        embeddings = None
        ba = 0
        while ba < lfw_data.shape[0]:
            bb = min(ba + args.batch_size, lfw_data.shape[0])
            _data = nd.slice_axis(lfw_data, axis=0, begin=ba, end=bb)
            _label = nd.ones((bb - ba,))
            db = mx.io.DataBatch(data=(_data,), label=(_label,))
            model.forward(db, is_train=False)
            net_out = model.get_outputs()
            _embeddings = net_out[0].asnumpy()
            if embeddings is None:
                embeddings = np.zeros((lfw_data.shape[0], _embeddings.shape[1]))
            embeddings[ba:bb, :] = _embeddings
            ba = bb
        embeddings_list.append(embeddings)

    acc_list = []
    embeddings = embeddings_list[0]
    _, _, accuracy, val, val_std, far = lfw.evaluate(embeddings, issame_list, nrof_folds=10)
    acc_list.append(np.mean(accuracy))
    print('[%d]Accuracy: %1.3f+-%1.3f' % (nbatch, np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    # Concatenate both embedding sets along the feature axis and re-evaluate
    embeddings = np.concatenate(embeddings_list, axis=1)
    embeddings = sklearn.preprocessing.normalize(embeddings)
    print(embeddings.shape)
    _, _, accuracy, val, val_std, far = lfw.evaluate(embeddings, issame_list, nrof_folds=10)
    acc_list.append(np.mean(accuracy))
    print('[%d]Accuracy-Flip: %1.3f+-%1.3f' % (nbatch, np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    # Reduce to 128 dimensions with PCA, then evaluate once more
    pca = PCA(n_components=128)
    embeddings = pca.fit_transform(embeddings)
    embeddings = sklearn.preprocessing.normalize(embeddings)
    print(embeddings.shape)
    _, _, accuracy, val, val_std, far = lfw.evaluate(embeddings, issame_list, nrof_folds=10)
    acc_list.append(np.mean(accuracy))
    print('[%d]Accuracy-PCA: %1.3f+-%1.3f' % (nbatch, np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    return max(*acc_list)