本文整理汇总了Python中mxnet.ndarray.zeros_like方法的典型用法代码示例。如果您正苦于以下问题:Python ndarray.zeros_like方法的具体用法?Python ndarray.zeros_like怎么用?Python ndarray.zeros_like使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mxnet.ndarray的用法示例。
在下文中一共展示了ndarray.zeros_like方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: backward
# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import zeros_like [as 别名]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
    """Backward pass for the custom ReLU op.

    When ``ReluOp.guided_backprop`` is set, applies guided backpropagation:
    gradient flows only where both the upstream gradient and the forward
    output are positive. Otherwise computes the standard ReLU gradient.
    """
    if ReluOp.guided_backprop:
        out_val = out_data[0]
        grad_out = out_grad[0]
        # Clip negative upstream gradients to zero.
        clipped_grad = nd.maximum(grad_out, nd.zeros_like(grad_out))
        # Mask of positions where the forward output was positive.
        positive_out = out_val > 0
        # Gradient survives only where both conditions hold.
        self.assign(in_grad[0], req[0], clipped_grad * positive_out)
    else:
        # Standard ReLU backward: pass gradient where the input was positive.
        inp = in_data[0]
        self.assign(in_grad[0], req[0], out_grad[0] * (inp > 0))
示例2: test_out_grads
# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import zeros_like [as 别名]
def test_out_grads():
    """Per-output head gradients: a ``None`` entry defaults to ones."""
    x = nd.ones((3, 5))
    dx = nd.zeros_like(x)
    mark_variables([x], [dx])
    # One head gradient per split output; None means "use ones".
    head_grads = [None,
                  nd.array([1, 2, 3, 4, 5]),
                  nd.array([5, 4, 3, 2, 1])]
    with record():
        outs = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
        backward(list(outs), head_grads)
    expected = np.array([[1, 1, 1, 1, 1],
                         [1, 2, 3, 4, 5],
                         [5, 4, 3, 2, 1]])
    assert (dx.asnumpy() == expected).all()
示例3: test_out_grads
# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import zeros_like [as 别名]
def test_out_grads():
    """Per-output head gradients (legacy ``train_section`` API):
    a ``None`` entry defaults to ones."""
    x = nd.ones((3, 5))
    dx = nd.zeros_like(x)
    mark_variables([x], [dx])
    # One head gradient per split output; None means "use ones".
    head_grads = [None,
                  nd.array([1, 2, 3, 4, 5]),
                  nd.array([5, 4, 3, 2, 1])]
    with train_section():
        outs = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
        backward(list(outs), head_grads)
    expected = np.array([[1, 1, 1, 1, 1],
                         [1, 2, 3, 4, 5],
                         [5, 4, 3, 2, 1]])
    assert (dx.asnumpy() == expected).all()
示例4: forward
# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import zeros_like [as 别名]
def forward(self, is_train, req, in_data, out_data, aux):
    """Forward pass of the custom ReLU op: elementwise ``max(x, 0)``."""
    inp = in_data[0]
    activated = nd.maximum(inp, nd.zeros_like(inp))
    self.assign(out_data[0], req[0], activated)
示例5: grad_and_loss
# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import zeros_like [as 别名]
def grad_and_loss(func, argnum=None):
    """Return function that computes both gradient of arguments and loss value.

    Parameters
    ----------
    func: a python function
        The forward (loss) function.
    argnum: an int or a list of int
        The index of argument to calculate gradient for.

    Returns
    -------
    grad_and_loss_func: a python function
        A function that would compute both the gradient of arguments and loss value.
    """
    @functools.wraps(func)
    def wrapped(*args):
        """Wrapped function."""
        variables = args
        if argnum is not None:
            # Normalize a single index to a list, then select those args.
            argnum_ = argnum if isinstance(argnum, list) else [argnum]
            variables = [args[i] for i in argnum_]
        for x in variables:
            # FIX: message previously read "should NDArray" (missing "be").
            assert isinstance(x, NDArray), "type of autograd input should be NDArray."
        # Allocate a zero gradient buffer per differentiated variable.
        grads = [zeros_like(x) for x in variables]
        mark_variables(variables, grads)
        with record():
            outputs = func(*args)
        # backward expects a list of output heads.
        backward([outputs] if isinstance(outputs, NDArray) else outputs)
        return grads, outputs
    return wrapped
示例6: test_detach_updated_grad
# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import zeros_like [as 别名]
def test_detach_updated_grad():
    """``detach()`` severs the graph: upstream variables get no gradient."""
    x = nd.ones((2, 2))
    dx = nd.zeros_like(x)
    y = nd.ones_like(x)
    dy = nd.zeros_like(x)
    mark_variables([x, y], [dx, dy])
    assert not x._fresh_grad
    assert not y._fresh_grad

    # Without detach: gradient flows back to x.
    with record():
        x2 = x + 2
        y2 = x2 + y
        y2.backward()
    assert (dx.asnumpy() == 1).all()
    assert x._fresh_grad
    assert y._fresh_grad

    # Reset gradient buffers and freshness flags.
    dx[:] = 0
    x._fresh_grad = False
    y._fresh_grad = False
    assert not x._fresh_grad
    assert not y._fresh_grad

    # With detach: x receives no gradient, y still does.
    with record():
        x2 = x + 2
        x2 = x2.detach()
        y2 = x2 + y
        y2.backward()
    assert (dx.asnumpy() == 0).all()
    assert y._fresh_grad
    assert not x._fresh_grad
示例7: test_detach_updated_grad
# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import zeros_like [as 别名]
def test_detach_updated_grad():
    """``detach()`` severs the graph (legacy ``train_section`` API):
    upstream variables get no gradient."""
    x = nd.ones((2, 2))
    dx = nd.zeros_like(x)
    y = nd.ones_like(x)
    dy = nd.zeros_like(x)
    mark_variables([x, y], [dx, dy])
    assert not x._fresh_grad
    assert not y._fresh_grad

    # Without detach: gradient flows back to x.
    with train_section():
        x2 = x + 2
        y2 = x2 + y
        y2.backward()
    assert (dx.asnumpy() == 1).all()
    assert x._fresh_grad
    assert y._fresh_grad

    # Reset gradient buffers and freshness flags.
    dx[:] = 0
    x._fresh_grad = False
    y._fresh_grad = False
    assert not x._fresh_grad
    assert not y._fresh_grad

    # With detach: x receives no gradient, y still does.
    with train_section():
        x2 = x + 2
        x2 = x2.detach()
        y2 = x2 + y
        y2.backward()
    assert (dx.asnumpy() == 0).all()
    assert y._fresh_grad
    assert not x._fresh_grad
示例8: kv_push
# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import zeros_like [as 别名]
def kv_push(self, key, value):
    """Push ``value`` to the distributed kvstore under ``key``.

    The key is lazily initialized with a zero array of matching
    shape/dtype on first use, since kvstore keys must be initialized
    before the first push.
    """
    # FIX: `not key in` -> idiomatic `key not in`; removed commented-out
    # context-move dead code.
    if key not in self._kvinit:
        self._distkv.init(key, nd.zeros_like(value))
        self._kvinit[key] = 1
    self._distkv.push(key, value)
#get fc1 and partial fc7
示例9: zeros_like
# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import zeros_like [as 别名]
# Thin wrapper exposing mxnet.ndarray.zeros_like under a framework-neutral name.
# NOTE(review): the parameter name shadows the builtin `input`; kept as-is
# because callers may pass it by keyword.
def zeros_like(input):
    # Return an NDArray of zeros with the same shape, dtype and context as `input`.
    return nd.zeros_like(input)