This article collects typical usage examples of the Python method mxnet.ndarray.exp. If you have been wondering what exactly ndarray.exp does, how it is used, and where to find working examples, the curated code samples below should help; you can also explore the other methods of the mxnet.ndarray module.
The following presents 14 code examples of the ndarray.exp method, sorted by popularity.
Example 1: synthetic_grad
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp
# Also requires: import numpy

def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    # Gradient for a two-component Gaussian mixture model; the theta1/v1 and
    # theta2/v2 terms come from zero-mean Gaussian priors on the parameters.
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    theta1 = theta.asnumpy()[0]
    theta2 = theta.asnumpy()[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) \
        + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx))
    grad_npy = numpy.zeros(theta.shape)
    grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
                                    + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx))
                                    * (X - theta1 - theta2) / vx) / denominator).sum() \
        + theta1 / v1
    grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx))
                                    * (X - theta1 - theta2) / vx) / denominator).sum() \
        + theta2 / v2
    grad[:] = grad_npy
    return grad
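This function matches the classic two-Gaussian synthetic experiment used in the stochastic gradient Langevin dynamics (SGLD) literature. A minimal sketch of how it might be called (all values below are illustrative assumptions, not from the source):

import numpy
from mxnet import ndarray as nd

X = numpy.random.normal(loc=0.0, scale=2.0, size=100)  # synthetic observations
theta = nd.array([0.0, 1.0])                            # current parameter sample
g = synthetic_grad(X, theta, sigma1=numpy.sqrt(10), sigma2=1.0, sigmax=numpy.sqrt(2))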
Example 2: forward
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp
# Also requires: import numpy

def forward(self, in_data, out_data):
    x = in_data[0]
    y = out_data[0]
    # Numerically stable softmax: subtract the per-row max before exponentiating.
    y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
    y /= y.sum(axis=1).reshape((x.shape[0], 1))
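Subtracting the row maximum leaves the result unchanged, because softmax is invariant to adding a constant per row, but it bounds every exponent by 0 so numpy.exp cannot overflow. An illustrative check with extreme inputs (values are assumptions for demonstration):

import numpy
x = numpy.array([[1000., 1001., 1002.]])
naive = numpy.exp(x) / numpy.exp(x).sum(axis=1, keepdims=True)  # nan: exp overflows to inf
shifted = numpy.exp(x - x.max(axis=1, keepdims=True))
stable = shifted / shifted.sum(axis=1, keepdims=True)           # [[0.090, 0.245, 0.665]]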
Example 3: backward
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp
# Also requires: import numpy

def backward(self, out_grad, in_data, out_data, in_grad):
    l = in_data[1]
    y = out_data[0]
    dx = in_grad[0]
    # numpy.exp(y) suggests the forward pass stored log-probabilities (a log-softmax);
    # paired with a plain-softmax forward like Example 2, the gradient would be y - l.
    dx[:] = (numpy.exp(y) - l).astype('float32')
Example 4: regression_student_grad
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp

def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
    student_mean = student_outputs[0]
    student_var = student_outputs[1]
    grad_mean = nd.exp(-student_var) * (student_mean - teacher_pred)
    grad_var = (1 - nd.exp(-student_var) * (nd.square(student_mean - teacher_pred)
                                            + 1.0 / teacher_noise_precision)) / 2
    return [grad_mean, grad_var]
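Reading the nd.exp(-student_var) factors, student_var evidently stores the log-variance s = log sigma^2, so 1/sigma^2 = exp(-s). The two gradients then match the partial derivatives with respect to m and s of the expected Gaussian negative log-likelihood (s + exp(-s) * ((m - t)^2 + 1/tau)) / 2, where t is the teacher prediction and 1/tau the teacher noise variance: d/dm gives exp(-s) * (m - t) and d/ds gives (1 - exp(-s) * ((m - t)^2 + 1/tau)) / 2.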
Example 5: log_sum_exp
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp

def log_sum_exp(vec):
    # Shift by the max so the exponentials cannot overflow.
    max_score = nd.max(vec).asscalar()
    return nd.log(nd.sum(nd.exp(vec - max_score))) + max_score
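The shift is exact because log(sum(exp(v_i))) = m + log(sum(exp(v_i - m))) for any constant m. A quick illustrative check (input values are assumptions for demonstration):

from mxnet import ndarray as nd
vec = nd.array([1000.0, 1001.0, 1002.0])
print(nd.log(nd.sum(nd.exp(vec))).asscalar())  # inf: the naive form overflows
print(log_sum_exp(vec).asscalar())             # ~1002.41: the stable form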
Example 6: _forward_alg
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp

def _forward_alg(self, feats):
    # Do the forward algorithm to compute the partition function
    alphas = [[-10000.] * self.tagset_size]
    alphas[0][self.tag2idx[START_TAG]] = 0.
    alphas = nd.array(alphas)
    # Iterate through the sentence
    for feat in feats:
        alphas_t = []  # The forward variables at this timestep
        for next_tag in range(self.tagset_size):
            # Broadcast the emission score: it is the same regardless of
            # the previous tag.
            emit_score = feat[next_tag].reshape((1, -1))
            # The ith entry of trans_score is the score of transitioning to
            # next_tag from i.
            trans_score = self.transitions.data()[next_tag].reshape((1, -1))
            # The ith entry of next_tag_var is the value for the
            # edge (i -> next_tag) before we do log-sum-exp.
            next_tag_var = alphas + trans_score + emit_score
            # The forward variable for this tag is log-sum-exp of all the scores.
            alphas_t.append(log_sum_exp(next_tag_var))
        alphas = nd.concat(*alphas_t, dim=0).reshape((1, -1))
    terminal_var = alphas + self.transitions.data()[self.tag2idx[STOP_TAG]]
    alpha = log_sum_exp(terminal_var)
    return alpha
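The returned alpha is the log partition function log Z of a linear-chain CRF, where Z = sum over all tag sequences y of exp(score(x, y)); the dynamic program computes it in O(n * |tags|^2) rather than enumerating the exponentially many sequences. The snippet assumes log_sum_exp from Example 5 plus self.transitions, START_TAG and STOP_TAG defined on the surrounding CRF class.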
Example 7: test_unary_func
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp

def test_unary_func():
    def check_unary_func(x):
        f_exp = lambda x: nd.exp(x)
        f_exp_grad = lambda x: [nd.exp(x)]
        autograd_assert(x, func=f_exp, grad_func=f_exp_grad)
        f_half = lambda x: x/2
        f_half_grad = lambda x: [nd.ones(x.shape) * 0.5]
        autograd_assert(x, func=f_half, grad_func=f_half_grad)
        f_square = lambda x: x**2
        f_square_grad = lambda x: [2*x]
        autograd_assert(x, func=f_square, grad_func=f_square_grad)
    uniform = nd.uniform(shape=(4, 5))
    stypes = ['default', 'row_sparse', 'csr']
    for stype in stypes:
        check_unary_func(uniform.tostype(stype))
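autograd_assert is a helper defined elsewhere in the test suite; a minimal sketch of what such a helper might look like (an assumption for illustration, not MXNet's actual implementation):

import numpy
import mxnet as mx

def autograd_assert(*args, func, grad_func):
    # Differentiate func under autograd and compare with the analytic gradient.
    grad_and_loss = mx.autograd.grad_and_loss(func)
    grad_vals, output = grad_and_loss(*args)
    expected = grad_func(*args)
    assert len(grad_vals) == len(expected)
    for got, want in zip(grad_vals, expected):
        assert numpy.allclose(got.asnumpy(), want.asnumpy())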
Example 8: test_gradient
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp
# Also requires: import mxnet as mx

def test_gradient():
    x = mx.nd.ones((1,))
    x.attach_grad()
    with mx.autograd.record():
        z = mx.nd.elemwise_add(mx.nd.exp(x), x)
    dx, = mx.autograd.grad(z, [x], create_graph=True)
    assert abs(dx.asscalar() - 3.71828175) < 1e-7
    dx.backward()
    assert abs(x.grad.asscalar() - 2.71828175) < 1e-7
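The constants follow from z = e^x + x at x = 1: dz/dx = e^x + 1 ~ 3.71828, and because create_graph=True keeps the gradient graph itself differentiable, dx.backward() produces the second derivative d^2z/dx^2 = e^x ~ 2.71828 in x.grad.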
Example 9: test_unary_func
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp

def test_unary_func():
    x = nd.uniform(shape=(4, 5))
    f_exp = lambda x: nd.exp(x)
    f_exp_grad = lambda x: [nd.exp(x)]
    autograd_assert(x, func=f_exp, grad_func=f_exp_grad)
    f_half = lambda x: x/2
    f_half_grad = lambda x: [nd.ones(x.shape) * 0.5]
    autograd_assert(x, func=f_half, grad_func=f_half_grad)
    f_square = lambda x: x**2
    f_square_grad = lambda x: [2*x]
    autograd_assert(x, func=f_square, grad_func=f_square_grad)
Example 10: exp
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp

def exp(input):
    # Thin wrapper exposing nd.exp under a framework-agnostic name.
    return nd.exp(input)
Example 11: logsigmoid
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp

def logsigmoid(val):
    max_elem = nd.maximum(0., -val)
    z = nd.exp(-max_elem) + nd.exp(-val - max_elem)
    return -(max_elem + nd.log(z))
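This is the numerically stable form of log sigmoid(v) = -log(1 + e^(-v)): factoring out m = max(0, -v) gives log(1 + e^(-v)) = m + log(e^(-m) + e^(-v-m)), and since both exponents are non-positive, neither nd.exp can overflow even for large negative v.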
Example 12: softplus
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp

def softplus(x):
    return nd.log(1. + nd.exp(x))
Example 13: softplus_inv
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp

def softplus_inv(x):
    return nd.log(nd.exp(x) - 1.)
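softplus_inv is the exact inverse of softplus from Example 12: log(exp(log(1 + e^x)) - 1) = log(e^x) = x. An illustrative round-trip check (input values are arbitrary assumptions):

from mxnet import ndarray as nd
x = nd.array([0.1, 1.0, 5.0])
print(softplus_inv(softplus(x)))  # recovers approximately [0.1, 1.0, 5.0]

Note that both naive forms overflow once x is large enough that nd.exp(x) exceeds the float32 range (x above roughly 88).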
Example 14: softplus_inv_numpy
# Required import: from mxnet import ndarray as nd
# Or: from mxnet.ndarray import exp
# Also requires: import numpy as np

def softplus_inv_numpy(x):
    return np.log(np.exp(x) - 1.)