当前位置: 首页>>代码示例>>Python>>正文


Python ndarray.exp方法代码示例

本文整理汇总了Python中mxnet.ndarray.exp方法的典型用法代码示例。如果您正苦于以下问题:Python ndarray.exp方法的具体用法?Python ndarray.exp怎么用?Python ndarray.exp使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在mxnet.ndarray的用法示例。


在下文中一共展示了ndarray.exp方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: synthetic_grad

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    """Gradient of the negative log posterior for the two-component
    synthetic Gaussian-mixture example (SGLD/BDK demo).

    Parameters
    ----------
    X : numpy array of observations.
    theta : NDArray of shape (2,), the two mixture parameters.
    sigma1, sigma2 : prior std-devs of theta[0] and theta[1].
    sigmax : observation noise std-dev.
    rescale_grad : scale applied to the likelihood term (e.g. N/batch).
    grad : optional output NDArray; allocated on theta's context if None.

    Returns
    -------
    grad : NDArray of shape (2,) holding d(-log posterior)/d(theta).
    """
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    # asnumpy() is a device->host sync; do it once instead of per element.
    theta_npy = theta.asnumpy()
    theta1 = theta_npy[0]
    theta2 = theta_npy[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    # The two Gaussian kernels are reused by both gradient components;
    # compute each exactly once.
    k1 = numpy.exp(-(X - theta1) ** 2 / (2 * vx))
    k2 = numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx))
    denominator = k1 + k2
    grad_npy = numpy.zeros(theta.shape)
    # Likelihood gradient (summed over data) plus Gaussian prior gradient.
    grad_npy[0] = -rescale_grad * ((k1 * (X - theta1) / vx
                                    + k2 * (X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta1 / v1
    grad_npy[1] = -rescale_grad * ((k2 * (X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta2 / v2
    grad[:] = grad_npy
    return grad
开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:22,代码来源:bdk_demo.py

示例2: forward

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def forward(self, in_data, out_data):
        """Write the row-wise softmax of in_data[0] into out_data[0] (float32)."""
        logits = in_data[0]
        out = out_data[0]
        rows = logits.shape[0]
        # Subtract each row's max before exponentiating for numerical stability.
        row_max = logits.max(axis=1).reshape((rows, 1))
        exp_shifted = numpy.exp(logits - row_max).astype('float32')
        out[:] = exp_shifted
        out /= exp_shifted.sum(axis=1).reshape((rows, 1))
开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:7,代码来源:bdk_demo.py

示例3: backward

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def backward(self, out_grad, in_data, out_data, in_grad):
        """Write the input gradient exp(out) - label into in_grad[0] (float32).

        NOTE(review): exp() is applied to out_data[0], which matches a
        log-probability output — confirm against the paired forward pass.
        """
        labels = in_data[1]
        probs = numpy.exp(out_data[0])
        in_grad[0][:] = (probs - labels).astype('float32')
开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:7,代码来源:bdk_demo.py

示例4: regression_student_grad

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
    """Gradients of the distillation loss for a heteroscedastic regression
    student: student_outputs[0] is the predicted mean, student_outputs[1]
    the predicted log-variance.  Returns [d_loss/d_mean, d_loss/d_logvar].
    """
    mean_pred = student_outputs[0]
    log_var = student_outputs[1]
    inv_var = nd.exp(-log_var)
    residual = mean_pred - teacher_pred
    grad_mean = inv_var * residual
    grad_var = (1 - inv_var * (nd.square(residual)
                               + 1.0 / teacher_noise_precision)) / 2
    return [grad_mean, grad_var]
开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:10,代码来源:bdk_demo.py

示例5: log_sum_exp

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def log_sum_exp(vec):
    """Numerically stable log(sum(exp(vec))): shift by the max first."""
    shift = nd.max(vec).asscalar()
    return nd.log(nd.sum(nd.exp(vec - shift))) + shift

# Model 
开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:7,代码来源:lstm_crf.py

示例6: _forward_alg

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def _forward_alg(self, feats):
        """CRF forward algorithm: log partition function over all tag paths.

        Args:
            feats: per-token emission scores; presumably shape
                (seq_len, tagset_size) — confirm against the caller.

        Returns:
            Scalar NDArray: log-sum-exp of every tag sequence's total score,
            i.e. the normalizer used in the CRF negative log-likelihood.
        """
        # Do the forward algorithm to compute the partition function
        # Start with all tags at -10000 (log-space "impossible") except START.
        alphas = [[-10000.] * self.tagset_size]
        alphas[0][self.tag2idx[START_TAG]] = 0.
        alphas = nd.array(alphas)

        # Iterate through the sentence
        for feat in feats:
            alphas_t = []  # The forward variables at this timestep
            for next_tag in range(self.tagset_size):
                # broadcast the emission score: it is the same regardless of
                # the previous tag
                emit_score = feat[next_tag].reshape((1, -1))
                # the ith entry of trans_score is the score of transitioning to
                # next_tag from i
                trans_score = self.transitions.data()[next_tag].reshape((1, -1))
                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp
                next_tag_var = alphas + trans_score + emit_score
                # The forward variable for this tag is log-sum-exp of all the
                # scores.
                alphas_t.append(log_sum_exp(next_tag_var))
            alphas = nd.concat(*alphas_t, dim=0).reshape((1, -1))
        # Fold in the transition to STOP and reduce to the final scalar.
        terminal_var = alphas + self.transitions.data()[self.tag2idx[STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
        return alpha 
开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:28,代码来源:lstm_crf.py

示例7: test_unary_func

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def test_unary_func():
    """Check autograd gradients of exp, x/2 and x**2 on every storage type."""
    def check_unary_func(x):
        # d/dx exp(x) = exp(x)
        autograd_assert(x, func=lambda v: nd.exp(v),
                        grad_func=lambda v: [nd.exp(v)])
        # d/dx (x/2) = 0.5 everywhere
        autograd_assert(x, func=lambda v: v/2,
                        grad_func=lambda v: [nd.ones(v.shape) * 0.5])
        # d/dx x**2 = 2x
        autograd_assert(x, func=lambda v: v**2,
                        grad_func=lambda v: [2*v])
    uniform = nd.uniform(shape=(4, 5))
    for stype in ['default', 'row_sparse', 'csr']:
        check_unary_func(uniform.tostype(stype))
开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:17,代码来源:test_autograd.py

示例8: test_gradient

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def test_gradient():
    """Second-order autograd check on z = exp(x) + x at x = 1."""
    x = mx.nd.ones((1,))
    x.attach_grad()

    with mx.autograd.record():
        z = mx.nd.elemwise_add(mx.nd.exp(x), x)
    # First derivative dz/dx = exp(1) + 1; keep the graph for a second pass.
    (dx,) = mx.autograd.grad(z, [x], create_graph=True)
    assert abs(dx.asscalar() - 3.71828175) < 1e-7
    # Backprop through dx: second derivative d2z/dx2 = exp(1).
    dx.backward()
    assert abs(x.grad.asscalar() - 2.71828175) < 1e-7
开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:12,代码来源:test_autograd.py

示例9: test_unary_func

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def test_unary_func():
    """Verify autograd gradients for exp, halving and squaring on a dense input."""
    sample = nd.uniform(shape=(4, 5))
    cases = [
        (lambda v: nd.exp(v), lambda v: [nd.exp(v)]),            # d exp = exp
        (lambda v: v/2, lambda v: [nd.ones(v.shape) * 0.5]),     # d (x/2) = 0.5
        (lambda v: v**2, lambda v: [2*v]),                       # d x^2 = 2x
    ]
    for func, grad_func in cases:
        autograd_assert(sample, func=func, grad_func=grad_func)
开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:13,代码来源:test_contrib_autograd.py

示例10: exp

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def exp(input):
    """Element-wise exponential; thin backend wrapper around ``nd.exp``.

    The parameter name ``input`` shadows the Python builtin, but it is part
    of the public backend API signature, so it is kept as-is.
    """
    return nd.exp(input)
开发者ID:dmlc,项目名称:dgl,代码行数:4,代码来源:tensor.py

示例11: logsigmoid

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def logsigmoid(val):
    """Numerically stable log(sigmoid(val)) = -log(1 + exp(-val))."""
    # Shift both exponents by max(0, -val) so neither can overflow.
    shift = nd.maximum(0., -val)
    total = nd.exp(-shift) + nd.exp(-val - shift)
    return -(shift + nd.log(total))
开发者ID:dmlc,项目名称:dgl,代码行数:6,代码来源:tensor_models.py

示例12: softplus

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def softplus(x):
    """Numerically stable softplus: log(1 + exp(x)).

    The naive form overflows to inf once exp(x) exceeds the float range
    (x >~ 88 in float32).  The shifted identity
    max(x, 0) + log1p(exp(-|x|)) is algebraically equal and bounded for
    all x; log1p also keeps precision when exp(-|x|) is tiny.
    """
    return nd.maximum(x, 0.) + nd.log1p(nd.exp(-nd.abs(x)))
开发者ID:amzn,项目名称:xfer,代码行数:4,代码来源:utils.py

示例13: softplus_inv

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def softplus_inv(x):
    """Inverse of softplus: log(exp(x) - 1).

    Uses expm1 so that small positive x — where exp(x) - 1 suffers
    catastrophic cancellation — keeps full floating-point precision.
    """
    return nd.log(nd.expm1(x))
开发者ID:amzn,项目名称:xfer,代码行数:4,代码来源:utils.py

示例14: softplus_inv_numpy

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def softplus_inv_numpy(x):
    """Inverse of softplus in NumPy: log(exp(x) - 1).

    np.expm1 avoids the catastrophic cancellation of exp(x) - 1 for small
    positive x, where the naive form loses nearly all significant digits.
    """
    return np.log(np.expm1(x))
开发者ID:amzn,项目名称:xfer,代码行数:4,代码来源:utils.py

示例15: log_sum_exp

# 需要导入模块: from mxnet import ndarray [as 别名]
# 或者: from mxnet.ndarray import exp [as 别名]
def log_sum_exp(vec):
    """Stable log-sum-exp reduction: subtract the max before exponentiating."""
    offset = nd.max(vec).asscalar()
    shifted = nd.exp(vec - offset)
    return nd.log(nd.sum(shifted)) + offset
开发者ID:fierceX,项目名称:NER_BiLSTM_CRF_Chinese,代码行数:5,代码来源:model.py


注:本文中的mxnet.ndarray.exp方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。