當前位置: 首頁>>代碼示例>>Python>>正文


Python ndarray.exp方法代碼示例

本文整理匯總了Python中mxnet.ndarray.exp方法的典型用法代碼示例。如果您正苦於以下問題:Python ndarray.exp方法的具體用法?Python ndarray.exp怎麽用?Python ndarray.exp使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在mxnet.ndarray的用法示例。


在下文中一共展示了ndarray.exp方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: synthetic_grad

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    """Gradient of the negative log-posterior for a synthetic two-component
    Gaussian-mixture model with parameters ``theta = (theta1, theta2)``.

    Parameters
    ----------
    X : numpy array of observations.
    theta : NDArray of shape (2,) holding the current parameter values.
    sigma1, sigma2 : prior standard deviations for theta[0] and theta[1].
    sigmax : observation-noise standard deviation.
    rescale_grad : multiplier applied to the data (likelihood) term.
    grad : optional pre-allocated NDArray the result is written into;
        allocated on ``theta``'s context when omitted.

    Returns
    -------
    grad : NDArray of shape (2,) holding the gradient.
    """
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    # Copy theta to host ONCE: each asnumpy() call is a separate
    # device-to-host synchronization, so the original double call paid
    # the transfer cost twice.
    theta_npy = theta.asnumpy()
    theta1 = theta_npy[0]
    theta2 = theta_npy[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    # Sum of the two (unnormalized) Gaussian component densities at each
    # observation; normalizes the per-component responsibilities below.
    denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
        -(X - theta1 - theta2) ** 2 / (2 * vx))
    grad_npy = numpy.zeros(theta.shape)
    # d/d(theta1): both mixture components depend on theta1;
    # the trailing theta1 / v1 term is the Gaussian prior's gradient.
    grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
                                    + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
                                    X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta1 / v1
    # d/d(theta2): only the second component (mean theta1 + theta2) depends
    # on theta2.
    grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
    X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta2 / v2
    grad[:] = grad_npy
    return grad
開發者ID:awslabs,項目名稱:dynamic-training-with-apache-mxnet-on-aws,代碼行數:22,代碼來源:bdk_demo.py

示例2: forward

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def forward(self, in_data, out_data):
    """Numerically stable row-wise softmax.

    Reads logits from ``in_data[0]`` and writes float32 probabilities into
    the pre-allocated ``out_data[0]`` in place (each row sums to 1).
    """
    logits = in_data[0]
    probs = out_data[0]
    # Subtract the per-row maximum before exponentiating so exp() cannot
    # overflow; the shift cancels out after normalization.
    row_max = logits.max(axis=1).reshape((logits.shape[0], 1))
    probs[:] = numpy.exp(logits - row_max).astype('float32')
    probs /= probs.sum(axis=1).reshape((logits.shape[0], 1))
開發者ID:awslabs,項目名稱:dynamic-training-with-apache-mxnet-on-aws,代碼行數:7,代碼來源:bdk_demo.py

示例3: backward

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def backward(self, out_grad, in_data, out_data, in_grad):
    """Write the operator's input gradient into ``in_grad[0]`` in place.

    ``in_data[1]`` supplies the labels and ``out_data[0]`` the stored
    forward output.

    NOTE(review): the gradient is ``exp(y) - l``. If the paired forward
    pass stores probabilities (not log-probabilities), the standard
    softmax-cross-entropy gradient would be ``y - l`` — confirm against
    the forward implementation before changing anything.
    """
    labels = in_data[1]
    outputs = out_data[0]
    grad_in = in_grad[0]
    grad_in[:] = (numpy.exp(outputs) - labels).astype('float32')
開發者ID:awslabs,項目名稱:dynamic-training-with-apache-mxnet-on-aws,代碼行數:7,代碼來源:bdk_demo.py

示例4: regression_student_grad

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
    """Gradients of the student network's regression distillation loss.

    ``student_outputs`` holds the predicted mean head and a second head
    that is exponentiated as ``exp(-v)`` (presumably a log-variance —
    confirm with the training loop). Returns ``[grad_mean, grad_var]``
    in that order.
    """
    mean_out = student_outputs[0]
    var_out = student_outputs[1]
    # exp(-v) plays the role of a precision weight on the residual.
    precision = nd.exp(-var_out)
    residual = mean_out - teacher_pred
    grad_mean = precision * residual
    grad_var = (1 - precision * (nd.square(residual)
                                 + 1.0 / teacher_noise_precision)) / 2
    return [grad_mean, grad_var]
開發者ID:awslabs,項目名稱:dynamic-training-with-apache-mxnet-on-aws,代碼行數:10,代碼來源:bdk_demo.py

示例5: log_sum_exp

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def log_sum_exp(vec):
    """Numerically stable log(sum(exp(vec))) using the max-shift trick."""
    # Shifting by the maximum keeps every exponent <= 0, preventing overflow;
    # the shift is added back outside the log, so the result is unchanged.
    shift = nd.max(vec).asscalar()
    total = nd.sum(nd.exp(vec - shift))
    return nd.log(total) + shift

# Model 
開發者ID:awslabs,項目名稱:dynamic-training-with-apache-mxnet-on-aws,代碼行數:7,代碼來源:lstm_crf.py

示例6: _forward_alg

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def _forward_alg(self, feats):
        """Run the CRF forward algorithm over one sentence.

        Parameters
        ----------
        feats : iterable of per-token emission score vectors, each with
            ``self.tagset_size`` entries.

        Returns
        -------
        NDArray scalar: the log partition function (log Z) summed over all
        tag sequences, i.e. the normalizer of the CRF log-likelihood.
        """
        # Do the forward algorithm to compute the partition function
        alphas = [[-10000.] * self.tagset_size]
        # Only START_TAG carries non-negligible probability mass before the
        # first token; -10000 acts as "log 0" for every other tag.
        alphas[0][self.tag2idx[START_TAG]] = 0.
        alphas = nd.array(alphas)

        # Iterate through the sentence
        for feat in feats:
            alphas_t = []  # The forward variables at this timestep
            for next_tag in range(self.tagset_size):
                # broadcast the emission score: it is the same regardless of
                # the previous tag
                emit_score = feat[next_tag].reshape((1, -1))
                # the ith entry of trans_score is the score of transitioning to
                # next_tag from i
                trans_score = self.transitions.data()[next_tag].reshape((1, -1))
                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp
                next_tag_var = alphas + trans_score + emit_score
                # The forward variable for this tag is log-sum-exp of all the
                # scores.
                alphas_t.append(log_sum_exp(next_tag_var))
            alphas = nd.concat(*alphas_t, dim=0).reshape((1, -1))
        # Close every path with the transition into STOP_TAG, then reduce to
        # the final scalar normalizer.
        terminal_var = alphas + self.transitions.data()[self.tag2idx[STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
        return alpha
開發者ID:awslabs,項目名稱:dynamic-training-with-apache-mxnet-on-aws,代碼行數:28,代碼來源:lstm_crf.py

示例7: test_unary_func

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def test_unary_func():
    """Check autograd gradients of exp, halving, and squaring for every
    storage type (dense, row_sparse, csr)."""
    def check_unary_func(x):
        # d/dx exp(x) = exp(x)
        autograd_assert(x, func=lambda v: nd.exp(v),
                        grad_func=lambda v: [nd.exp(v)])
        # d/dx (x / 2) = 1/2
        autograd_assert(x, func=lambda v: v / 2,
                        grad_func=lambda v: [nd.ones(v.shape) * 0.5])
        # d/dx x**2 = 2x
        autograd_assert(x, func=lambda v: v ** 2,
                        grad_func=lambda v: [2 * v])
    uniform = nd.uniform(shape=(4, 5))
    for stype in ('default', 'row_sparse', 'csr'):
        check_unary_func(uniform.tostype(stype))
開發者ID:awslabs,項目名稱:dynamic-training-with-apache-mxnet-on-aws,代碼行數:17,代碼來源:test_autograd.py

示例8: test_gradient

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def test_gradient():
    """Check first- and second-order autograd for z = exp(x) + x at x = 1."""
    val = mx.nd.ones((1,))
    val.attach_grad()
    with mx.autograd.record():
        expd = mx.nd.exp(val)
        z = mx.nd.elemwise_add(expd, val)
    # dz/dx = exp(x) + 1 = e + 1 at x = 1; create_graph keeps the gradient
    # graph alive so it can be differentiated again below.
    dz_dx, = mx.autograd.grad(z, [val], create_graph=True)
    assert abs(dz_dx.asscalar() - 3.71828175) < 1e-7
    # Backprop through the gradient graph: d2z/dx2 = exp(x) = e at x = 1.
    dz_dx.backward()
    assert abs(val.grad.asscalar() - 2.71828175) < 1e-7
開發者ID:awslabs,項目名稱:dynamic-training-with-apache-mxnet-on-aws,代碼行數:12,代碼來源:test_autograd.py

示例9: test_unary_func

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def test_unary_func():
    """Check autograd gradients of exp, halving, and squaring on a random
    dense input."""
    data = nd.uniform(shape=(4, 5))
    cases = [
        (lambda v: nd.exp(v), lambda v: [nd.exp(v)]),            # d exp = exp
        (lambda v: v / 2, lambda v: [nd.ones(v.shape) * 0.5]),   # d (x/2) = 1/2
        (lambda v: v ** 2, lambda v: [2 * v]),                   # d x**2 = 2x
    ]
    for fn, grad_fn in cases:
        autograd_assert(data, func=fn, grad_func=grad_fn)
開發者ID:awslabs,項目名稱:dynamic-training-with-apache-mxnet-on-aws,代碼行數:13,代碼來源:test_contrib_autograd.py

示例10: exp

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def exp(input):
    """Element-wise exponential of *input*, delegating to mxnet.ndarray.exp."""
    result = nd.exp(input)
    return result
開發者ID:dmlc,項目名稱:dgl,代碼行數:4,代碼來源:tensor.py

示例11: logsigmoid

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def logsigmoid(val):
    """Numerically stable log(sigmoid(val)).

    Uses log(sigmoid(v)) = -(m + log(e^{-m} + e^{-v-m})) with
    m = max(0, -v), so both exponents are non-positive and exp()
    cannot overflow for very negative inputs.
    """
    shift = nd.maximum(0., -val)
    stable_sum = nd.exp(-shift) + nd.exp(-val - shift)
    return -(shift + nd.log(stable_sum))
開發者ID:dmlc,項目名稱:dgl,代碼行數:6,代碼來源:tensor_models.py

示例12: softplus

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def softplus(x):
    """Numerically stable softplus, log(1 + exp(x)).

    The naive form ``nd.log(1. + nd.exp(x))`` overflows to inf for large
    positive x. The equivalent identity
    ``log(1 + exp(x)) = max(x, 0) + log(1 + exp(-|x|))``
    keeps the exponent non-positive, so exp() never overflows.
    """
    return nd.maximum(x, 0.) + nd.log(1. + nd.exp(-nd.abs(x)))
開發者ID:amzn,項目名稱:xfer,代碼行數:4,代碼來源:utils.py

示例13: softplus_inv

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def softplus_inv(x):
    """Inverse of softplus: log(exp(x) - 1), defined for x > 0.

    Rewritten via the identity ``log(exp(x) - 1) = x + log(1 - exp(-x))``
    so that exp() never overflows for large x (the naive ``nd.exp(x)``
    saturates to inf, making the result inf instead of ~x).
    """
    return x + nd.log(1. - nd.exp(-x))
開發者ID:amzn,項目名稱:xfer,代碼行數:4,代碼來源:utils.py

示例14: softplus_inv_numpy

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def softplus_inv_numpy(x):
    """Inverse of softplus: log(exp(x) - 1), defined for x > 0.

    Uses the identity ``log(exp(x) - 1) = x + log1p(-exp(-x))``: exp(-x)
    cannot overflow, so large inputs return ~x instead of inf, and log1p
    preserves precision when exp(-x) is tiny.
    """
    return x + np.log1p(-np.exp(-x))
開發者ID:amzn,項目名稱:xfer,代碼行數:4,代碼來源:utils.py

示例15: log_sum_exp

# 需要導入模塊: from mxnet import ndarray [as 別名]
# 或者: from mxnet.ndarray import exp [as 別名]
def log_sum_exp(vec):
    """Stable log-sum-exp: shift by the maximum so exp() stays bounded."""
    peak = nd.max(vec).asscalar()
    shifted_total = nd.sum(nd.exp(vec - peak))
    # Adding the peak back outside the log restores the unshifted value.
    return nd.log(shifted_total) + peak
開發者ID:fierceX,項目名稱:NER_BiLSTM_CRF_Chinese,代碼行數:5,代碼來源:model.py


注:本文中的mxnet.ndarray.exp方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。