

Python optimize.check_grad Method Code Examples

This article collects typical usage examples of the Python method scipy.optimize.check_grad. If you have been wondering what optimize.check_grad does, how to call it, or what real-world usage looks like, the curated examples below should help: check_grad compares an analytical gradient function against a finite-difference approximation of its objective and returns the 2-norm of the difference between the two. You can also explore further usage examples from the scipy.optimize module.


The following presents 15 code examples of the optimize.check_grad method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
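Before working through the examples, it may help to see the call pattern in its simplest form. The sketch below is illustrative only (the objective func, gradient grad, and test point x0 are made up for this article, not drawn from the projects cited below): check_grad evaluates grad(x0) and a finite-difference approximation of func around x0, and returns the 2-norm of their difference, so a result near zero means the analytical gradient is correct.

import numpy as np
from scipy.optimize import check_grad

def func(x):
    # Simple scalar objective: f(x) = ||x||^2
    return np.sum(x ** 2)

def grad(x):
    # Analytical gradient of f
    return 2 * x

x0 = np.array([1.5, -0.3, 2.0])

# A correct gradient yields an error on the order of the
# finite-difference step (~1e-8 here), far below the tolerance.
err = check_grad(func, grad, x0)
assert err < 1e-5

Any extra positional arguments after x0 are forwarded to both func and grad; Example 2 below uses this to pass X, y, and the remaining loss parameters straight through check_grad.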

Example 1: test_gradient

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_gradient():
    # Test gradient of Kullback-Leibler divergence.
    random_state = check_random_state(0)

    n_samples = 50
    n_features = 2
    n_components = 2
    alpha = 1.0

    distances = random_state.randn(n_samples, n_features).astype(np.float32)
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)

    P = _joint_probabilities(distances, desired_perplexity=25.0,
                             verbose=0)

    def fun(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[0]

    def grad(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[1]

    assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
                        decimal=5) 
Author: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 27 | Source: test_t_sne.py

Example 2: test_huber_gradient

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_huber_gradient():
    # Test that the gradient calculated by _huber_loss_and_gradient is correct
    rng = np.random.RandomState(1)
    X, y = make_regression_with_outliers()
    sample_weight = rng.randint(1, 3, (y.shape[0]))

    def loss_func(x, *args):
        return _huber_loss_and_gradient(x, *args)[0]

    def grad_func(x, *args):
        return _huber_loss_and_gradient(x, *args)[1]

    # Check using optimize.check_grad that the gradients are equal.
    for _ in range(5):
        # Check both with and without fit_intercept.
        for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
            w = rng.randn(n_features)
            w[-1] = np.abs(w[-1])
            grad_same = optimize.check_grad(
                loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
            assert_almost_equal(grad_same, 1e-6, 4) 
Author: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 23 | Source: test_huber.py

Example 3: test_finite_differences

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_finite_differences():
    """Test gradient of loss function

    Assert that the gradient is almost equal to its finite differences
    approximation.
    """
    # Initialize the transformation `M`, as well as `X` and `y` and `NCA`
    rng = np.random.RandomState(42)
    X, y = make_classification()
    M = rng.randn(rng.randint(1, X.shape[1] + 1),
                  X.shape[1])
    nca = NeighborhoodComponentsAnalysis()
    nca.n_iter_ = 0
    mask = y[:, np.newaxis] == y[np.newaxis, :]

    def fun(M):
        return nca._loss_grad_lbfgs(M, X, mask)[0]

    def grad(M):
        return nca._loss_grad_lbfgs(M, X, mask)[1]

    # compute relative error
    rel_diff = check_grad(fun, grad, M.ravel()) / np.linalg.norm(grad(M))
    np.testing.assert_almost_equal(rel_diff, 0., decimal=5) 
Author: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 26 | Source: test_nca.py

Example 4: test_dtw_hessian

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_dtw_hessian(operator):
    theta = make_data()
    Z = np.random.randn(*theta.shape)

    def func(X):
        X = X.reshape(theta.shape)
        _, grad, _, _ = dtw_grad(X, operator=operator)
        return np.sum(grad * Z)

    def grad(X):
        X = X.reshape(theta.shape)
        v, H = dtw_hessian_prod(X, Z, operator=operator)
        return H.ravel()

    # check_grad does not work with ndarray of dim > 2
    err = check_grad(func, grad, theta.ravel())
    assert err < 1e-6 
Author: arthurmensch | Project: didyprog | Lines: 19 | Source: test_dtw.py

Example 5: test_viterbi_grad

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_viterbi_grad(operator):
    states, emissions, theta = make_data()
    theta /= 100

    def func(X):
        X = X.reshape(theta.shape)
        return viterbi_value(X, operator=operator)

    def grad(X):
        X = X.reshape(theta.shape)
        _, grad, _, _ = viterbi_grad(X, operator=operator)
        return grad.ravel()

    # check_grad does not work with ndarray of dim > 2
    err = check_grad(func, grad, theta.ravel())
    if operator == 'sparsemax':
        assert err < 1e-4
    else:
        assert err < 1e-6 
Author: arthurmensch | Project: didyprog | Lines: 21 | Source: test_viterbi.py

Example 6: test_viterbi_hessian

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_viterbi_hessian(operator):
    states, emissions, theta = make_data()

    theta /= 100
    Z = np.random.randn(*theta.shape)

    def func(X):
        X = X.reshape(theta.shape)
        _, grad, _, _ = viterbi_grad(X, operator=operator)
        return np.sum(grad * Z)

    def grad(X):
        X = X.reshape(theta.shape)
        _, H = viterbi_hessian_prod(X, Z, operator=operator)
        return H.ravel()

    # check_grad does not work with ndarray of dim > 2
    err = check_grad(func, grad, theta.ravel())
    if operator == 'sparsemax':
        assert err < 1e-4
    else:
        assert err < 1e-6 
Author: arthurmensch | Project: didyprog | Lines: 24 | Source: test_viterbi.py

Example 7: test_ModelHawkesSumExpKernLogLik_hessian

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_ModelHawkesSumExpKernLogLik_hessian(self):
        """...Numerical consistency check of hessian for Hawkes loglik
        """
        for model in [self.model]:
            hessian = model.hessian(self.coeffs).todense()
            # Check that hessian is equal to its transpose
            np.testing.assert_array_almost_equal(hessian, hessian.T,
                                                 decimal=10)

            np.set_printoptions(precision=3, linewidth=200)

            # Check that, for each dimension, the hessian row is consistent
            # with the corresponding gradient coordinate.
            for i in range(model.n_coeffs):

                def g_i(x):
                    return model.grad(x)[i]

                def h_i(x):
                    h = model.hessian(x).todense()
                    return np.asarray(h)[i, :]

                self.assertLess(check_grad(g_i, h_i, self.coeffs), 1e-5) 
Author: X-DataInitiative | Project: tick | Lines: 25 | Source: model_hawkes_sumexpkern_loglik_test.py

Example 8: test_ModelHawkesExpKernLogLik_hessian

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_ModelHawkesExpKernLogLik_hessian(self):
        """...Numerical consistency check of hessian for Hawkes loglik
        """
        for model in [self.model]:
            hessian = model.hessian(self.coeffs).todense()
            # Check that hessian is equal to its transpose
            np.testing.assert_array_almost_equal(hessian, hessian.T,
                                                 decimal=10)

            # Check that, for each dimension, the hessian row is consistent
            # with the corresponding gradient coordinate.
            for i in range(model.n_coeffs):

                def g_i(x):
                    return model.grad(x)[i]

                def h_i(x):
                    h = model.hessian(x).todense()
                    return np.asarray(h)[i, :]

                self.assertLess(check_grad(g_i, h_i, self.coeffs), 1e-5) 
Author: X-DataInitiative | Project: tick | Lines: 23 | Source: model_hawkes_expkern_loglik_test.py

Example 9: test_ModelHawkesExpKernLeastSqHess

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_ModelHawkesExpKernLeastSqHess(self):
        """...Numerical consistency check of hessian for Hawkes contrast
        """
        for model in [self.model, self.model_list]:
            # This hessian is independent of x, but for generality we still
            # pass the coefficients as an (effectively unused) argument.
            hessian = model.hessian(self.coeffs).todense()

            # Check that hessian is equal to its transpose
            np.testing.assert_array_almost_equal(hessian, hessian.T,
                                                 decimal=10)

            # Check that, for each dimension, the hessian row is consistent
            # with the corresponding gradient coordinate.
            for i in range(model.n_coeffs):

                def g_i(x):
                    return model.grad(x)[i]

                def h_i(x):
                    return np.asarray(hessian)[i, :]

                self.assertLess(check_grad(g_i, h_i, self.coeffs), 1e-5) 
Author: X-DataInitiative | Project: tick | Lines: 25 | Source: model_hawkes_expkern_leastsq_test.py

Example 10: test_model_hawkes_varying_baseline_least_sq_grad

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_model_hawkes_varying_baseline_least_sq_grad(self):
        """...Test that ModelHawkesExpKernLeastSq gradient is consistent
        with loss
        """
        for model in [self.model, self.model_list]:
            model.period_length = 1.
            model.n_baselines = 3
            coeffs = np.random.rand(model.n_coeffs)

            self.assertLess(check_grad(model.loss, model.grad, coeffs), 1e-5)

            coeffs_min = fmin_bfgs(model.loss, coeffs, fprime=model.grad,
                                   disp=False)

            self.assertAlmostEqual(
                norm(model.grad(coeffs_min)), .0, delta=1e-4) 
Author: X-DataInitiative | Project: tick | Lines: 18 | Source: model_hawkes_sumexpkern_leastsq_test.py

Example 11: test_check_grad

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_check_grad():
    # Verify if check_grad is able to estimate the derivative of the
    # logistic function.

    def logit(x):
        return 1 / (1 + np.exp(-x))

    def der_logit(x):
        return np.exp(-x) / (1 + np.exp(-x))**2

    x0 = np.array([1.5])

    r = optimize.check_grad(logit, der_logit, x0)
    assert_almost_equal(r, 0)

    r = optimize.check_grad(logit, der_logit, x0, epsilon=1e-6)
    assert_almost_equal(r, 0)

    # Check if the epsilon parameter is being considered.
    r = abs(optimize.check_grad(logit, der_logit, x0, epsilon=1e-1) - 0)
    assert_(r > 1e-7) 
Author: Relph1119 | Project: GraphicDesignPatternByPython | Lines: 23 | Source: test_optimize.py

Example 12: test_non_linear_model_mean_gradient

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_non_linear_model_mean_gradient(self, non_linear_model):
        """
        Check the gradient of the mean prediction is correct
        """

        np.random.seed(1234)
        x0 = np.random.rand(2)

        # wrap function so fidelity index doesn't change
        def wrap_func(x):
            x_full = np.concatenate([x[None, :], [[2]]], axis=1)
            return non_linear_model.predict(x_full)[0]

        def wrap_gradients(x):
            x_full = np.concatenate([x[None, :], [[2]]], axis=1)
            return non_linear_model.get_prediction_gradients(x_full)[0]
        assert np.all(check_grad(wrap_func, wrap_gradients, x0) < 1e-6) 
Author: amzn | Project: emukit | Lines: 19 | Source: test_non_linear_models.py

Example 13: test_non_linear_model_variance_gradient

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_non_linear_model_variance_gradient(self, non_linear_model):
        """
        Check the gradient of the predictive variance is correct
        """

        np.random.seed(1234)
        x0 = np.random.rand(2)

        # wrap function so fidelity index doesn't change
        def wrap_func(x):
            x_full = np.concatenate([x[None, :], [[2]]], axis=1)
            return non_linear_model.predict(x_full)[1]

        def wrap_gradients(x):
            x_full = np.concatenate([x[None, :], [[2]]], axis=1)
            return non_linear_model.get_prediction_gradients(x_full)[1]

        assert np.all(check_grad(wrap_func, wrap_gradients, x0) < 1e-6) 
Author: amzn | Project: emukit | Lines: 20 | Source: test_non_linear_models.py

Example 14: test_simple

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def test_simple():
    T = 100
    L = 10
    S = T - L + 1
    x = np.random.random(T)
    z = np.random.random(S)
    d = np.random.random(L)

    def func(d0):
        xr = signal.convolve(z, d0)
        residual = x - xr
        return .5 * np.sum(residual * residual)

    def grad(d0):
        xr = signal.convolve(z, d0)
        residual = x - xr
        grad_d = - signal.convolve(residual, z[::-1], mode='valid')
        return grad_d

    error = optimize.check_grad(func, grad, d, epsilon=1e-8)
    assert error < 1e-4, "Gradient is incorrect: {:.4e}".format(error)
Author: alphacsc | Project: alphacsc | Lines: 23 | Source: test_update_d_multi.py

Example 15: check_density

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import check_grad [as alias]
def check_density(density, tol=1e-6, n_test=10, rng=None):
    if rng is None:
        rng = np.random.RandomState(0)
    Y = rng.randn(n_test)

    def score(Y):
        return density.score_and_der(Y)[0]

    def score_der(Y):
        return density.score_and_der(Y)[1]

    err_msgs = ['score', 'score derivative']
    for f, fprime, err_msg in zip([density.log_lik, score], [score, score_der],
                                  err_msgs):
        for y in Y:
            err = check_grad(f, fprime, np.array([y]))
            assert_allclose(err, 0, atol=tol, rtol=0,
                            err_msg='Wrong %s' % err_msg) 
Author: pierreablin | Project: picard | Lines: 20 | Source: densities.py


Note: the scipy.optimize.check_grad method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.