

Python optimize.check_grad Function Code Examples

This article collects typical usage examples of the scipy.optimize.check_grad function in Python. If you are wondering what check_grad does, how to call it, or what real-world usage looks like, the curated examples below should help.


The 15 code examples of check_grad shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code examples.
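Before the examples, here is a minimal, self-contained sketch of the typical call pattern (the quadratic cost and matrix A below are purely illustrative and are not taken from any of the showcased projects): check_grad takes a scalar cost function, its analytic gradient, the point x0 at which to compare them, and any extra positional arguments, which are forwarded to both callables. It returns the 2-norm of the difference between the analytic gradient and a finite-difference approximation; a value close to zero (typically around 1e-7 or smaller for a well-scaled problem like this one) suggests the gradient implementation is consistent with the cost.

import numpy as np
from scipy.optimize import check_grad

def cost(x, A):
    # simple quadratic cost: 0.5 * x^T A x
    return 0.5 * x.dot(A).dot(x)

def grad(x, A):
    # analytic gradient of the quadratic cost (A is symmetric)
    return A.dot(x)

A = np.array([[3.0, 1.0],
              [1.0, 2.0]])      # symmetric positive-definite matrix
x0 = np.array([1.0, -2.0])      # point at which the gradient is checked

# extra positional arguments (here A) are forwarded to both cost and grad
err = check_grad(cost, grad, x0, A)
print("gradient check error: %g" % err)   # expected to be ~1e-7 or smaller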

Example 1: test_back_prop_with_diff_grad_checks

    def test_back_prop_with_diff_grad_checks(self, iter=200):
        init_val = self.packTheta(self.W1, self.b1, self.W2, self.b2)

        # Gradient check at the initial parameters
        err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
        print("Error after 0 iterations: %f, Error per Param: %f" % (err, err / init_val.size))

        # Run four rounds of L-BFGS-B and re-check the gradient at the
        # optimized parameters after each round
        for rounds in range(1, 5):
            res = optimize.minimize(fun=self.cost, x0=init_val, args=(self.X,),
                                    jac=self.cost_prime, method='L-BFGS-B',
                                    options={'maxiter': iter})
            self.W1, self.b1, self.W2, self.b2 = self.unpackTheta(res.x)
            init_val = res.x

            err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
            print("Error after %d iterations: %f, Error per Param: %f"
                  % (rounds * iter, err, err / init_val.size))
Author: thushv89, Project: AutoEncorder_Simple, Lines: 29, Source: autoencoder2_l_bfgs_sparcity.py

Example 2: fit

 def fit(self,X,y,initParams = None):
     self.params = np.zeros([1,X.shape[1]+1])
     self.labels = np.unique(y)
     X_nopad = X
     X = np.pad(X,((0,0),(1,0)),mode='constant',constant_values=1)
     
     #print self.cost(self.params,X, y)
     
     if initParams is None:
         init = np.random.random(self.params.size)
         #init = np.zeros(self.params.size)
     else:
         init = initParams
     
     if DEBUG:
         _epsilon = np.sqrt(np.finfo(float).eps)
         #print approx_fprime(self.params[0], self.cost, _epsilon, X,y)
         print(check_grad(self.cost, self.grad, init, X, y))
     
     if self.optimizeOrder == 0:
         self.params = self.optimize(self.cost,init,args=(X,y),disp=False)
     if self.optimizeOrder == 1:
         self.params = self.optimize(self.cost,init,self.grad,args=(X,y),disp=False)
         
     return self
Author: lukastencer, Project: RNNpy, Lines: 25, Source: logreg.py

Example 3: test

def test():
    data = np.loadtxt("data.txt")
    X = data[:,0:-1] # everything except the last column
    y = data[:,-1]   # just the last column

    args = (X,y)

    #theta = np.array([ 1.7657065779589087, -1.3841332550882446, -10.162222605402242])
    #theta = np.array([ 1.7999382115210827, -14.001391904643032 , -5.577578503745549])
    theta = np.zeros(3)
    theta[0] = np.random.normal(0,5)
    theta[1] = np.random.normal(0,5)
    theta[2] = np.random.normal(0,5)
    print(theta)
    print(np.exp(theta))
    print(logPosterior(theta, args))
    print(gradLogPosterior(theta, args))
    print(so.check_grad(logPosterior, gradLogPosterior, theta, args))

    newTheta = so.fmin_cg(logPosterior, theta, fprime=gradLogPosterior, args=[args], gtol=1e-4, maxiter=100, disp=1)
    print(newTheta, logPosterior(newTheta, args))

    K = kernel2(X,X,newTheta,wantderiv=False)
    L = np.linalg.cholesky(K)
    beta = np.linalg.solve(L.transpose(), np.linalg.solve(L,y))
    test = X
    #pred = [predict(i,input,K,target,newTheta,L,beta) for i in input]
    #pred = np.squeeze([predict(i,input,K,target,newTheta,L,beta) for i in input])
    demoplot(theta,args)
    demoplot(newTheta,args)
Author: bigaidream, Project: subsets_ml_cookbook, Lines: 30, Source: gp.py

Example 4: test_dldtheta

 def test_dldtheta(self):
     self.ECG.primary = ['q']
     def f(X):
         self.ECG.array2primary(X)
         lv = self.ECG.loglik(self.data);
         slv = sum(lv)
         return slv
     def df(X):
         self.ECG.array2primary(X)
         gv = self.ECG.dldtheta(self.data)
         sgv = sum(gv, axis=1);
         return sgv
     theta0 = self.ECG.primary2array()
     theta0 = abs(randn(len(theta0)))+1
     err = check_grad(f,df,theta0)
     print("error in gradient: ", err)
     self.ECG.primary = ['W']
     def f2(X):
         self.ECG.array2primary(X)
         lv = self.ECG.loglik(self.data);
         slv = sum(lv)
         return slv
     def df2(X):
         self.ECG.array2primary(X)
         gv = self.ECG.dldtheta(self.data)
         sgv = sum(gv, axis=1);
         return sgv
     theta0 = self.ECG.primary2array()
     theta0 = abs(randn(len(theta0)))+1
     err = check_grad(f2,df2,theta0)
     print("error in gradient: ", err)
     self.assertTrue(err < 1e-02)
Author: fabiansinz, Project: natter, Lines: 32, Source: TestEllipticallyContourGamma.py

Example 5: test_gradients

def test_gradients():
    K = 1
    B = 3
    T = 100
    dt = 1.0
    true_model = DiscreteTimeNetworkHawkesModelGammaMixture(K=K, B=B, dt=dt)
    S,R = true_model.generate(T=T)

    # Test with a standard Hawkes model
    test_model = DiscreteTimeStandardHawkesModel(K=K, B=B, dt=dt)
    test_model.add_data(S)

    # Check gradients with the initial parameters
    def objective(x):
        test_model.weights[0,:] = np.exp(x)
        return test_model.log_likelihood()

    def gradient(x):
        test_model.weights[0,:] = np.exp(x)
        return test_model.compute_gradient(0)

    print("Checking initial gradient: ")
    print(gradient(np.log(test_model.weights[0,:])))
    print(check_grad(objective, gradient,
                     np.log(test_model.weights[0,:])))

    print("Checking gradient at true model parameters: ")
    test_model.initialize_with_gibbs_model(true_model)

    print(gradient(np.log(test_model.weights[0,:])))
    print(check_grad(objective, gradient,
                     np.log(test_model.weights[0,:])))
Author: slinderman, Project: pyhawkes, Lines: 32, Source: test_standard_grads.py

Example 6: check_gradient

 def check_gradient(self):
     def cost(ws):
         return self.cost_function(ws, self._training_data[0:100,:], self._training_labels[0:100])

     def gradcost(ws):
         return self._back_prop(ws, self._training_data[0:100,:], self._training_labels[0:100])

     print(check_grad(cost, gradcost, self._betas))
Author: mfcabrera, Project: deeplearning-praktikum-ss2013, Lines: 8, Source: neuralnet.py

Example 7: test_logistic_loss_derivative

def test_logistic_loss_derivative(n_samples=4, n_features=10, decimal=5):
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    n_features = X.shape[1]
    w = rng.randn(n_features + 1)
    np.testing.assert_almost_equal(
        check_grad(lambda w: _logistic(X, y, w), lambda w: _logistic_loss_grad(X, y, w), w), 0.0, decimal=decimal
    )

    np.testing.assert_almost_equal(
        check_grad(lambda w: _logistic(X, y, w), lambda w: _logistic_loss_grad(X, y, w), w), 0.0, decimal=decimal
    )
Author: LisaLeroi, Project: nilearn, Lines: 13, Source: test_objective_functions.py

Example 8: fit

    def fit(self,X,y,initParams = None):
        
        X = np.pad(X,((0,0),(1,0)),mode='constant',constant_values=1)
        
        inDim = X.shape[1]
        
#         if DEBUG:
# #             self.layersSize.append(1)
# #             self.layersSize.insert(0, int(inDim))
# #             self.yindi = np.asarray(np.logical_not(y),dtype=np.int32)         
#         else:            
        self.layersSize.append(len(np.unique(y)))
        self.layersSize.insert(0, int(inDim))
        self.setIndi(y)
        
#         self.layersSize[-1]=1
#         self.yindi = np.expand_dims(self.yindi[:,0].T,1)
        
        paramSum = 0
        for i,layer in enumerate(self.layers):
            if not( i == len(self.layers)-1):
                layer.initParams([self.layersSize[i+1],self.layersSize[i]])
                split = self.layersSize[i+1] * self.layersSize[i]
                paramSum += split
                self.paramSplits.append(paramSum)
            else:
                layer.setParams(None)

        if initParams is None:
            init = self.getParams()
        else:
            init = initParams

        
        if DEBUG:
            _epsilon = np.sqrt(np.finfo(float).eps)
            #print approx_fprime(self.params[0], self.cost, _epsilon, X,y)
            print(check_grad(self.cost, self.grad, np.zeros(init.shape), X, self.yindi))
            print(check_grad(self.cost, self.grad, init, X, self.yindi))
        
        if self.optimizeOrder == 0:
            newParams = self.optimize(self.cost,init,args=(X,self.yindi),disp=False)
        if self.optimizeOrder == 1:
            newParams = self.optimize(self.cost,init,args=(X,self.yindi),disp=False)
        
        #newParams = self.optimize(self.cost, self.getParams(), args = (X,y))
        
        self.setParams(newParams)
Author: lukastencer, Project: RNNpy, Lines: 48, Source: mlp_network.py

Example 9: test_pairwise_gradient

def test_pairwise_gradient():
    fcts = PairwiseFcts(PAIRWISE_DATA, 0.2)
    for sigma in np.linspace(1, 20, num=10):
        xs = sigma * RND.randn(8)
        val = approx_fprime(xs, fcts.objective, EPS)
        err = check_grad(fcts.objective, fcts.gradient, xs, epsilon=EPS)
        assert abs(err / np.linalg.norm(val)) < 1e-5
Author: lucasmaystre, Project: choix, Lines: 7, Source: test_opt.py

Example 10: test_nonlinear_mean_return_model

    def test_nonlinear_mean_return_model(self):
        model = Nonlinear(delta=0.1, lmb=1.0, hidden=7)

        for i in range(10):
            diff = check_grad(model.cost, model.grad, model.weights(self.trX, i), self.trX, self.trY)

            self.assertTrue(diff < 1.0e-5, diff)
Author: rreas, Project: drl, Lines: 7, Source: test_gradients.py

Example 11: test_checkgrad

def test_checkgrad():
    from scipy.optimize import check_grad
    import numpy as np

    for x in range(100):
        x = x * np.ones((1)) / 10
        print("check_grad @ %.2f: %.6f" % (x, check_grad(f, fgrad, x)))
Author: jgera, Project: Segmentation-Code, Lines: 7, Source: test.py

Example 12: test_01_6_unitary_hadamard_grad

    def test_01_6_unitary_hadamard_grad(self):
        """
        control.pulseoptim: Hadamard gate gradient check
        assert that gradient approx and exact gradient match in tolerance
        """
        # Hadamard
        H_d = sigmaz()
        H_c = [sigmax()]
        U_0 = identity(2)
        U_targ = hadamard_transform(1)

        n_ts = 10
        evo_time = 10

        # Create the optim objects
        optim = cpo.create_pulse_optimizer(H_d, H_c, U_0, U_targ,
                        n_ts, evo_time,
                        fid_err_targ=1e-10,
                        dyn_type='UNIT',
                        init_pulse_type='LIN',
                        gen_stats=True)
        dyn = optim.dynamics

        init_amps = optim.pulse_generator.gen_pulse().reshape([-1, 1])
        dyn.initialize_controls(init_amps)

        # Check the exact gradient
        func = optim.fid_err_func_wrapper
        grad = optim.fid_err_grad_wrapper
        x0 = dyn.ctrl_amps.flatten()
        grad_diff = check_grad(func, grad, x0)
        assert_almost_equal(grad_diff, 0.0, decimal=6,
                            err_msg="Unitary gradient outside tolerance")
Author: NunoEdgarGub1, Project: qutip, Lines: 33, Source: test_control_pulseoptim.py

Example 13: learnGPparamsWithPrior

def learnGPparamsWithPrior(oldParams, infRes, experiment, tauOptimMethod, regularizer_stepsize_tau):
    xdim, T = np.shape(infRes['post_mean'][0])
    binSize = experiment.binSize
    oldTau = oldParams['tau']*1000/binSize
    
    precomp = makePrecomp(infRes)
    
    tempTau = np.zeros(xdim)

    pOptimizeDetails = [[]]*xdim
    for xd in range(xdim): 
        initp = np.log(1/oldTau[xd]**2)

        if False: # gradient check and stuff
            gradcheck = op.check_grad(
                MStepGPtimescaleCostWithPrior,
                MStepGPtimescaleCostWithPrior_grad,
                initp,precomp[0],0.001,binSize, oldParams['tau'][xd], regularizer_stepsize_tau)
            print('tau learning grad check = ' + str(gradcheck))
            pdb.set_trace()
            apprxGrad = op.approx_fprime(
                initp,MStepGPtimescaleCostWithPrior,1e-8,
                precomp[xd],0.001,binSize,oldParams['tau'][xd],regularizer_stepsize_tau)
            calcdGrad = MStepGPtimescaleCostWithPrior_grad(
                initp,precomp[xd],0.001,binSize,oldParams['tau'][xd],regularizer_stepsize_tau)
            plt.plot(apprxGrad,linewidth = 10, color = 'k', alpha = 0.4)
            plt.plot(calcdGrad,linewidth = 2, color = 'k', alpha = 0.4)
            plt.legend(['approximated','calculated'])
            plt.title('Approx. vs. calculated Grad of Tau learning cost')
            plt.tight_layout()
            plt.show()
            def cost(p): 
                cost = MStepGPtimescaleCostWithPrior(
                    p, precomp[xd], 0.001, binSize, oldParams['tau'][xd], regularizer_stepsize_tau)
                return cost
            def cost_grad(p): 
                grad = MStepGPtimescaleCostWithPrior_grad(
                    p, precomp[xd], 0.001, binSize, oldParams['tau'][xd], regularizer_stepsize_tau)
                return grad
            pdb.set_trace()

        if False: # bench for setting hessian as inverse variance
            hessTau = op.approx_fprime([initp], MStepGPtimescaleCost_grad, 1e-14, 
                precomp[xd], 0.001)
            priorVar = -1/hessTau
            regularizer_stepsize_tau = np.sqrt(np.abs(priorVar))
            # pdb.set_trace()

        res = op.minimize(
            fun = MStepGPtimescaleCostWithPrior,
            x0 = initp,
            args = (precomp[xd], 0.001, binSize, oldParams['tau'][xd], regularizer_stepsize_tau),
            jac = MStepGPtimescaleCostWithPrior_grad,
            options = {'disp': False,'gtol':1e-10},
            method = tauOptimMethod)
        pOptimizeDetails[xd] = res
        tempTau[xd] = (1/np.exp(res.x))**(0.5)

    newTau = tempTau*binSize/1000
    return newTau, pOptimizeDetails
Author: mackelab, Project: poisson-gpfa, Lines: 60, Source: learning.py

Example 14: gradient_check

def gradient_check(theta, x, y, l2_regularization):
    print('check_grad:', check_grad(calculate_cost, calculate_gradient, theta, x, y, l2_regularization))
    spatial_alpha_vec, spatial_mean_vec, spatial_sigma_vec, temporal_mean, temporal_sigma = span_params(theta)
    cost1 = calculate_cost(theta, x, y, l2_regularization)
    num_of_params = len(spatial_alpha_vec) + 2*len(spatial_mean_vec) + len(spatial_sigma_vec) + 2
    direction = np.random.randint(2, size=num_of_params)*2-1
    eps = 1e-7
    gradient = eps * direction
    total = 0
    spatial_alpha_vec2 = spatial_alpha_vec + gradient[0:len(spatial_alpha_vec)]
    total += len(spatial_alpha_vec)
    spatial_mean_vec2 = spatial_mean_vec + gradient[total:total+2*len(spatial_mean_vec)].reshape(-1,2)
    total += 2*len(spatial_mean_vec)
    spatial_sigma_vec2 = spatial_sigma_vec + gradient[total:total+len(spatial_sigma_vec)]
    total += len(spatial_sigma_vec)
    temporal_mean2 = np.array(temporal_mean + gradient[-2])
    temporal_sigma2 = np.array(temporal_sigma + gradient[-1])

    theta2 = compress_params(spatial_alpha_vec2, spatial_mean_vec2, spatial_sigma_vec2, temporal_mean2, temporal_sigma2)
    cost2 = calculate_cost(theta2, x, y, l2_regularization)
    delta = (cost2-cost1)
    print('Gradient check:')
    print('Empiric:', delta)
    print('Analytic:', gradient.dot(calculate_gradient(theta, x, y, l2_regularization)))
    diff = abs(delta - gradient.dot(calculate_gradient(theta, x, y, l2_regularization)))
    print('Difference:', diff)
    if diff < 1e-3:
        print('Gradient is O.K')
    else:
        print('Gradient check FAILED')
Author: drorsimon, Project: Kaggle-West-Nile-Virus, Lines: 30, Source: mosquitoes_regression.py

Example 15: test_gradient

def test_gradient():
    # Test gradient of Kullback-Leibler divergence.
    random_state = check_random_state(0)

    n_samples = 50
    n_features = 2
    n_components = 2
    alpha = 1.0

    distances = random_state.randn(n_samples, n_features).astype(np.float32)
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)

    P = _joint_probabilities(distances, desired_perplexity=25.0,
                             verbose=0)

    def fun(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[0]

    def grad(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[1]

    assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
                        decimal=5)
开发者ID:BasilBeirouti,项目名称:scikit-learn,代码行数:25,代码来源:test_t_sne.py


Note: The scipy.optimize.check_grad examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not reproduce this compilation without permission.