

Python optimize.fmin_l_bfgs_b Method Code Examples

This article collects typical usage examples of the scipy.optimize.fmin_l_bfgs_b method in Python. If you are wondering what exactly optimize.fmin_l_bfgs_b does, how to call it, or where to find real-world examples, the curated code samples below should help. You can also explore further usage examples from the scipy.optimize module.


The article presents 15 code examples of optimize.fmin_l_bfgs_b, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
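Before the individual examples, here is a minimal, self-contained sketch of the call pattern they all share. This snippet is illustrative only and is not taken from any of the listed projects: fmin_l_bfgs_b returns the optimal point, the objective value there, and an info dict whose 'warnflag' entry is 0 on convergence.

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

def func(x):
    # objective: shifted quadratic with unconstrained minimum at (1, 2.5)
    return (x[0] - 1.0) ** 2 + (x[1] - 2.5) ** 2

def grad(x):
    # analytic gradient of func
    return np.array([2.0 * (x[0] - 1.0), 2.0 * (x[1] - 2.5)])

x0 = np.array([0.0, 0.0])
bounds = [(0.0, None), (0.0, 2.0)]  # per-coordinate (lower, upper); None means unbounded

# with an explicit gradient
x_opt, f_opt, info = fmin_l_bfgs_b(func, x0, fprime=grad, bounds=bounds)

# or let SciPy approximate the gradient by finite differences
x_opt2, f_opt2, info2 = fmin_l_bfgs_b(func, x0, approx_grad=True, bounds=bounds)

print(x_opt, f_opt, info['warnflag'])  # roughly [1. 2.], 0.25, 0 (converged)

Because of the upper bound of 2.0 on the second coordinate, the constrained minimum sits on the boundary at (1, 2) rather than at the unconstrained optimum (1, 2.5).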

Example 1: global_optimization

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def global_optimization(grid, lower, upper, function_grid, function_scalar, function_scalar_gradient):

    grid_values = function_grid(grid)
    best = grid_values.argmin()
    
    # We solve the optimization problem

    X_initial = grid[ best : (best + 1), : ]
    def objective(X):
        X = casting(X)
        X = X.reshape((1, grid.shape[ 1 ]))
        value = function_scalar(X)
        gradient_value = function_scalar_gradient(X).flatten()
        return float(value), gradient_value.astype(np.float64)  # np.float was removed in NumPy 1.24

    lbfgs_bounds = zip(lower.tolist(), upper.tolist())
    x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, X_initial, bounds = list(lbfgs_bounds), iprint = 0, maxiter = 150)
    x_optimal = x_optimal.reshape((1, grid.shape[ 1 ]))

    return x_optimal, y_opt 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 22, Source: sparse_gp.py

Example 2: _fit_start_params

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def _fit_start_params(self, order, method, start_ar_lags=None):
        if method != 'css-mle':  # use Hannan-Rissanen to get start params
            start_params = self._fit_start_params_hr(order, start_ar_lags)
        else:  # use CSS to get start params
            func = lambda params: -self.loglike_css(params)
            #start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
            start_params = self._fit_start_params_hr(order, start_ar_lags)
            if self.transparams:
                start_params = self._invtransparams(start_params)
            bounds = [(None,)*2]*sum(order)
            mlefit = optimize.fmin_l_bfgs_b(func, start_params,
                                            approx_grad=True, m=12,
                                            pgtol=1e-7, factr=1e3,
                                            bounds=bounds, iprint=-1)
            start_params = mlefit[0]
            if self.transparams:
                start_params = self._transparams(start_params)
        return start_params 
Developer ID: birforce, Project: vnpy_crypto, Lines of code: 20, Source: arima_model.py

Example 3: global_optimization

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def global_optimization(grid, lower, upper, function_grid, function_scalar, function_scalar_gradient):

    grid_values = function_grid(grid)
    best = grid_values.argmin()
    
    # We solve the optimization problem

    X_initial = grid[ best : (best + 1), : ]
    def objective(X):
        X = casting(X)
        X = X.reshape((1, grid.shape[ 1 ]))
        value = function_scalar(X)
        gradient_value = function_scalar_gradient(X).flatten()
        return float(value), gradient_value.astype(np.float64)  # np.float was removed in NumPy 1.24

    lbfgs_bounds = zip(lower.tolist(), upper.tolist())
    x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, X_initial, bounds = list(lbfgs_bounds), iprint = 0, maxiter = 150)  # zip() returns an iterator in Python 3, so it must be materialized as a list
    x_optimal = x_optimal.reshape((1, grid.shape[ 1 ]))

    return x_optimal, y_opt 
Developer ID: wengong-jin, Project: icml18-jtnn, Lines of code: 22, Source: sparse_gp.py

Example 4: test_l_bfgs_b

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def test_l_bfgs_b(self):
        """ limited-memory bound-constrained BFGS algorithm
        """
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        self.grad, args=(),
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls == 5, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[0., -0.52489628, 0.48753042],
                         [0., -0.52489628, 0.48753042]],
                        atol=1e-14, rtol=1e-7) 
Developer ID: ktraunmueller, Project: Computable, Lines of code: 24, Source: test_optimize.py

Example 5: test_l_bfgs_b

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def test_l_bfgs_b(self):
        # limited-memory bound-constrained BFGS algorithm
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        self.grad, args=(),
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls == 5, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[0., -0.52489628, 0.48753042],
                         [0., -0.52489628, 0.48753042]],
                        atol=1e-14, rtol=1e-7) 
Developer ID: Relph1119, Project: GraphicDesignPatternByPython, Lines of code: 23, Source: test_optimize.py

Example 6: test_minimize_l_bfgs_b_maxfun_interruption

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def test_minimize_l_bfgs_b_maxfun_interruption(self):
        # gh-6162
        f = optimize.rosen
        g = optimize.rosen_der
        values = []
        x0 = np.ones(7) * 1000

        def objfun(x):
            value = f(x)
            values.append(value)
            return value

        # Look for an interesting test case.
        # Request a maxfun that stops at a particularly bad function
        # evaluation somewhere between 100 and 300 evaluations.
        low, medium, high = 30, 100, 300
        optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
        v, k = max((y, i) for i, y in enumerate(values[medium:]))
        maxfun = medium + k
        # If the minimization strategy is reasonable,
        # the minimize() result should not be worse than the best
        # of the first 30 function evaluations.
        target = min(values[:low])
        xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
        assert_array_less(fmin, target) 
Developer ID: Relph1119, Project: GraphicDesignPatternByPython, Lines of code: 27, Source: test_optimize.py

Example 7: lbfgsb

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def lbfgsb(variables, bounds, loss_fn, zero_grad_fn):
    x, shapes, shapes_flat = vars_to_x(variables)
    bounds_list = []
    for var in variables:
        lower, upper = bounds[var]
        lower = lower.ravel()
        upper = upper.ravel()
        for i in range(lower.size):
            bounds_list.append((lower[i], upper[i]))

    def f(x):
        x_to_vars(x, variables, shapes_flat, shapes)
        loss = loss_fn()
        zero_grad_fn()
        loss.backward()
        with torch.no_grad():
            f = loss.detach().cpu().numpy().astype(np.float64)
            g = np.concatenate([var.tensor.grad.detach().cpu().numpy().ravel() for var in variables]).astype(np.float64)  # concatenate (not stack) into one flat gradient vector matching x
        return f, g
    x, f, d = spo.fmin_l_bfgs_b(f, x, bounds=bounds_list)
    x_to_vars(x, variables, shapes_flat, shapes) 
Developer ID: eth-sri, Project: dl2, Lines of code: 23, Source: api.py

Example 8: optimize

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def optimize(self, num_vars, objective_function, gradient_function=None,
                 variable_bounds=None, initial_point=None):
        super().optimize(num_vars, objective_function, gradient_function,
                         variable_bounds, initial_point)

        if gradient_function is None and self._max_evals_grouped > 1:
            epsilon = self._options['epsilon']
            gradient_function = Optimizer.wrap_function(Optimizer.gradient_num_diff,
                                                        (objective_function,
                                                         epsilon, self._max_evals_grouped))

        approx_grad = bool(gradient_function is None)
        sol, opt, info = sciopt.fmin_l_bfgs_b(objective_function,
                                              initial_point, bounds=variable_bounds,
                                              fprime=gradient_function,
                                              approx_grad=approx_grad, **self._options)

        return sol, opt, info['funcalls'] 
Developer ID: Qiskit, Project: qiskit-aqua, Lines of code: 20, Source: l_bfgs_b.py

Example 9: _fit_start_params

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def _fit_start_params(self, order, method, start_ar_lags=None):
        if method != 'css-mle':  # use Hannan-Rissanen to get start params
            start_params = self._fit_start_params_hr(order, start_ar_lags)
        else:  # use CSS to get start params
            func = lambda params: -self.loglike_css(params)
            #start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
            start_params = self._fit_start_params_hr(order, start_ar_lags)
            if self.transparams:
                start_params = self._invtransparams(start_params)
            bounds = [(None,)*2]*sum(order)
            mlefit = optimize.fmin_l_bfgs_b(func, start_params,
                                            approx_grad=True, m=12,
                                            pgtol=1e-7, factr=1e3,
                                            bounds=bounds, iprint=-1)
            start_params = self._transparams(mlefit[0])
        return start_params 
Developer ID: nccgroup, Project: Splunking-Crime, Lines of code: 18, Source: arima_model.py

Example 10: styling

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def styling(self, content_image, style_image, n_iter):
        content = Image.open(content_image).resize((self.width, self.height))
        self.content = np.expand_dims(content, axis=0).astype(np.float32)   # [1, height, width, 3]
        style = Image.open(style_image).resize((self.width, self.height))
        self.style = np.expand_dims(style, axis=0).astype(np.float32)       # [1, height, width, 3]

        x = np.copy(self.content)      # initialize styled image from content
        
        # repeatedly backpropagate into the styled image
        for i in range(n_iter):
            x, min_val, info = fmin_l_bfgs_b(self._get_loss, x.flatten(), fprime=lambda x: self.flat_grads, maxfun=20)
            x = x.clip(0., 255.)
            print('(%i/%i) loss: %.1f' % (i+1, n_iter, min_val))

        x = x.reshape((self.height, self.width, 3))
        for i in range(1, 4):
            x[:, :, -i] += self.vgg_mean[i - 1]
        return x, self.content, self.style 
Developer ID: MorvanZhou, Project: Tensorflow-Computer-Vision-Tutorial, Lines of code: 20, Source: 105_Style_Transfer.py

Example 11: bfgs

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def bfgs(self):
        def ll(x):
            x = x.reshape((self.K, self.L))
            return self._ll(x)

        def dll(x):
            x = x.reshape((self.K, self.L))
            result = self._dll(x)
            result = result.reshape(self.K * self.L)
            return result

        Lambda = np.random.multivariate_normal(np.zeros(self.L), 
            (self.sigma ** 2) * np.identity(self.L), size=self.K)
        Lambda = Lambda.reshape(self.K * self.L)

        newLambda, fmin, res = optimize.fmin_l_bfgs_b(ll, Lambda, dll)
        self.Lambda = newLambda.reshape((self.K, self.L)) 
Developer ID: mpkato, Project: dmr, Lines of code: 19, Source: dmr.py

Example 12: calcM

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def calcM(f,xMin,xMax):
    #first do a coarse grid to get ic
    dx = np.linspace(xMin, xMax, 1000*1000)
    ic = np.argmax(f(dx))
    
    #now optimize
    g = lambda x: -f(x)
    M = fmin_l_bfgs_b(g,[dx[ic]],approx_grad=True,bounds=[(xMin,xMax)])
    M = f(M[0])
    
    return M 
Developer ID: dsavransky, Project: EXOSIMS, Lines of code: 13, Source: statsFun.py

Example 13: calcM

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def calcM(self):
        '''
        Calculate the maximum bound of the distribution over the 
        sampling interval.
        '''
        #first do a coarse grid to get ic
        dx = np.linspace(self.xMin, self.xMax, 1000*1000)
        ic = np.argmax(self.f(dx))
        
        #now optimize
        g = lambda x: -self.f(x)
        M = fmin_l_bfgs_b(g,[dx[ic]],approx_grad=True,bounds=[(self.xMin,self.xMax)])
        M = self.f(M[0])
        
        return M 
Developer ID: dsavransky, Project: EXOSIMS, Lines of code: 17, Source: RejectionSampler.py

Example 14: opt

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def opt(self, x_init, f_fp=None, f=None, fp=None):
        """
        Run the optimizer

        """
        rcstrings = ['Converged', 'Maximum number of f evaluations reached', 'Error']

        assert f_fp is not None, "BFGS requires f_fp"

        opt_dict = {}
        if self.xtol is not None:
            print("WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it")
        if self.ftol is not None:
            print("WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it")
        if self.gtol is not None:
            opt_dict['pgtol'] = self.gtol
        if self.bfgs_factor is not None:
            opt_dict['factr'] = self.bfgs_factor

        opt_result = optimize.fmin_l_bfgs_b(f_fp, x_init, maxfun=self.max_iters, maxiter=self.max_iters, **opt_dict)
        self.x_opt = opt_result[0]
        self.f_opt = f_fp(self.x_opt)[0]
        self.funct_eval = opt_result[2]['funcalls']
        self.status = rcstrings[opt_result[2]['warnflag']]

        #a more helpful error message is available in opt_result in the Error case
        if opt_result[2]['warnflag']==2: # pragma: no coverage, this is not needed to be covered
            self.status = 'Error' + str(opt_result[2]['task']) 
Developer ID: sods, Project: paramz, Lines of code: 30, Source: optimization.py

Example 15: minimize_impl

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def minimize_impl(self, objective, gradient, inits, bounds):
        if gradient is None:
            approx_grad = True
        else:
            approx_grad = False

        self.niter = 0
        def callback(xs):
            self.niter += 1
            if self.verbose:
                if self.niter % 50 == 0:
                    # NOTE: 'variables' is not defined within this excerpt; it comes from the surrounding scope in the original project
                    print('iter  ', '\t'.join([x.name.split(':')[0] for x in variables]))
                print('{: 4d}   {}'.format(self.niter, '\t'.join(map(str, xs))))
            if self.callback is not None:
                self.callback(xs)

        results = fmin_l_bfgs_b(
                objective,
                inits,
                m=self.m,
                fprime=gradient,
                factr=self.factr,
                pgtol=self.pgtol,
                callback=callback,
                approx_grad=approx_grad,
                bounds=bounds,
        )

        ret = OptimizationResult()
        ret.x = results[0]
        ret.func = results[1]
        ret.niter = results[2]['nit']
        ret.calls = results[2]['funcalls']
        ret.message = results[2]['task'].decode().lower()
        ret.success = results[2]['warnflag'] == 0

        return ret 
Developer ID: tensorprob, Project: tensorprob, Lines of code: 39, Source: scipy_lbfgsb.py


Note: The scipy.optimize.fmin_l_bfgs_b examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers, and copyright remains with the original authors. Follow the corresponding project's license when distributing or using them; do not reproduce this article without permission.