

Python optimize.fmin_l_bfgs_b Method Code Examples

This article collects typical usage examples of the Python method scipy.optimize.fmin_l_bfgs_b. If you are wondering how optimize.fmin_l_bfgs_b is used in practice, or what working examples look like, the curated code examples below may help. You can also browse further usage examples from the containing module, scipy.optimize.


The following presents 15 code examples of the optimize.fmin_l_bfgs_b method, sorted by popularity.
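Before the examples, here is a minimal, self-contained sketch of the basic call pattern. The quadratic objective, variable names, and bounds below are illustrative inventions, not taken from any of the projects cited later. fmin_l_bfgs_b returns a triple: the minimizer x, the minimum value f, and an info dict d (with keys such as 'warnflag', 'funcalls', and 'nit').

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

# Illustrative bound-constrained quadratic: minimize sum((x - 3)^2)
def func(x):
    return np.sum((x - 3.0) ** 2)

def grad(x):
    return 2.0 * (x - 3.0)

x0 = np.zeros(2)                    # starting point
bounds = [(0.0, 2.0), (0.0, 10.0)]  # one (lower, upper) pair per variable

# With an explicit gradient passed via fprime:
x_opt, f_opt, info = fmin_l_bfgs_b(func, x0, fprime=grad, bounds=bounds)
print(x_opt, f_opt, info['warnflag'])  # x_opt ~ [2., 3.]; warnflag 0 means convergence

# Without a gradient, let the routine difference the objective numerically:
x_opt, f_opt, info = fmin_l_bfgs_b(func, x0, approx_grad=True, bounds=bounds)

A third calling convention, used by several of the examples below, is to pass a single function that returns the pair (value, gradient) and omit both fprime and approx_grad.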

Example 1: global_optimization

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def global_optimization(grid, lower, upper, function_grid, function_scalar, function_scalar_gradient):

    grid_values = function_grid(grid)
    best = grid_values.argmin()
    
    # We solve the optimization problem

    X_initial = grid[ best : (best + 1), : ]
    def objective(X):
        X = casting(X)
        X = X.reshape((1, grid.shape[ 1 ]))
        value = function_scalar(X)
        gradient_value = function_scalar_gradient(X).flatten()
        # np.float was removed in NumPy 1.24; use the builtin float instead
        return float(value), gradient_value.astype(float)

    lbfgs_bounds = zip(lower.tolist(), upper.tolist())
    x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, X_initial, bounds = list(lbfgs_bounds), iprint = 0, maxiter = 150)
    x_optimal = x_optimal.reshape((1, grid.shape[ 1 ]))

    return x_optimal, y_opt 
Author: muhanzhang, Project: D-VAE, Lines: 22, Source: sparse_gp.py

Example 2: _fit_start_params

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def _fit_start_params(self, order, method, start_ar_lags=None):
        if method != 'css-mle':  # use Hannan-Rissanen to get start params
            start_params = self._fit_start_params_hr(order, start_ar_lags)
        else:  # use CSS to get start params
            func = lambda params: -self.loglike_css(params)
            #start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
            start_params = self._fit_start_params_hr(order, start_ar_lags)
            if self.transparams:
                start_params = self._invtransparams(start_params)
            bounds = [(None,)*2]*sum(order)
            mlefit = optimize.fmin_l_bfgs_b(func, start_params,
                                            approx_grad=True, m=12,
                                            pgtol=1e-7, factr=1e3,
                                            bounds=bounds, iprint=-1)
            start_params = mlefit[0]
            if self.transparams:
                start_params = self._transparams(start_params)
        return start_params 
Author: birforce, Project: vnpy_crypto, Lines: 20, Source: arima_model.py

Example 3: global_optimization

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def global_optimization(grid, lower, upper, function_grid, function_scalar, function_scalar_gradient):

    grid_values = function_grid(grid)
    best = grid_values.argmin()
    
    # We solve the optimization problem

    X_initial = grid[ best : (best + 1), : ]
    def objective(X):
        X = casting(X)
        X = X.reshape((1, grid.shape[ 1 ]))
        value = function_scalar(X)
        gradient_value = function_scalar_gradient(X).flatten()
        # np.float was removed in NumPy 1.24; use the builtin float instead
        return float(value), gradient_value.astype(float)

    lbfgs_bounds = zip(lower.tolist(), upper.tolist())
    # zip() returns an iterator in Python 3, so materialize it before passing it as bounds
    x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, X_initial, bounds = list(lbfgs_bounds), iprint = 0, maxiter = 150)
    x_optimal = x_optimal.reshape((1, grid.shape[ 1 ]))

    return x_optimal, y_opt 
Author: wengong-jin, Project: icml18-jtnn, Lines: 22, Source: sparse_gp.py

Example 4: test_l_bfgs_b

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def test_l_bfgs_b(self):
        """ limited-memory bound-constrained BFGS algorithm
        """
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        self.grad, args=(),
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls == 5, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[0., -0.52489628, 0.48753042],
                         [0., -0.52489628, 0.48753042]],
                        atol=1e-14, rtol=1e-7) 
Author: ktraunmueller, Project: Computable, Lines: 24, Source: test_optimize.py

Example 5: test_l_bfgs_b

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def test_l_bfgs_b(self):
        # limited-memory bound-constrained BFGS algorithm
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        self.grad, args=(),
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls == 5, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[0., -0.52489628, 0.48753042],
                         [0., -0.52489628, 0.48753042]],
                        atol=1e-14, rtol=1e-7) 
Author: Relph1119, Project: GraphicDesignPatternByPython, Lines: 23, Source: test_optimize.py

Example 6: test_minimize_l_bfgs_b_maxfun_interruption

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def test_minimize_l_bfgs_b_maxfun_interruption(self):
        # gh-6162
        f = optimize.rosen
        g = optimize.rosen_der
        values = []
        x0 = np.ones(7) * 1000

        def objfun(x):
            value = f(x)
            values.append(value)
            return value

        # Look for an interesting test case.
        # Request a maxfun that stops at a particularly bad function
        # evaluation somewhere between 100 and 300 evaluations.
        low, medium, high = 30, 100, 300
        optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
        v, k = max((y, i) for i, y in enumerate(values[medium:]))
        maxfun = medium + k
        # If the minimization strategy is reasonable,
        # the minimize() result should not be worse than the best
        # of the first 30 function evaluations.
        target = min(values[:low])
        xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
        assert_array_less(fmin, target) 
Author: Relph1119, Project: GraphicDesignPatternByPython, Lines: 27, Source: test_optimize.py

Example 7: lbfgsb

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def lbfgsb(variables, bounds, loss_fn, zero_grad_fn):
    x, shapes, shapes_flat = vars_to_x(variables)
    bounds_list = []
    for var in variables:
        lower, upper = bounds[var]
        lower = lower.ravel()
        upper = upper.ravel()
        for i in range(lower.size):
            bounds_list.append((lower[i], upper[i]))

    def f(x):
        x_to_vars(x, variables, shapes_flat, shapes)
        loss = loss_fn()
        zero_grad_fn()
        loss.backward()
        with torch.no_grad():
            f = loss.detach().cpu().numpy().astype(np.float64)
            g = np.stack([var.tensor.grad.detach().cpu().numpy().ravel() for var in variables]).astype(np.float64)
        return f, g
    x, f, d = spo.fmin_l_bfgs_b(f, x, bounds=bounds_list)
    x_to_vars(x, variables, shapes_flat, shapes) 
Author: eth-sri, Project: dl2, Lines: 23, Source: api.py

Example 8: optimize

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def optimize(self, num_vars, objective_function, gradient_function=None,
                 variable_bounds=None, initial_point=None):
        super().optimize(num_vars, objective_function, gradient_function,
                         variable_bounds, initial_point)

        if gradient_function is None and self._max_evals_grouped > 1:
            epsilon = self._options['epsilon']
            gradient_function = Optimizer.wrap_function(Optimizer.gradient_num_diff,
                                                        (objective_function,
                                                         epsilon, self._max_evals_grouped))

        approx_grad = bool(gradient_function is None)
        sol, opt, info = sciopt.fmin_l_bfgs_b(objective_function,
                                              initial_point, bounds=variable_bounds,
                                              fprime=gradient_function,
                                              approx_grad=approx_grad, **self._options)

        return sol, opt, info['funcalls'] 
Author: Qiskit, Project: qiskit-aqua, Lines: 20, Source: l_bfgs_b.py

Example 9: _fit_start_params

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def _fit_start_params(self, order, method, start_ar_lags=None):
        if method != 'css-mle':  # use Hannan-Rissanen to get start params
            start_params = self._fit_start_params_hr(order, start_ar_lags)
        else:  # use CSS to get start params
            func = lambda params: -self.loglike_css(params)
            #start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
            start_params = self._fit_start_params_hr(order, start_ar_lags)
            if self.transparams:
                start_params = self._invtransparams(start_params)
            bounds = [(None,)*2]*sum(order)
            mlefit = optimize.fmin_l_bfgs_b(func, start_params,
                                            approx_grad=True, m=12,
                                            pgtol=1e-7, factr=1e3,
                                            bounds=bounds, iprint=-1)
            start_params = self._transparams(mlefit[0])
        return start_params 
Author: nccgroup, Project: Splunking-Crime, Lines: 18, Source: arima_model.py

Example 10: styling

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def styling(self, content_image, style_image, n_iter):
        content = Image.open(content_image).resize((self.width, self.height))
        self.content = np.expand_dims(content, axis=0).astype(np.float32)   # [1, height, width, 3]
        style = Image.open(style_image).resize((self.width, self.height))
        self.style = np.expand_dims(style, axis=0).astype(np.float32)       # [1, height, width, 3]

        x = np.copy(self.content)      # initialize styled image from content
        
        # repeat backpropagating to styled image 
        for i in range(n_iter):
            x, min_val, info = fmin_l_bfgs_b(self._get_loss, x.flatten(), fprime=lambda x: self.flat_grads, maxfun=20)
            x = x.clip(0., 255.)
            print('(%i/%i) loss: %.1f' % (i+1, n_iter, min_val))

        x = x.reshape((self.height, self.width, 3))
        for i in range(1, 4):
            x[:, :, -i] += self.vgg_mean[i - 1]
        return x, self.content, self.style 
Author: MorvanZhou, Project: Tensorflow-Computer-Vision-Tutorial, Lines: 20, Source: 105_Style_Transfer.py

Example 11: bfgs

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def bfgs(self):
        def ll(x):
            x = x.reshape((self.K, self.L))
            return self._ll(x)

        def dll(x):
            x = x.reshape((self.K, self.L))
            result = self._dll(x)
            result = result.reshape(self.K * self.L)
            return result

        Lambda = np.random.multivariate_normal(np.zeros(self.L), 
            (self.sigma ** 2) * np.identity(self.L), size=self.K)
        Lambda = Lambda.reshape(self.K * self.L)

        newLambda, fmin, res = optimize.fmin_l_bfgs_b(ll, Lambda, dll)
        self.Lambda = newLambda.reshape((self.K, self.L)) 
Author: mpkato, Project: dmr, Lines: 19, Source: dmr.py

Example 12: calcM

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def calcM(f,xMin,xMax):
    #first do a coarse grid to get ic
    dx = np.linspace(xMin, xMax, 1000*1000)
    ic = np.argmax(f(dx))
    
    #now optimize
    g = lambda x: -f(x)
    M = fmin_l_bfgs_b(g,[dx[ic]],approx_grad=True,bounds=[(xMin,xMax)])
    M = f(M[0])
    
    return M 
Author: dsavransky, Project: EXOSIMS, Lines: 13, Source: statsFun.py

Example 13: calcM

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def calcM(self):
        '''
        Calculate the maximum bound of the distribution over the 
        sampling interval.
        '''
        #first do a coarse grid to get ic
        dx = np.linspace(self.xMin, self.xMax, 1000*1000)
        ic = np.argmax(self.f(dx))
        
        #now optimize
        g = lambda x: -self.f(x)
        M = fmin_l_bfgs_b(g,[dx[ic]],approx_grad=True,bounds=[(self.xMin,self.xMax)])
        M = self.f(M[0])
        
        return M 
Author: dsavransky, Project: EXOSIMS, Lines: 17, Source: RejectionSampler.py

Example 14: opt

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def opt(self, x_init, f_fp=None, f=None, fp=None):
        """
        Run the optimizer

        """
        rcstrings = ['Converged', 'Maximum number of f evaluations reached', 'Error']

        assert f_fp is not None, "BFGS requires f_fp"

        opt_dict = {}
        if self.xtol is not None:
            print("WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it")
        if self.ftol is not None:
            print("WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it")
        if self.gtol is not None:
            opt_dict['pgtol'] = self.gtol
        if self.bfgs_factor is not None:
            opt_dict['factr'] = self.bfgs_factor

        opt_result = optimize.fmin_l_bfgs_b(f_fp, x_init, maxfun=self.max_iters, maxiter=self.max_iters, **opt_dict)
        self.x_opt = opt_result[0]
        self.f_opt = f_fp(self.x_opt)[0]
        self.funct_eval = opt_result[2]['funcalls']
        self.status = rcstrings[opt_result[2]['warnflag']]

        #a more helpful error message is available in opt_result in the Error case
        if opt_result[2]['warnflag']==2: # pragma: no coverage, this is not needed to be covered
            self.status = 'Error' + str(opt_result[2]['task']) 
Author: sods, Project: paramz, Lines: 30, Source: optimization.py

Example 15: minimize_impl

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_l_bfgs_b [as alias]
def minimize_impl(self, objective, gradient, inits, bounds):
        if gradient is None:
            approx_grad = True
        else:
            approx_grad = False

        self.niter = 0
        def callback(xs):
            self.niter += 1
            if self.verbose:
                if self.niter % 50 == 0:
                    # NOTE: `variables` is not defined in this snippet; it is
                    # assumed to come from the enclosing scope in the original project.
                    print('iter  ', '\t'.join([x.name.split(':')[0] for x in variables]))
                print('{: 4d}   {}'.format(self.niter, '\t'.join(map(str, xs))))
            if self.callback is not None:
                self.callback(xs)

        results = fmin_l_bfgs_b(
                objective,
                inits,
                m=self.m,
                fprime=gradient,
                factr=self.factr,
                pgtol=self.pgtol,
                callback=callback,
                approx_grad=approx_grad,
                bounds=bounds,
        )

        ret = OptimizationResult()
        ret.x = results[0]
        ret.func = results[1]
        ret.niter = results[2]['nit']
        ret.calls = results[2]['funcalls']
        ret.message = results[2]['task'].decode().lower()
        ret.success = results[2]['warnflag'] == 0

        return ret 
Author: tensorprob, Project: tensorprob, Lines: 39, Source: scipy_lbfgsb.py


Note: The scipy.optimize.fmin_l_bfgs_b examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For redistribution and use, refer to each project's License. Do not reproduce without permission.