

Python optimize.OptimizeResult Code Examples

This article collects typical usage examples of the Python class scipy.optimize.OptimizeResult. If you are unsure what optimize.OptimizeResult does, how to use it, or want to see it in context, the curated code examples below should help. You can also explore further usage examples from its home module, scipy.optimize.


The sections below present 15 code examples of optimize.OptimizeResult, ordered by popularity by default.
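
Before diving into the examples, here is a minimal sketch (not taken from any of the projects below) of what OptimizeResult actually is: a dict subclass whose keys double as attributes, which scipy's own solvers return and which you can also construct by hand.

import numpy as np
from scipy.optimize import OptimizeResult, minimize

# Construct one by hand: keyword arguments become keys and attributes alike
res = OptimizeResult(x=np.array([1.0, 2.0]), fun=0.5, success=True, nfev=10)
print(res.x, res["fun"])   # attribute access and dict access are equivalent

# scipy's own solvers return the same type
res2 = minimize(lambda v: (v[0] - 3.0) ** 2, x0=[0.0])
print(res2.success, res2.x)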

Example 1: optimize

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def optimize(self, objectivefx, **kwargs):
        """
        Optimize a given function f over a domain.

        The optimizer class supports interruption: if Ctrl+C is pressed during the optimization, the last best
        point is returned.

        The actual optimization routine is implemented in _optimize, which subclasses must override.

        :param objectivefx: callable taking one argument, a 2D numpy array. The number of columns corresponds
            to the dimensionality of the input domain.
        :return: OptimizeResult reporting the results.
        """
        objective = ObjectiveWrapper(objectivefx, **self._wrapper_args)
        try:
            result = self._optimize(objective, **kwargs)
        except KeyboardInterrupt:
            result = OptimizeResult(x=objective._previous_x,
                                    success=False,
                                    message="Caught KeyboardInterrupt, returning last good value.")
        result.x = np.atleast_2d(result.x)
        result.nfev = objective.counter
        return result 
Developer: GPflow, Project: GPflowOpt, Lines: 25, Source: optim.py
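
The pattern above can be reproduced outside GPflowOpt. The sketch below is a simplified stand-in, assuming a plain scipy.optimize.minimize call and a hand-rolled wrapper; the class name and quadratic objective are illustrative, not GPflowOpt API.

import numpy as np
from scipy.optimize import OptimizeResult, minimize

class TrackingObjective:
    """Counts evaluations and remembers the last point, so it can be returned on Ctrl+C."""
    def __init__(self, fx):
        self.fx = fx
        self.counter = 0
        self.previous_x = None

    def __call__(self, x):
        self.counter += 1
        self.previous_x = np.copy(x)
        return self.fx(x)

objective = TrackingObjective(lambda x: np.sum(x ** 2))
try:
    result = minimize(objective, x0=np.ones(3))
except KeyboardInterrupt:
    result = OptimizeResult(x=objective.previous_x, success=False,
                            message="Caught KeyboardInterrupt, returning last good value.")
result.x = np.atleast_2d(result.x)
result.nfev = objective.counter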

Example 2: optimize

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def optimize(self, objectivefx, n_iter=20):
        """
        Run Bayesian optimization for a number of iterations.
        
        Before the loop starts, all points returned by :meth:`~.optim.Optimizer.get_initial` are evaluated
        on the objective and the black-box constraints. These points are then added to the acquisition function
        by calling :meth:`~.acquisition.Acquisition.set_data` (and hence to the underlying models).

        In each iteration, a new data point is selected for evaluation by optimizing an acquisition function,
        and the models are updated with the new observation.

        :param objectivefx: (list of) expensive black-box objective and constraint functions. For evaluation,
            the responses of all the expensive functions are aggregated column-wise.
            Unlike the typical :class:`~.optim.Optimizer` interface, these functions should not return gradients.
        :param n_iter: number of iterations to run
        :return: OptimizeResult object
        """
        fxs = np.atleast_1d(objectivefx)
        return super(BayesianOptimizer, self).optimize(lambda x: self._evaluate_objectives(x, fxs), n_iter=n_iter) 
Developer: GPflow, Project: GPflowOpt, Lines: 21, Source: bo.py
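
GPflowOpt aside, the loop described in the docstring can be sketched generically. The snippet below is a toy stand-in: the random perturbation plays the role of optimizing a model-based acquisition function and is purely illustrative.

import numpy as np

def bayesian_optimize(objective, initial_points, n_iter=20):
    X = np.atleast_2d(initial_points)        # evaluate the initial design first
    Y = np.array([objective(x) for x in X])
    for _ in range(n_iter):
        # stand-in for optimizing an acquisition function over the domain
        candidate = X[np.argmin(Y)] + 0.1 * np.random.randn(X.shape[1])
        X = np.vstack([X, candidate])        # the new point updates the "model"
        Y = np.append(Y, objective(candidate))
    best = np.argmin(Y)
    return X[best], Y[best]

x_best, y_best = bayesian_optimize(lambda x: np.sum((x - 0.3) ** 2), np.random.rand(5, 2))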

Example 3: load

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def load(filename, **kwargs):
    """
    Reconstruct a skopt optimization result from a file
    persisted with skopt.dump.

    .. note::
        Notice that the loaded optimization result can be missing
        the objective function (`.specs['args']['func']`) if `skopt.dump`
        was called with `store_objective=False`.

    Parameters
    ----------
    filename : string or `pathlib.Path`
        The path of the file from which to load the optimization result.

    **kwargs : other keyword arguments
        All other keyword arguments will be passed to `joblib.load`.

    Returns
    -------
    res : `OptimizeResult`, scipy object
        Reconstructed OptimizeResult instance.
    """
    return load_(filename, **kwargs) 
Developer: scikit-optimize, Project: scikit-optimize, Lines: 26, Source: utils.py
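
A short round-trip usage sketch for this loader, assuming scikit-optimize is installed; the objective and the file name are illustrative.

from skopt import gp_minimize, dump, load

def objective(x):                  # a named function, so it pickles cleanly
    return (x[0] - 2.0) ** 2

res = gp_minimize(objective, [(-5.0, 5.0)], n_calls=15, random_state=0)
dump(res, "result.skopt")          # persist the OptimizeResult
restored = load("result.skopt")    # reconstruct it later
print(restored.x, restored.fun)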

Example 4: __init__

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False):
        self.x = np.copy(x0)
        self.minimizer = minimizer
        self.step_taking = step_taking
        self.accept_tests = accept_tests
        self.disp = disp

        self.nstep = 0

        # initialize return object
        self.res = scipy.optimize.OptimizeResult()
        self.res.minimization_failures = 0

        # do initial minimization
        minres = minimizer(self.x)
        if not minres.success:
            self.res.minimization_failures += 1
            if self.disp:
                print("warning: basinhopping: local minimization failure")
        self.x = np.copy(minres.x)
        self.energy = minres.fun
        if self.disp:
            print("basinhopping step %d: f %g" % (self.nstep, self.energy))

        # initialize storage class
        self.storage = Storage(minres)

        if hasattr(minres, "nfev"):
            self.res.nfev = minres.nfev
        if hasattr(minres, "njev"):
            self.res.njev = minres.njev
        if hasattr(minres, "nhev"):
            self.res.nhev = minres.nhev 
Developer: ryfeus, Project: lambda-packs, Lines: 35, Source: _basinhopping.py
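
For context, this class backs the public scipy.optimize.basinhopping function, which fills the same OptimizeResult fields (nfev, minimization_failures, and so on). A minimal call, with an objective borrowed from the scipy documentation:

import numpy as np
from scipy.optimize import basinhopping

def func(x):
    return np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]

ret = basinhopping(func, x0=[1.0], minimizer_kwargs={"method": "BFGS"}, niter=100)
print(ret.x, ret.fun, ret.minimization_failures)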

Example 5: fmin_minuit

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def fmin_minuit(func, x0, names=None, verbose=False):
    inits = dict()

    if verbose:
        print_level = 2
    else:
        print_level = 0

    if names is None:
        # build a list (not a lazy map) so the names can be iterated more than once
        names = ['param' + str(i) for i in range(len(x0))]
    else:
        assert(len(x0) == len(names))

    for n, x in zip(names, x0):
        inits[n] = x
        # TODO use a method to set this correctly
        inits['error_' + n] = 1

    m = Minuit(Min_Func(func, names), print_level=print_level, errordef=1, **inits)
    a, b = m.migrad()

    return OptimizeResult(
        x=m.values,
        fun=a['fval'],
        edm=a['edm'],
        nfev=a['nfcn'],
        is_valid=a['is_valid'],
        has_valid_parameters=a['has_valid_parameters'],
    ) 
Developer: ibab, Project: python-mle, Lines: 31, Source: minuit.py

Example 6: _optimize

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def _optimize(self, objective):
        points = self._get_eval_points()
        evaluations = objective(points)
        idx_best = np.argmin(evaluations, axis=0)

        return OptimizeResult(x=points[idx_best, :],
                              success=True,
                              fun=evaluations[idx_best, :],
                              nfev=points.shape[0],
                              message="OK") 
Developer: GPflow, Project: GPflowOpt, Lines: 12, Source: optim.py
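
The same argmin-over-candidates idea works standalone. Here is a sketch with an illustrative random candidate grid and objective (not GPflowOpt code):

import numpy as np
from scipy.optimize import OptimizeResult

def candidate_optimize(objective, points):
    evaluations = objective(points)          # one row of responses per candidate
    idx_best = np.argmin(evaluations, axis=0)
    return OptimizeResult(x=points[idx_best, :], success=True,
                          fun=evaluations[idx_best, :],
                          nfev=points.shape[0], message="OK")

points = np.random.rand(100, 2)              # 100 candidates in the 2D unit square
res = candidate_optimize(lambda X: np.sum((X - 0.5) ** 2, axis=1, keepdims=True), points)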

Example 7: test_train_failure

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def test_train_failure(self, all_gps, params, mocker):
        """
        Tests the case when 'L-BFGS-B' fails due to a linear algebra error and
        training falls back to BFGS
        """
        # Sets up mocker for scipy minimize. Note that we are mocking
        # 'flare.gp.minimize' because of how the imports are done in gp
        x_result = np.random.rand()
        fun_result = np.random.rand()
        jac_result = np.random.rand()
        train_result = OptimizeResult(x=x_result, fun=fun_result,
                                      jac=jac_result)

        side_effects = [np.linalg.LinAlgError(), train_result]
        mocker.patch('flare.gp.minimize', side_effect=side_effects)
        two_body_gp = all_gps[True]
        two_body_gp.set_L_alpha = mocker.Mock()

        # Executes training
        two_body_gp.algo = 'L-BFGS-B'
        two_body_gp.train()

        # Assert that everything happened as expected
        assert(flare.gp.minimize.call_count == 2)

        calls = flare.gp.minimize.call_args_list
        args, kwargs = calls[0]
        assert(kwargs['method'] == 'L-BFGS-B')

        args, kwargs = calls[1]
        assert(kwargs['method'] == 'BFGS')

        two_body_gp.set_L_alpha.assert_called_once()
        assert(two_body_gp.hyps == x_result)
        assert(two_body_gp.likelihood == -1 * fun_result)
        assert(two_body_gp.likelihood_gradient == -1 * jac_result) 
Developer: mir-group, Project: flare, Lines: 38, Source: test_gp.py
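
Stripped of the test harness, the fallback behavior this test verifies looks roughly like the sketch below; the function and variable names are illustrative, not flare's actual implementation.

import numpy as np
from scipy.optimize import minimize

def train(neg_log_likelihood, hyps):
    try:
        # preferred optimizer; a LinAlgError can propagate out of the objective
        return minimize(neg_log_likelihood, hyps, method="L-BFGS-B")
    except np.linalg.LinAlgError:
        # fall back to plain BFGS when the L-BFGS-B run fails
        return minimize(neg_log_likelihood, hyps, method="BFGS")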

Example 8: test_custom

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def test_custom(self):
        # This function comes from the documentation example.
        def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
                maxiter=100, callback=None, **options):
            bestx = x0
            besty = fun(x0)
            funcalls = 1
            niter = 0
            improved = True
            stop = False

            while improved and not stop and niter < maxiter:
                improved = False
                niter += 1
                for dim in range(np.size(x0)):
                    for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
                        testx = np.copy(bestx)
                        testx[dim] = s
                        testy = fun(testx, *args)
                        funcalls += 1
                        if testy < besty:
                            besty = testy
                            bestx = testx
                            improved = True
                    if callback is not None:
                        callback(bestx)
                    if maxfev is not None and funcalls >= maxfev:
                        stop = True
                        break

            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
                                           nfev=funcalls, success=(niter > 1))

        x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
        res = optimize.minimize(optimize.rosen, x0, method=custmin,
                                options=dict(stepsize=0.05))
        assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4) 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 39, Source: test_optimize.py

Example 9: test_minimize_scalar_custom

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def test_minimize_scalar_custom(self):
        # This function comes from the documentation example.
        def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
                maxiter=100, callback=None, **options):
            bestx = (bracket[1] + bracket[0]) / 2.0
            besty = fun(bestx)
            funcalls = 1
            niter = 0
            improved = True
            stop = False

            while improved and not stop and niter < maxiter:
                improved = False
                niter += 1
                for testx in [bestx - stepsize, bestx + stepsize]:
                    testy = fun(testx, *args)
                    funcalls += 1
                    if testy < besty:
                        besty = testy
                        bestx = testx
                        improved = True
                if callback is not None:
                    callback(bestx)
                if maxfev is not None and funcalls >= maxfev:
                    stop = True
                    break

            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
                                           nfev=funcalls, success=(niter > 1))

        res = optimize.minimize_scalar(self.fun, bracket=(0, 4), method=custmin,
                                       options=dict(stepsize=0.05))
        assert_allclose(res.x, self.solution, atol=1e-6) 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 35, Source: test_optimize.py

Example 10: setup_method

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def setup_method(self):
        self.x0 = np.array(1)
        self.f0 = 0

        minres = OptimizeResult()
        minres.x = self.x0
        minres.fun = self.f0

        self.storage = Storage(minres) 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 11, Source: test__basinhopping.py

Example 11: test_lower_f_accepted

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def test_lower_f_accepted(self):
        new_minres = OptimizeResult()
        new_minres.x = self.x0 + 1
        new_minres.fun = self.f0 - 1

        ret = self.storage.update(new_minres)
        minres = self.storage.get_lowest()
        assert_(self.x0 != minres.x)
        assert_(self.f0 != minres.fun)
        assert_(ret) 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 12, Source: test__basinhopping.py

Example 12: basinhopping

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def basinhopping(constraint_solve, constraint_check, variables, bounds, args):
    x0, shapes, shapes_flat = vars_to_x(variables)
    
    def loss_fn(x):
        x_to_vars(x, variables, shapes_flat, shapes)
        return constraint_solve.to_diffsat(cache=True).loss(args)

    def local_optimization_step(fun, x0, *losargs, **loskwargs):
        loss_before = loss_fn(x0)
        inner_opt(constraint_solve, constraint_check, variables, bounds, args)
        r = spo.OptimizeResult()
        r.x, _, _ = vars_to_x(variables)
        loss_after = constraint_solve.to_diffsat(cache=True).loss(args)
        r.success = not (loss_before == loss_after and not constraint_check.to_diffsat(cache=True).satisfy(args))
        r.fun = loss_after
        return r

    def check_basinhopping(x, f, accept):
        if abs(f) <= 10 * args.eps_check:
            x_, _, _ = vars_to_x(variables)
            x_to_vars(x, variables, shapes_flat, shapes)
            if constraint_check.to_diffsat(cache=True).satisfy(args):
                return True
            else:
                x_to_vars(x_, variables, shapes_flat, shapes)
        return False
    
    minimizer_kwargs = {}
    minimizer_kwargs['method'] = local_optimization_step

    satisfied = constraint_check.to_diffsat(cache=True).satisfy(args)
    if satisfied:
        return True
    spo.basinhopping(loss_fn, x0, niter=1000, minimizer_kwargs=minimizer_kwargs, callback=check_basinhopping,
                     T=args.basinhopping_T, stepsize=args.basinhopping_stepsize)
    return constraint_check.to_diffsat(cache=True).satisfy(args) 
Developer: eth-sri, Project: dl2, Lines: 38, Source: api.py

Example 13: fit

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def fit(self, method):
        """
        Fits the copula with the Maximum Likelihood Estimator

        Parameters
        ----------
        method: {'ml', 'mpl'}
            This will determine the variance estimate

        Returns
        -------
        ndarray
            Estimated parameters for the copula

        """

        res: OptimizeResult = minimize(self.copula_log_lik, self.initial_params, **self.optim_options)

        if not res['success']:
            if self.verbose >= 1:
                warn_no_convergence()
            return

        estimate = res['x']
        self.copula.params = estimate

        method = f"Maximum {'pseudo-' if method == 'mpl' else ''}likelihood"
        self.copula.fit_smry = FitSummary(estimate, method, res['fun'], len(self.data), self.optim_options, res)

        return estimate 
Developer: DanielBok, Project: copulae, Lines: 32, Source: max_likelihood.py
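
The check-then-use pattern around the minimize call generalizes beyond copulae. A small sketch, with scipy's built-in Rosenbrock function standing in for copula_log_lik:

from scipy.optimize import minimize, rosen

res = minimize(rosen, x0=[1.3, 0.7], method="Nelder-Mead")
if res["success"]:              # OptimizeResult supports dict-style access too
    estimate = res["x"]
    print(estimate, res["fun"])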

Example 14: fit

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def fit(self, X, y, sample_weight=None):
        """Fit non-negative linear model.

        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
            Training data
        y : numpy array of shape [n_samples,]
            Target values
        sample_weight : numpy array of shape [n_samples]
            Individual weights for each sample

        Returns
        -------
        self : returns an instance of self.

        """
        X, y = check_X_y(X, y, y_numeric=True, multi_output=False)

        if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar")

        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
            copy=self.copy_X, sample_weight=sample_weight)

        if sample_weight is not None:
            # Sample weight can be implemented via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)

        self.coef_, result = nnls(X, y.squeeze())

        if np.all(self.coef_ == 0):
            raise ConvergenceWarning("All coefficients estimated to be zero in"
                                     " the non-negative least squares fit.")

        self._set_intercept(X_offset, y_offset, X_scale)
        self.opt_result_ = OptimizeResult(success=True, status=0, x=self.coef_,
                                          fun=result)
        return self 
Developer: civisanalytics, Project: civisml-extensions, Lines: 42, Source: nonnegative.py
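
A brief sketch of the scipy.optimize.nnls call this estimator wraps, with made-up data; the ground-truth coefficient vector is illustrative.

import numpy as np
from scipy.optimize import OptimizeResult, nnls

A = np.abs(np.random.rand(20, 3))
b = A @ np.array([0.5, 0.0, 2.0])    # nonnegative ground-truth coefficients
coef, residual = nnls(A, b)          # solves min ||Ax - b|| subject to x >= 0
opt_result = OptimizeResult(success=True, status=0, x=coef, fun=residual)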

Example 15: _int_spontaneous_raman

# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import OptimizeResult [as alias]
def _int_spontaneous_raman(self, z_array, raman_matrix, alphap_fiber, freq_array,
                               cr_raman_matrix, freq_diff, ase_bc, bn_array, temperature):
        spontaneous_raman_scattering = OptimizeResult()

        simulation = Simulation.get_simulation()
        sim_params = simulation.sim_params

        dx = sim_params.raman_params.space_resolution
        h = ph.value('Planck constant')
        kb = ph.value('Boltzmann constant')

        power_ase = np.nan * np.ones(raman_matrix.shape)
        int_pump = cumtrapz(raman_matrix, z_array, dx=dx, axis=1, initial=0)

        for f_ind, f_ase in enumerate(freq_array):
            cr_raman = cr_raman_matrix[f_ind, :]
            vibrational_loss = f_ase / freq_array[:f_ind]
            eta = 1 / (np.exp((h * freq_diff[f_ind, f_ind + 1:]) / (kb * temperature)) - 1)

            int_fiber_loss = -alphap_fiber[f_ind] * z_array
            int_raman_loss = np.sum((cr_raman[:f_ind] * vibrational_loss * int_pump[:f_ind, :].transpose()).transpose(),
                                    axis=0)
            int_raman_gain = np.sum((cr_raman[f_ind + 1:] * int_pump[f_ind + 1:, :].transpose()).transpose(), axis=0)

            int_gain_loss = int_fiber_loss + int_raman_gain + int_raman_loss

            new_ase = np.sum((cr_raman[f_ind + 1:] * (1 + eta) * raman_matrix[f_ind + 1:, :].transpose()).transpose()
                             * h * f_ase * bn_array[f_ind], axis=0)

            bc_evolution = ase_bc[f_ind] * np.exp(int_gain_loss)
            ase_evolution = np.exp(int_gain_loss) * cumtrapz(new_ase *
                                                             np.exp(-int_gain_loss), z_array, dx=dx, initial=0)

            power_ase[f_ind, :] = bc_evolution + ase_evolution

        spontaneous_raman_scattering.x = 2 * power_ase
        return spontaneous_raman_scattering 
Developer: Telecominfraproject, Project: oopt-gnpy, Lines: 39, Source: science_utils.py


Note: The scipy.optimize.OptimizeResult examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. Consult the corresponding project's License before distributing or reusing the code; do not reproduce this article without permission.