

Python optimize.fminbound Method Code Examples

This article collects typical usage examples of the scipy.optimize.fminbound method in Python. If you are wondering what optimize.fminbound does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples from the scipy.optimize module.


The following presents 15 code examples of optimize.fminbound, ordered by popularity by default. Upvoting the examples you like or find useful helps the site recommend better Python code examples.
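For orientation before the examples, here is a minimal, self-contained sketch of the basic call (the quadratic objective is purely illustrative): fminbound minimizes a scalar function of one variable over a bounded interval, and full_output=True additionally returns the objective value at the minimizer, an error flag, and the number of function evaluations.

from scipy.optimize import fminbound

def f(x):
    return (x - 2.0) ** 2  # toy objective with its minimum at x = 2

# Basic call: returns only the minimizer within [0, 10]
xopt = fminbound(f, 0, 10)

# full_output=True also returns fval, an error flag, and the call count
xopt, fval, ierr, numfunc = fminbound(f, 0, 10, full_output=True)
print(xopt, fval, ierr, numfunc)  # ~2.0, ~0.0, 0, <evaluation count>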

Example 1: _psturng

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def _psturng(q, r, v):
    """scalar version of psturng"""
    if q < 0.:
        raise ValueError('q should be >= 0')

    opt_func = lambda p, r, v : abs(_qsturng(p, r, v) - q)

    if v == 1:
        if q < _qsturng(.9, r, 1):
            return .1
        elif q > _qsturng(.999, r, 1):
            return .001
        return 1. - fminbound(opt_func, .9, .999, args=(r,v))
    else:
        if q < _qsturng(.1, r, v):
            return .9
        elif q > _qsturng(.999, r, v):
            return .001
        return 1. - fminbound(opt_func, .1, .999, args=(r,v)) 
Author: birforce, Project: vnpy_crypto, Lines: 21, Source: qsturng_.py
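The pattern in Example 1 — recovering the p for which _qsturng(p, r, v) equals q by minimizing the absolute residual — is a general way to invert a monotone function on an interval with fminbound. A minimal sketch of the same idea under an illustrative assumption (inverting np.exp on [0, 3]; the target value is hypothetical):

import numpy as np
from scipy.optimize import fminbound

target = 5.0
# Find x with exp(x) == target by minimizing the absolute residual,
# just as _psturng does for _qsturng
x = fminbound(lambda x: abs(np.exp(x) - target), 0.0, 3.0)
print(x, np.log(target))  # both are approximately 1.609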

Example 2: _find_estimator_weight

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def _find_estimator_weight(self, y, dv_pre, y_pred):
        """Make line search to determine estimator weights."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            def optimization_function(alpha):
                p_ij = self._estimate_instance_probabilities(dv_pre + alpha * y_pred)
                p_i = self._estimate_bag_probabilites(p_ij)
                return self._negative_log_likelihood(p_i)

            # TODO: Add option to choose optimization method.

            alpha, fval, err, n_func = fminbound(optimization_function, 0.0, 5.0, full_output=True, disp=1)
            if self.learning_rate < 1.0:
                alpha *= self.learning_rate
        return alpha, fval 
Author: hbldh, Project: skboost, Lines: 18, Source: classifier.py

Example 3: CA_step

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def CA_step(z1, z2, theta, index, min_val, max_val):
    """Take a single coordinate ascent step.
    
    """
    inner_theta = theta.copy()
    def f(alpha):
        inner_theta[index] = theta[index] + alpha
        return -calc_gaussian_mix_log_lhd(inner_theta, z1, z2)

    assert theta[index] >= min_val
    min_step_size = min_val - theta[index]
    assert theta[index] <= max_val
    max_step_size = max_val - theta[index]

    alpha = fminbound(f, min_step_size, max_step_size)
    prev_lhd = -f(0)
    new_lhd = -f(alpha)
    if new_lhd > prev_lhd:
        theta[index] += alpha
    else:
        new_lhd = prev_lhd
    return theta, new_lhd 
Author: nboley, Project: idr, Lines: 24, Source: optimization.py

Example 4: learn_rmp

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def learn_rmp(subpops, D):
  K          = len(subpops)
  rmp_matrix = np.eye(K)
  models = learn_models(subpops)

  for k in range(K - 1):
    for j in range(k + 1, K):
      probmatrix = [np.ones([models[k].num_sample, 2]), 
                    np.ones([models[j].num_sample, 2])]
      probmatrix[0][:, 0] = models[k].density(subpops[k])
      probmatrix[0][:, 1] = models[j].density(subpops[k])
      probmatrix[1][:, 0] = models[k].density(subpops[j])
      probmatrix[1][:, 1] = models[j].density(subpops[j])

      rmp = fminbound(lambda rmp: log_likelihood(rmp, probmatrix, K), 0, 1)
      rmp += np.random.randn() * 0.01
      rmp = np.clip(rmp, 0, 1)
      rmp_matrix[k, j] = rmp
      rmp_matrix[j, k] = rmp

  return rmp_matrix

# OPTIMIZATION RESULT HELPERS 
Author: thanhbok26b, Project: mfea-ii, Lines: 25, Source: operators.py

Example 5: test_var

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def test_var(self, sig2_0, return_weights=False):
        """
        Returns -2 x log-likelihood ratio and the p-value for the
        hypothesized variance

        Parameters
        ----------
        sig2_0 : float
            Hypothesized variance to be tested

        return_weights : bool
            If True, returns the weights that maximize the
            likelihood of observing sig2_0. Default is False

        Returns
        --------
        test_results : tuple
            The  log-likelihood ratio and the p_value  of sig2_0

        Examples
        --------
        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> random_numbers = np.random.standard_normal(1000)*100
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> hyp_test = el_analysis.test_var(9500)
        """
        self.sig2_0 = sig2_0
        mu_max = max(self.endog)
        mu_min = min(self.endog)
        llr = optimize.fminbound(self._opt_var, mu_min, mu_max, \
                                 full_output=1)[1]
        p_val = chi2.sf(llr, 1)
        if return_weights:
            return llr, p_val, self.new_weights.T
        else:
            return  llr, p_val 
Author: birforce, Project: vnpy_crypto, Lines: 39, Source: descriptive.py

Example 6: test_fminbound

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def test_fminbound(self):
        """Test fminbound """
        x = optimize.fminbound(self.fun, 0, 1)
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.fminbound(self.fun, 1, 5)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
        assert_allclose(x, self.solution, atol=1e-6)
        assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1) 
Author: ktraunmueller, Project: Computable, Lines: 13, Source: test_optimize.py

Example 7: test_fminbound_scalar

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def test_fminbound_scalar(self):
        assert_raises(ValueError, optimize.fminbound, self.fun,
                      np.zeros(2), 1)

        x = optimize.fminbound(self.fun, 1, np.array(5))
        assert_allclose(x, self.solution, atol=1e-6) 
Author: ktraunmueller, Project: Computable, Lines: 8, Source: test_optimize.py

Example 8: test_fminbound

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def test_fminbound(self):
        x = optimize.fminbound(self.fun, 0, 1)
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.fminbound(self.fun, 1, 5)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
        assert_allclose(x, self.solution, atol=1e-6)
        assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1) 
Author: Relph1119, Project: GraphicDesignPatternByPython, Lines: 12, Source: test_optimize.py

Example 9: test_fminbound_scalar

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def test_fminbound_scalar(self):
        try:
            optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
            self.fail("exception not raised")
        except ValueError as e:
            assert_('must be scalar' in str(e))

        x = optimize.fminbound(self.fun, 1, np.array(5))
        assert_allclose(x, self.solution, atol=1e-6) 
Author: Relph1119, Project: GraphicDesignPatternByPython, Lines: 11, Source: test_optimize.py

Example 10: gradient_ascent

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def gradient_ascent(r1, r2, theta, gradient_magnitude, 
                    fix_mu=False, fix_sigma=False):
    for j in range(len(theta)):
        if fix_mu and j == 0: continue
        if fix_sigma and j == 1: continue
        
        prev_loss = calc_loss(r1, r2, theta)

        mu, sigma, rho, p = theta
        z1 = compute_pseudo_values(r1, mu, sigma, p)
        z2 = compute_pseudo_values(r2, mu, sigma, p)
        real_grad = calc_pseudo_log_lhd_gradient(theta, z1, z2, False, False)
        
        gradient = numpy.zeros(len(theta))
        gradient[j] = gradient_magnitude
        if real_grad[j] < 0: gradient[j] = -gradient[j]
                
        min_step = 0
        max_step = find_max_step_size(
            theta[j], gradient[j], (False if j in (0,1) else True))

        if max_step < 1e-12: continue

        alpha = fminbound(
            lambda x: calc_loss( r1, r2, theta + x*gradient ),
            min_step, max_step)
                
        loss = calc_loss( r1, r2, theta + alpha*gradient )
        if loss < prev_loss:
            theta += alpha*gradient

    return theta 
Author: nboley, Project: idr, Lines: 34, Source: optimization.py

Example 11: function

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def function(b, c):
    return optimize.fminbound(f, b, c)

# If called with arguments:
# http://127.0.0.1:5000/function?b=2&c=10
# we get a correct output:
# {"success": true, "error_msg": null, "result": 3.83746830432337} 
Author: ianozsvald, Project: featherweight_web_api, Lines: 9, Source: example_scipy.py

Example 12: sample_623

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def sample_623():
    """
    6.2.3 趨勢骨架圖
    :return:
    """
    import scipy.optimize as sco
    from scipy.interpolate import interp1d

    # 繼續使用TSLA收盤價格序列
    # interp1d線性插值函數
    linear_interp = interp1d(x, y)
    # 繪製插值
    plt.plot(linear_interp(x))

    # fminbound尋找給定範圍內的最小值:在linear_inter中尋找全局最優範圍1-504
    global_min_pos = sco.fminbound(linear_interp, 1, 504)
    # 繪製全局最優點,全局最小值點,r<:紅色三角
    plt.plot(global_min_pos, linear_interp(global_min_pos), 'r<')

    # 每個單位都先畫一個點,由兩個點連成一條直線形成股價骨架圖
    last_postion = None
    # 步長50,每50個單位求一次局部最小
    for find_min_pos in np.arange(50, len(x), 50):
        # fmin_bfgs尋找給定值的局部最小值
        local_min_pos = sco.fmin_bfgs(linear_interp, find_min_pos, disp=0)
        # 形成最小點位置信息(x, y)
        draw_postion = (local_min_pos, linear_interp(local_min_pos))
        # 第一個50單位last_postion=none, 之後都有值
        if last_postion is not None:
            # 將兩兩臨近局部最小值相連,兩個點連成一條直線
            plt.plot([last_postion[0][0], draw_postion[0][0]],
                     [last_postion[1][0], draw_postion[1][0]], 'o-')
        # 將這個步長單位內的最小值點賦予last_postion
        last_postion = draw_postion
    plt.show() 
Author: bbfamily, Project: abu, Lines: 37, Source: c6.py
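Example 12's combination of interp1d and fminbound can be tried without the TSLA data. The sketch below substitutes a synthetic series (x and y here are illustrative assumptions); note that fminbound assumes a unimodal objective, so on a wiggly interpolant it returns a local minimum inside the bracket rather than a guaranteed global one.

import numpy as np
import scipy.optimize as sco
from scipy.interpolate import interp1d

# Synthetic stand-in for a price series
x = np.arange(0, 504)
y = np.sin(x / 80.0) + 0.1 * np.cos(x / 7.0)

linear_interp = interp1d(x, y)
# Search the interpolant for a minimum over [1, 503]
min_pos = sco.fminbound(linear_interp, 1, 503)
print(min_pos, linear_interp(min_pos))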

Example 13: bellman_operator

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def bellman_operator(w, grid, β, u, f, shocks, Tw=None, compute_policy=0):
    """
    The approximate Bellman operator, which computes and returns the
    updated value function Tw on the grid points.  An array to store
    the new set of values Tw is optionally supplied (to avoid having to
    allocate new arrays at each iteration).  If supplied, any existing data in 
    Tw will be overwritten.

    Parameters
    ----------
    w : array_like(float, ndim=1)
        The value of the input function on different grid points
    grid : array_like(float, ndim=1)
        The set of grid points
    β : scalar
        The discount factor
    u : function
        The utility function
    f : function
        The production function
    shocks : numpy array
        An array of draws from the shock, for Monte Carlo integration (to
        compute expectations).
    Tw : array_like(float, ndim=1), optional (default=None)
        Array to write output values to
    compute_policy : Boolean, optional (default=False)
        Whether or not to compute policy function

    """
    # === Apply linear interpolation to w === #
    w_func = lambda x: np.interp(x, grid, w)

    # == Initialize Tw if necessary == #
    if Tw is None:
        Tw = np.empty_like(w)

    if compute_policy:
        σ = np.empty_like(w)

    # == set Tw[i] = max_c { u(c) + β E w(f(y  - c) z)} == #
    for i, y in enumerate(grid):
        def objective(c):
            return - u(c) - β * np.mean(w_func(f(y - c) * shocks))
        c_star = fminbound(objective, 1e-10, y)
        if compute_policy:
            σ[i] = c_star
        Tw[i] = - objective(c_star)

    if compute_policy:
        return Tw, σ
    else:
        return Tw 
Author: QuantEcon, Project: QuantEcon.lectures.code, Lines: 54, Source: optgrowth.py
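A hedged usage sketch for the operator above: the primitives (log utility, Cobb-Douglas production, lognormal shocks, grid bounds) are illustrative assumptions, and bellman_operator refers to the function just shown.

import numpy as np

grid = np.linspace(1e-5, 4.0, 200)
β = 0.96
u = np.log
f = lambda k: k ** 0.4
shocks = np.exp(0.1 * np.random.randn(250))

w = u(grid)                      # initial guess for the value function
for _ in range(50):              # crude fixed-point iteration on the operator
    w = bellman_operator(w, grid, β, u, f, shocks)
w, σ = bellman_operator(w, grid, β, u, f, shocks, compute_policy=True)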

Example 14: bellman_operator

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def bellman_operator(V, cp, return_policy=False):
    """
    The approximate Bellman operator, which computes and returns the
    updated value function TV (or the V-greedy policy c if
    return_policy is True).

    Parameters
    ----------
    V : array_like(float)
        A NumPy array of dim len(cp.asset_grid) times len(cp.z_vals)
    cp : ConsumerProblem
        An instance of ConsumerProblem that stores primitives
    return_policy : bool, optional(default=False)
        Indicates whether to return the greedy policy given V or the
        updated value function TV.  Default is TV.

    Returns
    -------
    array_like(float)
        Returns either the greedy policy given V or the updated value
        function TV.

    """
    # === Simplify names, set up arrays === #
    R, Π, β, u, b = cp.R, cp.Π, cp.β, cp.u, cp.b
    asset_grid, z_vals = cp.asset_grid, cp.z_vals
    new_V = np.empty(V.shape)
    new_c = np.empty(V.shape)
    z_idx = list(range(len(z_vals)))

    # === Linear interpolation of V along the asset grid === #
    vf = lambda a, i_z: np.interp(a, asset_grid, V[:, i_z])

    # === Solve r.h.s. of Bellman equation === #
    for i_a, a in enumerate(asset_grid):
        for i_z, z in enumerate(z_vals):
            def obj(c):  # objective function to be *minimized*
                y = sum(vf(R * a + z - c, j) * Π[i_z, j] for j in z_idx)
                return - u(c) - β * y
            c_star = fminbound(obj, 1e-8, R * a + z + b)
            new_c[i_a, i_z], new_V[i_a, i_z] = c_star, -obj(c_star)

    if return_policy:
        return new_c
    else:
        return new_V 
Author: QuantEcon, Project: QuantEcon.lectures.code, Lines: 48, Source: ifp.py

Example 15: coordinate_ascent

# Required module: from scipy import optimize [as alias]
# Or: from scipy.optimize import fminbound [as alias]
def coordinate_ascent(r1, r2, theta, gradient_magnitude, 
                      fix_mu=False, fix_sigma=False):
    for j in range(len(theta)):
        if fix_mu and j == 0: continue
        if fix_sigma and j == 1: continue
        
        prev_loss = calc_loss(r1, r2, theta)

        # find the direction of the gradient
        gradient = numpy.zeros(len(theta))
        gradient[j] = gradient_magnitude
        init_alpha = 5e-12
        while init_alpha < 1e-2:
            pos = calc_loss( r1, r2, theta - init_alpha*gradient )
            neg = calc_loss( r1, r2, theta + init_alpha*gradient )
            if neg < prev_loss < pos:
                gradient[j] = gradient[j]
                #assert(calc_loss( 
                #       r1, r2, theta - init_alpha*gradient ) > prev_loss)
                #assert(calc_loss( 
                #       r1, r2, theta + init_alpha*gradient ) <= prev_loss)
                break
            elif neg > prev_loss > pos:
                gradient[j] = -gradient[j]
                #assert(calc_loss( 
                #    r1, r2, theta - init_alpha*gradient ) > prev_loss)
                #assert(calc_loss( 
                #    r1, r2, theta + init_alpha*gradient ) <= prev_loss)
                break
            else:
                init_alpha *= 10         

        #log( pos - prev_loss, neg - prev_loss )
        assert init_alpha < 1e-1
        
        min_step = 0
        max_step = find_max_step_size(
            theta[j], gradient[j], (False if j in (0,1) else True))

        if max_step < 1e-12: continue

        alpha = fminbound(
            lambda x: calc_loss( r1, r2, theta + x*gradient ),
            min_step, max_step)
        
        
        loss = calc_loss( r1, r2, theta + alpha*gradient )
        #log( "LOSS:", loss, prev_loss, loss-prev_loss )
        if loss < prev_loss:
            theta += alpha*gradient

    return theta 
Author: nboley, Project: idr, Lines: 54, Source: optimization.py
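Examples 3, 10, and 15 all rely on the same core trick: fminbound acting as a bounded line search along a fixed direction. A minimal standalone sketch with a hypothetical quadratic loss:

import numpy as np
from scipy.optimize import fminbound

def loss(theta):
    return np.sum((theta - np.array([1.0, -2.0])) ** 2)

theta = np.zeros(2)
direction = np.array([1.0, 0.0])  # search along the first coordinate
# Minimize the 1-D restriction of the loss over step sizes in [0, 5]
alpha = fminbound(lambda a: loss(theta + a * direction), 0.0, 5.0)
theta = theta + alpha * direction  # alpha is approximately 1.0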


Note: The scipy.optimize.fminbound examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not republish without permission.