This article collects typical usage examples of the Python method scipy.optimize.brent. If you have been wondering what optimize.brent does, how to call it, or where it is used, the curated examples below may help. You can also read further about the scipy.optimize module the method belongs to.
The following 13 code examples of optimize.brent are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
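Before the examples, here is a minimal self-contained sketch (not taken from the examples below) of the basic call: optimize.brent performs scalar minimization with Brent's method, takes an optional bracket, and returns the minimizer.

from scipy import optimize

def f(x):
    # Simple quadratic with its minimum at x = 2.0
    return (x - 2.0) ** 2

xmin = optimize.brent(f, brack=(0.0, 3.0))
print(xmin)  # approximately 2.0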
Example 1: _yj_normmax
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
def _yj_normmax(x, brack=(-2, 2)):
    """Compute optimal YJ transform parameter for input data.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : 2-tuple
        The starting interval for a downhill bracket search.
    """
    # Use MLE to compute the optimal YJ parameter
    def _mle_opt(i, brck):
        def _eval_mle(lmb, data):
            # Function to minimize
            return -_yj_llf(data, lmb)

        return optimize.brent(_eval_mle, brack=brck, args=(i,))

    return _mle_opt(x, brack)
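Example 1 assumes a module-level helper _yj_llf (the Yeo-Johnson log-likelihood), which is not shown. Below is a plausible sketch of that helper, mirroring the likelihood used in Example 3 further down; the name _yj_transform is illustrative, not the library's own.

import numpy as np

def _yj_transform(x, lmb):
    # Yeo-Johnson transform, branching on the sign of x and on lmb.
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    if abs(lmb) > 1e-12:
        out[pos] = ((x[pos] + 1) ** lmb - 1) / lmb
    else:
        out[pos] = np.log1p(x[pos])
    if abs(lmb - 2) > 1e-12:
        out[~pos] = -(((-x[~pos] + 1) ** (2 - lmb) - 1) / (2 - lmb))
    else:
        out[~pos] = -np.log1p(-x[~pos])
    return out

def _yj_llf(data, lmb):
    # Yeo-Johnson log-likelihood: a variance term plus the Jacobian of
    # the transform, as in Example 3's _neg_log_likelihood.
    data = np.asarray(data, dtype=float)
    trans = _yj_transform(data, lmb)
    llf = -data.shape[0] / 2.0 * np.log(trans.var())
    llf += (lmb - 1) * (np.sign(data) * np.log1p(np.abs(data))).sum()
    return llf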
Example 2: boxcox_normmax
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
import numpy as np
from scipy import optimize, stats
from scipy.stats import distributions, boxcox

def boxcox_normmax(x, brack=(-1.0, 1.0)):
    N = len(x)
    # Compute the uniform order statistic medians.
    Ui = np.zeros(N)
    Ui[-1] = 0.5 ** (1.0 / N)
    Ui[0] = 1 - Ui[-1]
    i = np.arange(2, N)
    Ui[1:-1] = (i - 0.3175) / (N + 0.365)
    xvals = distributions.norm.ppf(Ui)

    # tempfunc computes the x-axis values of the probability plot, fits a
    # linear regression (including the correlation), and returns 1 - r so
    # that a minimization routine maximizes the correlation.
    def tempfunc(lmbda, xvals, samps):
        y = boxcox(samps, lmbda)
        yvals = np.sort(y)
        r, prob = stats.pearsonr(xvals, yvals)
        return 1 - r

    return optimize.brent(tempfunc, brack=brack, args=(xvals, x))
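A usage sketch of the function above on synthetic data; for a lognormal sample the correlation-maximizing lambda should come out near zero (the log transform):

import numpy as np

rng = np.random.default_rng(0)
x = rng.lognormal(size=500)  # positive, right-skewed sample
lmax = boxcox_normmax(x, brack=(-1.0, 1.0))
print(lmax)  # near 0 for lognormal data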
Example 3: _yeo_johnson_optimize
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
def _yeo_johnson_optimize(self, x):
    """Find and return optimal lambda parameter of the Yeo-Johnson
    transform by MLE, for observed data x.

    Like for Box-Cox, MLE is done via the brent optimizer.
    """
    def _neg_log_likelihood(lmbda):
        """Return the negative log likelihood of the observed data x as a
        function of lambda."""
        x_trans = self._yeo_johnson_transform(x, lmbda)
        n_samples = x.shape[0]
        loglike = -n_samples / 2 * np.log(x_trans.var())
        loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum()
        return -loglike

    # the computation of lambda is influenced by NaNs so we need to
    # get rid of them
    x = x[~np.isnan(x)]
    # choosing bracket -2, 2 like for boxcox
    return optimize.brent(_neg_log_likelihood, brack=(-2, 2))
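This method looks like scikit-learn's PowerTransformer._yeo_johnson_optimize; assuming that provenance, the same lambda is reachable through the public estimator API:

import numpy as np
from sklearn.preprocessing import PowerTransformer

rng = np.random.default_rng(1)
x = rng.normal(size=300) ** 3  # skewed data with both signs

pt = PowerTransformer(method='yeo-johnson', standardize=False)
pt.fit(x.reshape(-1, 1))
print(pt.lambdas_)  # the lambda found by the brent MLE above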
Example 4: _yj_est_lam
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
def _yj_est_lam(y, brack, dtype=np.float32):
    y = np.asarray(y).astype(dtype)

    # Use MLE to compute the optimal YJ parameter
    def _mle_opt(i, brck):
        def _eval_mle(lmb, data):
            # Function to minimize
            return -_yj_llf(data, lmb)

        # Suppress the invalid scalar warnings we might get in the
        # optimization routine.
        @suppress
        def brent_optimize():
            return optimize.brent(_eval_mle, brack=brck, args=(i,))

        return brent_optimize()

    return _mle_opt(y, brack)
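The @suppress decorator belongs to the snippet's own library and is not shown. A minimal hypothetical stand-in that silences the invalid-value warnings mentioned in the comment:

import warnings
from functools import wraps

def suppress(func):
    # Hypothetical stand-in: run the wrapped function with all warnings
    # (e.g. numpy invalid-value RuntimeWarnings) silenced.
    @wraps(func)
    def wrapper(*args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return func(*args, **kwargs)
    return wrapper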
Example 5: P_dew_at_T
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
def P_dew_at_T(self, T, zs, Psats=None):
    Psats = self._Psats(Psats, T)
    Pmax = self.P_bubble_at_T(T, zs, Psats)
    diff = 1E-7
    # EOSs do not solve at very low pressure
    if self.use_phis:
        Pmin = max(Pmax*diff, 1)
    else:
        Pmin = Pmax*diff
    P_dew = brenth(self._T_VF_err, Pmin, Pmax, args=(T, zs, Psats, Pmax, 1))
    self.__TVF_solve_cache = None
    return P_dew
    # try:
    #     return brent(self._dew_P_UNIFAC_err, args=(T, zs, Psats, Pmax),
    #                  brack=(Pmax*diff, Pmax*(1-diff), Pmax))
    # except:
    #     return golden(self._dew_P_UNIFAC_err, args=(T, zs, Psats, Pmax),
    #                   brack=(Pmax, Pmax*(1-diff)))
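Note that the live code path calls brenth, scipy's Brent-family root finder over a sign-changing interval; brent the minimizer appears only in the commented-out fallback. A minimal sketch of the distinction:

from scipy import optimize

# Root finding: brenth needs an interval [a, b] where f changes sign.
root = optimize.brenth(lambda x: x ** 2 - 4.0, 0.0, 5.0)           # -> 2.0

# Minimization: brent needs a bracket around (or sloping toward) a minimum.
xmin = optimize.brent(lambda x: (x - 2.0) ** 2, brack=(0.0, 3.0))  # -> 2.0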
Example 6: test_brent
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
def test_brent(self):
    """brent algorithm"""
    x = optimize.brent(self.fun)
    assert_allclose(x, self.solution, atol=1e-6)

    x = optimize.brent(self.fun, brack=(-3, -2))
    assert_allclose(x, self.solution, atol=1e-6)

    x = optimize.brent(self.fun, full_output=True)
    assert_allclose(x[0], self.solution, atol=1e-6)

    x = optimize.brent(self.fun, brack=(-15, -1, 15))
    assert_allclose(x, self.solution, atol=1e-6)
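For context, with full_output=True brent returns the tuple (xmin, fval, iter, funcalls), which is why the test above checks x[0]:

from scipy import optimize

xmin, fval, niter, funcalls = optimize.brent(lambda x: (x + 1.5) ** 2,
                                             full_output=True)
print(xmin, fval, niter, funcalls)  # xmin is near -1.5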
Example 7: ppcc_max
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
import numpy as np
from scipy import optimize, stats
from scipy.stats import distributions

def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
    """Returns the shape parameter that maximizes the probability plot
    correlation coefficient for the given data to a one-parameter
    family of distributions.

    See also ppcc_plot
    """
    try:
        ppf_func = eval('distributions.%s.ppf' % dist)
    except AttributeError:
        raise ValueError("%s is not a valid distribution with a ppf." % dist)
    """
    res = inspect.getargspec(ppf_func)
    if not ('loc' == res[0][-2] and 'scale' == res[0][-1] and
            0.0 == res[-1][-2] and 1.0 == res[-1][-1]):
        raise ValueError("Function does not have default location "
                         "and scale parameters\n that are 0.0 and 1.0 respectively.")
    if (1 < len(res[0]) - len(res[-1]) - 1) or (1 > len(res[0]) - 3):
        raise ValueError("Must be a one-parameter family.")
    """
    N = len(x)
    # Compute the uniform order statistic medians.
    Ui = np.zeros(N)
    Ui[-1] = 0.5 ** (1.0 / N)
    Ui[0] = 1 - Ui[-1]
    i = np.arange(2, N)
    Ui[1:-1] = (i - 0.3175) / (N + 0.365)
    osr = np.sort(x)

    # tempfunc computes the x-axis values of the probability plot, fits a
    # linear regression (including the correlation), and returns 1 - r so
    # that a minimization routine maximizes the correlation.
    def tempfunc(shape, mi, yvals, func):
        xvals = func(mi, shape)
        r, prob = stats.pearsonr(xvals, yvals)
        return 1 - r

    return optimize.brent(tempfunc, brack=brack, args=(Ui, osr, ppf_func))
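A usage sketch with the current public scipy API, which keeps this signature; it recovers the Tukey-lambda shape parameter of a synthetic sample:

import numpy as np
from scipy import stats

rng = np.random.default_rng(2)
x = stats.tukeylambda.rvs(0.7, size=1000, random_state=rng)
shape = stats.ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda')
print(shape)  # roughly 0.7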
Example 8: _box_cox_optimize
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
def _box_cox_optimize(self, x):
    """Find and return the optimal lambda parameter of the Box-Cox
    transform by MLE, for observed data x.

    Here we use the scipy builtin, which itself uses the brent optimizer.
    """
    # The computation of lambda is influenced by NaNs, so we need to
    # get rid of them.
    _, lmbda = stats.boxcox(x[~np.isnan(x)], lmbda=None)
    return lmbda
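A usage sketch of the scipy builtin this method delegates to:

import numpy as np
from scipy import stats

rng = np.random.default_rng(3)
x = rng.lognormal(size=500)       # strictly positive sample
x_trans, lmbda = stats.boxcox(x)  # lmbda=None -> MLE via brent internally
print(lmbda)                      # near 0 for lognormal data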
Example 9: test_brent
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
def test_brent(self):
    x = optimize.brent(self.fun)
    assert_allclose(x, self.solution, atol=1e-6)

    x = optimize.brent(self.fun, brack=(-3, -2))
    assert_allclose(x, self.solution, atol=1e-6)

    x = optimize.brent(self.fun, full_output=True)
    assert_allclose(x[0], self.solution, atol=1e-6)

    x = optimize.brent(self.fun, brack=(-15, -1, 15))
    assert_allclose(x, self.solution, atol=1e-6)
Example 10: test_brent_negative_tolerance
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
def test_brent_negative_tolerance():
    assert_raises(ValueError, optimize.brent, np.cos, tol=-.01)
Example 11: main
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
def main():
    res = optimize.brent(propagation_function,
                         brack=(0, 1e-5, 5e-5),
                         tol=1e-3,
                         full_output=True)
    print("Output:", res)

    plt.figure('dE vs dTheta')
    plt.plot(np.array(minimizationArray)[:, 0]*1e6,
             np.array(minimizationArray)[:, 1],
             'ro', ls='')
    plt.grid()
    axes = plt.gca()
    axes.set_xlabel(r"$d\Theta$, $\mu$rad")
    axes.set_ylabel(r"$\Delta$E, eV")
    plt.savefig("dE_vs_dTheta.png")

    plt.figure('Flux vs dTheta')
    plt.plot(np.array(minimizationArray)[:, 0]*1e6,
             np.array(minimizationArray)[:, 2],
             'go', ls='')
    plt.grid()
    axes = plt.gca()
    axes.set_xlabel(r"$d\Theta$, $\mu$rad")
    axes.set_ylabel("Flux, photons/s")
    plt.savefig("Flux_vs_dTheta.png")

    plt.figure('Convergence')
    plt.plot(np.arange(len(minimizationArray)),
             np.array(minimizationArray)[:, 1],
             '-bo')
    axes = plt.gca()
    axes.set_xlabel("Iteration Nr.")
    axes.set_ylabel(r"$\Delta$E, eV")
    plt.savefig("Convergence.png")
    plt.show()
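propagation_function and minimizationArray are globals defined elsewhere in the script. A hypothetical stand-in that shows the recording pattern the three plots rely on:

import numpy as np
from scipy import optimize

minimizationArray = []  # each evaluation appends (dTheta, dE, flux)

def propagation_function(dtheta):
    # Hypothetical smooth objective with a minimum near 25 urad; the real
    # function runs a ray-tracing simulation and returns the energy spread.
    dE = 1.0 + ((dtheta - 2.5e-5) * 1e5) ** 2
    flux = 1e12 / dE
    minimizationArray.append((dtheta, dE, flux))
    return dE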
Example 12: boxcox
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
import numpy as np
from scipy import optimize

def boxcox(x, lmbda=None, alpha=None):
    """
    Return a positive dataset transformed by a Box-Cox power transformation.

    Parameters
    ----------
    x : ndarray
        Input array.
    lmbda : {None, scalar}, optional
        If `lmbda` is not None, do the transformation for that value.
        If `lmbda` is None, find the lambda that maximizes the
        log-likelihood function and return it as the second output argument.
    alpha : {None, float}, optional
        If `alpha` is not None, return the ``100 * (1-alpha)%`` confidence
        interval for `lmbda` as the third output argument.
        If `alpha` is not None it must be between 0.0 and 1.0.

    Returns
    -------
    boxcox : ndarray
        Box-Cox power transformed array.
    maxlog : float, optional
        If the `lmbda` parameter is None, the second returned argument is
        the lambda that maximizes the log-likelihood function.
    (min_ci, max_ci) : tuple of float, optional
        If the `lmbda` parameter is None and `alpha` is not None, this
        returned tuple of floats represents the minimum and maximum
        confidence limits given `alpha`.
    """
    if np.any(x < 0):
        raise ValueError("Data must be positive.")

    if lmbda is not None:  # single transformation
        lmbda = lmbda * (x == x)
        y = np.where(lmbda == 0, np.log(x), (x**lmbda - 1) / lmbda)
        return y

    # Otherwise find the lmbda that maximizes the log-likelihood function.
    def tempfunc(lmb, data):  # function to minimize
        return -boxcox_llf(lmb, data)

    lmax = optimize.brent(tempfunc, brack=(-2.0, 2.0), args=(x,))
    y = boxcox(x, lmax)
    if alpha is None:
        return y, lmax

    # Otherwise find the confidence interval.
    interval = _boxcox_conf_interval(x, lmax, alpha)
    return y, lmax, interval
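boxcox_llf and _boxcox_conf_interval are siblings in the same old scipy.stats module this snippet comes from. In current scipy the same three-output behavior is available directly:

import numpy as np
from scipy import stats

rng = np.random.default_rng(4)
x = rng.lognormal(size=500)
y, lmax, (ci_lo, ci_hi) = stats.boxcox(x, lmbda=None, alpha=0.05)
print(lmax, (ci_lo, ci_hi))  # MLE lambda and its 95% confidence interval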
Example 13: boxcox_normmax
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import brent [as alias]
import numpy as np
from scipy import optimize, stats
from scipy.stats import distributions, boxcox, boxcox_llf
# _calc_uniform_order_statistic_medians is a private scipy.stats helper.

def boxcox_normmax(x, bounds=None, brack=(-2.0, 2.0), method='pearsonr'):
    # If bounds is None, use simple (unconstrained) Brent optimisation...
    if bounds is None:
        def optimizer(func, args):
            return optimize.brent(func, brack=brack, args=args)
    # ...otherwise use bounded Brent optimisation.
    else:
        # Input checks on bounds.
        if not isinstance(bounds, tuple) or len(bounds) != 2:
            raise ValueError(
                f"`bounds` must be a tuple of length 2, but found: {bounds}")

        def optimizer(func, args):
            return optimize.fminbound(func, bounds[0], bounds[1], args=args)

    def _pearsonr(x):
        osm_uniform = _calc_uniform_order_statistic_medians(len(x))
        xvals = distributions.norm.ppf(osm_uniform)

        def _eval_pearsonr(lmbda, xvals, samps):
            # This function computes the x-axis values of the probability
            # plot and computes a linear regression (including the
            # correlation) and returns ``1 - r`` so that a minimization
            # function maximizes the correlation.
            y = boxcox(samps, lmbda)
            yvals = np.sort(y)
            r, prob = stats.pearsonr(xvals, yvals)
            return 1 - r

        return optimizer(_eval_pearsonr, args=(xvals, x))

    def _mle(x):
        def _eval_mle(lmb, data):
            # function to minimize
            return -boxcox_llf(lmb, data)

        return optimizer(_eval_mle, args=(x,))

    def _all(x):
        maxlog = np.zeros(2, dtype=float)
        maxlog[0] = _pearsonr(x)
        maxlog[1] = _mle(x)
        return maxlog

    methods = {'pearsonr': _pearsonr,
               'mle': _mle,
               'all': _all}
    if method not in methods:
        raise ValueError("Method %s not recognized." % method)

    optimfunc = methods[method]
    return optimfunc(x)
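A usage sketch of the variant above, assuming its imports are in scope (the bounds parameter, which swaps brent for fminbound, appears not to be part of stock scipy.stats.boxcox_normmax); method='mle' avoids the private order-statistics helper:

import numpy as np

rng = np.random.default_rng(5)
x = rng.lognormal(size=500)
lam_free = boxcox_normmax(x, method='mle')                     # unbounded brent
lam_clip = boxcox_normmax(x, bounds=(0.0, 1.0), method='mle')  # fminbound
print(lam_free, lam_clip)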