This article collects typical usage examples of the Python method scipy.optimize.brute. If you are unsure what optimize.brute does, how to call it, or want to see it used in real code, the curated method examples here should help. You can also explore further usage examples from the scipy.optimize module.
Fifteen code examples of optimize.brute are shown below, sorted by popularity by default.
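Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what optimize.brute does: it evaluates the objective function on a grid spanned by the given ranges and returns the best grid point, optionally refined by a local optimizer through the finish argument.

import numpy as np
from scipy import optimize

def f(x):
    # 1-D toy objective with its minimum at x = 0.3; note that brute
    # passes x as a length-1 array even for 1-D problems (see Example 9)
    return (x[0] - 0.3) ** 2

# A (low, high) range entry is expanded into Ns grid points;
# finish=None returns the raw grid minimum without local polishing.
x_best = optimize.brute(f, ranges=[(-1, 1)], Ns=21, finish=None)
print(x_best)  # approximately 0.3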
Example 1: objective_function
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def objective_function(self, parameter_vector, data):
    """The objective function for the brute-force and gradient-based optimizers."""
if self.model.N_models > 1:
nested_fractions = parameter_vector[-(self.model.N_models - 1):]
normalized_fractions = nested_to_normalized_fractions(
nested_fractions)
parameter_vector_ = np.r_[
parameter_vector[:-(self.model.N_models - 1)],
normalized_fractions]
else:
parameter_vector_ = parameter_vector
parameter_vector_ = (
parameter_vector_ * self.model.scales_for_optimization)
parameters = {}
parameters.update(
self.model.parameter_vector_to_parameters(parameter_vector_)
)
E_model = self.model(self.acquisition_scheme, **parameters)
E_diff = E_model - data
objective = np.dot(E_diff, E_diff) / len(data)
return objective
Example 2: _compute_mle_safe
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def _compute_mle_safe(self):
"""Compute the MLE via a grid-search.
This is a stable approach if sufficient gridpoints are used.
"""
one_hits, all_hits = self._get_hits()
# search range
eps = 1e-15 # to avoid invalid value in log
search_range = [0 + eps, np.pi / 2 - eps]
def loglikelihood(theta):
# logL contains the first `it` terms of the full loglikelihood
logL = 0
for i, k in enumerate(self._evaluation_schedule):
logL += np.log(np.sin((2 * k + 1) * theta) ** 2) * one_hits[i]
logL += np.log(np.cos((2 * k + 1) * theta) ** 2) * (all_hits[i] - one_hits[i])
return -logL
est_theta = brute(loglikelihood, [search_range], Ns=self._likelihood_evals)[0]
return est_theta
Example 3: _ls2ar
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def _ls2ar(inp, strinp):
r"""Convert float or linspace-input to arange/slice-input for brute."""
# Check if input is 1 or 3 elements (float, list, tuple, array)
if np.size(inp) == 1:
start = np.squeeze(inp)
stop = start+1
num = 1
elif np.size(inp) == 3:
start = inp[0]
stop = inp[1]
num = inp[2]
else:
raise ValueError(f"<{strinp}> must be a float or a tuple of 3 elements"
f" (start, stop, num); <{strinp} provided: {inp}")
# Re-arrange it to be compatible with np.arange/slice for brute
if num < 2 or start == stop:
stop = start
step = 1
else:
step = (stop-start)/(num-1)
return (start, stop+step/2, step)
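As a quick illustration of the triplet _ls2ar returns (with hypothetical inputs; "depth" stands in for an actual parameter name): the stop+step/2 offset ensures the end point survives an arange-style expansion, which is how brute interprets a (start, stop, step) range entry.

rng = _ls2ar((0.0, 1.0, 5), "depth")
print(rng)              # (0.0, 1.125, 0.25)
print(np.arange(*rng))  # [0.   0.25 0.5  0.75 1.  ]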
Example 4: min_func_improved
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def min_func_improved(self, l_pmr):
"""
包装min_func具体现实函数,在brust_min函数中使用:
sco.brute(self.min_func_improved, bnds, full_output=False, finish=None)
计算最优参数组合,因为在self.min_func中计算的是提高improved值,要想得到最大
提高参数组合,即self.min_func返回值最大的组合,使用最优sco.brute的目标是
最小值,所以使用 -self.min_func(l_pmr)[0],找到最小值,结果的参数组合即为
最大提高improved值的参数组合
:param l_pmr: 可迭代序列,eg:(-0.45, -0.11, 0.77), 由三个float值组成的序列
l_pmr[0]: 切取self.cprs使用的分类簇交易获利和值,即lps阀值:self.cprs['lps'] <= l_pmr[0]
l_pmr[1]: 切取self.cprs使用的分类簇交易获利平均值,即lms阀值:self.cprs['lms'] <= l_pmr[1]
l_pmr[2]: 切取self.cprs使用的分类簇交易失败比例,即lrs阀值:self.cprs['lrs'] >= l_pmr[2]
:return: -self.min_func(l_pmr)[0],即[improved, effect_num][0], 即improved值,float
"""
self.brust_progress.show()
return -self.min_func(l_pmr)[0]
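The negation trick is generic and easy to demonstrate outside this class. A standalone sketch with a toy objective (not the library's actual min_func):

import numpy as np
import scipy.optimize as sco

def improvement(params):
    # Toy surface whose maximum sits at (0.5, -0.2)
    x, y = params
    return 1.0 - (x - 0.5) ** 2 - (y + 0.2) ** 2

# brute minimizes, so the maximizer of improvement() is found by
# minimizing its negation; 3-tuples are expanded arange-style.
bnds = ((0, 1.05, 0.05), (-1, 1.05, 0.05))
best = sco.brute(lambda p: -improvement(p), bnds, finish=None)
print(best)  # approximately [ 0.5 -0.2]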
Example 5: sample_624
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def sample_624():
"""
    6.2.4 Global optimization: finding the happiest way to live a life
:return:
"""
import scipy.optimize as sco
def minimize_happiness_global(weights):
if np.sum(weights) != 1:
            # Filter out weight combinations whose sum is not equal to 1
return 0
        # Optimizers search for minima, so to find the weights with the
        # largest happiness index we return -my_life(weights)[1]; the
        # smallest result then corresponds to the happiest allocation
return -my_life(weights)[1]
opt_global = sco.brute(minimize_happiness_global,
((0, 1.1, 0.1), (0, 1.1, 0.1), (0, 1.1, 0.1)))
print(opt_global)
living_day, happiness, wealth, fame = my_life(opt_global)
    print('Lived {} years, happiness {}, accumulated wealth {}, fame and power {}'
          .format(living_day, happiness, wealth, fame))
Example 6: alg_decay_fit
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def alg_decay_fit(x, y, npts=5, power_range=(0.01, 4.), power_mesh=[60, 10]):
"""Fit y to the form ``a*x**(-b) + c``.
Returns a triplet [a, b, c].
npts specifies the maximum number of points to fit. If npts < len(x), then alg_decay_fit() will only fit to the last npts points.
power_range is a tuple that gives that restricts the possible ranges for b.
power_mesh is a list of numbers, which specifies how fine to search for the optimal b.
E.g., if power_mesh = [60,10], then it'll first divide the power_range into 60 intervals, and then divide those intervals by 10.
"""
x = np.array(x)
y = np.array(y)
assert x.ndim == 1 and y.ndim == 1
assert len(x) == len(y)
if npts < 3:
raise ValueError
if len(x) > npts:
x = x[-npts:]
y = y[-npts:]
global_log_power_range = (np.log(power_range[0]), np.log(power_range[1]))
log_power_range = global_log_power_range
for i in range(len(power_mesh)):
# number of points inclusive
brute_Ns = (power_mesh[i] if i == 0 else 2 * power_mesh[i]) + 1
log_power_step = (log_power_range[1] - log_power_range[0]) / float(brute_Ns - 1)
brute_fit = optimize.brute(alg_decay_fit_res, [log_power_range], (x, y),
Ns=brute_Ns,
finish=None)
        if brute_fit <= global_log_power_range[0] + 1e-6:
            return [0., 0., y[-1]]  # b ran into the lower bound of power_range; give up
log_power_range = (brute_fit - log_power_step, brute_fit + log_power_step)
l_fit = linear_fit(x**(-np.exp(brute_fit)), y)
return [l_fit[0], np.exp(brute_fit), l_fit[1]]
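The helpers alg_decay_fit_res and linear_fit are not shown in this excerpt. Judging from how they are used, alg_decay_fit_res(log_b, x, y) presumably returns the residual of the best linear fit of y against x**(-exp(log_b)); a hedged reconstruction:

def alg_decay_fit_res(log_b, x, y):
    # Hypothetical reconstruction: residual of fitting y = a*u + c with
    # u = x**(-b), i.e. how well an algebraic decay with exponent
    # b = exp(log_b) explains the data.
    u = x ** (-np.exp(log_b))
    a, c = np.polyfit(u, y, 1)
    return np.sum((a * u + c - y) ** 2)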
Example 7: optimal_threshold
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def optimal_threshold(data):
"""Optimal image threshold based on pearson correlation [1]_. The idea is
that an 'optimal' mask of some arbitrary image data should be found by
thresholding at a value that maximizes the pearson correlation between the
original image and the mask, i.e:
T* = argmax_T (\rho(data, data>T))
= argmin_T -(\rho(data, data>T))
This function estimates T* based on the second equation on arbitrary input
arrays.
Parameters
----------
scalar_data: 1D array,
scalar array to estimate an 'optimal' threshold on.
Returns
-------
optimal_threshold: float,
optimal threshold value that maximizes correlation between the original
and masked data.
References
----------
.. [1] Ridgway, Gerard R., et al. "Issues with threshold masking in
voxel-based morphometry of atrophied brains." Neuroimage 44.1 (2009):
99-111.
"""
min_bound = data.min()
max_bound = data.max()
eps = 1e-10
optimal_threshold = brute(
func=_cost_function,
Ns=100,
args=(data,),
ranges=([min_bound + eps, max_bound - eps],))[0]
return optimal_threshold
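The _cost_function helper is not included in this excerpt. Based on the docstring, it plausibly returns the negative Pearson correlation between the data and its thresholded mask, along these lines (a reconstruction, not the original implementation):

from scipy.stats import pearsonr

def _cost_function(threshold, data):
    # Hypothetical reconstruction: negate the Pearson correlation between
    # the data and the binary mask obtained by thresholding, so that
    # brute's minimization maximizes the correlation.
    mask = (data > threshold).astype(float)
    return -pearsonr(data, mask)[0]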
Example 8: test_brute
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def test_brute(self):
# test fmin
resbrute = optimize.brute(self.func, self.rranges, args=self.params,
full_output=True, finish=optimize.fmin)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
assert_allclose(resbrute[1], self.func(self.solution, *self.params),
atol=1e-3)
# test minimize
resbrute = optimize.brute(self.func, self.rranges, args=self.params,
full_output=True,
finish=optimize.minimize)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
assert_allclose(resbrute[1], self.func(self.solution, *self.params),
atol=1e-3)
Example 9: test_1D
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def test_1D(self):
# test that for a 1D problem the test function is passed an array,
# not a scalar.
def f(x):
assert_(len(x.shape) == 1)
assert_(x.shape[0] == 1)
return x ** 2
optimize.brute(f, [(-1, 1)], Ns=3, finish=None)
Example 10: minimize
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def minimize(func, dim):
    if dim == 2:
        r = ((0, np.pi), (0, np.pi))
        n = 25
    elif dim == 3:
        r = ((0, np.pi), (0, np.pi), (0, np.pi))
        n = 10
    else:
        raise ValueError("dim must be 2 or 3")
    # TODO -- try basin hopping or annealing
    # full_output=True returns (xmin, Jmin, grid, Jout); keep the first two.
    return optimize.brute(func, r, Ns=n, full_output=True, finish=optimize.fmin)[0:2]
Example 11: _print_count
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def _print_count(log):
r"""Print run-count information."""
log['cnt2'] += 1 # Current number
cp = log['cnt2']/log['totnr']*100 # Percentage
    if log['cnt2'] == 0:
        # Not sure about this; brute seems to call the function with the
        # first arguments twice...
        pass
elif log['cnt2'] > log['totnr']: # fmin-status
print(f" fmin fct calls : {log['cnt2']-log['totnr']}", end='\r')
elif int(cp) > log['cnt1'] or cp < 1 or log['cnt2'] == log['totnr']:
# Get seconds since start
sec = int(default_timer() - log['time'])
# Get estimate of remaining time, as string
tleft = str(timedelta(seconds=int(100*sec/cp - sec)))
# Print progress
pstr = f" brute fct calls : {log['cnt2']}/{log['totnr']}"
if log['totnr'] > 100:
pstr += f" ({int(cp)} %); est: {tleft} "
print(pstr, end='\r')
if log['cnt2'] == log['totnr']:
# Empty previous line
print(" "*len(pstr), end='\r')
# Print final brute-message
print(f" brute fct calls : {log['totnr']}")
# Update percentage cnt1
log['cnt1'] = cp
return log
Example 12: modelNFR
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def modelNFR(self, boundaries = (35,115)):
"""Model NFR distribution with gamma distribution"""
    b = np.where(self.fragmentsizes.get(self.lower, boundaries[1]) ==
                 max(self.fragmentsizes.get(self.lower, boundaries[1])))[0][0] + self.lower
boundaries = (min(boundaries[0],b), boundaries[1])
x = np.arange(boundaries[0],boundaries[1])
y = self.fragmentsizes.get(boundaries[0],boundaries[1])
def gamma_fit(X,o,p):
k = p[0]
theta = p[1]
a = p[2]
x_mod = X-o
res = np.zeros(len(x_mod))
if k>=1:
nz = x_mod >= 0
else:
nz = x_mod > 0
res[nz] = a * x_mod[nz]**(k-1) * np.exp(-x_mod[nz]/theta) / (theta **k * gamma(k))
return res
    res_score = np.ones(boundaries[0]+1)*np.inf
res_param = [0 for i in range(boundaries[0]+1)]
pranges = ((0.01,10),(0.01,150),(0.01,1))
for i in range(15,boundaries[0]+1):
f = lambda p: np.sum((gamma_fit(x,i,p) - y)**2)
tmpres = optimize.brute(f, pranges, full_output=True,
finish=optimize.fmin)
res_score[i] = tmpres[1]
res_param[i] = tmpres[0]
whichres = np.argmin(res_score)
res = res_param[whichres]
self.nfr_fit0 = FragmentSizes(self.lower,self.upper, vals = gamma_fit(np.arange(self.lower,self.upper),whichres,res_param[whichres]))
nfr = np.concatenate((self.fragmentsizes.get(self.lower,boundaries[1]), self.nfr_fit0.get(boundaries[1],self.upper)))
nfr[nfr==0] = min(nfr[nfr!=0])*0.01
self.nfr_fit = FragmentSizes(self.lower,self.upper, vals = nfr)
nuc = np.concatenate((np.zeros(boundaries[1]-self.lower),
self.fragmentsizes.get(boundaries[1],self.upper) -
self.nfr_fit.get(boundaries[1],self.upper)))
nuc[nuc<=0]=min(min(nfr)*0.1,min(nuc[nuc>0])*0.001)
self.nuc_fit = FragmentSizes(self.lower, self.upper, vals = nuc)
Example 13: sample_625
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def sample_625():
"""
    6.2.5 Non-convex optimization: finding the happiest way to live a life
:return:
"""
import scipy.optimize as sco
method = 'SLSQP'
    # Constraint function normalizing the parameters: np.sum(weights) = 1 -> np.sum(weights) - 1 = 0
    constraints = ({'type': 'eq', 'fun': lambda p_x: np.sum(p_x) - 1})
    # Bounds chosen for each parameter
    bounds = tuple((0, 0.9) for _ in range(3))
print('bounds:', bounds)
def minimize_happiness_local(weights):
# print(weights)
return -my_life(weights)[1]
    # Initial guess for the optimal parameters; here the global optimum computed by brute is used as the guess
guess = [0.5, 0.2, 0.3]
opt_local = sco.minimize(minimize_happiness_local, guess,
method=method, bounds=bounds,
constraints=constraints)
print('opt_local:', opt_local)
Example 14: _correct_gear_shifts
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def _correct_gear_shifts(
        times, ratios, gears, velocity_speed_ratios, shift_window=4.0):
    """Correct the timing of gear shifts: around each shift, brute-search
    the split index (within shift_window seconds) that minimizes the MAE
    between the observed ratios and the gears' velocity-speed ratios."""
import scipy.optimize as sci_opt
from . import calculate_gear_shifts
shifts = calculate_gear_shifts(gears)
vsr = np.vectorize(lambda v: velocity_speed_ratios.get(v, 0))
s = len(gears)
def _err(v, r):
v = int(v) or 1
return np.float32(co2_utl.mae(ratios[slice(v - 1, v + 1, 1)], r))
k = 0
new_gears = np.zeros_like(gears)
dt = shift_window / 2
for i in np.arange(s)[shifts]:
g = gears[slice(i - 1, i + 1, 1)]
j = int(i)
if g[0] != 0 and g[-1] != 0:
t = times[i]
n = max(i - (((t - dt) <= times) & (times <= t)).sum(), min(i, k))
m = min(i + ((t <= times) & (times <= (t + dt))).sum(), s)
if n + 1 > m:
# noinspection PyTypeChecker
j = int(sci_opt.brute(
_err, (slice(n, m, 1),), args=(vsr(g),), finish=None)
)
x = slice(j - 1, j + 1, 1)
new_gears[x] = g
new_gears[k:x.start] = g[0]
k = x.stop
new_gears[k:] = new_gears[k - 1]
return new_gears
Example 15: brute_search
# Required import: from scipy import optimize [as alias]
# Alternatively: from scipy.optimize import brute [as alias]
def brute_search(self, weights, metaParamNames=list(),
objective=lambda x: x.loss,
negative_objective=False,
stochastic_objective=True,
stochastic_samples=25,
stochastic_precision=0.01,
):
"""
Uses BFS to find optimal simulation hyperparameters
Note:
You want runs passed in the solver to have __no randomness__
Solving a stochastic simulation will cause problems with solver
Arguments
---------
Weights: list<floats>
weights to be optimized on
metaParamName: list<string>
list of names of arguments to be optimized on
the index respects the weights argument above
the strings should be the same as in a simulation run input
Weights and metaparamNames together would form the
algoParams dict in a normal simulation
objective: function(Market) -> float
objective function
by default number of matches
can be changed to loss, for example, by
"objective=lambda x: x.loss"
returns
-------
np.array of weights where function is minimized
if negative_objective=True, where maximized
"""
def this_run(w):
# note - sign here
# TODO fix negative sign
sign = 1 if negative_objective else -1
if stochastic_objective:
result = 0
# If objective stochastic, make montecarlo draws & average
for i in range(stochastic_samples):
result += sign * \
self.single_run(w,
metaParamNames=metaParamNames,
objective=objective)
# Average montecarlo draws
result = result/stochastic_samples
# Tune precision for convergence
result = int(result/stochastic_precision)*stochastic_precision
return result
else:
return sign*self.single_run(w,
metaParamNames=metaParamNames,
objective=objective)
    res = optimize.brute(this_run, weights, full_output=True, disp=True,
                         finish=optimize.fmin)
return res[0]