This article collects typical usage examples of the scipy.optimize.fmin_bfgs method in Python. If you are wondering what optimize.fmin_bfgs does, how it is called, or what real-world usages look like, the hand-picked code examples below may help; you can also explore the containing module, scipy.optimize, in more detail.
The following shows 15 code examples of optimize.fmin_bfgs, sorted by popularity by default.
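Before the collected examples, here is a minimal self-contained sketch of the basic call pattern (the quadratic objective, its gradient, and the target point c are illustrative only, not taken from any example below):

import numpy as np
from scipy import optimize

# Minimize f(x) = ||x - c||^2 with an analytic gradient.
c = np.array([1.0, -2.0])
f = lambda x: np.sum((x - c) ** 2)
fprime = lambda x: 2.0 * (x - c)

xopt = optimize.fmin_bfgs(f, x0=np.zeros(2), fprime=fprime, gtol=1e-6, disp=False)
# xopt should be close to c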
Example 1: test_bfgs_infinite
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def test_bfgs_infinite(self, use_wrapper=False):
    """Test corner case where -Inf is the minimum. See #1494."""
    func = lambda x: -np.e**-x
    fprime = lambda x: -func(x)
    x0 = [0]
    olderr = np.seterr(over='ignore')
    try:
        if use_wrapper:
            opts = {'disp': False}
            x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
                                  args=(), options=opts)['x']
        else:
            x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
        assert_(not np.isfinite(func(x)))
    finally:
        np.seterr(**olderr)
Example 2: run_bfgs
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def run_bfgs(self):
    """
    Run the optimization
    @return: Nothing
    """
    self.problem = SetPointsOptimizationProblem(self.circuit,
                                                self.options,
                                                self.max_iter,
                                                callback=self.progress_signal.emit)

    xopt = fmin_bfgs(f=self.problem.eval, x0=self.problem.x0,
                     fprime=None, args=(), gtol=1e-05, epsilon=1e-2,
                     maxiter=self.max_iter, full_output=0, disp=1, retall=0,
                     callback=None)

    self.solution = np.ones(self.problem.dim) + xopt

    # Extract function values from the controller
    self.optimization_values = np.array(self.problem.all_f)

    # send the finish signal
    self.progress_signal.emit(0.0)
    self.progress_text.emit('Done!')
    self.done_signal.emit()
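Note that fmin_bfgs also has its own per-iteration callback (the example above routes progress through the problem object instead and passes callback=None). A small standalone sketch of that hook, with a made-up objective:

import numpy as np
from scipy.optimize import fmin_bfgs

history = []

def record(xk):
    # fmin_bfgs calls this once per iteration with the current parameter vector
    history.append(np.array(xk))

xopt = fmin_bfgs(lambda x: np.sum(x ** 2), x0=np.ones(3), callback=record, disp=False)
# len(history) equals the number of iterations performed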
Example 3: opt
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def opt(self, x_init, f_fp=None, f=None, fp=None):
    """
    Run the optimizer
    """
    rcstrings = ['', 'Maximum number of iterations exceeded',
                 'Gradient and/or function calls not changing']

    opt_dict = {}
    if self.xtol is not None:
        print("WARNING: bfgs doesn't have an xtol arg, so I'm going to ignore it")
    if self.ftol is not None:
        print("WARNING: bfgs doesn't have an ftol arg, so I'm going to ignore it")
    if self.gtol is not None:
        opt_dict['gtol'] = self.gtol

    opt_result = optimize.fmin_bfgs(f, x_init, fp, disp=self.messages,
                                    maxiter=self.max_iters, full_output=True,
                                    **opt_dict)
    self.x_opt = opt_result[0]
    self.f_opt = f_fp(self.x_opt)[0]
    self.funct_eval = opt_result[4]
    self.status = rcstrings[opt_result[6]]
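The positional indexing above relies on the tuple that fmin_bfgs returns when full_output=True (with retall left off): (xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag). A small sketch on a toy objective:

import numpy as np
from scipy import optimize

res = optimize.fmin_bfgs(lambda x: np.sum(x ** 2), np.ones(2),
                         fprime=lambda x: 2 * x, full_output=True, disp=False)
xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = res
# res[4] is the number of function evaluations; res[6] is the warning flag
# (0 means converged, 1 means maxiter exceeded, 2 means the search stalled)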
Example 4: oneVsAll
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def oneVsAll(X, y, num_labels, Lambda):
    # Initialize variables
    m, n = X.shape
    all_theta = np.zeros((n + 1, num_labels))  # one column of theta per class, 10 columns in total
    X = np.hstack((np.ones((m, 1)), X))        # prepend a column of ones to X as the bias term
    class_y = np.zeros((m, num_labels))        # y holds labels 0-9; map them to 0/1 indicators
    initial_theta = np.zeros((n + 1, 1))       # initial theta for a single class

    # Map y to one-hot indicators
    for i in range(num_labels):
        class_y[:, i] = np.int32(y == i).reshape(1, -1)  # note: reshape(1, -1) is needed for the assignment
    # np.savetxt("class_y.csv", class_y[0:600, :], delimiter=',')

    '''Loop over each class and compute its theta'''
    for i in range(num_labels):
        # optimize.fmin_cg
        result = optimize.fmin_bfgs(costFunction, initial_theta, fprime=gradient,
                                    args=(X, class_y[:, i], Lambda))  # call the gradient-based optimizer
        all_theta[:, i] = result.reshape(1, -1)  # store the result in all_theta

    all_theta = np.transpose(all_theta)
    return all_theta

# Cost function
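The costFunction and gradient used above are defined elsewhere in the original script (the comment above marks where the cost function begins). A minimal sketch of a compatible regularized logistic-regression cost and gradient, assuming the standard formulation rather than the author's exact code:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def costFunction(theta, X, y, Lambda):
    m = y.size
    h = sigmoid(X.dot(theta))
    reg = Lambda / (2.0 * m) * np.sum(theta[1:] ** 2)   # the bias term is not regularized
    return -(y.dot(np.log(h)) + (1 - y).dot(np.log(1 - h))) / m + reg

def gradient(theta, X, y, Lambda):
    m = y.size
    h = sigmoid(X.dot(theta))
    grad = X.T.dot(h - y) / m
    grad[1:] += Lambda / m * theta[1:]
    return grad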
Example 5: test_model_hawkes_varying_baseline_least_sq_grad
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def test_model_hawkes_varying_baseline_least_sq_grad(self):
    """...Test that ModelHawkesExpKernLeastSq gradient is consistent
    with loss
    """
    for model in [self.model, self.model_list]:
        model.period_length = 1.
        model.n_baselines = 3
        coeffs = np.random.rand(model.n_coeffs)

        self.assertLess(check_grad(model.loss, model.grad, coeffs), 1e-5)

        coeffs_min = fmin_bfgs(model.loss, coeffs, fprime=model.grad,
                               disp=False)

        self.assertAlmostEqual(
            norm(model.grad(coeffs_min)), .0, delta=1e-4)
Example 6: test_bfgs_infinite
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def test_bfgs_infinite(self):
    # Test corner case where -Inf is the minimum. See gh-2019.
    func = lambda x: -np.e**-x
    fprime = lambda x: -func(x)
    x0 = [0]
    olderr = np.seterr(over='ignore')
    try:
        if self.use_wrapper:
            opts = {'disp': self.disp}
            x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
                                  args=(), options=opts)['x']
        else:
            x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)
        assert_(not np.isfinite(func(x)))
    finally:
        np.seterr(**olderr)
Example 7: bfgs_min_pos
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def bfgs_min_pos(find_min_pos, y_len, linear_interp):
    """
    Locate a minimum of the model built with scipy.interpolate.interp1d by running sco.fmin_bfgs
    :param find_min_pos: starting position for the minimum search
    :param y_len: length of the original sequence, int
    :param linear_interp: interpolation model created with scipy.interpolate.interp1d
    :return: the position found by sco.fmin_bfgs; -1 on any failure or exception
    """
    try:
        local_min_pos = sco.fmin_bfgs(linear_interp, find_min_pos, disp=False)[0]
    except:
        # return -1 on any failure or exception
        local_min_pos = -1

    if local_min_pos < 0 or local_min_pos > y_len:
        # return -1 on any failure or exception
        local_min_pos = -1
    return local_min_pos
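A usage sketch (the series y and the probe position are invented for illustration; bfgs_min_pos expects scipy.optimize to be imported under the alias sco at module level, as in its docstring):

import numpy as np
from scipy import interpolate

y = np.sin(np.linspace(0, 4 * np.pi, 50))                        # toy series
linear_interp = interpolate.interp1d(np.arange(y.shape[0]), y)   # the interpolation model
pos = bfgs_min_pos(find_min_pos=10, y_len=y.shape[0], linear_interp=linear_interp)
# pos is the located local-minimum position, or -1 if the search failed or left [0, y_len]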
Example 8: _fit_bfgs
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def _fit_bfgs(f, score, start_params, fargs, kwargs, disp=True,
              maxiter=100, callback=None, retall=False,
              full_output=True, hess=None):
    gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
    norm = kwargs.setdefault('norm', np.Inf)
    epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
    retvals = optimize.fmin_bfgs(f, start_params, score, args=fargs,
                                 gtol=gtol, norm=norm, epsilon=epsilon,
                                 maxiter=maxiter, full_output=full_output,
                                 disp=disp, retall=retall, callback=callback)
    if full_output:
        if not retall:
            xopt, fopt, gopt, Hinv, fcalls, gcalls, warnflag = retvals
        else:
            (xopt, fopt, gopt, Hinv, fcalls,
             gcalls, warnflag, allvecs) = retvals
        converged = not warnflag
        retvals = {'fopt': fopt, 'gopt': gopt, 'Hinv': Hinv,
                   'fcalls': fcalls, 'gcalls': gcalls, 'warnflag':
                   warnflag, 'converged': converged}
        if retall:
            retvals.update({'allvecs': allvecs})
    else:
        xopt = retvals
        retvals = None

    return xopt, retvals
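A toy invocation sketch (the quadratic stand-in for the log-likelihood and its score are made up; in statsmodels this helper is normally driven by a model's fit method rather than called directly):

import numpy as np

f = lambda params, *args: np.sum((params - 3.0) ** 2)   # stand-in objective
score = lambda params, *args: 2.0 * (params - 3.0)      # its gradient
xopt, retvals = _fit_bfgs(f, score, start_params=np.zeros(2), fargs=(),
                          kwargs={}, disp=False)
# retvals['converged'] is True when fmin_bfgs finished with warnflag == 0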
Example 9: fitgmm_cu
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def fitgmm_cu(self, start, optim_method='bfgs', optim_args=None):
    '''estimate parameters using continuously updating GMM

    Parameters
    ----------
    start : array_like
        starting values for minimization

    Returns
    -------
    paramest : array
        estimated parameters

    Notes
    -----
    todo: add fixed parameter option, not here ???

    uses scipy.optimize.fmin
    '''
    ## if not fixed is None:  # fixed not defined in this version
    ##     raise NotImplementedError

    if optim_args is None:
        optim_args = {}

    if optim_method == 'nm':
        optimizer = optimize.fmin
    elif optim_method == 'bfgs':
        optimizer = optimize.fmin_bfgs
        optim_args['fprime'] = self.score_cu
    elif optim_method == 'ncg':
        optimizer = optimize.fmin_ncg
    else:
        raise ValueError('optimizer method not available')

    #TODO: add other optimization options and results
    return optimizer(self.gmmobjective_cu, start, args=(), **optim_args)
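The same name-to-optimizer dispatch can be written as a standalone helper; a sketch on a toy objective (an assumed illustration of the pattern, not part of the GMM class):

import numpy as np
from scipy import optimize

def minimize_by_name(objective, start, optim_method='bfgs', optim_args=None, score=None):
    # Select a scipy optimizer by short name and forward the remaining options.
    optim_args = dict(optim_args or {})
    if optim_method == 'nm':
        optimizer = optimize.fmin
    elif optim_method == 'bfgs':
        optimizer = optimize.fmin_bfgs
        if score is not None:
            optim_args['fprime'] = score
    elif optim_method == 'ncg':
        optimizer = optimize.fmin_ncg
    else:
        raise ValueError('optimizer method not available')
    return optimizer(objective, start, **optim_args)

xopt = minimize_by_name(lambda x: np.sum((x - 1.0) ** 2), np.zeros(3),
                        score=lambda x: 2 * (x - 1.0), optim_args={'disp': False})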
Example 10: test_bfgs
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def test_bfgs(self, use_wrapper=False):
    """ Broyden-Fletcher-Goldfarb-Shanno optimization routine """
    if use_wrapper:
        opts = {'maxiter': self.maxiter, 'disp': False,
                'return_all': False}
        res = optimize.minimize(self.func, self.startparams,
                                jac=self.grad, method='BFGS', args=(),
                                options=opts)

        params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
                res['x'], res['fun'], res['jac'], res['hess_inv'], \
                res['nfev'], res['njev'], res['status']
    else:
        retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
                                    args=(), maxiter=self.maxiter,
                                    full_output=True, disp=False, retall=False)
        (params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval

    assert_allclose(self.func(params), self.func(self.solution),
                    atol=1e-6)

    # Ensure that function call counts are 'known good'; these are from
    # Scipy 0.7.0. Don't allow them to increase.
    assert_(self.funccalls == 10, self.funccalls)
    assert_(self.gradcalls == 8, self.gradcalls)

    # Ensure that the function behaves the same; this is from Scipy 0.7.0
    assert_allclose(self.trace[6:8],
                    [[0, -5.25060743e-01, 4.87748473e-01],
                     [0, -5.24885582e-01, 4.87530347e-01]],
                    atol=1e-14, rtol=1e-7)
Example 11: test_bfgs_nan
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def test_bfgs_nan(self):
    """Test corner case where nan is fed to optimizer. See #1542."""
    func = lambda x: x
    fprime = lambda x: np.ones_like(x)
    x0 = [np.nan]

    olderr = np.seterr(over='ignore')
    try:
        x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
        assert_(np.isnan(func(x)))
    finally:
        np.seterr(**olderr)
Example 12: test_bfgs_numerical_jacobian
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def test_bfgs_numerical_jacobian(self):
    """ BFGS with numerical jacobian and a vector epsilon parameter """
    # define the epsilon parameter using a random vector
    epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))

    params = optimize.fmin_bfgs(self.func, self.startparams,
                                epsilon=epsilon, args=(),
                                maxiter=self.maxiter, disp=False)

    assert_allclose(self.func(params), self.func(self.solution),
                    atol=1e-6)
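As this test demonstrates, epsilon does not have to be a scalar: a vector gives one finite-difference step per coordinate when the gradient is approximated numerically. A small standalone sketch:

import numpy as np
from scipy import optimize

func = lambda x: (x[0] - 1.0) ** 2 + 10.0 * (x[1] + 2.0) ** 2
eps = np.array([1e-7, 1e-8])    # per-coordinate finite-difference steps
xmin = optimize.fmin_bfgs(func, np.zeros(2), epsilon=eps, disp=False)
# xmin should be close to [1, -2]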
Example 13: LogisticRegression
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def LogisticRegression():
    data = loadtxtAndcsv_data("data2.txt", ",", np.float64)
    X = data[:, 0:-1]
    y = data[:, -1]

    plot_data(X, y)  # plot the data

    X = mapFeature(X[:, 0], X[:, 1])            # map the two features to polynomial terms
    initial_theta = np.zeros((X.shape[1], 1))   # initialize theta
    initial_lambda = 0.1                        # initialize the regularization coefficient, typically 0.01, 0.1, 1, ...

    J = costFunction(initial_theta, X, y, initial_lambda)  # cost for the initial theta and lambda

    print(J)  # print the computed cost; it should be 0.693147

    # result = optimize.fmin(costFunction, initial_theta, args=(X, y, initial_lambda))  # plain minimization works poorly here

    '''Call scipy's fmin_bfgs optimizer (the Broyden-Fletcher-Goldfarb-Shanno quasi-Newton method):
    - costFunction is the user-defined cost function,
    - initial_theta is the starting value,
    - fprime specifies the gradient of costFunction,
    - args passes the remaining parameters as a tuple; the theta that minimizes costFunction is returned
    '''
    result = optimize.fmin_bfgs(costFunction, initial_theta, fprime=gradient, args=(X, y, initial_lambda))
    p = predict(X, result)  # predict
    print(u'Accuracy on the training set: %f%%' % np.mean(np.float64(p == y) * 100))  # compare with the true labels; p == y gives True, converted to float

    X = data[:, 0:-1]
    y = data[:, -1]
    plotDecisionBoundary(result, X, y)  # plot the decision boundary

# Load txt and csv files
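The helpers loadtxtAndcsv_data, plot_data, predict, plotDecisionBoundary and mapFeature are defined elsewhere in the original script (the trailing comment above marks where the file loader begins). As one illustration, a minimal sketch of what mapFeature typically does, assuming the usual degree-6 polynomial feature mapping:

import numpy as np

def mapFeature(X1, X2, degree=6):
    # Map two features to all polynomial terms x1^i * x2^j with 0 < i + j <= degree,
    # prepending a column of ones for the bias term.
    out = np.ones((X1.shape[0], 1))
    for i in range(1, degree + 1):
        for j in range(i + 1):
            term = (X1 ** (i - j)) * (X2 ** j)
            out = np.hstack((out, term.reshape(-1, 1)))
    return out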
Example 14: test_spatial_median_2d
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def test_spatial_median_2d():
    X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
    _, median = _spatial_median(X, max_iter=100, tol=1.e-6)

    def cost_func(y):
        dists = np.array([norm(x - y) for x in X])
        return np.sum(dists)

    # Check if median is solution of the Fermat-Weber location problem
    fermat_weber = fmin_bfgs(cost_func, median, disp=False)
    assert_array_almost_equal(median, fermat_weber)

    # Check when maximum iteration is exceeded a warning is emitted
    assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
Example 15: _test_grad
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import fmin_bfgs [as alias]
def _test_grad(self, model, coeffs, delta_check_grad=None,
               delta_model_grad=None):
    """Test that gradient is consistent with loss and that minimum is
    achievable with a small gradient
    """
    if coeffs.dtype is np.dtype("float32"):
        check_grad_epsilon = 3e-3
    else:
        check_grad_epsilon = 1e-7

    if delta_check_grad is None:
        delta_check_grad = self.delta_check_grad

    if delta_model_grad is None:
        delta_model_grad = self.delta_model_grad

    with warnings.catch_warnings(record=True):
        grad_check = check_grad(model.loss, model.grad, coeffs,
                                epsilon=check_grad_epsilon)
    self.assertAlmostEqual(grad_check, 0., delta=delta_check_grad)

    # Check that minimum is achievable with a small gradient
    with warnings.catch_warnings(record=True):
        coeffs_min = fmin_bfgs(model.loss, coeffs, fprime=model.grad,
                               disp=False)
        coeffs_min = coeffs_min.astype(self.dtype)

    self.assertAlmostEqual(
        norm(model.grad(coeffs_min)), .0, delta=delta_model_grad)