本文整理匯總了Python中scipy.optimize.fmin_tnc方法的典型用法代碼示例。如果您正苦於以下問題:Python optimize.fmin_tnc方法的具體用法?Python optimize.fmin_tnc怎麽用?Python optimize.fmin_tnc使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類scipy.optimize
的用法示例。
在下文中一共展示了optimize.fmin_tnc方法的11個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: opt
# 需要導入模塊: from scipy import optimize [as 別名]
# 或者: from scipy.optimize import fmin_tnc [as 別名]
def opt(self, x_init, f_fp=None, f=None, fp=None):
    """Run the TNC (truncated Newton) optimizer.

    Parameters
    ----------
    x_init : array-like
        Starting point for the search.
    f_fp : callable
        Returns a ``(objective, gradient)`` pair for a given position.
        Required: TNC needs the combined objective+gradient callable.
    f, fp : callable, optional
        Accepted for interface compatibility with the other optimizer
        wrappers in this file but unused here.

    Side effects
    ------------
    Stores the result on the instance: ``x_opt`` (minimizer), ``f_opt``
    (objective value at ``x_opt``), ``funct_eval`` (number of function
    evaluations) and ``status`` (human-readable return-code string).
    """
    # Human-readable translations of TNC's integer return codes.
    # NOTE(review): fmin_tnc can also return codes outside 0..5 (e.g. -1
    # for an infeasible problem), which would raise IndexError here —
    # confirm whether callers can hit that path.
    tnc_rcstrings = ['Local minimum', 'Converged', 'XConverged',
                     'Maximum number of f evaluations reached',
                     'Line search failed', 'Function is constant']

    # Fix: compare to None with 'is not' (PEP 8 E711), not '!='.
    assert f_fp is not None, "TNC requires f_fp"

    # Forward only the tolerances the caller actually configured.
    opt_dict = {}
    if self.xtol is not None:
        opt_dict['xtol'] = self.xtol
    if self.ftol is not None:
        opt_dict['ftol'] = self.ftol
    if self.gtol is not None:
        # fmin_tnc names the projected-gradient tolerance 'pgtol'.
        opt_dict['pgtol'] = self.gtol

    opt_result = optimize.fmin_tnc(f_fp, x_init, messages=self.messages,
                                   maxfun=self.max_f_eval, **opt_dict)
    self.x_opt = opt_result[0]
    # Re-evaluate the objective at the solution to record f_opt.
    self.f_opt = f_fp(self.x_opt)[0]
    self.funct_eval = opt_result[1]
    self.status = tnc_rcstrings[opt_result[2]]
示例2: get_optimizer
# 需要導入模塊: from scipy import optimize [as 別名]
# 或者: from scipy.optimize import fmin_tnc [as 別名]
def get_optimizer(f_min):
    """Resolve an optimizer class from a (partial, case-insensitive) name.

    The first registered name, in sorted order, that contains ``f_min``
    as a substring wins.

    Raises:
        KeyError: if no registered optimizer name matches.
    """
    registry = {'fmin_tnc': opt_tnc,
                'simplex': opt_simplex,
                'lbfgsb': opt_lbfgsb,
                'org-bfgs': opt_bfgs,
                'scg': opt_SCG,
                'adadelta': Opt_Adadelta,
                'rprop': RProp,
                'adam': Adam}
    #if rasm_available:
    #    registry['rasmussen'] = opt_rasm
    wanted = f_min.lower()
    for name in sorted(registry):
        if wanted in name.lower():
            return registry[name]
    raise KeyError('No optimizer was found matching the name: %s' % f_min)
示例3: test_tnc1
# 需要導入模塊: from scipy import optimize [as 別名]
# 或者: from scipy.optimize import fmin_tnc [as 別名]
def test_tnc1(self):
fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, args=(100.0, ),
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
示例4: test_tnc1b
# 需要導入模塊: from scipy import optimize [as 別名]
# 或者: from scipy.optimize import fmin_tnc [as 別名]
def test_tnc1b(self):
x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True,
bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
示例5: test_tnc1c
# 需要導入模塊: from scipy import optimize [as 別名]
# 或者: from scipy.optimize import fmin_tnc [as 別名]
def test_tnc1c(self):
x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1,
bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
示例6: test_tnc2
# 需要導入模塊: from scipy import optimize [as 別名]
# 或者: from scipy.optimize import fmin_tnc [as 別名]
def test_tnc2(self):
fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None])
xopt = [-1.2210262419616387, 1.5]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
示例7: test_tnc3
# 需要導入模塊: from scipy import optimize [as 別名]
# 或者: from scipy.optimize import fmin_tnc [as 別名]
def test_tnc3(self):
fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None])
xopt = [0, 0]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
示例8: test_tnc5
# 需要導入模塊: from scipy import optimize [as 別名]
# 或者: from scipy.optimize import fmin_tnc [as 別名]
def test_tnc5(self):
fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4),(-3, 3)]
xopt = [-0.54719755119659763, -1.5471975511965976]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
示例9: test_tnc38
# 需要導入模塊: from scipy import optimize [as 別名]
# 或者: from scipy.optimize import fmin_tnc [as 別名]
def test_tnc38(self):
fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4
xopt = [1]*4
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
示例10: test_tnc45
# 需要導入模塊: from scipy import optimize [as 別名]
# 或者: from scipy.optimize import fmin_tnc [as 別名]
def test_tnc45(self):
fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3),
(0, 4), (0, 5)]
xopt = [1, 2, 3, 4, 5]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
示例11: _optimize_loss
# 需要導入模塊: from scipy import optimize [as 別名]
# 或者: from scipy.optimize import fmin_tnc [as 別名]
def _optimize_loss(probe_counts, loss_fn, bounds, x0,
                   initial_eps=10.0, step_size=0.001,
                   interp_fn_type='standard'):
    """Optimize loss function with barrier.

    This uses scipy's optimize.fmin_tnc to minimize the loss function
    in which the barrier is weighted by eps. It repeatedly minimizes
    the loss while decreasing eps so that, by the last iteration, the
    weight on the barrier is very small. On each iteration, it starts
    the initial guess/position at the solution to the previous iteration.

    Args:
        probe_counts: dict giving number of probes required for each
            dataset and choice of parameters
        loss_fn: the loss function provided by _make_loss_fn
        bounds: bounds on the parameter values provided by _make_param_bounds_*
        x0: the initial guess of parameter values (i.e., starting position)
        initial_eps: weight of the barrier on the first iteration
        step_size: epsilon value provided to optimize.fmin_tnc
        interp_fn_type: 'standard' (only perform interpolation on mismatches
            and cover_extension parameters) or 'nd' (use scipy's interpolate
            package to interpolate over n-dimensions)

    Returns:
        list of length (number of datasets)*(number of parameters) where
        x_i is the (i % N)'th parameter of the (i/N)'th dataset,
        for i=0,1,2,... where N=(number of datasets)
    """
    # Fix: if initial_eps is already below the stopping threshold the loop
    # body never runs, and the original code raised NameError on the
    # unbound local 'sol'. Fall back to returning the starting position.
    sol = x0
    eps = initial_eps
    while eps >= 0.01:
        # Log the probe count at the current position for progress visibility.
        x0_probe_count = ic._make_total_probe_count_across_datasets_fn(
            probe_counts, interp_fn_type=interp_fn_type)(x0)
        logger.info(("Starting an iteration with eps=%f, with x0 yielding %f "
                     "probes"), eps, x0_probe_count)

        sol, nfeval, rc = optimize.fmin_tnc(loss_fn, x0, bounds=bounds,
                                            args=(eps,),
                                            approx_grad=True,
                                            epsilon=step_size, disp=1,
                                            maxfun=2500)

        if rc in (0, 1, 2):
            # rc == 0 indicates reaching the local minimum, and rc == 1 or
            # rc == 2 indicates the function value converged
            logger.info(" Iteration was successful")
        else:
            logger.info(" Iteration failed to converge!")

        # Warm-start the next round from this solution with a 10x weaker
        # barrier weight.
        x0 = sol
        eps = 0.1 * eps
    return sol