This article collects typical usage examples of Python's scipy.optimize.approx_fprime. If you are wondering what optimize.approx_fprime does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples for the module it belongs to, scipy.optimize.
The following shows 15 code examples of optimize.approx_fprime, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
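Before the examples, here is a minimal sketch of what approx_fprime itself does: it estimates the gradient of a scalar function by forward finite differences. The quadratic test function and step size below are illustrative assumptions, not taken from any of the examples that follow.

import numpy as np
from scipy.optimize import approx_fprime

# f(x) = x0^2 + 3*x1, so the exact gradient is [2*x0, 3] everywhere.
def f(x):
    return x[0] ** 2 + 3 * x[1]

x0 = np.array([1.0, 2.0])
grad = approx_fprime(x0, f, 1e-8)  # forward-difference estimate
print(grad)  # approximately [2., 3.]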
Example 1: l_x
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def l_x(self, x, u, i, terminal=False):
    """Partial derivative of cost function with respect to x.

    Args:
        x: Current state [state_size].
        u: Current control [action_size]. None if terminal.
        i: Current time step.
        terminal: Compute terminal cost. Default: False.

    Returns:
        dl/dx [state_size].
    """
    if terminal:
        return approx_fprime(x, lambda x: self._l_terminal(x, i),
                             self._x_eps)

    return approx_fprime(x, lambda x: self._l(x, u, i), self._x_eps)
Example 2: l_u
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def l_u(self, x, u, i, terminal=False):
    """Partial derivative of cost function with respect to u.

    Args:
        x: Current state [state_size].
        u: Current control [action_size]. None if terminal.
        i: Current time step.
        terminal: Compute terminal cost. Default: False.

    Returns:
        dl/du [action_size].
    """
    if terminal:
        # Not a function of u, so the derivative is zero.
        return np.zeros(self._action_size)

    return approx_fprime(u, lambda u: self._l(x, u, i), self._u_eps)
Example 3: l_xx
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def l_xx(self, x, u, i, terminal=False):
    """Second partial derivative of cost function with respect to x.

    Args:
        x: Current state [state_size].
        u: Current control [action_size]. None if terminal.
        i: Current time step.
        terminal: Compute terminal cost. Default: False.

    Returns:
        d^2l/dx^2 [state_size, state_size].
    """
    eps = self._x_eps_hess
    Q = np.vstack([
        approx_fprime(x, lambda x: self.l_x(x, u, i, terminal)[m], eps)
        for m in range(self._state_size)
    ])
    return Q
Example 4: l_ux
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def l_ux(self, x, u, i, terminal=False):
    """Second partial derivative of cost function with respect to u and x.

    Args:
        x: Current state [state_size].
        u: Current control [action_size]. None if terminal.
        i: Current time step.
        terminal: Compute terminal cost. Default: False.

    Returns:
        d^2l/dudx [action_size, state_size].
    """
    if terminal:
        # Not a function of u, so the derivative is zero.
        return np.zeros((self._action_size, self._state_size))

    eps = self._x_eps_hess
    Q = np.vstack([
        approx_fprime(x, lambda x: self.l_u(x, u, i)[m], eps)
        for m in range(self._action_size)
    ])
    return Q
Example 5: l_uu
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def l_uu(self, x, u, i, terminal=False):
    """Second partial derivative of cost function with respect to u.

    Args:
        x: Current state [state_size].
        u: Current control [action_size]. None if terminal.
        i: Current time step.
        terminal: Compute terminal cost. Default: False.

    Returns:
        d^2l/du^2 [action_size, action_size].
    """
    if terminal:
        # Not a function of u, so the derivative is zero.
        return np.zeros((self._action_size, self._action_size))

    eps = self._u_eps_hess
    Q = np.vstack([
        approx_fprime(u, lambda u: self.l_u(x, u, i)[m], eps)
        for m in range(self._action_size)
    ])
    return Q
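Examples 3 through 5 all use the same pattern: approximate the Hessian one row at a time by applying approx_fprime to each component of an already finite-differenced gradient, then stack the rows with np.vstack. A self-contained sketch of that pattern on a toy quadratic (the test function and step sizes here are illustrative assumptions, not part of the cost class above):

import numpy as np
from scipy.optimize import approx_fprime

# f(x) = x0^2 + x0*x1 + 2*x1^2 has the constant Hessian [[2, 1], [1, 4]].
def f(x):
    return x[0] ** 2 + x[0] * x[1] + 2 * x[1] ** 2

def grad(x, eps=1e-7):
    # Forward-difference gradient, as in l_x / l_u above.
    return approx_fprime(x, f, eps)

def hess(x, eps=1e-5):
    # Differentiate each gradient component to get one Hessian row,
    # mirroring the np.vstack pattern of l_xx / l_ux / l_uu.
    n = x.shape[0]
    return np.vstack([
        approx_fprime(x, lambda x: grad(x)[m], eps)
        for m in range(n)
    ])

x0 = np.array([1.0, -2.0])
print(hess(x0))  # approximately [[2., 1.], [1., 4.]]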
Example 6: test_std_gradient
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def test_std_gradient():
    length_scale = np.arange(1, 6)
    X = rng.randn(10, 5)
    y = rng.randn(10)
    X_new = rng.randn(5)

    rbf = RBF(length_scale=length_scale, length_scale_bounds="fixed")
    gpr = GaussianProcessRegressor(rbf, random_state=0).fit(X, y)

    _, _, _, std_grad = gpr.predict(
        np.expand_dims(X_new, axis=0),
        return_std=True, return_cov=False, return_mean_grad=True,
        return_std_grad=True)

    num_grad = optimize.approx_fprime(
        X_new, lambda x: predict_wrapper(x, gpr)[1], 1e-4)
    assert_array_almost_equal(std_grad, num_grad, decimal=3)
Example 7: test_soft_dtw_grad_X
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def test_soft_dtw_grad_X():
    def make_func(gamma):
        def func(x):
            X_ = x.reshape(*X.shape)
            D_ = SquaredEuclidean(X_, Y)
            return SoftDTW(D_, gamma).compute()
        return func

    for gamma in (0.001, 0.01, 0.1, 1, 10, 100, 1000):
        dist = SquaredEuclidean(X, Y)
        sdtw = SoftDTW(dist, gamma)
        sdtw.compute()
        E = sdtw.grad()
        G = dist.jacobian_product(E)

        func = make_func(gamma)
        G_num = approx_fprime(X.ravel(), func, 1e-6).reshape(*G.shape)
        assert_array_almost_equal(G, G_num, 5)
Example 8: test_score
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def test_score():
    uniq, load, corr, par = _toy()
    fa = Factor(n_factor=2, corr=corr)

    def f(par):
        return fa.loglike(par)

    par2 = np.r_[0.1, 0.2, 0.3, 0.4, 0.3, 0.1, 0.2, -0.2, 0, 0.8, 0.5, 0]

    for pt in (par, par2):
        g1 = approx_fprime(pt, f, 1e-8)
        g2 = fa.score(pt)
        assert_allclose(g1, g2, atol=1e-3)
Example 9: _check_gradients
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def _check_gradients(layer_args, input_shape):
    rand = np.random.RandomState(0)
    net = cn.SoftmaxNet(layer_args=layer_args, input_shape=input_shape,
                        rand_state=rand)
    x = rand.randn(*(10,) + net.input_shape) / 100
    y = rand.randn(10) > 0
    by = net.binarize_labels(y)
    g1 = approx_fprime(net.get_params(), net.cost_for_params, 1e-5, x, by)
    g2 = net.param_grad(x, by)
    err = np.max(np.abs(g1 - g2)) / np.abs(g1).max()
    print(err)
    assert err < 1e-3, 'incorrect gradient!'
Example 10: test_logistic_loss_and_grad
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def test_logistic_loss_and_grad():
    X_ref, y = make_classification(n_samples=20, random_state=0)
    n_features = X_ref.shape[1]

    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)

        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)

        # Second check that our intercept implementation is good
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
        assert_array_almost_equal(loss, loss_interp)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
Example 11: _test_gradient
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def _test_gradient(n_items, fcts):
    """Helper for testing the gradient of objective functions."""
    for sigma in np.linspace(1, 20, num=10):
        xs = sigma * RND.randn(n_items)
        val = approx_fprime(xs, fcts.objective, EPS)
        err = check_grad(fcts.objective, fcts.gradient, xs, epsilon=EPS)
        assert abs(err / np.linalg.norm(val)) < 1e-5
Example 12: _test_hessian
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def _test_hessian(n_items, fcts):
    """Helper for testing the Hessian of objective functions."""
    for sigma in np.linspace(1, 20, num=10):
        xs = sigma * RND.randn(n_items)
        for i in range(n_items):
            obj = lambda xs: fcts.gradient(xs)[i]
            grad = lambda xs: fcts.hessian(xs)[i]
            val = approx_fprime(xs, obj, EPS)
            err = check_grad(obj, grad, xs, epsilon=EPS)
            assert abs(err / np.linalg.norm(val)) < 1e-5
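Examples 11 and 12 pair approx_fprime with scipy.optimize.check_grad. check_grad is essentially a convenience wrapper: it returns the 2-norm of the difference between an analytic gradient and the forward-difference estimate that approx_fprime produces. A small illustrative sketch (the quadratic test function here is an assumption made purely for demonstration):

import numpy as np
from scipy.optimize import approx_fprime, check_grad

def f(x):
    return np.sum(x ** 2)

def df(x):
    return 2 * x  # analytic gradient

x0 = np.array([0.5, -1.5, 2.0])
eps = np.sqrt(np.finfo(float).eps)

err = check_grad(f, df, x0, epsilon=eps)
manual = np.linalg.norm(df(x0) - approx_fprime(x0, f, eps))
print(err, manual)  # both small and equal up to rounding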
Example 13: test_gradients
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def test_gradients(before_test_inv_pend):
    env = before_test_inv_pend
    n_s = env.n_s
    n_u = env.n_u

    for i in range(n_s):
        f = lambda z: env._dynamics(0, z[:env.n_s], z[env.n_s:])[i]
        f_grad = env._jac_dynamics()[i, :]
        grad_finite_diff = approx_fprime(np.zeros((n_s + n_u,)), f, 1e-8)

        # err = check_grad(f, f_grad, np.zeros((n_s + n_u,)))
        assert np.allclose(f_grad, grad_finite_diff), \
            'Is the gradient of the {}-th dynamics dimension correct?'.format(i)
Example 14: test_gradients
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def test_gradients(distr):
    """Test gradient accuracy."""
    # data
    scaler = StandardScaler()
    n_samples, n_features = 1000, 100

    X = np.random.normal(0.0, 1.0, [n_samples, n_features])
    X = scaler.fit_transform(X)

    density = 0.1
    beta_ = np.zeros(n_features + 1)
    beta_[0] = np.random.rand()
    beta_[1:] = sps.rand(n_features, 1, density=density).toarray()[:, 0]

    reg_lambda = 0.1
    glm = GLM(distr=distr, reg_lambda=reg_lambda)
    y = simulate_glm(glm.distr, beta_[0], beta_[1:], X)

    func = partial(_L2loss, distr, glm.alpha,
                   glm.Tau, reg_lambda, X, y, glm.eta, glm.group)
    grad = partial(_grad_L2loss, distr, glm.alpha, glm.Tau,
                   reg_lambda, X, y, glm.eta)

    approx_grad = approx_fprime(beta_, func, 1.5e-8)
    analytical_grad = grad(beta_)
    assert_allclose(approx_grad, analytical_grad, rtol=1e-5, atol=1e-3)
Example 15: objective_master_nlopt
# Required import: from scipy import optimize [as alias]
# Or: from scipy.optimize import approx_fprime [as alias]
def objective_master_nlopt(x, grad):
    vars = get_groove_global_vars()
    numDOF = len(x)
    g = O.approx_fprime(x, vars.objective_function, numDOF * [0.001])
    if grad.size > 0:
        for i in range(numDOF):
            grad[i] = g[i]
    return vars.objective_function(x)
#################################################################################################
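Example 15 follows the calling convention of the NLopt Python bindings: the objective receives the point x and a gradient array that must be filled in place whenever a gradient-based algorithm asks for it. A minimal, self-contained sketch of that same pattern with a toy quadratic objective (assuming the nlopt package is installed; the objective and algorithm choice here are illustrative assumptions, not taken from the example above):

import numpy as np
import nlopt
from scipy import optimize as O

def quadratic(x):
    return float(np.sum((x - 1.0) ** 2))

def objective(x, grad):
    # Fill grad in place with a finite-difference estimate, as in Example 15.
    if grad.size > 0:
        grad[:] = O.approx_fprime(x, quadratic, len(x) * [1e-3])
    return quadratic(x)

opt = nlopt.opt(nlopt.LD_LBFGS, 3)   # gradient-based algorithm, 3 variables
opt.set_min_objective(objective)
opt.set_xtol_rel(1e-8)
x_opt = opt.optimize(np.zeros(3))
print(x_opt)  # approaches [1., 1., 1.]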