This article collects typical usage examples of the approx_fprime function from Python's statsmodels.tools.numdiff module. If you are wondering what approx_fprime does, how to call it, or what it looks like in real code, the curated examples here should help.
Fifteen code examples of approx_fprime are shown below, ordered by popularity by default.
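Before the collected examples, here is a minimal self-contained sketch of the basic call pattern (the toy function f and the values in the comments are illustrative assumptions, not part of the collected examples): approx_fprime(x, f) returns a forward-difference approximation of the gradient of a scalar-valued f, or of the Jacobian when f is vector-valued; centered=True switches to central differences and epsilon controls the step size.

import numpy as np
from statsmodels.tools.numdiff import approx_fprime

# Toy scalar objective with a known gradient: f(x) = x0**2 + 3*x1
def f(x):
    return x[0]**2 + 3.0*x[1]

x0 = np.array([1.0, 2.0])

grad_fwd = approx_fprime(x0, f).ravel()                 # forward differences
grad_cen = approx_fprime(x0, f, centered=True).ravel()  # central differences

# Both should be close to the analytic gradient [2.0, 3.0];
# .ravel() mirrors Example 8 below and guards against a 2-d return shape.
print(grad_fwd, grad_cen)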
Example 1: test_logit_1d
def test_logit_1d():

    y = np.r_[0, 1, 0, 1, 0, 1, 0, 1, 1, 1]
    g = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]

    x = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0]
    x = x[:, None]

    model = ConditionalLogit(y, x, groups=g)

    # Check the gradient for the denominator of the partial likelihood
    for x in -1, 0, 1, 2:
        params = np.r_[x, ]
        _, grad = model._denom_grad(0, params)
        ngrad = approx_fprime(params, lambda x: model._denom(0, x))
        assert_allclose(grad, ngrad)

    # Check the gradient for the loglikelihood
    for x in -1, 0, 1, 2:
        grad = approx_fprime(np.r_[x, ], model.loglike)
        score = model.score(np.r_[x, ])
        assert_allclose(grad, score, rtol=1e-4)

    result = model.fit()

    # From Stata
    assert_allclose(result.params, np.r_[0.9272407], rtol=1e-5)
    assert_allclose(result.bse, np.r_[1.295155], rtol=1e-5)
Example 2: test_hess
def test_hess(self):
    # NOTE: I had to overwrite this to lessen the tolerance
    for test_params in self.params:
        he = self.mod.hessian(test_params)
        hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
        assert_almost_equal(he, hefd, decimal=DEC8)

        # NOTE: notice the accuracy below and the epsilon changes
        # this doesn't work well for score -> hessian with non-cs step
        # it's a little better around the optimum
        assert_almost_equal(he, hefd, decimal=7)

        hefd = numdiff.approx_fprime(test_params, self.mod.score,
                                     centered=True)
        assert_almost_equal(he, hefd, decimal=4)
        hefd = numdiff.approx_fprime(test_params, self.mod.score, 1e-9,
                                     centered=False)
        assert_almost_equal(he, hefd, decimal=2)

        hescs = numdiff.approx_fprime_cs(test_params, self.mod.score)
        assert_almost_equal(he, hescs, decimal=DEC8)

        hecs = numdiff.approx_hess_cs(test_params, self.mod.loglike)
        assert_almost_equal(he, hecs, decimal=5)

        # NOTE: these just don't work well
        #hecs = numdiff.approx_hess1(test_params, self.mod.loglike, 1e-3)
        #assert_almost_equal(he, hecs, decimal=1)
        #hecs = numdiff.approx_hess2(test_params, self.mod.loglike, 1e-4)
        #assert_almost_equal(he, hecs, decimal=0)
        hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-4)
        assert_almost_equal(he, hecs, decimal=0)
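Example 2 relies on the fact that numerically differentiating an analytic score (gradient) with approx_fprime yields the Hessian of the log-likelihood. Below is a minimal sketch of that idea with a toy quadratic loglike and score (both made up for illustration), where the exact Hessian is known:

import numpy as np
from statsmodels.tools.numdiff import approx_fprime

# Hypothetical stand-ins for mod.loglike and mod.score
def loglike(params):
    return -(params[0]**2 + 2*params[0]*params[1] + 3*params[1]**2)

def score(params):
    # analytic gradient of loglike
    return np.array([-2*params[0] - 2*params[1],
                     -2*params[0] - 6*params[1]])

params = np.array([0.3, -0.7])
hess_true = np.array([[-2., -2.],
                      [-2., -6.]])

# Jacobian of the score == Hessian of loglike
hess_fd = approx_fprime(params, score, centered=True)
assert np.allclose(hess_fd, hess_true, atol=1e-5)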
Example 3: test_calc_project_jacobian
def test_calc_project_jacobian(self):
    proj = TestProject.proj

    project_param_vector = np.zeros((3,))
    low_deg_idx = proj.project_param_idx['Group_1'][('Low',)]
    high_deg_idx = proj.project_param_idx['Group_1'][('High',)]
    synt_idx = proj.project_param_idx['k_synt']['Global']

    project_param_vector[high_deg_idx] = 0.01
    project_param_vector[low_deg_idx] = 0.001
    project_param_vector[synt_idx] = 0.01
    log_project_param_vector = np.log(project_param_vector)

    sens_jacobian = proj.calc_project_jacobian(log_project_param_vector)

    def get_scaled_sims(x):
        proj.residuals(x)
        sims = proj.get_simulations(scaled=True)
        return sims.values[:, 0]

    num_global_jac = approx_fprime(log_project_param_vector, get_scaled_sims,
                                   centered=True)
    assert np.allclose(num_global_jac, sens_jacobian, atol=0.000001)

    project_param_vector[high_deg_idx] = 0.02
    project_param_vector[low_deg_idx] = 0.003
    project_param_vector[synt_idx] = 0.05
    log_project_param_vector = np.log(project_param_vector)

    sens_rss_grad = proj.calc_rss_gradient(log_project_param_vector)
    num_rss_jac = approx_fprime(log_project_param_vector,
                                proj.calc_sum_square_residuals, centered=True)
    assert np.allclose(sens_rss_grad, num_rss_jac, atol=0.000001)
Example 4: test_logit_2d
def test_logit_2d():

    y = np.r_[0, 1, 0, 1, 0, 1, 0, 1, 1, 1]
    g = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]

    x1 = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0]
    x2 = np.r_[0, 0, 1, 0, 0, 1, 0, 1, 1, 1]
    x = np.empty((10, 2))
    x[:, 0] = x1
    x[:, 1] = x2

    model = ConditionalLogit(y, x, groups=g)

    # Check the gradient for the denominator of the partial likelihood
    for x in -1, 0, 1, 2:
        params = np.r_[x, -1.5*x]
        _, grad = model._denom_grad(0, params)
        ngrad = approx_fprime(params, lambda x: model._denom(0, x))
        assert_allclose(grad, ngrad, rtol=1e-5)

    # Check the gradient for the loglikelihood
    for x in -1, 0, 1, 2:
        params = np.r_[-0.5*x, 0.5*x]
        grad = approx_fprime(params, model.loglike)
        score = model.score(params)
        assert_allclose(grad, score, rtol=1e-4)

    result = model.fit()

    # From Stata
    assert_allclose(result.params, np.r_[1.011074, 1.236758], rtol=1e-3)
    assert_allclose(result.bse, np.r_[1.420784, 1.361738], rtol=1e-5)

    result.summary()
Example 5: test_dtypes
def test_dtypes():
    def f(x):
        return 2*x

    desired = np.array([[2, 0],
                        [0, 2]])
    assert_allclose(approx_fprime(np.array([1, 2]), f), desired)
    assert_allclose(approx_fprime(np.array([1., 2.]), f), desired)
    assert_allclose(approx_fprime(np.array([1.+0j, 2.+0j]), f), desired)
Example 6: gradient_momcond
def gradient_momcond(self, params, epsilon=1e-4, method='centered'):

    momcond = self.momcond_mean

    if method == 'centered':
        gradmoms = (approx_fprime(params, momcond, epsilon=epsilon) +
                    approx_fprime(params, momcond, epsilon=-epsilon))/2
    else:
        gradmoms = approx_fprime(params, momcond, epsilon=epsilon)

    return gradmoms
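The 'centered' branch above builds a central difference by averaging two forward-difference calls with steps +epsilon and -epsilon. A small sketch (momcond below is a hypothetical stand-in for self.momcond_mean) shows that this agrees with passing centered=True directly:

import numpy as np
from statsmodels.tools.numdiff import approx_fprime

# Hypothetical moment-condition function mapping R^2 -> R^3
def momcond(params):
    return np.array([params[0]**2 - 1.0,
                     params[0]*params[1],
                     np.exp(params[1])])

params = np.array([0.5, -0.3])
eps = 1e-4

manual = (approx_fprime(params, momcond, epsilon=eps) +
          approx_fprime(params, momcond, epsilon=-eps)) / 2
builtin = approx_fprime(params, momcond, epsilon=eps, centered=True)

# Both are central-difference Jacobians; they agree to high accuracy
assert np.allclose(manual, builtin, atol=1e-6)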
Example 7: test_grad_fun1_fd
def test_grad_fun1_fd(self):
    for test_params in self.params:
        #gtrue = self.x.sum(0)
        gtrue = self.gradtrue(test_params)
        fun = self.fun()

        epsilon = 1e-6
        gfd = numdiff.approx_fprime(test_params, fun, epsilon=epsilon,
                                    args=self.args)
        gfd += numdiff.approx_fprime(test_params, fun, epsilon=-epsilon,
                                     args=self.args)
        gfd /= 2.
        assert_almost_equal(gtrue, gfd, decimal=DEC6)
Example 8: score
def score(self, params):
    """
    Gradient of log-likelihood evaluated at params
    """
    from statsmodels.tools.numdiff import approx_fprime
    return approx_fprime(params, self.loglike,
                         epsilon=1e-4, centered=True).ravel()
Example 9: compute_param_cov
def compute_param_cov(self, params, backcast=None, robust=True):
    """
    Computes parameter covariances using numerical derivatives.

    Parameters
    ----------
    params : 1-d array
        Model parameters
    robust : bool, optional
        Flag indicating whether to use robust standard errors (True) or
        classic MLE (False)
    """
    resids = self.resids(self.starting_values())
    var_bounds = self.volatility.variance_bounds(resids)
    nobs = resids.shape[0]
    if backcast is None and self._backcast is None:
        backcast = self.volatility.backcast(resids)
        self._backcast = backcast
    elif backcast is None:
        backcast = self._backcast

    kwargs = {"sigma2": np.zeros_like(resids),
              "backcast": backcast,
              "var_bounds": var_bounds,
              "individual": False}

    hess = approx_hess(params, self._loglikelihood, kwargs=kwargs)
    hess /= nobs
    inv_hess = np.linalg.inv(hess)
    if robust:
        kwargs["individual"] = True
        scores = approx_fprime(params, self._loglikelihood, kwargs=kwargs)
        score_cov = np.cov(scores.T)
        return inv_hess.dot(score_cov).dot(inv_hess) / nobs
    else:
        return inv_hess / nobs
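Example 9 builds a robust ("sandwich") covariance: the inverse average Hessian is the bread, and the covariance of the per-observation numerical scores is the meat. Below is a self-contained sketch of the same computation on a toy normal model; nll_obs and nll_total are hypothetical stand-ins for the model's _loglikelihood with individual=True and individual=False.

import numpy as np
from statsmodels.tools.numdiff import approx_fprime, approx_hess

rng = np.random.default_rng(0)
y = 1.5 + rng.standard_normal(200)
nobs = y.shape[0]

def nll_obs(params):
    # per-observation negative log-likelihood of N(mu, sigma)
    mu, log_sigma = params
    sigma = np.exp(log_sigma)
    return 0.5*np.log(2*np.pi) + log_sigma + (y - mu)**2 / (2*sigma**2)

def nll_total(params):
    return nll_obs(params).sum()

params = np.array([y.mean(), np.log(y.std())])   # rough MLE

hess = approx_hess(params, nll_total) / nobs     # "bread": average Hessian
inv_hess = np.linalg.inv(hess)

scores = approx_fprime(params, nll_obs)          # one row per observation, as in Example 9
score_cov = np.cov(scores.T)                     # "meat": score covariance

robust_cov = inv_hess @ score_cov @ inv_hess / nobs
classic_cov = inv_hess / nobs
print(robust_cov, classic_cov, sep="\n")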
Example 10: test_poisson_2d
def test_poisson_2d():

    y = np.r_[3, 1, 4, 8, 2, 5, 4, 7, 2, 6]
    g = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]

    x1 = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0]
    x2 = np.r_[2, 1, 0, 0, 1, 2, 3, 2, 0, 1]
    x = np.empty((10, 2))
    x[:, 0] = x1
    x[:, 1] = x2

    model = ConditionalPoisson(y, x, groups=g)

    # Check the gradient for the loglikelihood
    for x in -1, 0, 1, 2:
        params = np.r_[-0.5*x, 0.5*x]
        grad = approx_fprime(params, model.loglike)
        score = model.score(params)
        assert_allclose(grad, score, rtol=1e-4)

    result = model.fit()

    # From Stata
    assert_allclose(result.params, np.r_[-.9478957, -.0134279], rtol=1e-3)
    assert_allclose(result.bse, np.r_[.3874942, .1686712], rtol=1e-5)

    result.summary()
Example 11: deriv
def deriv(self, mu):
    """
    Derivative of the variance function v'(mu)
    """
    from statsmodels.tools.numdiff import approx_fprime_cs, approx_fprime
    #return approx_fprime_cs(mu, self)  # TODO fix breaks in `fabs
    # TODO: diag is workaround problem with numdiff for 1d
    return np.diag(approx_fprime(mu, self))
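The np.diag workaround in Example 11 exists because approx_fprime returns a full Jacobian: for an elementwise function of a 1-d argument that Jacobian is diagonal, and its diagonal is the elementwise derivative. A tiny sketch with a made-up variance function v(mu) = mu**2:

import numpy as np
from statsmodels.tools.numdiff import approx_fprime

def v(mu):
    # elementwise, so output i depends only on input i
    return mu**2

mu = np.array([0.5, 1.0, 2.0])

jac = approx_fprime(mu, v)    # (3, 3) Jacobian, zero off the diagonal
deriv = np.diag(jac)          # elementwise derivative v'(mu) = 2*mu
print(deriv)                  # approximately [1., 2., 4.]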
Example 12: deriv2
def deriv2(self, p):
    """Second derivative of the link function g''(p)

    implemented through numerical differentiation
    """
    from statsmodels.tools.numdiff import approx_fprime
    # Note: special function for norm.ppf does not support complex
    return np.diag(approx_fprime(p, self.deriv, centered=True))
Example 13: fit_map
def fit_map(self, method="BFGS", minim_opts=None, scale_fe=False):
    """
    Construct the Laplace approximation to the posterior
    distribution.

    Parameters
    ----------
    method : string
        Optimization method for finding the posterior mode.
    minim_opts : dict-like
        Options passed to scipy.minimize.
    scale_fe : bool
        If true, the columns of the fixed effects design matrix
        are centered and scaled to unit variance before fitting
        the model.  The results are back-transformed so that the
        results are presented on the original scale.

    Returns
    -------
    BayesMixedGLMResults instance.
    """
    if scale_fe:
        mn = self.exog.mean(0)
        sc = self.exog.std(0)
        self._exog_save = self.exog
        self.exog = self.exog.copy()
        ixs = np.flatnonzero(sc > 1e-8)
        self.exog[:, ixs] -= mn[ixs]
        self.exog[:, ixs] /= sc[ixs]

    def fun(params):
        return -self.logposterior(params)

    def grad(params):
        return -self.logposterior_grad(params)

    start = self._get_start()

    r = minimize(fun, start, method=method, jac=grad, options=minim_opts)
    if not r.success:
        msg = ("Laplace fitting did not converge, |gradient|=%.6f" %
               np.sqrt(np.sum(r.jac**2)))
        warnings.warn(msg)

    from statsmodels.tools.numdiff import approx_fprime
    hess = approx_fprime(r.x, grad)
    cov = np.linalg.inv(hess)

    params = r.x
    if scale_fe:
        self.exog = self._exog_save
        del self._exog_save
        params[ixs] /= sc[ixs]
        cov[ixs, :][:, ixs] /= np.outer(sc[ixs], sc[ixs])

    return BayesMixedGLMResults(self, params, cov, optim_retvals=r)
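In Example 13 the Laplace approximation takes the posterior covariance to be the inverse Hessian of the negative log posterior at its mode, and that Hessian is obtained by applying approx_fprime to the analytic gradient. A minimal sketch on a toy Gaussian target, where the Laplace approximation is exact (all names below are made up for illustration):

import numpy as np
from scipy.optimize import minimize
from statsmodels.tools.numdiff import approx_fprime

prec = np.array([[2.0, 0.5],
                 [0.5, 1.0]])       # precision matrix of the toy posterior
mean = np.array([1.0, -2.0])

def neg_logpost(params):
    d = params - mean
    return 0.5 * d @ prec @ d

def neg_logpost_grad(params):
    return prec @ (params - mean)

r = minimize(neg_logpost, np.zeros(2), jac=neg_logpost_grad, method="BFGS")

# Hessian of the negative log posterior at the mode, via its gradient
hess = approx_fprime(r.x, neg_logpost_grad)
cov = np.linalg.inv(hess)

print(r.x)   # close to mean
print(cov)   # close to np.linalg.inv(prec)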
Example 14: test_score
def test_score(self):
    # this tests the score at parameters different from the optimum
    import statsmodels.tools.numdiff as nd
    score_by_numdiff = nd.approx_fprime(self.res1.params * 2,
                                        self.mod1.loglike, centered=True)
    np.testing.assert_allclose(self.mod1.score(self.res1.params * 2),
                               score_by_numdiff, rtol=RTOL_4, atol=ATOL_1)
Example 15: test_grad_fun1_fdc
def test_grad_fun1_fdc(self):
    for test_params in self.params:
        #gtrue = self.x.sum(0)
        gtrue = self.gradtrue(test_params)
        fun = self.fun()

        epsilon = 1e-6  # default epsilon 1e-6 is not precise enough
        gfd = numdiff.approx_fprime(test_params, fun, epsilon=1e-8,
                                    args=self.args, centered=True)
        assert_almost_equal(gtrue, gfd, decimal=DEC5)