This page collects typical usage examples of the Python function statsmodels.tools.add_constant. If you are unsure what add_constant does, how to call it, or what real-world usage looks like, the curated examples below should help.
Fifteen code examples of add_constant are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
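Before turning to the examples, here is a minimal sketch of what add_constant does: by default it prepends a column of ones to an array or DataFrame so that a downstream regression model estimates an intercept. The variable names below are illustrative, not taken from any of the examples.

import numpy as np
from statsmodels.tools import add_constant

x = np.arange(1.0, 6.0)   # shape (5,)
X = add_constant(x)       # shape (5, 2); the first column is all ones
print(X[:, 0])            # [1. 1. 1. 1. 1.]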
Example 1: __init__
def __init__(self):
    data = heart.load()
    endog = np.log10(data.endog)
    exog = add_constant(data.exog)
    self.mod1 = emplikeAFT(endog, exog, data.censors)
    self.res1 = self.mod1.fit()
    self.res2 = AFTRes()
Example 2: test_manova_no_formula_no_hypothesis
def test_manova_no_formula_no_hypothesis():
    # Same as previous test, only skipping the formula interface
    exog = add_constant(pd.get_dummies(X[['Loc']], drop_first=True))
    endog = X[['Basal', 'Occ', 'Max']]
    mod = MANOVA(endog, exog)
    r = mod.mv_test()
    assert isinstance(r, MultivariateTestResults)
Example 3: test_forecast
def test_forecast(self):
    end = len(self.true['data']['consump']) + 15 - 1
    exog = add_constant(self.true['forecast_data']['m2'])
    assert_almost_equal(
        self.result.predict(end=end, exog=exog)[0],
        self.true['forecast'], 3
    )
Example 4: setup_class
def setup_class(cls):
    data = heart.load()
    endog = np.log10(data.endog)
    exog = add_constant(data.exog)
    cls.mod1 = emplikeAFT(endog, exog, data.censors)
    cls.res1 = cls.mod1.fit()
    cls.res2 = AFTRes()
Example 5: test_multiple_constraints
def test_multiple_constraints():
    endog = dta['infl']
    exog = add_constant(dta[['m1', 'unemp', 'cpi']])
    constraints = [
        'm1 + unemp = 1',
        'cpi = 0',
    ]
    mod = RecursiveLS(endog, exog, constraints=constraints)
    res = mod.fit()

    # See tests/results/test_rls.do
    desired = [-0.7001083844336, -0.0018477514060, 1.0018477514060, 0]
    assert_allclose(res.params, desired, atol=1e-10)

    # See tests/results/test_rls.do
    desired = [.4699552366, .0005369357, .0005369357, 0]
    assert_allclose(res.bse[0], desired[0], atol=1e-1)
    assert_allclose(res.bse[1:-1], desired[1:-1], atol=1e-4)

    # See tests/results/test_rls.do
    desired = -534.4292052931121
    # Note that to compute what Stata reports as the llf, we need to use a
    # different denominator for estimating the scale, and then compute the
    # llf from the alternative recursive residuals
    scale_alternative = np.sum((
        res.standardized_forecasts_error[0, 1:] *
        res.filter_results.obs_cov[0, 0]**0.5)**2) / mod.nobs
    llf_alternative = np.log(norm.pdf(res.resid_recursive, loc=0,
                                      scale=scale_alternative**0.5)).sum()
    assert_allclose(llf_alternative, desired)
Example 6: test_manova_no_formula
def test_manova_no_formula():
    # Same as previous test, only skipping the formula interface
    exog = add_constant(pd.get_dummies(X[['Loc']], drop_first=True))
    endog = X[['Basal', 'Occ', 'Max']]
    mod = MANOVA(endog, exog)
    intercept = np.zeros((1, 3))
    intercept[0, 0] = 1
    loc = np.zeros((2, 3))
    loc[0, 1] = loc[1, 2] = 1
    hypotheses = [('Intercept', intercept), ('Loc', loc)]
    r = mod.mv_test(hypotheses)

    assert_almost_equal(r['Loc']['stat'].loc["Wilks' lambda", 'Value'],
                        0.60143661, decimal=8)
    assert_almost_equal(r['Loc']['stat'].loc["Pillai's trace", 'Value'],
                        0.44702843, decimal=8)
    assert_almost_equal(r['Loc']['stat'].loc["Hotelling-Lawley trace",
                                             'Value'],
                        0.58210348, decimal=8)
    assert_almost_equal(r['Loc']['stat'].loc["Roy's greatest root", 'Value'],
                        0.35530890, decimal=8)

    assert_almost_equal(r['Loc']['stat'].loc["Wilks' lambda", 'F Value'],
                        0.77, decimal=2)
    assert_almost_equal(r['Loc']['stat'].loc["Pillai's trace", 'F Value'],
                        0.86, decimal=2)
    assert_almost_equal(r['Loc']['stat'].loc["Hotelling-Lawley trace",
                                             'F Value'],
                        0.75, decimal=2)
    assert_almost_equal(r['Loc']['stat'].loc["Roy's greatest root", 'F Value'],
                        1.07, decimal=2)

    assert_almost_equal(r['Loc']['stat'].loc["Wilks' lambda", 'Num DF'],
                        6, decimal=3)
    assert_almost_equal(r['Loc']['stat'].loc["Pillai's trace", 'Num DF'],
                        6, decimal=3)
    assert_almost_equal(r['Loc']['stat'].loc["Hotelling-Lawley trace",
                                             'Num DF'],
                        6, decimal=3)
    assert_almost_equal(r['Loc']['stat'].loc["Roy's greatest root", 'Num DF'],
                        3, decimal=3)

    assert_almost_equal(r['Loc']['stat'].loc["Wilks' lambda", 'Den DF'],
                        16, decimal=3)
    assert_almost_equal(r['Loc']['stat'].loc["Pillai's trace", 'Den DF'],
                        18, decimal=3)
    assert_almost_equal(r['Loc']['stat'].loc["Hotelling-Lawley trace",
                                             'Den DF'],
                        9.0909, decimal=4)
    assert_almost_equal(r['Loc']['stat'].loc["Roy's greatest root", 'Den DF'],
                        9, decimal=3)

    assert_almost_equal(r['Loc']['stat'].loc["Wilks' lambda", 'Pr > F'],
                        0.6032, decimal=4)
    assert_almost_equal(r['Loc']['stat'].loc["Pillai's trace", 'Pr > F'],
                        0.5397, decimal=4)
    assert_almost_equal(r['Loc']['stat'].loc["Hotelling-Lawley trace",
                                             'Pr > F'],
                        0.6272, decimal=4)
    assert_almost_equal(r['Loc']['stat'].loc["Roy's greatest root", 'Pr > F'],
                        0.4109, decimal=4)
Example 7: test_plots
def test_plots():
    if not have_matplotlib:
        raise SkipTest

    exog = add_constant(dta[['m1', 'pop']])
    mod = RecursiveLS(endog, exog)
    res = mod.fit()

    # Basic plot
    fig = res.plot_recursive_coefficient()
    plt.close(fig)

    # Specific variable
    fig = res.plot_recursive_coefficient(variables=['m1'])
    plt.close(fig)

    # All variables
    fig = res.plot_recursive_coefficient(variables=[0, 'm1', 'pop'])
    plt.close(fig)

    # Basic plot
    fig = res.plot_cusum()
    plt.close(fig)

    # Other alphas
    for alpha in [0.01, 0.10]:
        fig = res.plot_cusum(alpha=alpha)
        plt.close(fig)

    # Invalid alpha
    assert_raises(ValueError, res.plot_cusum, alpha=0.123)

    # Basic plot
    fig = res.plot_cusum_squares()
    plt.close(fig)

    # Numpy input (no dates)
    mod = RecursiveLS(endog.values, exog.values)
    res = mod.fit()

    # Basic plot
    fig = res.plot_recursive_coefficient()
    plt.close(fig)

    # Basic plot
    fig = res.plot_cusum()
    plt.close(fig)

    # Basic plot
    fig = res.plot_cusum_squares()
    plt.close(fig)
Example 8: __init__
def __init__(self):
    # Remove the regression coefficients from the parameters, since they
    # will be estimated as part of the state vector
    true = dict(results_sarimax.friedman2_mle)
    exog = add_constant(true['data']['m2']) / 10.

    true['mle_params_exog'] = true['params_exog'][:]
    true['mle_se_exog'] = true['se_exog_oim'][:]
    true['params_exog'] = []
    true['se_exog'] = []

    super(TestFriedmanStateRegression, self).__init__(
        true, exog=exog, mle_regression=False
    )

    self.result = self.model.filter()
Example 9: setup_class
def setup_class(cls):
    path = os.path.join(current_path, 'results', 'mar_filardo.csv')
    cls.mar_filardo = pd.read_csv(path)
    true = {
        'params': np.r_[4.35941747, -1.6493936, 1.7702123, 0.9945672,
                        0.517298, -0.865888,
                        np.exp(-0.362469)**2,
                        0.189474, 0.079344, 0.110944, 0.122251],
        'llf': -586.5718,
        'llf_fit': -586.5718,
        'llf_fit_em': -586.5718
    }
    endog = cls.mar_filardo['dlip'].iloc[1:].values
    exog_tvtp = add_constant(
        cls.mar_filardo['dmdlleading'].iloc[:-1].values)
    super(TestFilardo, cls).setup_class(
        true, endog, k_regimes=2, order=4, switching_ar=False,
        exog_tvtp=exog_tvtp)
Example 10: test_plots
def test_plots(close_figures):
    exog = add_constant(dta[['m1', 'pop']])
    mod = RecursiveLS(endog, exog)
    res = mod.fit()

    # Basic plot
    try:
        from pandas.plotting import register_matplotlib_converters
        register_matplotlib_converters()
    except ImportError:
        pass
    fig = res.plot_recursive_coefficient()

    # Specific variable
    fig = res.plot_recursive_coefficient(variables=['m1'])

    # All variables
    fig = res.plot_recursive_coefficient(variables=[0, 'm1', 'pop'])

    # Basic plot
    fig = res.plot_cusum()

    # Other alphas
    for alpha in [0.01, 0.10]:
        fig = res.plot_cusum(alpha=alpha)

    # Invalid alpha
    assert_raises(ValueError, res.plot_cusum, alpha=0.123)

    # Basic plot
    fig = res.plot_cusum_squares()

    # Numpy input (no dates)
    mod = RecursiveLS(endog.values, exog.values)
    res = mod.fit()

    # Basic plot
    fig = res.plot_recursive_coefficient()

    # Basic plot
    fig = res.plot_cusum()

    # Basic plot
    fig = res.plot_cusum_squares()
Example 11: test_glm
def test_glm(constraints=None):
    # More comprehensive tests against GLM estimates (this is sort of
    # redundant given `test_ols`, but it mostly complements the tests
    # in `test_glm_constrained`)
    endog = dta.infl
    exog = add_constant(dta[['unemp', 'm1']])

    mod = RecursiveLS(endog, exog, constraints=constraints)
    res = mod.fit()

    mod_glm = GLM(endog, exog)
    if constraints is None:
        res_glm = mod_glm.fit()
    else:
        res_glm = mod_glm.fit_constrained(constraints=constraints)

    # Regression coefficients, standard errors, and estimated scale
    assert_allclose(res.params, res_glm.params)
    assert_allclose(res.bse, res_glm.bse, atol=1e-6)
    # Note: scale here is computed according to Harvey, 1989, 4.2.5, and is
    # called the ML estimator and sometimes (e.g. later in section 5)
    # denoted \tilde \sigma_*^2
    assert_allclose(res.filter_results.obs_cov[0, 0], res_glm.scale)

    # DoF
    # Note: GLM does not include the intercept in the DoF, so modify by -1
    assert_equal(res.df_model - 1, res_glm.df_model)

    # OLS residuals are equivalent to smoothed forecast errors
    # (the latter are defined as e_t|T by Harvey, 1989, 5.4.5)
    # (this follows since the smoothed state simply contains the
    # full-information estimates of the regression coefficients)
    actual = (mod.endog[:, 0] -
              np.sum(mod['design', 0, :, :] * res.smoothed_state, axis=0))
    assert_allclose(actual, res_glm.resid_response, atol=1e-7)

    # Given the estimate of scale as `sum(v_t^2 / f_t) / (T - d)` (see
    # Harvey, 1989, 4.2.5 on p. 183), llf_recursive is equivalent to the
    # full OLS loglikelihood (i.e. without the scale concentrated out).
    desired = mod_glm.loglike(res_glm.params, scale=res_glm.scale)
    assert_allclose(res.llf_recursive, desired)

    # Alternatively, we can construct the concentrated OLS loglikelihood
    # by computing the scale term with `nobs` in the denominator rather
    # than `nobs - d`.
    scale_alternative = np.sum((
        res.standardized_forecasts_error[0, 1:] *
        res.filter_results.obs_cov[0, 0]**0.5)**2) / mod.nobs
    llf_alternative = np.log(norm.pdf(res.resid_recursive, loc=0,
                                      scale=scale_alternative**0.5)).sum()
    assert_allclose(llf_alternative, res_glm.llf)

    # Prediction
    # TODO: prediction in this case is not working.
    if constraints is None:
        design = np.ones((1, 3, 10))
        actual = res.forecast(10, design=design)
        assert_allclose(actual, res_glm.predict(np.ones((10, 3))))
    else:
        design = np.ones((2, 3, 10))
        assert_raises(NotImplementedError, res.forecast, 10, design=design)

    # Hypothesis tests
    actual = res.t_test('m1 = 0')
    desired = res_glm.t_test('m1 = 0')
    assert_allclose(actual.statistic, desired.statistic)
    assert_allclose(actual.pvalue, desired.pvalue, atol=1e-15)

    actual = res.f_test('m1 = 0')
    desired = res_glm.f_test('m1 = 0')
    assert_allclose(actual.statistic, desired.statistic)
    assert_allclose(actual.pvalue, desired.pvalue)

    # Information criteria
    # Note: the llf and llf_obs given in the results are based on the Kalman
    # filter and so the ic given in the results will not be identical to the
    # OLS versions. Additionally, llf_recursive is comparable to the
    # non-concentrated llf, and not the concentrated llf that is by default
    # used in OLS. Compute new ic based on llf_alternative to compare.
    actual_aic = aic(llf_alternative, res.nobs_effective, res.df_model)
    assert_allclose(actual_aic, res_glm.aic)
Example 12: add_constant
from statsmodels.tools import add_constant
from numpy.testing import assert_equal, assert_raises, assert_allclose

current_path = os.path.dirname(os.path.abspath(__file__))

results_R_path = 'results' + os.sep + 'results_rls_R.csv'
results_R = pd.read_csv(current_path + os.sep + results_R_path)
results_stata_path = 'results' + os.sep + 'results_rls_stata.csv'
results_stata = pd.read_csv(current_path + os.sep + results_stata_path)

dta = macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-07-01', freq='QS')

endog = dta['cpi']
exog = add_constant(dta['m1'])


def test_endog():
    # Tests for numpy input
    mod = RecursiveLS(endog.values, exog.values)
    res = mod.fit()

    # Test the RLS estimates against OLS estimates
    mod_ols = OLS(endog, exog)
    res_ols = mod_ols.fit()
    assert_allclose(res.params, res_ols.params)

    # Tests for 1-dim exog
    mod = RecursiveLS(endog, dta['m1'].values)
    res = mod.fit()
Example 13: _EM_test
def _EM_test(self, nuisance_params, params=None, param_nums=None,
             b0_vals=None, F=None, survidx=None, uncens_nobs=None,
             numcensbelow=None, km=None, uncensored=None, censored=None,
             maxiter=None, ftol=None):
    """
    Uses the EM algorithm to compute the maximum likelihood of a test.

    Parameters
    ----------
    nuisance_params : array
        Vector of values to be used as nuisance params.
    maxiter : int
        Number of iterations in the EM algorithm for a parameter vector.

    Returns
    -------
    -2 * log-likelihood ratio at the hypothesized values and
    nuisance params.

    Notes
    -----
    Optional parameters are provided by the test_beta function.
    """
    iters = 0
    params[param_nums] = b0_vals

    nuis_param_index = np.int_(np.delete(np.arange(self.model.nvar),
                                         param_nums))
    params[nuis_param_index] = nuisance_params
    to_test = params.reshape(self.model.nvar, 1)
    opt_res = np.inf
    diff = np.inf
    while iters < maxiter and diff > ftol:
        F = F.flatten()
        death = np.cumsum(F[::-1])
        survivalprob = death[::-1]
        surv_point_mat = np.dot(F.reshape(-1, 1),
                                1. / survivalprob[survidx].reshape(1, -1))
        surv_point_mat = add_constant(surv_point_mat)
        summed_wts = np.cumsum(surv_point_mat, axis=1)
        wts = summed_wts[np.int_(np.arange(uncens_nobs)),
                         numcensbelow[uncensored]]
        # ^ E step
        # See Zhou 2005, section 3.
        self.model._fit_weights = wts
        new_opt_res = self._opt_wtd_nuis_regress(to_test)
        # ^ Uncensored weights' contribution to likelihood value.
        F = self.new_weights
        # ^ M step
        diff = np.abs(new_opt_res - opt_res)
        opt_res = new_opt_res
        iters = iters + 1
    death = np.cumsum(F.flatten()[::-1])
    survivalprob = death[::-1]
    llike = -opt_res + np.sum(np.log(survivalprob[survidx]))
    wtd_km = km.flatten() / np.sum(km)
    survivalmax = np.cumsum(wtd_km[::-1])[::-1]
    llikemax = np.sum(np.log(wtd_km[uncensored])) + \
        np.sum(np.log(survivalmax[censored]))
    if iters == maxiter:
        warnings.warn('The EM reached the maximum number of iterations',
                      IterationLimitWarning)
    return -2 * (llike - llikemax)
Example 14: __init__
def __init__(self):
    data = stackloss.load()
    data.exog = add_constant(data.exog)
    self.res1 = OLS(data.endog, data.exog).fit()
    self.res2 = RegressionResults()
Example 15: setup_class
def setup_class(cls):
    data = stackloss.load(as_pandas=False)
    data.exog = add_constant(data.exog)
    cls.res1 = OLS(data.endog, data.exog).fit()
    cls.res2 = RegressionResults()
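Finally, a note on two keyword arguments that all of the examples above leave at their defaults. In recent statsmodels releases the signature is add_constant(data, prepend=True, has_constant='skip'); treat the exact defaults in your installed version as an assumption to verify. prepend controls whether the column of ones goes before or after the existing columns, and has_constant controls the behavior when the data already contain a constant column. A minimal sketch, assuming that signature:

import pandas as pd
from statsmodels.tools import add_constant

df = pd.DataFrame({'m1': [1.0, 2.0, 3.0]})

# Append the column of ones after 'm1' instead of prepending it; for
# pandas input the new column is named 'const'.
X = add_constant(df, prepend=False)

# 'skip' (the default) returns the data unchanged if a constant column is
# already present; 'add' appends another one; 'raise' raises an error.
X = add_constant(X, has_constant='skip')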