This article collects typical usage examples of the OLS.fit method from Python's statsmodels.regression.linear_model. If you have been wondering what OLS.fit does, how to call it, or what it looks like in practice, the curated code examples below should help. You can also read further about the class it belongs to, statsmodels.regression.linear_model.OLS.
Fifteen code examples of OLS.fit are shown below, ordered by popularity by default.
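Before the examples, here is a minimal, self-contained sketch of the basic pattern most of them build on: construct an OLS model from an endog array and an exog design matrix (adding the constant column yourself if you want an intercept), call fit(), and read the results object. The data and variable names below are synthetic and purely illustrative; they are not taken from any of the examples.

# Minimal OLS.fit sketch on synthetic data (illustrative only)
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant

rng = np.random.RandomState(0)
n = 100
x = rng.normal(size=(n, 2))              # two synthetic predictors
y = 1.0 + 2.0 * x[:, 0] - 0.5 * x[:, 1] + rng.normal(size=n)

exog = add_constant(x, prepend=True)     # OLS does not add an intercept for you
model = OLS(y, exog)
results = model.fit()                    # default solver is the pseudoinverse ('pinv')

print(results.params)                    # estimated [const, beta1, beta2]
print(results.bse)                       # classical standard errors
print(results.summary())                 # full regression table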
Example 1: setup_class
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def setup_class(cls):
    cls.cov_type = 'HAC'
    kwds = {'kernel': sw.weights_uniform, 'maxlags': 2}
    mod1 = GLM(endog, exog, family=families.Gaussian())
    cls.res1 = mod1.fit(cov_type='HAC', cov_kwds=kwds)
    mod2 = OLS(endog, exog)
    cls.res2 = mod2.fit(cov_type='HAC', cov_kwds=kwds)
    # for debugging
    cls.res3 = mod2.fit(cov_type='HAC', cov_kwds={'maxlags': 2})
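Example 1 fits the same endog/exog with GLM (Gaussian family) and with OLS, both under an HAC (heteroskedasticity- and autocorrelation-consistent) covariance, so that their robust standard errors can be compared. The sketch below isolates the cov_type/cov_kwds pattern on synthetic autocorrelated data, using the library's default HAC kernel rather than the uniform kernel above; all names and numbers are illustrative.

# Default vs. HAC standard errors with autocorrelated errors (illustrative sketch)
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant

rng = np.random.RandomState(12345)
n = 200
x = rng.normal(size=n)
e = np.zeros(n)
for t in range(1, n):                  # AR(1)-style errors so the two covariances differ
    e[t] = 0.6 * e[t - 1] + rng.normal()
y = 1.0 + 0.5 * x + e

exog = add_constant(x)
res_plain = OLS(y, exog).fit()                                       # classical covariance
res_hac = OLS(y, exog).fit(cov_type='HAC', cov_kwds={'maxlags': 2})  # HAC covariance, 2 lags

print(res_plain.bse)   # typically understated when errors are autocorrelated
print(res_hac.bse)     # HAC-robust standard errors

The point estimates are identical in both fits; only the covariance of the estimates (and hence bse, t-statistics, and confidence intervals) changes.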
Example 2: test_permuted_ols_statsmodels_withcovar_multivariate
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def test_permuted_ols_statsmodels_withcovar_multivariate(random_state=0):
    """Test permuted_ols with multiple tested variates and covariates.

    It is equivalent to fitting several models with only one tested variate.

    This test has a statsmodels dependence. There seems to be no simple,
    alternative way to perform an F-test on a linear model including
    covariates.
    """
    try:
        from statsmodels.regression.linear_model import OLS
    except ImportError:
        warnings.warn("Statsmodels is required to run this test")
        raise nose.SkipTest
    rng = check_random_state(random_state)

    # design parameters
    n_samples = 50
    n_targets = 10
    n_covars = 2
    # create design
    target_vars = rng.randn(n_samples, n_targets)
    tested_var = rng.randn(n_samples, 1)
    confounding_vars = rng.randn(n_samples, n_covars)

    # statsmodels OLS
    fvals = np.empty((n_targets, 1))
    test_matrix = np.array([[1.0] + [0.0] * n_covars])
    for i in range(n_targets):
        ols = OLS(target_vars[:, i],
                  np.hstack((tested_var, confounding_vars)))
        fvals[i] = ols.fit().f_test(test_matrix).fvalue[0][0]

    # permuted OLS
    _, orig_scores, _ = permuted_ols(
        tested_var, target_vars, confounding_vars, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_almost_equal(fvals, orig_scores, decimal=6)

    ### Adds intercept
    # permuted OLS
    _, orig_scores_addintercept, _ = permuted_ols(
        tested_var, target_vars, confounding_vars, model_intercept=True,
        n_perm=0, random_state=random_state)

    # statsmodels OLS
    confounding_vars = np.hstack((confounding_vars, np.ones((n_samples, 1))))
    fvals_addintercept = np.empty((n_targets, 1))
    test_matrix = np.array([[1.0] + [0.0] * (n_covars + 1)])
    for i in range(n_targets):
        ols = OLS(target_vars[:, i],
                  np.hstack((tested_var, confounding_vars)))
        fvals_addintercept[i] = ols.fit().f_test(test_matrix).fvalue[0][0]
    assert_array_almost_equal(fvals_addintercept, orig_scores_addintercept,
                              decimal=6)
Example 3: fit_dlogM_mw
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def fit_dlogM_mw(tab, sfrsd_tab, mltype='ring', mlb='i'):
    merge_tab = t.join(tab, sfrsd_tab, 'plateifu')
    is_agn = m.mask_from_maskbits(merge_tab['mngtarg3'], [1, 2, 3, 4])

    mlb_ix = totalmass.StellarMass.bands_ixs[mlb]
    absmag_sun_mlb = totalmass.StellarMass.absmag_sun[mlb_ix]

    logmass_in_ifu = merge_tab['mass_in_ifu'].to(u.dex(u.Msun))
    logmass_in_ifu_lw = merge_tab['ml_fluxwt'] + merge_tab['ifu_absmag'][:, mlb_ix].to(
        u.dex(m.bandpass_sol_l_unit),
        totalmass.bandpass_flux_to_solarunits(absmag_sun_mlb))
    merge_tab['dlogmass_lw'] = logmass_in_ifu - logmass_in_ifu_lw

    ha_corr = np.exp(merge_tab['mean_atten_mwtd'] * (6563 / 5500)**-1.3)
    sfrsd = merge_tab['sigma_sfr'] * ha_corr * u.Msun / u.yr / u.pc**2
    mass_pca = merge_tab['mass_in_ifu'] + merge_tab['outer_mass_{}'.format(mltype)]
    ssfrsd = sfrsd / mass_pca
    merge_tab['log_ssfrsd'] = ssfrsd.to(u.dex(ssfrsd.unit))
    merge_tab['log_ssfrsd'][~np.isfinite(merge_tab['log_ssfrsd'])] = \
        np.nan * merge_tab['log_ssfrsd'].unit

    ols = OLS(
        endog=np.array(merge_tab['dlogmass_lw'][~is_agn]),
        exog=sm_add_constant(
            t.Table(merge_tab['mean_atten_mwtd', 'std_atten_mwtd',
                              'log_ssfrsd'])[~is_agn].to_pandas(),
            prepend=False),
        hasconst=True, missing='drop')

    olsfit = ols.fit()

    return olsfit
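Example 3 builds its design as a pandas DataFrame, appends the constant itself with add_constant(..., prepend=False), declares that via hasconst=True, and lets the model discard incomplete rows with missing='drop'. A minimal sketch of just those keyword arguments on made-up data containing a NaN (all names and values are illustrative):

# hasconst=True and missing='drop' with a pandas design (illustrative sketch)
import numpy as np
import pandas as pd
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant

df = pd.DataFrame({'x1': [0.1, 0.4, np.nan, 0.9, 1.3],
                   'x2': [1.0, 0.8, 0.5, 0.3, 0.1]})
y = np.array([1.2, 1.9, 2.5, 3.1, 4.0])

exog = add_constant(df, prepend=False)               # adds a 'const' column at the end
model = OLS(y, exog, hasconst=True, missing='drop')  # the row containing NaN is dropped
res = model.fit()

print(res.nobs)      # 4.0, since one observation was dropped
print(res.params)    # coefficients labelled x1, x2, const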
Example 4: test_endog
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def test_endog():
    # Tests for numpy input
    mod = RecursiveLS(endog.values, exog.values)
    res = mod.fit()

    # Test the RLS estimates against OLS estimates
    mod_ols = OLS(endog, exog)
    res_ols = mod_ols.fit()
    assert_allclose(res.params, res_ols.params)

    # Tests for 1-dim exog
    mod = RecursiveLS(endog, dta['m1'].values)
    res = mod.fit()

    # Test the RLS estimates against OLS estimates
    mod_ols = OLS(endog, dta['m1'])
    res_ols = mod_ols.fit()
    assert_allclose(res.params, res_ols.params)
Example 5: test_filter
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def test_filter():
    # Basic test for filtering
    mod = RecursiveLS(endog, exog)
    res = mod.filter()

    # Test the RLS estimates against OLS estimates
    mod_ols = OLS(endog, exog)
    res_ols = mod_ols.fit()
    assert_allclose(res.params, res_ols.params)
Example 6: test_regularized_refit
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def test_regularized_refit():
    n = 100
    p = 5
    np.random.seed(3132)
    xmat = np.random.normal(size=(n, p))

    # covariates 0 and 2 matter
    yvec = xmat[:, 0] + xmat[:, 2] + np.random.normal(size=n)

    model1 = OLS(yvec, xmat)
    result1 = model1.fit_regularized(alpha=2., L1_wt=0.5, refit=True)

    model2 = OLS(yvec, xmat[:, [0, 2]])
    result2 = model2.fit()

    ii = [0, 2]
    assert_allclose(result1.params[ii], result2.params)
    assert_allclose(result1.bse[ii], result2.bse)
Example 7: test_cusum
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def test_cusum():
    mod = RecursiveLS(endog, exog)
    res = mod.fit()

    # Test the cusum statistics against those from R (strucchange)
    # These values are not even close to ours, to Stata's, or to the alternate
    # statsmodels values
    # assert_allclose(res.cusum, results_R['cusum'])

    # Test the cusum statistics against Stata (cusum6)
    # Note: cusum6 excludes the first 3 elements due to OLS initialization
    # whereas we exclude only the first 2. Also there are initialization
    # differences (as seen above in the recursive residuals).
    # Here we explicitly reverse engineer our cusum to match theirs, to show
    # the equivalence
    d = res.nobs_diffuse
    cusum = res.cusum * np.std(res.resid_recursive[d:], ddof=1)
    cusum -= res.resid_recursive[d]
    cusum /= np.std(res.resid_recursive[d+1:], ddof=1)
    cusum = cusum[1:]
    assert_allclose(cusum, results_stata.iloc[3:]['cusum'],
                    atol=1e-6, rtol=1e-5)

    # Test the cusum statistics against statsmodels estimates
    mod_ols = OLS(endog, exog)
    res_ols = mod_ols.fit()
    desired_cusum = recursive_olsresiduals(res_ols)[-2][1:]
    assert_allclose(res.cusum, desired_cusum, rtol=1e-6)

    # Test the cusum bounds against Stata (cusum6)
    # Again note that cusum6 excludes the first 3 elements, so we need to
    # change the ddof and points.
    actual_bounds = res._cusum_significance_bounds(
        alpha=0.05, ddof=1, points=np.arange(d+1, res.nobs))
    desired_bounds = results_stata.iloc[3:][['lw', 'uw']].T
    assert_allclose(actual_bounds, desired_bounds, rtol=1e-6)

    # Test the cusum bounds against statsmodels
    actual_bounds = res._cusum_significance_bounds(
        alpha=0.05, ddof=0, points=np.arange(d, res.nobs))
    desired_bounds = recursive_olsresiduals(res_ols)[-1]
    assert_allclose(actual_bounds, desired_bounds)

    # Test for invalid calls
    assert_raises(ValueError, res._cusum_squares_significance_bounds,
                  alpha=0.123)
Example 8: fit
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def fit(self):
    """
    Fits the model and provides regression results.

    Returns
    -------
    Results : class
        Empirical likelihood regression class
    """
    exog_with = add_constant(self.exog, prepend=True)
    restricted_model = OLS(self.endog, exog_with)
    restricted_fit = restricted_model.fit()
    restricted_el = restricted_fit.el_test(
        np.array([0]), np.array([0]), ret_params=1)
    params = np.squeeze(restricted_el[3])
    beta_hat_llr = restricted_el[0]
    llf = np.sum(np.log(restricted_el[2]))
    return OriginResults(restricted_model, params, beta_hat_llr, llf)
Example 9: structure
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def structure(self):  # Make the chart label which predictor was removed
    '''Reruns the regression by removing one of the predictor columns and
    then plots the residuals versus the target'''
    # The length of the transpose of the predictors array
    # gives the number of predictors in the model
    model_list = []
    for i in range(1, len(self.predictors_array.transpose())):
        temp_target = self.predictors_array[:, i].reshape(
            [len(self.predictors_array), 1])
        temp_model = OLS(temp_target,
                         np.delete(self.predictors_array, i, 1))
        temp_results = temp_model.fit()
        model_list.append(temp_results)
        del temp_target
        del temp_model
    for model in model_list:
        plt.scatter(model.fittedvalues, model.resid)
        plt.show()
Example 10: test_resid_recursive
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def test_resid_recursive():
    mod = RecursiveLS(endog, exog)
    res = mod.fit()

    # Test the recursive residuals against those from R (strucchange)
    assert_allclose(res.resid_recursive[2:10].T,
                    results_R.iloc[:8]['rec_resid'])
    assert_allclose(res.resid_recursive[9:20].T,
                    results_R.iloc[7:18]['rec_resid'])
    assert_allclose(res.resid_recursive[19:].T,
                    results_R.iloc[17:]['rec_resid'])

    # Test the RLS estimates against those from Stata (cusum6)
    assert_allclose(res.resid_recursive[3:],
                    results_stata.iloc[3:]['rr'], atol=1e-5, rtol=1e-5)

    # Test the RLS estimates against statsmodels estimates
    mod_ols = OLS(endog, exog)
    res_ols = mod_ols.fit()
    desired_resid_recursive = recursive_olsresiduals(res_ols)[4][2:]
    assert_allclose(res.resid_recursive[2:], desired_resid_recursive)
Example 11: setupClass
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def setupClass(cls):
    from .results.results_regression import Longley
    data = longley.load()
    data.exog = add_constant(data.exog, prepend=False)
    res1 = OLS(data.endog, data.exog).fit()
    res2 = Longley()
    res2.wresid = res1.wresid  # workaround hack
    cls.res1 = res1
    cls.res2 = res2

    res_qr = OLS(data.endog, data.exog).fit(method="qr")

    model_qr = OLS(data.endog, data.exog)
    Q, R = np.linalg.qr(data.exog)
    model_qr.exog_Q, model_qr.exog_R = Q, R
    model_qr.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
    model_qr.rank = np_matrix_rank(R)
    res_qr2 = model_qr.fit(method="qr")

    cls.res_qr = res_qr
    cls.res_qr_manual = res_qr2
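Example 11 exercises the QR solver path: OLS.fit accepts method="qr" (solve via a QR decomposition of exog) as an alternative to the default method="pinv" (Moore-Penrose pseudoinverse), and the test also rebuilds the QR factors by hand to check that manual route. A short sketch, on synthetic data with illustrative names, confirming that the two solvers return the same coefficients:

# Default (pinv) vs. QR solver in OLS.fit (illustrative sketch)
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant

rng = np.random.RandomState(7)
X = add_constant(rng.normal(size=(50, 3)))   # intercept plus three synthetic predictors
y = X @ np.array([1.0, 0.5, -0.25, 2.0]) + rng.normal(size=50)

res_pinv = OLS(y, X).fit()                   # method="pinv" is the default
res_qr = OLS(y, X).fit(method="qr")          # solve via QR instead

np.testing.assert_allclose(res_pinv.params, res_qr.params)
print(res_qr.params)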
Example 12: fit
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def fit(self):
    """
    Fits the model and provides regression results.

    Returns
    -------
    Results : class
        Empirical likelihood regression class
    """
    exog_with = add_constant(self.exog, prepend=True)
    unrestricted_fit = OLS(self.endog, self.exog).fit()
    restricted_model = OLS(self.endog, exog_with)
    restricted_fit = restricted_model.fit()
    restricted_el = restricted_fit.el_test(
        np.array([0]), np.array([0]), ret_params=1)
    params = np.squeeze(restricted_el[3])
    beta_hat_llr = restricted_el[0]
    ls_params = np.hstack((0, unrestricted_fit.params))
    ls_llr = restricted_fit.el_test(ls_params,
                                    np.arange(self.nvar + 1, dtype=int))[0]
    return OriginResults(restricted_model, params, beta_hat_llr, ls_llr)
Example 13: test_estimates
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def test_estimates():
    mod = RecursiveLS(endog, exog)
    res = mod.fit()

    # Test for start_params
    assert_equal(mod.start_params, 0)

    # Test the RLS coefficient estimates against those from R (quantreg)
    # Due to initialization issues, we get more agreement as we get
    # farther from the initial values.
    assert_allclose(res.recursive_coefficients.filtered[:, 2:10].T,
                    results_R.iloc[:8][['beta1', 'beta2']], rtol=1e-5)
    assert_allclose(res.recursive_coefficients.filtered[:, 9:20].T,
                    results_R.iloc[7:18][['beta1', 'beta2']])
    assert_allclose(res.recursive_coefficients.filtered[:, 19:].T,
                    results_R.iloc[17:][['beta1', 'beta2']])

    # Test the RLS estimates against OLS estimates
    mod_ols = OLS(endog, exog)
    res_ols = mod_ols.fit()
    assert_allclose(res.params, res_ols.params)
Example 14: test_single_partition
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def test_single_partition():
    # tests that the results make sense if we have a single partition
    np.random.seed(435265)
    N = 200
    p = 10
    m = 1

    beta = np.random.normal(size=p)
    beta = beta * np.random.randint(0, 2, p)
    X = np.random.normal(size=(N, p))
    y = X.dot(beta) + np.random.normal(size=N)

    # test regularized OLS v. naive
    db_mod = DistributedModel(m)
    fitOLSdb = db_mod.fit(_data_gen(y, X, m), fit_kwds={"alpha": 0})

    nv_mod = DistributedModel(m, estimation_method=_est_regularized_naive,
                              join_method=_join_naive)
    fitOLSnv = nv_mod.fit(_data_gen(y, X, m), fit_kwds={"alpha": 0})

    ols_mod = OLS(y, X)
    fitOLS = ols_mod.fit(alpha=0)

    assert_allclose(fitOLSdb.params, fitOLS.params)
    assert_allclose(fitOLSnv.params, fitOLS.params)

    # test regularized
    nv_mod = DistributedModel(m, estimation_method=_est_regularized_naive,
                              join_method=_join_naive)
    fitOLSnv = nv_mod.fit(_data_gen(y, X, m), fit_kwds={"alpha": 0.1})

    ols_mod = OLS(y, X)
    fitOLS = ols_mod.fit_regularized(alpha=0.1)

    assert_allclose(fitOLSnv.params, fitOLS.params)
Example 15: test_resid_recursive
# Required import: from statsmodels.regression.linear_model import OLS [as alias]
# Or: from statsmodels.regression.linear_model.OLS import fit [as alias]
def test_resid_recursive():
    mod = RecursiveLS(endog, exog)
    res = mod.fit()

    # Test the recursive residuals against those from R (strucchange)
    # Due to initialization issues, we get more agreement as we get
    # farther from the initial values.
    assert_allclose(res.resid_recursive[2:10].T,
                    results_R.iloc[:8]['rec_resid'], atol=1e-2, rtol=1e-3)
    assert_allclose(res.resid_recursive[9:20].T,
                    results_R.iloc[7:18]['rec_resid'], atol=1e-3, rtol=1e-4)
    assert_allclose(res.resid_recursive[19:].T,
                    results_R.iloc[17:]['rec_resid'], atol=1e-4, rtol=1e-4)

    # Test the RLS estimates against those from Stata (cusum6)
    assert_allclose(res.resid_recursive[3:],
                    results_stata.iloc[3:]['rr'], atol=1e-3)

    # Test the RLS estimates against statsmodels estimates
    mod_ols = OLS(endog, exog)
    res_ols = mod_ols.fit()
    desired_resid_recursive = recursive_olsresiduals(res_ols)[4][2:]
    assert_allclose(res.resid_recursive[2:], desired_resid_recursive,
                    atol=1e-4, rtol=1e-4)