This article collects typical usage examples of the OLS attribute from the Python module statsmodels.regression.linear_model. If you are wondering what linear_model.OLS does, or how to use it, the curated code examples below should help. You can also read further about the containing module, statsmodels.regression.linear_model.
The 15 code examples of linear_model.OLS shown below are ordered by popularity.
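Before the collected examples, here is a minimal self-contained sketch of the basic OLS workflow on synthetic data, using only the standard statsmodels API:

import numpy as np
import statsmodels.api as sm

# synthetic data: y = 1 + 2*x + Gaussian noise
rng = np.random.default_rng(0)
x = rng.normal(size=100)
X = sm.add_constant(x)  # prepend an intercept column
y = 1.0 + 2.0 * x + rng.normal(size=100)

res = sm.OLS(y, X).fit()  # sm.OLS is statsmodels.regression.linear_model.OLS
print(res.params)     # estimated intercept and slope
print(res.summary())  # full regression table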
Example 1: test_calc_wdesign_mat
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def test_calc_wdesign_mat():
# separately tests that _calc_wdesign_mat
# returns sensible results
#
# regression test
np.random.seed(435265)
X = np.random.normal(size=(3, 3))
y = np.random.randint(0, 2, size=3)
beta = np.random.normal(size=3)
mod = OLS(y, X)
dmat = _calc_wdesign_mat(mod, beta, {})
assert_allclose(dmat, np.array([[1.306314, -0.024897, 1.326498],
[-0.539219, -0.483028, -0.703503],
[-3.327987, 0.524541, -0.139761]]),
atol=1e-6, rtol=0)
mod = GLM(y, X, family=Binomial())
dmat = _calc_wdesign_mat(mod, beta, {})
assert_allclose(dmat, np.array([[0.408616, -0.007788, 0.41493],
[-0.263292, -0.235854, -0.343509],
[-0.11241, 0.017718, -0.004721]]),
atol=1e-6, rtol=0)
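For context, _calc_wdesign_mat is a private helper from statsmodels' distributed estimation code. To the best of my reading, it weights the rows of the design matrix by the square root of the model's Hessian factor at the given coefficients; a sketch of that computation (an assumption, not the actual source):

import numpy as np

def calc_wdesign_mat_sketch(mod, params, hess_kwds):
    # assumed behavior: per-observation weights from the model's Hessian factor
    rhess = np.sqrt(mod.hessian_factor(np.asarray(params), **hess_kwds))
    # scale each row of the design matrix by its weight
    return rhess[:, None] * mod.exog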
Example 2: test_est_unregularized_naive
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def test_est_unregularized_naive():
# tests that the shapes of all the intermediate steps
# remain correct for unregularized naive estimation;
# does this for both OLS and GLM
np.random.seed(435265)
X = np.random.normal(size=(50, 3))
y = np.random.randint(0, 2, size=50)
beta = np.random.normal(size=3)
mod = OLS(y, X)
res = _est_unregularized_naive(mod, 0, 2, fit_kwds={"alpha": 0.5})
assert_equal(res.shape, beta.shape)
mod = GLM(y, X, family=Binomial())
res = _est_unregularized_naive(mod, 0, 2, fit_kwds={"alpha": 0.5})
assert_equal(res.shape, beta.shape)
Example 3: test_non_zero_params
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def test_non_zero_params():
# tests that the thresholding reproduces the non-zero pattern of fit_regularized
np.random.seed(435265)
N = 200
p = 10
m = 5
beta = np.random.normal(size=p)
beta = beta * np.random.randint(0, 2, p)
X = np.random.normal(size=(N, p))
y = X.dot(beta) + np.random.normal(size=N)
db_mod = DistributedModel(m, join_kwds={"threshold": 0.13})
fitOLSdb = db_mod.fit(_data_gen(y, X, m), fit_kwds={"alpha": 0.1})
ols_mod = OLS(y, X)
fitOLS = ols_mod.fit_regularized(alpha=0.1)
nz_params_db = 1 * (fitOLSdb.params != 0)
nz_params_ols = 1 * (fitOLS.params != 0)
assert_allclose(nz_params_db, nz_params_ols)
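The join_kwds={"threshold": 0.13} argument drives a hard-thresholding step in the join. A minimal sketch of hard thresholding (the helper name is hypothetical; the real logic lives inside the join method):

import numpy as np

def threshold_sketch(params, threshold):
    # zero out coefficients whose magnitude falls below the threshold,
    # so the recovered sparsity pattern can be compared to fit_regularized
    params = np.asarray(params, dtype=float).copy()
    params[np.abs(params) < threshold] = 0.0
    return params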
Example 4: test_alignment
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def test_alignment():
#Fix Issue #206
from statsmodels.regression.linear_model import OLS
from statsmodels.datasets.macrodata import load_pandas
d = load_pandas().data
#growth rates
gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
lint = d['realint'][:-1] # incorrect indexing for test purposes
endog = gs_l_realinv
# re-index because they won't conform to lint
realgdp = gs_l_realgdp.reindex(lint.index, method='bfill')
data = dict(const=np.ones_like(lint), lrealgdp=realgdp, lint=lint)
exog = pandas.DataFrame(data)
# which index do we get??
np.testing.assert_raises(ValueError, OLS, *(endog, exog))
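The assertion checks that OLS raises a ValueError rather than silently mis-aligning the pandas inputs. In practice the remedy is to align endog and exog on a shared index before constructing the model; a short sketch reusing the names from this example:

# align on the shared index first, then fit as usual
common = endog.index.intersection(exog.index)
res = OLS(endog.loc[common], exog.loc[common]).fit()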
Example 5: setup_class
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def setup_class(cls):
'''
Test Gaussian family with canonical identity link
'''
# Test Precisions
cls.decimal_resids = DECIMAL_3
cls.decimal_params = DECIMAL_2
cls.decimal_bic = DECIMAL_0
cls.decimal_bse = DECIMAL_3
from statsmodels.datasets.longley import load
cls.data = load()
cls.data.exog = add_constant(cls.data.exog, prepend=False)
params = sm.OLS(cls.data.endog, cls.data.exog).fit().params
cls.res1 = GLM(cls.data.endog, cls.data.exog,
family=sm.families.Gaussian()).fit(start_params=params)
from .results.results_glm import Longley
cls.res2 = Longley()
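The OLS fit here only supplies start_params: with the Gaussian family and its canonical identity link, GLM solves the same least-squares problem as OLS, so the coefficient estimates coincide. A minimal self-contained check of that equivalence on synthetic data:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(1)
X = sm.add_constant(rng.normal(size=(50, 2)))
y = X @ np.array([1.0, 0.5, -0.5]) + rng.normal(size=50)

ols_params = sm.OLS(y, X).fit().params
glm_params = sm.GLM(y, X, family=sm.families.Gaussian()).fit().params
np.testing.assert_allclose(glm_params, ols_params, rtol=1e-8)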
Example 6: setup_class
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def setup_class(cls):
nobs, k_exog = 100, 5
np.random.seed(987125)
x = np.random.randn(nobs, k_exog - 1)
x = add_constant(x)
y_true = x.sum(1) / 2
y = y_true + 2 * np.random.randn(nobs)
cls.endog = y
cls.exog = x
cls.idx_uc = [0, 2, 3, 4]
cls.idx_p_uc = np.array(cls.idx_uc)
cls.idx_c = [1]
cls.exogc = xc = x[:, cls.idx_uc]
mod_ols_c = OLS(y - 0.5 * x[:, 1], xc)
mod_ols_c.exog_names[:] = ['const', 'x2', 'x3', 'x4']
cls.mod2 = mod_ols_c
cls.init()
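The setup fixes the coefficient on x[:, 1] at 0.5 by moving its contribution into the response (y - 0.5 * x[:, 1]) and dropping the column. If I read the test correctly, the same restricted fit can also be written with OLS.fit_constrained; a hedged sketch reusing y and x from above:

# assumed equivalence: constrain the named coefficient x1 to 0.5
res_c = OLS(y, x).fit_constrained("x1 = 0.5")
# res_c.params[1] should equal 0.5, with the free coefficients
# matching the offset-based model mod_ols_c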
Example 7: test_compatibility
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def test_compatibility(self):
"""Hypothesis test for the compatibility of prior mean with data
"""
# TODO: should we store the OLS results? not needed so far, but maybe cache
#params_ols = np.linalg.pinv(self.model.exog).dot(self.model.endog)
#res = self.wald_test(self.model.r_matrix, q_matrix=self.model.q_matrix, use_f=False)
#from scratch
res_ols = OLS(self.model.endog, self.model.exog).fit()
r_mat = self.model.r_matrix
r_diff = self.model.q_matrix - r_mat.dot(res_ols.params)[:,None]
ols_cov_r = res_ols.cov_params(r_matrix=r_mat)
statistic = r_diff.T.dot(np.linalg.solve(ols_cov_r + self.model.sigma_prior, r_diff))
from scipy import stats
df = np.linalg.matrix_rank(self.model.sigma_prior) # same as r_mat.shape[0]
pvalue = stats.chi2.sf(statistic, df)
# TODO: return results class
return statistic, pvalue, df
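Written out, the statistic computed above is a Wald-type quadratic form. With \hat\beta the OLS estimate, R the restriction matrix, q the prior mean, and \Sigma_{\text{prior}} the prior covariance:

\chi^2 = (q - R\hat\beta)^\top \left( R\,\widehat{\operatorname{Cov}}(\hat\beta)\,R^\top + \Sigma_{\text{prior}} \right)^{-1} (q - R\hat\beta)

which is referred to a chi-squared distribution with \operatorname{rank}(\Sigma_{\text{prior}}) degrees of freedom, exactly as in the last lines of the method.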
Example 8: setup_class
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def setup_class(cls):
#cls.bse_tol = [5e-7, 5e-7]
# compare to Stata default options, iterative GMM
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res10 = mod.fit(start, maxiter=10, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False})
cls.res1 = res10
from .results_gmm_griliches_iter import results
cls.res2 = results
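The starting weight matrix comes from the instrument moment matrix; in formulas, with Z the instrument matrix and n the number of observations,

W_0^{-1} = \frac{Z^\top Z}{n}

so w0inv above is exactly Z'Z / n, which IVGMM inverts internally (hence the inv_weights keyword) to obtain the first-step GMM weights.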
Example 9: fit
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def fit(self):
"""
Fits the model and provides regression results.
Returns
-------
Results : class
Empirical likelihood regression class
"""
exog_with = add_constant(self.exog, prepend=True)
restricted_model = OLS(self.endog, exog_with)
restricted_fit = restricted_model.fit()
restricted_el = restricted_fit.el_test(
np.array([0]), np.array([0]), ret_params=1)
params = np.squeeze(restricted_el[3])
beta_hat_llr = restricted_el[0]
llf = np.sum(np.log(restricted_el[2]))
return OriginResults(restricted_model, params, beta_hat_llr, llf)
Example 10: _calc_grad
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def _calc_grad(mod, params, alpha, L1_wt, score_kwds):
    """Calculates the log-likelihood gradient for the debiasing.

    Parameters
    ----------
    mod : statsmodels model class instance
        The model for the current partition.
    params : array-like
        The estimated coefficients for the current partition.
    alpha : scalar or array-like
        The penalty weight.  If a scalar, the same penalty weight
        applies to all variables in the model.  If a vector, it
        must have the same length as `params`, and contains a
        penalty weight for each coefficient.
    L1_wt : scalar
        The fraction of the penalty given to the L1 penalty term.
        Must be between 0 and 1 (inclusive).  If 0, the fit is
        a ridge fit; if 1, it is a lasso fit.
    score_kwds : dict-like or None
        Keyword arguments for the score function.

    Returns
    -------
    An array-like object of the same dimension as `params`.

    Notes
    -----
    In general this is the gradient of

        l_k(params)

    where k corresponds to the index of the partition.  For OLS the
    score term is

        X^T (y - X params)
    """
    # negative score of the log-likelihood at the current coefficients
    grad = -mod.score(np.asarray(params), **score_kwds)
    # constant shift for the ridge (L2) share of the elastic-net penalty
    grad += alpha * (1 - L1_wt)
    return grad
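Following the code (the docstring describes only the score term), the quantity returned for an OLS model is, up to the scale factor in the Gaussian score,

g_k(\beta) = -X^\top (y - X\beta) + \alpha\,(1 - w_{L1})

where w_{L1} is L1_wt and the second term is the constant shift added for the ridge share of the elastic-net penalty weight.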
Example 11: test_calc_grad
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def test_calc_grad():
# separately tests that _calc_grad returns
# sensible results
#
# regression test
np.random.seed(435265)
X = np.random.normal(size=(50, 3))
y = np.random.randint(0, 2, size=50)
beta = np.random.normal(size=3)
mod = OLS(y, X)
grad = _calc_grad(mod, beta, 0.01, 1, {})
assert_allclose(grad, np.array([19.75816, -6.62307, 7.324644]),
atol=1e-6, rtol=0)
Example 12: test_est_regularized_debiased
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def test_est_regularized_debiased():
# tests that the shapes of all the intermediate steps
# remain correct for regularized debiased estimation;
# does this for both OLS and GLM
np.random.seed(435265)
X = np.random.normal(size=(50, 3))
y = np.random.randint(0, 2, size=50)
beta = np.random.normal(size=3)
mod = OLS(y, X)
res = _est_regularized_debiased(mod, 0, 2, fit_kwds={"alpha": 0.5})
bhat = res[0]
grad = res[1]
ghat_l = res[2]
that_l = res[3]
assert_(isinstance(res, tuple))
assert_equal(bhat.shape, beta.shape)
assert_equal(grad.shape, beta.shape)
assert_(isinstance(ghat_l, list))
assert_(isinstance(that_l, list))
assert_equal(len(ghat_l), len(that_l))
assert_equal(ghat_l[0].shape, (2,))
assert_(isinstance(that_l[0], float))
mod = GLM(y, X, family=Binomial())
res = _est_regularized_debiased(mod, 0, 2, fit_kwds={"alpha": 0.5})
bhat = res[0]
grad = res[1]
ghat_l = res[2]
that_l = res[3]
assert_(isinstance(res, tuple))
assert_equal(bhat.shape, beta.shape)
assert_equal(grad.shape, beta.shape)
assert_(isinstance(ghat_l, list))
assert_(isinstance(that_l, list))
assert_equal(len(ghat_l), len(that_l))
assert_equal(ghat_l[0].shape, (2,))
assert_(isinstance(that_l[0], float))
Example 13: test_join_debiased
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def test_join_debiased():
# tests that the results of all the intermediate steps
# remain correct for the debiased join; does this for both OLS and GLM
#
# regression test
np.random.seed(435265)
X = np.random.normal(size=(50, 3))
y = np.random.randint(0, 2, size=50)
mod = OLS(y, X)
res_l = []
for i in range(2):
res = _est_regularized_debiased(mod, i, 2, fit_kwds={"alpha": 0.1})
res_l.append(res)
joined = _join_debiased(res_l)
assert_allclose(joined, np.array([-0.167548, -0.016567, -0.34414]),
atol=1e-6, rtol=0)
mod = GLM(y, X, family=Binomial())
res_l = []
for i in range(2):
res = _est_regularized_debiased(mod, i, 2, fit_kwds={"alpha": 0.1})
res_l.append(res)
joined = _join_debiased(res_l)
assert_allclose(joined, np.array([-0.164515, -0.412854, -0.223955]),
atol=1e-6, rtol=0)
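A hedged sketch of what the debiased join computes: in the debiased distributed-estimation literature (Javanmard-Montanari style corrections), the combined estimate averages the per-partition coefficients and gradients and applies an approximate inverse-covariance correction, roughly

\hat\beta_{\text{debiased}} \approx \frac{1}{m}\sum_{k=1}^{m}\hat\beta_k \;-\; \hat\Theta \left( \frac{1}{m}\sum_{k=1}^{m} g_k \right)

where \hat\Theta is assembled from the per-partition pieces (the ghat_l and that_l lists of Example 12). The exact construction lives in _join_debiased.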
Example 14: test_join_naive
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def test_join_naive():
# tests that the results of all the intermediate steps
# remain correct for the naive join; does this for both OLS and GLM
#
# regression test
np.random.seed(435265)
X = np.random.normal(size=(50, 3))
y = np.random.randint(0, 2, size=50)
mod = OLS(y, X)
res_l = []
for i in range(2):
res = _est_regularized_naive(mod, i, 2, fit_kwds={"alpha": 0.1})
res_l.append(res)
joined = _join_naive(res_l)
assert_allclose(joined, np.array([-0.020757, 0., 0.]),
atol=1e-6, rtol=0)
mod = GLM(y, X, family=Binomial())
res_l = []
for i in range(2):
res = _est_regularized_naive(mod, i, 2, fit_kwds={"alpha": 0.1})
res_l.append(res)
joined = _join_naive(res_l)
assert_allclose(joined, np.array([0., 0., 0.]),
atol=1e-6, rtol=0)
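The naive join, by contrast, is simply the coordinate-wise average of the per-partition estimates:

\hat\beta_{\text{naive}} = \frac{1}{m}\sum_{k=1}^{m}\hat\beta_k

which is why Example 15 below can check it against the full-data fit when every partition sees identical data.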
Example 15: test_repeat_partition
# Required import: from statsmodels.regression import linear_model [as alias]
# Or: from statsmodels.regression.linear_model import OLS [as alias]
def test_repeat_partition():
# tests that if we use identical partitions the average is the same
# as the estimate for the full data
np.random.seed(435265)
N = 200
p = 10
m = 1
beta = np.random.normal(size=p)
beta = beta * np.random.randint(0, 2, p)
X = np.random.normal(size=(N, p))
y = X.dot(beta) + np.random.normal(size=N)
def _rep_data_gen(endog, exog, partitions):
"""partitions data"""
n_exog = exog.shape[0]
n_part = np.ceil(n_exog / partitions)
ii = 0
while ii < n_exog:
yield endog, exog
ii += int(n_part)
nv_mod = DistributedModel(m, estimation_method=_est_regularized_naive,
join_method=_join_naive)
fitOLSnv = nv_mod.fit(_rep_data_gen(y, X, m), fit_kwds={"alpha": 0.1})
ols_mod = OLS(y, X)
fitOLS = ols_mod.fit_regularized(alpha=0.1)
assert_allclose(fitOLSnv.params, fitOLS.params)
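Pulling the distributed examples together, here is a minimal end-to-end sketch of the workflow these tests exercise. The generator data_gen_sketch is hypothetical (the tests use their own _data_gen helper); it slices the rows into contiguous partitions:

import numpy as np
from statsmodels.base.distributed_estimation import DistributedModel

def data_gen_sketch(endog, exog, partitions):
    # hypothetical helper: yield one contiguous block of rows per partition
    n = exog.shape[0]
    step = int(np.ceil(n / partitions))
    for start in range(0, n, step):
        yield endog[start:start + step], exog[start:start + step]

rng = np.random.default_rng(2)
X = rng.normal(size=(200, 10))
beta = rng.normal(size=10) * rng.integers(0, 2, size=10)
y = X @ beta + rng.normal(size=200)

m = 4
db_mod = DistributedModel(m)  # defaults assumed: OLS with the debiased join
fit_db = db_mod.fit(data_gen_sketch(y, X, m), fit_kwds={"alpha": 0.1})
print(fit_db.params)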