This page collects typical usage examples of the Python function statsmodels.tsa.tsatools.lagmat. If you are unsure what lagmat does, how to call it, or what it is typically used for, the selected examples below may help.
Fifteen code examples of lagmat are shown below, sorted by popularity by default.
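As a quick reference before the examples, here is a minimal sketch of how the trim and original options change lagmat's output, following the behavior documented by statsmodels; the series 1..5 and the values in the comments are illustrative.

import numpy as np
from statsmodels.tsa.tsatools import lagmat

x = np.arange(1., 6.)  # [1, 2, 3, 4, 5]

# original='ex' (the default) returns only the lag columns [x_{t-1}, x_{t-2}];
# trim='forward' keeps all nobs rows and pads missing lags with zeros.
lagmat(x, maxlag=2, trim='forward', original='ex')
# [[0. 0.]
#  [1. 0.]
#  [2. 1.]
#  [3. 2.]
#  [4. 3.]]

# trim='both' drops the rows with incomplete lags (nobs - maxlag rows remain);
# original='in' prepends the contemporaneous column x_t.
lagmat(x, maxlag=2, trim='both', original='in')
# [[3. 2. 1.]
#  [4. 3. 2.]
#  [5. 4. 3.]]

# original='sep' returns the lag columns and the contemporaneous values separately.
lags, current = lagmat(x, maxlag=2, trim='both', original='sep')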
Example 1: _process_inputs
def _process_inputs(self, X, E=None, lengths=None):
    if self.n_features == 1:
        lagged = None
        if lengths is None:
            lagged = lagmat(X, maxlag=self.n_lags, trim='forward',
                            original='ex')
        else:
            lagged = np.zeros((len(X), self.n_lags))
            for i, j in iter_from_X_lengths(X, lengths):
                lagged[i:j, :] = lagmat(X[i:j], maxlag=self.n_lags,
                                        trim='forward', original='ex')
        return {'obs': X.reshape(-1, 1),
                'lagged': lagged.reshape(-1, self.n_features, self.n_lags)}
    else:
        lagged = np.zeros((X.shape[0], self.n_features, self.n_lags))
        if lengths is None:
            tem = lagmat(X, maxlag=self.n_lags, trim='forward',
                         original='ex')
            for sample in range(X.shape[0]):
                lagged[sample] = np.reshape(
                    tem[sample], (self.n_features, self.n_lags), 'F')
        else:
            for i, j in iter_from_X_lengths(X, lengths):
                lagged[i:j, :] = lagmat(X[i:j], maxlag=self.n_lags,
                                        trim='forward', original='ex')
            lagged = lagged.reshape(-1, self.n_features, self.n_lags)
        return {'obs': X, 'lagged': lagged}
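The Fortran-order reshape above unpacks lagmat's column layout for multivariate input: for an (nobs, nvar) array the lag columns are ordered variable-first within each lag block. A small sketch of that unpacking; the array x and the shapes are illustrative assumptions.

import numpy as np
from statsmodels.tsa.tsatools import lagmat

x = np.arange(1., 9.).reshape(4, 2)     # 4 observations, 2 features
tem = lagmat(x, maxlag=2, trim='forward', original='ex')
# tem has shape (4, 4); its columns are [x1_{t-1}, x2_{t-1}, x1_{t-2}, x2_{t-2}]

# Reshaping one row in Fortran order groups the lags of each feature together,
# giving a (n_features, n_lags) block per observation.
block = np.reshape(tem[3], (2, 2), 'F')
# [[x1_{t-1}, x1_{t-2}],
#  [x2_{t-1}, x2_{t-2}]]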
Example 2: _fit_start_params_hr
def _fit_start_params_hr(self, order):
    """
    Get starting parameters for fit.

    Parameters
    ----------
    order : iterable
        (p,q,k) - AR lags, MA lags, and number of exogenous variables
        including the constant.

    Returns
    -------
    start_params : array
        A first guess at the starting parameters.

    Notes
    -----
    If necessary, fits an AR process with the lag length selected according
    to best BIC. Obtain the residuals. Then fit an ARMA(p,q) model via
    OLS using these residuals for a first approximation. Uses a separate
    OLS regression to find the coefficients of exogenous variables.

    References
    ----------
    Hannan, E.J. and Rissanen, J. 1982. "Recursive estimation of mixed
    autoregressive-moving average order." `Biometrika`. 69.1.
    """
    p, q, k = order
    start_params = zeros((p + q + k))
    endog = self.endog.copy()  # copy because overwritten
    exog = self.exog
    if k != 0:
        ols_params = GLS(endog, exog).fit().params
        start_params[:k] = ols_params
        endog -= np.dot(exog, ols_params).squeeze()
    if q != 0:
        if p != 0:
            armod = AR(endog).fit(ic='bic', trend='nc')
            arcoefs_tmp = armod.params
            p_tmp = armod.k_ar
            resid = endog[p_tmp:] - np.dot(lagmat(endog, p_tmp,
                                                  trim='both'), arcoefs_tmp)
            if p < p_tmp + q:
                endog_start = p_tmp + q - p
                resid_start = 0
            else:
                endog_start = 0
                resid_start = p - p_tmp - q
            lag_endog = lagmat(endog, p, 'both')[endog_start:]
            lag_resid = lagmat(resid, q, 'both')[resid_start:]
            # stack ar lags and resids
            X = np.column_stack((lag_endog, lag_resid))
            coefs = GLS(endog[max(p_tmp + q, p):], X).fit().params
            start_params[k:k + p + q] = coefs
        else:
            start_params[k + p:k + p + q] = yule_walker(endog, order=q)[0]
    if q == 0 and p != 0:
        arcoefs = yule_walker(endog, order=p)[0]
        start_params[k:k + p] = arcoefs
    return start_params
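The Hannan-Rissanen step above regresses the series on its own lags and on lagged residuals from a long autoregression, with trim='both' ensuring every regressor row is fully observed. A compact sketch of the same two-step idea on synthetic data; the ARMA orders, the long AR order p_long, and the simulated series are illustrative assumptions, and plain OLS stands in for the GLS call used above.

import numpy as np
from statsmodels.tsa.tsatools import lagmat
from statsmodels.regression.linear_model import OLS

rng = np.random.default_rng(0)
y = rng.standard_normal(500)

p, q, p_long = 1, 1, 6                       # target ARMA(p, q), long AR order
# Step 1: long autoregression to proxy the innovations.
ar_x = lagmat(y, p_long, trim='both')        # (nobs - p_long, p_long)
ar_coefs = OLS(y[p_long:], ar_x).fit().params
resid = y[p_long:] - ar_x @ ar_coefs

# Step 2: regress y on p of its own lags and q lags of the proxy residuals,
# trimming the longer lag matrix so both regressor blocks line up in time.
lag_y = lagmat(y, p, trim='both')[p_long + q - p:]
lag_e = lagmat(resid, q, trim='both')
X = np.column_stack((lag_y, lag_e))
start_params = OLS(y[p_long + q:], X).fit().params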
Example 3: _process_inputs
def _process_inputs(self, X, E=None, lengths=None):
    # Makes sure inputs have correct shape, generates features
    lagged = None
    if lengths is None:
        lagged = lagmat(X, maxlag=self.n_lags, trim='forward',
                        original='ex')
    else:
        lagged = np.zeros((len(X), self.n_lags))
        for i, j in iter_from_X_lengths(X, lengths):
            lagged[i:j, :] = lagmat(X[i:j], maxlag=self.n_lags,
                                    trim='forward', original='ex')
    inputs = {'obs': X.reshape(-1, 1),
              'lagged': lagged}
    return inputs
Example 4: _init_model
def _init_model(self):
    """Should be called whenever the model is initialized or changed"""
    self._reformat_lags()
    self._check_specification()
    nobs_orig = self._y.shape[0]
    if self.constant:
        reg_constant = ones((nobs_orig, 1), dtype=np.float64)
    else:
        reg_constant = ones((nobs_orig, 0), dtype=np.float64)
    if self.lags is not None and nobs_orig > 0:
        maxlag = np.max(self.lags)
        lag_array = lagmat(self._y, maxlag)
        reg_lags = empty((nobs_orig, self._lags.shape[1]), dtype=np.float64)
        for i, lags in enumerate(self._lags.T):
            reg_lags[:, i] = np.mean(lag_array[:, lags[0]:lags[1]], 1)
    else:
        reg_lags = empty((nobs_orig, 0), dtype=np.float64)
    if self._x is not None:
        reg_x = self._x
    else:
        reg_x = empty((nobs_orig, 0), dtype=np.float64)
    self.regressors = np.hstack((reg_constant, reg_lags, reg_x))
    first_obs, last_obs = self._indices
    self.regressors = self.regressors[first_obs:last_obs, :]
    self._y_adj = self._y[first_obs:last_obs]
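This is the regressor construction of a HAR-type mean model: each column of reg_lags averages a contiguous block of lag columns. A minimal sketch of the same construction with hard-coded HAR-style lag windows; the data and the window choices are illustrative assumptions.

import numpy as np
from statsmodels.tsa.tsatools import lagmat

rng = np.random.default_rng(0)
y = rng.standard_normal(250)

# HAR-style windows over lags 1, 1-5 and 1-22, expressed as [start, stop) pairs
# into the columns of lagmat(y, 22), which hold [y_{t-1}, ..., y_{t-22}].
windows = np.array([[0, 1], [0, 5], [0, 22]])
lag_array = lagmat(y, 22)                  # default trim='forward': 250 zero-padded rows
regressors = np.column_stack(
    [lag_array[:, lo:hi].mean(1) for lo, hi in windows])
# regressors[:, 0] = y_{t-1}, regressors[:, 1] = mean of y_{t-1..t-5}, and so on.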
Example 5: _estimate_df_regression
def _estimate_df_regression(y, trend, lags):
    """Helper function that estimates the core (A)DF regression

    Parameters
    ----------
    y : array-like, (nobs,)
        The data for the lag selection
    trend : str, {'nc','c','ct','ctt'}
        The trend order
    lags : int
        The number of lags to include in the ADF regression

    Returns
    -------
    ols_res : OLSResults
        A results class object produced by OLS.fit()

    Notes
    -----
    See statsmodels.regression.linear_model.OLS for details on the results
    returned
    """
    delta_y = diff(y)
    rhs = lagmat(delta_y[:, None], lags, trim='both', original='in')
    nobs = rhs.shape[0]
    lhs = rhs[:, 0].copy()  # lag-0 values are the lhs; is copy() necessary?
    rhs[:, 0] = y[-nobs - 1:-1]  # replace lag 0 with the lagged level of y
    if trend != 'nc':
        rhs = add_trend(rhs[:, :lags + 1], trend)
    return OLS(lhs, rhs).fit()
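With original='in', the first column of rhs is the contemporaneous difference Δy_t; copying it out as the dependent variable and overwriting the column with the lagged level y_{t-1} yields the usual ADF design matrix [y_{t-1}, Δy_{t-1}, ..., Δy_{t-lags}]. A small sketch of the resulting column layout; the series is an illustrative assumption.

import numpy as np
from statsmodels.tsa.tsatools import lagmat

y = np.array([1., 3., 2., 5., 4., 6., 7.])
lags = 2
delta_y = np.diff(y)
rhs = lagmat(delta_y[:, None], lags, trim='both', original='in')
nobs = rhs.shape[0]                  # len(delta_y) - lags = 4
lhs = rhs[:, 0].copy()               # dependent variable: delta_y_t
rhs[:, 0] = y[-nobs - 1:-1]          # column 0 becomes the level y_{t-1}
# rhs rows are now [y_{t-1}, delta_y_{t-1}, delta_y_{t-2}]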
Example 6: pacf_ols
def pacf_ols(x, nlags=40):
    '''Calculate partial autocorrelations

    Parameters
    ----------
    x : 1d array
        observations of time series for which pacf is calculated
    nlags : int
        Number of lags for which pacf is returned. Lag 0 is included and is
        always 1.

    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements

    Notes
    -----
    This solves a separate OLS estimation for each desired lag.
    '''
    # TODO: add warnings for Yule-Walker
    # NOTE: demeaning and not using a constant gave incorrect answers?
    # JP: demeaning should have a better estimate of the constant
    #     maybe we can compare small sample properties with a MonteCarlo
    xlags, x0 = lagmat(x, nlags, original='sep')
    # xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
    xlags = add_constant(xlags)
    pacf = [1.]
    for k in range(1, nlags + 1):
        res = OLS(x0[k:], xlags[k:, :k + 1]).fit()
        # np.take(xlags[k:], range(1,k+1)+[-1],
        pacf.append(res.params[-1])
    return np.array(pacf)
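Here original='sep' returns the lag matrix and the contemporaneous values in one call, so each pass of the loop can regress x_t on a constant plus its first k lags and keep the last coefficient as the lag-k partial autocorrelation. A compact restatement of one iteration; the series, nlags, and k are illustrative assumptions.

import numpy as np
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.tools import add_constant
from statsmodels.regression.linear_model import OLS

rng = np.random.default_rng(0)
x = rng.standard_normal(200)

nlags, k = 10, 3
xlags, x0 = lagmat(x, nlags, original='sep')   # lag columns and current values
xlags = add_constant(xlags, prepend=True)      # column 0 is the intercept

# Partial autocorrelation at lag k: the coefficient on x_{t-k} when x_t is
# regressed on a constant and lags 1..k.
res = OLS(x0[k:], xlags[k:, :k + 1]).fit()
pacf_k = res.params[-1]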
Example 7: load_
def load_(infile, nLags=1000):
    from statsmodels.tsa.tsatools import lagmat
    assert infile.endswith('.npy')
    X, Y = np.load(infile)
    X0 = lagmat(X, nLags, trim='both')
    ind = len(X) - len(X0)
    return X0, Y[ind:]
Example 8: moment_ret
def moment_ret(self, theta_ret, theta_vol=None, uarg=None,
               zlag=1, **kwargs):
    """Moment conditions (returns) for spectral GMM estimator.

    Parameters
    ----------
    theta_ret : (2, ) array
        Vector of model parameters. [phi, price_ret]
    theta_vol : (3, ) array
        Vector of model parameters. [mean, rho, delta]
    uarg : (nu, ) array
        Grid to evaluate a and b functions
    zlag : int
        Number of lags to use for the instrument

    Returns
    -------
    moment : (nobs, nmoms) array
        Matrix of moment condition restrictions

    Raises
    ------
    ValueError
    """
    if uarg is None:
        raise ValueError("uarg is missing!")
    vollag, vol = lagmat(self.vol, maxlag=zlag,
                         original='sep', trim='both')
    # Number of observations after truncation
    nobs = vol.shape[0]
    # Number of moments
    nmoms = 2 * uarg.shape[0] * (zlag + 1)
    # Change class attribute with the current theta
    param = ARGparams()
    try:
        param.update(theta_ret=theta_ret, theta_vol=theta_vol)
    except ValueError:
        return np.ones((nobs, nmoms)) * 1e10
    # Must be (nobs, nu) array
    try:
        cfun = self.char_fun_ret(uarg, param)[zlag-1:]
    except ValueError:
        return np.ones((nobs, nmoms)) * 1e10
    # Must be (nobs, nu) array
    error = np.exp(-self.ret[zlag:, np.newaxis] * uarg) - cfun
    # Instruments, (nobs, ninstr) array
    instr = np.hstack([np.exp(-1j * vollag), np.ones((nobs, 1))])
    # Must be (nobs, nmoms) array
    moment = error[:, np.newaxis, :] * instr[:, :, np.newaxis]
    moment = moment.reshape((nobs, nmoms // 2))
    # (nobs, 2 * ninstr)
    moment = np.hstack([np.real(moment), np.imag(moment)])
    return moment
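The lagmat call with original='sep' and trim='both' returns the lagged volatility, which serves as the instrument, and the contemporaneous volatility already aligned on the same rows. A bare-bones illustration of that alignment; the volatility series is an illustrative assumption.

import numpy as np
from statsmodels.tsa.tsatools import lagmat

vol = np.array([0.8, 1.1, 0.9, 1.3, 1.0])
vollag, vol_t = lagmat(vol, maxlag=1, original='sep', trim='both')
# vollag[:, 0] = vol_{t-1} = [0.8, 1.1, 0.9, 1.3]
# vol_t[:, 0]  = vol_t     = [1.1, 0.9, 1.3, 1.0]
instr = np.hstack([np.exp(-1j * vollag), np.ones((vol_t.shape[0], 1))])
# instr has one complex-exponential column per lag plus a constant column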
Example 9: __init__
def __init__(self, endog, k_regimes, order, trend='c', exog=None,
             exog_tvtp=None, switching_ar=True, switching_trend=True,
             switching_exog=False, switching_variance=False,
             dates=None, freq=None, missing='none'):
    # Properties
    self.switching_ar = switching_ar

    # Switching options
    if self.switching_ar is True or self.switching_ar is False:
        self.switching_ar = [self.switching_ar] * order
    elif not len(self.switching_ar) == order:
        raise ValueError('Invalid iterable passed to `switching_ar`.')

    # Initialize the base model
    super(MarkovAutoregression, self).__init__(
        endog, k_regimes, trend=trend, exog=exog, order=order,
        exog_tvtp=exog_tvtp, switching_trend=switching_trend,
        switching_exog=switching_exog,
        switching_variance=switching_variance, dates=dates, freq=freq,
        missing=missing)

    # Sanity checks
    if self.nobs <= self.order:
        raise ValueError('Must have more observations than the order of'
                         ' the autoregression.')

    # Autoregressive exog
    self.exog_ar = lagmat(endog, self.order)[self.order:]

    # Reshape other datasets
    self.nobs -= self.order
    self.orig_endog = self.endog
    self.endog = self.endog[self.order:]
    if self._k_exog > 0:
        self.orig_exog = self.exog
        self.exog = self.exog[self.order:]

    # Reset the ModelData datasets
    self.data.endog, self.data.exog = (
        self.data._convert_endog_exog(self.endog, self.exog))

    # Reset indexes, if provided
    if self.data.row_labels is not None:
        self.data._cache['row_labels'] = (
            self.data.row_labels[self.order:])
    if self._index is not None:
        if self._index_generated:
            self._index = self._index[:-self.order]
        else:
            self._index = self._index[self.order:]

    # Parameters
    self.parameters['autoregressive'] = self.switching_ar

    # Cache an array for holding slices
    self._predict_slices = [slice(None, None, None)] * (self.order + 1)
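The pattern lagmat(endog, order)[order:] drops the zero-padded leading rows produced by the default trim='forward'; for a 1-D series it should give the same matrix as requesting trim='both' directly. A quick check of that equivalence; the series is an illustrative assumption.

import numpy as np
from statsmodels.tsa.tsatools import lagmat

y = np.arange(1., 8.)
order = 2
exog_ar = lagmat(y, order)[order:]                 # default trim='forward'
assert np.array_equal(exog_ar, lagmat(y, order, trim='both'))
# The remaining rows align with y[order:], so the trimmed endog and exog_ar match up.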
Example 10: _df_select_lags
def _df_select_lags(y, trend, max_lags, method):
    """
    Helper method to determine the best lag length in DF-like regressions

    Parameters
    ----------
    y : array-like, (nobs,)
        The data for the lag selection exercise
    trend : str, {'nc','c','ct','ctt'}
        The trend order
    max_lags : int
        The maximum number of lags to check. This setting affects all
        estimation since the sample is adjusted by max_lags when
        fitting the models
    method : str, {'AIC','BIC','t-stat'}
        The method to use when estimating the model

    Returns
    -------
    best_ic : float
        The information criterion at the selected lag
    best_lag : int
        The selected lag
    all_res : list
        List of OLS results from fitting max_lag + 1 models

    Notes
    -----
    See statsmodels.tsa.stattools._autolag for details. If max_lags is None,
    the default value of 12 * (nobs/100)**(1/4) is used.
    """
    nobs = y.shape[0]
    delta_y = diff(y)

    if max_lags is None:
        max_lags = int(ceil(12. * power(nobs / 100., 1 / 4.)))

    rhs = lagmat(delta_y[:, None], max_lags, trim='both', original='in')
    nobs = rhs.shape[0]
    rhs[:, 0] = y[-nobs - 1:-1]  # replace lag 0 with the lagged level of y
    lhs = delta_y[-nobs:]

    if trend != 'nc':
        full_rhs = add_trend(rhs, trend, prepend=True)
    else:
        full_rhs = rhs

    start_lag = full_rhs.shape[1] - rhs.shape[1] + 1
    ic_best, best_lag, all_res = _autolag(OLS, lhs, full_rhs, start_lag,
                                          max_lags, method, regresults=True)
    # To get the correct number of lags, subtract start_lag since
    # lags 0, 1, ..., start_lag - 1 were not actual lags, but other variables
    best_lag -= start_lag
    return ic_best, best_lag, all_res
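Because add_trend(..., prepend=True) puts the deterministic terms in front of the lag columns, start_lag counts how many leading columns are always kept (trend terms plus the level y_{t-1}) before the lag search begins. A short sketch of that bookkeeping; the series and the 'ct' trend choice are illustrative assumptions.

import numpy as np
from statsmodels.tsa.tsatools import lagmat, add_trend

rng = np.random.default_rng(0)
y = rng.standard_normal(100).cumsum()
max_lags = 4

delta_y = np.diff(y)
rhs = lagmat(delta_y[:, None], max_lags, trim='both', original='in')
nobs = rhs.shape[0]
rhs[:, 0] = y[-nobs - 1:-1]                    # level term y_{t-1}
full_rhs = add_trend(rhs, 'ct', prepend=True)  # [const, trend, y_{t-1}, lags...]

start_lag = full_rhs.shape[1] - rhs.shape[1] + 1
# 'ct' adds 2 columns, so start_lag == 3: columns 0..2 are always included and
# the lag search only runs over the delta_y lag columns that follow.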
Example 11: _stackX
def _stackX(self, k_ar, trend):
    """
    Private method to build the RHS matrix for estimation.

    Columns are trend terms then lags.
    """
    endog = self.endog
    X = lagmat(endog, maxlag=k_ar, trim='both')
    k_trend = util.get_trendorder(trend)
    if k_trend:
        X = add_trend(X, prepend=True, trend=trend)
    self.k_trend = k_trend
    return X
Example 12: _df_select_lags
def _df_select_lags(y, trend, max_lags, method):
    """
    Helper method to determine the best lag length in DF-like regressions

    Parameters
    ----------
    y : array
        The data for the lag selection exercise
    trend : {'nc','c','ct','ctt'}
        The trend order
    max_lags : int
        The maximum number of lags to check. This setting affects all
        estimation since the sample is adjusted by max_lags when
        fitting the models
    method : {'AIC','BIC','t-stat'}
        The method to use when estimating the model

    Returns
    -------
    best_ic : float
        The information criterion at the selected lag
    best_lag : int
        The selected lag

    Notes
    -----
    If max_lags is None, the default value of 12 * (nobs/100)**(1/4) is used.
    """
    nobs = y.shape[0]
    delta_y = diff(y)

    if max_lags is None:
        max_lags = int(ceil(12. * power(nobs / 100., 1 / 4.)))

    rhs = lagmat(delta_y[:, None], max_lags, trim='both', original='in')
    nobs = rhs.shape[0]
    rhs[:, 0] = y[-nobs - 1:-1]  # replace lag 0 with the lagged level of y
    lhs = delta_y[-nobs:]

    if trend != 'nc':
        full_rhs = add_trend(rhs, trend, prepend=True)
    else:
        full_rhs = rhs

    start_lag = full_rhs.shape[1] - rhs.shape[1] + 1
    ic_best, best_lag = _autolag_ols(lhs, full_rhs, start_lag, max_lags, method)
    return ic_best, best_lag
Example 13: fit
def fit(self, nlags):
    '''estimate parameters using ols

    Parameters
    ----------
    nlags : integer
        number of lags to include in regression, same for all variables

    Returns
    -------
    None, but attaches

    arhat : array (nlags, nvar, nvar)
        full lag polynomial array
    arlhs : array (nlags-1, nvar, nvar)
        reduced lag polynomial for left hand side
    other statistics as returned by linalg.lstsq : need to be completed

    Notes
    -----
    This currently assumes all parameters are estimated without restrictions.
    In this case SUR is identical to OLS. Estimation results are attached to
    the class instance.
    '''
    self.nlags = nlags  # without current period
    nvars = self.nvars
    # TODO: ar2s looks like a module variable, bug?
    # lmat = lagmat(ar2s, nlags, trim='both', original='in')
    lmat = lagmat(self.y, nlags, trim='both', original='in')
    self.yred = lmat[:, :nvars]
    self.xred = lmat[:, nvars:]
    res = np.linalg.lstsq(self.xred, self.yred, rcond=-1)
    self.estresults = res
    self.arlhs = res[0].reshape(nlags, nvars, nvars)
    self.arhat = ar2full(self.arlhs)
    self.rss = res[1]
    self.xredrank = res[2]
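With original='in' and trim='both', the first nvars columns of lmat are the current values of every variable and the remaining columns are their lags, so a single least-squares solve estimates all VAR equations at once. A tiny sketch of that split; the bivariate series and the lag order are illustrative assumptions.

import numpy as np
from statsmodels.tsa.tsatools import lagmat

rng = np.random.default_rng(0)
y = rng.standard_normal((200, 2))          # two variables
nlags, nvars = 2, y.shape[1]

lmat = lagmat(y, nlags, trim='both', original='in')
yred = lmat[:, :nvars]                     # current values y_t for both variables
xred = lmat[:, nvars:]                     # [y_{t-1}, y_{t-2}] columns for both variables
coefs, rss, rank, sv = np.linalg.lstsq(xred, yred, rcond=None)
ar = coefs.reshape(nlags, nvars, nvars)    # ar[k] holds the lag-(k+1) coefficient block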
Example 14: _em_autoregressive
def _em_autoregressive(self, result, betas, tmp=None):
    """
    EM step for autoregressive coefficients and variances
    """
    if tmp is None:
        tmp = np.sqrt(result.smoothed_marginal_probabilities)

    resid = np.zeros((self.k_regimes, self.nobs + self.order))
    resid[:] = self.orig_endog
    if self._k_exog > 0:
        for i in range(self.k_regimes):
            resid[i] -= np.dot(self.orig_exog, betas[i])

    # The difference between this and `_em_exog` is that here we have a
    # different endog and exog for each regime
    coeffs = np.zeros((self.k_regimes,) + (self.order,))
    variance = np.zeros((self.k_regimes,))
    exog = np.zeros((self.nobs, self.order))
    for i in range(self.k_regimes):
        endog = resid[i, self.order:]
        exog = lagmat(resid[i], self.order)[self.order:]
        tmp_endog = tmp[i] * endog
        tmp_exog = tmp[i][:, None] * exog
        coeffs[i] = np.dot(np.linalg.pinv(tmp_exog), tmp_endog)
        if self.switching_variance:
            tmp_resid = endog - np.dot(exog, coeffs[i])
            variance[i] = (np.sum(
                tmp_resid**2 * result.smoothed_marginal_probabilities[i]) /
                np.sum(result.smoothed_marginal_probabilities[i]))
        else:
            tmp_resid = tmp_endog - np.dot(tmp_exog, coeffs[i])
            variance[i] = np.sum(tmp_resid**2)

    # Variances
    if not self.switching_variance:
        variance = variance.sum() / self.nobs
    return coeffs, variance
Example 15: fnn
def fnn(data, maxm):
    """
    Compute the embedding dimension of a time series, used to build the phase
    space, via the false nearest neighbors criterion.

    data --> time series
    maxm --> maximum embedding dimension
    """
    RT = 15.0
    AT = 2
    sigmay = np.std(data, ddof=1)
    nyr = len(data)
    m = maxm
    EM = lagmat(data, maxlag=m - 1)
    EEM = np.asarray([EM[j, :] for j in range(m - 1, EM.shape[0])])
    embedm = maxm
    for k in range(AT, EEM.shape[1] + 1):
        fnn1 = []
        fnn2 = []
        Ma = EEM[:, range(k)]
        D = dist(Ma)
        for i in range(1, EEM.shape[0] - m - k):
            # print(D.shape)
            # print(D[i, range(i - 1)])
            d = D[i, :]
            pdnz = np.where(d > 0)
            dnz = d[pdnz]
            Rm = np.min(dnz)
            l = np.where(d == Rm)
            l = l[0]
            l = l[len(l) - 1]
            if l + m + k - 1 < nyr:
                fnn1.append(np.abs(data[i + m + k - 1] - data[l + m + k - 1]) / Rm)
                fnn2.append(np.abs(data[i + m + k - 1] - data[l + m + k - 1]) / sigmay)
        Ind1 = np.where(np.asarray(fnn1) > RT)
        Ind2 = np.where(np.asarray(fnn2) > AT)
        if (len(Ind1[0]) / float(len(fnn1)) < 0.1 and
                len(Ind2[0]) / float(len(fnn2)) < 0.1):
            embedm = k
            break
    return embedm
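Here lagmat builds the delay-embedding matrix: EM holds the first m-1 delayed copies of the series, and dropping the first m-1 rows keeps only fully populated delay vectors. A minimal sketch of the same construction; the series and the embedding dimension are illustrative assumptions.

import numpy as np
from statsmodels.tsa.tsatools import lagmat

data = np.sin(np.linspace(0.0, 20.0, 100))
m = 4                                   # candidate embedding dimension

# Delay vectors [x_t, x_{t-1}, ..., x_{t-(m-1)}] with no zero padding.
EEM = lagmat(data, maxlag=m - 1, trim='both', original='in')
# Equivalent to building EM = lagmat(data, maxlag=m - 1) and dropping the first
# m - 1 rows, except that this version also keeps the current value x_t.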